diff --git a/constraint/go.mod b/constraint/go.mod index 3236a8f8c..58930d935 100644 --- a/constraint/go.mod +++ b/constraint/go.mod @@ -1,106 +1,109 @@ module github.com/open-policy-agent/frameworks/constraint -go 1.22.0 +go 1.23.0 -toolchain go1.22.5 +toolchain go1.23.5 require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/golang/glog v1.2.1 + github.com/golang/glog v1.2.4 github.com/google/go-cmp v0.6.0 - github.com/onsi/gomega v1.33.1 - github.com/open-policy-agent/opa v0.68.0 + github.com/onsi/gomega v1.36.2 + github.com/open-policy-agent/opa v1.1.0 + github.com/sashabaranov/go-openai v1.36.1 + github.com/sethvargo/go-retry v0.3.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - golang.org/x/net v0.30.0 - k8s.io/api v0.30.3 - k8s.io/apiextensions-apiserver v0.30.3 - k8s.io/apimachinery v0.30.3 - k8s.io/apiserver v0.30.3 - k8s.io/client-go v0.30.3 - k8s.io/utils v0.0.0-20230726121419-3b25d923346b - sigs.k8s.io/controller-runtime v0.18.4 + github.com/walles/env v0.0.4 + golang.org/x/net v0.34.0 + k8s.io/api v0.32.1 + k8s.io/apiextensions-apiserver v0.32.1 + k8s.io/apimachinery v0.32.1 + k8s.io/client-go v0.32.1 + k8s.io/utils v0.0.0-20241210054802-24370beab758 + sigs.k8s.io/controller-runtime v0.20.1 sigs.k8s.io/yaml v1.4.0 ) require ( + cel.dev/expr v0.19.0 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect - github.com/agnivade/levenshtein v1.1.1 // indirect - github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/agnivade/levenshtein v1.2.0 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/cel-go v0.17.8 // indirect + github.com/google/cel-go v0.22.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/mailru/easyjson v0.7.7 // indirect 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.20.2 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/stoewer/go-strcase v1.2.0 // indirect - github.com/tchap/go-patricia/v2 v2.3.1 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/tchap/go-patricia/v2 v2.3.2 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/yashtewari/glob-intersection v0.2.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect - golang.org/x/time v0.6.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/grpc v1.66.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/oauth2 v0.24.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/term v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.9.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.70.0 // indirect + google.golang.org/protobuf v1.36.3 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 
gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.30.3 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + k8s.io/apiserver v0.32.1 // indirect + k8s.io/component-base v0.32.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect ) diff --git a/constraint/go.sum b/constraint/go.sum index ab5d65cfb..d877a72bb 100644 --- a/constraint/go.sum +++ b/constraint/go.sum @@ -1,9 +1,11 @@ +cel.dev/expr v0.19.0 h1:lXuo+nDhpyJSpWxpPVi5cPUwzKb+dsdOiw6IreM5yt0= +cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= -github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= +github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= @@ -16,8 +18,6 @@ github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HV github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= @@ -30,18 +30,16 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 
h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= -github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/dgraph-io/badger/v4 v4.5.1 h1:7DCIXrQjo1LKmM96YD+hLVJ2EEsyyoWxJfpdd56HLps= +github.com/dgraph-io/badger/v4 v4.5.1/go.mod h1:qn3Be0j3TfV4kPbVoK0arXCD1/nr1ftth6sbL5jxdoA= +github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= +github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -50,8 +48,10 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -61,31 +61,32 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr 
v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= +github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= -github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= -github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= -github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= +github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= +github.com/google/flatbuffers v24.12.23+incompatible h1:ubBKR94NR4pXUCY/MUsRVzd9umNW7ht7EG9hHfS9FX8= +github.com/google/flatbuffers v24.12.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8 
h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -94,18 +95,16 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -114,8 +113,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -127,6 +126,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod 
h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -136,19 +137,19 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= -github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= -github.com/open-policy-agent/opa v0.68.0 h1:Jl3U2vXRjwk7JrHmS19U3HZO5qxQRinQbJ2eCJYSqJQ= -github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w= +github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= +github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/open-policy-agent/opa v1.1.0 h1:HMz2evdEMTyNqtdLjmu3Vyx06BmhNYAx67Yz3Ll9q2s= +github.com/open-policy-agent/opa v1.1.0/go.mod h1:T1pASQ1/vwfTa+e2fYcfpLCvWgYtqtiUv+IuA/dLPQs= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= -github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= @@ -157,30 +158,37 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 
h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sashabaranov/go-openai v1.36.1 h1:EVfRXwIlW2rUzpx6vR+aeIKCK/xylSrVYAx1TMTSX3g= +github.com/sashabaranov/go-openai v1.36.1/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= -github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= +github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/walles/env v0.0.4 h1:v+cQHLwlASHaybe9VPfRZsmHsdL9HNxfX1yvNkEQsno= +github.com/walles/env v0.0.4/go.mod h1:YBVhW14DflZB4j6OO2hyHzjSi3cBDi4lzPXG45hfoTo= 
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= @@ -189,132 +197,133 @@ github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBe github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= -go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= -go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= -go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= -go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= -go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= +go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= +go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= +go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= +go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= +go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= +go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= 
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= 
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod 
h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= -k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= -k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U= -k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4= -k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= -k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/apiserver v0.30.3 h1:QZJndA9k2MjFqpnyYv/PH+9PE0SHhx3hBho4X0vE65g= -k8s.io/apiserver v0.30.3/go.mod h1:6Oa88y1CZqnzetd2JdepO0UXzQX4ZnOekx2/PtEjrOg= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/component-base v0.30.3 h1:Ci0UqKWf4oiwy8hr1+E3dsnliKnkMLZMVbWzeorlk7s= -k8s.io/component-base v0.30.3/go.mod h1:C1SshT3rGPCuNtBs14RmVD2xW0EhRSeLvBh7AGk1quA= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 
h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= -sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= +k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= +k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= +k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= +k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= +k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= +k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= +k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= +k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= +k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= +k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= +k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= +sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/constraint/pkg/client/drivers/llm/args.go b/constraint/pkg/client/drivers/llm/args.go new file mode 100644 index 000000000..ab7f3ba6e --- /dev/null +++ b/constraint/pkg/client/drivers/llm/args.go @@ -0,0 +1,3 @@ +package llm + +type Arg func(*Driver) error diff --git 
a/constraint/pkg/client/drivers/llm/driver.go b/constraint/pkg/client/drivers/llm/driver.go
new file mode 100644
index 000000000..89c76b0d2
--- /dev/null
+++ b/constraint/pkg/client/drivers/llm/driver.go
@@ -0,0 +1,266 @@
+package llm
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	openai "github.com/sashabaranov/go-openai"
+
+	apiconstraints "github.com/open-policy-agent/frameworks/constraint/pkg/apis/constraints"
+	"github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers"
+	llmSchema "github.com/open-policy-agent/frameworks/constraint/pkg/client/drivers/llm/schema"
+	"github.com/open-policy-agent/frameworks/constraint/pkg/core/templates"
+	"github.com/open-policy-agent/frameworks/constraint/pkg/types"
+	"github.com/open-policy-agent/opa/storage"
+	"github.com/sethvargo/go-retry"
+	flag "github.com/spf13/pflag"
+	"github.com/walles/env"
+	admissionv1 "k8s.io/api/admission/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+const (
+	maxRetries = 10
+	// need minimum of 2023-12-01-preview for JSON mode.
+	azureOpenAIAPIVersion = "2024-03-01-preview"
+	azureOpenAIURL        = "openai.azure.com"
+	systemPrompt          = "You are a policy engine for Kubernetes designed to output JSON. Input will be a policy definition, Kubernetes AdmissionRequest object, and parameters to apply to the policy if applicable. Output JSON should only have a 'decision' field with a boolean value and a 'reason' field with a string value explaining the decision, only if decision is false. Only output valid JSON."
+)
+
+var (
+	openAIAPIURLv1 = "https://api.openai.com/v1"
+
+	openAIDeploymentName = flag.String("openai-deployment-name", env.GetOr("OPENAI_DEPLOYMENT_NAME", env.String, "gpt-3.5-turbo-0301"), "The deployment name used for the model in OpenAI service.")
+	openAIAPIKey         = flag.String("openai-api-key", env.GetOr("OPENAI_API_KEY", env.String, ""), "The API key for the OpenAI service. This is required.")
+	openAIEndpoint       = flag.String("openai-endpoint", env.GetOr("OPENAI_ENDPOINT", env.String, openAIAPIURLv1), "The endpoint for OpenAI service. Defaults to "+openAIAPIURLv1+". Set this to Azure OpenAI Service or OpenAI compatible API endpoint, if needed.")
+)
+
+type Driver struct {
+	prompts map[string]string
+}
+
+var _ drivers.Driver = &Driver{}
+
+type Decision struct {
+	Name       string
+	Constraint *unstructured.Unstructured
+	Decision   bool
+	Reason     string
+}
+
+type ARGetter interface {
+	GetAdmissionRequest() *admissionv1.AdmissionRequest
+}
+
+// Name returns the name of the driver.
+func (d *Driver) Name() string {
+	return llmSchema.Name
+}
+
+func (d *Driver) AddTemplate(_ context.Context, ct *templates.ConstraintTemplate) error {
+	source, err := llmSchema.GetSourceFromTemplate(ct)
+	if err != nil {
+		return err
+	}
+
+	prompt, err := source.GetPrompt()
+	if err != nil {
+		return err
+	}
+	if prompt == "" {
+		return fmt.Errorf("prompt is empty for template: %q", ct.Name)
+	}
+
+	d.prompts[ct.Name] = prompt
+	return nil
+}
+
+func (d *Driver) RemoveTemplate(_ context.Context, ct *templates.ConstraintTemplate) error {
+	delete(d.prompts, ct.Name)
+
+	return nil
+}
+
+func (d *Driver) AddConstraint(_ context.Context, constraint *unstructured.Unstructured) error {
+	promptName := strings.ToLower(constraint.GetKind())
+
+	_, found := d.prompts[promptName]
+	if !found {
+		return fmt.Errorf("no prompt with name: %q", promptName)
+	}
+	return nil
+}
+
+func (d *Driver) RemoveConstraint(_ context.Context, _ *unstructured.Unstructured) error {
+	return nil
+}
+
+func (d *Driver) AddData(_ context.Context, _ string, _ storage.Path, _ interface{}) error {
+	return nil
+}
+
+func (d *Driver) RemoveData(_ context.Context, _ string, _ storage.Path) error {
+	return nil
+}
+
+func (d *Driver) Query(ctx context.Context, _ string, constraints []*unstructured.Unstructured, review interface{}, _ ...drivers.QueryOpt) (*drivers.QueryResponse, error) {
+	llmc, err := newLLMClients()
+	if err != nil {
+		return nil, err
+	}
+
+	arGetter, ok := review.(ARGetter)
+	if !ok {
+		return nil, errors.New("cannot convert review to ARGetter")
+	}
+	aRequest := arGetter.GetAdmissionRequest()
+
+	var allDecisions []*Decision
+	for _, constraint := range constraints {
+		promptName := strings.ToLower(constraint.GetKind())
+		prompt, found := d.prompts[promptName]
+		if !found {
+			continue
+		}
+
+		paramsStruct, _, err := unstructured.NestedFieldNoCopy(constraint.Object, "spec", "parameters")
+		if err != nil {
+			return nil, err
+		}
+
+		params, err := json.Marshal(paramsStruct)
+		if err != nil {
+			return nil, err
+		}
+
+		llmPrompt := fmt.Sprintf("policy: %s\nadmission request: %s\nparameters: %s", prompt, string(aRequest.Object.Raw), string(params))
+
+		var resp string
+		r := retry.WithMaxRetries(maxRetries, retry.NewExponential(1*time.Second))
+		if err := retry.Do(ctx, r, func(ctx context.Context) error {
+			resp, err = llmc.openaiGptChatCompletion(ctx, llmPrompt)
+			requestErr := &openai.APIError{}
+			if errors.As(err, &requestErr) {
+				switch requestErr.HTTPStatusCode {
+				case http.StatusTooManyRequests, http.StatusRequestTimeout, http.StatusInternalServerError, http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout:
+					return retry.RetryableError(err)
+				}
+			}
+			return err
+		}); err != nil {
+			return nil, err
+		}
+
+		var decision Decision
+		err = json.Unmarshal([]byte(resp), &decision)
+		if err != nil {
+			return nil, err
+		}
+
+		if !decision.Decision {
+			llmDecision := &Decision{
+				Decision:   decision.Decision,
+				Name:       constraint.GetName(),
+				Constraint: constraint,
+				Reason:     decision.Reason,
+			}
+			allDecisions = append(allDecisions, llmDecision)
+		}
+	}
+	if len(allDecisions) == 0 {
+		return nil, nil
+	}
+
+	results := make([]*types.Result, len(allDecisions))
+	for i, llmDecision := range allDecisions {
+		enforcementAction, found, err := unstructured.NestedString(llmDecision.Constraint.Object, "spec", "enforcementAction")
+		if err != nil {
+			return nil, err
+		}
+		if !found {
+			enforcementAction = apiconstraints.EnforcementActionDeny
+		}
+
+		results[i] = &types.Result{
+			Metadata: map[string]interface{}{
+				"name": llmDecision.Name,
+			},
+			Constraint:        llmDecision.Constraint,
+			Msg:               llmDecision.Reason,
+			EnforcementAction: enforcementAction,
+		}
+	}
+	return &drivers.QueryResponse{Results: results}, nil
+}
+
+func (d *Driver) Dump(_ context.Context) (string, error) {
+	panic("implement me")
+}
+
+func (d *Driver) GetDescriptionForStat(_ string) (string, error) {
+	panic("implement me")
+}
+
+type llmClients struct {
+	openAIClient openai.Client
+}
+
+func newLLMClients() (llmClients, error) {
+	var config openai.ClientConfig
+	// default to OpenAI API
+	config = openai.DefaultConfig(*openAIAPIKey)
+
+	if *openAIEndpoint != openAIAPIURLv1 {
+		// Azure OpenAI
+		if strings.Contains(*openAIEndpoint, azureOpenAIURL) {
+			config = openai.DefaultAzureConfig(*openAIAPIKey, *openAIEndpoint)
+		} else {
+			// OpenAI API compatible endpoint or proxy
+			config.BaseURL = *openAIEndpoint
+		}
+		config.APIVersion = azureOpenAIAPIVersion
+	}
+
+	clients := llmClients{
+		openAIClient: *openai.NewClientWithConfig(config),
+	}
+	return clients, nil
+}
+
+func (c *llmClients) openaiGptChatCompletion(ctx context.Context, prompt string) (string, error) {
+	req := openai.ChatCompletionRequest{
+		Model: *openAIDeploymentName,
+		Messages: []openai.ChatCompletionMessage{
+			{
+				Role:    openai.ChatMessageRoleSystem,
+				Content: systemPrompt,
+			},
+			{
+				Role:    openai.ChatMessageRoleUser,
+				Content: prompt,
+			},
+		},
+		N:           1, // Number of completions to generate
+		Temperature: 0, // 0 is more deterministic
+		ResponseFormat: &openai.ChatCompletionResponseFormat{
+			Type: openai.ChatCompletionResponseFormatTypeJSONObject,
+		},
+	}
+
+	resp, err := c.openAIClient.CreateChatCompletion(ctx, req)
+	if err != nil {
+		return "", err
+	}
+
+	if len(resp.Choices) != 1 {
+		return "", fmt.Errorf("expected choices to be 1 but received: %d", len(resp.Choices))
+	}
+
+	result := resp.Choices[0].Message.Content
+	return result, nil
+}
diff --git a/constraint/pkg/client/drivers/llm/new.go b/constraint/pkg/client/drivers/llm/new.go
new file mode 100644
index 000000000..9fdeb3631
--- /dev/null
+++ b/constraint/pkg/client/drivers/llm/new.go
@@ -0,0 +1,13 @@
+package llm
+
+func New(args ...Arg) (*Driver, error) {
+	driver := &Driver{
+		prompts: make(map[string]string),
+	}
+	for _, arg := range args {
+		if err := arg(driver); err != nil {
+			return nil, err
+		}
+	}
+	return driver, nil
+}
diff --git a/constraint/pkg/client/drivers/llm/schema/schema.go b/constraint/pkg/client/drivers/llm/schema/schema.go
new file mode 100644
index 000000000..e8b300857
--- /dev/null
+++ b/constraint/pkg/client/drivers/llm/schema/schema.go
@@ -0,0 +1,65 @@
+package schema
+
+import (
+	"errors"
+
+	"github.com/open-policy-agent/frameworks/constraint/pkg/core/templates"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+const (
+	// Name is the name of the driver.
+ Name = "LLM" +) + +var ErrBadType = errors.New("could not recognize the type") + +type Source struct { + Prompt string `json:"prompt,omitempty"` +} + +func (in *Source) GetPrompt() (string, error) { + if in == nil { + return "", nil + } + return in.Prompt, nil +} + +func GetSource(code templates.Code) (*Source, error) { + rawCode := code.Source + v, ok := rawCode.Value.(map[string]interface{}) + if !ok { + return nil, ErrBadType + } + + out := &Source{} + + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(v, out); err != nil { + return nil, err + } + + return out, nil +} + +func GetSourceFromTemplate(ct *templates.ConstraintTemplate) (*Source, error) { + if len(ct.Spec.Targets) != 1 { + return nil, errors.New("wrong number of targets defined, only 1 target allowed") + } + + var source *Source + for _, code := range ct.Spec.Targets[0].Code { + if code.Engine != Name { + continue + } + var err error + source, err = GetSource(code) + if err != nil { + return nil, err + } + break + } + if source == nil { + return nil, errors.New("LLM code not defined") + } + return source, nil +} diff --git a/constraint/vendor/cel.dev/expr/.bazelversion b/constraint/vendor/cel.dev/expr/.bazelversion new file mode 100644 index 000000000..26bc914a3 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/.bazelversion @@ -0,0 +1,2 @@ +7.0.1 +# Keep this pinned version in parity with cel-go diff --git a/constraint/vendor/cel.dev/expr/.gitattributes b/constraint/vendor/cel.dev/expr/.gitattributes new file mode 100644 index 000000000..3de1ec213 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/.gitattributes @@ -0,0 +1,2 @@ +*.pb.go linguist-generated=true +*.pb.go -diff -merge diff --git a/constraint/vendor/cel.dev/expr/.gitignore b/constraint/vendor/cel.dev/expr/.gitignore new file mode 100644 index 000000000..0d4fed27c --- /dev/null +++ b/constraint/vendor/cel.dev/expr/.gitignore @@ -0,0 +1,2 @@ +bazel-* +MODULE.bazel.lock diff --git a/constraint/vendor/cel.dev/expr/BUILD.bazel b/constraint/vendor/cel.dev/expr/BUILD.bazel new file mode 100644 index 000000000..37d8adc95 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +go_library( + name = "expr", + srcs = [ + "checked.pb.go", + "eval.pb.go", + "explain.pb.go", + "syntax.pb.go", + "value.pb.go", + ], + importpath = "cel.dev/expr", + visibility = ["//visibility:public"], + deps = [ + "@org_golang_google_genproto_googleapis_rpc//status:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//runtime/protoimpl", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + ], +) + +alias( + name = "go_default_library", + actual = ":expr", + visibility = ["//visibility:public"], +) diff --git a/constraint/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/constraint/vendor/cel.dev/expr/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..59908e2d8 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/CODE_OF_CONDUCT.md @@ -0,0 +1,25 @@ +# Contributor Code of Conduct +## Version 0.1.1 (adapted from 0.3b-angular) + +As contributors and maintainers of the Common Expression Language +(CEL) project, we pledge to respect everyone 
who contributes by +posting issues, updating documentation, submitting pull requests, +providing feedback in comments, and any other activities. + +Communication through any of CEL's channels (GitHub, Gitter, IRC, +mailing lists, Google+, Twitter, etc.) must be constructive and never +resort to personal attacks, trolling, public or private harassment, +insults, or other unprofessional conduct. + +We promise to extend courtesy and respect to everyone involved in this +project regardless of gender, gender identity, sexual orientation, +disability, age, race, ethnicity, religion, or level of experience. We +expect anyone contributing to the project to do the same. + +If any member of the community violates this code of conduct, the +maintainers of the CEL project may take action, removing issues, +comments, and PRs or blocking accounts as deemed appropriate. + +If you are subject to or witness unacceptable behavior, or have any +other concerns, please email us at +[cel-conduct@google.com](mailto:cel-conduct@google.com). diff --git a/constraint/vendor/cel.dev/expr/CONTRIBUTING.md b/constraint/vendor/cel.dev/expr/CONTRIBUTING.md new file mode 100644 index 000000000..8f5fd5c31 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/CONTRIBUTING.md @@ -0,0 +1,32 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are a +few guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## What to expect from maintainers + +Expect maintainers to respond to new issues or pull requests within a week. +For outstanding and ongoing issues and particularly for long-running +pull requests, expect the maintainers to review within a week of a +contributor asking for a new review. There is no commitment to resolution -- +merging or closing a pull request, or fixing or closing an issue -- because some +issues will require more discussion than others. diff --git a/constraint/vendor/cel.dev/expr/GOVERNANCE.md b/constraint/vendor/cel.dev/expr/GOVERNANCE.md new file mode 100644 index 000000000..0a525bc17 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/GOVERNANCE.md @@ -0,0 +1,43 @@ +# Project Governance + +This document defines the governance process for the CEL language. CEL is +Google-developed, but openly governed. Major contributors to the CEL +specification and its corresponding implementations constitute the CEL +Language Council. New members may be added by a unanimous vote of the +Council. + +The MAINTAINERS.md file lists the members of the CEL Language Council, and +unofficially indicates the "areas of expertise" of each member with respect +to the publicly available CEL repos. 
+ +## Code Changes + +Code changes must follow the standard pull request (PR) model documented in the +CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a +maintainer. The maintainer reserves the right to request that any feature +request (FR) or PR be reviewed by the language council. + +## Syntax and Semantic Changes + +Syntactic and semantic changes must be reviewed by the CEL Language Council. +Maintainers may also request language council review at their discretion. + +The review process is as follows: + +- Create a Feature Request in the CEL-Spec repo. The feature description will + serve as an abstract for the detailed design document. +- Co-develop a design document with the Language Council. +- Once the proposer gives the design document approval, the document will be + linked to the FR in the CEL-Spec repo and opened for comments to members of + the cel-lang-discuss@googlegroups.com. +- The Language Council will review the design doc at the next council meeting + (once every three weeks) and the council decision included in the document. + +If the proposal is approved, the spec will be updated by a maintainer (if +applicable) and a rationale will be included in the CEL-Spec wiki to ensure +future developers may follow CEL's growth and direction over time. + +Approved proposals may be implemented by the proposer or by the maintainers as +the parties see fit. At the discretion of the maintainer, changes from the +approved design are permitted during implementation if they improve the user +experience and clarity of the feature. diff --git a/constraint/vendor/cel.dev/expr/LICENSE b/constraint/vendor/cel.dev/expr/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/constraint/vendor/cel.dev/expr/MAINTAINERS.md b/constraint/vendor/cel.dev/expr/MAINTAINERS.md new file mode 100644 index 000000000..1ed2eb8ab --- /dev/null +++ b/constraint/vendor/cel.dev/expr/MAINTAINERS.md @@ -0,0 +1,13 @@ +# CEL Language Council + +| Name | Company | Area of Expertise | +|-----------------|--------------|-------------------| +| Alfred Fuller | Facebook | cel-cpp, cel-spec | +| Jim Larson | Google | cel-go, cel-spec | +| Matthais Blume | Google | cel-spec | +| Tristan Swadell | Google | cel-go, cel-spec | + +## Emeritus + +* Sanjay Ghemawat (Google) +* Wolfgang Grieskamp (Facebook) diff --git a/constraint/vendor/cel.dev/expr/MODULE.bazel b/constraint/vendor/cel.dev/expr/MODULE.bazel new file mode 100644 index 000000000..9794266f5 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/MODULE.bazel @@ -0,0 +1,70 @@ +module( + name = "cel-spec", +) + +bazel_dep( + name = "bazel_skylib", + version = "1.7.1", +) +bazel_dep( + name = "gazelle", + version = "0.36.0", + repo_name = "bazel_gazelle", +) +bazel_dep( + name = "googleapis", + version = "0.0.0-20240819-fe8ba054a", + repo_name = "com_google_googleapis", +) +bazel_dep( + name = "protobuf", + version = "26.0", + repo_name = "com_google_protobuf", +) +bazel_dep( + name = "rules_cc", + version = "0.0.9", +) +bazel_dep( + name = "rules_go", + version = "0.49.0", + repo_name = "io_bazel_rules_go", +) +bazel_dep( + name = "rules_java", + version = "7.6.5", +) +bazel_dep( + name = "rules_proto", + version = "6.0.0", +) +bazel_dep( + name = "rules_python", + version = "0.35.0", +) + +### PYTHON ### +python = use_extension("@rules_python//python/extensions:python.bzl", "python") +python.toolchain( + ignore_root_user_error = True, + python_version = "3.11", +) + +switched_rules = use_extension("@com_google_googleapis//:extensions.bzl", "switched_rules") +switched_rules.use_languages( + cc = True, + go = True, + java = True, +) +use_repo(switched_rules, "com_google_googleapis_imports") + +go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk") +go_sdk.download(version = "1.21.1") + +go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps") +go_deps.from_file(go_mod = "//:go.mod") +use_repo( + go_deps, + "org_golang_google_genproto_googleapis_rpc", + "org_golang_google_protobuf", +) diff --git a/constraint/vendor/cel.dev/expr/README.md b/constraint/vendor/cel.dev/expr/README.md new file mode 100644 index 000000000..7930c0b75 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/README.md @@ -0,0 +1,73 @@ +# Common Expression Language + +The Common Expression Language (CEL) implements common semantics for expression +evaluation, enabling different applications to more easily interoperate. + +Key Applications + +* Security policy: organizations have complex infrastructure and need common + tooling to reason about the system as a whole +* Protocols: expressions are a useful data type and require interoperability + across programming languages and platforms. + + +Guiding philosophy: + +1. Keep it small & fast. + * CEL evaluates in linear time, is mutation free, and not Turing-complete. + This limitation is a feature of the language design, which allows the + implementation to evaluate orders of magnitude faster than equivalently + sandboxed JavaScript. +2. Make it extensible. + * CEL is designed to be embedded in applications, and allows for + extensibility via its context which allows for functions and data to be + provided by the software that embeds it. +3. Developer-friendly. 
+ * The language is approachable to developers. The initial spec was based + on the experience of developing Firebase Rules and usability testing + many prior iterations. + * The library itself and accompanying toolings should be easy to adopt by + teams that seek to integrate CEL into their platforms. + +The required components of a system that supports CEL are: + +* The textual representation of an expression as written by a developer. It is + of similar syntax to expressions in C/C++/Java/JavaScript +* A representation of the program's abstract syntax tree (AST). +* A compiler library that converts the textual representation to the binary + representation. This can be done ahead of time (in the control plane) or + just before evaluation (in the data plane). +* A context containing one or more typed variables, often protobuf messages. + Most use-cases will use `attribute_context.proto` +* An evaluator library that takes the binary format in the context and + produces a result, usually a Boolean. + +For use cases which require persistence or cross-process communcation, it is +highly recommended to serialize the type-checked expression as a protocol +buffer. The CEL team will maintains canonical protocol buffers for ASTs and +will keep these versions identical and wire-compatible in perpetuity: + +* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr) +* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1) + + +Example of boolean conditions and object construction: + +``` c +// Condition +account.balance >= transaction.withdrawal + || (account.overdraftProtection + && account.overdraftLimit >= transaction.withdrawal - account.balance) + +// Object construction +common.GeoPoint{ latitude: 10.0, longitude: -5.5 } +``` + +For more detail, see: + +* [Introduction](doc/intro.md) +* [Language Definition](doc/langdef.md) + +Released under the [Apache License](LICENSE). + +Disclaimer: This is not an official Google product. 
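Editor's note: the vendored README above lists the pieces a CEL integration needs (a compiler library, an evaluator library, and a typed variable context). For orientation only, the sketch below shows that compile-then-evaluate flow using the Go implementation, `github.com/google/cel-go` (already an indirect dependency of this module). The variable names and the expression are invented for illustration and are not part of this change.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare the typed variables that make up the evaluation context.
	env, err := cel.NewEnv(
		cel.Variable("balance", cel.DoubleType),
		cel.Variable("withdrawal", cel.DoubleType),
	)
	if err != nil {
		panic(err)
	}

	// Compile (parse + type-check) the textual expression into an AST.
	ast, iss := env.Compile(`balance >= withdrawal`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}

	// Plan an evaluable program from the checked AST.
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}

	// Evaluate against a concrete input; the result is a CEL value.
	out, _, err := prg.Eval(map[string]interface{}{
		"balance":    100.0,
		"withdrawal": 25.0,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```

The AST produced by `Compile` corresponds to the checked-expression protos vendored in this package, which is why the README recommends serializing the type-checked form when expressions cross process boundaries.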
diff --git a/constraint/vendor/cel.dev/expr/WORKSPACE b/constraint/vendor/cel.dev/expr/WORKSPACE new file mode 100644 index 000000000..b6dc9ed67 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/WORKSPACE @@ -0,0 +1,145 @@ +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "io_bazel_rules_go", + sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + ], +) + +http_archive( + name = "bazel_gazelle", + sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + ], +) + +http_archive( + name = "rules_proto", + sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d", + strip_prefix = "rules_proto-4.0.0-3.20.0", + urls = [ + "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz", + ], +) + +# googleapis as of 09/16/2024 +http_archive( + name = "com_google_googleapis", + strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee", + sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8", + urls = [ + "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz", + ], +) + +# protobuf +http_archive( + name = "com_google_protobuf", + sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2", + strip_prefix = "protobuf-3.21.5", + urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"], +) + +# googletest +http_archive( + name = "com_google_googletest", + urls = ["https://github.com/google/googletest/archive/master.zip"], + strip_prefix = "googletest-master", +) + +# gflags +http_archive( + name = "com_github_gflags_gflags", + sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe", + strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a", + urls = [ + "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + ], +) + +# glog +http_archive( + name = "com_google_glog", + sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21", + strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8", + urls = [ + "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + ], +) + +# absl +http_archive( + name = "com_google_absl", + strip_prefix = "abseil-cpp-master", + urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"], +) + +load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains") +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") +load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") +load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains") +load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") + +switched_rules_by_language( + name = 
"com_google_googleapis_imports", + cc = True, +) + +# Do *not* call *_dependencies(), etc, yet. See comment at the end. + +# Generated Google APIs protos for Golang +# Generated Google APIs protos for Golang 08/26/2024 +go_repository( + name = "org_golang_google_genproto_googleapis_api", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/api", + sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=", + version = "v0.0.0-20240826202546-f6391c0de4c7", +) + +# Generated Google APIs protos for Golang 08/26/2024 +go_repository( + name = "org_golang_google_genproto_googleapis_rpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/rpc", + sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=", + version = "v0.0.0-20240826202546-f6391c0de4c7", +) + +# gRPC deps +go_repository( + name = "org_golang_google_grpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/grpc", + tag = "v1.49.0", +) + +go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=", + version = "v0.0.0-20190311183353-d8887717615a", +) + +go_repository( + name = "org_golang_x_text", + importpath = "golang.org/x/text", + sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=", + version = "v0.3.2", +) + +# Run the dependencies at the end. These will silently try to import some +# of the above repositories but at different versions, so ours must come first. +go_rules_dependencies() +go_register_toolchains(version = "1.19.1") +gazelle_dependencies() +rules_proto_dependencies() +rules_proto_toolchains() +protobuf_deps() diff --git a/constraint/vendor/cel.dev/expr/WORKSPACE.bzlmod b/constraint/vendor/cel.dev/expr/WORKSPACE.bzlmod new file mode 100644 index 000000000..e69de29bb diff --git a/constraint/vendor/cel.dev/expr/checked.pb.go b/constraint/vendor/cel.dev/expr/checked.pb.go new file mode 100644 index 000000000..bb225c8ab --- /dev/null +++ b/constraint/vendor/cel.dev/expr/checked.pb.go @@ -0,0 +1,1432 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/checked.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Type_PrimitiveType int32 + +const ( + Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0 + Type_BOOL Type_PrimitiveType = 1 + Type_INT64 Type_PrimitiveType = 2 + Type_UINT64 Type_PrimitiveType = 3 + Type_DOUBLE Type_PrimitiveType = 4 + Type_STRING Type_PrimitiveType = 5 + Type_BYTES Type_PrimitiveType = 6 +) + +// Enum value maps for Type_PrimitiveType. 
+var ( + Type_PrimitiveType_name = map[int32]string{ + 0: "PRIMITIVE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "UINT64", + 4: "DOUBLE", + 5: "STRING", + 6: "BYTES", + } + Type_PrimitiveType_value = map[string]int32{ + "PRIMITIVE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "UINT64": 3, + "DOUBLE": 4, + "STRING": 5, + "BYTES": 6, + } +) + +func (x Type_PrimitiveType) Enum() *Type_PrimitiveType { + p := new(Type_PrimitiveType) + *p = x + return p +} + +func (x Type_PrimitiveType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[0].Descriptor() +} + +func (Type_PrimitiveType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[0] +} + +func (x Type_PrimitiveType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_PrimitiveType.Descriptor instead. +func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +type Type_WellKnownType int32 + +const ( + Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0 + Type_ANY Type_WellKnownType = 1 + Type_TIMESTAMP Type_WellKnownType = 2 + Type_DURATION Type_WellKnownType = 3 +) + +// Enum value maps for Type_WellKnownType. +var ( + Type_WellKnownType_name = map[int32]string{ + 0: "WELL_KNOWN_TYPE_UNSPECIFIED", + 1: "ANY", + 2: "TIMESTAMP", + 3: "DURATION", + } + Type_WellKnownType_value = map[string]int32{ + "WELL_KNOWN_TYPE_UNSPECIFIED": 0, + "ANY": 1, + "TIMESTAMP": 2, + "DURATION": 3, + } +) + +func (x Type_WellKnownType) Enum() *Type_WellKnownType { + p := new(Type_WellKnownType) + *p = x + return p +} + +func (x Type_WellKnownType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[1].Descriptor() +} + +func (Type_WellKnownType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[1] +} + +func (x Type_WellKnownType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_WellKnownType.Descriptor instead. 
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +type CheckedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` + Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` +} + +func (x *CheckedExpr) Reset() { + *x = CheckedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckedExpr) ProtoMessage() {} + +func (x *CheckedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead. +func (*CheckedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{0} +} + +func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference { + if x != nil { + return x.ReferenceMap + } + return nil +} + +func (x *CheckedExpr) GetTypeMap() map[int64]*Type { + if x != nil { + return x.TypeMap + } + return nil +} + +func (x *CheckedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +func (x *CheckedExpr) GetExprVersion() string { + if x != nil { + return x.ExprVersion + } + return "" +} + +func (x *CheckedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TypeKind: + // + // *Type_Dyn + // *Type_Null + // *Type_Primitive + // *Type_Wrapper + // *Type_WellKnown + // *Type_ListType_ + // *Type_MapType_ + // *Type_Function + // *Type_MessageType + // *Type_TypeParam + // *Type_Type + // *Type_Error + // *Type_AbstractType_ + TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use Type.ProtoReflect.Descriptor instead. +func (*Type) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1} +} + +func (m *Type) GetTypeKind() isType_TypeKind { + if m != nil { + return m.TypeKind + } + return nil +} + +func (x *Type) GetDyn() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Dyn); ok { + return x.Dyn + } + return nil +} + +func (x *Type) GetNull() structpb.NullValue { + if x, ok := x.GetTypeKind().(*Type_Null); ok { + return x.Null + } + return structpb.NullValue(0) +} + +func (x *Type) GetPrimitive() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Primitive); ok { + return x.Primitive + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWrapper() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Wrapper); ok { + return x.Wrapper + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWellKnown() Type_WellKnownType { + if x, ok := x.GetTypeKind().(*Type_WellKnown); ok { + return x.WellKnown + } + return Type_WELL_KNOWN_TYPE_UNSPECIFIED +} + +func (x *Type) GetListType() *Type_ListType { + if x, ok := x.GetTypeKind().(*Type_ListType_); ok { + return x.ListType + } + return nil +} + +func (x *Type) GetMapType() *Type_MapType { + if x, ok := x.GetTypeKind().(*Type_MapType_); ok { + return x.MapType + } + return nil +} + +func (x *Type) GetFunction() *Type_FunctionType { + if x, ok := x.GetTypeKind().(*Type_Function); ok { + return x.Function + } + return nil +} + +func (x *Type) GetMessageType() string { + if x, ok := x.GetTypeKind().(*Type_MessageType); ok { + return x.MessageType + } + return "" +} + +func (x *Type) GetTypeParam() string { + if x, ok := x.GetTypeKind().(*Type_TypeParam); ok { + return x.TypeParam + } + return "" +} + +func (x *Type) GetType() *Type { + if x, ok := x.GetTypeKind().(*Type_Type); ok { + return x.Type + } + return nil +} + +func (x *Type) GetError() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Error); ok { + return x.Error + } + return nil +} + +func (x *Type) GetAbstractType() *Type_AbstractType { + if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok { + return x.AbstractType + } + return nil +} + +type isType_TypeKind interface { + isType_TypeKind() +} + +type Type_Dyn struct { + Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"` +} + +type Type_Null struct { + Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Type_Primitive struct { + Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_Wrapper struct { + Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_WellKnown struct { + WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"` +} + +type Type_ListType_ struct { + ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"` +} + +type Type_MapType_ struct { + MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"` +} + +type Type_Function struct { + Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"` +} + +type Type_MessageType struct { + MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"` +} + +type Type_TypeParam struct { + TypeParam string 
`protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"` +} + +type Type_Type struct { + Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"` +} + +type Type_Error struct { + Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"` +} + +type Type_AbstractType_ struct { + AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"` +} + +func (*Type_Dyn) isType_TypeKind() {} + +func (*Type_Null) isType_TypeKind() {} + +func (*Type_Primitive) isType_TypeKind() {} + +func (*Type_Wrapper) isType_TypeKind() {} + +func (*Type_WellKnown) isType_TypeKind() {} + +func (*Type_ListType_) isType_TypeKind() {} + +func (*Type_MapType_) isType_TypeKind() {} + +func (*Type_Function) isType_TypeKind() {} + +func (*Type_MessageType) isType_TypeKind() {} + +func (*Type_TypeParam) isType_TypeKind() {} + +func (*Type_Type) isType_TypeKind() {} + +func (*Type_Error) isType_TypeKind() {} + +func (*Type_AbstractType_) isType_TypeKind() {} + +type Decl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to DeclKind: + // + // *Decl_Ident + // *Decl_Function + DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` +} + +func (x *Decl) Reset() { + *x = Decl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl) ProtoMessage() {} + +func (x *Decl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl.ProtoReflect.Descriptor instead. 
+func (*Decl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2} +} + +func (x *Decl) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Decl) GetDeclKind() isDecl_DeclKind { + if m != nil { + return m.DeclKind + } + return nil +} + +func (x *Decl) GetIdent() *Decl_IdentDecl { + if x, ok := x.GetDeclKind().(*Decl_Ident); ok { + return x.Ident + } + return nil +} + +func (x *Decl) GetFunction() *Decl_FunctionDecl { + if x, ok := x.GetDeclKind().(*Decl_Function); ok { + return x.Function + } + return nil +} + +type isDecl_DeclKind interface { + isDecl_DeclKind() +} + +type Decl_Ident struct { + Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"` +} + +type Decl_Function struct { + Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"` +} + +func (*Decl_Ident) isDecl_DeclKind() {} + +func (*Decl_Function) isDecl_DeclKind() {} + +type Reference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Reference) Reset() { + *x = Reference{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Reference) ProtoMessage() {} + +func (x *Reference) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reference.ProtoReflect.Descriptor instead. +func (*Reference) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{3} +} + +func (x *Reference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Reference) GetOverloadId() []string { + if x != nil { + return x.OverloadId + } + return nil +} + +func (x *Reference) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +type Type_ListType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` +} + +func (x *Type_ListType) Reset() { + *x = Type_ListType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_ListType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_ListType) ProtoMessage() {} + +func (x *Type_ListType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead. 
+func (*Type_ListType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Type_ListType) GetElemType() *Type { + if x != nil { + return x.ElemType + } + return nil +} + +type Type_MapType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` +} + +func (x *Type_MapType) Reset() { + *x = Type_MapType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_MapType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_MapType) ProtoMessage() {} + +func (x *Type_MapType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead. +func (*Type_MapType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Type_MapType) GetKeyType() *Type { + if x != nil { + return x.KeyType + } + return nil +} + +func (x *Type_MapType) GetValueType() *Type { + if x != nil { + return x.ValueType + } + return nil +} + +type Type_FunctionType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` +} + +func (x *Type_FunctionType) Reset() { + *x = Type_FunctionType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_FunctionType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_FunctionType) ProtoMessage() {} + +func (x *Type_FunctionType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead. 
+func (*Type_FunctionType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Type_FunctionType) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Type_FunctionType) GetArgTypes() []*Type { + if x != nil { + return x.ArgTypes + } + return nil +} + +type Type_AbstractType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` +} + +func (x *Type_AbstractType) Reset() { + *x = Type_AbstractType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_AbstractType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_AbstractType) ProtoMessage() {} + +func (x *Type_AbstractType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead. +func (*Type_AbstractType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Type_AbstractType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Type_AbstractType) GetParameterTypes() []*Type { + if x != nil { + return x.ParameterTypes + } + return nil +} + +type Decl_IdentDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_IdentDecl) Reset() { + *x = Decl_IdentDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_IdentDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_IdentDecl) ProtoMessage() {} + +func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead. 
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *Decl_IdentDecl) GetType() *Type { + if x != nil { + return x.Type + } + return nil +} + +func (x *Decl_IdentDecl) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +func (x *Decl_IdentDecl) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +type Decl_FunctionDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` +} + +func (x *Decl_FunctionDecl) Reset() { + *x = Decl_FunctionDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl) ProtoMessage() {} + +func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead. +func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload { + if x != nil { + return x.Overloads + } + return nil +} + +type Decl_FunctionDecl_Overload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` + TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` + ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` + Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_FunctionDecl_Overload) Reset() { + *x = Decl_FunctionDecl_Overload{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl_Overload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl_Overload) ProtoMessage() {} + +func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead. 
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0} +} + +func (x *Decl_FunctionDecl_Overload) GetOverloadId() string { + if x != nil { + return x.OverloadId + } + return "" +} + +func (x *Decl_FunctionDecl_Overload) GetParams() []*Type { + if x != nil { + return x.Params + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string { + if x != nil { + return x.TypeParams + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool { + if x != nil { + return x.IsInstanceFunction + } + return false +} + +func (x *Decl_FunctionDecl_Overload) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +var File_cel_expr_checked_proto protoreflect.FileDescriptor + +var file_cel_expr_checked_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, + 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, + 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, + 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, + 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69, + 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, + 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c, + 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, + 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b, + 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 
0x2e, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c, + 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, + 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f, + 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65, + 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 
0x6c, + 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a, + 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, + 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, + 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64, + 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63, + 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_checked_proto_rawDescOnce sync.Once + file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc +) + +func file_cel_expr_checked_proto_rawDescGZIP() []byte { + file_cel_expr_checked_proto_rawDescOnce.Do(func() { + file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData) + }) + return file_cel_expr_checked_proto_rawDescData +} + +var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_cel_expr_checked_proto_goTypes = []interface{}{ + (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType + (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType + (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr + (*Type)(nil), // 3: cel.expr.Type + (*Decl)(nil), // 4: cel.expr.Decl + (*Reference)(nil), // 5: cel.expr.Reference + nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry + nil, // 7: cel.expr.CheckedExpr.TypeMapEntry + (*Type_ListType)(nil), // 8: cel.expr.Type.ListType + 
(*Type_MapType)(nil), // 9: cel.expr.Type.MapType + (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType + (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType + (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl + (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl + (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload + (*SourceInfo)(nil), // 15: cel.expr.SourceInfo + (*Expr)(nil), // 16: cel.expr.Expr + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty + (structpb.NullValue)(0), // 18: google.protobuf.NullValue + (*Constant)(nil), // 19: cel.expr.Constant +} +var file_cel_expr_checked_proto_depIdxs = []int32{ + 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry + 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry + 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo + 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr + 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty + 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue + 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType + 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType + 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType + 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType + 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType + 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType + 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type + 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty + 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType + 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl + 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl + 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant + 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference + 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type + 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type + 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type + 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type + 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type + 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type + 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type + 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type + 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant + 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload + 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type + 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_cel_expr_checked_proto_init() } +func file_cel_expr_checked_proto_init() { + if File_cel_expr_checked_proto != nil { + return + } + file_cel_expr_syntax_proto_init() + if !protoimpl.UnsafeEnabled { + 
file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_ListType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_MapType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_FunctionType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_AbstractType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_IdentDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl_Overload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Type_Dyn)(nil), + (*Type_Null)(nil), + (*Type_Primitive)(nil), + (*Type_Wrapper)(nil), + (*Type_WellKnown)(nil), + (*Type_ListType_)(nil), + (*Type_MapType_)(nil), + (*Type_Function)(nil), + (*Type_MessageType)(nil), + (*Type_TypeParam)(nil), + (*Type_Type)(nil), + (*Type_Error)(nil), + (*Type_AbstractType_)(nil), + } + file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Decl_Ident)(nil), + (*Decl_Function)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_cel_expr_checked_proto_rawDesc, + NumEnums: 2, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_checked_proto_goTypes, + DependencyIndexes: file_cel_expr_checked_proto_depIdxs, + EnumInfos: file_cel_expr_checked_proto_enumTypes, + MessageInfos: file_cel_expr_checked_proto_msgTypes, + }.Build() + File_cel_expr_checked_proto = out.File + file_cel_expr_checked_proto_rawDesc = nil + file_cel_expr_checked_proto_goTypes = nil + file_cel_expr_checked_proto_depIdxs = nil +} diff --git a/constraint/vendor/cel.dev/expr/cloudbuild.yaml b/constraint/vendor/cel.dev/expr/cloudbuild.yaml new file mode 100644 index 000000000..c40881f12 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/cloudbuild.yaml @@ -0,0 +1,9 @@ +steps: +- name: 'gcr.io/cloud-builders/bazel:7.0.1' + entrypoint: bazel + args: ['build', '...'] + id: bazel-build + waitFor: ['-'] +timeout: 15m +options: + machineType: 'N1_HIGHCPU_32' diff --git a/constraint/vendor/cel.dev/expr/eval.pb.go b/constraint/vendor/cel.dev/expr/eval.pb.go new file mode 100644 index 000000000..8f651f9cc --- /dev/null +++ b/constraint/vendor/cel.dev/expr/eval.pb.go @@ -0,0 +1,490 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/eval.proto + +package expr + +import ( + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EvalState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *EvalState) Reset() { + *x = EvalState{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState) ProtoMessage() {} + +func (x *EvalState) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState.ProtoReflect.Descriptor instead. 
+func (*EvalState) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0} +} + +func (x *EvalState) GetValues() []*ExprValue { + if x != nil { + return x.Values + } + return nil +} + +func (x *EvalState) GetResults() []*EvalState_Result { + if x != nil { + return x.Results + } + return nil +} + +type ExprValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *ExprValue_Value + // *ExprValue_Error + // *ExprValue_Unknown + Kind isExprValue_Kind `protobuf_oneof:"kind"` +} + +func (x *ExprValue) Reset() { + *x = ExprValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExprValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExprValue) ProtoMessage() {} + +func (x *ExprValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead. +func (*ExprValue) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{1} +} + +func (m *ExprValue) GetKind() isExprValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *ExprValue) GetValue() *Value { + if x, ok := x.GetKind().(*ExprValue_Value); ok { + return x.Value + } + return nil +} + +func (x *ExprValue) GetError() *ErrorSet { + if x, ok := x.GetKind().(*ExprValue_Error); ok { + return x.Error + } + return nil +} + +func (x *ExprValue) GetUnknown() *UnknownSet { + if x, ok := x.GetKind().(*ExprValue_Unknown); ok { + return x.Unknown + } + return nil +} + +type isExprValue_Kind interface { + isExprValue_Kind() +} + +type ExprValue_Value struct { + Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"` +} + +type ExprValue_Error struct { + Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"` +} + +type ExprValue_Unknown struct { + Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"` +} + +func (*ExprValue_Value) isExprValue_Kind() {} + +func (*ExprValue_Error) isExprValue_Kind() {} + +func (*ExprValue_Unknown) isExprValue_Kind() {} + +type ErrorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` +} + +func (x *ErrorSet) Reset() { + *x = ErrorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorSet) ProtoMessage() {} + +func (x *ErrorSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead. 
+func (*ErrorSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{2} +} + +func (x *ErrorSet) GetErrors() []*status.Status { + if x != nil { + return x.Errors + } + return nil +} + +type UnknownSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` +} + +func (x *UnknownSet) Reset() { + *x = UnknownSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnknownSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnknownSet) ProtoMessage() {} + +func (x *UnknownSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. +func (*UnknownSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} +} + +func (x *UnknownSet) GetExprs() []int64 { + if x != nil { + return x.Exprs + } + return nil +} + +type EvalState_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EvalState_Result) Reset() { + *x = EvalState_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState_Result) ProtoMessage() {} + +func (x *EvalState_Result) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead. 
+func (*EvalState_Result) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *EvalState_Result) GetExpr() int64 { + if x != nil { + return x.Expr + } + return 0 +} + +func (x *EvalState_Result) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +var File_cel_expr_eval_proto protoreflect.FileDescriptor + +var file_cel_expr_eval_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, + 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, + 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 
0x45, 0x76, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_cel_expr_eval_proto_rawDescOnce sync.Once + file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc +) + +func file_cel_expr_eval_proto_rawDescGZIP() []byte { + file_cel_expr_eval_proto_rawDescOnce.Do(func() { + file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData) + }) + return file_cel_expr_eval_proto_rawDescData +} + +var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_eval_proto_goTypes = []interface{}{ + (*EvalState)(nil), // 0: cel.expr.EvalState + (*ExprValue)(nil), // 1: cel.expr.ExprValue + (*ErrorSet)(nil), // 2: cel.expr.ErrorSet + (*UnknownSet)(nil), // 3: cel.expr.UnknownSet + (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result + (*Value)(nil), // 5: cel.expr.Value + (*status.Status)(nil), // 6: google.rpc.Status +} +var file_cel_expr_eval_proto_depIdxs = []int32{ + 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue + 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result + 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value + 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet + 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet + 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_cel_expr_eval_proto_init() } +func file_cel_expr_eval_proto_init() { + if File_cel_expr_eval_proto != nil { + return + } + file_cel_expr_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExprValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnknownSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ExprValue_Value)(nil), + (*ExprValue_Error)(nil), + (*ExprValue_Unknown)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_eval_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_eval_proto_goTypes, + DependencyIndexes: file_cel_expr_eval_proto_depIdxs, + MessageInfos: file_cel_expr_eval_proto_msgTypes, + }.Build() + File_cel_expr_eval_proto = out.File + file_cel_expr_eval_proto_rawDesc = nil + file_cel_expr_eval_proto_goTypes = nil + file_cel_expr_eval_proto_depIdxs = nil +} diff --git a/constraint/vendor/cel.dev/expr/explain.pb.go b/constraint/vendor/cel.dev/expr/explain.pb.go new file mode 100644 index 000000000..79fd5443b --- /dev/null +++ b/constraint/vendor/cel.dev/expr/explain.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/explain.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Deprecated: Do not use. +type Explain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"` +} + +func (x *Explain) Reset() { + *x = Explain{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain) ProtoMessage() {} + +func (x *Explain) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain.ProtoReflect.Descriptor instead. 
+func (*Explain) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0} +} + +func (x *Explain) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +func (x *Explain) GetExprSteps() []*Explain_ExprStep { + if x != nil { + return x.ExprSteps + } + return nil +} + +type Explain_ExprStep struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` +} + +func (x *Explain_ExprStep) Reset() { + *x = Explain_ExprStep{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain_ExprStep) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain_ExprStep) ProtoMessage() {} + +func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead. +func (*Explain_ExprStep) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Explain_ExprStep) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Explain_ExprStep) GetValueIndex() int32 { + if x != nil { + return x.ValueIndex + } + return 0 +} + +var File_cel_expr_explain_proto protoreflect.FileDescriptor + +var file_cel_expr_explain_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 
0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_cel_expr_explain_proto_rawDescOnce sync.Once + file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc +) + +func file_cel_expr_explain_proto_rawDescGZIP() []byte { + file_cel_expr_explain_proto_rawDescOnce.Do(func() { + file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData) + }) + return file_cel_expr_explain_proto_rawDescData +} + +var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_cel_expr_explain_proto_goTypes = []interface{}{ + (*Explain)(nil), // 0: cel.expr.Explain + (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep + (*Value)(nil), // 2: cel.expr.Value +} +var file_cel_expr_explain_proto_depIdxs = []int32{ + 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value + 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_cel_expr_explain_proto_init() } +func file_cel_expr_explain_proto_init() { + if File_cel_expr_explain_proto != nil { + return + } + file_cel_expr_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Explain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Explain_ExprStep); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_explain_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_explain_proto_goTypes, + DependencyIndexes: file_cel_expr_explain_proto_depIdxs, + MessageInfos: file_cel_expr_explain_proto_msgTypes, + }.Build() + File_cel_expr_explain_proto = out.File + file_cel_expr_explain_proto_rawDesc = nil + file_cel_expr_explain_proto_goTypes = nil + file_cel_expr_explain_proto_depIdxs = nil +} diff --git a/constraint/vendor/cel.dev/expr/regen_go_proto.sh b/constraint/vendor/cel.dev/expr/regen_go_proto.sh new file mode 100644 index 000000000..fdcbb3ce2 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/regen_go_proto.sh @@ -0,0 +1,9 @@ +#!/bin/sh +bazel build //proto/cel/expr/conformance/... 
+files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n")) +for src in ${files[@]}; +do + dst=$(echo $src | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/') + echo "copying $dst" + $(cp $src $dst) +done diff --git a/constraint/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/constraint/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh new file mode 100644 index 000000000..9a13479e4 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +bazel build //proto/cel/expr:all + +rm -vf ./*.pb.go + +files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") ) +for src in "${files[@]}"; +do + cp -v "${src}" ./ +done diff --git a/constraint/vendor/cel.dev/expr/syntax.pb.go b/constraint/vendor/cel.dev/expr/syntax.pb.go new file mode 100644 index 000000000..48a952872 --- /dev/null +++ b/constraint/vendor/cel.dev/expr/syntax.pb.go @@ -0,0 +1,1633 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/syntax.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SourceInfo_Extension_Component int32 + +const ( + SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0 + SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1 + SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2 + SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3 +) + +// Enum value maps for SourceInfo_Extension_Component. +var ( + SourceInfo_Extension_Component_name = map[int32]string{ + 0: "COMPONENT_UNSPECIFIED", + 1: "COMPONENT_PARSER", + 2: "COMPONENT_TYPE_CHECKER", + 3: "COMPONENT_RUNTIME", + } + SourceInfo_Extension_Component_value = map[string]int32{ + "COMPONENT_UNSPECIFIED": 0, + "COMPONENT_PARSER": 1, + "COMPONENT_TYPE_CHECKER": 2, + "COMPONENT_RUNTIME": 3, + } +) + +func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component { + p := new(SourceInfo_Extension_Component) + *p = x + return p +} + +func (x SourceInfo_Extension_Component) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_syntax_proto_enumTypes[0].Descriptor() +} + +func (SourceInfo_Extension_Component) Type() protoreflect.EnumType { + return &file_cel_expr_syntax_proto_enumTypes[0] +} + +func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead. 
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +type ParsedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` + SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` +} + +func (x *ParsedExpr) Reset() { + *x = ParsedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParsedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParsedExpr) ProtoMessage() {} + +func (x *ParsedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead. +func (*ParsedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0} +} + +func (x *ParsedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +func (x *ParsedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to ExprKind: + // + // *Expr_ConstExpr + // *Expr_IdentExpr + // *Expr_SelectExpr + // *Expr_CallExpr + // *Expr_ListExpr + // *Expr_StructExpr + // *Expr_ComprehensionExpr + ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` +} + +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. 
+func (*Expr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1} +} + +func (x *Expr) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr) GetExprKind() isExpr_ExprKind { + if m != nil { + return m.ExprKind + } + return nil +} + +func (x *Expr) GetConstExpr() *Constant { + if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok { + return x.ConstExpr + } + return nil +} + +func (x *Expr) GetIdentExpr() *Expr_Ident { + if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok { + return x.IdentExpr + } + return nil +} + +func (x *Expr) GetSelectExpr() *Expr_Select { + if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok { + return x.SelectExpr + } + return nil +} + +func (x *Expr) GetCallExpr() *Expr_Call { + if x, ok := x.GetExprKind().(*Expr_CallExpr); ok { + return x.CallExpr + } + return nil +} + +func (x *Expr) GetListExpr() *Expr_CreateList { + if x, ok := x.GetExprKind().(*Expr_ListExpr); ok { + return x.ListExpr + } + return nil +} + +func (x *Expr) GetStructExpr() *Expr_CreateStruct { + if x, ok := x.GetExprKind().(*Expr_StructExpr); ok { + return x.StructExpr + } + return nil +} + +func (x *Expr) GetComprehensionExpr() *Expr_Comprehension { + if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok { + return x.ComprehensionExpr + } + return nil +} + +type isExpr_ExprKind interface { + isExpr_ExprKind() +} + +type Expr_ConstExpr struct { + ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"` +} + +type Expr_IdentExpr struct { + IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"` +} + +type Expr_SelectExpr struct { + SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"` +} + +type Expr_CallExpr struct { + CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"` +} + +type Expr_ListExpr struct { + ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"` +} + +type Expr_StructExpr struct { + StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"` +} + +type Expr_ComprehensionExpr struct { + ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"` +} + +func (*Expr_ConstExpr) isExpr_ExprKind() {} + +func (*Expr_IdentExpr) isExpr_ExprKind() {} + +func (*Expr_SelectExpr) isExpr_ExprKind() {} + +func (*Expr_CallExpr) isExpr_ExprKind() {} + +func (*Expr_ListExpr) isExpr_ExprKind() {} + +func (*Expr_StructExpr) isExpr_ExprKind() {} + +func (*Expr_ComprehensionExpr) isExpr_ExprKind() {} + +type Constant struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ConstantKind: + // + // *Constant_NullValue + // *Constant_BoolValue + // *Constant_Int64Value + // *Constant_Uint64Value + // *Constant_DoubleValue + // *Constant_StringValue + // *Constant_BytesValue + // *Constant_DurationValue + // *Constant_TimestampValue + ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` +} + +func (x *Constant) Reset() { + *x = Constant{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Constant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Constant) ProtoMessage() {} + +func (x *Constant) ProtoReflect() 
protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Constant.ProtoReflect.Descriptor instead. +func (*Constant) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2} +} + +func (m *Constant) GetConstantKind() isConstant_ConstantKind { + if m != nil { + return m.ConstantKind + } + return nil +} + +func (x *Constant) GetNullValue() structpb.NullValue { + if x, ok := x.GetConstantKind().(*Constant_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Constant) GetBoolValue() bool { + if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Constant) GetInt64Value() int64 { + if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Constant) GetUint64Value() uint64 { + if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Constant) GetDoubleValue() float64 { + if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Constant) GetStringValue() string { + if x, ok := x.GetConstantKind().(*Constant_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Constant) GetBytesValue() []byte { + if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok { + return x.BytesValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetDurationValue() *durationpb.Duration { + if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok { + return x.DurationValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetTimestampValue() *timestamppb.Timestamp { + if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +type isConstant_ConstantKind interface { + isConstant_ConstantKind() +} + +type Constant_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Constant_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Constant_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Constant_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Constant_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Constant_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Constant_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Constant_DurationValue struct { + // Deprecated: Do not use. + DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"` +} + +type Constant_TimestampValue struct { + // Deprecated: Do not use. 
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` +} + +func (*Constant_NullValue) isConstant_ConstantKind() {} + +func (*Constant_BoolValue) isConstant_ConstantKind() {} + +func (*Constant_Int64Value) isConstant_ConstantKind() {} + +func (*Constant_Uint64Value) isConstant_ConstantKind() {} + +func (*Constant_DoubleValue) isConstant_ConstantKind() {} + +func (*Constant_StringValue) isConstant_ConstantKind() {} + +func (*Constant_BytesValue) isConstant_ConstantKind() {} + +func (*Constant_DurationValue) isConstant_ConstantKind() {} + +func (*Constant_TimestampValue) isConstant_ConstantKind() {} + +type SourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"` + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"` + Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *SourceInfo) Reset() { + *x = SourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo) ProtoMessage() {} + +func (x *SourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead. 
+func (*SourceInfo) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3} +} + +func (x *SourceInfo) GetSyntaxVersion() string { + if x != nil { + return x.SyntaxVersion + } + return "" +} + +func (x *SourceInfo) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *SourceInfo) GetLineOffsets() []int32 { + if x != nil { + return x.LineOffsets + } + return nil +} + +func (x *SourceInfo) GetPositions() map[int64]int32 { + if x != nil { + return x.Positions + } + return nil +} + +func (x *SourceInfo) GetMacroCalls() map[int64]*Expr { + if x != nil { + return x.MacroCalls + } + return nil +} + +func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension { + if x != nil { + return x.Extensions + } + return nil +} + +type Expr_Ident struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Expr_Ident) Reset() { + *x = Expr_Ident{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Ident) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Ident) ProtoMessage() {} + +func (x *Expr_Ident) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead. +func (*Expr_Ident) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Expr_Ident) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type Expr_Select struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` + Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` + TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` +} + +func (x *Expr_Select) Reset() { + *x = Expr_Select{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Select) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Select) ProtoMessage() {} + +func (x *Expr_Select) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead. 
+func (*Expr_Select) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Expr_Select) GetOperand() *Expr { + if x != nil { + return x.Operand + } + return nil +} + +func (x *Expr_Select) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *Expr_Select) GetTestOnly() bool { + if x != nil { + return x.TestOnly + } + return false +} + +type Expr_Call struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` + Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` +} + +func (x *Expr_Call) Reset() { + *x = Expr_Call{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Call) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Call) ProtoMessage() {} + +func (x *Expr_Call) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead. +func (*Expr_Call) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Expr_Call) GetTarget() *Expr { + if x != nil { + return x.Target + } + return nil +} + +func (x *Expr_Call) GetFunction() string { + if x != nil { + return x.Function + } + return "" +} + +func (x *Expr_Call) GetArgs() []*Expr { + if x != nil { + return x.Args + } + return nil +} + +type Expr_CreateList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` + OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` +} + +func (x *Expr_CreateList) Reset() { + *x = Expr_CreateList{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateList) ProtoMessage() {} + +func (x *Expr_CreateList) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateList) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Expr_CreateList) GetElements() []*Expr { + if x != nil { + return x.Elements + } + return nil +} + +func (x *Expr_CreateList) GetOptionalIndices() []int32 { + if x != nil { + return x.OptionalIndices + } + return nil +} + +type Expr_CreateStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` + Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *Expr_CreateStruct) Reset() { + *x = Expr_CreateStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct) ProtoMessage() {} + +func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead. +func (*Expr_CreateStruct) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *Expr_CreateStruct) GetMessageName() string { + if x != nil { + return x.MessageName + } + return "" +} + +func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type Expr_Comprehension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` + IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` + AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` + AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` + LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` + LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` + Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *Expr_Comprehension) Reset() { + *x = Expr_Comprehension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Comprehension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Comprehension) ProtoMessage() {} + +func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead. 
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *Expr_Comprehension) GetIterVar() string { + if x != nil { + return x.IterVar + } + return "" +} + +func (x *Expr_Comprehension) GetIterRange() *Expr { + if x != nil { + return x.IterRange + } + return nil +} + +func (x *Expr_Comprehension) GetAccuVar() string { + if x != nil { + return x.AccuVar + } + return "" +} + +func (x *Expr_Comprehension) GetAccuInit() *Expr { + if x != nil { + return x.AccuInit + } + return nil +} + +func (x *Expr_Comprehension) GetLoopCondition() *Expr { + if x != nil { + return x.LoopCondition + } + return nil +} + +func (x *Expr_Comprehension) GetLoopStep() *Expr { + if x != nil { + return x.LoopStep + } + return nil +} + +func (x *Expr_Comprehension) GetResult() *Expr { + if x != nil { + return x.Result + } + return nil +} + +type Expr_CreateStruct_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to KeyKind: + // + // *Expr_CreateStruct_Entry_FieldKey + // *Expr_CreateStruct_Entry_MapKey + KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"` + Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"` +} + +func (x *Expr_CreateStruct_Entry) Reset() { + *x = Expr_CreateStruct_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct_Entry) ProtoMessage() {} + +func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0} +} + +func (x *Expr_CreateStruct_Entry) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { + if m != nil { + return m.KeyKind + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetFieldKey() string { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok { + return x.FieldKey + } + return "" +} + +func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok { + return x.MapKey + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetValue() *Expr { + if x != nil { + return x.Value + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool { + if x != nil { + return x.OptionalEntry + } + return false +} + +type isExpr_CreateStruct_Entry_KeyKind interface { + isExpr_CreateStruct_Entry_KeyKind() +} + +type Expr_CreateStruct_Entry_FieldKey struct { + FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"` +} + +type Expr_CreateStruct_Entry_MapKey struct { + MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"` +} + +func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {} + +func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {} + +type SourceInfo_Extension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"` + Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *SourceInfo_Extension) Reset() { + *x = SourceInfo_Extension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension) ProtoMessage() {} + +func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead. 
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2} +} + +func (x *SourceInfo_Extension) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component { + if x != nil { + return x.AffectedComponents + } + return nil +} + +func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version { + if x != nil { + return x.Version + } + return nil +} + +type SourceInfo_Extension_Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` +} + +func (x *SourceInfo_Extension_Version) Reset() { + *x = SourceInfo_Extension_Version{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension_Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension_Version) ProtoMessage() {} + +func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead. +func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +func (x *SourceInfo_Extension_Version) GetMajor() int64 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *SourceInfo_Extension_Version) GetMinor() int64 { + if x != nil { + return x.Minor + } + return 0 +} + +var File_cel_expr_syntax_proto protoreflect.FileDescriptor + +var file_cel_expr_syntax_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, + 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22, + 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, + 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78, + 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, + 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69, + 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, + 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 
0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c, + 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab, + 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, + 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, + 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70, + 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a, + 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, + 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65, + 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, + 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, + 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, + 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, + 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, + 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, + 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 
0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, + 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06, + 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61, + 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12, + 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50, + 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, + 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, + 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, + 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79, + 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, + 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cel_expr_syntax_proto_rawDescOnce sync.Once + file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc +) + +func file_cel_expr_syntax_proto_rawDescGZIP() []byte { + file_cel_expr_syntax_proto_rawDescOnce.Do(func() { + file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData) + }) + return file_cel_expr_syntax_proto_rawDescData +} + +var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_cel_expr_syntax_proto_goTypes = []interface{}{ + (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component + (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr + (*Expr)(nil), // 2: cel.expr.Expr + (*Constant)(nil), // 3: cel.expr.Constant + (*SourceInfo)(nil), // 4: cel.expr.SourceInfo + (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident + (*Expr_Select)(nil), // 6: cel.expr.Expr.Select + 
(*Expr_Call)(nil), // 7: cel.expr.Expr.Call + (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList + (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct + (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension + (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry + nil, // 12: cel.expr.SourceInfo.PositionsEntry + nil, // 13: cel.expr.SourceInfo.MacroCallsEntry + (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension + (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version + (structpb.NullValue)(0), // 16: google.protobuf.NullValue + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp +} +var file_cel_expr_syntax_proto_depIdxs = []int32{ + 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr + 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo + 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant + 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident + 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select + 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call + 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList + 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct + 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension + 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue + 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration + 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp + 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry + 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry + 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension + 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr + 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr + 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr + 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr + 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry + 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr + 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr + 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr + 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr + 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr + 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr + 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr + 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr + 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component + 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_cel_expr_syntax_proto_init() } +func file_cel_expr_syntax_proto_init() { + 
if File_cel_expr_syntax_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParsedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Constant); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Ident); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Select); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Call); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Comprehension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension_Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{ + 
(*Expr_ConstExpr)(nil), + (*Expr_IdentExpr)(nil), + (*Expr_SelectExpr)(nil), + (*Expr_CallExpr)(nil), + (*Expr_ListExpr)(nil), + (*Expr_StructExpr)(nil), + (*Expr_ComprehensionExpr)(nil), + } + file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Constant_NullValue)(nil), + (*Constant_BoolValue)(nil), + (*Constant_Int64Value)(nil), + (*Constant_Uint64Value)(nil), + (*Constant_DoubleValue)(nil), + (*Constant_StringValue)(nil), + (*Constant_BytesValue)(nil), + (*Constant_DurationValue)(nil), + (*Constant_TimestampValue)(nil), + } + file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*Expr_CreateStruct_Entry_FieldKey)(nil), + (*Expr_CreateStruct_Entry_MapKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_syntax_proto_rawDesc, + NumEnums: 1, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_syntax_proto_goTypes, + DependencyIndexes: file_cel_expr_syntax_proto_depIdxs, + EnumInfos: file_cel_expr_syntax_proto_enumTypes, + MessageInfos: file_cel_expr_syntax_proto_msgTypes, + }.Build() + File_cel_expr_syntax_proto = out.File + file_cel_expr_syntax_proto_rawDesc = nil + file_cel_expr_syntax_proto_goTypes = nil + file_cel_expr_syntax_proto_depIdxs = nil +} diff --git a/constraint/vendor/cel.dev/expr/value.pb.go b/constraint/vendor/cel.dev/expr/value.pb.go new file mode 100644 index 000000000..e5e29228c --- /dev/null +++ b/constraint/vendor/cel.dev/expr/value.pb.go @@ -0,0 +1,653 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/value.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_BoolValue + // *Value_Int64Value + // *Value_Uint64Value + // *Value_DoubleValue + // *Value_StringValue + // *Value_BytesValue + // *Value_EnumValue + // *Value_ObjectValue + // *Value_MapValue + // *Value_ListValue + // *Value_TypeValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (x *Value) Reset() { + *x = Value{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. 
+func (*Value) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{0} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() structpb.NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetInt64Value() int64 { + if x, ok := x.GetKind().(*Value_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Value) GetUint64Value() uint64 { + if x, ok := x.GetKind().(*Value_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Value) GetDoubleValue() float64 { + if x, ok := x.GetKind().(*Value_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBytesValue() []byte { + if x, ok := x.GetKind().(*Value_BytesValue); ok { + return x.BytesValue + } + return nil +} + +func (x *Value) GetEnumValue() *EnumValue { + if x, ok := x.GetKind().(*Value_EnumValue); ok { + return x.EnumValue + } + return nil +} + +func (x *Value) GetObjectValue() *anypb.Any { + if x, ok := x.GetKind().(*Value_ObjectValue); ok { + return x.ObjectValue + } + return nil +} + +func (x *Value) GetMapValue() *MapValue { + if x, ok := x.GetKind().(*Value_MapValue); ok { + return x.MapValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +func (x *Value) GetTypeValue() string { + if x, ok := x.GetKind().(*Value_TypeValue); ok { + return x.TypeValue + } + return "" +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Value_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Value_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Value_EnumValue struct { + EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"` +} + +type Value_ObjectValue struct { + ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"` +} + +type Value_MapValue struct { + MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"` +} + +type Value_TypeValue struct { + TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"` +} + +func 
(*Value_NullValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_Int64Value) isValue_Kind() {} + +func (*Value_Uint64Value) isValue_Kind() {} + +func (*Value_DoubleValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BytesValue) isValue_Kind() {} + +func (*Value_EnumValue) isValue_Kind() {} + +func (*Value_ObjectValue) isValue_Kind() {} + +func (*Value_MapValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (*Value_TypeValue) isValue_Kind() {} + +type EnumValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EnumValue) Reset() { + *x = EnumValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValue) ProtoMessage() {} + +func (x *EnumValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. +func (*EnumValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{1} +} + +func (x *EnumValue) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EnumValue) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ListValue) Reset() { + *x = ListValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +type MapValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *MapValue) Reset() { + *x = MapValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue) ProtoMessage() {} + +func (x *MapValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue.ProtoReflect.Descriptor instead. +func (*MapValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3} +} + +func (x *MapValue) GetEntries() []*MapValue_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type MapValue_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MapValue_Entry) Reset() { + *x = MapValue_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue_Entry) ProtoMessage() {} + +func (x *MapValue_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead. 
+func (*MapValue_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *MapValue_Entry) GetKey() *Value { + if x != nil { + return x.Key + } + return nil +} + +func (x *MapValue_Entry) GetValue() *Value { + if x != nil { + return x.Value + } + return nil +} + +var File_cel_expr_value_proto protoreflect.FileDescriptor + +var file_cel_expr_value_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, + 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 
0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, + 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65, + 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_value_proto_rawDescOnce sync.Once + file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc +) + +func file_cel_expr_value_proto_rawDescGZIP() []byte { + file_cel_expr_value_proto_rawDescOnce.Do(func() { + file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData) + }) + return file_cel_expr_value_proto_rawDescData +} + +var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_value_proto_goTypes = []interface{}{ + (*Value)(nil), // 0: cel.expr.Value + (*EnumValue)(nil), // 1: cel.expr.EnumValue + (*ListValue)(nil), // 2: cel.expr.ListValue + (*MapValue)(nil), // 3: cel.expr.MapValue + (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry + (structpb.NullValue)(0), // 5: google.protobuf.NullValue + (*anypb.Any)(nil), // 6: google.protobuf.Any +} +var 
file_cel_expr_value_proto_depIdxs = []int32{ + 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue + 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any + 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue + 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue + 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value + 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry + 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value + 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_cel_expr_value_proto_init() } +func file_cel_expr_value_proto_init() { + if File_cel_expr_value_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Value_NullValue)(nil), + (*Value_BoolValue)(nil), + (*Value_Int64Value)(nil), + (*Value_Uint64Value)(nil), + (*Value_DoubleValue)(nil), + (*Value_StringValue)(nil), + (*Value_BytesValue)(nil), + (*Value_EnumValue)(nil), + (*Value_ObjectValue)(nil), + (*Value_MapValue)(nil), + (*Value_ListValue)(nil), + (*Value_TypeValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_value_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_value_proto_goTypes, + DependencyIndexes: file_cel_expr_value_proto_depIdxs, + MessageInfos: file_cel_expr_value_proto_msgTypes, + }.Build() + File_cel_expr_value_proto = out.File + file_cel_expr_value_proto_rawDesc = nil + file_cel_expr_value_proto_goTypes = nil + file_cel_expr_value_proto_depIdxs = nil +} diff --git a/constraint/vendor/github.com/agnivade/levenshtein/.travis.yml b/constraint/vendor/github.com/agnivade/levenshtein/.travis.yml deleted file mode 100644 
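For context on the generated cel.expr types above: a Value is a tagged union over the kind oneof (null/bool/int64/uint64/double/string/bytes scalars, EnumValue, an object packed as google.protobuf.Any, MapValue, ListValue, or a type name), and MapValue stores explicit key/value entries so that keys can themselves be arbitrary Values. The sketch below is illustrative only and not part of this diff; it assumes the standard protoc-gen-go field layout for the oneof wrappers shown above and imports the vendored package by its go_package path cel.dev/expr (the celpb alias is mine).

package main

import (
	"fmt"

	celpb "cel.dev/expr" // assumed import path, taken from the go_package option in the descriptor above
)

func main() {
	// Build a cel.expr.Value holding the map {"user": "alice"} out of the
	// oneof wrapper types generated above (Value_MapValue, Value_StringValue).
	v := &celpb.Value{
		Kind: &celpb.Value_MapValue{
			MapValue: &celpb.MapValue{
				Entries: []*celpb.MapValue_Entry{{
					Key:   &celpb.Value{Kind: &celpb.Value_StringValue{StringValue: "user"}},
					Value: &celpb.Value{Kind: &celpb.Value_StringValue{StringValue: "alice"}},
				}},
			},
		},
	}

	// The generated getters are nil-safe, so the value can be read back
	// without type-switching on the oneof.
	for _, e := range v.GetMapValue().GetEntries() {
		fmt.Printf("%s = %s\n", e.GetKey().GetStringValue(), e.GetValue().GetStringValue())
	}
}

Because MapValue_Entry keys are full Values rather than a proto map field, this representation can express maps keyed by ints, uints, or bools as well as strings, which proto map fields cannot.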
index 0873fa983..000000000 --- a/constraint/vendor/github.com/agnivade/levenshtein/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go - -# See https://travis-ci.community/t/goos-js-goarch-wasm-go-run-fails-panic-newosproc-not-implemented/1651 -#addons: -# chrome: stable - -before_install: -- export GO111MODULE=on - -#install: -#- go get github.com/agnivade/wasmbrowsertest -#- mv $GOPATH/bin/wasmbrowsertest $GOPATH/bin/go_js_wasm_exec -#- export PATH=$GOPATH/bin:$PATH - -go: -- 1.13.x -- 1.14.x -- 1.15.x -- tip - -script: -#- GOOS=js GOARCH=wasm go test -v -- go test -v diff --git a/constraint/vendor/github.com/agnivade/levenshtein/Makefile b/constraint/vendor/github.com/agnivade/levenshtein/Makefile index 5f6890d61..3bbda319e 100644 --- a/constraint/vendor/github.com/agnivade/levenshtein/Makefile +++ b/constraint/vendor/github.com/agnivade/levenshtein/Makefile @@ -4,12 +4,10 @@ install: go install lint: - gofmt -l -s -w . && go vet . && golint -set_exit_status=1 . + gofmt -l -s -w . && go vet . -test: # The first 2 go gets are to support older Go versions - go get github.com/arbovm/levenshtein - go get github.com/dgryski/trifles/leven - GO111MODULE=on go test -race -v -coverprofile=coverage.txt -covermode=atomic +test: + go test -race -v -coverprofile=coverage.txt -covermode=atomic bench: go test -run=XXX -bench=. -benchmem -count=5 diff --git a/constraint/vendor/github.com/agnivade/levenshtein/README.md b/constraint/vendor/github.com/agnivade/levenshtein/README.md index 13c52a210..34378aabe 100644 --- a/constraint/vendor/github.com/agnivade/levenshtein/README.md +++ b/constraint/vendor/github.com/agnivade/levenshtein/README.md @@ -1,4 +1,4 @@ -levenshtein [![Build Status](https://travis-ci.org/agnivade/levenshtein.svg?branch=master)](https://travis-ci.org/agnivade/levenshtein) [![Go Report Card](https://goreportcard.com/badge/github.com/agnivade/levenshtein)](https://goreportcard.com/report/github.com/agnivade/levenshtein) [![PkgGoDev](https://pkg.go.dev/badge/github.com/agnivade/levenshtein)](https://pkg.go.dev/github.com/agnivade/levenshtein) +levenshtein ![Build Status](https://github.com/agnivade/levenshtein/actions/workflows/ci.yml/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/agnivade/levenshtein)](https://goreportcard.com/report/github.com/agnivade/levenshtein) [![PkgGoDev](https://pkg.go.dev/badge/github.com/agnivade/levenshtein)](https://pkg.go.dev/github.com/agnivade/levenshtein) =========== [Go](http://golang.org) package to calculate the [Levenshtein Distance](http://en.wikipedia.org/wiki/Levenshtein_distance) diff --git a/constraint/vendor/github.com/agnivade/levenshtein/levenshtein.go b/constraint/vendor/github.com/agnivade/levenshtein/levenshtein.go index f727a66fe..861f409dd 100644 --- a/constraint/vendor/github.com/agnivade/levenshtein/levenshtein.go +++ b/constraint/vendor/github.com/agnivade/levenshtein/levenshtein.go @@ -41,6 +41,25 @@ func ComputeDistance(a, b string) int { if len(s1) > len(s2) { s1, s2 = s2, s1 } + + // remove trailing identical runes. + for i := 0; i < len(s1); i++ { + if s1[len(s1)-1-i] != s2[len(s2)-1-i] { + s1 = s1[:len(s1)-i] + s2 = s2[:len(s2)-i] + break + } + } + + // Remove leading identical runes. 
+ for i := 0; i < len(s1); i++ { + if s1[i] != s2[i] { + s1 = s1[i:] + s2 = s2[i:] + break + } + } + lenS1 := len(s1) lenS2 := len(s2) @@ -71,7 +90,7 @@ func ComputeDistance(a, b string) int { for j := 1; j <= lenS1; j++ { current := x[j-1] // match if s2[i-1] != s1[j-1] { - current = min(min(x[j-1]+1, prev+1), x[j]+1) + current = min(x[j-1]+1, prev+1, x[j]+1) } x[j-1] = prev prev = current @@ -80,10 +99,3 @@ func ComputeDistance(a, b string) int { } return int(x[lenS1]) } - -func min(a, b uint16) uint16 { - if a < b { - return a - } - return b -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE deleted file mode 100644 index 52cf18e42..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright 2021 The ANTLR Project - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - 3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go deleted file mode 100644 index ab5121267..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Package antlr implements the Go version of the ANTLR 4 runtime. - -# The ANTLR Tool - -ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, -or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. -From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface -(or visitor) that makes it easy to respond to the recognition of phrases of interest. - -# Code Generation - -ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a -runtime library, written specifically to support the generated code in the target language. This library is the -runtime for the Go target. 
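Stepping back to the agnivade/levenshtein hunks a few files above: ComputeDistance now trims the runes the two inputs share at the head and tail before running the DP loop, and the package-local min helper is dropped in favour of Go's built-in three-argument min (available since Go 1.21). A short usage sketch of the package's public API, not part of this diff:

package main

import (
	"fmt"

	"github.com/agnivade/levenshtein"
)

func main() {
	// Classic example: kitten -> sitting takes 3 edits (2 substitutions, 1 insertion).
	fmt.Println(levenshtein.ComputeDistance("kitten", "sitting")) // 3

	// With the trimming change above, the shared "pre" prefix and "ix" suffix are
	// stripped up front, so only the single differing rune feeds the DP loop;
	// the result is unchanged.
	fmt.Println(levenshtein.ComputeDistance("prefix", "precix")) // 1
}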
- -To generate code for the go target, it is generally recommended to place the source grammar files in a package of -their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory -it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean -that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other -way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in -your IDE, or configuration in your CI system. - -Here is a general template for an ANTLR based recognizer in Go: - - . - ├── myproject - ├── parser - │ ├── mygrammar.g4 - │ ├── antlr-4.12.0-complete.jar - │ ├── error_listeners.go - │ ├── generate.go - │ ├── generate.sh - ├── go.mod - ├── go.sum - ├── main.go - └── main_test.go - -Make sure that the package statement in your grammar file(s) reflects the go package they exist in. -The generate.go file then looks like this: - - package parser - - //go:generate ./generate.sh - -And the generate.sh file will look similar to this: - - #!/bin/sh - - alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool' - antlr4 -Dlanguage=Go -no-visitor -package parser *.g4 - -depending on whether you want visitors or listeners or any other ANTLR options. - -From the command line at the root of your package “myproject” you can then simply issue the command: - - go generate ./... - -# Copyright Notice - -Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - -Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. - -[target languages]: https://github.com/antlr/antlr4/tree/master/runtime -[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt -*/ -package antlr diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go deleted file mode 100644 index 7619fa172..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic -// context). The syntactic context is a graph-structured stack node whose -// path(s) to the root is the rule invocation(s) chain used to arrive at the -// state. The semantic context is the tree of semantic predicates encountered -// before reaching an ATN state. 
-type ATNConfig interface { - Equals(o Collectable[ATNConfig]) bool - Hash() int - - GetState() ATNState - GetAlt() int - GetSemanticContext() SemanticContext - - GetContext() PredictionContext - SetContext(PredictionContext) - - GetReachesIntoOuterContext() int - SetReachesIntoOuterContext(int) - - String() string - - getPrecedenceFilterSuppressed() bool - setPrecedenceFilterSuppressed(bool) -} - -type BaseATNConfig struct { - precedenceFilterSuppressed bool - state ATNState - alt int - context PredictionContext - semanticContext SemanticContext - reachesIntoOuterContext int -} - -func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup - return &BaseATNConfig{ - state: old.state, - alt: old.alt, - context: old.context, - semanticContext: old.semanticContext, - reachesIntoOuterContext: old.reachesIntoOuterContext, - } -} - -func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig5(state, alt, context, SemanticContextNone) -} - -func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Necessary? - } - - return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} -} - -func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) -} - -func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) -} - -func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) -} - -func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) -} - -func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") - } - - return &BaseATNConfig{ - state: state, - alt: c.GetAlt(), - context: context, - semanticContext: semanticContext, - reachesIntoOuterContext: c.GetReachesIntoOuterContext(), - precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), - } -} - -func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { - return b.precedenceFilterSuppressed -} - -func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { - b.precedenceFilterSuppressed = v -} - -func (b *BaseATNConfig) GetState() ATNState { - return b.state -} - -func (b *BaseATNConfig) GetAlt() int { - return b.alt -} - -func (b *BaseATNConfig) SetContext(v PredictionContext) { - b.context = v -} -func (b *BaseATNConfig) GetContext() PredictionContext { - return b.context -} - -func (b *BaseATNConfig) GetSemanticContext() SemanticContext { - return b.semanticContext -} - -func (b *BaseATNConfig) GetReachesIntoOuterContext() int { - return b.reachesIntoOuterContext -} - -func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { - b.reachesIntoOuterContext = v -} - -// Equals is the default comparison function for an ATNConfig when no specialist implementation is required -// for a collection. -// -// An ATN configuration is equal to another if both have the same state, they -// predict the same alternative, and syntactic/semantic contexts are the same. 
-func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool { - if b == o { - return true - } else if o == nil { - return false - } - - var other, ok = o.(*BaseATNConfig) - - if !ok { - return false - } - - var equal bool - - if b.context == nil { - equal = other.context == nil - } else { - equal = b.context.Equals(other.context) - } - - var ( - nums = b.state.GetStateNumber() == other.state.GetStateNumber() - alts = b.alt == other.alt - cons = b.semanticContext.Equals(other.semanticContext) - sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed - ) - - return nums && alts && cons && sups && equal -} - -// Hash is the default hash function for BaseATNConfig, when no specialist hash function -// is required for a collection -func (b *BaseATNConfig) Hash() int { - var c int - if b.context != nil { - c = b.context.Hash() - } - - h := murmurInit(7) - h = murmurUpdate(h, b.state.GetStateNumber()) - h = murmurUpdate(h, b.alt) - h = murmurUpdate(h, c) - h = murmurUpdate(h, b.semanticContext.Hash()) - return murmurFinish(h, 4) -} - -func (b *BaseATNConfig) String() string { - var s1, s2, s3 string - - if b.context != nil { - s1 = ",[" + fmt.Sprint(b.context) + "]" - } - - if b.semanticContext != SemanticContextNone { - s2 = "," + fmt.Sprint(b.semanticContext) - } - - if b.reachesIntoOuterContext > 0 { - s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) - } - - return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) -} - -type LexerATNConfig struct { - *BaseATNConfig - lexerActionExecutor *LexerActionExecutor - passedThroughNonGreedyDecision bool -} - -func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), - lexerActionExecutor: lexerActionExecutor, - } -} - -func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via -// the default comparator [ObjEqComparator]. 
-func (l *LexerATNConfig) Hash() int { - var f int - if l.passedThroughNonGreedyDecision { - f = 1 - } else { - f = 0 - } - h := murmurInit(7) - h = murmurUpdate(h, l.state.GetStateNumber()) - h = murmurUpdate(h, l.alt) - h = murmurUpdate(h, l.context.Hash()) - h = murmurUpdate(h, l.semanticContext.Hash()) - h = murmurUpdate(h, f) - h = murmurUpdate(h, l.lexerActionExecutor.Hash()) - h = murmurFinish(h, 6) - return h -} - -// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via -// the default comparator [ObjEqComparator]. -func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool { - if l == other { - return true - } - var othert, ok = other.(*LexerATNConfig) - - if l == other { - return true - } else if !ok { - return false - } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { - return false - } - - var b bool - - if l.lexerActionExecutor != nil { - b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor) - } else { - b = othert.lexerActionExecutor != nil - } - - if b { - return false - } - - return l.BaseATNConfig.Equals(othert.BaseATNConfig) -} - -func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { - var ds, ok = target.(DecisionState) - - return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go deleted file mode 100644 index 43e9b33f3..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -type ATNConfigSet interface { - Hash() int - Equals(o Collectable[ATNConfig]) bool - Add(ATNConfig, *DoubleDict) bool - AddAll([]ATNConfig) bool - - GetStates() *JStore[ATNState, Comparator[ATNState]] - GetPredicates() []SemanticContext - GetItems() []ATNConfig - - OptimizeConfigs(interpreter *BaseATNSimulator) - - Length() int - IsEmpty() bool - Contains(ATNConfig) bool - ContainsFast(ATNConfig) bool - Clear() - String() string - - HasSemanticContext() bool - SetHasSemanticContext(v bool) - - ReadOnly() bool - SetReadOnly(bool) - - GetConflictingAlts() *BitSet - SetConflictingAlts(*BitSet) - - Alts() *BitSet - - FullContext() bool - - GetUniqueAlt() int - SetUniqueAlt(int) - - GetDipsIntoOuterContext() bool - SetDipsIntoOuterContext(bool) -} - -// BaseATNConfigSet is a specialized set of ATNConfig that tracks information -// about its elements and can combine similar configurations using a -// graph-structured stack. -type BaseATNConfigSet struct { - cachedHash int - - // configLookup is used to determine whether two BaseATNConfigSets are equal. We - // need all configurations with the same (s, i, _, semctx) to be equal. A key - // effectively doubles the number of objects associated with ATNConfigs. All - // keys are hashed by (s, i, _, pi), not including the context. Wiped out when - // read-only because a set becomes a DFA state. - configLookup *JStore[ATNConfig, Comparator[ATNConfig]] - - // configs is the added elements. - configs []ATNConfig - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. 
Can we track conflicts as they - // are added to save scanning configs later? - conflictingAlts *BitSet - - // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates - // we hit a pred while computing a closure operation. Do not make a DFA state - // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? - dipsIntoOuterContext bool - - // fullCtx is whether it is part of a full context LL prediction. Used to - // determine how to merge $. It is a wildcard with SLL, but not for an LL - // context merge. - fullCtx bool - - // Used in parser and lexer. In lexer, it indicates we hit a pred - // while computing a closure operation. Don't make a DFA state from a. - hasSemanticContext bool - - // readOnly is whether it is read-only. Do not - // allow any code to manipulate the set if true because DFA states will point at - // sets and those must not change. It not, protect other fields; conflictingAlts - // in particular, which is assigned after readOnly. - readOnly bool - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - uniqueAlt int -} - -func (b *BaseATNConfigSet) Alts() *BitSet { - alts := NewBitSet() - for _, it := range b.configs { - alts.add(it.GetAlt()) - } - return alts -} - -func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { - return &BaseATNConfigSet{ - cachedHash: -1, - configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst), - fullCtx: fullCtx, - } -} - -// Add merges contexts with existing configs for (s, i, pi, _), where s is the -// ATNConfig.state, i is the ATNConfig.alt, and pi is the -// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates -// dipsIntoOuterContext and hasSemanticContext when necessary. -func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { - if b.readOnly { - panic("set is read-only") - } - - if config.GetSemanticContext() != SemanticContextNone { - b.hasSemanticContext = true - } - - if config.GetReachesIntoOuterContext() > 0 { - b.dipsIntoOuterContext = true - } - - existing, present := b.configLookup.Put(config) - - // The config was not already in the set - // - if !present { - b.cachedHash = -1 - b.configs = append(b.configs, config) // Track order here - return true - } - - // Merge a previous (s, i, pi, _) with it and save the result - rootIsWildcard := !b.fullCtx - merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) - - // No need to check for existing.context because config.context is in the cache, - // since the only way to create new graphs is the "call rule" and here. We cache - // at both places. 
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) - - // Preserve the precedence filter suppression during the merge - if config.getPrecedenceFilterSuppressed() { - existing.setPrecedenceFilterSuppressed(true) - } - - // Replace the context because there is no need to do alt mapping - existing.SetContext(merged) - - return true -} - -func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { - - // states uses the standard comparator provided by the ATNState instance - // - states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst) - - for i := 0; i < len(b.configs); i++ { - states.Put(b.configs[i].GetState()) - } - - return states -} - -func (b *BaseATNConfigSet) HasSemanticContext() bool { - return b.hasSemanticContext -} - -func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { - b.hasSemanticContext = v -} - -func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { - preds := make([]SemanticContext, 0) - - for i := 0; i < len(b.configs); i++ { - c := b.configs[i].GetSemanticContext() - - if c != SemanticContextNone { - preds = append(preds, c) - } - } - - return preds -} - -func (b *BaseATNConfigSet) GetItems() []ATNConfig { - return b.configs -} - -func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { - if b.readOnly { - panic("set is read-only") - } - - if b.configLookup.Len() == 0 { - return - } - - for i := 0; i < len(b.configs); i++ { - config := b.configs[i] - - config.SetContext(interpreter.getCachedContext(config.GetContext())) - } -} - -func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { - for i := 0; i < len(coll); i++ { - b.Add(coll[i], nil) - } - - return false -} - -// Compare is a hack function just to verify that adding DFAstares to the known -// set works, so long as comparison of ATNConfigSet s works. For that to work, we -// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't -// know the order, so we do this inefficient hack. If this proves the point, then -// we can change the config set to a better structure. 
-func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool { - if len(b.configs) != len(bs.configs) { - return false - } - - for _, c := range b.configs { - found := false - for _, c2 := range bs.configs { - if c.Equals(c2) { - found = true - break - } - } - - if !found { - return false - } - - } - return true -} - -func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool { - if b == other { - return true - } else if _, ok := other.(*BaseATNConfigSet); !ok { - return false - } - - other2 := other.(*BaseATNConfigSet) - - return b.configs != nil && - b.fullCtx == other2.fullCtx && - b.uniqueAlt == other2.uniqueAlt && - b.conflictingAlts == other2.conflictingAlts && - b.hasSemanticContext == other2.hasSemanticContext && - b.dipsIntoOuterContext == other2.dipsIntoOuterContext && - b.Compare(other2) -} - -func (b *BaseATNConfigSet) Hash() int { - if b.readOnly { - if b.cachedHash == -1 { - b.cachedHash = b.hashCodeConfigs() - } - - return b.cachedHash - } - - return b.hashCodeConfigs() -} - -func (b *BaseATNConfigSet) hashCodeConfigs() int { - h := 1 - for _, config := range b.configs { - h = 31*h + config.Hash() - } - return h -} - -func (b *BaseATNConfigSet) Length() int { - return len(b.configs) -} - -func (b *BaseATNConfigSet) IsEmpty() bool { - return len(b.configs) == 0 -} - -func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.Contains(item) -} - -func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set -} - -func (b *BaseATNConfigSet) Clear() { - if b.readOnly { - panic("set is read-only") - } - - b.configs = make([]ATNConfig, 0) - b.cachedHash = -1 - b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) -} - -func (b *BaseATNConfigSet) FullContext() bool { - return b.fullCtx -} - -func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { - return b.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { - b.dipsIntoOuterContext = v -} - -func (b *BaseATNConfigSet) GetUniqueAlt() int { - return b.uniqueAlt -} - -func (b *BaseATNConfigSet) SetUniqueAlt(v int) { - b.uniqueAlt = v -} - -func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { - return b.conflictingAlts -} - -func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { - b.conflictingAlts = v -} - -func (b *BaseATNConfigSet) ReadOnly() bool { - return b.readOnly -} - -func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { - b.readOnly = readOnly - - if readOnly { - b.configLookup = nil // Read only, so no need for the lookup cache - } -} - -func (b *BaseATNConfigSet) String() string { - s := "[" - - for i, c := range b.configs { - s += c.String() - - if i != len(b.configs)-1 { - s += ", " - } - } - - s += "]" - - if b.hasSemanticContext { - s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) - } - - if b.uniqueAlt != ATNInvalidAltNumber { - s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) - } - - if b.conflictingAlts != nil { - s += ",conflictingAlts=" + b.conflictingAlts.String() - } - - if b.dipsIntoOuterContext { - s += ",dipsIntoOuterContext" - } - - return s -} - -type OrderedATNConfigSet struct { - *BaseATNConfigSet -} - -func NewOrderedATNConfigSet() *OrderedATNConfigSet { - b := NewBaseATNConfigSet(false) - - // This set uses the standard Hash() and 
Equals() from ATNConfig - b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - - return &OrderedATNConfigSet{BaseATNConfigSet: b} -} - -func hashATNConfig(i interface{}) int { - o := i.(ATNConfig) - hash := 7 - hash = 31*hash + o.GetState().GetStateNumber() - hash = 31*hash + o.GetAlt() - hash = 31*hash + o.GetSemanticContext().Hash() - return hash -} - -func equalATNConfigs(a, b interface{}) bool { - if a == nil || b == nil { - return false - } - - if a == b { - return true - } - - var ai, ok = a.(ATNConfig) - var bi, ok1 = b.(ATNConfig) - - if !ok || !ok1 { - return false - } - - if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() { - return false - } - - if ai.GetAlt() != bi.GetAlt() { - return false - } - - return ai.GetSemanticContext().Equals(bi.GetSemanticContext()) -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go deleted file mode 100644 index 41529115f..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) - -type IATNSimulator interface { - SharedContextCache() *PredictionContextCache - ATN() *ATN - DecisionToDFA() []*DFA -} - -type BaseATNSimulator struct { - atn *ATN - sharedContextCache *PredictionContextCache - decisionToDFA []*DFA -} - -func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { - b := new(BaseATNSimulator) - - b.atn = atn - b.sharedContextCache = sharedContextCache - - return b -} - -func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { - if b.sharedContextCache == nil { - return context - } - - visited := make(map[PredictionContext]PredictionContext) - - return getCachedBasePredictionContext(context, b.sharedContextCache, visited) -} - -func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { - return b.sharedContextCache -} - -func (b *BaseATNSimulator) ATN() *ATN { - return b.atn -} - -func (b *BaseATNSimulator) DecisionToDFA() []*DFA { - return b.decisionToDFA -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go deleted file mode 100644 index 1f2a56bc3..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "strconv" - -// Constants for serialization. 
-const ( - ATNStateInvalidType = 0 - ATNStateBasic = 1 - ATNStateRuleStart = 2 - ATNStateBlockStart = 3 - ATNStatePlusBlockStart = 4 - ATNStateStarBlockStart = 5 - ATNStateTokenStart = 6 - ATNStateRuleStop = 7 - ATNStateBlockEnd = 8 - ATNStateStarLoopBack = 9 - ATNStateStarLoopEntry = 10 - ATNStatePlusLoopBack = 11 - ATNStateLoopEnd = 12 - - ATNStateInvalidStateNumber = -1 -) - -var ATNStateInitialNumTransitions = 4 - -type ATNState interface { - GetEpsilonOnlyTransitions() bool - - GetRuleIndex() int - SetRuleIndex(int) - - GetNextTokenWithinRule() *IntervalSet - SetNextTokenWithinRule(*IntervalSet) - - GetATN() *ATN - SetATN(*ATN) - - GetStateType() int - - GetStateNumber() int - SetStateNumber(int) - - GetTransitions() []Transition - SetTransitions([]Transition) - AddTransition(Transition, int) - - String() string - Hash() int - Equals(Collectable[ATNState]) bool -} - -type BaseATNState struct { - // NextTokenWithinRule caches lookahead during parsing. Not used during construction. - NextTokenWithinRule *IntervalSet - - // atn is the current ATN. - atn *ATN - - epsilonOnlyTransitions bool - - // ruleIndex tracks the Rule index because there are no Rule objects at runtime. - ruleIndex int - - stateNumber int - - stateType int - - // Track the transitions emanating from this ATN state. - transitions []Transition -} - -func NewBaseATNState() *BaseATNState { - return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} -} - -func (as *BaseATNState) GetRuleIndex() int { - return as.ruleIndex -} - -func (as *BaseATNState) SetRuleIndex(v int) { - as.ruleIndex = v -} -func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { - return as.epsilonOnlyTransitions -} - -func (as *BaseATNState) GetATN() *ATN { - return as.atn -} - -func (as *BaseATNState) SetATN(atn *ATN) { - as.atn = atn -} - -func (as *BaseATNState) GetTransitions() []Transition { - return as.transitions -} - -func (as *BaseATNState) SetTransitions(t []Transition) { - as.transitions = t -} - -func (as *BaseATNState) GetStateType() int { - return as.stateType -} - -func (as *BaseATNState) GetStateNumber() int { - return as.stateNumber -} - -func (as *BaseATNState) SetStateNumber(stateNumber int) { - as.stateNumber = stateNumber -} - -func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { - return as.NextTokenWithinRule -} - -func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { - as.NextTokenWithinRule = v -} - -func (as *BaseATNState) Hash() int { - return as.stateNumber -} - -func (as *BaseATNState) String() string { - return strconv.Itoa(as.stateNumber) -} - -func (as *BaseATNState) Equals(other Collectable[ATNState]) bool { - if ot, ok := other.(ATNState); ok { - return as.stateNumber == ot.GetStateNumber() - } - - return false -} - -func (as *BaseATNState) isNonGreedyExitState() bool { - return false -} - -func (as *BaseATNState) AddTransition(trans Transition, index int) { - if len(as.transitions) == 0 { - as.epsilonOnlyTransitions = trans.getIsEpsilon() - } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { - as.epsilonOnlyTransitions = false - } - - if index == -1 { - as.transitions = append(as.transitions, trans) - } else { - as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) 
- // TODO: as.transitions.splice(index, 1, trans) - } -} - -type BasicState struct { - *BaseATNState -} - -func NewBasicState() *BasicState { - b := NewBaseATNState() - - b.stateType = ATNStateBasic - - return &BasicState{BaseATNState: b} -} - -type DecisionState interface { - ATNState - - getDecision() int - setDecision(int) - - getNonGreedy() bool - setNonGreedy(bool) -} - -type BaseDecisionState struct { - *BaseATNState - decision int - nonGreedy bool -} - -func NewBaseDecisionState() *BaseDecisionState { - return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} -} - -func (s *BaseDecisionState) getDecision() int { - return s.decision -} - -func (s *BaseDecisionState) setDecision(b int) { - s.decision = b -} - -func (s *BaseDecisionState) getNonGreedy() bool { - return s.nonGreedy -} - -func (s *BaseDecisionState) setNonGreedy(b bool) { - s.nonGreedy = b -} - -type BlockStartState interface { - DecisionState - - getEndState() *BlockEndState - setEndState(*BlockEndState) -} - -// BaseBlockStartState is the start of a regular (...) block. -type BaseBlockStartState struct { - *BaseDecisionState - endState *BlockEndState -} - -func NewBlockStartState() *BaseBlockStartState { - return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} -} - -func (s *BaseBlockStartState) getEndState() *BlockEndState { - return s.endState -} - -func (s *BaseBlockStartState) setEndState(b *BlockEndState) { - s.endState = b -} - -type BasicBlockStartState struct { - *BaseBlockStartState -} - -func NewBasicBlockStartState() *BasicBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateBlockStart - - return &BasicBlockStartState{BaseBlockStartState: b} -} - -var _ BlockStartState = &BasicBlockStartState{} - -// BlockEndState is a terminal node of a simple (a|b|c) block. -type BlockEndState struct { - *BaseATNState - startState ATNState -} - -func NewBlockEndState() *BlockEndState { - b := NewBaseATNState() - - b.stateType = ATNStateBlockEnd - - return &BlockEndState{BaseATNState: b} -} - -// RuleStopState is the last node in the ATN for a rule, unless that rule is the -// start symbol. In that case, there is one transition to EOF. Later, we might -// encode references to all calls to this rule to compute FOLLOW sets for error -// handling. -type RuleStopState struct { - *BaseATNState -} - -func NewRuleStopState() *RuleStopState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStop - - return &RuleStopState{BaseATNState: b} -} - -type RuleStartState struct { - *BaseATNState - stopState ATNState - isPrecedenceRule bool -} - -func NewRuleStartState() *RuleStartState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStart - - return &RuleStartState{BaseATNState: b} -} - -// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two -// transitions: one to the loop back to start of the block, and one to exit. -type PlusLoopbackState struct { - *BaseDecisionState -} - -func NewPlusLoopbackState() *PlusLoopbackState { - b := NewBaseDecisionState() - - b.stateType = ATNStatePlusLoopBack - - return &PlusLoopbackState{BaseDecisionState: b} -} - -// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a -// decision state; we don't use it for code generation. Somebody might need it, -// it is included for completeness. In reality, PlusLoopbackState is the real -// decision-making node for A+. 
-type PlusBlockStartState struct { - *BaseBlockStartState - loopBackState ATNState -} - -func NewPlusBlockStartState() *PlusBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStatePlusBlockStart - - return &PlusBlockStartState{BaseBlockStartState: b} -} - -var _ BlockStartState = &PlusBlockStartState{} - -// StarBlockStartState is the block that begins a closure loop. -type StarBlockStartState struct { - *BaseBlockStartState -} - -func NewStarBlockStartState() *StarBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateStarBlockStart - - return &StarBlockStartState{BaseBlockStartState: b} -} - -var _ BlockStartState = &StarBlockStartState{} - -type StarLoopbackState struct { - *BaseATNState -} - -func NewStarLoopbackState() *StarLoopbackState { - b := NewBaseATNState() - - b.stateType = ATNStateStarLoopBack - - return &StarLoopbackState{BaseATNState: b} -} - -type StarLoopEntryState struct { - *BaseDecisionState - loopBackState ATNState - precedenceRuleDecision bool -} - -func NewStarLoopEntryState() *StarLoopEntryState { - b := NewBaseDecisionState() - - b.stateType = ATNStateStarLoopEntry - - // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. - return &StarLoopEntryState{BaseDecisionState: b} -} - -// LoopEndState marks the end of a * or + loop. -type LoopEndState struct { - *BaseATNState - loopBackState ATNState -} - -func NewLoopEndState() *LoopEndState { - b := NewBaseATNState() - - b.stateType = ATNStateLoopEnd - - return &LoopEndState{BaseATNState: b} -} - -// TokensStartState is the Tokens rule start state linking to each lexer rule start state. -type TokensStartState struct { - *BaseDecisionState -} - -func NewTokensStartState() *TokensStartState { - b := NewBaseDecisionState() - - b.stateType = ATNStateTokenStart - - return &TokensStartState{BaseDecisionState: b} -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go deleted file mode 100644 index f679f0dcd..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "os" - "strconv" -) - -// Provides an empty default implementation of {@link ANTLRErrorListener}. The -// default implementation of each method does nothing, but can be overridden as -// necessary. 
- -type ErrorListener interface { - SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) - ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) - ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) - ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) -} - -type DefaultErrorListener struct { -} - -func NewDefaultErrorListener() *DefaultErrorListener { - return new(DefaultErrorListener) -} - -func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { -} - -func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { -} - -type ConsoleErrorListener struct { - *DefaultErrorListener -} - -func NewConsoleErrorListener() *ConsoleErrorListener { - return new(ConsoleErrorListener) -} - -// Provides a default instance of {@link ConsoleErrorListener}. -var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() - -// {@inheritDoc} -// -//

-// This implementation prints messages to {@link System//err} containing the -// values of {@code line}, {@code charPositionInLine}, and {@code msg} using -// the following format. -// -// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em> -//
-func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) -} - -type ProxyErrorListener struct { - *DefaultErrorListener - delegates []ErrorListener -} - -func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { - if delegates == nil { - panic("delegates is not provided") - } - l := new(ProxyErrorListener) - l.delegates = delegates - return l -} - -func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - for _, d := range p.delegates { - d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) - } -} - -func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) - } -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go deleted file mode 100644 index 5c0a637ba..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go +++ /dev/null @@ -1,734 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -type ErrorStrategy interface { - reset(Parser) - RecoverInline(Parser) Token - Recover(Parser, RecognitionException) - Sync(Parser) - InErrorRecoveryMode(Parser) bool - ReportError(Parser, RecognitionException) - ReportMatch(Parser) -} - -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error Reporting and recovery in ANTLR parsers. -type DefaultErrorStrategy struct { - errorRecoveryMode bool - lastErrorIndex int - lastErrorStates *IntervalSet -} - -var _ ErrorStrategy = &DefaultErrorStrategy{} - -func NewDefaultErrorStrategy() *DefaultErrorStrategy { - - d := new(DefaultErrorStrategy) - - // Indicates whether the error strategy is currently "recovering from an - // error". This is used to suppress Reporting multiple error messages while - // attempting to recover from a detected syntax error. - // - // @see //InErrorRecoveryMode - // - d.errorRecoveryMode = false - - // The index into the input stream where the last error occurred. - // This is used to prevent infinite loops where an error is found - // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least - // one token/tree node is consumed for two errors. 
-	//
-	d.lastErrorIndex = -1
-	d.lastErrorStates = nil
-	return d
-}
-
-//

-// The default implementation simply calls {@link //endErrorCondition} to
-// ensure that the handler is not in error recovery mode.

-func (d *DefaultErrorStrategy) reset(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// This method is called to enter error recovery mode when a recognition -// exception is Reported. -// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { - d.errorRecoveryMode = true -} - -func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool { - return d.errorRecoveryMode -} - -// This method is called to leave error recovery mode after recovering from -// a recognition exception. -// -// @param recognizer -func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { - d.errorRecoveryMode = false - d.lastErrorStates = nil - d.lastErrorIndex = -1 -} - -// {@inheritDoc} -// -//

-// The default implementation simply calls {@link //endErrorCondition}.

-func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// {@inheritDoc} -// -//

-// The default implementation returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// and dispatches the Reporting task based on the runtime type of {@code e}
-// according to the following table.

-// -// -func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { - // if we've already Reported an error and have not Matched a token - // yet successfully, don't Report any errors. - if d.InErrorRecoveryMode(recognizer) { - return // don't Report spurious errors - } - d.beginErrorCondition(recognizer) - - switch t := e.(type) { - default: - fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) - // fmt.Println(e.stack) - recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) - case *NoViableAltException: - d.ReportNoViableAlternative(recognizer, t) - case *InputMisMatchException: - d.ReportInputMisMatch(recognizer, t) - case *FailedPredicateException: - d.ReportFailedPredicate(recognizer, t) - } -} - -// {@inheritDoc} -// -//

-// The default implementation reSynchronizes the parser by consuming tokens
-// until we find one in the reSynchronization set--loosely the set of tokens
-// that can follow the current rule.

-func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - - if d.lastErrorIndex == recognizer.GetInputStream().Index() && - d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { - // uh oh, another error at same token index and previously-Visited - // state in ATN must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop d is a failsafe. - recognizer.Consume() - } - d.lastErrorIndex = recognizer.GetInputStream().Index() - if d.lastErrorStates == nil { - d.lastErrorStates = NewIntervalSet() - } - d.lastErrorStates.addOne(recognizer.GetState()) - followSet := d.getErrorRecoverySet(recognizer) - d.consumeUntil(recognizer, followSet) -} - -// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at d point in the ATN. You can call d anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. -// -//

-// Implements Jim Idle's magic Sync mechanism in closures and optional
-// subrules. E.g.,

-// -//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-// 
-// -// At the start of a sub rule upon error, {@link //Sync} performs single -// token deletion, if possible. If it can't do that, it bails on the current -// rule and uses the default error recovery, which consumes until the -// reSynchronization set of the current rule. -// -//

-// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
-// with an empty alternative), then the expected set includes what follows
-// the subrule.

-// -//

-// During loop iteration, it consumes until it sees a token that can start a
-// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.

-// -//

-// ORIGINS

-// -//

-// Previous versions of ANTLR did a poor job of their recovery within loops.
-// A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule

-// -//
-// classfunc : 'class' ID '{' member* '}'
-// 
-// -// input with an extra token between members would force the parser to -// consume until it found the next class definition rather than the next -// member definition of the current class. -// -//

-// This functionality cost a little bit of effort because the parser has to
-// compare token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off d
-// functionality by simply overriding d method as a blank { }.
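As that note suggests, the check can also be disabled from outside the runtime by embedding DefaultErrorStrategy and overriding Sync with an empty body, which is essentially what BailErrorStrategy does further down. A minimal sketch (the noSyncErrorStrategy name is an assumption; imports as in the earlier listener sketch):

    // noSyncErrorStrategy keeps DefaultErrorStrategy's behavior but skips the
    // per-subrule/loop Sync check described above.
    type noSyncErrorStrategy struct {
    	*antlr.DefaultErrorStrategy
    }

    func newNoSyncErrorStrategy() *noSyncErrorStrategy {
    	return &noSyncErrorStrategy{DefaultErrorStrategy: antlr.NewDefaultErrorStrategy()}
    }

    // Sync intentionally does nothing.
    func (s *noSyncErrorStrategy) Sync(recognizer antlr.Parser) {}

A parser would adopt it through SetErrorHandler, the same hook used for BailErrorStrategy below.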

-func (d *DefaultErrorStrategy) Sync(recognizer Parser) { - // If already recovering, don't try to Sync - if d.InErrorRecoveryMode(recognizer) { - return - } - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - la := recognizer.GetTokenStream().LA(1) - - // try cheaper subset first might get lucky. seems to shave a wee bit off - nextTokens := recognizer.GetATN().NextTokens(s, nil) - if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { - return - } - - switch s.GetStateType() { - case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: - // Report error and recover if possible - if d.SingleTokenDeletion(recognizer) != nil { - return - } - panic(NewInputMisMatchException(recognizer)) - case ATNStatePlusLoopBack, ATNStateStarLoopBack: - d.ReportUnwantedToken(recognizer) - expecting := NewIntervalSet() - expecting.addSet(recognizer.GetExpectedTokens()) - whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) - d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) - default: - // do nothing if we can't identify the exact kind of ATN state - } -} - -// This is called by {@link //ReportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { - tokens := recognizer.GetTokenStream() - var input string - if tokens != nil { - if e.startToken.GetTokenType() == TokenEOF { - input = "" - } else { - input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) - } - } else { - input = "" - } - msg := "no viable alternative at input " + d.escapeWSAndQuote(input) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This is called by {@link //ReportError} when the exception is an -// {@link InputMisMatchException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { - msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + - " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This is called by {@link //ReportError} when the exception is a -// {@link FailedPredicateException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { - ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] - msg := "rule " + ruleName + " " + e.message - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This method is called to Report a syntax error which requires the removal -// of a token from the input stream. At the time d method is called, the -// erroneous symbol is current {@code LT(1)} symbol and has not yet been -// removed from the input stream. When d method returns, -// {@code recognizer} is in error recovery mode. -// -//

-// This method is called when {@link //singleTokenDeletion} identifies
-// single-token deletion as a viable recovery strategy for a mismatched
-// input error.

-// -//

-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.

-// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { - if d.InErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - tokenName := d.GetTokenErrorDisplay(t) - expecting := d.GetExpectedTokens(recognizer) - msg := "extraneous input " + tokenName + " expecting " + - expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -// This method is called to Report a syntax error which requires the -// insertion of a missing token into the input stream. At the time d -// method is called, the missing token has not yet been inserted. When d -// method returns, {@code recognizer} is in error recovery mode. -// -//

-// This method is called when {@link //singleTokenInsertion} identifies
-// single-token insertion as a viable recovery strategy for a mismatched
-// input error.

-// -//

-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.

-// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { - if d.InErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + - " at " + d.GetTokenErrorDisplay(t) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -//

-// The default implementation attempts to recover from the mismatched input
-// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, d method panics an
-// {@link InputMisMatchException}.

-// -//

-// EXTRA TOKEN (single token deletion)

-// -//

-// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
-// right token, however, then assume {@code LA(1)} is some extra spurious
-// token and delete it. Then consume and return the next token (which was
-// the {@code LA(2)} token) as the successful result of the Match operation.

-// -//

-// This recovery strategy is implemented by {@link
-// //singleTokenDeletion}.

-// -//

-// MISSING TOKEN (single token insertion)

-// -//

-// If current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
-// "insertion" is performed by returning the created token as the successful
-// result of the Match operation.

-// -//

-// This recovery strategy is implemented by {@link
-// //singleTokenInsertion}.

-// -//

-// EXAMPLE

-// -//

-// For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
-// the parser returns from the nested call to {@code expr}, it will have
-// call chain:

-// -//
-// stat &rarr expr &rarr atom
-// 
-// -// and it will be trying to Match the {@code ')'} at d point in the -// derivation: -// -//
-// => ID '=' '(' INT ')' ('+' atom)* ';'
-// ^
-// 
-// -// The attempt to Match {@code ')'} will fail when it sees {@code ”} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”} -// is in the set of tokens that can follow the {@code ')'} token reference -// in rule {@code atom}. It can assume that you forgot the {@code ')'}. -func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { - // SINGLE TOKEN DELETION - MatchedSymbol := d.SingleTokenDeletion(recognizer) - if MatchedSymbol != nil { - // we have deleted the extra token. - // now, move past ttype token as if all were ok - recognizer.Consume() - return MatchedSymbol - } - // SINGLE TOKEN INSERTION - if d.SingleTokenInsertion(recognizer) { - return d.GetMissingSymbol(recognizer) - } - // even that didn't work must panic the exception - panic(NewInputMisMatchException(recognizer)) -} - -// This method implements the single-token insertion inline error recovery -// strategy. It is called by {@link //recoverInline} if the single-token -// deletion strategy fails to recover from the mismatched input. If this -// method returns {@code true}, {@code recognizer} will be in error recovery -// mode. -// -//

-// This method determines whether or not single-token insertion is viable by
-// checking if the {@code LA(1)} input symbol could be successfully Matched
-// if it were instead the {@code LA(2)} symbol. If d method returns
-// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce d behavior.

-// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} -func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { - currentSymbolType := recognizer.GetTokenStream().LA(1) - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token error recovery - // is free to conjure up and insert the missing token - atn := recognizer.GetInterpreter().atn - currentState := atn.states[recognizer.GetState()] - next := currentState.GetTransitions()[0].getTarget() - expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) - if expectingAtLL2.contains(currentSymbolType) { - d.ReportMissingToken(recognizer) - return true - } - - return false -} - -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover -// from mismatched input. If this method returns nil, the parser and error -// handler state will not have changed. If this method returns non-nil, -// {@code recognizer} will not be in error recovery mode since the -// returned token was a successful Match. -// -//

-// If the single-token deletion is successful, d method calls
-// {@link //ReportUnwantedToken} to Report the error, followed by
-// {@link Parser//consume} to actually "delete" the extraneous token. Then,
-// before returning {@link //ReportMatch} is called to signal a successful
-// Match.

-// -// @param recognizer the parser instance -// @return the successfully Matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code nil} -func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { - NextTokenType := recognizer.GetTokenStream().LA(2) - expecting := d.GetExpectedTokens(recognizer) - if expecting.contains(NextTokenType) { - d.ReportUnwantedToken(recognizer) - // print("recoverFromMisMatchedToken deleting " \ - // + str(recognizer.GetTokenStream().LT(1)) \ - // + " since " + str(recognizer.GetTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.Consume() // simply delete extra token - // we want to return the token we're actually Matching - MatchedSymbol := recognizer.GetCurrentToken() - d.ReportMatch(recognizer) // we know current token is correct - return MatchedSymbol - } - - return nil -} - -// Conjure up a missing token during error recovery. -// -// The recognizer attempts to recover from single missing -// symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x)}. The action clearly assumes -// that there has been an identifier Matched previously and that -// $x points at that token. If that token is missing, but -// the next token in the stream is what we want we assume that -// d token is missing and we keep going. Because we -// have to return some token to replace the missing token, -// we have to conjure one up. This method gives the user control -// over the tokens returned for missing tokens. Mostly, -// you will want to create something special for identifier -// tokens. For literals such as '{' and ',', the default -// action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. -// If you change what tokens must be created by the lexer, -// override d method to create the appropriate tokens. -func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { - currentSymbol := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - expectedTokenType := expecting.first() - var tokenText string - - if expectedTokenType == TokenEOF { - tokenText = "" - } else { - ln := recognizer.GetLiteralNames() - if expectedTokenType > 0 && expectedTokenType < len(ln) { - tokenText = "" - } else { - tokenText = "" // TODO matches the JS impl - } - } - current := currentSymbol - lookback := recognizer.GetTokenStream().LT(-1) - if current.GetTokenType() == TokenEOF && lookback != nil { - current = lookback - } - - tf := recognizer.GetTokenFactory() - - return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn()) -} - -func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet { - return recognizer.GetExpectedTokens() -} - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. 
-func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - return d.escapeWSAndQuote(s) -} - -func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - return "'" + s + "'" -} - -// Compute the error recovery set for the current rule. During -// rule invocation, the parser pushes the set of tokens that can -// follow that rule reference on the stack d amounts to -// computing FIRST of what follows the rule reference in the -// enclosing rule. See LinearApproximator.FIRST(). -// This local follow set only includes tokens -// from within the rule i.e., the FIRST computation done by -// ANTLR stops at the end of a rule. -// -// # EXAMPLE -// -// When you find a "no viable alt exception", the input is not -// consistent with any of the alternatives for rule r. The best -// thing to do is to consume tokens until you see something that -// can legally follow a call to r//or* any rule that called r. -// You don't want the exact set of viable next tokens because the -// input might just be missing a token--you might consume the -// rest of the input looking for one of the missing tokens. -// -// Consider grammar: -// -// a : '[' b ']' -// | '(' b ')' -// -// b : c '^' INT -// c : ID -// | INT -// -// At each rule invocation, the set of tokens that could follow -// that rule is pushed on a stack. Here are the various -// context-sensitive follow sets: -// -// FOLLOW(b1_in_a) = FIRST(']') = ']' -// FOLLOW(b2_in_a) = FIRST(')') = ')' -// FOLLOW(c_in_b) = FIRST('^') = '^' -// -// Upon erroneous input "[]", the call chain is -// -// a -> b -> c -// -// and, hence, the follow context stack is: -// -// depth follow set start of rule execution -// 0 a (from main()) -// 1 ']' b -// 2 '^' c -// -// Notice that ')' is not included, because b would have to have -// been called from a different context in rule a for ')' to be -// included. -// -// For error recovery, we cannot consider FOLLOW(c) -// (context-sensitive or otherwise). We need the combined set of -// all context-sensitive FOLLOW sets--the set of all tokens that -// could follow any reference in the call chain. We need to -// reSync to one of those tokens. Note that FOLLOW(c)='^' and if -// we reSync'd to that token, we'd consume until EOF. We need to -// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. -// In this case, for input "[]", LA(1) is ']' and in the set, so we would -// not consume anything. After printing an error, rule c would -// return normally. Rule b would not find the required '^' though. -// At this point, it gets a mismatched token error and panics an -// exception (since LA(1) is not in the viable following token -// set). The rule exception handler tries to recover, but finds -// the same recovery set and doesn't consume anything. Rule b -// exits normally returning to rule a. Now it finds the ']' (and -// with the successful Match exits errorRecovery mode). -// -// So, you can see that the parser walks up the call chain looking -// for the token that was a member of the recovery set. -// -// Errors are not generated in errorRecovery mode. 
-// -// ANTLR's error recovery mechanism is based upon original ideas: -// -// "Algorithms + Data Structures = Programs" by Niklaus Wirth -// -// and -// -// "A note on error recovery in recursive descent parsers": -// http://portal.acm.org/citation.cfm?id=947902.947905 -// -// Later, Josef Grosch had some good ideas: -// -// "Efficient and Comfortable Error Recovery in Recursive Descent -// Parsers": -// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip -// -// Like Grosch I implement context-sensitive FOLLOW sets that are combined -// at run-time upon error to avoid overhead during parsing. -func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { - atn := recognizer.GetInterpreter().atn - ctx := recognizer.GetParserRuleContext() - recoverSet := NewIntervalSet() - for ctx != nil && ctx.GetInvokingState() >= 0 { - // compute what follows who invoked us - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) - recoverSet.addSet(follow) - ctx = ctx.GetParent().(ParserRuleContext) - } - recoverSet.removeOne(TokenEpsilon) - return recoverSet -} - -// Consume tokens until one Matches the given token set.// -func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { - ttype := recognizer.GetTokenStream().LA(1) - for ttype != TokenEOF && !set.contains(ttype) { - recognizer.Consume() - ttype = recognizer.GetTokenStream().LA(1) - } -} - -// -// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors -// by immediately canceling the parse operation with a -// {@link ParseCancellationException}. The implementation ensures that the -// {@link ParserRuleContext//exception} field is set for all parse tree nodes -// that were not completed prior to encountering the error. -// -//

-// This error strategy is useful in the following scenarios.

-// -//
    -//
-// - Two-stage parsing: This error strategy allows the first
-//   stage of two-stage parsing to immediately terminate if an error is
-//   encountered, and immediately fall back to the second stage. In addition to
-//   avoiding wasted work by attempting to recover from errors here, the empty
-//   implementation of {@link BailErrorStrategy//Sync} improves the performance of
-//   the first stage.
-// - Silent validation: When syntax errors are not being
-//   Reported or logged, and the parse result is simply ignored if errors occur,
-//   the {@link BailErrorStrategy} avoids wasting work on recovering from errors
-//   when the result will be ignored either way.
-// -//

-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
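To make the two-stage scenario concrete, a hedged sketch of the bail-then-retry pattern; parseOnce stands in for building the generated lexer/parser, calling SetErrorHandler with the given strategy, and running the start rule, and is not part of ANTLR (imports as in the earlier listener sketch):

    // parseOnce is a placeholder: build the generated parser for input, call
    // p.SetErrorHandler(strategy), run the start rule, and return the tree.
    var parseOnce func(input string, strategy antlr.ErrorStrategy) (antlr.ParseTree, error)

    // parseTwoStage tries the fast bail-out strategy first; on the panic raised
    // by BailErrorStrategy.Recover it falls back to full error recovery.
    func parseTwoStage(input string) (tree antlr.ParseTree, err error) {
    	defer func() {
    		if r := recover(); r != nil {
    			if _, ok := r.(*antlr.ParseCancellationException); ok {
    				tree, err = parseOnce(input, antlr.NewDefaultErrorStrategy())
    				return
    			}
    			panic(r) // not a bail-out; re-raise
    		}
    	}()
    	return parseOnce(input, antlr.NewBailErrorStrategy())
    }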

-// -// @see Parser//setErrorHandler(ANTLRErrorStrategy) - -type BailErrorStrategy struct { - *DefaultErrorStrategy -} - -var _ ErrorStrategy = &BailErrorStrategy{} - -func NewBailErrorStrategy() *BailErrorStrategy { - - b := new(BailErrorStrategy) - - b.DefaultErrorStrategy = NewDefaultErrorStrategy() - - return b -} - -// Instead of recovering from exception {@code e}, re-panic it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule func catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. -func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - context := recognizer.GetParserRuleContext() - for context != nil { - context.SetException(e) - if parent, ok := context.GetParent().(ParserRuleContext); ok { - context = parent - } else { - context = nil - } - } - panic(NewParseCancellationException()) // TODO we don't emit e properly -} - -// Make sure we don't attempt to recover inline if the parser -// successfully recovers, it won't panic an exception. -func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { - b.Recover(recognizer, NewInputMisMatchException(recognizer)) - - return nil -} - -// Make sure we don't attempt to recover from problems in subrules.// -func (b *BailErrorStrategy) Sync(recognizer Parser) { - // pass -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go deleted file mode 100644 index 3954c1378..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just -// 3 kinds of errors: prediction errors, failed predicate errors, and -// mismatched input errors. In each case, the parser knows where it is -// in the input, where it is in the ATN, the rule invocation stack, -// and what kind of problem occurred. - -type RecognitionException interface { - GetOffendingToken() Token - GetMessage() string - GetInputStream() IntStream -} - -type BaseRecognitionException struct { - message string - recognizer Recognizer - offendingToken Token - offendingState int - ctx RuleContext - input IntStream -} - -func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { - - // todo - // Error.call(this) - // - // if (!!Error.captureStackTrace) { - // Error.captureStackTrace(this, RecognitionException) - // } else { - // stack := NewError().stack - // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int - - t := new(BaseRecognitionException) - - t.message = message - t.recognizer = recognizer - t.input = input - t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams - // support accessing symbols by index, we have to track the {@link Token} - // instance itself. - t.offendingToken = nil - // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. 
- t.offendingState = -1 - if t.recognizer != nil { - t.offendingState = t.recognizer.GetState() - } - - return t -} - -func (b *BaseRecognitionException) GetMessage() string { - return b.message -} - -func (b *BaseRecognitionException) GetOffendingToken() Token { - return b.offendingToken -} - -func (b *BaseRecognitionException) GetInputStream() IntStream { - return b.input -} - -//

-// If the state number is not known, b method returns -1.

- -// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time b exception was panicn. -// -//

-// If the set of expected tokens is not known and could not be computed,
-// b method returns {@code nil}.

-// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / -func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { - if b.recognizer != nil { - return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) - } - - return nil -} - -func (b *BaseRecognitionException) String() string { - return b.message -} - -type LexerNoViableAltException struct { - *BaseRecognitionException - - startIndex int - deadEndConfigs ATNConfigSet -} - -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { - - l := new(LexerNoViableAltException) - - l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) - - l.startIndex = startIndex - l.deadEndConfigs = deadEndConfigs - - return l -} - -func (l *LexerNoViableAltException) String() string { - symbol := "" - if l.startIndex >= 0 && l.startIndex < l.input.Size() { - symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) - } - return "LexerNoViableAltException" + symbol -} - -type NoViableAltException struct { - *BaseRecognitionException - - startToken Token - offendingToken Token - ctx ParserRuleContext - deadEndConfigs ATNConfigSet -} - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. Reported by ReportNoViableAlternative() -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { - - if ctx == nil { - ctx = recognizer.GetParserRuleContext() - } - - if offendingToken == nil { - offendingToken = recognizer.GetCurrentToken() - } - - if startToken == nil { - startToken = recognizer.GetCurrentToken() - } - - if input == nil { - input = recognizer.GetInputStream().(TokenStream) - } - - n := new(NoViableAltException) - n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) - - // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// - n.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - n.startToken = startToken - n.offendingToken = offendingToken - - return n -} - -type InputMisMatchException struct { - *BaseRecognitionException -} - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not Match the expected token. -func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { - - i := new(InputMisMatchException) - i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - i.offendingToken = recognizer.GetCurrentToken() - - return i - -} - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like Matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. 
- -type FailedPredicateException struct { - *BaseRecognitionException - - ruleIndex int - predicateIndex int - predicate string -} - -func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { - - f := new(FailedPredicateException) - - f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - trans := s.GetTransitions()[0] - if trans2, ok := trans.(*PredicateTransition); ok { - f.ruleIndex = trans2.ruleIndex - f.predicateIndex = trans2.predIndex - } else { - f.ruleIndex = 0 - f.predicateIndex = 0 - } - f.predicate = predicate - f.offendingToken = recognizer.GetCurrentToken() - - return f -} - -func (f *FailedPredicateException) formatMessage(predicate, message string) string { - if message != "" { - return message - } - - return "failed predicate: {" + predicate + "}?" -} - -type ParseCancellationException struct { -} - -func NewParseCancellationException() *ParseCancellationException { - // Error.call(this) - // Error.captureStackTrace(this, ParseCancellationException) - return new(ParseCancellationException) -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go deleted file mode 100644 index bd6ad5efe..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "io" - "os" -) - -// This is an InputStream that is loaded from a file all at once -// when you construct the object. - -type FileStream struct { - *InputStream - - filename string -} - -func NewFileStream(fileName string) (*FileStream, error) { - - buf := bytes.NewBuffer(nil) - - f, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer f.Close() - _, err = io.Copy(buf, f) - if err != nil { - return nil, err - } - - fs := new(FileStream) - - fs.filename = fileName - s := string(buf.Bytes()) - - fs.InputStream = NewInputStream(s) - - return fs, nil - -} - -func (f *FileStream) GetSourceName() string { - return f.filename -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go deleted file mode 100644 index a8b889ced..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -type InputStream struct { - name string - index int - data []rune - size int -} - -func NewInputStream(data string) *InputStream { - - is := new(InputStream) - - is.name = "" - is.index = 0 - is.data = []rune(data) - is.size = len(is.data) // number of runes - - return is -} - -func (is *InputStream) reset() { - is.index = 0 -} - -func (is *InputStream) Consume() { - if is.index >= is.size { - // assert is.LA(1) == TokenEOF - panic("cannot consume EOF") - } - is.index++ -} - -func (is *InputStream) LA(offset int) int { - - if offset == 0 { - return 0 // nil - } - if offset < 0 { - offset++ // e.g., translate LA(-1) to use offset=0 - } - pos := is.index + offset - 1 - - if pos < 0 || pos >= is.size { // invalid - return TokenEOF - } - - return int(is.data[pos]) -} - -func (is *InputStream) LT(offset int) int { - return is.LA(offset) -} - -func (is *InputStream) Index() int { - return is.index -} - -func (is *InputStream) Size() int { - return is.size -} - -// mark/release do nothing we have entire buffer -func (is *InputStream) Mark() int { - return -1 -} - -func (is *InputStream) Release(marker int) { -} - -func (is *InputStream) Seek(index int) { - if index <= is.index { - is.index = index // just jump don't update stream state (line,...) - return - } - // seek forward - is.index = intMin(index, is.size) -} - -func (is *InputStream) GetText(start int, stop int) string { - if stop >= is.size { - stop = is.size - 1 - } - if start >= is.size { - return "" - } - - return string(is.data[start : stop+1]) -} - -func (is *InputStream) GetTextFromTokens(start, stop Token) string { - if start != nil && stop != nil { - return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) - } - - return "" -} - -func (is *InputStream) GetTextFromInterval(i *Interval) string { - return is.GetText(i.Start, i.Stop) -} - -func (*InputStream) GetSourceName() string { - return "Obtained from string" -} - -func (is *InputStream) String() string { - return string(is.data) -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go deleted file mode 100644 index e5a74f0c6..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go +++ /dev/null @@ -1,198 +0,0 @@ -package antlr - -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -import ( - "sort" -) - -// Collectable is an interface that a struct should implement if it is to be -// usable as a key in these collections. -type Collectable[T any] interface { - Hash() int - Equals(other Collectable[T]) bool -} - -type Comparator[T any] interface { - Hash1(o T) int - Equals2(T, T) bool -} - -// JStore implements a container that allows the use of a struct to calculate the key -// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just -// serve the needs of the ANTLR Go runtime. -// -// For ease of porting the logic of the runtime from the master target (Java), this collection -// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() -// function as the key. The values are stored in a standard go map which internally is a form of hashmap -// itself, the key for the go map is the hash supplied by the key object. 
The collection is able to deal with -// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't -// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and -// we understand the requirements, then this is fine - this is not a general purpose collection. -type JStore[T any, C Comparator[T]] struct { - store map[int][]T - len int - comparator Comparator[T] -} - -func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] { - - if comparator == nil { - panic("comparator cannot be nil") - } - - s := &JStore[T, C]{ - store: make(map[int][]T, 1), - comparator: comparator, - } - return s -} - -// Put will store given value in the collection. Note that the key for storage is generated from -// the value itself - this is specifically because that is what ANTLR needs - this would not be useful -// as any kind of general collection. -// -// If the key has a hash conflict, then the value will be added to the slice of values associated with the -// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is -// tested by calling the equals() method on the key. -// -// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true -// -// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. -func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn - - kh := s.comparator.Hash1(value) - - for _, v1 := range s.store[kh] { - if s.comparator.Equals2(value, v1) { - return v1, true - } - } - s.store[kh] = append(s.store[kh], value) - s.len++ - return value, false -} - -// Get will return the value associated with the key - the type of the key is the same type as the value -// which would not generally be useful, but this is a specific thing for ANTLR where the key is -// generated using the object we are going to store. -func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn - - kh := s.comparator.Hash1(key) - - for _, v := range s.store[kh] { - if s.comparator.Equals2(key, v) { - return v, true - } - } - return key, false -} - -// Contains returns true if the given key is present in the store -func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn - - _, present := s.Get(key) - return present -} - -func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { - vs := make([]T, 0, len(s.store)) - for _, v := range s.store { - vs = append(vs, v...) 
- } - sort.Slice(vs, func(i, j int) bool { - return less(vs[i], vs[j]) - }) - - return vs -} - -func (s *JStore[T, C]) Each(f func(T) bool) { - for _, e := range s.store { - for _, v := range e { - f(v) - } - } -} - -func (s *JStore[T, C]) Len() int { - return s.len -} - -func (s *JStore[T, C]) Values() []T { - vs := make([]T, 0, len(s.store)) - for _, e := range s.store { - for _, v := range e { - vs = append(vs, v) - } - } - return vs -} - -type entry[K, V any] struct { - key K - val V -} - -type JMap[K, V any, C Comparator[K]] struct { - store map[int][]*entry[K, V] - len int - comparator Comparator[K] -} - -func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] { - return &JMap[K, V, C]{ - store: make(map[int][]*entry[K, V], 1), - comparator: comparator, - } -} - -func (m *JMap[K, V, C]) Put(key K, val V) { - kh := m.comparator.Hash1(key) - - m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) - m.len++ -} - -func (m *JMap[K, V, C]) Values() []V { - vs := make([]V, 0, len(m.store)) - for _, e := range m.store { - for _, v := range e { - vs = append(vs, v.val) - } - } - return vs -} - -func (m *JMap[K, V, C]) Get(key K) (V, bool) { - - var none V - kh := m.comparator.Hash1(key) - for _, e := range m.store[kh] { - if m.comparator.Equals2(e.key, key) { - return e.val, true - } - } - return none, false -} - -func (m *JMap[K, V, C]) Len() int { - return len(m.store) -} - -func (m *JMap[K, V, C]) Delete(key K) { - kh := m.comparator.Hash1(key) - for i, e := range m.store[kh] { - if m.comparator.Equals2(e.key, key) { - m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) - m.len-- - return - } - } -} - -func (m *JMap[K, V, C]) Clear() { - m.store = make(map[int][]*entry[K, V]) -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go deleted file mode 100644 index be1ba7a7e..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "golang.org/x/exp/slices" - -// Represents an executor for a sequence of lexer actions which traversed during -// the Matching operation of a lexer rule (token). -// -//

-// The executor tracks position information for position-dependent lexer actions
-// efficiently, ensuring that actions appearing only at the end of the rule do
-// not cause bloating of the {@link DFA} created for the lexer.

- -type LexerActionExecutor struct { - lexerActions []LexerAction - cachedHash int -} - -func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { - - if lexerActions == nil { - lexerActions = make([]LexerAction, 0) - } - - l := new(LexerActionExecutor) - - l.lexerActions = lexerActions - - // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - l.cachedHash = murmurInit(57) - for _, a := range lexerActions { - l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) - } - - return l -} - -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while Matching a token within a particular -// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. -func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { - if lexerActionExecutor == nil { - return NewLexerActionExecutor([]LexerAction{lexerAction}) - } - - return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) -} - -// Creates a {@link LexerActionExecutor} which encodes the current offset -// for position-dependent lexer actions. -// -//

-// Normally, when the executor encounters lexer actions where
-// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
-// {@link IntStream//seek} on the input {@link CharStream} to set the input
-// position to the end of the current token. This behavior provides
-// for efficient DFA representation of lexer actions which appear at the end
-// of a lexer rule, even when the lexer rule Matches a variable number of
-// characters.

-// -//

-// Prior to traversing a Match transition in the ATN, the current offset
-// from the token start index is assigned to all position-dependent lexer
-// actions which have not already been assigned a fixed offset. By storing
-// the offsets relative to the token start index, the DFA representation of
-// lexer actions which appear in the middle of tokens remains efficient due
-// to sharing among tokens of the same length, regardless of their absolute
-// position in the input stream.

-// -//

-// If the current executor already has offsets assigned to all
-// position-dependent lexer actions, the method returns {@code this}.

-// -// @param offset The current offset to assign to all position-dependent -// lexer actions which do not already have offsets assigned. -// -// @return A {@link LexerActionExecutor} which stores input stream offsets -// for all position-dependent lexer actions. -// / -func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { - var updatedLexerActions []LexerAction - for i := 0; i < len(l.lexerActions); i++ { - _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) - if l.lexerActions[i].getIsPositionDependent() && !ok { - if updatedLexerActions == nil { - updatedLexerActions = make([]LexerAction, 0) - - for _, a := range l.lexerActions { - updatedLexerActions = append(updatedLexerActions, a) - } - } - - updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) - } - } - if updatedLexerActions == nil { - return l - } - - return NewLexerActionExecutor(updatedLexerActions) -} - -// Execute the actions encapsulated by l executor within the context of a -// particular {@link Lexer}. -// -//

-// This method calls {@link IntStream//seek} to set the position of the
-// {@code input} {@link CharStream} prior to calling
-// {@link LexerAction//execute} on a position-dependent action. Before the
-// method returns, the input position will be restored to the same position
-// it was in when the method was invoked.

-// -// @param lexer The lexer instance. -// @param input The input stream which is the source for the current token. -// When l method is called, the current {@link IntStream//index} for -// {@code input} should be the start of the following token, i.e. 1 -// character past the end of the current token. -// @param startIndex The token start index. This value may be passed to -// {@link IntStream//seek} to set the {@code input} position to the beginning -// of the token. -// / -func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { - requiresSeek := false - stopIndex := input.Index() - - defer func() { - if requiresSeek { - input.Seek(stopIndex) - } - }() - - for i := 0; i < len(l.lexerActions); i++ { - lexerAction := l.lexerActions[i] - if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { - offset := la.offset - input.Seek(startIndex + offset) - lexerAction = la.lexerAction - requiresSeek = (startIndex + offset) != stopIndex - } else if lexerAction.getIsPositionDependent() { - input.Seek(stopIndex) - requiresSeek = false - } - lexerAction.execute(lexer) - } -} - -func (l *LexerActionExecutor) Hash() int { - if l == nil { - // TODO: Why is this here? l should not be nil - return 61 - } - - // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode - return l.cachedHash -} - -func (l *LexerActionExecutor) Equals(other interface{}) bool { - if l == other { - return true - } - othert, ok := other.(*LexerActionExecutor) - if !ok { - return false - } - if othert == nil { - return false - } - if l.cachedHash != othert.cachedHash { - return false - } - if len(l.lexerActions) != len(othert.lexerActions) { - return false - } - return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { - return i.Equals(j) - }) -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go deleted file mode 100644 index 76689615a..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type LL1Analyzer struct { - atn *ATN -} - -func NewLL1Analyzer(atn *ATN) *LL1Analyzer { - la := new(LL1Analyzer) - la.atn = atn - return la -} - -// - Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. -// -// / -const ( - LL1AnalyzerHitPred = TokenInvalidType -) - -// * -// Calculates the SLL(1) expected lookahead set for each outgoing transition -// of an {@link ATNState}. The returned array has one element for each -// outgoing transition in {@code s}. If the closure from transition -// i leads to a semantic predicate before Matching a symbol, the -// element at index i of the result will be {@code nil}. -// -// @param s the ATN state -// @return the expected symbols for each outgoing transition of {@code s}. 
-func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { - if s == nil { - return nil - } - count := len(s.GetTransitions()) - look := make([]*IntervalSet, count) - for alt := 0; alt < count; alt++ { - look[alt] = NewIntervalSet() - lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - seeThruPreds := false // fail to get lookahead upon pred - la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) - // Wipe out lookahead for la alternative if we found nothing - // or we had a predicate when we !seeThruPreds - if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { - look[alt] = nil - } - } - return look -} - -// * -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

-// If {@code ctx} is {@code nil} and the end of the rule containing
-// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
-// If {@code ctx} is not {@code nil} and the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.

-// -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code nil} if the context -// should be ignored -// -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// / -func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { - r := NewIntervalSet() - seeThruPreds := true // ignore preds get all lookahead - var lookContext PredictionContext - if ctx != nil { - lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) - } - la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true) - return r -} - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

-// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
-// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
-// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
-// {@code true} and {@code stopState} or the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.

-// -// @param s the ATN state. -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx The outer context, or {@code nil} if the outer context should -// not be used. -// @param look The result lookahead set. -// @param lookBusy A set used for preventing epsilon closures in the ATN -// from causing a stack overflow. Outside code should pass -// {@code NewSet} for la argument. -// @param calledRuleStack A set used for preventing left recursion in the -// ATN from causing a stack overflow. Outside code should pass -// {@code NewBitSet()} for la argument. -// @param seeThruPreds {@code true} to true semantic predicates as -// implicitly {@code true} and "see through them", otherwise {@code false} -// to treat semantic predicates as opaque and add {@link //HitPred} to the -// result if one is encountered. -// @param addEOF Add {@link Token//EOF} to the result if the end of the -// outermost context is reached. This parameter has no effect if {@code ctx} -// is {@code nil}. - -func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { - - returnState := la.atn.states[ctx.getReturnState(i)] - la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} - -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - - c := NewBaseATNConfig6(s, 0, ctx) - - if lookBusy.Contains(c) { - return - } - - _, present := lookBusy.Put(c) - if present { - return - - } - if s == stopState { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - } - - _, ok := s.(*RuleStopState) - - if ok { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - - if ctx != BasePredictionContextEMPTY { - removed := calledRuleStack.contains(s.GetRuleIndex()) - defer func() { - if removed { - calledRuleStack.add(s.GetRuleIndex()) - } - }() - calledRuleStack.remove(s.GetRuleIndex()) - // run thru all possible stack tops in ctx - for i := 0; i < ctx.length(); i++ { - returnState := la.atn.states[ctx.getReturnState(i)] - la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) - } - return - } - } - - n := len(s.GetTransitions()) - - for i := 0; i < n; i++ { - t := s.GetTransitions()[i] - - if t1, ok := t.(*RuleTransition); ok { - if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { - continue - } - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) - } else if t2, ok := t.(AbstractPredicateTransition); ok { - if seeThruPreds { - la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else { - look.addOne(LL1AnalyzerHitPred) - } - } else if t.getIsEpsilon() { - la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else if _, ok := t.(*WildcardTransition); ok { - look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) - } else { - set := t.getLabel() - if set != nil { - if _, ok := 
t.(*NotSetTransition); ok { - set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) - } - look.addSet(set) - } - } - } -} - -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - - defer func() { - calledRuleStack.remove(t1.getTarget().GetRuleIndex()) - }() - - calledRuleStack.add(t1.getTarget().GetRuleIndex()) - la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go deleted file mode 100644 index d26bf0639..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go +++ /dev/null @@ -1,708 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -type Parser interface { - Recognizer - - GetInterpreter() *ParserATNSimulator - - GetTokenStream() TokenStream - GetTokenFactory() TokenFactory - GetParserRuleContext() ParserRuleContext - SetParserRuleContext(ParserRuleContext) - Consume() Token - GetParseListeners() []ParseTreeListener - - GetErrorHandler() ErrorStrategy - SetErrorHandler(ErrorStrategy) - GetInputStream() IntStream - GetCurrentToken() Token - GetExpectedTokens() *IntervalSet - NotifyErrorListeners(string, Token, RecognitionException) - IsExpectedToken(int) bool - GetPrecedence() int - GetRuleInvocationStack(ParserRuleContext) []string -} - -type BaseParser struct { - *BaseRecognizer - - Interpreter *ParserATNSimulator - BuildParseTrees bool - - input TokenStream - errHandler ErrorStrategy - precedenceStack IntStack - ctx ParserRuleContext - - tracer *TraceListener - parseListeners []ParseTreeListener - _SyntaxErrors int -} - -// p.is all the parsing support code essentially most of it is error -// recovery stuff.// -func NewBaseParser(input TokenStream) *BaseParser { - - p := new(BaseParser) - - p.BaseRecognizer = NewBaseRecognizer() - - // The input stream. - p.input = nil - // The error handling strategy for the parser. The default value is a new - // instance of {@link DefaultErrorStrategy}. - p.errHandler = NewDefaultErrorStrategy() - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - // The {@link ParserRuleContext} object for the currently executing rule. - // p.is always non-nil during the parsing process. - p.ctx = nil - // Specifies whether or not the parser should construct a parse tree during - // the parsing process. The default value is {@code true}. - p.BuildParseTrees = true - // When {@link //setTrace}{@code (true)} is called, a reference to the - // {@link TraceListener} is stored here so it can be easily removed in a - // later call to {@link //setTrace}{@code (false)}. The listener itself is - // implemented as a parser listener so p.field is not directly used by - // other parser methods. - p.tracer = nil - // The list of {@link ParseTreeListener} listeners registered to receive - // events during the parse. - p.parseListeners = nil - // The number of syntax errors Reported during parsing. 
p.value is - // incremented each time {@link //NotifyErrorListeners} is called. - p._SyntaxErrors = 0 - p.SetInputStream(input) - - return p -} - -// p.field maps from the serialized ATN string to the deserialized {@link -// ATN} with -// bypass alternatives. -// -// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() -var bypassAltsAtnCache = make(map[string]int) - -// reset the parser's state// -func (p *BaseParser) reset() { - if p.input != nil { - p.input.Seek(0) - } - p.errHandler.reset(p) - p.ctx = nil - p._SyntaxErrors = 0 - p.SetTrace(nil) - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - if p.Interpreter != nil { - p.Interpreter.reset() - } -} - -func (p *BaseParser) GetErrorHandler() ErrorStrategy { - return p.errHandler -} - -func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { - p.errHandler = e -} - -// Match current input symbol against {@code ttype}. If the symbol type -// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are -// called to complete the Match process. -// -//

-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

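The recovery path above can be pictured with a small standalone sketch; the toy types and the recoverInline stand-in below are invented and do not reflect the real ErrorStrategy interface.

    type toyToken struct {
        Type  int
        Index int // -1 marks a token conjured up during recovery
    }

    type toyStream struct {
        toks []toyToken
        pos  int
    }

    func (s *toyStream) current() toyToken { return s.toks[s.pos] }
    func (s *toyStream) consume()          { s.pos++ }

    // recoverInline stands in for the error strategy: here it simply
    // fabricates the missing token instead of consuming input.
    func recoverInline(want int) toyToken { return toyToken{Type: want, Index: -1} }

    // match returns the current token when it has the expected type; otherwise
    // it asks the "error strategy" for a recovered token, and a token with
    // Index == -1 is attached to the tree as an error node.
    func match(s *toyStream, want int, addErrorNode func(toyToken)) toyToken {
        t := s.current()
        if t.Type == want {
            s.consume()
            return t
        }
        t = recoverInline(want)
        if t.Index == -1 {
            addErrorNode(t)
        }
        return t
    }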
-// -// @param ttype the token type to Match -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// {@code ttype} and the error strategy could not recover from the -// mismatched symbol - -func (p *BaseParser) Match(ttype int) Token { - - t := p.GetCurrentToken() - - if t.GetTokenType() == ttype { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - - return t -} - -// Match current input symbol as a wildcard. If the symbol type Matches -// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} -// and {@link //consume} are called to complete the Match process. -// -//

-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

-// -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// a wildcard and the error strategy could not recover from the mismatched -// symbol - -func (p *BaseParser) MatchWildcard() Token { - t := p.GetCurrentToken() - if t.GetTokenType() > 0 { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - return t -} - -func (p *BaseParser) GetParserRuleContext() ParserRuleContext { - return p.ctx -} - -func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { - p.ctx = v -} - -func (p *BaseParser) GetParseListeners() []ParseTreeListener { - if p.parseListeners == nil { - return make([]ParseTreeListener, 0) - } - return p.parseListeners -} - -// Registers {@code listener} to receive events during the parsing process. -// -//

-// To support output-preserving grammar transformations (including but not
-// limited to left-recursion removal, automated left-factoring, and
-// optimized code generation), calls to listener methods during the parse
-// may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
-// particular, rule entry and exit events may occur in a different order
-// during the parse than after the parser. In addition, calls to certain
-// rule entry methods may be omitted.
-//
-// With the following specific exceptions, calls to listener events are
-// deterministic, i.e. for identical input the calls to listener
-// methods will be the same.
-//
-//   - Alterations to the grammar used to generate code may change the
-//     behavior of the listener calls.
-//   - Alterations to the command line options passed to ANTLR 4 when
-//     generating the parser may change the behavior of the listener calls.
-//   - Changing the version of the ANTLR Tool used to generate the parser
-//     may change the behavior of the listener calls.
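One practical consequence of registering listeners this way is the notification order used by the TriggerEnterRuleEvent/TriggerExitRuleEvent helpers further down in this file: enter events are delivered in registration order, exit events in reverse. A minimal standalone sketch (the interface below is invented, not ParseTreeListener):

    type toyParseListener interface {
        Enter(rule string)
        Exit(rule string)
    }

    // notifyEnter delivers the enter event in registration order.
    func notifyEnter(listeners []toyParseListener, rule string) {
        for _, l := range listeners {
            l.Enter(rule)
        }
    }

    // notifyExit delivers the exit event in reverse registration order,
    // mirroring the reverse-order walk in TriggerExitRuleEvent.
    func notifyExit(listeners []toyParseListener, rule string) {
        for i := len(listeners) - 1; i >= 0; i-- {
            listeners[i].Exit(rule)
        }
    }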
-// -// @param listener the listener to add -// -// @panics nilPointerException if {@code} listener is {@code nil} -func (p *BaseParser) AddParseListener(listener ParseTreeListener) { - if listener == nil { - panic("listener") - } - if p.parseListeners == nil { - p.parseListeners = make([]ParseTreeListener, 0) - } - p.parseListeners = append(p.parseListeners, listener) -} - -// Remove {@code listener} from the list of parse listeners. -// -//

-// If {@code listener} is {@code nil} or has not been added as a parse
-// listener, p.method does nothing.

-// @param listener the listener to remove -func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { - - if p.parseListeners != nil { - - idx := -1 - for i, v := range p.parseListeners { - if v == listener { - idx = i - break - } - } - - if idx == -1 { - return - } - - // remove the listener from the slice - p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) - - if len(p.parseListeners) == 0 { - p.parseListeners = nil - } - } -} - -// Remove all parse listeners. -func (p *BaseParser) removeParseListeners() { - p.parseListeners = nil -} - -// Notify any parse listeners of an enter rule event. -func (p *BaseParser) TriggerEnterRuleEvent() { - if p.parseListeners != nil { - ctx := p.ctx - for _, listener := range p.parseListeners { - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) - } - } -} - -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener -func (p *BaseParser) TriggerExitRuleEvent() { - if p.parseListeners != nil { - // reverse order walk of listeners - ctx := p.ctx - l := len(p.parseListeners) - 1 - - for i := range p.parseListeners { - listener := p.parseListeners[l-i] - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) - } - } -} - -func (p *BaseParser) GetInterpreter() *ParserATNSimulator { - return p.Interpreter -} - -func (p *BaseParser) GetATN() *ATN { - return p.Interpreter.atn -} - -func (p *BaseParser) GetTokenFactory() TokenFactory { - return p.input.GetTokenSource().GetTokenFactory() -} - -// Tell our token source and error strategy about a Newway to create tokens.// -func (p *BaseParser) setTokenFactory(factory TokenFactory) { - p.input.GetTokenSource().setTokenFactory(factory) -} - -// The ATN with bypass alternatives is expensive to create so we create it -// lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. -func (p *BaseParser) GetATNWithBypassAlts() { - - // TODO - panic("Not implemented!") - - // serializedAtn := p.getSerializedATN() - // if (serializedAtn == nil) { - // panic("The current parser does not support an ATN with bypass alternatives.") - // } - // result := p.bypassAltsAtnCache[serializedAtn] - // if (result == nil) { - // deserializationOptions := NewATNDeserializationOptions(nil) - // deserializationOptions.generateRuleBypassTransitions = true - // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) - // p.bypassAltsAtnCache[serializedAtn] = result - // } - // return result -} - -// The preferred method of getting a tree pattern. For example, here's a -// sample use: -// -//
-// ParseTree t = parser.expr()
-// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
-// MyParser.RULE_expr)
-// ParseTreeMatch m = p.Match(t)
-// String id = m.Get("ID")
-// 
- -func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { - - panic("NewParseTreePatternMatcher not implemented!") - // - // if (lexer == nil) { - // if (p.GetTokenStream() != nil) { - // tokenSource := p.GetTokenStream().GetTokenSource() - // if _, ok := tokenSource.(ILexer); ok { - // lexer = tokenSource - // } - // } - // } - // if (lexer == nil) { - // panic("Parser can't discover a lexer to use") - // } - - // m := NewParseTreePatternMatcher(lexer, p) - // return m.compile(pattern, patternRuleIndex) -} - -func (p *BaseParser) GetInputStream() IntStream { - return p.GetTokenStream() -} - -func (p *BaseParser) SetInputStream(input TokenStream) { - p.SetTokenStream(input) -} - -func (p *BaseParser) GetTokenStream() TokenStream { - return p.input -} - -// Set the token stream and reset the parser.// -func (p *BaseParser) SetTokenStream(input TokenStream) { - p.input = nil - p.reset() - p.input = input -} - -// Match needs to return the current input symbol, which gets put -// into the label for the associated token ref e.g., x=ID. -func (p *BaseParser) GetCurrentToken() Token { - return p.input.LT(1) -} - -func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { - if offendingToken == nil { - offendingToken = p.GetCurrentToken() - } - p._SyntaxErrors++ - line := offendingToken.GetLine() - column := offendingToken.GetColumn() - listener := p.GetErrorListenerDispatch() - listener.SyntaxError(p, offendingToken, line, column, msg, err) -} - -func (p *BaseParser) Consume() Token { - o := p.GetCurrentToken() - if o.GetTokenType() != TokenEOF { - p.GetInputStream().Consume() - } - hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 - if p.BuildParseTrees || hasListener { - if p.errHandler.InErrorRecoveryMode(p) { - node := p.ctx.AddErrorNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitErrorNode(node) - } - } - - } else { - node := p.ctx.AddTokenNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitTerminal(node) - } - } - } - // node.invokingState = p.state - } - - return o -} - -func (p *BaseParser) addContextToParseTree() { - // add current context to parent if we have a parent - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) - } -} - -func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { - p.SetState(state) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.BuildParseTrees { - p.addContextToParseTree() - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() - } -} - -func (p *BaseParser) ExitRule() { - p.ctx.SetStop(p.input.LT(-1)) - // trigger event on ctx, before it reverts to parent - if p.parseListeners != nil { - p.TriggerExitRuleEvent() - } - p.SetState(p.ctx.GetInvokingState()) - if p.ctx.GetParent() != nil { - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } else { - p.ctx = nil - } -} - -func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { - localctx.SetAltNumber(altNum) - // if we have Newlocalctx, make sure we replace existing ctx - // that is previous child of parse tree - if p.BuildParseTrees && p.ctx != localctx { - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() - p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) - } - } - p.ctx = localctx -} - -// Get the precedence level for the top-most precedence rule. 
-// -// @return The precedence level for the top-most precedence rule, or -1 if -// the parser context is not nested within a precedence rule. - -func (p *BaseParser) GetPrecedence() int { - if len(p.precedenceStack) == 0 { - return -1 - } - - return p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { - p.SetState(state) - p.precedenceStack.Push(precedence) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -// -// Like {@link //EnterRule} but for recursive rules. - -func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { - previous := p.ctx - previous.SetParent(localctx) - previous.SetInvokingState(state) - previous.SetStop(p.input.LT(-1)) - - p.ctx = localctx - p.ctx.SetStart(previous.GetStart()) - if p.BuildParseTrees { - p.ctx.AddChild(previous) - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { - p.precedenceStack.Pop() - p.ctx.SetStop(p.input.LT(-1)) - retCtx := p.ctx // save current ctx (return value) - // unroll so ctx is as it was before call to recursive method - if p.parseListeners != nil { - for p.ctx != parentCtx { - p.TriggerExitRuleEvent() - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } - } else { - p.ctx = parentCtx - } - // hook into tree - retCtx.SetParent(parentCtx) - if p.BuildParseTrees && parentCtx != nil { - // add return ctx into invoking rule's tree - parentCtx.AddChild(retCtx) - } -} - -func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { - ctx := p.ctx - for ctx != nil { - if ctx.GetRuleIndex() == ruleIndex { - return ctx - } - ctx = ctx.GetParent().(ParserRuleContext) - } - return nil -} - -func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { - return precedence >= p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) inContext(context ParserRuleContext) bool { - // TODO: useful in parser? - return false -} - -// -// Checks whether or not {@code symbol} can follow the current state in the -// ATN. The behavior of p.method is equivalent to the following, but is -// implemented such that the complete context-sensitive follow set does not -// need to be explicitly constructed. -// -//
-// return getExpectedTokens().contains(symbol)
-// 
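A loose standalone sketch of the equivalence shown above: rather than materializing the full context-sensitive expected-token set, the check can climb the invoking contexts and stop as soon as the symbol shows up in a follow set. The toyFrame type and the constants are invented for illustration.

    const (
        toyFollowEpsilon = -2 // marker: the enclosing rule can end here
        toyFollowEOF     = -1
    )

    type toyFrame struct {
        follow map[int]bool // follow set contributed by the invoking state
        parent *toyFrame
    }

    func toyIsExpected(symbol int, local map[int]bool, ctx *toyFrame) bool {
        if local[symbol] {
            return true
        }
        if !local[toyFollowEpsilon] { // the current rule cannot end here, so stop looking
            return false
        }
        for f := ctx; f != nil; f = f.parent {
            if f.follow[symbol] {
                return true
            }
            if !f.follow[toyFollowEpsilon] {
                return false
            }
        }
        return symbol == toyFollowEOF // ran off the outermost rule: only EOF remains expected
    }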
-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - -func (p *BaseParser) IsExpectedToken(symbol int) bool { - atn := p.Interpreter.atn - ctx := p.ctx - s := atn.states[p.state] - following := atn.NextTokens(s, nil) - if following.contains(symbol) { - return true - } - if !following.contains(TokenEpsilon) { - return false - } - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - following = atn.NextTokens(rt.(*RuleTransition).followState, nil) - if following.contains(symbol) { - return true - } - ctx = ctx.GetParent().(ParserRuleContext) - } - if following.contains(TokenEpsilon) && symbol == TokenEOF { - return true - } - - return false -} - -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, -// respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) -func (p *BaseParser) GetExpectedTokens() *IntervalSet { - return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) -} - -func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { - atn := p.Interpreter.atn - s := atn.states[p.state] - return atn.NextTokens(s, nil) -} - -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// -func (p *BaseParser) GetRuleIndex(ruleName string) int { - var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] - if ok { - return ruleIndex - } - - return -1 -} - -// Return List<String> of the rule names in your parser instance -// leading up to a call to the current rule. You could override if -// you want more details such as the file/line info of where -// in the ATN a rule is invoked. -// -// this very useful for error messages. - -func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { - if c == nil { - c = p.ctx - } - stack := make([]string, 0) - for c != nil { - // compute what follows who invoked us - ruleIndex := c.GetRuleIndex() - if ruleIndex < 0 { - stack = append(stack, "n/a") - } else { - stack = append(stack, p.GetRuleNames()[ruleIndex]) - } - - vp := c.GetParent() - - if vp == nil { - break - } - - c = vp.(ParserRuleContext) - } - return stack -} - -// For debugging and other purposes.// -func (p *BaseParser) GetDFAStrings() string { - return fmt.Sprint(p.Interpreter.decisionToDFA) -} - -// For debugging and other purposes.// -func (p *BaseParser) DumpDFA() { - seenOne := false - for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.states.Len() > 0 { - if seenOne { - fmt.Println() - } - fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") - fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) - seenOne = true - } - } -} - -func (p *BaseParser) GetSourceName() string { - return p.GrammarFileName -} - -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. 
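For reference, a hedged usage sketch of the debugging helpers defined in this file (SetTrace, DumpDFA, GetDFAStrings). The MyParser type, its Prog start rule, and the package qualifiers/imports are hypothetical generated artifacts and are omitted; only the three method calls come from the BaseParser code above.

    // Hypothetical debugging session around an ANTLR-generated parser.
    func debugParse(p *MyParser) { // MyParser: placeholder for a generated parser embedding BaseParser
        p.SetTrace(&TraceListener{}) // contents ignored: SetTrace installs its own listener when given non-nil
        p.Prog()                     // placeholder start rule
        p.DumpDFA()                  // print the decision DFAs accumulated during the parse
        fmt.Println(p.GetDFAStrings())
    }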
-func (p *BaseParser) SetTrace(trace *TraceListener) { - if trace == nil { - p.RemoveParseListener(p.tracer) - p.tracer = nil - } else { - if p.tracer != nil { - p.RemoveParseListener(p.tracer) - } - p.tracer = NewTraceListener(p) - p.AddParseListener(p.tracer) - } -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go deleted file mode 100644 index 8bcc46a0d..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go +++ /dev/null @@ -1,1559 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorTraceATNSim = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false - TurnOffLRLoopEntryBranchOpt = false -) - -type ParserATNSimulator struct { - *BaseATNSimulator - - parser Parser - predictionMode int - input TokenStream - startIndex int - dfa *DFA - mergeCache *DoubleDict - outerContext ParserRuleContext -} - -func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - - p := new(ParserATNSimulator) - - p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - p.parser = parser - p.decisionToDFA = decisionToDFA - // SLL, LL, or LL + exact ambig detection?// - p.predictionMode = PredictionModeLL - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - p.input = nil - p.startIndex = 0 - p.outerContext = nil - p.dfa = nil - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't Synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. - // - p.mergeCache = nil - - return p -} - -func (p *ParserATNSimulator) GetPredictionMode() int { - return p.predictionMode -} - -func (p *ParserATNSimulator) SetPredictionMode(v int) { - p.predictionMode = v -} - -func (p *ParserATNSimulator) reset() { -} - -func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + - strconv.Itoa(input.LT(1).GetColumn())) - } - - p.input = input - p.startIndex = input.Index() - p.outerContext = outerContext - - dfa := p.decisionToDFA[decision] - p.dfa = dfa - m := input.Mark() - index := input.Index() - - defer func() { - p.dfa = nil - p.mergeCache = nil // wack cache after each prediction - input.Seek(index) - input.Release(m) - }() - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? 
- var s0 *DFAState - p.atn.stateMu.RLock() - if dfa.getPrecedenceDfa() { - p.atn.edgeMu.RLock() - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) - p.atn.edgeMu.RUnlock() - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.getS0() - } - p.atn.stateMu.RUnlock() - - if s0 == nil { - if outerContext == nil { - outerContext = ParserRuleContextEmpty - } - if ParserATNSimulatorDebug { - fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) - } - fullCtx := false - s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) - - p.atn.stateMu.Lock() - if dfa.getPrecedenceDfa() { - // If p is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. - // - dfa.s0.configs = s0Closure - s0Closure = p.applyPrecedenceFilter(s0Closure) - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - p.atn.edgeMu.Lock() - dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) - p.atn.edgeMu.Unlock() - } else { - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.setS0(s0) - } - p.atn.stateMu.Unlock() - } - - alt := p.execATN(dfa, s0, input, index, outerContext) - if ParserATNSimulatorDebug { - fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) - } - return alt - -} - -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - -// There are some key conditions we're looking for after computing a new -// set of ATN configs (proposed DFA state): -// if the set is empty, there is no viable alternative for current symbol -// does the state uniquely predict an alternative? -// does the state have a conflict that would prevent us from -// putting it on the work list? - -// We also have some key operations to do: -// add an edge from previous DFA state to potentially NewDFA state, D, -// upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) 
-// collecting predicates and adding semantic context to DFA accept states -// adding rule context to context-sensitive DFA accept states -// consuming an input symbol -// Reporting a conflict -// Reporting an ambiguity -// Reporting a context sensitivity -// Reporting insufficient predicates - -// cover these cases: -// -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + - ", DFA state " + s0.String() + - ", LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) - } - - previousD := s0 - - if ParserATNSimulatorDebug { - fmt.Println("s0 = " + s0.String()) - } - t := input.LA(1) - for { // for more work - D := p.getExistingTargetState(previousD, t) - if D == nil { - D = p.computeTargetState(dfa, previousD, t) - } - if D == ATNSimulatorError { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - if D.requiresFullContext && p.predictionMode != PredictionModeSLL { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.GetConflictingAlts() - if D.predicates != nil { - if ParserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL failover") - } - conflictIndex := input.Index() - if conflictIndex != startIndex { - input.Seek(startIndex) - } - conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) - if conflictingAlts.length() == 1 { - if ParserATNSimulatorDebug { - fmt.Println("Full LL avoided") - } - return conflictingAlts.minValue() - } - if conflictIndex != startIndex { - // restore the index so Reporting the fallback to full - // context occurs with the index at the correct spot - input.Seek(conflictIndex) - } - } - if ParserATNSimulatorDFADebug { - fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) - } - fullCtx := true - s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) - p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt - } - if D.isAcceptState { - if D.predicates == nil { - return D.prediction - } - stopIndex := input.Index() - input.Seek(startIndex) - alts := p.evalSemanticContext(D.predicates, outerContext, true) - - switch alts.length() { - case 0: - panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) - case 1: - return alts.minValue() - default: - // Report ambiguity after predicate evaluation to make sure the correct set of 
ambig alts is Reported. - p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() - } - } - previousD = D - - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// p method returns {@code nil}. -// -// @param previousD The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for p edge is not -// already cached - -func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { - if t+1 < 0 { - return nil - } - - p.atn.edgeMu.RLock() - defer p.atn.edgeMu.RUnlock() - edges := previousD.getEdges() - if edges == nil || t+1 >= len(edges) { - return nil - } - return previousD.getIthEdge(t + 1) -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param dfa The DFA -// @param previousD The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, p method -// returns {@link //ERROR}. - -func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { - reach := p.computeReachSet(previousD.configs, t, false) - - if reach == nil { - p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) - return ATNSimulatorError - } - // create Newtarget state we'll add to DFA after it's complete - D := NewDFAState(-1, reach) - - predictedAlt := p.getUniqueAlt(reach) - - if ParserATNSimulatorDebug { - altSubSets := PredictionModegetConflictingAltSubsets(reach) - fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + - ", previous=" + previousD.configs.String() + - ", configs=" + reach.String() + - ", predict=" + strconv.Itoa(predictedAlt) + - ", allSubsetsConflict=" + - fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + - ", conflictingAlts=" + p.getConflictingAlts(reach).String()) - } - if predictedAlt != ATNInvalidAltNumber { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true - D.configs.SetUniqueAlt(predictedAlt) - D.setPrediction(predictedAlt) - } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) - D.requiresFullContext = true - // in SLL-only mode, we will stop at p state and return the minimum alt - D.isAcceptState = true - D.setPrediction(D.configs.GetConflictingAlts().minValue()) - } - if D.isAcceptState && D.configs.HasSemanticContext() { - p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) - if D.predicates != nil { - D.setPrediction(ATNInvalidAltNumber) - } - } - // all adds to dfa are done after we've created full D state - D = p.addDFAEdge(dfa, previousD, t, D) - return D -} - -func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { - // We need to test all predicates, even in DFA states that - // uniquely predict alternative. 
- nalts := len(decisionState.GetTransitions()) - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) - altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) - if altToPred != nil { - dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) - dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.setPrediction(altsToCollectPredsFrom.minValue()) - } -} - -// comes back with reach.uniqueAlt set to a valid alt -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("execATNWithFullContext " + s0.String()) - } - - fullCtx := true - foundExactAmbig := false - var reach ATNConfigSet - previous := s0 - input.Seek(startIndex) - t := input.LA(1) - predictedAlt := -1 - - for { // for more work - reach = p.computeReachSet(previous, t, fullCtx) - if reach == nil { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previous, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - altSubSets := PredictionModegetConflictingAltSubsets(reach) - if ParserATNSimulatorDebug { - fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + - strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + - fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) - } - reach.SetUniqueAlt(p.getUniqueAlt(reach)) - // unique prediction? - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - predictedAlt = reach.GetUniqueAlt() - break - } - if p.predictionMode != PredictionModeLLExactAmbigDetection { - predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) - if predictedAlt != ATNInvalidAltNumber { - break - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { - foundExactAmbig = true - predictedAlt = PredictionModegetSingleViableAlt(altSubSets) - break - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. 
- if reach.GetUniqueAlt() != ATNInvalidAltNumber { - p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt - } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. - - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In p case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve p without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. - - p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) - - return predictedAlt -} - -func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { - if p.mergeCache == nil { - p.mergeCache = NewDoubleDict() - } - intermediate := NewBaseATNConfigSet(fullCtx) - - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. - // - // For full-context reach operations, separate handling is required to - // ensure that the alternative Matching the longest overall sequence is - // chosen when multiple such configurations can Match the input. - - var skippedStopStates []*BaseATNConfig - - // First figure out where we can reach on input t - for _, c := range closure.GetItems() { - if ParserATNSimulatorDebug { - fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) - } - - if _, ok := c.GetState().(*RuleStopState); ok { - if fullCtx || t == TokenEOF { - skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig)) - if ParserATNSimulatorDebug { - fmt.Println("added " + c.String() + " to SkippedStopStates") - } - } - continue - } - - for _, trans := range c.GetState().GetTransitions() { - target := p.getReachableTarget(trans, t) - if target != nil { - cfg := NewBaseATNConfig4(c, target) - intermediate.Add(cfg, p.mergeCache) - if ParserATNSimulatorDebug { - fmt.Println("added " + cfg.String() + " to intermediate") - } - } - } - } - - // Now figure out where the reach operation can take us... - var reach ATNConfigSet - - // This block optimizes the reach operation for intermediate sets which - // trivially indicate a termination state for the overall - // AdaptivePredict operation. 
- // - // The conditions assume that intermediate - // contains all configurations relevant to the reach set, but p - // condition is not true when one or more configurations have been - // withheld in SkippedStopStates, or when the current symbol is EOF. - // - if skippedStopStates == nil && t != TokenEOF { - if len(intermediate.configs) == 1 { - // Don't pursue the closure if there is just one state. - // It can only have one alternative just add to result - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } - } - // If the reach set could not be trivially determined, perform a closure - // operation on the intermediate set to compute its initial value. - // - if reach == nil { - reach = NewBaseATNConfigSet(fullCtx) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - treatEOFAsEpsilon := t == TokenEOF - amount := len(intermediate.configs) - for k := 0; k < amount; k++ { - p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) - } - } - if t == TokenEOF { - // After consuming EOF no additional input is possible, so we are - // only interested in configurations which reached the end of the - // decision rule (local context) or end of the start rule (full - // context). Update reach to contain only these configurations. This - // handles both explicit EOF transitions in the grammar and implicit - // EOF transitions following the end of the decision or start rule. - // - // When reach==intermediate, no closure operation was performed. In - // p case, removeAllConfigsNotInRuleStopState needs to check for - // reachable rule stop states as well as configurations already in - // a rule stop state. - // - // This is handled before the configurations in SkippedStopStates, - // because any configurations potentially added from that list are - // already guaranteed to meet p condition whether or not it's - // required. - // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) - } - // If SkippedStopStates!=nil, then it contains at least one - // configuration. For full-context reach operations, these - // configurations reached the end of the start rule, in which case we - // only add them back to reach if no configuration during the current - // closure operation reached such a state. This ensures AdaptivePredict - // chooses an alternative Matching the longest overall sequence when - // multiple alternatives are viable. - // - if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { - for l := 0; l < len(skippedStopStates); l++ { - reach.Add(skippedStopStates[l], p.mergeCache) - } - } - - if ParserATNSimulatorTraceATNSim { - fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) - } - - if len(reach.GetItems()) == 0 { - return nil - } - - return reach -} - -// Return a configuration set containing only the configurations from -// {@code configs} which are in a {@link RuleStopState}. If all -// configurations in {@code configs} are already in a rule stop state, p -// method simply returns {@code configs}. -// -//

-// When {@code lookToEndOfRule} is true, p method uses
-// {@link ATN//NextTokens} for each configuration in {@code configs} which is
-// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.

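The filtering rule described above can be summarized with a small standalone sketch; toyStopConfig is invented, and the epsilon-reachability test is reduced to a precomputed flag.

    type toyStopConfig struct {
        atRuleStop    bool
        epsilonToStop bool // a rule stop state is reachable via epsilon-only transitions
    }

    // keepRuleStopConfigs keeps configurations already in a rule stop state
    // and, when lookToEndOfRule is set, also those that can reach one without
    // consuming input.
    func keepRuleStopConfigs(configs []toyStopConfig, lookToEndOfRule bool) []toyStopConfig {
        out := make([]toyStopConfig, 0, len(configs))
        for _, c := range configs {
            if c.atRuleStop || (lookToEndOfRule && c.epsilonToStop) {
                out = append(out, c)
            }
        }
        return out
    }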
-// -// @param configs the configuration set to update -// @param lookToEndOfRule when true, p method checks for rule stop states -// reachable by epsilon-only transitions from each configuration in -// {@code configs}. -// -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { - if PredictionModeallConfigsInRuleStopStates(configs) { - return configs - } - result := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.GetItems() { - if _, ok := config.GetState().(*RuleStopState); ok { - result.Add(config, p.mergeCache) - continue - } - if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { - NextTokens := p.atn.NextTokens(config.GetState(), nil) - if NextTokens.contains(TokenEpsilon) { - endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) - } - } - } - return result -} - -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { - // always at least the implicit call to start rule - initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewBaseATNConfigSet(fullCtx) - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("computeStartState from ATN state " + a.String() + - " initialContext=" + initialContext.String()) - } - - for i := 0; i < len(a.GetTransitions()); i++ { - target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) - p.closure(c, configs, closureBusy, true, fullCtx, false) - } - return configs -} - -// This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a -// precedence DFA for a particular precedence value. The transformation -// process applies the following changes to the start state's configuration -// set. -// -//
    -//
-//  1. Evaluate the precedence predicates for each configuration using
-//     {@link SemanticContext//evalPrecedence}.
-//  2. Remove all configurations which predict an alternative greater than
-//     1, for which another configuration that predicts alternative 1 is in the
-//     same ATN state with the same prediction context. This transformation is
-//     valid for the following reasons:
-//       - The closure block cannot contain any epsilon transitions which bypass
-//         the body of the closure, so all states reachable via alternative 1 are
-//         part of the precedence alternatives of the transformed left-recursive
-//         rule.
-//       - The "primary" portion of a left recursive rule cannot contain an
-//         epsilon transition, so the only way an alternative other than 1 can exist
-//         in a state that is also reachable via alternative 1 is by nesting calls
-//         to the left-recursive rule, with the outer calls not being at the
-//         preferred precedence level.
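A compressed standalone sketch of the two passes described in the list above (the precedence-predicate evaluation from step 1 is omitted, and toyPrecConfig is an invented type, not the runtime's ATNConfig):

    type toyPrecConfig struct {
        alt   int
        state int
        ctx   string // stand-in for the prediction context
    }

    // applyToyPrecedenceFilter records the context of every configuration that
    // predicts alternative 1, then drops any higher-alternative configuration
    // sitting in the same state with the same context.
    func applyToyPrecedenceFilter(configs []toyPrecConfig) []toyPrecConfig {
        fromAlt1 := map[int]string{}
        out := make([]toyPrecConfig, 0, len(configs))
        for _, c := range configs {
            if c.alt == 1 {
                fromAlt1[c.state] = c.ctx
                out = append(out, c)
            }
        }
        for _, c := range configs {
            if c.alt == 1 {
                continue // already handled
            }
            if ctx, ok := fromAlt1[c.state]; ok && ctx == c.ctx {
                continue // suppressed by the corresponding alt-1 configuration
            }
            out = append(out, c)
        }
        return out
    }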
-// -//

-// The prediction context must be considered by p filter to address
-// situations like the following.
-//

-// -//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-// 
-//
-//

-// If the above grammar, the ATN state immediately before the token
-// reference {@code 'a'} in {@code letterA} is reachable from the left edge
-// of both the primary and closure blocks of the left-recursive rule
-// {@code statement}. The prediction context associated with each of these
-// configurations distinguishes between them, and prevents the alternative
-// which stepped out to {@code prog} (and then back in to {@code statement}
-// from being eliminated by the filter.
-//

-// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { - - statesFromAlt1 := make(map[int]PredictionContext) - configSet := NewBaseATNConfigSet(configs.FullContext()) - - for _, config := range configs.GetItems() { - // handle alt 1 first - if config.GetAlt() != 1 { - continue - } - updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) - if updatedContext == nil { - // the configuration was eliminated - continue - } - statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() - if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) - } else { - configSet.Add(config, p.mergeCache) - } - } - for _, config := range configs.GetItems() { - - if config.GetAlt() == 1 { - // already handled - continue - } - // In the future, p elimination step could be updated to also - // filter the prediction context for alternatives predicting alt>1 - // (basically a graph subtraction algorithm). - if !config.getPrecedenceFilterSuppressed() { - context := statesFromAlt1[config.GetState().GetStateNumber()] - if context != nil && context.Equals(config.GetContext()) { - // eliminated - continue - } - } - configSet.Add(config, p.mergeCache) - } - return configSet -} - -func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { - if trans.Matches(ttype, 0, p.atn.maxTokenType) { - return trans.getTarget() - } - - return nil -} - -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { - - altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.GetItems() { - if ambigAlts.contains(c.GetAlt()) { - altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) - } - } - nPredAlts := 0 - for i := 1; i <= nalts; i++ { - pred := altToPred[i] - if pred == nil { - altToPred[i] = SemanticContextNone - } else if pred != SemanticContextNone { - nPredAlts++ - } - } - // nonambig alts are nil in altToPred - if nPredAlts == 0 { - altToPred = nil - } - if ParserATNSimulatorDebug { - fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) - } - return altToPred -} - -func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { - pairs := make([]*PredPrediction, 0) - containsPredicate := false - for i := 1; i < len(altToPred); i++ { - pred := altToPred[i] - // unpredicated is indicated by SemanticContextNONE - if ambigAlts != nil && ambigAlts.contains(i) { - pairs = append(pairs, NewPredPrediction(pred, i)) - } - if pred != SemanticContextNone { - containsPredicate = true - } - } - if !containsPredicate { - return nil - } - return pairs -} - -// This method is used to improve the localization of error messages by -// choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} state was reached during ATN simulation. -// -//

-// The default implementation of p method uses the following
-// algorithm to identify an ATN configuration which successfully parsed the
-// decision entry rule. Choosing such an alternative ensures that the
-// {@link ParserRuleContext} returned by the calling rule will be complete
-// and valid, and the syntax error will be Reported later at a more
-// localized location.

-// -//
    -//
-//   - If a syntactically valid path or paths reach the end of the decision rule and
-//     they are semantically valid if predicated, return the min associated alt.
-//   - Else, if a semantically invalid but syntactically valid path exist
-//     or paths exist, return the minimum associated alt.
-//   - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
-// -//

-// In some scenarios, the algorithm described above could predict an
-// alternative which will result in a {@link FailedPredicateException} in
-// the parser. Specifically, p could occur if the only configuration
-// capable of successfully parsing to the end of the decision rule is
-// blocked by a semantic predicate. By choosing p alternative within
-// {@link //AdaptivePredict} instead of panicing a
-// {@link NoViableAltException}, the resulting
-// {@link FailedPredicateException} in the parser will identify the specific
-// predicate which is preventing the parser from successfully parsing the
-// decision rule, which helps developers identify and correct logic errors
-// in semantic predicates.
-//

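The selection order in the list above can be condensed into a small standalone sketch; toyAltConfig is an invented type, with predPasses standing in for semantic-context evaluation and finishedRule for having reached the end of the decision entry rule.

    type toyAltConfig struct {
        alt          int
        predPasses   bool
        finishedRule bool
    }

    // fallbackAlt prefers the smallest alternative that completed the decision
    // entry rule, trying the semantically valid configurations first.
    func fallbackAlt(configs []toyAltConfig) int {
        const invalidAlt = 0
        minFinished := func(valid bool) int {
            best := invalidAlt
            for _, c := range configs {
                if c.predPasses == valid && c.finishedRule && (best == invalidAlt || c.alt < best) {
                    best = c.alt
                }
            }
            return best
        }
        if alt := minFinished(true); alt != invalidAlt {
            return alt
        }
        return minFinished(false) // syntactically viable but blocked by a predicate
    }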
-// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper -// or the parser stack at the instant before prediction commences. -// -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { - cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) - semValidConfigs := cfgs[0] - semInvalidConfigs := cfgs[1] - alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) - if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists - return alt - } - // Is there a syntactically valid path with a failed pred? - if len(semInvalidConfigs.GetItems()) > 0 { - alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) - if alt != ATNInvalidAltNumber { // syntactically viable path exists - return alt - } - } - return ATNInvalidAltNumber -} - -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { - alts := NewIntervalSet() - - for _, c := range configs.GetItems() { - _, ok := c.GetState().(*RuleStopState) - - if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { - alts.addOne(c.GetAlt()) - } - } - if alts.length() == 0 { - return ATNInvalidAltNumber - } - - return alts.first() -} - -// Walk the list of configurations and split them according to -// those that have preds evaluating to true/false. If no pred, assume -// true pred and include in succeeded set. Returns Pair of sets. -// -// Create a NewSet so as not to alter the incoming parameter. -// -// Assumption: the input stream has been restored to the starting point -// prediction, which is where predicates need to evaluate. - -type ATNConfigSetPair struct { - item0, item1 ATNConfigSet -} - -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { - succeeded := NewBaseATNConfigSet(configs.FullContext()) - failed := NewBaseATNConfigSet(configs.FullContext()) - - for _, c := range configs.GetItems() { - if c.GetSemanticContext() != SemanticContextNone { - predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) - if predicateEvaluationResult { - succeeded.Add(c, nil) - } else { - failed.Add(c, nil) - } - } else { - succeeded.Add(c, nil) - } - } - return []ATNConfigSet{succeeded, failed} -} - -// Look through a list of predicate/alt pairs, returning alts for the -// -// pairs that win. A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. 
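A standalone sketch of the predicate/alt evaluation rule described above; toyPredPair is invented, with a nil predicate playing the role of the NONE ("always true") context.

    type toyPredPair struct {
        pred func() bool // nil means unpredicated, i.e. always true
        alt  int
    }

    // evalToyPredicates collects the alternatives whose predicate survives;
    // when complete is false it stops at the first surviving alternative.
    func evalToyPredicates(pairs []toyPredPair, complete bool) map[int]bool {
        predictions := map[int]bool{}
        for _, pair := range pairs {
            if pair.pred == nil || pair.pred() {
                predictions[pair.alt] = true
                if !complete {
                    break
                }
            }
        }
        return predictions
    }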
-func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { - predictions := NewBitSet() - for i := 0; i < len(predPredictions); i++ { - pair := predPredictions[i] - if pair.pred == SemanticContextNone { - predictions.add(pair.alt) - if !complete { - break - } - continue - } - - predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) - } - if predicateEvaluationResult { - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) - } - predictions.add(pair.alt) - if !complete { - break - } - } - } - return predictions -} - -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { - initialDepth := 0 - p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, - fullCtx, initialDepth, treatEOFAsEpsilon) -} - -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - if ParserATNSimulatorTraceATNSim { - fmt.Println("closure(" + config.String() + ")") - //fmt.Println("configs(" + configs.String() + ")") - if config.GetReachesIntoOuterContext() > 50 { - panic("problem") - } - } - - if _, ok := config.GetState().(*RuleStopState); ok { - // We hit rule end. If we have context info, use it - // run thru all possible stack tops in ctx - if !config.GetContext().isEmpty() { - for i := 0; i < config.GetContext().length(); i++ { - if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { - if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) - continue - } else { - // we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) - } - continue - } - returnState := p.atn.states[config.GetContext().getReturnState(i)] - newContext := config.GetContext().GetParent(i) // "pop" return state - - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) - // While we have context to pop back from, we may have - // gotten that context AFTER having falling off a rule. - // Make sure we track that we are now out of context. 
- c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) - p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) - } - return - } else if fullCtx { - // reached end of start rule - configs.Add(config, p.mergeCache) - return - } else { - // else if we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - } - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) -} - -// Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - state := config.GetState() - // optimization - if !state.GetEpsilonOnlyTransitions() { - configs.Add(config, p.mergeCache) - // make sure to not return here, because EOF transitions can act as - // both epsilon transitions and non-epsilon transitions. - } - for i := 0; i < len(state.GetTransitions()); i++ { - if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) { - continue - } - - t := state.GetTransitions()[i] - _, ok := t.(*ActionTransition) - continueCollecting := collectPredicates && !ok - c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if ci, ok := c.(*BaseATNConfig); ok && ci != nil { - newDepth := depth - - if _, ok := config.GetState().(*RuleStopState); ok { - // target fell off end of rule mark resulting c as having dipped into outer context - // We can't get here if incoming config was rule stop and we had context - // track how far we dip into outer context. Might - // come in handy and we avoid evaluating context dependent - // preds if p is > 0. - - if p.dfa != nil && p.dfa.getPrecedenceDfa() { - if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { - c.setPrecedenceFilterSuppressed(true) - } - } - - c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) - - _, present := closureBusy.Put(c) - if present { - // avoid infinite recursion for right-recursive rules - continue - } - - configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method - newDepth-- - if ParserATNSimulatorDebug { - fmt.Println("dips into outer ctx: " + c.String()) - } - } else { - - if !t.getIsEpsilon() { - _, present := closureBusy.Put(c) - if present { - // avoid infinite recursion for EOF* and EOF+ - continue - } - } - if _, ok := t.(*RuleTransition); ok { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if newDepth >= 0 { - newDepth++ - } - } - } - p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) - } - } -} - -func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool { - if TurnOffLRLoopEntryBranchOpt { - return false - } - - _p := config.GetState() - - // First check to see if we are in StarLoopEntryState generated during - // left-recursion elimination. For efficiency, also check if - // the context has an empty stack case. 
If so, it would mean - // global FOLLOW so we can't perform optimization - if _p.GetStateType() != ATNStateStarLoopEntry { - return false - } - startLoop, ok := _p.(*StarLoopEntryState) - if !ok { - return false - } - if !startLoop.precedenceRuleDecision || - config.GetContext().isEmpty() || - config.GetContext().hasEmptyPath() { - return false - } - - // Require all return states to return back to the same rule - // that p is in. - numCtxs := config.GetContext().length() - for i := 0; i < numCtxs; i++ { - returnState := p.atn.states[config.GetContext().getReturnState(i)] - if returnState.GetRuleIndex() != _p.GetRuleIndex() { - return false - } - } - x := _p.GetTransitions()[0].getTarget() - decisionStartState := x.(BlockStartState) - blockEndStateNum := decisionStartState.getEndState().stateNumber - blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) - - // Verify that the top of each stack context leads to loop entry/exit - // state through epsilon edges and w/o leaving rule. - - for i := 0; i < numCtxs; i++ { // for each stack context - returnStateNumber := config.GetContext().getReturnState(i) - returnState := p.atn.states[returnStateNumber] - - // all states must have single outgoing epsilon edge - if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() { - return false - } - - // Look for prefix op case like 'not expr', (' type ')' expr - returnStateTarget := returnState.GetTransitions()[0].getTarget() - if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p { - continue - } - - // Look for 'expr op expr' or case where expr's return state is block end - // of (...)* internal block; the block end points to loop back - // which points to p but we don't need to check that - if returnState == blockEndState { - continue - } - - // Look for ternary expr ? expr : expr. 
The return state points at block end, - // which points at loop entry state - if returnStateTarget == blockEndState { - continue - } - - // Look for complex prefix 'between expr and expr' case where 2nd expr's - // return state points at block end state of (...)* internal block - if returnStateTarget.GetStateType() == ATNStateBlockEnd && - len(returnStateTarget.GetTransitions()) == 1 && - returnStateTarget.GetTransitions()[0].getIsEpsilon() && - returnStateTarget.GetTransitions()[0].getTarget() == _p { - continue - } - - // anything else ain't conforming - return false - } - - return true -} - -func (p *ParserATNSimulator) getRuleName(index int) string { - if p.parser != nil && index >= 0 { - return p.parser.GetRuleNames()[index] - } - var sb strings.Builder - sb.Grow(32) - - sb.WriteString("') - return sb.String() -} - -func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { - - switch t.getSerializationType() { - case TransitionRULE: - return p.ruleTransition(config, t.(*RuleTransition)) - case TransitionPRECEDENCE: - return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionPREDICATE: - return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionACTION: - return p.actionTransition(config, t.(*ActionTransition)) - case TransitionEPSILON: - return NewBaseATNConfig4(config, t.getTarget()) - case TransitionATOM, TransitionRANGE, TransitionSET: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - default: - return nil - } -} - -func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) - } - return NewBaseATNConfig4(config, t.getTarget()) -} - -func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, - pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + - strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && inContext { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. 
- currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + - ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && (!pt.isCtxDependent || inContext) { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) - } - returnState := t.followState - newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) - return NewBaseATNConfig1(config, t.getTarget(), newContext) -} - -func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModeGetAlts(altsets) -} - -// Sam pointed out a problem with the previous definition, v3, of -// ambiguous states. If we have another state associated with conflicting -// alternatives, we should keep going. For example, the following grammar -// -// s : (ID | ID ID?) '' -// -// When the ATN simulation reaches the state before '', it has a DFA -// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally -// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node -// because alternative to has another way to continue, via [6|2|[]]. -// The key is that we have a single state that has config's only associated -// with a single alternative, 2, and crucially the state transitions -// among the configurations are all non-epsilon transitions. That means -// we don't consider any conflicts that include alternative 2. So, we -// ignore the conflict between alts 1 and 2. 
We ignore a set of -// conflicting alts when there is an intersection with an alternative -// associated with a single alt state in the state&rarrconfig-list map. -// -// It's also the case that we might have two conflicting configurations but -// also a 3rd nonconflicting configuration for a different alternative: -// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: -// -// a : A | A | A B -// -// After Matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not -// stop working on p state. In the previous example, we're concerned -// with states associated with the conflicting alternatives. Here alt -// 3 is not associated with the conflicting configs, but since we can continue -// looking for input reasonably, I don't declare the state done. We -// ignore a set of conflicting alts when we have an alternative -// that we still need to pursue. -// - -func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { - var conflictingAlts *BitSet - if configs.GetUniqueAlt() != ATNInvalidAltNumber { - conflictingAlts = NewBitSet() - conflictingAlts.add(configs.GetUniqueAlt()) - } else { - conflictingAlts = configs.GetConflictingAlts() - } - return conflictingAlts -} - -func (p *ParserATNSimulator) GetTokenName(t int) string { - if t == TokenEOF { - return "EOF" - } - - if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) { - return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" - } - - if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) { - return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">" - } - - return strconv.Itoa(t) -} - -func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { - return p.GetTokenName(input.LA(1)) -} - -// Used for debugging in AdaptivePredict around execATN but I cut -// -// it out for clarity now that alg. works well. We can leave p -// "dead" code for a bit. -func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { - - panic("Not implemented") - - // fmt.Println("dead end configs: ") - // var decs = nvae.deadEndConfigs - // - // for i:=0; i0) { - // var t = c.state.GetTransitions()[0] - // if t2, ok := t.(*AtomTransition); ok { - // trans = "Atom "+ p.GetTokenName(t2.label) - // } else if t3, ok := t.(SetTransition); ok { - // _, ok := t.(*NotSetTransition) - // - // var s string - // if (ok){ - // s = "~" - // } - // - // trans = s + "Set " + t3.set - // } - // } - // fmt.Errorf(c.String(p.parser, true) + ":" + trans) - // } -} - -func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { - return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) -} - -func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { - alt := ATNInvalidAltNumber - for _, c := range configs.GetItems() { - if alt == ATNInvalidAltNumber { - alt = c.GetAlt() // found first alt - } else if c.GetAlt() != alt { - return ATNInvalidAltNumber - } - } - return alt -} - -// Add an edge to the DFA, if possible. This method calls -// {@link //addDFAState} to ensure the {@code to} state is present in the -// DFA. 
If {@code from} is {@code nil}, or if {@code t} is outside the -// range of edges that can be represented in the DFA tables, p method -// returns without adding the edge to the DFA. -// -//

If {@code to} is {@code nil}, p method returns {@code nil}. -// Otherwise, p method returns the {@link DFAState} returned by calling -// {@link //addDFAState} for the {@code to} state.

-// -// @param dfa The DFA -// @param from The source state for the edge -// @param t The input symbol -// @param to The target state for the edge -// -// @return If {@code to} is {@code nil}, p method returns {@code nil} -// otherwise p method returns the result of calling {@link //addDFAState} -// on {@code to} -func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { - if ParserATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) - } - if to == nil { - return nil - } - p.atn.stateMu.Lock() - to = p.addDFAState(dfa, to) // used existing if possible not incoming - p.atn.stateMu.Unlock() - if from == nil || t < -1 || t > p.atn.maxTokenType { - return to - } - p.atn.edgeMu.Lock() - if from.getEdges() == nil { - from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1)) - } - from.setIthEdge(t+1, to) // connect - p.atn.edgeMu.Unlock() - - if ParserATNSimulatorDebug { - var names []string - if p.parser != nil { - names = p.parser.GetLiteralNames() - } - - fmt.Println("DFA=\n" + dfa.String(names, nil)) - } - return to -} - -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} -// is already in the DFA, the existing state is returned. Otherwise p -// method returns {@code D} after adding it to the DFA. -// -//

If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and -// does not change the DFA.

-// -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. -func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { - if d == ATNSimulatorError { - return d - } - existing, present := dfa.states.Get(d) - if present { - if ParserATNSimulatorTraceATNSim { - fmt.Print("addDFAState " + d.String() + " exists") - } - return existing - } - - // The state was not present, so update it with configs - // - d.stateNumber = dfa.states.Len() - if !d.configs.ReadOnly() { - d.configs.OptimizeConfigs(p.BaseATNSimulator) - d.configs.SetReadOnly(true) - } - dfa.states.Put(d) - if ParserATNSimulatorTraceATNSim { - fmt.Println("addDFAState new " + d.String()) - } - - return d -} - -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) - } -} - -// If context sensitive parsing, we know it's ambiguity not conflict// -func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go deleted file mode 100644 index ba62af361..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "golang.org/x/exp/slices" - "strconv" -) - -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. 
-// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EmptyReturnState}. -// / - -var ( - BasePredictionContextglobalNodeCount = 1 - BasePredictionContextid = BasePredictionContextglobalNodeCount -) - -type PredictionContext interface { - Hash() int - Equals(interface{}) bool - GetParent(int) PredictionContext - getReturnState(int) int - length() int - isEmpty() bool - hasEmptyPath() bool - String() string -} - -type BasePredictionContext struct { - cachedHash int -} - -func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func calculateHash(parent PredictionContext, returnState int) int { - h := murmurInit(1) - h = murmurUpdate(h, parent.Hash()) - h = murmurUpdate(h, returnState) - return murmurFinish(h, 2) -} - -var _emptyPredictionContextHash int - -func init() { - _emptyPredictionContextHash = murmurInit(1) - _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) -} - -func calculateEmptyHash() int { - return _emptyPredictionContextHash -} - -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -type PredictionContextCache struct { - cache map[PredictionContext]PredictionContext -} - -func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a Newcontext to the cache. -// Protect shared cache from unsafe thread access. 
-func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { - if ctx == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY - } - existing := p.cache[ctx] - if existing != nil { - return existing - } - p.cache[ctx] = ctx - return ctx -} - -func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { - return p.cache[ctx] -} - -func (p *PredictionContextCache) length() int { - return len(p.cache) -} - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - *BasePredictionContext - - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - var cachedHash int - if parent != nil { - cachedHash = calculateHash(parent, returnState) - } else { - cachedHash = calculateEmptyHash() - } - - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(cachedHash) - - s.parentCtx = parent - s.returnState = returnState - - return s -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(index int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) Hash() int { - return b.cachedHash -} - -func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool { - if b == other { - return true - } - if _, ok := other.(*BaseSingletonPredictionContext); !ok { - return false - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.returnState != otherP.getReturnState(0) { - return false - } - if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.Equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} - -var BasePredictionContextEMPTY = NewEmptyPredictionContext() - -type EmptyPredictionContext struct { - *BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - p.cachedHash = calculateEmptyHash() - return p -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(index int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) Hash() int { - return e.cachedHash -} - -func (e *EmptyPredictionContext) Equals(other interface{}) bool { - return 
e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} - -type ArrayPredictionContext struct { - *BasePredictionContext - - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. - hash := murmurInit(1) - - for _, parent := range parents { - hash = murmurUpdate(hash, parent.Hash()) - } - - for _, returnState := range returnStates { - hash = murmurUpdate(hash, returnState) - } - - hash = murmurFinish(hash, len(parents)<<1) - - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(hash) - - c.parents = parents - c.returnStates = returnStates - - return c -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -// Equals is the default comparison function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Equals(o interface{}) bool { - if a == o { - return true - } - other, ok := o.(*ArrayPredictionContext) - if !ok { - return false - } - if a.cachedHash != other.Hash() { - return false // can't be same if hash is different - } - - // Must compare the actual array elements and not just the array address - // - return slices.Equal(a.returnStates, other.returnStates) && - slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { - return x.Equals(y) - }) -} - -// Hash is the default hash function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Hash() int { - return a.BasePredictionContext.cachedHash -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := "[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} - -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. -// / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { - if outerContext == nil { - outerContext = ParserRuleContextEmpty - } - // if we are in RuleContext of start rule, s, then BasePredictionContext - // is EMPTY. Nobody called us. 
(if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { - return BasePredictionContextEMPTY - } - // If we have a parent, convert it to a BasePredictionContext graph - parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) - state := a.states[outerContext.GetInvokingState()] - transition := state.GetTransitions()[0] - - return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) -} - -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - - // Share same graph if both same - // - if a == b || a.Equals(b) { - return a - } - - // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test - // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created - // from it. - // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion - // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from - // either of them. - - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if rootIsWildcard { - if _, ok := a.(*EmptyPredictionContext); ok { - return a - } - if _, ok := b.(*EmptyPredictionContext); ok { - return b - } - } - - // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters - // here. - // - // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here - - var arp, arb *ArrayPredictionContext - var ok bool - if arp, ok = a.(*ArrayPredictionContext); ok { - } else if _, ok = a.(*BaseSingletonPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) - } else if _, ok = a.(*EmptyPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - if arb, ok = b.(*ArrayPredictionContext); ok { - } else if _, ok = b.(*BaseSingletonPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) - } else if _, ok = b.(*EmptyPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - // Both arp and arb - return mergeArrays(arp, arb, rootIsWildcard, mergeCache) -} - -// Merge two {@link SingletonBasePredictionContext} instances. -// -//

Stack tops equal, parents merge is same return left graph.
-//

-// -//

Same stack top, parents differ merge parents giving array node, then -// remainders of those graphs. A Newroot node is created to point to the -// merged parents.
-//

-// -//

Different stack tops pointing to same parent. Make array node for the -// root where both element in the root point to the same (original) -// parent.
-//

-// -//

Different stack tops pointing to different parents. Make array node for -// the root where each element points to the corresponding original -// parent.
-//

-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// @param mergeCache -// / -func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.Hash(), b.Hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.Hash(), a.Hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - - rootMerge := mergeRoot(a, b, rootIsWildcard) - if rootMerge != nil { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), rootMerge) - } - return rootMerge - } - if a.returnState == b.returnState { - parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) - // if parent is same as existing a or b parent or reduced to a parent, - // return it - if parent == a.parentCtx { - return a // ax + bx = ax, if a=b - } - if parent == b.parentCtx { - return b // ax + bx = bx, if a=b - } - // else: ax + ay = a'[x,y] - // merge parents x and y, giving array node with x,y then remainders - // of those graphs. dup a, a' points at merged array - // Newjoined parent so create Newsingleton pointing to it, a' - spc := SingletonBasePredictionContextCreate(parent, a.returnState) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), spc) - } - return spc - } - // a != b payloads differ - // see if we can collapse parents due to $+x parents if local ctx - var singleParent PredictionContext - if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + - // bx = - // [a,b]x - singleParent = a.parentCtx - } - if singleParent != nil { // parents are same - // sort payloads and use same parent - payloads := []int{a.returnState, b.returnState} - if a.returnState > b.returnState { - payloads[0] = b.returnState - payloads[1] = a.returnState - } - parents := []PredictionContext{singleParent, singleParent} - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), apc) - } - return apc - } - // parents differ and can't merge them. Just pack together - // into array can't merge. - // ax + by = [ax,by] - payloads := []int{a.returnState, b.returnState} - parents := []PredictionContext{a.parentCtx, b.parentCtx} - if a.returnState > b.returnState { // sort by payload - payloads[0] = b.returnState - payloads[1] = a.returnState - parents = []PredictionContext{b.parentCtx, a.parentCtx} - } - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), apc) - } - return apc -} - -// Handle case where at least one of {@code a} or {@code b} is -// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used -// to represent {@link //EMPTY}. -// -//

Local-Context Merges

-// -//

These local-context merge operations are used when {@code rootIsWildcard} -// is true.

-// -//

{@link //EMPTY} is superset of any graph return {@link //EMPTY}.
-//

-// -//

{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is -// {@code //EMPTY} return left graph.
-//

-// -//

Special case of last merge if local context.
-//

-// -//

Full-Context Merges

-// -//

These full-context merge operations are used when {@code rootIsWildcard} -// is false.

-// -//

-// -//

Must keep all contexts {@link //EMPTY} in array is a special value (and -// nil parent).
-//

-// -//

-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// / -func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { - if rootIsWildcard { - if a == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // // + b =// - } - if b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // a +// =// - } - } else { - if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // $ + $ = $ - } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] - payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{b.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) - payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{a.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } - } - return nil -} - -// Merge two {@link ArrayBasePredictionContext} instances. -// -//

Different tops, different parents.
-//

-// -//

Shared top, same parents.
-//

-// -//

Shared top, different parents.
-//

-// -//

Shared top, all shared parents.
-//

-// -//

Equal tops, merge parents and reduce top to -// {@link SingletonBasePredictionContext}.
-//

-// / -func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.Hash(), b.Hash()) - if previous != nil { - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") - } - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.Hash(), a.Hash()) - if previous != nil { - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") - } - return previous.(PredictionContext) - } - } - // merge sorted payloads a + b => M - i := 0 // walks a - j := 0 // walks b - k := 0 // walks target M array - - mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) - mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) - // walk and merge to yield mergedParents, mergedReturnStates - for i < len(a.returnStates) && j < len(b.returnStates) { - aParent := a.parents[i] - bParent := b.parents[j] - if a.returnStates[i] == b.returnStates[j] { - // same payload (stack tops are equal), must yield merged singleton - payload := a.returnStates[i] - // $+$ = $ - bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil - axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax - // -> - // ax - if bothDollars || axAX { - mergedParents[k] = aParent // choose left - mergedReturnStates[k] = payload - } else { // ax+ay -> a'[x,y] - mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) - mergedParents[k] = mergedParent - mergedReturnStates[k] = payload - } - i++ // hop over left one as usual - j++ // but also Skip one in right side since we merge - } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M - mergedParents[k] = aParent - mergedReturnStates[k] = a.returnStates[i] - i++ - } else { // b > a, copy b[j] to M - mergedParents[k] = bParent - mergedReturnStates[k] = b.returnStates[j] - j++ - } - k++ - } - // copy over any payloads remaining in either array - if i < len(a.returnStates) { - for p := i; p < len(a.returnStates); p++ { - mergedParents[k] = a.parents[p] - mergedReturnStates[k] = a.returnStates[p] - k++ - } - } else { - for p := j; p < len(b.returnStates); p++ { - mergedParents[k] = b.parents[p] - mergedReturnStates[k] = b.returnStates[p] - k++ - } - } - // trim merged if we combined a few that had same stack tops - if k < len(mergedParents) { // write index < last position trim - if k == 1 { // for just one merged element, return singleton top - pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), pc) - } - return pc - } - mergedParents = mergedParents[0:k] - mergedReturnStates = mergedReturnStates[0:k] - } - - M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - - // if we created same array as a or b, return that instead - // TODO: track whether this is possible above during merge sort for speed - // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. 
This could be causing allocation problems - if M == a { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), a) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") - } - return a - } - if M == b { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), b) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") - } - return b - } - combineCommonParents(mergedParents) - - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), M) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) - } - return M -} - -// Make pass over all M {@code parents} merge any {@code equals()} -// ones. -// / -func combineCommonParents(parents []PredictionContext) { - uniqueParents := make(map[PredictionContext]PredictionContext) - - for p := 0; p < len(parents); p++ { - parent := parents[p] - if uniqueParents[parent] == nil { - uniqueParents[parent] = parent - } - } - for q := 0; q < len(parents); q++ { - parents[q] = uniqueParents[parents[q]] - } -} - -func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { - - if context.isEmpty() { - return context - } - existing := visited[context] - if existing != nil { - return existing - } - existing = contextCache.Get(context) - if existing != nil { - visited[context] = existing - return existing - } - changed := false - parents := make([]PredictionContext, context.length()) - for i := 0; i < len(parents); i++ { - parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) - if changed || parent != context.GetParent(i) { - if !changed { - parents = make([]PredictionContext, context.length()) - for j := 0; j < context.length(); j++ { - parents[j] = context.GetParent(j) - } - changed = true - } - parents[i] = parent - } - } - if !changed { - contextCache.add(context) - visited[context] = context - return context - } - var updated PredictionContext - if len(parents) == 0 { - updated = BasePredictionContextEMPTY - } else if len(parents) == 1 { - updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) - } else { - updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) - } - contextCache.add(updated) - visited[updated] = updated - visited[context] = updated - - return updated -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go deleted file mode 100644 index 7b9b72fab..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// This enumeration defines the prediction modes available in ANTLR 4 along with -// utility methods for analyzing configuration sets for conflicts and/or -// ambiguities. - -const ( - // - // The SLL(*) prediction mode. This prediction mode ignores the current - // parser context when making predictions. This is the fastest prediction - // mode, and provides correct results for many grammars. 
This prediction - // mode is more powerful than the prediction mode provided by ANTLR 3, but - // may result in syntax errors for grammar and input combinations which are - // not SLL. - // - //

- // When using this prediction mode, the parser will either return a correct - // parse tree (i.e. the same parse tree that would be returned with the - // {@link //LL} prediction mode), or it will Report a syntax error. If a - // syntax error is encountered when using the {@link //SLL} prediction mode, - // it may be due to either an actual syntax error in the input or indicate - // that the particular combination of grammar and input requires the more - // powerful {@link //LL} prediction abilities to complete successfully.

- // - //

- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.

- // - PredictionModeSLL = 0 - // - // The LL(*) prediction mode. This prediction mode allows the current parser - // context to be used for resolving SLL conflicts that occur during - // prediction. This is the fastest prediction mode that guarantees correct - // parse results for all combinations of grammars with syntactically correct - // inputs. - // - //

- // When using this prediction mode, the parser will make correct decisions - // for all syntactically-correct grammar and input combinations. However, in - // cases where the grammar is truly ambiguous this prediction mode might not - // Report a precise answer for exactly which alternatives are - // ambiguous.

- // - //

- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.

- // - PredictionModeLL = 1 - // - // The LL(*) prediction mode with exact ambiguity detection. In addition to - // the correctness guarantees provided by the {@link //LL} prediction mode, - // this prediction mode instructs the prediction algorithm to determine the - // complete and exact set of ambiguous alternatives for every ambiguous - // decision encountered while parsing. - // - //

- // This prediction mode may be used for diagnosing ambiguities during - // grammar development. Due to the performance overhead of calculating sets - // of ambiguous alternatives, this prediction mode should be avoided when - // the exact results are not necessary.

- // - //

- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.

- // - PredictionModeLLExactAmbigDetection = 2 -) - -// Computes the SLL prediction termination condition. -// -//

-// This method computes the SLL prediction termination condition for both of -// the following cases.

-// -//
    -//
  • The usual SLL+LL fallback upon SLL conflict
  • -//
  • Pure SLL without LL fallback
  • -//
-// -//

COMBINED SLL+LL PARSING

-// -//

When LL-fallback is enabled upon SLL conflict, correct predictions are -// ensured regardless of how the termination condition is computed by this -// method. Due to the substantially higher cost of LL prediction, the -// prediction should only fall back to LL when the additional lookahead -// cannot lead to a unique SLL prediction.

-// -//

Assuming combined SLL+LL parsing, an SLL configuration set with only -// conflicting subsets should fall back to full LL, even if the -// configuration sets don't resolve to the same alternative (e.g. -// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting -// configuration, SLL could continue with the hopes that more lookahead will -// resolve via one of those non-conflicting configurations.

-// -//

Here's the prediction termination rule them: SLL (for SLL+LL parsing) -// stops when it sees only conflicting configuration subsets. In contrast, -// full LL keeps going when there is uncertainty.

-// -//

HEURISTIC

-// -//

As a heuristic, we stop prediction when we see any conflicting subset -// unless we see a state that only has one alternative associated with it. -// The single-alt-state thing lets prediction continue upon rules like -// (otherwise, it would admit defeat too soon):

-// -//

{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }

-// -//

When the ATN simulation reaches the state before {@code ”}, it has a -// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally -// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop -// processing this node because alternative to has another way to continue, -// via {@code [6|2|[]]}.

-// -//

It also let's us continue for this rule:

-// -//

{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }

-// -//

After Matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not stop -// working on this state. In the previous example, we're concerned with -// states associated with the conflicting alternatives. Here alt 3 is not -// associated with the conflicting configs, but since we can continue -// looking for input reasonably, don't declare the state done.

-// -//

PURE SLL PARSING

-// -//

To handle pure SLL parsing, all we have to do is make sure that we -// combine stack contexts for configurations that differ only by semantic -// predicate. From there, we can do the usual SLL termination heuristic.

-// -//

PREDICATES IN SLL+LL PARSING

-// -//

SLL decisions don't evaluate predicates until after they reach DFA stop -// states because they need to create the DFA cache that works in all -// semantic situations. In contrast, full LL evaluates predicates collected -// during start state computation so it can ignore predicates thereafter. -// This means that SLL termination detection can totally ignore semantic -// predicates.

-// -//

Implementation-wise, {@link ATNConfigSet} combines stack contexts but not -// semantic predicate contexts so we might see two configurations like the -// following.

-// -//

{@code (s, 1, x, {}), (s, 1, x', {p})}

-// -//

Before testing these configurations against others, we have to merge -// {@code x} and {@code x'} (without modifying the existing configurations). -// For example, we test {@code (x+x')==x”} when looking for conflicts in -// the following configurations.

-// -//

{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}

-// -//

If the configuration set has predicates (as indicated by -// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of -// the configurations to strip out all of the predicates so that a standard -// {@link ATNConfigSet} will merge everything ignoring predicates.

-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to Match additional input so we terminate prediction. - // - if PredictionModeallConfigsInRuleStopStates(configs) { - return true - } - // pure SLL mode parsing - if mode == PredictionModeSLL { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if configs.HasSemanticContext() { - // dup configs, tossing out semantic predicates - dup := NewBaseATNConfigSet(false) - for _, c := range configs.GetItems() { - - // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) - dup.Add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) -} - -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); ok { - return true - } - } - return false -} - -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { - - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); !ok { - return false - } - } - return true -} - -// Full LL prediction termination. -// -//

Can we stop looking ahead during ATN simulation or is there some -// uncertainty as to which alternative we will ultimately pick, after -// consuming more input? Even if there are partial conflicts, we might know -// that everything is going to resolve to the same minimum alternative. That -// means we can stop since no more lookahead will change that fact. On the -// other hand, there might be multiple conflicts that resolve to different -// minimums. That means we need more look ahead to decide which of those -// alternatives we should predict.

-// -//

The basic idea is to split the set of configurations {@code C}, into -// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with -// non-conflicting configurations. Two configurations conflict if they have -// identical {@link ATNConfig//state} and {@link ATNConfig//context} values -// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} -// and {@code (s, j, ctx, _)} for {@code i!=j}.

-// -//

Reduce these configuration subsets to the set of possible alternatives. -// You can compute the alternative subsets in one pass as follows:

-// -//

{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in -// {@code C} holding {@code s} and {@code ctx} fixed.

-// -//

Or in pseudo-code, for each configuration {@code c} in {@code C}:

-// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-// 
-// -//

The values in {@code map} are the set of {@code A_s,ctx} sets.

-// -//

If {@code |A_s,ctx|=1} then there is no conflict associated with -// {@code s} and {@code ctx}.

-// -//

Reduce the subsets to singletons by choosing a minimum of each subset. If -// the union of these alternative subsets is a singleton, then no amount of -// more lookahead will help us. We will always pick that alternative. If, -// however, there is more than one alternative, then we are uncertain which -// alternative to predict and must continue looking for resolution. We may -// or may not discover an ambiguity in the future, even if there are no -// conflicting subsets this round.

-// -//

The biggest sin is to terminate early because it means we've made a -// decision but were uncertain as to the eventual outcome. We haven't used -// enough lookahead. On the other hand, announcing a conflict too late is no -// big deal you will still have the conflict. It's just inefficient. It -// might even look until the end of file.

-// -//

No special consideration for semantic predicates is required because -// predicates are evaluated on-the-fly for full LL prediction, ensuring that -// no configuration contains a semantic context during the termination -// check.

-// -//

CONFLICTING CONFIGS

Two configurations {@code (s, i, x)} and {@code (s, j, x')} conflict when {@code i!=j} but {@code x=x'}. Because we merge all {@code (s, i, _)} configurations together, that means that there are at most {@code n} configurations associated with state {@code s} for {@code n} possible alternatives in the decision. The merged stacks complicate the comparison of configuration contexts {@code x} and {@code x'}. Sam checks to see if one is a subset of the other by calling merge and checking to see if the merged result is either {@code x} or {@code x'}. If the {@code x} associated with lowest alternative {@code i} is the superset, then {@code i} is the only possible prediction since the others resolve to {@code min(i)} as well. However, if {@code x} is associated with {@code j>i} then at least one stack configuration for {@code j} is not in conflict with alternative {@code i}. The algorithm should keep going, looking for more lookahead due to the uncertainty.

For simplicity, I'm doing an equality check between {@code x} and {@code x'} that lets the algorithm continue to consume lookahead longer than necessary. The reason I like the equality check is of course its simplicity, but also because that is the test you need to detect the alternatives that are actually in conflict.
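As a concrete illustration of that equality test (a sketch only; the real check compares ATNConfig state and PredictionContext objects, not a toy struct):

package main

import "fmt"

// config is a stand-in for ATNConfig with just the fields the test needs.
type config struct {
	state int
	ctx   string
	alt   int
}

// conflicts applies the simplified rule from the text: same state, equal
// contexts, different alternatives. Using equality (rather than the merge/
// subset check) may only delay termination; it never stops too early.
func conflicts(a, b config) bool {
	return a.state == b.state && a.ctx == b.ctx && a.alt != b.alt
}

func main() {
	fmt.Println(conflicts(config{7, "x", 1}, config{7, "x", 2})) // true: (s, 1, x) vs (s, 2, x)
	fmt.Println(conflicts(config{7, "x", 1}, config{7, "y", 2})) // false: contexts differ, so no conflict
}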

CONTINUE/STOP RULE

Continue if the union of resolved alternative sets from non-conflicting and conflicting alternative subsets has more than one alternative. We are uncertain about which alternative to predict.

The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which alternatives are still in the running for the amount of input we've consumed at this point. The conflicting sets let us strip away configurations that won't lead to more states, because we resolve conflicts to the configuration with a minimum alternative for the conflicting set.
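A sketch of that rule, mirroring the deleted PredictionModegetSingleViableAlt/PredictionModeGetAlts helpers further below but with plain int slices standing in for *BitSet; the CASES list that follows can be checked against it:

package main

import "fmt"

// resolved reduces each alternative subset to its minimum and returns the
// union of those minimums. Prediction may stop only if the union is a singleton.
func resolved(altsets [][]int) map[int]bool {
	union := map[int]bool{}
	for _, alts := range altsets {
		min := alts[0]
		for _, a := range alts {
			if a < min {
				min = a
			}
		}
		union[min] = true
	}
	return union
}

func main() {
	// {1,2}, {1,2} and the singleton {1} all resolve to 1 => stop and predict 1.
	fmt.Println(len(resolved([][]int{{1, 2}, {1, 2}, {1}})) == 1) // true
	// {1,2} resolves to 1 but {2,3} resolves to 2 => still uncertain, continue.
	fmt.Println(len(resolved([][]int{{1, 2}, {2, 3}})) == 1) // false
}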

CASES

  • no conflicts and more than 1 alternative in set => continue
  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)}, {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = {@code {1,3}} => continue
  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} = {@code {1}} => stop and predict 1
  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)}, {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U {@code {1}} = {@code {1}} => stop and predict 1, can announce ambiguity {@code {1,2}}
  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)}, {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U {@code {2}} = {@code {1,2}} => continue
  • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)}, {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U {@code {3}} = {@code {1,3}} => continue

EXACT AMBIGUITY DETECTION

If all states report the same conflicting set of alternatives, then we know we have the exact ambiguity set:

|A_i| > 1 and A_i = A_j for all i, j.

In other words, we continue examining lookahead until all {@code A_i} have more than one alternative and all {@code A_i} are the same. If {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate because the resolved set is {@code {1}}. To determine what the real ambiguity is, we have to know whether the ambiguity is between one and two or one and three, so we keep going. When exact ambiguity detection is needed, we can only stop prediction when the sets look like {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
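A sketch of that termination condition (the deleted PredictionMode helpers below perform the same check over *BitSet values; sorted int slices stand in for those here):

package main

import "fmt"

// exactAmbiguity reports whether every alternative subset conflicts (has more
// than one alternative) and all subsets are identical.
func exactAmbiguity(altsets [][]int) bool {
	if len(altsets) == 0 {
		return false
	}
	first := fmt.Sprint(altsets[0])
	for _, alts := range altsets {
		if len(alts) < 2 {
			return false // some state already resolved to a single alternative
		}
		if fmt.Sprint(alts) != first {
			return false // e.g. {1,2} vs {1,3}: the real ambiguity is still unknown
		}
	}
	return true
}

func main() {
	fmt.Println(exactAmbiguity([][]int{{1, 2}, {1, 2}})) // true: ambiguity is exactly {1,2}
	fmt.Println(exactAmbiguity([][]int{{1, 2}, {1, 3}})) // false: keep examining lookahead
}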

-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) -} - -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) -} - -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() == 1 { - return true - } - } - return false -} - -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() > 1 { - return true - } - } - return false -} - -// Determines if every alternative subset in {@code altsets} is equivalent. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if first == nil { - first = alts - } else if alts != first { - return false - } - } - - return true -} - -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. -// -// @param altsets a collection of alternative subsets -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) - if all.length() == 1 { - return all.minValue() - } - - return ATNInvalidAltNumber -} - -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. -// -// @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() - for _, alts := range altsets { - all.or(alts) - } - return all -} - -// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. -// For each configuration {@code c} in {@code configs}: -// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-// 
-func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst) - - for _, c := range configs.GetItems() { - - alts, ok := configToAlts.Get(c) - if !ok { - alts = NewBitSet() - configToAlts.Put(c, alts) - } - alts.add(c.GetAlt()) - } - - return configToAlts.Values() -} - -// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each -// configuration {@code c} in {@code configs}: -// -//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-// 
-func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { - m := NewAltDict() - - for _, c := range configs.GetItems() { - alts := m.Get(c.GetState().String()) - if alts == nil { - alts = NewBitSet() - m.put(c.GetState().String(), alts) - } - alts.(*BitSet).add(c.GetAlt()) - } - return m -} - -func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { - values := PredictionModeGetStateToAltMap(configs).values() - for i := 0; i < len(values); i++ { - if values[i].(*BitSet).length() == 1 { - return true - } - } - return false -} - -func PredictionModegetSingleViableAlt(altsets []*BitSet) int { - result := ATNInvalidAltNumber - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - minAlt := alts.minValue() - if result == ATNInvalidAltNumber { - result = minAlt - } else if result != minAlt { // more than 1 viable alt - return ATNInvalidAltNumber - } - } - return result -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go deleted file mode 100644 index bfe542d09..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strings" - - "strconv" -) - -type Recognizer interface { - GetLiteralNames() []string - GetSymbolicNames() []string - GetRuleNames() []string - - Sempred(RuleContext, int, int) bool - Precpred(RuleContext, int) bool - - GetState() int - SetState(int) - Action(RuleContext, int, int) - AddErrorListener(ErrorListener) - RemoveErrorListeners() - GetATN() *ATN - GetErrorListenerDispatch() ErrorListener -} - -type BaseRecognizer struct { - listeners []ErrorListener - state int - - RuleNames []string - LiteralNames []string - SymbolicNames []string - GrammarFileName string -} - -func NewBaseRecognizer() *BaseRecognizer { - rec := new(BaseRecognizer) - rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} - rec.state = -1 - return rec -} - -var tokenTypeMapCache = make(map[string]int) -var ruleIndexMapCache = make(map[string]int) - -func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.12.0" - if runtimeVersion != toolVersion { - fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) - } -} - -func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { - panic("action not implemented on Recognizer!") -} - -func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { - b.listeners = append(b.listeners, listener) -} - -func (b *BaseRecognizer) RemoveErrorListeners() { - b.listeners = make([]ErrorListener, 0) -} - -func (b *BaseRecognizer) GetRuleNames() []string { - return b.RuleNames -} - -func (b *BaseRecognizer) GetTokenNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetSymbolicNames() []string { - return b.SymbolicNames -} - -func (b *BaseRecognizer) GetLiteralNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetState() int { - return b.state -} - -func (b *BaseRecognizer) SetState(v int) { - b.state = v -} - -//func (b *Recognizer) GetTokenTypeMap() { -// var tokenNames = b.GetTokenNames() -// if (tokenNames==nil) { -// panic("The current recognizer does not provide a 
list of token names.") -// } -// var result = tokenTypeMapCache[tokenNames] -// if(result==nil) { -// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) -// result.EOF = TokenEOF -// tokenTypeMapCache[tokenNames] = result -// } -// return result -//} - -// Get a map from rule names to rule indexes. -// -//

Used for XPath and tree pattern compilation.

-func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { - - panic("Method not defined!") - // var ruleNames = b.GetRuleNames() - // if (ruleNames==nil) { - // panic("The current recognizer does not provide a list of rule names.") - // } - // - // var result = ruleIndexMapCache[ruleNames] - // if(result==nil) { - // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) - // ruleIndexMapCache[ruleNames] = result - // } - // return result -} - -func (b *BaseRecognizer) GetTokenType(tokenName string) int { - panic("Method not defined!") - // var ttype = b.GetTokenTypeMap()[tokenName] - // if (ttype !=nil) { - // return ttype - // } else { - // return TokenInvalidType - // } -} - -//func (b *Recognizer) GetTokenTypeMap() map[string]int { -// Vocabulary vocabulary = getVocabulary() -// -// Synchronized (tokenTypeMapCache) { -// Map result = tokenTypeMapCache.Get(vocabulary) -// if (result == null) { -// result = new HashMap() -// for (int i = 0; i < GetATN().maxTokenType; i++) { -// String literalName = vocabulary.getLiteralName(i) -// if (literalName != null) { -// result.put(literalName, i) -// } -// -// String symbolicName = vocabulary.GetSymbolicName(i) -// if (symbolicName != null) { -// result.put(symbolicName, i) -// } -// } -// -// result.put("EOF", Token.EOF) -// result = Collections.unmodifiableMap(result) -// tokenTypeMapCache.put(vocabulary, result) -// } -// -// return result -// } -//} - -// What is the error header, normally line/character position information?// -func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { - line := e.GetOffendingToken().GetLine() - column := e.GetOffendingToken().GetColumn() - return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) -} - -// How should a token be displayed in an error message? The default -// -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. -// -// @deprecated This method is not called by the ANTLR 4 Runtime. Specific -// implementations of {@link ANTLRErrorStrategy} may provide a similar -// feature when necessary. For example, see -// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. 
-func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - - return "'" + s + "'" -} - -func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { - return NewProxyErrorListener(b.listeners) -} - -// subclass needs to override these if there are sempreds or actions -// that the ATN interp needs to execute -func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { - return true -} - -func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { - return true -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go deleted file mode 100644 index 210699ba2..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// A rule context is a record of a single rule invocation. It knows -// which context invoked it, if any. If there is no parent context, then -// naturally the invoking state is not valid. The parent link -// provides a chain upwards from the current rule invocation to the root -// of the invocation tree, forming a stack. We actually carry no -// information about the rule associated with b context (except -// when parsing). We keep only the state number of the invoking state from -// the ATN submachine that invoked b. Contrast b with the s -// pointer inside ParserRuleContext that tracks the current state -// being "executed" for the current rule. -// -// The parent contexts are useful for computing lookahead sets and -// getting error information. -// -// These objects are used during parsing and prediction. -// For the special case of parsers, we use the subclass -// ParserRuleContext. -// -// @see ParserRuleContext -// - -type RuleContext interface { - RuleNode - - GetInvokingState() int - SetInvokingState(int) - - GetRuleIndex() int - IsEmpty() bool - - GetAltNumber() int - SetAltNumber(altNumber int) - - String([]string, RuleContext) string -} - -type BaseRuleContext struct { - parentCtx RuleContext - invokingState int - RuleIndex int -} - -func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { - - rn := new(BaseRuleContext) - - // What context invoked b rule? - rn.parentCtx = parent - - // What state invoked the rule associated with b context? - // The "return address" is the followState of invokingState - // If parent is nil, b should be -1. 
- if parent == nil { - rn.invokingState = -1 - } else { - rn.invokingState = invokingState - } - - return rn -} - -func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { - return b -} - -func (b *BaseRuleContext) SetParent(v Tree) { - if v == nil { - b.parentCtx = nil - } else { - b.parentCtx = v.(RuleContext) - } -} - -func (b *BaseRuleContext) GetInvokingState() int { - return b.invokingState -} - -func (b *BaseRuleContext) SetInvokingState(t int) { - b.invokingState = t -} - -func (b *BaseRuleContext) GetRuleIndex() int { - return b.RuleIndex -} - -func (b *BaseRuleContext) GetAltNumber() int { - return ATNInvalidAltNumber -} - -func (b *BaseRuleContext) SetAltNumber(altNumber int) {} - -// A context is empty if there is no invoking state meaning nobody call -// current context. -func (b *BaseRuleContext) IsEmpty() bool { - return b.invokingState == -1 -} - -// Return the combined text of all child nodes. This method only considers -// tokens which have been added to the parse tree. -//

-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. -// - -func (b *BaseRuleContext) GetParent() Tree { - return b.parentCtx -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go deleted file mode 100644 index b3e38af34..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go +++ /dev/null @@ -1,659 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "fmt" -) - -// -// Useful for rewriting out a buffered input token stream after doing some -// augmentation or other manipulations on it. - -//

-// You can insert stuff, replace, and delete chunks. Note that the operations are done lazily--only if you convert the buffer to a {@link String} with {@link TokenStream#getText()}. This is very efficient because you are not moving data around all the time. As the buffer of tokens is converted to strings, the {@link #getText()} method(s) scan the input token stream and check to see if there is an operation at the current index. If so, the operation is done and then normal {@link String} rendering continues on the buffer. This is like having multiple Turing machine instruction streams (programs) operating on a single input tape. :)
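For the Go runtime, the equivalent of the Java snippets further below looks roughly like this (a sketch: the generated lexer is assumed to come from your own grammar, and only the rewriter calls — NewTokenStreamRewriter, InsertAfterDefault, GetTextDefault — are the API from the file being replaced here):

package rewritedemo

import "github.com/antlr4-go/antlr/v4"

// patchStream queues one lazy edit and renders the rewritten text. The
// antlr.Lexer is expected to come from generated parser code, not shown here.
func patchStream(lexer antlr.Lexer) string {
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	tokens.Fill() // the rewriter only sees tokens already in the buffer

	rewriter := antlr.NewTokenStreamRewriter(tokens)
	rewriter.InsertAfterDefault(0, " /* patched */") // queued, nothing moves yet
	return rewriter.GetTextDefault()                 // edits are applied lazily here
}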

-//

-// This rewriter makes no modifications to the token stream. It does not ask the stream to fill itself up nor does it advance the input cursor. The token stream {@link TokenStream#index()} will return the same value before and after any {@link #getText()} call.
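A quick way to convince yourself of that property in Go (sketch; assumes a token stream already filled by generated lexer code):

package rewritedemo

import "github.com/antlr4-go/antlr/v4"

// indexUnchanged renders text through a rewriter and checks that the stream's
// cursor did not move, as the paragraph above promises.
func indexUnchanged(tokens antlr.TokenStream) bool {
	rewriter := antlr.NewTokenStreamRewriter(tokens)
	before := tokens.Index()
	_ = rewriter.GetTextDefault() // getText never advances the input cursor
	return tokens.Index() == before
}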

- -//

-// The rewriter only works on tokens that you have in the buffer and ignores the current input cursor. If you are buffering tokens on-demand, calling {@link #getText()} halfway through the input will only do rewrites for those tokens in the first half of the file.

- -//

-// Since the operations are done lazily at {@link #getText}-time, operations do not screw up the token index values. That is, an insert operation at token index {@code i} does not change the index values for tokens {@code i}+1..n-1.

- -//

-// Because operations never actually alter the buffer, you may always get the original token stream back without undoing anything. Since the instructions are queued up, you can easily simulate transactions and roll back any changes if there is an error just by removing instructions. For example,

- -//
-// CharStream input = new ANTLRFileStream("input");
-// TLexer lex = new TLexer(input);
-// CommonTokenStream tokens = new CommonTokenStream(lex);
-// T parser = new T(tokens);
-// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
-// parser.startRule();
-// 
- -//

-// Then in the rules, you can execute (assuming rewriter is visible):

- -//
-// Token t,u;
-// ...
-// rewriter.insertAfter(t, "text to put after t");}
-// rewriter.insertAfter(u, "text after u");}
-// System.out.println(rewriter.getText());
-// 
- -//

-// You can also have multiple "instruction streams" and get multiple rewrites from a single pass over the input. Just name the instruction streams and use that name again when printing the buffer. This could be useful for generating a C file and also its header file--all from the same buffer:

- -//
-// rewriter.insertAfter("pass1", t, "text to put after t");}
-// rewriter.insertAfter("pass2", u, "text after u");}
-// System.out.println(rewriter.getText("pass1"));
-// System.out.println(rewriter.getText("pass2"));
-// 
- -//

-// If you don't use named rewrite streams, a "default" stream is used as the first example shows.

- -const ( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 -) - -// Define the rewrite operation hierarchy - -type RewriteOperation interface { - // Execute the rewrite operation by possibly adding to the buffer. - // Return the index of the next token to operate on. - Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream - SetInstructionIndex(val int) - SetIndex(int) - SetText(string) - SetOpName(string) - SetTokens(TokenStream) -} - -type BaseRewriteOperation struct { - //Current index of rewrites list - instruction_index int - //Token buffer index - index int - //Substitution text - text string - //Actual operation name - op_name string - //Pointer to token steam - tokens TokenStream -} - -func (op *BaseRewriteOperation) GetInstructionIndex() int { - return op.instruction_index -} - -func (op *BaseRewriteOperation) GetIndex() int { - return op.index -} - -func (op *BaseRewriteOperation) GetText() string { - return op.text -} - -func (op *BaseRewriteOperation) GetOpName() string { - return op.op_name -} - -func (op *BaseRewriteOperation) GetTokens() TokenStream { - return op.tokens -} - -func (op *BaseRewriteOperation) SetInstructionIndex(val int) { - op.instruction_index = val -} - -func (op *BaseRewriteOperation) SetIndex(val int) { - op.index = val -} - -func (op *BaseRewriteOperation) SetText(val string) { - op.text = val -} - -func (op *BaseRewriteOperation) SetOpName(val string) { - op.op_name = val -} - -func (op *BaseRewriteOperation) SetTokens(val TokenStream) { - op.tokens = val -} - -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int { - return op.index -} - -func (op *BaseRewriteOperation) String() string { - return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, - op.tokens.Get(op.GetIndex()), - op.text, - ) - -} - -type InsertBeforeOp struct { - BaseRewriteOperation -} - -func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { - return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index, - text: text, - op_name: "InsertBeforeOp", - tokens: stream, - }} -} - -func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF { - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index + 1 -} - -func (op *InsertBeforeOp) String() string { - return op.BaseRewriteOperation.String() -} - -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - -type InsertAfterOp struct { - BaseRewriteOperation -} - -func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { - return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index + 1, - text: text, - tokens: stream, - }} -} - -func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF { - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index + 1 -} - -func (op *InsertAfterOp) String() string { - return op.BaseRewriteOperation.String() -} - -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp -// instructions. 
-type ReplaceOp struct { - BaseRewriteOperation - LastIndex int -} - -func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { - return &ReplaceOp{ - BaseRewriteOperation: BaseRewriteOperation{ - index: from, - text: text, - op_name: "ReplaceOp", - tokens: stream, - }, - LastIndex: to, - } -} - -func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int { - if op.text != "" { - buffer.WriteString(op.text) - } - return op.LastIndex + 1 -} - -func (op *ReplaceOp) String() string { - if op.text == "" { - return fmt.Sprintf("", - op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) - } - return fmt.Sprintf("", - op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) -} - -type TokenStreamRewriter struct { - //Our source stream - tokens TokenStream - // You may have multiple, named streams of rewrite operations. - // I'm calling these things "programs." - // Maps String (name) → rewrite (List) - programs map[string][]RewriteOperation - last_rewrite_token_indexes map[string]int -} - -func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter { - return &TokenStreamRewriter{ - tokens: tokens, - programs: map[string][]RewriteOperation{ - Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size), - }, - last_rewrite_token_indexes: map[string]int{}, - } -} - -func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream { - return tsr.tokens -} - -// Rollback the instruction stream for a program so that -// the indicated instruction (via instructionIndex) is no -// longer in the stream. UNTESTED! -func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) { - is, ok := tsr.programs[program_name] - if ok { - tsr.programs[program_name] = is[Min_Token_Index:instruction_index] - } -} - -func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) { - tsr.Rollback(Default_Program_Name, instruction_index) -} - -// Reset the program so that no instructions exist -func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) { - tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included -} - -func (tsr *TokenStreamRewriter) DeleteProgramDefault() { - tsr.DeleteProgram(Default_Program_Name) -} - -func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) { - // to insert after, just insert before next index (even if past end) - var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) { - tsr.InsertAfter(Default_Program_Name, index, text) -} - -func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) { - tsr.InsertAfter(program_name, token.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) { - var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) { - tsr.InsertBefore(Default_Program_Name, index, text) -} - -func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) { - tsr.InsertBefore(program_name, token.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) Replace(program_name 
string, from, to int, text string) { - if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { - panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", - from, to, tsr.tokens.Size())) - } - var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { - tsr.Replace(Default_Program_Name, from, to, text) -} - -func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { - tsr.ReplaceDefault(index, index, text) -} - -func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) { - tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { - tsr.ReplaceToken(Default_Program_Name, from, to, text) -} - -func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { - tsr.ReplaceTokenDefault(index, index, text) -} - -func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) { - tsr.Replace(program_name, from, to, "") -} - -func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { - tsr.Delete(Default_Program_Name, from, to) -} - -func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { - tsr.DeleteDefault(index, index) -} - -func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) { - tsr.ReplaceToken(program_name, from, to, "") -} - -func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { - tsr.DeleteToken(Default_Program_Name, from, to) -} - -func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int { - i, ok := tsr.last_rewrite_token_indexes[program_name] - if !ok { - return -1 - } - return i -} - -func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { - return tsr.GetLastRewriteTokenIndex(Default_Program_Name) -} - -func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) { - tsr.last_rewrite_token_indexes[program_name] = i -} - -func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { - is := make([]RewriteOperation, 0, Program_Init_Size) - tsr.programs[name] = is - return is -} - -func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) { - is := tsr.GetProgram(name) - is = append(is, op) - tsr.programs[name] = is -} - -func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { - is, ok := tsr.programs[name] - if !ok { - is = tsr.InitializeProgram(name) - } - return is -} - -// Return the text from the original tokens altered per the -// instructions given to this rewriter. -func (tsr *TokenStreamRewriter) GetTextDefault() string { - return tsr.GetText( - Default_Program_Name, - NewInterval(0, tsr.tokens.Size()-1)) -} - -// Return the text from the original tokens altered per the -// instructions given to this rewriter. 
-func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string { - rewrites := tsr.programs[program_name] - start := interval.Start - stop := interval.Stop - // ensure start/end are in range - stop = min(stop, tsr.tokens.Size()-1) - start = max(start, 0) - if rewrites == nil || len(rewrites) == 0 { - return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute - } - buf := bytes.Buffer{} - // First, optimize instruction stream - indexToOp := reduceToSingleOperationPerIndex(rewrites) - // Walk buffer, executing instructions and emitting tokens - for i := start; i <= stop && i < tsr.tokens.Size(); { - op := indexToOp[i] - delete(indexToOp, i) // remove so any left have index size-1 - t := tsr.tokens.Get(i) - if op == nil { - // no operation at that index, just dump token - if t.GetTokenType() != TokenEOF { - buf.WriteString(t.GetText()) - } - i++ // move to next token - } else { - i = op.Execute(&buf) // execute operation and skip - } - } - // include stuff after end if it's last index in buffer - // So, if they did an insertAfter(lastValidIndex, "foo"), include - // foo if end==lastValidIndex. - if stop == tsr.tokens.Size()-1 { - // Scan any remaining operations after last token - // should be included (they will be inserts). - for _, op := range indexToOp { - if op.GetIndex() >= tsr.tokens.Size()-1 { - buf.WriteString(op.GetText()) - } - } - } - return buf.String() -} - -// We need to combine operations and report invalid operations (like -// overlapping replaces that are not completed nested). Inserts to -// same index need to be combined etc... Here are the cases: -// -// I.i.u I.j.v leave alone, nonoverlapping -// I.i.u I.i.v combine: Iivu -// -// R.i-j.u R.x-y.v | i-j in x-y delete first R -// R.i-j.u R.i-j.v delete first R -// R.i-j.u R.x-y.v | x-y in i-j ERROR -// R.i-j.u R.x-y.v | boundaries overlap ERROR -// -// Delete special case of replace (text==null): -// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) -// -// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before -// we're not deleting i) -// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping -// R.x-y.v I.i.u | i in x-y ERROR -// R.x-y.v I.x.u R.x-y.uv (combine, delete I) -// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping -// -// I.i.u = insert u before op @ index i -// R.x-y.u = replace x-y indexed tokens with u -// -// First we need to examine replaces. For any replace op: -// -// 1. wipe out any insertions before op within that range. -// 2. Drop any replace op before that is contained completely within -// that range. -// 3. Throw exception upon boundary overlap with any previous replace. -// -// Then we can deal with inserts: -// -// 1. for any inserts to same index, combine even if not adjacent. -// 2. for any prior replace with same left boundary, combine this -// insert with replace and delete this replace. -// 3. throw exception if index in same range as previous replace -// -// Don't actually delete; make op null in list. Easier to walk list. -// Later we can throw as we add to index → op map. -// -// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the -// inserted stuff would be before the replace range. But, if you -// add tokens in front of a method body '{' and then delete the method -// body, I think the stuff before the '{' you added should disappear too. -// -// Return a map from token index to operation. 
-func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { - // WALK REPLACES - for i := 0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil { - continue - } - rop, ok := op.(*ReplaceOp) - if !ok { - continue - } - // Wipe prior inserts within range - for j := 0; j < i && j < len(rewrites); j++ { - if iop, ok := rewrites[j].(*InsertBeforeOp); ok { - if iop.index == rop.index { - // E.g., insert before 2, delete 2..2; update replace - // text to include insert before, kill insert - rewrites[iop.instruction_index] = nil - if rop.text != "" { - rop.text = iop.text + rop.text - } else { - rop.text = iop.text - } - } else if iop.index > rop.index && iop.index <= rop.LastIndex { - // delete insert as it's a no-op. - rewrites[iop.instruction_index] = nil - } - } - } - // Drop any prior replaces contained within - for j := 0; j < i && j < len(rewrites); j++ { - if prevop, ok := rewrites[j].(*ReplaceOp); ok { - if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { - // delete replace as it's a no-op. - rewrites[prevop.instruction_index] = nil - continue - } - // throw exception unless disjoint or identical - disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex - // Delete special case of replace (text==null): - // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) - if prevop.text == "" && rop.text == "" && !disjoint { - rewrites[prevop.instruction_index] = nil - rop.index = min(prevop.index, rop.index) - rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) - println("new rop" + rop.String()) //TODO: remove console write, taken from Java version - } else if !disjoint { - panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) - } - } - } - } - // WALK INSERTS - for i := 0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil { - continue - } - //hack to replicate inheritance in composition - _, iok := rewrites[i].(*InsertBeforeOp) - _, aok := rewrites[i].(*InsertAfterOp) - if !iok && !aok { - continue - } - iop := rewrites[i] - // combine current insert with prior if any at same index - // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic - for j := 0; j < i && j < len(rewrites); j++ { - if nextIop, ok := rewrites[j].(*InsertAfterOp); ok { - if nextIop.index == iop.GetIndex() { - iop.SetText(nextIop.text + iop.GetText()) - rewrites[j] = nil - } - } - if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { - if prevIop.index == iop.GetIndex() { - iop.SetText(iop.GetText() + prevIop.text) - rewrites[prevIop.instruction_index] = nil - } - } - } - // look for replaces where iop.index is in range; error - for j := 0; j < i && j < len(rewrites); j++ { - if rop, ok := rewrites[j].(*ReplaceOp); ok { - if iop.GetIndex() == rop.index { - rop.text = iop.GetText() + rop.text - rewrites[i] = nil - continue - } - if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex { - panic("insert op " + iop.String() + " within boundaries of previous " + rop.String()) - } - } - } - } - m := map[int]RewriteOperation{} - for i := 0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil { - continue - } - if _, ok := m[op.GetIndex()]; ok { - panic("should only be one op per index") - } - m[op.GetIndex()] = op - } - return m -} - -/* - Quick fixing Go lack of overloads -*/ - -func max(a, b int) int { - if a > b { - return a - } else { - return b - } -} -func min(a, b int) int { - if a < b { - return a - } else { - return b - } -} diff 
--git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go deleted file mode 100644 index 36be4f733..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -// atom, set, epsilon, action, predicate, rule transitions. -// -//

This is a one way link. It emanates from a state (usually via a list of transitions) and has a target state.

Since we never have to change the ATN transitions once we construct it, we can fix these transitions as specific classes. The DFA transitions, on the other hand, need to update their labels as transitions are added to the states. We'll use the term Edge for the DFA to distinguish them from ATN transitions.

- -type Transition interface { - getTarget() ATNState - setTarget(ATNState) - getIsEpsilon() bool - getLabel() *IntervalSet - getSerializationType() int - Matches(int, int, int) bool -} - -type BaseTransition struct { - target ATNState - isEpsilon bool - label int - intervalSet *IntervalSet - serializationType int -} - -func NewBaseTransition(target ATNState) *BaseTransition { - - if target == nil { - panic("target cannot be nil.") - } - - t := new(BaseTransition) - - t.target = target - // Are we epsilon, action, sempred? - t.isEpsilon = false - t.intervalSet = nil - - return t -} - -func (t *BaseTransition) getTarget() ATNState { - return t.target -} - -func (t *BaseTransition) setTarget(s ATNState) { - t.target = s -} - -func (t *BaseTransition) getIsEpsilon() bool { - return t.isEpsilon -} - -func (t *BaseTransition) getLabel() *IntervalSet { - return t.intervalSet -} - -func (t *BaseTransition) getSerializationType() int { - return t.serializationType -} - -func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - panic("Not implemented") -} - -const ( - TransitionEPSILON = 1 - TransitionRANGE = 2 - TransitionRULE = 3 - TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? - TransitionATOM = 5 - TransitionACTION = 6 - TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 - TransitionNOTSET = 8 - TransitionWILDCARD = 9 - TransitionPRECEDENCE = 10 -) - -var TransitionserializationNames = []string{ - "INVALID", - "EPSILON", - "RANGE", - "RULE", - "PREDICATE", - "ATOM", - "ACTION", - "SET", - "NOT_SET", - "WILDCARD", - "PRECEDENCE", -} - -//var TransitionserializationTypes struct { -// EpsilonTransition int -// RangeTransition int -// RuleTransition int -// PredicateTransition int -// AtomTransition int -// ActionTransition int -// SetTransition int -// NotSetTransition int -// WildcardTransition int -// PrecedencePredicateTransition int -//}{ -// TransitionEPSILON, -// TransitionRANGE, -// TransitionRULE, -// TransitionPREDICATE, -// TransitionATOM, -// TransitionACTION, -// TransitionSET, -// TransitionNOTSET, -// TransitionWILDCARD, -// TransitionPRECEDENCE -//} - -// TODO: make all transitions sets? no, should remove set edges -type AtomTransition struct { - *BaseTransition -} - -func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { - - t := new(AtomTransition) - t.BaseTransition = NewBaseTransition(target) - - t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
- t.intervalSet = t.makeLabel() - t.serializationType = TransitionATOM - - return t -} - -func (t *AtomTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addOne(t.label) - return s -} - -func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.label == symbol -} - -func (t *AtomTransition) String() string { - return strconv.Itoa(t.label) -} - -type RuleTransition struct { - *BaseTransition - - followState ATNState - ruleIndex, precedence int -} - -func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { - - t := new(RuleTransition) - t.BaseTransition = NewBaseTransition(ruleStart) - - t.ruleIndex = ruleIndex - t.precedence = precedence - t.followState = followState - t.serializationType = TransitionRULE - t.isEpsilon = true - - return t -} - -func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -type EpsilonTransition struct { - *BaseTransition - - outermostPrecedenceReturn int -} - -func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { - - t := new(EpsilonTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionEPSILON - t.isEpsilon = true - t.outermostPrecedenceReturn = outermostPrecedenceReturn - return t -} - -func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *EpsilonTransition) String() string { - return "epsilon" -} - -type RangeTransition struct { - *BaseTransition - - start, stop int -} - -func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { - - t := new(RangeTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionRANGE - t.start = start - t.stop = stop - t.intervalSet = t.makeLabel() - return t -} - -func (t *RangeTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addRange(t.start, t.stop) - return s -} - -func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= t.start && symbol <= t.stop -} - -func (t *RangeTransition) String() string { - var sb strings.Builder - sb.WriteByte('\'') - sb.WriteRune(rune(t.start)) - sb.WriteString("'..'") - sb.WriteRune(rune(t.stop)) - sb.WriteByte('\'') - return sb.String() -} - -type AbstractPredicateTransition interface { - Transition - IAbstractPredicateTransitionFoo() -} - -type BaseAbstractPredicateTransition struct { - *BaseTransition -} - -func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { - - t := new(BaseAbstractPredicateTransition) - t.BaseTransition = NewBaseTransition(target) - - return t -} - -func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} - -type PredicateTransition struct { - *BaseAbstractPredicateTransition - - isCtxDependent bool - ruleIndex, predIndex int -} - -func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { - - t := new(PredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPREDICATE - t.ruleIndex = ruleIndex - t.predIndex = predIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PredicateTransition) getPredicate() *Predicate { - return 
NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) -} - -func (t *PredicateTransition) String() string { - return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) -} - -type ActionTransition struct { - *BaseTransition - - isCtxDependent bool - ruleIndex, actionIndex, predIndex int -} - -func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { - - t := new(ActionTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionACTION - t.ruleIndex = ruleIndex - t.actionIndex = actionIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *ActionTransition) String() string { - return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) -} - -type SetTransition struct { - *BaseTransition -} - -func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { - - t := new(SetTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionSET - if set != nil { - t.intervalSet = set - } else { - t.intervalSet = NewIntervalSet() - t.intervalSet.addOne(TokenInvalidType) - } - - return t -} - -func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.intervalSet.contains(symbol) -} - -func (t *SetTransition) String() string { - return t.intervalSet.String() -} - -type NotSetTransition struct { - *SetTransition -} - -func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { - - t := new(NotSetTransition) - - t.SetTransition = NewSetTransition(target, set) - - t.serializationType = TransitionNOTSET - - return t -} - -func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) -} - -func (t *NotSetTransition) String() string { - return "~" + t.intervalSet.String() -} - -type WildcardTransition struct { - *BaseTransition -} - -func NewWildcardTransition(target ATNState) *WildcardTransition { - - t := new(WildcardTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionWILDCARD - return t -} - -func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol -} - -func (t *WildcardTransition) String() string { - return "." 
-} - -type PrecedencePredicateTransition struct { - *BaseAbstractPredicateTransition - - precedence int -} - -func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { - - t := new(PrecedencePredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPRECEDENCE - t.precedence = precedence - t.isEpsilon = true - - return t -} - -func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { - return NewPrecedencePredicate(t.precedence) -} - -func (t *PrecedencePredicateTransition) String() string { - return fmt.Sprint(t.precedence) + " >= _p" -} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go deleted file mode 100644 index 85b4f137b..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The basic notion of a tree has a parent, a payload, and a list of children. -// It is the most abstract interface for all the trees used by ANTLR. -/// - -var TreeInvalidInterval = NewInterval(-1, -2) - -type Tree interface { - GetParent() Tree - SetParent(Tree) - GetPayload() interface{} - GetChild(i int) Tree - GetChildCount() int - GetChildren() []Tree -} - -type SyntaxTree interface { - Tree - - GetSourceInterval() *Interval -} - -type ParseTree interface { - SyntaxTree - - Accept(Visitor ParseTreeVisitor) interface{} - GetText() string - - ToStringTree([]string, Recognizer) string -} - -type RuleNode interface { - ParseTree - - GetRuleContext() RuleContext - GetBaseRuleContext() *BaseRuleContext -} - -type TerminalNode interface { - ParseTree - - GetSymbol() Token -} - -type ErrorNode interface { - TerminalNode - - errorNode() -} - -type ParseTreeVisitor interface { - Visit(tree ParseTree) interface{} - VisitChildren(node RuleNode) interface{} - VisitTerminal(node TerminalNode) interface{} - VisitErrorNode(node ErrorNode) interface{} -} - -type BaseParseTreeVisitor struct{} - -var _ ParseTreeVisitor = &BaseParseTreeVisitor{} - -func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } -func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } - -// TODO -//func (this ParseTreeVisitor) Visit(ctx) { -// if (Utils.isArray(ctx)) { -// self := this -// return ctx.map(function(child) { return VisitAtom(self, child)}) -// } else { -// return VisitAtom(this, ctx) -// } -//} -// -//func VisitAtom(Visitor, ctx) { -// if (ctx.parser == nil) { //is terminal -// return -// } -// -// name := ctx.parser.ruleNames[ctx.ruleIndex] -// funcName := "Visit" + Utils.titleCase(name) -// -// return Visitor[funcName](ctx) -//} - -type ParseTreeListener interface { - VisitTerminal(node TerminalNode) - VisitErrorNode(node ErrorNode) - EnterEveryRule(ctx ParserRuleContext) - ExitEveryRule(ctx ParserRuleContext) -} - -type BaseParseTreeListener struct{} - -var _ 
ParseTreeListener = &BaseParseTreeListener{} - -func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {} -func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {} -func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {} -func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {} - -type TerminalNodeImpl struct { - parentCtx RuleContext - - symbol Token -} - -var _ TerminalNode = &TerminalNodeImpl{} - -func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { - tn := new(TerminalNodeImpl) - - tn.parentCtx = nil - tn.symbol = symbol - - return tn -} - -func (t *TerminalNodeImpl) GetChild(i int) Tree { - return nil -} - -func (t *TerminalNodeImpl) GetChildren() []Tree { - return nil -} - -func (t *TerminalNodeImpl) SetChildren(tree []Tree) { - panic("Cannot set children on terminal node") -} - -func (t *TerminalNodeImpl) GetSymbol() Token { - return t.symbol -} - -func (t *TerminalNodeImpl) GetParent() Tree { - return t.parentCtx -} - -func (t *TerminalNodeImpl) SetParent(tree Tree) { - t.parentCtx = tree.(RuleContext) -} - -func (t *TerminalNodeImpl) GetPayload() interface{} { - return t.symbol -} - -func (t *TerminalNodeImpl) GetSourceInterval() *Interval { - if t.symbol == nil { - return TreeInvalidInterval - } - tokenIndex := t.symbol.GetTokenIndex() - return NewInterval(tokenIndex, tokenIndex) -} - -func (t *TerminalNodeImpl) GetChildCount() int { - return 0 -} - -func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitTerminal(t) -} - -func (t *TerminalNodeImpl) GetText() string { - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) String() string { - if t.symbol.GetTokenType() == TokenEOF { - return "" - } - - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string { - return t.String() -} - -// Represents a token that was consumed during reSynchronization -// rather than during a valid Match operation. For example, -// we will create this kind of a node during single token insertion -// and deletion as well as during "consume until error recovery set" -// upon no viable alternative exceptions. - -type ErrorNodeImpl struct { - *TerminalNodeImpl -} - -var _ ErrorNode = &ErrorNodeImpl{} - -func NewErrorNodeImpl(token Token) *ErrorNodeImpl { - en := new(ErrorNodeImpl) - en.TerminalNodeImpl = NewTerminalNodeImpl(token) - return en -} - -func (e *ErrorNodeImpl) errorNode() {} - -func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitErrorNode(e) -} - -type ParseTreeWalker struct { -} - -func NewParseTreeWalker() *ParseTreeWalker { - return new(ParseTreeWalker) -} - -// Performs a walk on the given parse tree starting at the root and going down recursively -// with depth-first search. On each node, EnterRule is called before -// recursively walking down into child nodes, then -// ExitRule is called after the recursive call to wind up. 
-func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { - switch tt := t.(type) { - case ErrorNode: - listener.VisitErrorNode(tt) - case TerminalNode: - listener.VisitTerminal(tt) - default: - p.EnterRule(listener, t.(RuleNode)) - for i := 0; i < t.GetChildCount(); i++ { - child := t.GetChild(i) - p.Walk(listener, child) - } - p.ExitRule(listener, t.(RuleNode)) - } -} - -// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} -// then by triggering the event specific to the given parse tree node -func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) -} - -// Exits a grammar rule by first triggering the event specific to the given parse tree node -// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} -func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) -} - -var ParseTreeWalkerDefault = NewParseTreeWalker() diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go b/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go deleted file mode 100644 index c9bd6751e..000000000 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go +++ /dev/null @@ -1,235 +0,0 @@ -package antlr - -import "math" - -const ( - _initalCapacity = 16 - _initalBucketCapacity = 8 - _loadFactor = 0.75 -) - -type Set interface { - Add(value interface{}) (added interface{}) - Len() int - Get(value interface{}) (found interface{}) - Contains(value interface{}) bool - Values() []interface{} - Each(f func(interface{}) bool) -} - -type array2DHashSet struct { - buckets [][]Collectable[any] - hashcodeFunction func(interface{}) int - equalsFunction func(Collectable[any], Collectable[any]) bool - - n int // How many elements in set - threshold int // when to expand - - currentPrime int // jump by 4 primes each expand or whatever - initialBucketCapacity int -} - -func (as *array2DHashSet) Each(f func(interface{}) bool) { - if as.Len() < 1 { - return - } - - for _, bucket := range as.buckets { - for _, o := range bucket { - if o == nil { - break - } - if !f(o) { - return - } - } - } -} - -func (as *array2DHashSet) Values() []interface{} { - if as.Len() < 1 { - return nil - } - - values := make([]interface{}, 0, as.Len()) - as.Each(func(i interface{}) bool { - values = append(values, i) - return true - }) - return values -} - -func (as *array2DHashSet) Contains(value Collectable[any]) bool { - return as.Get(value) != nil -} - -func (as *array2DHashSet) Add(value Collectable[any]) interface{} { - if as.n > as.threshold { - as.expand() - } - return as.innerAdd(value) -} - -func (as *array2DHashSet) expand() { - old := as.buckets - - as.currentPrime += 4 - - var ( - newCapacity = len(as.buckets) << 1 - newTable = as.createBuckets(newCapacity) - newBucketLengths = make([]int, len(newTable)) - ) - - as.buckets = newTable - as.threshold = int(float64(newCapacity) * _loadFactor) - - for _, bucket := range old { - if bucket == nil { - continue - } - - for _, o := range bucket { - if o == nil { - break - } - - b := as.getBuckets(o) - bucketLength := newBucketLengths[b] - var newBucket []Collectable[any] - if bucketLength == 0 { - // new bucket - newBucket = as.createBucket(as.initialBucketCapacity) - newTable[b] = newBucket - } 
else { - newBucket = newTable[b] - if bucketLength == len(newBucket) { - // expand - newBucketCopy := make([]Collectable[any], len(newBucket)<<1) - copy(newBucketCopy[:bucketLength], newBucket) - newBucket = newBucketCopy - newTable[b] = newBucket - } - } - - newBucket[bucketLength] = o - newBucketLengths[b]++ - } - } -} - -func (as *array2DHashSet) Len() int { - return as.n -} - -func (as *array2DHashSet) Get(o Collectable[any]) interface{} { - if o == nil { - return nil - } - - b := as.getBuckets(o) - bucket := as.buckets[b] - if bucket == nil { // no bucket - return nil - } - - for _, e := range bucket { - if e == nil { - return nil // empty slot; not there - } - if as.equalsFunction(e, o) { - return e - } - } - - return nil -} - -func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} { - b := as.getBuckets(o) - - bucket := as.buckets[b] - - // new bucket - if bucket == nil { - bucket = as.createBucket(as.initialBucketCapacity) - bucket[0] = o - - as.buckets[b] = bucket - as.n++ - return o - } - - // look for it in bucket - for i := 0; i < len(bucket); i++ { - existing := bucket[i] - if existing == nil { // empty slot; not there, add. - bucket[i] = o - as.n++ - return o - } - - if as.equalsFunction(existing, o) { // found existing, quit - return existing - } - } - - // full bucket, expand and add to end - oldLength := len(bucket) - bucketCopy := make([]Collectable[any], oldLength<<1) - copy(bucketCopy[:oldLength], bucket) - bucket = bucketCopy - as.buckets[b] = bucket - bucket[oldLength] = o - as.n++ - return o -} - -func (as *array2DHashSet) getBuckets(value Collectable[any]) int { - hash := as.hashcodeFunction(value) - return hash & (len(as.buckets) - 1) -} - -func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] { - return make([][]Collectable[any], cap) -} - -func (as *array2DHashSet) createBucket(cap int) []Collectable[any] { - return make([]Collectable[any], cap) -} - -func newArray2DHashSetWithCap( - hashcodeFunction func(interface{}) int, - equalsFunction func(Collectable[any], Collectable[any]) bool, - initCap int, - initBucketCap int, -) *array2DHashSet { - if hashcodeFunction == nil { - hashcodeFunction = standardHashFunction - } - - if equalsFunction == nil { - equalsFunction = standardEqualsFunction - } - - ret := &array2DHashSet{ - hashcodeFunction: hashcodeFunction, - equalsFunction: equalsFunction, - - n: 0, - threshold: int(math.Floor(_initalCapacity * _loadFactor)), - - currentPrime: 1, - initialBucketCapacity: initBucketCap, - } - - ret.buckets = ret.createBuckets(initCap) - return ret -} - -func newArray2DHashSet( - hashcodeFunction func(interface{}) int, - equalsFunction func(Collectable[any], Collectable[any]) bool, -) *array2DHashSet { - return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity) -} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/constraint/vendor/github.com/antlr4-go/antlr/v4/.gitignore new file mode 100644 index 000000000..38ea34ff5 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/.gitignore @@ -0,0 +1,18 @@ +### Go template + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + + +# Go workspace file +go.work + +# No Goland stuff in this repo +.idea diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/constraint/vendor/github.com/antlr4-go/antlr/v4/LICENSE new file mode 100644 index 000000000..a22292eb5 --- /dev/null +++ 
b/constraint/vendor/github.com/antlr4-go/antlr/v4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/README.md b/constraint/vendor/github.com/antlr4-go/antlr/v4/README.md new file mode 100644 index 000000000..03e5b83eb --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/README.md @@ -0,0 +1,54 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/antlr4-go/antlr?style=flat-square)](https://goreportcard.com/report/github.com/antlr4-go/antlr) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/github.com/antlr4-go/antlr)](https://pkg.go.dev/github.com/antlr4-go/antlr) +[![Release](https://img.shields.io/github/v/release/antlr4-go/antlr?sort=semver&style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Release](https://img.shields.io/github/go-mod/go-version/antlr4-go/antlr?style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg?style=flat-square)](https://github.com/antlr4-go/antlr/commit-activity) +[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![GitHub stars](https://img.shields.io/github/stars/antlr4-go/antlr?style=flat-square&label=Star&maxAge=2592000)](https://GitHub.com/Naereen/StrapDown.js/stargazers/) +# ANTLR4 Go Runtime Module Repo + +IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here. + + - Do not submit PRs or any change requests to this repo + - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR + - This repo contains the Go runtime that your generated projects should import + +## Introduction + +This repo contains the official modules for the Go Runtime for ANTLR. 
It is a copy of the runtime maintained +at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create +the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here. + +The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically. + +### Why? + +The `go get` command is unable to retrieve the Go runtime when it is embedded so +deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime, +does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and +causes confusion. + +For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4` is retrieved by go get as: + +```sh +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc +) +``` + +Where you would expect to see: + +```sh +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0 +) +``` + +The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR and +from whence users can expect `go get` to behave as expected. + + +# Documentation +Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on +migrating existing projects to use the new module location and for information on how to use the Go runtime in +general. diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go new file mode 100644 index 000000000..3bb4fd7c4 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go @@ -0,0 +1,102 @@ +/* +Package antlr implements the Go version of the ANTLR 4 runtime. + +# The ANTLR Tool + +ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, +or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. +From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface +(or visitor) that makes it easy to respond to the recognition of phrases of interest. + +# Go Runtime + +At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime +source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of +ANTLR4 that it is compatible with (I.E. uses the /v4 path). + +However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root +of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code. +This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not +list the release tag such as @4.12.0 - this was confusing, to say the least. + +As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` +(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information, +which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs]. 
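As a minimal sketch of the relocation described above (assuming a consumer module named `example.com/myparser`), the dependency and import change look like this; `v4.13.0` is the release tag mentioned earlier, and the package identifier stays `antlr`, so existing `antlr.*` references compile unchanged:

```go
// go.mod (sketch):
//
//	module example.com/myparser
//
//	go 1.23
//
//	require github.com/antlr4-go/antlr/v4 v4.13.0

package main

// Only the import path changes; the package name is still "antlr".
import "github.com/antlr4-go/antlr/v4"

func main() {
	// NewInputStream is part of the runtime and wraps a plain string
	// as a CharStream for a generated lexer.
	input := antlr.NewInputStream("hello")
	_ = input
}
```

Running `go get github.com/antlr4-go/antlr/v4` against the new repo is what produces the clean, tagged requirement instead of the pseudo-version shown earlier.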
+ +This means that if you are using the source code without modules, you should also use the source code in the [new repo]. +Though we highly recommend that you use go modules, as they are now idiomatic for Go. + +I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good. + +Go runtime author: [Jim Idle] jimi@idle.ws + +# Code Generation + +ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a +runtime library, written specifically to support the generated code in the target language. This library is the +runtime for the Go target. + +To generate code for the go target, it is generally recommended to place the source grammar files in a package of +their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory +it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean +that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other +way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in +your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as +it was at any point in its history. + +Here is a general/recommended template for an ANTLR based recognizer in Go: + + . + ├── parser + │ ├── mygrammar.g4 + │ ├── antlr-4.12.1-complete.jar + │ ├── generate.go + │ └── generate.sh + ├── parsing - generated code goes here + │ └── error_listeners.go + ├── go.mod + ├── go.sum + ├── main.go + └── main_test.go + +Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in. + +The generate.go file then looks like this: + + package parser + + //go:generate ./generate.sh + +And the generate.sh file will look similar to this: + + #!/bin/sh + + alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4 + +depending on whether you want visitors or listeners or any other ANTLR options. Not that another option here +is to generate the code into a + +From the command line at the root of your source package (location of go.mo)d) you can then simply issue the command: + + go generate ./... + +Which will generate the code for the parser, and place it in the parsing package. You can then use the generated code +by importing the parsing package. + +There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like. + +# Copyright Notice + +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + +Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. 
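Building on the recommended layout above, a consuming `main.go` might look roughly like the sketch below. The grammar-specific names (the `parsing` package, `NewMygrammarLexer`, `NewMygrammarParser`, and the `Document` start rule) are placeholders for whatever ANTLR generates from your grammar; only the `antlr.*` calls are part of the runtime vendored in this change:

```go
package main

import (
	"github.com/antlr4-go/antlr/v4"

	// Hypothetical package produced by `go generate ./...` per the layout above.
	"example.com/myparser/parsing"
)

func main() {
	// Wrap the input text in a CharStream for the generated lexer.
	input := antlr.NewInputStream(`example input`)

	// The lexer/parser constructors are generated from mygrammar.g4;
	// their exact names depend on the grammar name.
	lexer := parsing.NewMygrammarLexer(input)

	// NewCommonTokenStream is part of this runtime; channel 0 is the
	// default token channel.
	stream := antlr.NewCommonTokenStream(lexer, 0)

	parser := parsing.NewMygrammarParser(stream)

	// "Document" stands in for whatever your grammar's start rule is called.
	tree := parser.Document()
	_ = tree
}
```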
+ +[target languages]: https://github.com/antlr/antlr4/tree/master/runtime +[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt +[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md +[new repo]: https://github.com/antlr4-go/antlr +[Jim Idle]: https://github.com/jimidle +[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md +*/ +package antlr diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn.go similarity index 94% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/atn.go index 98010d2e6..cdeefed24 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn.go @@ -20,10 +20,11 @@ var ATNInvalidAltNumber int // [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf // [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network type ATN struct { - // DecisionToState is the decision points for all rules, subrules, optional - // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we + + // DecisionToState is the decision points for all rules, sub-rules, optional + // blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we // can go back later and build DFA predictors for them. This includes - // all the rules, subrules, optional blocks, ()+, ()* etc... + // all the rules, sub-rules, optional blocks, ()+, ()* etc... DecisionToState []DecisionState // grammarType is the ATN type and is used for deserializing ATNs from strings. @@ -51,6 +52,8 @@ type ATN struct { // specified, and otherwise is nil. ruleToTokenType []int + // ATNStates is a list of all states in the ATN, ordered by state number. + // states []ATNState mu sync.Mutex diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_config.go new file mode 100644 index 000000000..a83f25d34 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_config.go @@ -0,0 +1,335 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +const ( + lexerConfig = iota // Indicates that this ATNConfig is for a lexer + parserConfig // Indicates that this ATNConfig is for a parser +) + +// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic +// context). The syntactic context is a graph-structured stack node whose +// path(s) to the root is the rule invocation(s) chain used to arrive in the +// state. The semantic context is the tree of semantic predicates encountered +// before reaching an ATN state. 
+type ATNConfig struct { + precedenceFilterSuppressed bool + state ATNState + alt int + context *PredictionContext + semanticContext SemanticContext + reachesIntoOuterContext int + cType int // lexerConfig or parserConfig + lexerActionExecutor *LexerActionExecutor + passedThroughNonGreedyDecision bool +} + +// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only +func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig { + return NewATNConfig5(state, alt, context, SemanticContextNone) +} + +// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context +func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Necessary? + } + + pac := &ATNConfig{} + pac.state = state + pac.alt = alt + pac.context = context + pac.semanticContext = semanticContext + pac.cType = parserConfig + return pac +} + +// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only +func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig { + return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) +} + +// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context +func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig { + return NewATNConfig(c, state, c.GetContext(), semanticContext) +} + +// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only +func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig { + return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext) +} + +// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only +func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig { + return NewATNConfig(c, state, context, c.GetSemanticContext()) +} + +// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors' +// are just wrappers around this one. 
+func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed + } + b := &ATNConfig{} + b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext) + b.cType = parserConfig + return b +} + +func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) { + + a.state = state + a.alt = alt + a.context = context + a.semanticContext = semanticContext + a.reachesIntoOuterContext = c.GetReachesIntoOuterContext() + a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed() +} + +func (a *ATNConfig) getPrecedenceFilterSuppressed() bool { + return a.precedenceFilterSuppressed +} + +func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) { + a.precedenceFilterSuppressed = v +} + +// GetState returns the ATN state associated with this configuration +func (a *ATNConfig) GetState() ATNState { + return a.state +} + +// GetAlt returns the alternative associated with this configuration +func (a *ATNConfig) GetAlt() int { + return a.alt +} + +// SetContext sets the rule invocation stack associated with this configuration +func (a *ATNConfig) SetContext(v *PredictionContext) { + a.context = v +} + +// GetContext returns the rule invocation stack associated with this configuration +func (a *ATNConfig) GetContext() *PredictionContext { + return a.context +} + +// GetSemanticContext returns the semantic context associated with this configuration +func (a *ATNConfig) GetSemanticContext() SemanticContext { + return a.semanticContext +} + +// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration +func (a *ATNConfig) GetReachesIntoOuterContext() int { + return a.reachesIntoOuterContext +} + +// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration +func (a *ATNConfig) SetReachesIntoOuterContext(v int) { + a.reachesIntoOuterContext = v +} + +// Equals is the default comparison function for an ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. +func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool { + switch a.cType { + case lexerConfig: + return a.LEquals(o) + case parserConfig: + return a.PEquals(o) + default: + panic("Invalid ATNConfig type") + } +} + +// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. 
+func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool { + var other, ok = o.(*ATNConfig) + + if !ok { + return false + } + if a == other { + return true + } else if other == nil { + return false + } + + var equal bool + + if a.context == nil { + equal = other.context == nil + } else { + equal = a.context.Equals(other.context) + } + + var ( + nums = a.state.GetStateNumber() == other.state.GetStateNumber() + alts = a.alt == other.alt + cons = a.semanticContext.Equals(other.semanticContext) + sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed + ) + + return nums && alts && cons && sups && equal +} + +// Hash is the default hash function for a parser ATNConfig, when no specialist hash function +// is required for a collection +func (a *ATNConfig) Hash() int { + switch a.cType { + case lexerConfig: + return a.LHash() + case parserConfig: + return a.PHash() + default: + panic("Invalid ATNConfig type") + } +} + +// PHash is the default hash function for a parser ATNConfig, when no specialist hash function +// is required for a collection +func (a *ATNConfig) PHash() int { + var c int + if a.context != nil { + c = a.context.Hash() + } + + h := murmurInit(7) + h = murmurUpdate(h, a.state.GetStateNumber()) + h = murmurUpdate(h, a.alt) + h = murmurUpdate(h, c) + h = murmurUpdate(h, a.semanticContext.Hash()) + return murmurFinish(h, 4) +} + +// String returns a string representation of the ATNConfig, usually used for debugging purposes +func (a *ATNConfig) String() string { + var s1, s2, s3 string + + if a.context != nil { + s1 = ",[" + fmt.Sprint(a.context) + "]" + } + + if a.semanticContext != SemanticContextNone { + s2 = "," + fmt.Sprint(a.semanticContext) + } + + if a.reachesIntoOuterContext > 0 { + s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext) + } + + return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3) +} + +func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.state = state + lac.alt = alt + lac.context = context + lac.semanticContext = SemanticContextNone + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = c.lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = c.lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +//goland:noinspection GoUnusedExportedFunction +func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.state = state + lac.alt = alt + lac.context = context + lac.semanticContext = SemanticContextNone + lac.cType = lexerConfig + return lac +} + +// LHash is the default hash 
function for Lexer ATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (a *ATNConfig) LHash() int { + var f int + if a.passedThroughNonGreedyDecision { + f = 1 + } else { + f = 0 + } + h := murmurInit(7) + h = murmurUpdate(h, a.state.GetStateNumber()) + h = murmurUpdate(h, a.alt) + h = murmurUpdate(h, a.context.Hash()) + h = murmurUpdate(h, a.semanticContext.Hash()) + h = murmurUpdate(h, f) + h = murmurUpdate(h, a.lexerActionExecutor.Hash()) + h = murmurFinish(h, 6) + return h +} + +// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool { + var otherT, ok = other.(*ATNConfig) + if !ok { + return false + } else if a == otherT { + return true + } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision { + return false + } + + switch { + case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil: + return true + case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil: + if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) { + return false + } + default: + return false // One but not both, are nil + } + + return a.PEquals(otherT) +} + +func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool { + var ds, ok = target.(DecisionState) + + return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go new file mode 100644 index 000000000..52dbaf806 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go @@ -0,0 +1,301 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// ATNConfigSet is a specialized set of ATNConfig that tracks information +// about its elements and can combine similar configurations using a +// graph-structured stack. +type ATNConfigSet struct { + cachedHash int + + // configLookup is used to determine whether two ATNConfigSets are equal. We + // need all configurations with the same (s, i, _, semctx) to be equal. A key + // effectively doubles the number of objects associated with ATNConfigs. All + // keys are hashed by (s, i, _, pi), not including the context. Wiped out when + // read-only because a set becomes a DFA state. + configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]] + + // configs is the added elements that did not match an existing key in configLookup + configs []*ATNConfig + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves re-computation. Can we track conflicts as they + // are added to save scanning configs later? + conflictingAlts *BitSet + + // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates + // we hit a pred while computing a closure operation. Do not make a DFA state + // from the ATNConfigSet in this case. TODO: How is this used by parsers? + dipsIntoOuterContext bool + + // fullCtx is whether it is part of a full context LL prediction. Used to + // determine how to merge $. It is a wildcard with SLL, but not for an LL + // context merge. + fullCtx bool + + // Used in parser and lexer. 
In lexer, it indicates we hit a pred + // while computing a closure operation. Don't make a DFA state from this set. + hasSemanticContext bool + + // readOnly is whether it is read-only. Do not + // allow any code to manipulate the set if true because DFA states will point at + // sets and those must not change. It not, protect other fields; conflictingAlts + // in particular, which is assigned after readOnly. + readOnly bool + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves re-computation. Can we track conflicts as they + // are added to save scanning configs later? + uniqueAlt int +} + +// Alts returns the combined set of alts for all the configurations in this set. +func (b *ATNConfigSet) Alts() *BitSet { + alts := NewBitSet() + for _, it := range b.configs { + alts.add(it.GetAlt()) + } + return alts +} + +// NewATNConfigSet creates a new ATNConfigSet instance. +func NewATNConfigSet(fullCtx bool) *ATNConfigSet { + return &ATNConfigSet{ + cachedHash: -1, + configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"), + fullCtx: fullCtx, + } +} + +// Add merges contexts with existing configs for (s, i, pi, _), +// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and +// 'pi' is the [ATNConfig].semanticContext. +// +// We use (s,i,pi) as the key. +// Updates dipsIntoOuterContext and hasSemanticContext when necessary. +func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool { + if b.readOnly { + panic("set is read-only") + } + + if config.GetSemanticContext() != SemanticContextNone { + b.hasSemanticContext = true + } + + if config.GetReachesIntoOuterContext() > 0 { + b.dipsIntoOuterContext = true + } + + existing, present := b.configLookup.Put(config) + + // The config was not already in the set + // + if !present { + b.cachedHash = -1 + b.configs = append(b.configs, config) // Track order here + return true + } + + // Merge a previous (s, i, pi, _) with it and save the result + rootIsWildcard := !b.fullCtx + merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) + + // No need to check for existing.context because config.context is in the cache, + // since the only way to create new graphs is the "call rule" and here. We cache + // at both places. 
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) + + // Preserve the precedence filter suppression during the merge + if config.getPrecedenceFilterSuppressed() { + existing.setPrecedenceFilterSuppressed(true) + } + + // Replace the context because there is no need to do alt mapping + existing.SetContext(merged) + + return true +} + +// GetStates returns the set of states represented by all configurations in this config set +func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { + + // states uses the standard comparator and Hash() provided by the ATNState instance + // + states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()") + + for i := 0; i < len(b.configs); i++ { + states.Put(b.configs[i].GetState()) + } + + return states +} + +func (b *ATNConfigSet) GetPredicates() []SemanticContext { + predicates := make([]SemanticContext, 0) + + for i := 0; i < len(b.configs); i++ { + c := b.configs[i].GetSemanticContext() + + if c != SemanticContextNone { + predicates = append(predicates, c) + } + } + + return predicates +} + +func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { + if b.readOnly { + panic("set is read-only") + } + + // Empty indicate no optimization is possible + if b.configLookup == nil || b.configLookup.Len() == 0 { + return + } + + for i := 0; i < len(b.configs); i++ { + config := b.configs[i] + config.SetContext(interpreter.getCachedContext(config.GetContext())) + } +} + +func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool { + for i := 0; i < len(coll); i++ { + b.Add(coll[i], nil) + } + + return false +} + +// Compare The configs are only equal if they are in the same order and their Equals function returns true. +// Java uses ArrayList.equals(), which requires the same order. 
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool { + if len(b.configs) != len(bs.configs) { + return false + } + for i := 0; i < len(b.configs); i++ { + if !b.configs[i].Equals(bs.configs[i]) { + return false + } + } + + return true +} + +func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool { + if b == other { + return true + } else if _, ok := other.(*ATNConfigSet); !ok { + return false + } + + other2 := other.(*ATNConfigSet) + var eca bool + switch { + case b.conflictingAlts == nil && other2.conflictingAlts == nil: + eca = true + case b.conflictingAlts != nil && other2.conflictingAlts != nil: + eca = b.conflictingAlts.equals(other2.conflictingAlts) + } + return b.configs != nil && + b.fullCtx == other2.fullCtx && + b.uniqueAlt == other2.uniqueAlt && + eca && + b.hasSemanticContext == other2.hasSemanticContext && + b.dipsIntoOuterContext == other2.dipsIntoOuterContext && + b.Compare(other2) +} + +func (b *ATNConfigSet) Hash() int { + if b.readOnly { + if b.cachedHash == -1 { + b.cachedHash = b.hashCodeConfigs() + } + + return b.cachedHash + } + + return b.hashCodeConfigs() +} + +func (b *ATNConfigSet) hashCodeConfigs() int { + h := 1 + for _, config := range b.configs { + h = 31*h + config.Hash() + } + return h +} + +func (b *ATNConfigSet) Contains(item *ATNConfig) bool { + if b.readOnly { + panic("not implemented for read-only sets") + } + if b.configLookup == nil { + return false + } + return b.configLookup.Contains(item) +} + +func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool { + return b.Contains(item) +} + +func (b *ATNConfigSet) Clear() { + if b.readOnly { + panic("set is read-only") + } + b.configs = make([]*ATNConfig, 0) + b.cachedHash = -1 + b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()") +} + +func (b *ATNConfigSet) String() string { + + s := "[" + + for i, c := range b.configs { + s += c.String() + + if i != len(b.configs)-1 { + s += ", " + } + } + + s += "]" + + if b.hasSemanticContext { + s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) + } + + if b.uniqueAlt != ATNInvalidAltNumber { + s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) + } + + if b.conflictingAlts != nil { + s += ",conflictingAlts=" + b.conflictingAlts.String() + } + + if b.dipsIntoOuterContext { + s += ",dipsIntoOuterContext" + } + + return s +} + +// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair +// for use in lexers. 
+func NewOrderedATNConfigSet() *ATNConfigSet { + return &ATNConfigSet{ + cachedHash: -1, + // This set uses the standard Hash() and Equals() from ATNConfig + configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"), + fullCtx: false, + } +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go similarity index 86% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go index 3c975ec7b..bdb30b362 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go @@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool { func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.readOnly = readOnly } @@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool { func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.verifyATN = verifyATN } @@ -42,11 +42,12 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool { func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.generateRuleBypassTransitions = generateRuleBypassTransitions } +//goland:noinspection GoUnusedExportedFunction func DefaultATNDeserializationOptions() *ATNDeserializationOptions { return NewATNDeserializationOptions(&defaultATNDeserializationOptions) } diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go similarity index 97% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go index 3888856b4..2dcb9ae11 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go @@ -35,6 +35,7 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { return &ATNDeserializer{options: options} } +//goland:noinspection GoUnusedFunction func stringInSlice(a string, list []string) int { for i, b := range list { if b == a { @@ -193,7 +194,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) { } } -func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet { +func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet { m := a.readInt() // Preallocate the needed capacity. 
@@ -350,7 +351,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { bypassStart.endState = bypassStop - atn.defineDecisionState(bypassStart.BaseDecisionState) + atn.defineDecisionState(&bypassStart.BaseDecisionState) bypassStop.startState = bypassStart @@ -450,7 +451,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { continue } - // We analyze the ATN to determine if a ATN decision state is the + // We analyze the [ATN] to determine if an ATN decision state is the // decision for the closure block that determines whether a // precedence rule should continue or complete. if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { @@ -553,7 +554,7 @@ func (a *ATNDeserializer) readInt() int { return int(v) // data is 32 bits but int is at least that big } -func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { +func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { target := atn.states[trg] switch typeIndex { diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go new file mode 100644 index 000000000..afe6c9f80 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go @@ -0,0 +1,41 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false)) + +type IATNSimulator interface { + SharedContextCache() *PredictionContextCache + ATN() *ATN + DecisionToDFA() []*DFA +} + +type BaseATNSimulator struct { + atn *ATN + sharedContextCache *PredictionContextCache + decisionToDFA []*DFA +} + +func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext { + if b.sharedContextCache == nil { + return context + } + + //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()") + visited := NewVisitRecord() + return getCachedBasePredictionContext(context, b.sharedContextCache, visited) +} + +func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { + return b.sharedContextCache +} + +func (b *BaseATNSimulator) ATN() *ATN { + return b.atn +} + +func (b *BaseATNSimulator) DecisionToDFA() []*DFA { + return b.decisionToDFA +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_state.go new file mode 100644 index 000000000..2ae5807cd --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_state.go @@ -0,0 +1,461 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Constants for serialization. 
+const ( + ATNStateInvalidType = 0 + ATNStateBasic = 1 + ATNStateRuleStart = 2 + ATNStateBlockStart = 3 + ATNStatePlusBlockStart = 4 + ATNStateStarBlockStart = 5 + ATNStateTokenStart = 6 + ATNStateRuleStop = 7 + ATNStateBlockEnd = 8 + ATNStateStarLoopBack = 9 + ATNStateStarLoopEntry = 10 + ATNStatePlusLoopBack = 11 + ATNStateLoopEnd = 12 + + ATNStateInvalidStateNumber = -1 +) + +//goland:noinspection GoUnusedGlobalVariable +var ATNStateInitialNumTransitions = 4 + +type ATNState interface { + GetEpsilonOnlyTransitions() bool + + GetRuleIndex() int + SetRuleIndex(int) + + GetNextTokenWithinRule() *IntervalSet + SetNextTokenWithinRule(*IntervalSet) + + GetATN() *ATN + SetATN(*ATN) + + GetStateType() int + + GetStateNumber() int + SetStateNumber(int) + + GetTransitions() []Transition + SetTransitions([]Transition) + AddTransition(Transition, int) + + String() string + Hash() int + Equals(Collectable[ATNState]) bool +} + +type BaseATNState struct { + // NextTokenWithinRule caches lookahead during parsing. Not used during construction. + NextTokenWithinRule *IntervalSet + + // atn is the current ATN. + atn *ATN + + epsilonOnlyTransitions bool + + // ruleIndex tracks the Rule index because there are no Rule objects at runtime. + ruleIndex int + + stateNumber int + + stateType int + + // Track the transitions emanating from this ATN state. + transitions []Transition +} + +func NewATNState() *BaseATNState { + return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} +} + +func (as *BaseATNState) GetRuleIndex() int { + return as.ruleIndex +} + +func (as *BaseATNState) SetRuleIndex(v int) { + as.ruleIndex = v +} +func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { + return as.epsilonOnlyTransitions +} + +func (as *BaseATNState) GetATN() *ATN { + return as.atn +} + +func (as *BaseATNState) SetATN(atn *ATN) { + as.atn = atn +} + +func (as *BaseATNState) GetTransitions() []Transition { + return as.transitions +} + +func (as *BaseATNState) SetTransitions(t []Transition) { + as.transitions = t +} + +func (as *BaseATNState) GetStateType() int { + return as.stateType +} + +func (as *BaseATNState) GetStateNumber() int { + return as.stateNumber +} + +func (as *BaseATNState) SetStateNumber(stateNumber int) { + as.stateNumber = stateNumber +} + +func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { + return as.NextTokenWithinRule +} + +func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { + as.NextTokenWithinRule = v +} + +func (as *BaseATNState) Hash() int { + return as.stateNumber +} + +func (as *BaseATNState) String() string { + return strconv.Itoa(as.stateNumber) +} + +func (as *BaseATNState) Equals(other Collectable[ATNState]) bool { + if ot, ok := other.(ATNState); ok { + return as.stateNumber == ot.GetStateNumber() + } + + return false +} + +func (as *BaseATNState) isNonGreedyExitState() bool { + return false +} + +func (as *BaseATNState) AddTransition(trans Transition, index int) { + if len(as.transitions) == 0 { + as.epsilonOnlyTransitions = trans.getIsEpsilon() + } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { + _, _ = fmt.Fprintf(os.Stdin, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber) + as.epsilonOnlyTransitions = false + } + + // TODO: Check code for already present compared to the Java equivalent + //alreadyPresent := false + //for _, t := range as.transitions { + // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() { + // if t.getLabel() != nil && trans.getLabel() 
!= nil && trans.getLabel().Equals(t.getLabel()) { + // alreadyPresent = true + // break + // } + // } else if t.getIsEpsilon() && trans.getIsEpsilon() { + // alreadyPresent = true + // break + // } + //} + //if !alreadyPresent { + if index == -1 { + as.transitions = append(as.transitions, trans) + } else { + as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) + // TODO: as.transitions.splice(index, 1, trans) + } + //} else { + // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber) + //} +} + +type BasicState struct { + BaseATNState +} + +func NewBasicState() *BasicState { + return &BasicState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + } +} + +type DecisionState interface { + ATNState + + getDecision() int + setDecision(int) + + getNonGreedy() bool + setNonGreedy(bool) +} + +type BaseDecisionState struct { + BaseATNState + decision int + nonGreedy bool +} + +func NewBaseDecisionState() *BaseDecisionState { + return &BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + } +} + +func (s *BaseDecisionState) getDecision() int { + return s.decision +} + +func (s *BaseDecisionState) setDecision(b int) { + s.decision = b +} + +func (s *BaseDecisionState) getNonGreedy() bool { + return s.nonGreedy +} + +func (s *BaseDecisionState) setNonGreedy(b bool) { + s.nonGreedy = b +} + +type BlockStartState interface { + DecisionState + + getEndState() *BlockEndState + setEndState(*BlockEndState) +} + +// BaseBlockStartState is the start of a regular (...) block. +type BaseBlockStartState struct { + BaseDecisionState + endState *BlockEndState +} + +func NewBlockStartState() *BaseBlockStartState { + return &BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + }, + } +} + +func (s *BaseBlockStartState) getEndState() *BlockEndState { + return s.endState +} + +func (s *BaseBlockStartState) setEndState(b *BlockEndState) { + s.endState = b +} + +type BasicBlockStartState struct { + BaseBlockStartState +} + +func NewBasicBlockStartState() *BasicBlockStartState { + return &BasicBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &BasicBlockStartState{} + +// BlockEndState is a terminal node of a simple (a|b|c) block. +type BlockEndState struct { + BaseATNState + startState ATNState +} + +func NewBlockEndState() *BlockEndState { + return &BlockEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockEnd, + }, + startState: nil, + } +} + +// RuleStopState is the last node in the ATN for a rule, unless that rule is the +// start symbol. In that case, there is one transition to EOF. Later, we might +// encode references to all calls to this rule to compute FOLLOW sets for error +// handling. 
+type RuleStopState struct { + BaseATNState +} + +func NewRuleStopState() *RuleStopState { + return &RuleStopState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStop, + }, + } +} + +type RuleStartState struct { + BaseATNState + stopState ATNState + isPrecedenceRule bool +} + +func NewRuleStartState() *RuleStartState { + return &RuleStartState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStart, + }, + } +} + +// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two +// transitions: one to the loop back to start of the block, and one to exit. +type PlusLoopbackState struct { + BaseDecisionState +} + +func NewPlusLoopbackState() *PlusLoopbackState { + return &PlusLoopbackState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusLoopBack, + }, + }, + } +} + +// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a +// decision state; we don't use it for code generation. Somebody might need it, +// it is included for completeness. In reality, PlusLoopbackState is the real +// decision-making node for A+. +type PlusBlockStartState struct { + BaseBlockStartState + loopBackState ATNState +} + +func NewPlusBlockStartState() *PlusBlockStartState { + return &PlusBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &PlusBlockStartState{} + +// StarBlockStartState is the block that begins a closure loop. +type StarBlockStartState struct { + BaseBlockStartState +} + +func NewStarBlockStartState() *StarBlockStartState { + return &StarBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &StarBlockStartState{} + +type StarLoopbackState struct { + BaseATNState +} + +func NewStarLoopbackState() *StarLoopbackState { + return &StarLoopbackState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopBack, + }, + } +} + +type StarLoopEntryState struct { + BaseDecisionState + loopBackState ATNState + precedenceRuleDecision bool +} + +func NewStarLoopEntryState() *StarLoopEntryState { + // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. + return &StarLoopEntryState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopEntry, + }, + }, + } +} + +// LoopEndState marks the end of a * or + loop. +type LoopEndState struct { + BaseATNState + loopBackState ATNState +} + +func NewLoopEndState() *LoopEndState { + return &LoopEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateLoopEnd, + }, + } +} + +// TokensStartState is the Tokens rule start state linking to each lexer rule start state. 
+type TokensStartState struct { + BaseDecisionState +} + +func NewTokensStartState() *TokensStartState { + return &TokensStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateTokenStart, + }, + }, + } +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/atn_type.go similarity index 100% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/atn_type.go diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/char_stream.go similarity index 89% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/char_stream.go index c33f0adb5..bd8127b6b 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/char_stream.go @@ -8,5 +8,5 @@ type CharStream interface { IntStream GetText(int, int) string GetTextFromTokens(start, end Token) string - GetTextFromInterval(*Interval) string + GetTextFromInterval(Interval) string } diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go similarity index 100% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go similarity index 88% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go index c6c9485a2..b75da9df0 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go @@ -28,22 +28,24 @@ type CommonTokenStream struct { // trivial with bt field. fetchedEOF bool - // index indexs into tokens of the current token (next token to consume). + // index into [tokens] of the current token (next token to consume). // tokens[p] should be LT(1). It is set to -1 when the stream is first // constructed or when SetTokenSource is called, indicating that the first token // has not yet been fetched from the token source. For additional information, - // see the documentation of IntStream for a description of initializing methods. + // see the documentation of [IntStream] for a description of initializing methods. index int - // tokenSource is the TokenSource from which tokens for the bt stream are + // tokenSource is the [TokenSource] from which tokens for the bt stream are // fetched. tokenSource TokenSource - // tokens is all tokens fetched from the token source. The list is considered a + // tokens contains all tokens fetched from the token source. The list is considered a // complete view of the input once fetchedEOF is set to true. tokens []Token } +// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce +// tokens and will pull tokens from the given lexer channel. 
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { return &CommonTokenStream{ channel: channel, @@ -53,6 +55,7 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { } } +// GetAllTokens returns all tokens currently pulled from the token source. func (c *CommonTokenStream) GetAllTokens() []Token { return c.tokens } @@ -61,9 +64,11 @@ func (c *CommonTokenStream) Mark() int { return 0 } -func (c *CommonTokenStream) Release(marker int) {} +func (c *CommonTokenStream) Release(_ int) {} -func (c *CommonTokenStream) reset() { +func (c *CommonTokenStream) Reset() { + c.fetchedEOF = false + c.tokens = make([]Token, 0) c.Seek(0) } @@ -107,7 +112,7 @@ func (c *CommonTokenStream) Consume() { // Sync makes sure index i in tokens has a token and returns true if a token is // located at index i and otherwise false. func (c *CommonTokenStream) Sync(i int) bool { - n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? + n := i - len(c.tokens) + 1 // How many more elements do we need? if n > 0 { fetched := c.fetch(n) @@ -193,12 +198,13 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { c.tokenSource = tokenSource c.tokens = make([]Token, 0) c.index = -1 + c.fetchedEOF = false } // NextTokenOnChannel returns the index of the next token on channel given a // starting index. Returns i if tokens[i] is on channel. Returns -1 if there are -// no tokens on channel between i and EOF. -func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { +// no tokens on channel between 'i' and [TokenEOF]. +func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int { c.Sync(i) if i >= len(c.tokens) { @@ -244,7 +250,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) from := tokenIndex + 1 - // If no onchannel to the right, then nextOnChannel == -1, so set to to last token + // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token var to int if nextOnChannel == -1 { @@ -314,7 +320,8 @@ func (c *CommonTokenStream) Index() int { } func (c *CommonTokenStream) GetAllText() string { - return c.GetTextFromInterval(nil) + c.Fill() + return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1)) } func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { @@ -329,15 +336,9 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string return c.GetTextFromInterval(interval.GetSourceInterval()) } -func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { +func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string { c.lazyInit() - - if interval == nil { - c.Fill() - interval = NewInterval(0, len(c.tokens)-1) - } else { - c.Sync(interval.Stop) - } + c.Sync(interval.Stop) start := interval.Start stop := interval.Stop diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/comparators.go similarity index 82% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/comparators.go index 9ea320053..7467e9b43 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/comparators.go @@ -18,17 +18,20 @@ package antlr // type safety and avoid having to implement this for every type that we want 
to perform comparison on. // // This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which -// allows us to use it in any collection instance that does nto require a special hash or equals implementation. +// allows us to use it in any collection instance that does not require a special hash or equals implementation. type ObjEqComparator[T Collectable[T]] struct{} var ( - aStateEqInst = &ObjEqComparator[ATNState]{} - aConfEqInst = &ObjEqComparator[ATNConfig]{} - aConfCompInst = &ATNConfigComparator[ATNConfig]{} - atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{} + aStateEqInst = &ObjEqComparator[ATNState]{} + aConfEqInst = &ObjEqComparator[*ATNConfig]{} + + // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache + aConfCompInst = &ATNConfigComparator[*ATNConfig]{} + atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{} dfaStateEqInst = &ObjEqComparator[*DFAState]{} semctxEqInst = &ObjEqComparator[SemanticContext]{} - atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{} + atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{} + pContextEqInst = &ObjEqComparator[*PredictionContext]{} ) // Equals2 delegates to the Equals() method of type T @@ -44,14 +47,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int { type SemCComparator[T Collectable[T]] struct{} -// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet +// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet // and has a custom Equals() and Hash() implementation, because equality is not based on the // standard Hash() and Equals() methods of the ATNConfig type. type ATNConfigComparator[T Collectable[T]] struct { } // Equals2 is a custom comparator for ATNConfigs specifically for configLookup -func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { +func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { // Same pointer, must be equal, even if both nil // @@ -72,7 +75,8 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { } // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup -func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int { +func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int { + hash := 7 hash = 31*hash + o.GetState().GetStateNumber() hash = 31*hash + o.GetAlt() @@ -85,7 +89,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct { } // Equals2 is a custom comparator for ATNConfigs specifically for configLookup -func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { +func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { // Same pointer, must be equal, even if both nil // @@ -105,21 +109,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { } // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup -func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int { +func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int { h := murmurInit(7) h = murmurUpdate(h, o.GetState().GetStateNumber()) h = murmurUpdate(h, o.GetContext().Hash()) return murmurFinish(h, 2) } -// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet +// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet // and has a custom Equals() and Hash() implementation, because equality is not based on the // standard Hash() and Equals() methods of the ATNConfig 
type. type BaseATNConfigComparator[T Collectable[T]] struct { } // Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet -func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { +func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { // Same pointer, must be equal, even if both nil // @@ -141,7 +145,6 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just // delegates to the standard Hash() method of the ATNConfig type. -func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int { - +func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int { return o.Hash() } diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/configuration.go new file mode 100644 index 000000000..c2b724514 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/configuration.go @@ -0,0 +1,214 @@ +package antlr + +type runtimeConfiguration struct { + statsTraceStacks bool + lexerATNSimulatorDebug bool + lexerATNSimulatorDFADebug bool + parserATNSimulatorDebug bool + parserATNSimulatorTraceATNSim bool + parserATNSimulatorDFADebug bool + parserATNSimulatorRetryDebug bool + lRLoopEntryBranchOpt bool + memoryManager bool +} + +// Global runtime configuration +var runtimeConfig = runtimeConfiguration{ + lRLoopEntryBranchOpt: true, +} + +type runtimeOption func(*runtimeConfiguration) error + +// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options. +// It uses the functional options pattern for go. This is a package global function as it operates on the runtime +// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is +// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the +// only maintainer). However, it is possible that you might want to use this to set a global option concerning the +// memory allocation type used by the runtime such as sync.Pool or not. +// +// The options are applied in the order they are passed in, so the last option will override any previous options. +// +// For example, if you want to turn on the collection create point stack flag to true, you can do: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// If you want to turn it off, you can do: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func ConfigureRuntime(options ...runtimeOption) error { + for _, option := range options { + err := option(&runtimeConfig) + if err != nil { + return err + } + } + return nil +} + +// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of +// certain structs, such as collections, or the use point of certain methods such as Put(). +// Because this can be expensive, it is turned off by default. However, it +// can be useful to track down exactly where memory is being created and used. 
+// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func WithStatsTraceStacks(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.statsTraceStacks = trace + return nil + } +} + +// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false)) +func WithLexerATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDebug = debug + return nil + } +} + +// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false)) +func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false)) +func WithParserATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDebug = debug + return nil + } +} + +// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator +// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false)) +func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorTraceATNSim = trace + return nil + } +} + +// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. 
+// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false)) +func WithParserATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime. +// Only useful to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false)) +func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorRetryDebug = debug + return nil + } +} + +// WithLRLoopEntryBranchOpt sets the global flag indicating whether let recursive loop operations should be +// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime. +// It turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator]. +// +// Note that default is to use this optimization. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false)) +func WithLRLoopEntryBranchOpt(off bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lRLoopEntryBranchOpt = off + return nil + } +} + +// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful +// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which +// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute +// for fixing your grammar by ridding yourself of extreme ambiguity. BUt if you are just trying to reuse an opensource +// grammar, this may help make it more practical. +// +// Note that default is to use normal Go memory allocation and not pool memory. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithMemoryManager(true)) +// +// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other +// and should remember to nil out any references to the parser or lexer when you are done with them. 
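As an aside (not part of the vendored diff): the option constructors above follow Go's functional options pattern, so a consumer can flip these global flags in a single call. A minimal sketch using only the exported functions shown in configuration.go:

package main

import (
	"log"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// Options are applied in order; a later option overrides an earlier one.
	err := antlr.ConfigureRuntime(
		antlr.WithStatsTraceStacks(true),        // collect create-point stacks (expensive, off by default)
		antlr.WithParserATNSimulatorDebug(true), // parser ATN simulator debug logging
	)
	if err != nil {
		log.Fatalf("configuring ANTLR runtime: %v", err)
	}
}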
+func WithMemoryManager(use bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.memoryManager = use + return nil + } +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/dfa.go similarity index 76% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/dfa.go index bfd43e1f7..6b63eb158 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/dfa.go @@ -4,6 +4,8 @@ package antlr +// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can +// reach and the transitions between them. type DFA struct { // atnStartState is the ATN state in which this was created atnStartState DecisionState @@ -12,10 +14,9 @@ type DFA struct { // states is all the DFA states. Use Map to get the old state back; Set can only // indicate whether it is there. Go maps implement key hash collisions and so on and are very - // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva + // good, but the DFAState is an object and can't be used directly as the key as it can in say Java // amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them - // to see if they really are the same object. - // + // to see if they really are the same object. Hence, we have our own map storage. // states *JStore[*DFAState, *ObjEqComparator[*DFAState]] @@ -32,11 +33,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA { dfa := &DFA{ atnStartState: atnStartState, decision: decision, - states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst), + states: nil, // Lazy initialize } if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision { dfa.precedenceDfa = true - dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false)) + dfa.s0 = NewDFAState(-1, NewATNConfigSet(false)) dfa.s0.isAcceptState = false dfa.s0.requiresFullContext = false } @@ -95,12 +96,11 @@ func (d *DFA) getPrecedenceDfa() bool { // true or nil otherwise, and d.precedenceDfa is updated. func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { if d.getPrecedenceDfa() != precedenceDfa { - d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst) + d.states = nil // Lazy initialize d.numstates = 0 if precedenceDfa { - precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) - + precedenceState := NewDFAState(-1, NewATNConfigSet(false)) precedenceState.setEdges(make([]*DFAState, 0)) precedenceState.isAcceptState = false precedenceState.requiresFullContext = false @@ -113,6 +113,31 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { } } +// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy +// instantiation of the states JMap. +func (d *DFA) Len() int { + if d.states == nil { + return 0 + } + return d.states.Len() +} + +// Get returns a state that matches s if it is present in the DFA state set. We defer to this +// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap. 
+func (d *DFA) Get(s *DFAState) (*DFAState, bool) { + if d.states == nil { + return nil, false + } + return d.states.Get(s) +} + +func (d *DFA) Put(s *DFAState) (*DFAState, bool) { + if d.states == nil { + d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put") + } + return d.states.Put(s) +} + func (d *DFA) getS0() *DFAState { return d.s0 } @@ -121,9 +146,11 @@ func (d *DFA) setS0(s *DFAState) { d.s0 = s } -// sortedStates returns the states in d sorted by their state number. +// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil. func (d *DFA) sortedStates() []*DFAState { - + if d.states == nil { + return []*DFAState{} + } vs := d.states.SortedSlice(func(i, j *DFAState) bool { return i.stateNumber < j.stateNumber }) diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go similarity index 97% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go index 84d0a31e5..0e1100989 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go @@ -10,7 +10,7 @@ import ( "strings" ) -// DFASerializer is a DFA walker that knows how to dump them to serialized +// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized // strings. type DFASerializer struct { dfa *DFA diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go similarity index 81% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go index c90dec55c..654143074 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go @@ -22,30 +22,31 @@ func (p *PredPrediction) String() string { return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" } -// DFAState represents a set of possible ATN configurations. As Aho, Sethi, +// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi, // Ullman p. 117 says: "The DFA uses its state to keep track of all possible // states the ATN can be in after reading each input symbol. That is to say, -// after reading input a1a2..an, the DFA is in a state that represents the +// after reading input a1, a2,..an, the DFA is in a state that represents the // subset T of the states of the ATN that are reachable from the ATN's start -// state along some path labeled a1a2..an." In conventional NFA-to-DFA -// conversion, therefore, the subset T would be a bitset representing the set of -// states the ATN could be in. We need to track the alt predicted by each state +// state along some path labeled a1a2..an." +// +// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of +// states the [ATN] could be in. We need to track the alt predicted by each state // as well, however. More importantly, we need to maintain a stack of states, // tracking the closure operations as they jump from rule to rule, emulating // rule invocations (method calls). 
I have to add a stack to simulate the proper // lookahead sequences for the underlying LL grammar from which the ATN was // derived. // -// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a -// state (ala normal conversion) and a RuleContext describing the chain of rules +// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a +// state (ala normal conversion) and a [RuleContext] describing the chain of rules // (if any) followed to arrive at that state. // -// A DFAState may have multiple references to a particular state, but with -// different ATN contexts (with same or different alts) meaning that state was +// A [DFAState] may have multiple references to a particular state, but with +// different [ATN] contexts (with same or different alts) meaning that state was // reached via a different set of rule invocations. type DFAState struct { stateNumber int - configs ATNConfigSet + configs *ATNConfigSet // edges elements point to the target of the symbol. Shift up by 1 so (-1) // Token.EOF maps to the first element. @@ -53,7 +54,7 @@ type DFAState struct { isAcceptState bool - // prediction is the ttype we match or alt we predict if the state is accept. + // prediction is the 'ttype' we match or alt we predict if the state is 'accept'. // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or // requiresFullContext. prediction int @@ -81,9 +82,9 @@ type DFAState struct { predicates []*PredPrediction } -func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { +func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState { if configs == nil { - configs = NewBaseATNConfigSet(false) + configs = NewATNConfigSet(false) } return &DFAState{configs: configs, stateNumber: stateNumber} @@ -94,7 +95,7 @@ func (d *DFAState) GetAltSet() []int { var alts []int if d.configs != nil { - for _, c := range d.configs.GetItems() { + for _, c := range d.configs.configs { alts = append(alts, c.GetAlt()) } } diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go similarity index 92% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go index c55bcc19b..bd2cd8bc3 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go @@ -33,6 +33,7 @@ type DiagnosticErrorListener struct { exactOnly bool } +//goland:noinspection GoUnusedExportedFunction func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { n := new(DiagnosticErrorListener) @@ -42,7 +43,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { return n } -func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { if d.exactOnly && !exact { return } @@ -55,7 +56,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s recognizer.NotifyErrorListeners(msg, nil, nil) } -func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex 
int, conflictingAlts *BitSet, configs ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) { msg := "reportAttemptingFullContext d=" + d.getDecisionDescription(recognizer, dfa) + @@ -64,7 +65,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, recognizer.NotifyErrorListeners(msg, nil, nil) } -func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) { msg := "reportContextSensitivity d=" + d.getDecisionDescription(recognizer, dfa) + ", input='" + @@ -96,12 +97,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa // @param configs The conflicting or ambiguous configuration set. // @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise // returns the set of alternatives represented in {@code configs}. -func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { +func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet { if ReportedAlts != nil { return ReportedAlts } result := NewBitSet() - for _, c := range set.GetItems() { + for _, c := range set.configs { result.add(c.GetAlt()) } diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/error_listener.go new file mode 100644 index 000000000..21a021643 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/error_listener.go @@ -0,0 +1,100 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Provides an empty default implementation of {@link ANTLRErrorListener}. The +// default implementation of each method does nothing, but can be overridden as +// necessary. 
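An aside, not part of the vendored diff: the ErrorListener interface and the no-op DefaultErrorListener defined in the hunk below are the usual extension points for consumers. A minimal sketch of a listener that collects syntax errors, using only exported names from this hunk; attaching it to a generated recognizer appears only as a comment, because those methods are not part of this hunk:

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// collectingListener embeds the no-op DefaultErrorListener so it only has to
// override SyntaxError; the Report* callbacks keep their empty defaults.
type collectingListener struct {
	*antlr.DefaultErrorListener
	errs []string
}

func (l *collectingListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int,
	msg string, _ antlr.RecognitionException) {
	l.errs = append(l.errs, fmt.Sprintf("%d:%d %s", line, column, msg))
}

func main() {
	l := &collectingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}
	// In real use: p.RemoveErrorListeners(); p.AddErrorListener(l) on a
	// generated recognizer -- those two methods are assumed from the wider
	// runtime and are not shown in this hunk.
	_ = l
}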
+ +type ErrorListener interface { + SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) + ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) + ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) + ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) +} + +type DefaultErrorListener struct { +} + +//goland:noinspection GoUnusedExportedFunction +func NewDefaultErrorListener() *DefaultErrorListener { + return new(DefaultErrorListener) +} + +func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) { +} + +func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) { +} + +type ConsoleErrorListener struct { + *DefaultErrorListener +} + +func NewConsoleErrorListener() *ConsoleErrorListener { + return new(ConsoleErrorListener) +} + +// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}. +var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() + +// SyntaxError prints messages to System.err containing the +// values of line, charPositionInLine, and msg using +// the following format: +// +// line : +func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) { + _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) +} + +type ProxyErrorListener struct { + *DefaultErrorListener + delegates []ErrorListener +} + +func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { + if delegates == nil { + panic("delegates is not provided") + } + l := new(ProxyErrorListener) + l.delegates = delegates + return l +} + +func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + for _, d := range p.delegates { + d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) + } +} + +func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) { + for _, d := range p.delegates { + d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) + } +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go new file mode 100644 index 000000000..9db2be1c7 --- /dev/null 
+++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go @@ -0,0 +1,702 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +type ErrorStrategy interface { + reset(Parser) + RecoverInline(Parser) Token + Recover(Parser, RecognitionException) + Sync(Parser) + InErrorRecoveryMode(Parser) bool + ReportError(Parser, RecognitionException) + ReportMatch(Parser) +} + +// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for +// error reporting and recovery in ANTLR parsers. +type DefaultErrorStrategy struct { + errorRecoveryMode bool + lastErrorIndex int + lastErrorStates *IntervalSet +} + +var _ ErrorStrategy = &DefaultErrorStrategy{} + +func NewDefaultErrorStrategy() *DefaultErrorStrategy { + + d := new(DefaultErrorStrategy) + + // Indicates whether the error strategy is currently "recovering from an + // error". This is used to suppress Reporting multiple error messages while + // attempting to recover from a detected syntax error. + // + // @see //InErrorRecoveryMode + // + d.errorRecoveryMode = false + + // The index into the input stream where the last error occurred. + // This is used to prevent infinite loops where an error is found + // but no token is consumed during recovery...another error is found, + // ad nauseam. This is a failsafe mechanism to guarantee that at least + // one token/tree node is consumed for two errors. + // + d.lastErrorIndex = -1 + d.lastErrorStates = nil + return d +} + +//
The default implementation simply calls {@link //endErrorCondition} to +// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// This method is called to enter error recovery mode when a recognition +// exception is Reported. +func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) { + d.errorRecoveryMode = true +} + +func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool { + return d.errorRecoveryMode +} + +// This method is called to leave error recovery mode after recovering from +// a recognition exception. +func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) { + d.errorRecoveryMode = false + d.lastErrorStates = nil + d.lastErrorIndex = -1 +} + +// ReportMatch is the default implementation of error matching and simply calls endErrorCondition. +func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// ReportError is the default implementation of error reporting. +// It returns immediately if the handler is already +// in error recovery mode. Otherwise, it calls [beginErrorCondition] +// and dispatches the Reporting task based on the runtime type of e +// according to the following table. +// +// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative] +// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch] +// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate] +// All other types : Calls [NotifyErrorListeners] to Report the exception +func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { + // if we've already Reported an error and have not Matched a token + // yet successfully, don't Report any errors. + if d.InErrorRecoveryMode(recognizer) { + return // don't Report spurious errors + } + d.beginErrorCondition(recognizer) + + switch t := e.(type) { + default: + fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) + // fmt.Println(e.stack) + recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) + case *NoViableAltException: + d.ReportNoViableAlternative(recognizer, t) + case *InputMisMatchException: + d.ReportInputMisMatch(recognizer, t) + case *FailedPredicateException: + d.ReportFailedPredicate(recognizer, t) + } +} + +// Recover is the default recovery implementation. +// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set - +// loosely the set of tokens that can follow the current rule. +func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) { + + if d.lastErrorIndex == recognizer.GetInputStream().Index() && + d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { + // uh oh, another error at same token index and previously-Visited + // state in ATN must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop d is a failsafe. + recognizer.Consume() + } + d.lastErrorIndex = recognizer.GetInputStream().Index() + if d.lastErrorStates == nil { + d.lastErrorStates = NewIntervalSet() + } + d.lastErrorStates.addOne(recognizer.GetState()) + followSet := d.GetErrorRecoverySet(recognizer) + d.consumeUntil(recognizer, followSet) +} + +// Sync is the default implementation of error strategy synchronization. +// +// This Sync makes sure that the current lookahead symbol is consistent with what were expecting +// at this point in the [ATN]. 
You can call this anytime but ANTLR only +// generates code to check before sub-rules/loops and each iteration. +// +// Implements [Jim Idle]'s magic Sync mechanism in closures and optional +// sub-rules. E.g.: +// +// a : Sync ( stuff Sync )* +// Sync : {consume to what can follow Sync} +// +// At the start of a sub-rule upon error, Sync performs single +// token deletion, if possible. If it can't do that, it bails on the current +// rule and uses the default error recovery, which consumes until the +// reSynchronization set of the current rule. +// +// If the sub-rule is optional +// +// ({@code (...)?}, {@code (...)*}, +// +// or a block with an empty alternative), then the expected set includes what follows +// the sub-rule. +// +// During loop iteration, it consumes until it sees a token that can start a +// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to +// stay in the loop as long as possible. +// +// # Origins +// +// Previous versions of ANTLR did a poor job of their recovery within loops. +// A single mismatch token or missing token would force the parser to bail +// out of the entire rules surrounding the loop. So, for rule: +// +// classfunc : 'class' ID '{' member* '}' +// +// input with an extra token between members would force the parser to +// consume until it found the next class definition rather than the next +// member definition of the current class. +// +// This functionality cost a bit of effort because the parser has to +// compare the token set at the start of the loop and at each iteration. If for +// some reason speed is suffering for you, you can turn off this +// functionality by simply overriding this method as empty: +// +// { } +// +// [Jim Idle]: https://github.com/jimidle +func (d *DefaultErrorStrategy) Sync(recognizer Parser) { + // If already recovering, don't try to Sync + if d.InErrorRecoveryMode(recognizer) { + return + } + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + la := recognizer.GetTokenStream().LA(1) + + // try cheaper subset first might get lucky. seems to shave a wee bit off + nextTokens := recognizer.GetATN().NextTokens(s, nil) + if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { + return + } + + switch s.GetStateType() { + case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: + // Report error and recover if possible + if d.SingleTokenDeletion(recognizer) != nil { + return + } + recognizer.SetError(NewInputMisMatchException(recognizer)) + case ATNStatePlusLoopBack, ATNStateStarLoopBack: + d.ReportUnwantedToken(recognizer) + expecting := NewIntervalSet() + expecting.addSet(recognizer.GetExpectedTokens()) + whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer)) + d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) + default: + // do nothing if we can't identify the exact kind of ATN state + } +} + +// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException]. 
+// +// See also [ReportError] +func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { + tokens := recognizer.GetTokenStream() + var input string + if tokens != nil { + if e.startToken.GetTokenType() == TokenEOF { + input = "" + } else { + input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) + } + } else { + input = "" + } + msg := "no viable alternative at input " + d.escapeWSAndQuote(input) + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException] +// +// See also: [ReportError] +func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { + msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) + + " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException]. +// +// See also: [ReportError] +func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { + ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] + msg := "rule " + ruleName + " " + e.message + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// ReportUnwantedToken is called to report a syntax error that requires the removal +// of a token from the input stream. At the time d method is called, the +// erroneous symbol is the current LT(1) symbol and has not yet been +// removed from the input stream. When this method returns, +// recognizer is in error recovery mode. +// +// This method is called when singleTokenDeletion identifies +// single-token deletion as a viable recovery strategy for a mismatched +// input error. +// +// The default implementation simply returns if the handler is already in +// error recovery mode. Otherwise, it calls beginErrorCondition to +// enter error recovery mode, followed by calling +// [NotifyErrorListeners] +func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { + if d.InErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + tokenName := d.GetTokenErrorDisplay(t) + expecting := d.GetExpectedTokens(recognizer) + msg := "extraneous input " + tokenName + " expecting " + + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +// ReportMissingToken is called to report a syntax error which requires the +// insertion of a missing token into the input stream. At the time this +// method is called, the missing token has not yet been inserted. When this +// method returns, recognizer is in error recovery mode. +// +// This method is called when singleTokenInsertion identifies +// single-token insertion as a viable recovery strategy for a mismatched +// input error. +// +// The default implementation simply returns if the handler is already in +// error recovery mode. 
Otherwise, it calls beginErrorCondition to +// enter error recovery mode, followed by calling [NotifyErrorListeners] +func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { + if d.InErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + expecting := d.GetExpectedTokens(recognizer) + msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + + " at " + d.GetTokenErrorDisplay(t) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +// The RecoverInline default implementation attempts to recover from the mismatched input +// by using single token insertion and deletion as described below. If the +// recovery attempt fails, this method panics with [InputMisMatchException}. +// TODO: Not sure that panic() is the right thing to do here - JI +// +// # EXTRA TOKEN (single token deletion) +// +// LA(1) is not what we are looking for. If LA(2) has the +// right token, however, then assume LA(1) is some extra spurious +// token and delete it. Then consume and return the next token (which was +// the LA(2) token) as the successful result of the Match operation. +// +// # This recovery strategy is implemented by singleTokenDeletion +// +// # MISSING TOKEN (single token insertion) +// +// If current token -at LA(1) - is consistent with what could come +// after the expected LA(1) token, then assume the token is missing +// and use the parser's [TokenFactory] to create it on the fly. The +// “insertion” is performed by returning the created token as the successful +// result of the Match operation. +// +// This recovery strategy is implemented by [SingleTokenInsertion]. +// +// # Example +// +// For example, Input i=(3 is clearly missing the ')'. When +// the parser returns from the nested call to expr, it will have +// call the chain: +// +// stat → expr → atom +// +// and it will be trying to Match the ')' at this point in the +// derivation: +// +// : ID '=' '(' INT ')' ('+' atom)* ';' +// ^ +// +// The attempt to [Match] ')' will fail when it sees ';' and +// call [RecoverInline]. To recover, it sees that LA(1)==';' +// is in the set of tokens that can follow the ')' token reference +// in rule atom. It can assume that you forgot the ')'. +func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { + // SINGLE TOKEN DELETION + MatchedSymbol := d.SingleTokenDeletion(recognizer) + if MatchedSymbol != nil { + // we have deleted the extra token. + // now, move past ttype token as if all were ok + recognizer.Consume() + return MatchedSymbol + } + // SINGLE TOKEN INSERTION + if d.SingleTokenInsertion(recognizer) { + return d.GetMissingSymbol(recognizer) + } + // even that didn't work must panic the exception + recognizer.SetError(NewInputMisMatchException(recognizer)) + return nil +} + +// SingleTokenInsertion implements the single-token insertion inline error recovery +// strategy. It is called by [RecoverInline] if the single-token +// deletion strategy fails to recover from the mismatched input. If this +// method returns {@code true}, {@code recognizer} will be in error recovery +// mode. +// +// This method determines whether single-token insertion is viable by +// checking if the LA(1) input symbol could be successfully Matched +// if it were instead the LA(2) symbol. If this method returns +// {@code true}, the caller is responsible for creating and inserting a +// token with the correct type to produce this behavior.
+// +// This func returns true if single-token insertion is a viable recovery +// strategy for the current mismatched input. +func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { + currentSymbolType := recognizer.GetTokenStream().LA(1) + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token error recovery + // is free to conjure up and insert the missing token + atn := recognizer.GetInterpreter().atn + currentState := atn.states[recognizer.GetState()] + next := currentState.GetTransitions()[0].getTarget() + expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) + if expectingAtLL2.contains(currentSymbolType) { + d.ReportMissingToken(recognizer) + return true + } + + return false +} + +// SingleTokenDeletion implements the single-token deletion inline error recovery +// strategy. It is called by [RecoverInline] to attempt to recover +// from mismatched input. If this method returns nil, the parser and error +// handler state will not have changed. If this method returns non-nil, +// recognizer will not be in error recovery mode since the +// returned token was a successful Match. +// +// If the single-token deletion is successful, this method calls +// [ReportUnwantedToken] to Report the error, followed by +// [Consume] to actually “delete” the extraneous token. Then, +// before returning, [ReportMatch] is called to signal a successful +// Match. +// +// The func returns the successfully Matched [Token] instance if single-token +// deletion successfully recovers from the mismatched input, otherwise nil. +func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { + NextTokenType := recognizer.GetTokenStream().LA(2) + expecting := d.GetExpectedTokens(recognizer) + if expecting.contains(NextTokenType) { + d.ReportUnwantedToken(recognizer) + // print("recoverFromMisMatchedToken deleting " \ + // + str(recognizer.GetTokenStream().LT(1)) \ + // + " since " + str(recognizer.GetTokenStream().LT(2)) \ + // + " is what we want", file=sys.stderr) + recognizer.Consume() // simply delete extra token + // we want to return the token we're actually Matching + MatchedSymbol := recognizer.GetCurrentToken() + d.ReportMatch(recognizer) // we know current token is correct + return MatchedSymbol + } + + return nil +} + +// GetMissingSymbol conjures up a missing token during error recovery. +// +// The recognizer attempts to recover from single missing +// symbols. But, actions might refer to that missing symbol. +// For example: +// +// x=ID {f($x)}. +// +// The action clearly assumes +// that there has been an identifier Matched previously and that +// $x points at that token. If that token is missing, but +// the next token in the stream is what we want we assume that +// this token is missing, and we keep going. Because we +// have to return some token to replace the missing token, +// we have to conjure one up. This method gives the user control +// over the tokens returned for missing tokens. Mostly, +// you will want to create something special for identifier +// tokens. For literals such as '{' and ',', the default +// action in the parser or tree parser works. It simply creates +// a [CommonToken] of the appropriate type. The text will be the token name. +// If you need to change which tokens must be created by the lexer, +// override this method to create the appropriate tokens. 
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { + currentSymbol := recognizer.GetCurrentToken() + expecting := d.GetExpectedTokens(recognizer) + expectedTokenType := expecting.first() + var tokenText string + + if expectedTokenType == TokenEOF { + tokenText = "" + } else { + ln := recognizer.GetLiteralNames() + if expectedTokenType > 0 && expectedTokenType < len(ln) { + tokenText = "" + } else { + tokenText = "" // TODO: matches the JS impl + } + } + current := currentSymbol + lookback := recognizer.GetTokenStream().LT(-1) + if current.GetTokenType() == TokenEOF && lookback != nil { + current = lookback + } + + tf := recognizer.GetTokenFactory() + + return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn()) +} + +func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet { + return recognizer.GetExpectedTokens() +} + +// GetTokenErrorDisplay determines how a token should be displayed in an error message. +// The default is to display just the text, but during development you might +// want to have a lot of information spit out. Override this func in that case +// to use t.String() (which, for [CommonToken], dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a new type. +func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + return d.escapeWSAndQuote(s) +} + +func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + return "'" + s + "'" +} + +// GetErrorRecoverySet computes the error recovery set for the current rule. During +// rule invocation, the parser pushes the set of tokens that can +// follow that rule reference on the stack. This amounts to +// computing FIRST of what follows the rule reference in the +// enclosing rule. See LinearApproximator.FIRST(). +// +// This local follow set only includes tokens +// from within the rule i.e., the FIRST computation done by +// ANTLR stops at the end of a rule. +// +// # Example +// +// When you find a "no viable alt exception", the input is not +// consistent with any of the alternatives for rule r. The best +// thing to do is to consume tokens until you see something that +// can legally follow a call to r or any rule that called r. +// You don't want the exact set of viable next tokens because the +// input might just be missing a token--you might consume the +// rest of the input looking for one of the missing tokens. +// +// Consider the grammar: +// +// a : '[' b ']' +// | '(' b ')' +// ; +// +// b : c '^' INT +// ; +// +// c : ID +// | INT +// ; +// +// At each rule invocation, the set of tokens that could follow +// that rule is pushed on a stack. 
Here are the various +// context-sensitive follow sets: +// +// FOLLOW(b1_in_a) = FIRST(']') = ']' +// FOLLOW(b2_in_a) = FIRST(')') = ')' +// FOLLOW(c_in_b) = FIRST('^') = '^' +// +// Upon erroneous input “[]”, the call chain is +// +// a → b → c +// +// and, hence, the follow context stack is: +// +// Depth Follow set Start of rule execution +// 0 a (from main()) +// 1 ']' b +// 2 '^' c +// +// Notice that ')' is not included, because b would have to have +// been called from a different context in rule a for ')' to be +// included. +// +// For error recovery, we cannot consider FOLLOW(c) +// (context-sensitive or otherwise). We need the combined set of +// all context-sensitive FOLLOW sets - the set of all tokens that +// could follow any reference in the call chain. We need to +// reSync to one of those tokens. Note that FOLLOW(c)='^' and if +// we reSync'd to that token, we'd consume until EOF. We need to +// Sync to context-sensitive FOLLOWs for a, b, and c: +// +// {']','^'} +// +// In this case, for input "[]", LA(1) is ']' and in the set, so we would +// not consume anything. After printing an error, rule c would +// return normally. Rule b would not find the required '^' though. +// At this point, it gets a mismatched token error and panics an +// exception (since LA(1) is not in the viable following token +// set). The rule exception handler tries to recover, but finds +// the same recovery set and doesn't consume anything. Rule b +// exits normally returning to rule a. Now it finds the ']' (and +// with the successful Match exits errorRecovery mode). +// +// So, you can see that the parser walks up the call chain looking +// for the token that was a member of the recovery set. +// +// Errors are not generated in errorRecovery mode. +// +// ANTLR's error recovery mechanism is based upon original ideas: +// +// [Algorithms + Data Structures = Programs] by Niklaus Wirth and +// [A note on error recovery in recursive descent parsers]. +// +// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent +// Parsers] +// +// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead +// during parsing. 
Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs +// +// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905 +// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE +// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet { + atn := recognizer.GetInterpreter().atn + ctx := recognizer.GetParserRuleContext() + recoverSet := NewIntervalSet() + for ctx != nil && ctx.GetInvokingState() >= 0 { + // compute what follows who invoked us + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) + recoverSet.addSet(follow) + ctx = ctx.GetParent().(ParserRuleContext) + } + recoverSet.removeOne(TokenEpsilon) + return recoverSet +} + +// Consume tokens until one Matches the given token set.// +func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { + ttype := recognizer.GetTokenStream().LA(1) + for ttype != TokenEOF && !set.contains(ttype) { + recognizer.Consume() + ttype = recognizer.GetTokenStream().LA(1) + } +} + +// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors +// by immediately canceling the parse operation with a +// [ParseCancellationException]. The implementation ensures that the +// [ParserRuleContext//exception] field is set for all parse tree nodes +// that were not completed prior to encountering the error. +// +// This error strategy is useful in the following scenarios. +// +// - Two-stage parsing: This error strategy allows the first +// stage of two-stage parsing to immediately terminate if an error is +// encountered, and immediately fall back to the second stage. In addition to +// avoiding wasted work by attempting to recover from errors here, the empty +// implementation of [BailErrorStrategy.Sync] improves the performance of +// the first stage. +// +// - Silent validation: When syntax errors are not being +// Reported or logged, and the parse result is simply ignored if errors occur, +// the [BailErrorStrategy] avoids wasting work on recovering from errors +// when the result will be ignored either way. +// +// myparser.SetErrorHandler(NewBailErrorStrategy()) +// +// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)] +type BailErrorStrategy struct { + *DefaultErrorStrategy +} + +var _ ErrorStrategy = &BailErrorStrategy{} + +//goland:noinspection GoUnusedExportedFunction +func NewBailErrorStrategy() *BailErrorStrategy { + + b := new(BailErrorStrategy) + + b.DefaultErrorStrategy = NewDefaultErrorStrategy() + + return b +} + +// Recover Instead of recovering from exception e, re-panic it wrapped +// in a [ParseCancellationException] so it is not caught by the +// rule func catches. Use Exception.GetCause() to get the +// original [RecognitionException]. 
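A rough usage sketch, not part of the vendored diff: the two-stage pattern described above swaps in BailErrorStrategy for a fast first pass. Only NewBailErrorStrategy and ErrorStrategy from this hunk are used; the SetErrorHandler method is assumed to exist on generated parsers, as the comment above implies, so it is captured here by a local interface:

package main

import "github.com/antlr4-go/antlr/v4"

// errorHandlerSetter matches the SetErrorHandler call referenced in the
// BailErrorStrategy comment ("myparser.SetErrorHandler(NewBailErrorStrategy())");
// a local interface keeps this sketch independent of the runtime's exact
// Parser interface.
type errorHandlerSetter interface {
	SetErrorHandler(antlr.ErrorStrategy)
}

// enableBailMode is the first half of two-stage parsing: the first attempt
// bails out on any syntax error, and the caller retries with the default
// strategy (and full error reporting) only if this pass fails.
func enableBailMode(p errorHandlerSetter) {
	p.SetErrorHandler(antlr.NewBailErrorStrategy())
}

func main() {}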
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + context := recognizer.GetParserRuleContext() + for context != nil { + context.SetException(e) + if parent, ok := context.GetParent().(ParserRuleContext); ok { + context = parent + } else { + context = nil + } + } + recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly +} + +// RecoverInline makes sure we don't attempt to recover inline if the parser +// successfully recovers, it won't panic an exception. +func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { + b.Recover(recognizer, NewInputMisMatchException(recognizer)) + + return nil +} + +// Sync makes sure we don't attempt to recover from problems in sub-rules. +func (b *BailErrorStrategy) Sync(_ Parser) { +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/errors.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/errors.go new file mode 100644 index 000000000..8f0f2f601 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/errors.go @@ -0,0 +1,259 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just +// 3 kinds of errors: prediction errors, failed predicate errors, and +// mismatched input errors. In each case, the parser knows where it is +// in the input, where it is in the ATN, the rule invocation stack, +// and what kind of problem occurred. + +type RecognitionException interface { + GetOffendingToken() Token + GetMessage() string + GetInputStream() IntStream +} + +type BaseRecognitionException struct { + message string + recognizer Recognizer + offendingToken Token + offendingState int + ctx RuleContext + input IntStream +} + +func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { + + // todo + // Error.call(this) + // + // if (!!Error.captureStackTrace) { + // Error.captureStackTrace(this, RecognitionException) + // } else { + // stack := NewError().stack + // } + // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int + + t := new(BaseRecognitionException) + + t.message = message + t.recognizer = recognizer + t.input = input + t.ctx = ctx + + // The current Token when an error occurred. Since not all streams + // support accessing symbols by index, we have to track the {@link Token} + // instance itself. + // + t.offendingToken = nil + + // Get the ATN state number the parser was in at the time the error + // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the + // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match. + // + t.offendingState = -1 + if t.recognizer != nil { + t.offendingState = t.recognizer.GetState() + } + + return t +} + +func (b *BaseRecognitionException) GetMessage() string { + return b.message +} + +func (b *BaseRecognitionException) GetOffendingToken() Token { + return b.offendingToken +} + +func (b *BaseRecognitionException) GetInputStream() IntStream { + return b.input +} + +//

If the state number is not known, this method returns -1.
+ +// getExpectedTokens gets the set of input symbols which could potentially follow the +// previously Matched symbol at the time this exception was raised. +// +// If the set of expected tokens is not known and could not be computed, +// this method returns nil. +// +// The func returns the set of token types that could potentially follow the current +// state in the {ATN}, or nil if the information is not available. + +func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { + if b.recognizer != nil { + return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) + } + + return nil +} + +func (b *BaseRecognitionException) String() string { + return b.message +} + +type LexerNoViableAltException struct { + *BaseRecognitionException + + startIndex int + deadEndConfigs *ATNConfigSet +} + +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException { + + l := new(LexerNoViableAltException) + + l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) + + l.startIndex = startIndex + l.deadEndConfigs = deadEndConfigs + + return l +} + +func (l *LexerNoViableAltException) String() string { + symbol := "" + if l.startIndex >= 0 && l.startIndex < l.input.Size() { + symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) + } + return "LexerNoViableAltException" + symbol +} + +type NoViableAltException struct { + *BaseRecognitionException + + startToken Token + offendingToken Token + ctx ParserRuleContext + deadEndConfigs *ATNConfigSet +} + +// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths +// to take based upon the remaining input. It tracks the starting token +// of the offending input and also knows where the parser was +// in the various paths when the error. +// +// Reported by [ReportNoViableAlternative] +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { + + if ctx == nil { + ctx = recognizer.GetParserRuleContext() + } + + if offendingToken == nil { + offendingToken = recognizer.GetCurrentToken() + } + + if startToken == nil { + startToken = recognizer.GetCurrentToken() + } + + if input == nil { + input = recognizer.GetInputStream().(TokenStream) + } + + n := new(NoViableAltException) + n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) + + // Which configurations did we try at input.Index() that couldn't Match + // input.LT(1) + n.deadEndConfigs = deadEndConfigs + + // The token object at the start index the input stream might + // not be buffering tokens so get a reference to it. + // + // At the time the error occurred, of course the stream needs to keep a + // buffer of all the tokens, but later we might not have access to those. + n.startToken = startToken + n.offendingToken = offendingToken + + return n +} + +type InputMisMatchException struct { + *BaseRecognitionException +} + +// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as +// when the current input does not Match the expected token. 
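The accessors on RecognitionException above are what application code typically inspects when a syntax error is reported. The sketch below assumes the ErrorListener/DefaultErrorListener API from error_listener.go elsewhere in this runtime (not part of this hunk); the listener type and its formatting are illustrative only:

```go
package example

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// verboseListener embeds the runtime's no-op DefaultErrorListener and overrides
// SyntaxError to pull details out of the RecognitionException accessors defined
// above (GetMessage, GetOffendingToken).
type verboseListener struct {
	*antlr.DefaultErrorListener
}

func (v *verboseListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int, msg string, e antlr.RecognitionException) {
	if e != nil && e.GetOffendingToken() != nil {
		fmt.Printf("%d:%d syntax error near %q: %s\n", line, column, e.GetOffendingToken().GetText(), msg)
		return
	}
	fmt.Printf("%d:%d syntax error: %s\n", line, column, msg)
}

// Typical wiring on a generated parser p:
//
//	p.RemoveErrorListeners()
//	p.AddErrorListener(&verboseListener{DefaultErrorListener: antlr.NewDefaultErrorListener()})
```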
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { + + i := new(InputMisMatchException) + i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + i.offendingToken = recognizer.GetCurrentToken() + + return i + +} + +// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates +// occurs when normally parsing the alternative just like Matching a token. +// Disambiguating predicate evaluation occurs when we test a predicate during +// prediction. +type FailedPredicateException struct { + *BaseRecognitionException + + ruleIndex int + predicateIndex int + predicate string +} + +//goland:noinspection GoUnusedExportedFunction +func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { + + f := new(FailedPredicateException) + + f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + trans := s.GetTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + f.ruleIndex = trans2.ruleIndex + f.predicateIndex = trans2.predIndex + } else { + f.ruleIndex = 0 + f.predicateIndex = 0 + } + f.predicate = predicate + f.offendingToken = recognizer.GetCurrentToken() + + return f +} + +func (f *FailedPredicateException) formatMessage(predicate, message string) string { + if message != "" { + return message + } + + return "failed predicate: {" + predicate + "}?" +} + +type ParseCancellationException struct { +} + +func (p ParseCancellationException) GetOffendingToken() Token { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetMessage() string { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetInputStream() IntStream { + //TODO implement me + panic("implement me") +} + +func NewParseCancellationException() *ParseCancellationException { + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) + return new(ParseCancellationException) +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/file_stream.go new file mode 100644 index 000000000..5f65f809b --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/file_stream.go @@ -0,0 +1,67 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "os" +) + +// This is an InputStream that is loaded from a file all at once +// when you construct the object. 
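Both FileStream and the in-memory InputStream defined in input_stream.go below expose the same rune-based API. A small sketch using only functions from these two files; the sample text is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// An in-memory character stream; NewFileStream builds the same structure
	// but reads the whole file into its rune buffer up front.
	is := antlr.NewInputStream("select 42")

	fmt.Println(is.Size())        // 9: the size is counted in runes, not bytes
	fmt.Printf("%c\n", is.LA(1))  // s: LA(1) is the next character, returned as an int
	is.Consume()                  // advance past 's'
	fmt.Printf("%c\n", is.LA(-1)) // s: negative offsets look backwards
	fmt.Println(is.GetText(0, 5)) // "select": both bounds are inclusive here
}
```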
+ +type FileStream struct { + InputStream + filename string +} + +//goland:noinspection GoUnusedExportedFunction +func NewFileStream(fileName string) (*FileStream, error) { + + f, err := os.Open(fileName) + if err != nil { + return nil, err + } + + defer func(f *os.File) { + errF := f.Close() + if errF != nil { + } + }(f) + + reader := bufio.NewReader(f) + fInfo, err := f.Stat() + if err != nil { + return nil, err + } + + fs := &FileStream{ + InputStream: InputStream{ + index: 0, + name: fileName, + }, + filename: fileName, + } + + // Pre-build the buffer and read runes efficiently + // + fs.data = make([]rune, 0, fInfo.Size()) + for { + r, _, err := reader.ReadRune() + if err != nil { + break + } + fs.data = append(fs.data, r) + } + fs.size = len(fs.data) // Size in runes + + // All done. + // + return fs, nil +} + +func (f *FileStream) GetSourceName() string { + return f.filename +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/input_stream.go new file mode 100644 index 000000000..b737fe85f --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/input_stream.go @@ -0,0 +1,157 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "io" +) + +type InputStream struct { + name string + index int + data []rune + size int +} + +// NewIoStream creates a new input stream from the given io.Reader reader. +// Note that the reader is read completely into memory and so it must actually +// have a stopping point - you cannot pass in a reader on an open-ended source such +// as a socket for instance. +func NewIoStream(reader io.Reader) *InputStream { + + rReader := bufio.NewReader(reader) + + is := &InputStream{ + name: "", + index: 0, + } + + // Pre-build the buffer and read runes reasonably efficiently given that + // we don't exactly know how big the input is. 
+ // + is.data = make([]rune, 0, 512) + for { + r, _, err := rReader.ReadRune() + if err != nil { + break + } + is.data = append(is.data, r) + } + is.size = len(is.data) // number of runes + return is +} + +// NewInputStream creates a new input stream from the given string +func NewInputStream(data string) *InputStream { + + is := &InputStream{ + name: "", + index: 0, + data: []rune(data), // This is actually the most efficient way + } + is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too + return is +} + +func (is *InputStream) reset() { + is.index = 0 +} + +// Consume moves the input pointer to the next character in the input stream +func (is *InputStream) Consume() { + if is.index >= is.size { + // assert is.LA(1) == TokenEOF + panic("cannot consume EOF") + } + is.index++ +} + +// LA returns the character at the given offset from the start of the input stream +func (is *InputStream) LA(offset int) int { + + if offset == 0 { + return 0 // nil + } + if offset < 0 { + offset++ // e.g., translate LA(-1) to use offset=0 + } + pos := is.index + offset - 1 + + if pos < 0 || pos >= is.size { // invalid + return TokenEOF + } + + return int(is.data[pos]) +} + +// LT returns the character at the given offset from the start of the input stream +func (is *InputStream) LT(offset int) int { + return is.LA(offset) +} + +// Index returns the current offset in to the input stream +func (is *InputStream) Index() int { + return is.index +} + +// Size returns the total number of characters in the input stream +func (is *InputStream) Size() int { + return is.size +} + +// Mark does nothing here as we have entire buffer +func (is *InputStream) Mark() int { + return -1 +} + +// Release does nothing here as we have entire buffer +func (is *InputStream) Release(_ int) { +} + +// Seek the input point to the provided index offset +func (is *InputStream) Seek(index int) { + if index <= is.index { + is.index = index // just jump don't update stream state (line,...) 
+ return + } + // seek forward + is.index = intMin(index, is.size) +} + +// GetText returns the text from the input stream from the start to the stop index +func (is *InputStream) GetText(start int, stop int) string { + if stop >= is.size { + stop = is.size - 1 + } + if start >= is.size { + return "" + } + + return string(is.data[start : stop+1]) +} + +// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last +// character of the stop token +func (is *InputStream) GetTextFromTokens(start, stop Token) string { + if start != nil && stop != nil { + return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) + } + + return "" +} + +func (is *InputStream) GetTextFromInterval(i Interval) string { + return is.GetText(i.Start, i.Stop) +} + +func (*InputStream) GetSourceName() string { + return "" +} + +// String returns the entire input stream as a string +func (is *InputStream) String() string { + return string(is.data) +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/int_stream.go similarity index 100% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/int_stream.go diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/interval_set.go similarity index 82% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/interval_set.go index c1e155e81..cc5066067 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/interval_set.go @@ -14,20 +14,21 @@ type Interval struct { Stop int } -/* stop is not included! */ -func NewInterval(start, stop int) *Interval { - i := new(Interval) - - i.Start = start - i.Stop = stop - return i +// NewInterval creates a new interval with the given start and stop values. +func NewInterval(start, stop int) Interval { + return Interval{ + Start: start, + Stop: stop, + } } -func (i *Interval) Contains(item int) bool { +// Contains returns true if the given item is contained within the interval. +func (i Interval) Contains(item int) bool { return item >= i.Start && item < i.Stop } -func (i *Interval) String() string { +// String generates a string representation of the interval. +func (i Interval) String() string { if i.Start == i.Stop-1 { return strconv.Itoa(i.Start) } @@ -35,15 +36,18 @@ func (i *Interval) String() string { return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) } -func (i *Interval) length() int { +// Length returns the length of the interval. +func (i Interval) Length() int { return i.Stop - i.Start } +// IntervalSet represents a collection of [Intervals], which may be read-only. type IntervalSet struct { - intervals []*Interval + intervals []Interval readOnly bool } +// NewIntervalSet creates a new empty, writable, interval set. 
func NewIntervalSet() *IntervalSet { i := new(IntervalSet) @@ -54,6 +58,20 @@ func NewIntervalSet() *IntervalSet { return i } +func (i *IntervalSet) Equals(other *IntervalSet) bool { + if len(i.intervals) != len(other.intervals) { + return false + } + + for k, v := range i.intervals { + if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop { + return false + } + } + + return true +} + func (i *IntervalSet) first() int { if len(i.intervals) == 0 { return TokenInvalidType @@ -70,16 +88,16 @@ func (i *IntervalSet) addRange(l, h int) { i.addInterval(NewInterval(l, h+1)) } -func (i *IntervalSet) addInterval(v *Interval) { +func (i *IntervalSet) addInterval(v Interval) { if i.intervals == nil { - i.intervals = make([]*Interval, 0) + i.intervals = make([]Interval, 0) i.intervals = append(i.intervals, v) } else { // find insert pos for k, interval := range i.intervals { // distinct range -> insert if v.Stop < interval.Start { - i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...) return } else if v.Stop == interval.Start { i.intervals[k].Start = v.Start @@ -139,16 +157,16 @@ func (i *IntervalSet) contains(item int) bool { } func (i *IntervalSet) length() int { - len := 0 + iLen := 0 for _, v := range i.intervals { - len += v.length() + iLen += v.Length() } - return len + return iLen } -func (i *IntervalSet) removeRange(v *Interval) { +func (i *IntervalSet) removeRange(v Interval) { if v.Start == v.Stop-1 { i.removeOne(v.Start) } else if i.intervals != nil { @@ -162,7 +180,7 @@ func (i *IntervalSet) removeRange(v *Interval) { i.intervals[k] = NewInterval(ni.Start, v.Start) x := NewInterval(v.Stop, ni.Stop) // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) return } else if v.Start <= ni.Start && v.Stop >= ni.Stop { // i.intervals.splice(k, 1) @@ -199,7 +217,7 @@ func (i *IntervalSet) removeOne(v int) { x := NewInterval(ki.Start, v) ki.Start = v + 1 // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) return } } @@ -223,7 +241,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin return i.toIndexString() } -func (i *IntervalSet) GetIntervals() []*Interval { +func (i *IntervalSet) GetIntervals() []Interval { return i.intervals } diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/jcollect.go new file mode 100644 index 000000000..ceccd96d2 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/jcollect.go @@ -0,0 +1,685 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +import ( + "container/list" + "runtime/debug" + "sort" + "sync" +) + +// Collectable is an interface that a struct should implement if it is to be +// usable as a key in these collections. 
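This hunk turns Interval into a value type and exports Length. A minimal illustration of the conventions it documents: Contains treats Stop as exclusive, while String prints the inclusive upper bound:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	iv := antlr.NewInterval(3, 7) // now returns a value, not a pointer

	fmt.Println(iv.Contains(3)) // true
	fmt.Println(iv.Contains(7)) // false: Stop is exclusive in Contains
	fmt.Println(iv.Length())    // 4
	fmt.Println(iv)             // "3..6": String reports the inclusive upper bound
}
```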
+type Collectable[T any] interface { + Hash() int + Equals(other Collectable[T]) bool +} + +type Comparator[T any] interface { + Hash1(o T) int + Equals2(T, T) bool +} + +type CollectionSource int +type CollectionDescriptor struct { + SybolicName string + Description string +} + +const ( + UnknownCollection CollectionSource = iota + ATNConfigLookupCollection + ATNStateCollection + DFAStateCollection + ATNConfigCollection + PredictionContextCollection + SemanticContextCollection + ClosureBusyCollection + PredictionVisitedCollection + MergeCacheCollection + PredictionContextCacheCollection + AltSetCollection + ReachSetCollection +) + +var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{ + UnknownCollection: { + SybolicName: "UnknownCollection", + Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.", + }, + ATNConfigCollection: { + SybolicName: "ATNConfigCollection", + Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." + + "For instance, it is used to store the results of the closure() operation in the ATN.", + }, + ATNConfigLookupCollection: { + SybolicName: "ATNConfigLookupCollection", + Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." + + "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.", + }, + ATNStateCollection: { + SybolicName: "ATNStateCollection", + Description: "ATNState collection. This is used to store the states of the ATN.", + }, + DFAStateCollection: { + SybolicName: "DFAStateCollection", + Description: "DFAState collection. This is used to store the states of the DFA.", + }, + PredictionContextCollection: { + SybolicName: "PredictionContextCollection", + Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.", + }, + SemanticContextCollection: { + SybolicName: "SemanticContextCollection", + Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.", + }, + ClosureBusyCollection: { + SybolicName: "ClosureBusyCollection", + Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." + + "It stores ATNConfigs that are currently being processed in the closure() operation.", + }, + PredictionVisitedCollection: { + SybolicName: "PredictionVisitedCollection", + Description: "A map that records whether we have visited a particular context when searching through cached entries.", + }, + MergeCacheCollection: { + SybolicName: "MergeCacheCollection", + Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.", + }, + PredictionContextCacheCollection: { + SybolicName: "PredictionContextCacheCollection", + Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.", + }, + AltSetCollection: { + SybolicName: "AltSetCollection", + Description: "Used to eliminate duplicate alternatives in an ATN config set.", + }, + ReachSetCollection: { + SybolicName: "ReachSetCollection", + Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.", + }, +} + +// JStore implements a container that allows the use of a struct to calculate the key +// for a collection of values akin to map. 
This is not meant to be a full-blown HashMap but just +// serve the needs of the ANTLR Go runtime. +// +// For ease of porting the logic of the runtime from the master target (Java), this collection +// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() +// function as the key. The values are stored in a standard go map which internally is a form of hashmap +// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with +// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't +// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and +// we understand the requirements, then this is fine - this is not a general purpose collection. +type JStore[T any, C Comparator[T]] struct { + store map[int][]T + len int + comparator Comparator[T] + stats *JStatRec +} + +func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] { + + if comparator == nil { + panic("comparator cannot be nil") + } + + s := &JStore[T, C]{ + store: make(map[int][]T, 1), + comparator: comparator, + } + if collectStats { + s.stats = &JStatRec{ + Source: cType, + Description: desc, + } + + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + s.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(s.stats) + } + return s +} + +// Put will store given value in the collection. Note that the key for storage is generated from +// the value itself - this is specifically because that is what ANTLR needs - this would not be useful +// as any kind of general collection. +// +// If the key has a hash conflict, then the value will be added to the slice of values associated with the +// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is +// tested by calling the equals() method on the key. +// +// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true +// +// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. +func (s *JStore[T, C]) Put(value T) (v T, exists bool) { + + if collectStats { + s.stats.Puts++ + } + kh := s.comparator.Hash1(value) + + var hClash bool + for _, v1 := range s.store[kh] { + hClash = true + if s.comparator.Equals2(value, v1) { + if collectStats { + s.stats.PutHits++ + s.stats.PutHashConflicts++ + } + return v1, true + } + if collectStats { + s.stats.PutMisses++ + } + } + if collectStats && hClash { + s.stats.PutHashConflicts++ + } + s.store[kh] = append(s.store[kh], value) + + if collectStats { + if len(s.store[kh]) > s.stats.MaxSlotSize { + s.stats.MaxSlotSize = len(s.store[kh]) + } + } + s.len++ + if collectStats { + s.stats.CurSize = s.len + if s.len > s.stats.MaxSize { + s.stats.MaxSize = s.len + } + } + return value, false +} + +// Get will return the value associated with the key - the type of the key is the same type as the value +// which would not generally be useful, but this is a specific thing for ANTLR where the key is +// generated using the object we are going to store. 
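From outside the package, JStore can be used with any type as long as a Comparator supplies the Hash1/Equals2 pair. The sketch below sticks to the exported API shown in this hunk (NewJStore, Put, Len, the Comparator interface, and the CollectionSource constants); the toy key type and its deliberately weak hash are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// ruleKey is a toy stand-in for the ATNConfig-like values the runtime stores;
// we want deduplication by semantic equality, not by Go map key identity.
type ruleKey struct {
	name string
	alt  int
}

// ruleComparator supplies the Hash1/Equals2 pair JStore uses in place of Go's
// built-in map key semantics. The hash is deliberately weak so that two
// distinct keys land in the same bucket and exercise conflict handling.
type ruleComparator struct{}

func (ruleComparator) Hash1(k ruleKey) int       { return len(k.name)*31 + k.alt }
func (ruleComparator) Equals2(a, b ruleKey) bool { return a == b }

func main() {
	s := antlr.NewJStore[ruleKey, ruleComparator](ruleComparator{}, antlr.UnknownCollection, "example store")

	_, existed := s.Put(ruleKey{"expr", 1})
	fmt.Println(existed) // false: newly stored

	_, existed = s.Put(ruleKey{"expr", 1})
	fmt.Println(existed) // true: the equal value already in the store is returned

	_, existed = s.Put(ruleKey{"stmt", 1}) // same weak hash, different key
	fmt.Println(existed, s.Len())          // false 2
}
```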
+func (s *JStore[T, C]) Get(key T) (T, bool) { + if collectStats { + s.stats.Gets++ + } + kh := s.comparator.Hash1(key) + var hClash bool + for _, v := range s.store[kh] { + hClash = true + if s.comparator.Equals2(key, v) { + if collectStats { + s.stats.GetHits++ + s.stats.GetHashConflicts++ + } + return v, true + } + if collectStats { + s.stats.GetMisses++ + } + } + if collectStats { + if hClash { + s.stats.GetHashConflicts++ + } + s.stats.GetNoEnt++ + } + return key, false +} + +// Contains returns true if the given key is present in the store +func (s *JStore[T, C]) Contains(key T) bool { + _, present := s.Get(key) + return present +} + +func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { + vs := make([]T, 0, len(s.store)) + for _, v := range s.store { + vs = append(vs, v...) + } + sort.Slice(vs, func(i, j int) bool { + return less(vs[i], vs[j]) + }) + + return vs +} + +func (s *JStore[T, C]) Each(f func(T) bool) { + for _, e := range s.store { + for _, v := range e { + f(v) + } + } +} + +func (s *JStore[T, C]) Len() int { + return s.len +} + +func (s *JStore[T, C]) Values() []T { + vs := make([]T, 0, len(s.store)) + for _, e := range s.store { + vs = append(vs, e...) + } + return vs +} + +type entry[K, V any] struct { + key K + val V +} + +type JMap[K, V any, C Comparator[K]] struct { + store map[int][]*entry[K, V] + len int + comparator Comparator[K] + stats *JStatRec +} + +func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] { + m := &JMap[K, V, C]{ + store: make(map[int][]*entry[K, V], 1), + comparator: comparator, + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) { + if collectStats { + m.stats.Puts++ + } + kh := m.comparator.Hash1(key) + + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.PutHits++ + m.stats.PutHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.PutMisses++ + } + } + if collectStats { + if hClash { + m.stats.PutHashConflicts++ + } + } + m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) + if collectStats { + if len(m.store[kh]) > m.stats.MaxSlotSize { + m.stats.MaxSlotSize = len(m.store[kh]) + } + } + m.len++ + if collectStats { + m.stats.CurSize = m.len + if m.len > m.stats.MaxSize { + m.stats.MaxSize = m.len + } + } + return val, false +} + +func (m *JMap[K, V, C]) Values() []V { + vs := make([]V, 0, len(m.store)) + for _, e := range m.store { + for _, v := range e { + vs = append(vs, v.val) + } + } + return vs +} + +func (m *JMap[K, V, C]) Get(key K) (V, bool) { + if collectStats { + m.stats.Gets++ + } + var none V + kh := m.comparator.Hash1(key) + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.GetHits++ + m.stats.GetHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.GetMisses++ + } + } + if collectStats { + if hClash { + m.stats.GetHashConflicts++ + } + m.stats.GetNoEnt++ + } + return none, false +} + +func (m *JMap[K, V, C]) Len() int { + return m.len +} + +func (m *JMap[K, V, C]) Delete(key K) { + kh := m.comparator.Hash1(key) + for i, e := range m.store[kh] { + if 
m.comparator.Equals2(e.key, key) { + m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) + m.len-- + return + } + } +} + +func (m *JMap[K, V, C]) Clear() { + m.store = make(map[int][]*entry[K, V]) +} + +type JPCMap struct { + store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]] + size int + stats *JStatRec +} + +func NewJPCMap(cType CollectionSource, desc string) *JPCMap { + m := &JPCMap{ + store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + // Do we have a map stored by k1? + // + m2, present := pcm.store.Get(k1) + if present { + if collectStats { + pcm.stats.GetHits++ + } + // We found a map of values corresponding to k1, so now we need to look up k2 in that map + // + return m2.Get(k2) + } + if collectStats { + pcm.stats.GetMisses++ + } + return nil, false +} + +func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) { + + if collectStats { + pcm.stats.Puts++ + } + // First does a map already exist for k1? + // + if m2, present := pcm.store.Get(k1); present { + if collectStats { + pcm.stats.PutHits++ + } + _, present = m2.Put(k2, v) + if !present { + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + } + } else { + // No map found for k1, so we create it, add in our value, then store is + // + if collectStats { + pcm.stats.PutMisses++ + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry") + } else { + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry") + } + + m2.Put(k2, v) + pcm.store.Put(k1, m2) + pcm.size++ + } +} + +type JPCMap2 struct { + store map[int][]JPCEntry + size int + stats *JStatRec +} + +type JPCEntry struct { + k1, k2, v *PredictionContext +} + +func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 { + m := &JPCMap2{ + store: make(map[int][]JPCEntry, 1000), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func dHash(k1, k2 *PredictionContext) int { + return k1.cachedHash*31 + k2.cachedHash +} + +func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.GetHits++ + pcm.stats.GetHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.GetMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.GetHashConflicts++ + } + pcm.stats.GetNoEnt++ + 
} + return nil, false +} + +func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Puts++ + } + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.PutHits++ + pcm.stats.PutHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.PutMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.PutHashConflicts++ + } + } + pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v}) + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + return nil, false +} + +type VisitEntry struct { + k *PredictionContext + v *PredictionContext +} +type VisitRecord struct { + store map[*PredictionContext]*PredictionContext + len int + stats *JStatRec +} + +type VisitList struct { + cache *list.List + lock sync.RWMutex +} + +var visitListPool = VisitList{ + cache: list.New(), + lock: sync.RWMutex{}, +} + +// NewVisitRecord returns a new VisitRecord instance from the pool if available. +// Note that this "map" uses a pointer as a key because we are emulating the behavior of +// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal, +// which means is the key the same reference to an object rather than is it .equals() to another +// object. +func NewVisitRecord() *VisitRecord { + visitListPool.lock.Lock() + el := visitListPool.cache.Front() + defer visitListPool.lock.Unlock() + var vr *VisitRecord + if el == nil { + vr = &VisitRecord{ + store: make(map[*PredictionContext]*PredictionContext), + } + if collectStats { + vr.stats = &JStatRec{ + Source: PredictionContextCacheCollection, + Description: "VisitRecord", + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + vr.stats.CreateStack = debug.Stack() + } + } + } else { + vr = el.Value.(*VisitRecord) + visitListPool.cache.Remove(el) + vr.store = make(map[*PredictionContext]*PredictionContext) + } + if collectStats { + Statistics.AddJStatRec(vr.stats) + } + return vr +} + +func (vr *VisitRecord) Release() { + vr.len = 0 + vr.store = nil + if collectStats { + vr.stats.MaxSize = 0 + vr.stats.CurSize = 0 + vr.stats.Gets = 0 + vr.stats.GetHits = 0 + vr.stats.GetMisses = 0 + vr.stats.GetHashConflicts = 0 + vr.stats.GetNoEnt = 0 + vr.stats.Puts = 0 + vr.stats.PutHits = 0 + vr.stats.PutMisses = 0 + vr.stats.PutHashConflicts = 0 + vr.stats.MaxSlotSize = 0 + } + visitListPool.lock.Lock() + visitListPool.cache.PushBack(vr) + visitListPool.lock.Unlock() +} + +func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Gets++ + } + v := vr.store[k] + if v != nil { + if collectStats { + vr.stats.GetHits++ + } + return v, true + } + if collectStats { + vr.stats.GetNoEnt++ + } + return nil, false +} + +func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Puts++ + } + vr.store[k] = v + vr.len++ + if collectStats { + vr.stats.CurSize = vr.len + if vr.len > vr.stats.MaxSize { + vr.stats.MaxSize = vr.len + } + } + return v, false +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/lexer.go similarity index 78% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go rename to 
constraint/vendor/github.com/antlr4-go/antlr/v4/lexer.go index 6533f0516..3c7896a91 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/lexer.go @@ -69,7 +69,7 @@ func NewBaseLexer(input CharStream) *BaseLexer { // create a single token. NextToken will return l object after // Matching lexer rule(s). If you subclass to allow multiple token // emissions, then set l to the last token to be Matched or - // something nonnil so that the auto token emit mechanism will not + // something non nil so that the auto token emit mechanism will not // emit another token. lexer.token = nil @@ -111,6 +111,7 @@ const ( LexerSkip = -3 ) +//goland:noinspection GoUnusedConst const ( LexerDefaultTokenChannel = TokenDefaultChannel LexerHidden = TokenHiddenChannel @@ -118,7 +119,7 @@ const ( LexerMaxCharValue = 0x10FFFF ) -func (b *BaseLexer) reset() { +func (b *BaseLexer) Reset() { // wack Lexer state variables if b.input != nil { b.input.Seek(0) // rewind the input @@ -176,7 +177,7 @@ func (b *BaseLexer) safeMatch() (ret int) { return b.Interpreter.Match(b.input, b.mode) } -// Return a token from l source i.e., Match a token on the char stream. +// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream. func (b *BaseLexer) NextToken() Token { if b.input == nil { panic("NextToken requires a non-nil input stream.") @@ -205,9 +206,8 @@ func (b *BaseLexer) NextToken() Token { continueOuter := false for { b.thetype = TokenInvalidType - ttype := LexerSkip - ttype = b.safeMatch() + ttype := b.safeMatch() if b.input.LA(1) == TokenEOF { b.hitEOF = true @@ -234,12 +234,11 @@ func (b *BaseLexer) NextToken() Token { } } -// Instruct the lexer to Skip creating a token for current lexer rule -// and look for another token. NextToken() knows to keep looking when -// a lexer rule finishes with token set to SKIPTOKEN. Recall that +// Skip instructs the lexer to Skip creating a token for current lexer rule +// and look for another token. [NextToken] knows to keep looking when +// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that // if token==nil at end of any token rule, it creates one for you // and emits it. -// / func (b *BaseLexer) Skip() { b.thetype = LexerSkip } @@ -248,23 +247,29 @@ func (b *BaseLexer) More() { b.thetype = LexerMore } +// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode +// will be in force. func (b *BaseLexer) SetMode(m int) { b.mode = m } +// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the +// current lexer mode to the supplied mode m. func (b *BaseLexer) PushMode(m int) { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("pushMode " + strconv.Itoa(m)) } b.modeStack.Push(b.mode) b.mode = m } +// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to +// return to. 
func (b *BaseLexer) PopMode() int { if len(b.modeStack) == 0 { panic("Empty Stack") } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) } i, _ := b.modeStack.Pop() @@ -280,7 +285,7 @@ func (b *BaseLexer) inputStream() CharStream { func (b *BaseLexer) SetInputStream(input CharStream) { b.input = nil b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} - b.reset() + b.Reset() b.input = input b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} } @@ -289,20 +294,19 @@ func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { return b.tokenFactorySourcePair } -// By default does not support multiple emits per NextToken invocation -// for efficiency reasons. Subclass and override l method, NextToken, -// and GetToken (to push tokens into a list and pull from that list -// rather than a single variable as l implementation does). -// / +// EmitToken by default does not support multiple emits per [NextToken] invocation +// for efficiency reasons. Subclass and override this func, [NextToken], +// and [GetToken] (to push tokens into a list and pull from that list +// rather than a single variable as this implementation does). func (b *BaseLexer) EmitToken(token Token) { b.token = token } -// The standard method called to automatically emit a token at the +// Emit is the standard method called to automatically emit a token at the // outermost lexical rule. The token object should point into the // char buffer start..stop. If there is a text override in 'text', -// use that to set the token's text. Override l method to emit -// custom Token objects or provide a Newfactory. +// use that to set the token's text. Override this method to emit +// custom [Token] objects or provide a new factory. // / func (b *BaseLexer) Emit() Token { t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) @@ -310,6 +314,7 @@ func (b *BaseLexer) Emit() Token { return t } +// EmitEOF emits an EOF token. By default, this is the last token emitted func (b *BaseLexer) EmitEOF() Token { cpos := b.GetCharPositionInLine() lpos := b.GetLine() @@ -318,6 +323,7 @@ func (b *BaseLexer) EmitEOF() Token { return eof } +// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned. func (b *BaseLexer) GetCharPositionInLine() int { return b.Interpreter.GetCharPositionInLine() } @@ -334,13 +340,12 @@ func (b *BaseLexer) SetType(t int) { b.thetype = t } -// What is the index of the current character of lookahead?/// +// GetCharIndex returns the index of the current character of lookahead func (b *BaseLexer) GetCharIndex() int { return b.input.Index() } -// Return the text Matched so far for the current token or any text override. -// Set the complete text of l token it wipes any previous changes to the text. +// GetText returns the text Matched so far for the current token or any text override. func (b *BaseLexer) GetText() string { if b.text != "" { return b.text @@ -349,17 +354,20 @@ func (b *BaseLexer) GetText() string { return b.Interpreter.GetText(b.input) } +// SetText sets the complete text of this token; it wipes any previous changes to the text. func (b *BaseLexer) SetText(text string) { b.text = text } +// GetATN returns the ATN used by the lexer. 
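The lexer API in this hunk (NextToken, Skip, Emit, the mode stack) is normally driven through a grammar-generated lexer. A hedged sketch of the usual drain loop; the generated constructor name mentioned in the comment is hypothetical and depends on the grammar:

```go
package example

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// drainTokens pulls tokens one at a time until EOF using the NextToken
// contract documented in this hunk. In real code lex would be a
// grammar-generated lexer, e.g. something like
// mygrammar.NewMyGrammarLexer(antlr.NewInputStream(src)); that constructor
// name is hypothetical and depends on your grammar.
func drainTokens(lex antlr.Lexer) []antlr.Token {
	var tokens []antlr.Token
	for {
		t := lex.NextToken()
		if t.GetTokenType() == antlr.TokenEOF {
			return tokens
		}
		fmt.Printf("%d %q\n", t.GetTokenType(), t.GetText())
		tokens = append(tokens, t)
	}
}
```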
func (b *BaseLexer) GetATN() *ATN { return b.Interpreter.ATN() } -// Return a list of all Token objects in input char stream. -// Forces load of all tokens. Does not include EOF token. -// / +// GetAllTokens returns a list of all [Token] objects in input char stream. +// Forces a load of all tokens that can be made from the input char stream. +// +// Does not include EOF token. func (b *BaseLexer) GetAllTokens() []Token { vl := b.Virt tokens := make([]Token, 0) @@ -398,11 +406,13 @@ func (b *BaseLexer) getCharErrorDisplay(c rune) string { return "'" + b.getErrorDisplayForChar(c) + "'" } -// Lexers can normally Match any char in it's vocabulary after Matching -// a token, so do the easy thing and just kill a character and hope +// Recover can normally Match any char in its vocabulary after Matching +// a token, so here we do the easy thing and just kill a character and hope // it all works out. You can instead use the rule invocation stack // to do sophisticated error recovery if you are in a fragment rule. -// / +// +// In general, lexers should not need to recover and should have rules that cover any eventuality, such as +// a character that makes no sense to the recognizer. func (b *BaseLexer) Recover(re RecognitionException) { if b.input.LA(1) != TokenEOF { if _, ok := re.(*LexerNoViableAltException); ok { diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go similarity index 78% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go index 111656c29..eaa7393e0 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go @@ -7,14 +7,29 @@ package antlr import "strconv" const ( - LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. - LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. - LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. - LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. - LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. - LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. - LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. - LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action. + // LexerActionTypeChannel represents a [LexerChannelAction] action. + LexerActionTypeChannel = 0 + + // LexerActionTypeCustom represents a [LexerCustomAction] action. + LexerActionTypeCustom = 1 + + // LexerActionTypeMode represents a [LexerModeAction] action. + LexerActionTypeMode = 2 + + // LexerActionTypeMore represents a [LexerMoreAction] action. + LexerActionTypeMore = 3 + + // LexerActionTypePopMode represents a [LexerPopModeAction] action. + LexerActionTypePopMode = 4 + + // LexerActionTypePushMode represents a [LexerPushModeAction] action. + LexerActionTypePushMode = 5 + + // LexerActionTypeSkip represents a [LexerSkipAction] action. + LexerActionTypeSkip = 6 + + // LexerActionTypeType represents a [LexerTypeAction] action. 
+ LexerActionTypeType = 7 ) type LexerAction interface { @@ -39,7 +54,7 @@ func NewBaseLexerAction(action int) *BaseLexerAction { return la } -func (b *BaseLexerAction) execute(lexer Lexer) { +func (b *BaseLexerAction) execute(_ Lexer) { panic("Not implemented") } @@ -52,17 +67,19 @@ func (b *BaseLexerAction) getIsPositionDependent() bool { } func (b *BaseLexerAction) Hash() int { - return b.actionType + h := murmurInit(0) + h = murmurUpdate(h, b.actionType) + return murmurFinish(h, 1) } func (b *BaseLexerAction) Equals(other LexerAction) bool { - return b == other + return b.actionType == other.getActionType() } -// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. +// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip]. // -//

<p>The {@code Skip} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
+// The Skip command does not have any parameters, so this action is +// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE]. type LexerSkipAction struct { *BaseLexerAction } @@ -73,17 +90,22 @@ func NewLexerSkipAction() *LexerSkipAction { return la } -// Provides a singleton instance of l parameterless lexer action. +// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action. var LexerSkipActionINSTANCE = NewLexerSkipAction() func (l *LexerSkipAction) execute(lexer Lexer) { lexer.Skip() } +// String returns a string representation of the current [LexerSkipAction]. func (l *LexerSkipAction) String() string { return "skip" } +func (b *LexerSkipAction) Equals(other LexerAction) bool { + return other.getActionType() == LexerActionTypeSkip +} + // Implements the {@code type} lexer action by calling {@link Lexer//setType} // // with the assigned type. @@ -125,11 +147,10 @@ func (l *LexerTypeAction) String() string { return "actionType(" + strconv.Itoa(l.thetype) + ")" } -// Implements the {@code pushMode} lexer action by calling -// {@link Lexer//pushMode} with the assigned mode. +// LexerPushModeAction implements the pushMode lexer action by calling +// [Lexer.pushMode] with the assigned mode. type LexerPushModeAction struct { *BaseLexerAction - mode int } @@ -169,10 +190,10 @@ func (l *LexerPushModeAction) String() string { return "pushMode(" + strconv.Itoa(l.mode) + ")" } -// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. +// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode]. // -//

<p>The {@code popMode} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
+// The popMode command does not have any parameters, so this action is +// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE] type LexerPopModeAction struct { *BaseLexerAction } @@ -224,11 +245,10 @@ func (l *LexerMoreAction) String() string { return "more" } -// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with +// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with // the assigned mode. type LexerModeAction struct { *BaseLexerAction - mode int } @@ -322,16 +342,19 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool { } } -// Implements the {@code channel} lexer action by calling -// {@link Lexer//setChannel} with the assigned channel. -// Constructs a New{@code channel} action with the specified channel value. -// @param channel The channel value to pass to {@link Lexer//setChannel}. +// LexerChannelAction implements the channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. type LexerChannelAction struct { *BaseLexerAction - channel int } +// NewLexerChannelAction creates a channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. func NewLexerChannelAction(channel int) *LexerChannelAction { l := new(LexerChannelAction) l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) @@ -375,25 +398,22 @@ func (l *LexerChannelAction) String() string { // lexer actions, see {@link LexerActionExecutor//append} and // {@link LexerActionExecutor//fixOffsetBeforeMatch}.
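LexerActionExecutorappend, referenced just above and defined in lexer_action_executor.go later in this diff, never mutates an existing executor; it builds a new one with a freshly folded hash. A small sketch using only the singleton actions and executor functions added in this diff:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// Build an executor around one singleton action, then derive a second
	// executor with an extra action appended; the original is left untouched
	// and each executor carries its own precomputed hash.
	skipOnly := antlr.NewLexerActionExecutor([]antlr.LexerAction{antlr.LexerSkipActionINSTANCE})
	withPop := antlr.LexerActionExecutorappend(skipOnly, antlr.LexerPopModeActionINSTANCE)

	fmt.Println(skipOnly.Hash())
	fmt.Println(withPop.Hash())
	fmt.Println(skipOnly.Equals(withPop)) // false: the action sequences differ
}
```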

-// Constructs a Newindexed custom action by associating a character offset
-// with a {@link LexerAction}.
-//
-// <p>Note: This class is only required for lexer actions for which
-// {@link LexerAction//isPositionDependent} returns {@code true}.</p>
-// -// @param offset The offset into the input {@link CharStream}, relative to -// the token start index, at which the specified lexer action should be -// executed. -// @param action The lexer action to execute at a particular offset in the -// input {@link CharStream}. type LexerIndexedCustomAction struct { *BaseLexerAction - offset int lexerAction LexerAction isPositionDependent bool } +// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset +// with a [LexerAction]. +// +// Note: This class is only required for lexer actions for which +// [LexerAction.isPositionDependent] returns true. +// +// The offset points into the input [CharStream], relative to +// the token start index, at which the specified lexerAction should be +// executed. func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { l := new(LexerIndexedCustomAction) diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go new file mode 100644 index 000000000..dfc28c32b --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go @@ -0,0 +1,173 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "golang.org/x/exp/slices" + +// Represents an executor for a sequence of lexer actions which traversed during +// the Matching operation of a lexer rule (token). +// +//

<p>The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the {@link DFA} created for the lexer.</p>
+ +type LexerActionExecutor struct { + lexerActions []LexerAction + cachedHash int +} + +func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { + + if lexerActions == nil { + lexerActions = make([]LexerAction, 0) + } + + l := new(LexerActionExecutor) + + l.lexerActions = lexerActions + + // Caches the result of {@link //hashCode} since the hash code is an element + // of the performance-critical {@link ATNConfig//hashCode} operation. + l.cachedHash = murmurInit(0) + for _, a := range lexerActions { + l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) + } + l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions)) + + return l +} + +// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for +// the input [LexerActionExecutor] followed by a specified +// [LexerAction]. +// TODO: This does not match the Java code +func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { + if lexerActionExecutor == nil { + return NewLexerActionExecutor([]LexerAction{lexerAction}) + } + + return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) +} + +// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset +// for position-dependent lexer actions. +// +// Normally, when the executor encounters lexer actions where +// [LexerAction.isPositionDependent] returns true, it calls +// [IntStream.Seek] on the input [CharStream] to set the input +// position to the end of the current token. This behavior provides +// for efficient [DFA] representation of lexer actions which appear at the end +// of a lexer rule, even when the lexer rule Matches a variable number of +// characters. +// +// Prior to traversing a Match transition in the [ATN], the current offset +// from the token start index is assigned to all position-dependent lexer +// actions which have not already been assigned a fixed offset. By storing +// the offsets relative to the token start index, the [DFA] representation of +// lexer actions which appear in the middle of tokens remains efficient due +// to sharing among tokens of the same Length, regardless of their absolute +// position in the input stream. +// +// If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns this instance. +// +// The offset is assigned to all position-dependent +// lexer actions which do not already have offsets assigned. +// +// The func returns a [LexerActionExecutor] that stores input stream offsets +// for all position-dependent lexer actions. +func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { + var updatedLexerActions []LexerAction + for i := 0; i < len(l.lexerActions); i++ { + _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) + if l.lexerActions[i].getIsPositionDependent() && !ok { + if updatedLexerActions == nil { + updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions)) + updatedLexerActions = append(updatedLexerActions, l.lexerActions...) + } + updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) + } + } + if updatedLexerActions == nil { + return l + } + + return NewLexerActionExecutor(updatedLexerActions) +} + +// Execute the actions encapsulated by l executor within the context of a +// particular {@link Lexer}. +// +//

<p>This method calls {@link IntStream//seek} to set the position of the
+// {@code input} {@link CharStream} prior to calling
+// {@link LexerAction//execute} on a position-dependent action. Before the
+// method returns, the input position will be restored to the same position
+// it was in when the method was invoked.</p>
+// +// @param lexer The lexer instance. +// @param input The input stream which is the source for the current token. +// When l method is called, the current {@link IntStream//index} for +// {@code input} should be the start of the following token, i.e. 1 +// character past the end of the current token. +// @param startIndex The token start index. This value may be passed to +// {@link IntStream//seek} to set the {@code input} position to the beginning +// of the token. +// / +func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { + requiresSeek := false + stopIndex := input.Index() + + defer func() { + if requiresSeek { + input.Seek(stopIndex) + } + }() + + for i := 0; i < len(l.lexerActions); i++ { + lexerAction := l.lexerActions[i] + if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { + offset := la.offset + input.Seek(startIndex + offset) + lexerAction = la.lexerAction + requiresSeek = (startIndex + offset) != stopIndex + } else if lexerAction.getIsPositionDependent() { + input.Seek(stopIndex) + requiresSeek = false + } + lexerAction.execute(lexer) + } +} + +func (l *LexerActionExecutor) Hash() int { + if l == nil { + // TODO: Why is this here? l should not be nil + return 61 + } + + // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode + return l.cachedHash +} + +func (l *LexerActionExecutor) Equals(other interface{}) bool { + if l == other { + return true + } + othert, ok := other.(*LexerActionExecutor) + if !ok { + return false + } + if othert == nil { + return false + } + if l.cachedHash != othert.cachedHash { + return false + } + if len(l.lexerActions) != len(othert.lexerActions) { + return false + } + return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { + return i.Equals(j) + }) +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go similarity index 80% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go index c573b7521..fe938b025 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go @@ -10,10 +10,8 @@ import ( "strings" ) +//goland:noinspection GoUnusedGlobalVariable var ( - LexerATNSimulatorDebug = false - LexerATNSimulatorDFADebug = false - LexerATNSimulatorMinDFAEdge = 0 LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN @@ -32,11 +30,11 @@ type ILexerATNSimulator interface { } type LexerATNSimulator struct { - *BaseATNSimulator + BaseATNSimulator recog Lexer predictionMode int - mergeCache DoubleDict + mergeCache *JPCMap2 startIndex int Line int CharPositionInLine int @@ -46,27 +44,35 @@ type LexerATNSimulator struct { } func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { - l := new(LexerATNSimulator) - - l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + l := &LexerATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } l.decisionToDFA = decisionToDFA l.recog = recog + // The current token's starting index into the character stream. 
// Shared across DFA to ATN simulation in case the ATN fails and the // DFA did not have a previous accept state. In l case, we use the // ATN-generated exception object. l.startIndex = -1 - // line number 1..n within the input/// + + // line number 1..n within the input l.Line = 1 + // The index of the character relative to the beginning of the line - // 0..n-1/// + // 0..n-1 l.CharPositionInLine = 0 + l.mode = LexerDefaultMode + // Used during DFA/ATN exec to record the most recent accept configuration // info l.prevAccept = NewSimState() - // done + return l } @@ -114,7 +120,7 @@ func (l *LexerATNSimulator) reset() { func (l *LexerATNSimulator) MatchATN(input CharStream) int { startState := l.atn.modeToStartState[l.mode] - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) } oldMode := l.mode @@ -126,7 +132,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { predict := l.execATN(input, next) - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) } return predict @@ -134,18 +140,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("start state closure=" + ds0.configs.String()) } if ds0.isAcceptState { - // allow zero-length tokens + // allow zero-Length tokens l.captureSimState(l.prevAccept, input, ds0) } t := input.LA(1) s := ds0 // s is current/from DFA state for { // while more work - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("execATN loop starting closure: " + s.configs.String()) } @@ -188,7 +194,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { } } t = input.LA(1) - s = target // flip current DFA target becomes Newsrc/from state + s = target // flip current DFA target becomes new src/from state } return l.failOrAccept(l.prevAccept, input, s.configs, t) @@ -214,43 +220,39 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState return nil } target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) - if LexerATNSimulatorDebug && target != nil { + if runtimeConfig.lexerATNSimulatorDebug && target != nil { fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) } return target } -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. +// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the +// computed state and corresponding edge to the [DFA]. // -// @param input The input stream -// @param s The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, l method -// returns {@link //ERROR}. +// The func returns the computed target [DFA] state for the given input symbol t. +// If this does not lead to a valid [DFA] state, this method +// returns ATNSimulatorError. 
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { reach := NewOrderedATNConfigSet() // if we don't find an existing DFA state // Fill reach starting from closure, following t transitions - l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) + l.getReachableConfigSet(input, s.configs, reach, t) if len(reach.configs) == 0 { // we got nowhere on t from s if !reach.hasSemanticContext { // we got nowhere on t, don't panic out l knowledge it'd - // cause a failover from DFA later. + // cause a fail-over from DFA later. l.addDFAEdge(s, t, ATNSimulatorError, nil) } // stop when we can't Match any more char return ATNSimulatorError } // Add an edge from s to target DFA found/created for reach - return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) + return l.addDFAEdge(s, t, nil, reach) } -func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int { if l.prevAccept.dfaState != nil { lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) @@ -265,34 +267,35 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) } -// Given a starting configuration set, figure out all ATN configurations -// we can reach upon input {@code t}. Parameter {@code reach} is a return -// parameter. -func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { +// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations +// we can reach upon input t. +// +// Parameter reach is a return parameter. 
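computeTargetState above builds the lexer DFA lazily: it simulates the ATN only when no cached edge exists, and it records ATNSimulatorError for dead ends so the same failing symbol is never re-simulated. The idea, detached from the real DFA and ATN types (all names below are ours), is roughly:

package main

import "fmt"

type dfaNode struct{ id int }

// errNode is a sentinel meaning "this edge is already known to go nowhere".
var errNode = &dfaNode{id: -1}

type lazyDFA struct {
	edges   map[[2]int]*dfaNode
	compute func(from *dfaNode, sym int) *dfaNode // stand-in for ATN simulation
}

// target reuses a cached edge when one exists; otherwise it computes the
// target once, caching even the failure so dead ends are not recomputed.
func (d *lazyDFA) target(from *dfaNode, sym int) *dfaNode {
	key := [2]int{from.id, sym}
	if to, ok := d.edges[key]; ok {
		return to
	}
	to := d.compute(from, sym)
	if to == nil {
		to = errNode
	}
	d.edges[key] = to
	return to
}

func main() {
	d := &lazyDFA{
		edges: map[[2]int]*dfaNode{},
		compute: func(from *dfaNode, sym int) *dfaNode {
			fmt.Println("simulating edge from", from.id, "on symbol", sym)
			if sym == 'x' {
				return nil // no viable configurations on this symbol
			}
			return &dfaNode{id: from.id + 1}
		},
	}
	s0 := &dfaNode{id: 0}
	d.target(s0, 'a')                         // simulated once
	d.target(s0, 'a')                         // served from the edge cache
	fmt.Println(d.target(s0, 'x') == errNode) // true, and the failure is cached too
}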
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) { // l is used to Skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule + // than a runtimeConfig that already reached an accept state for the same rule SkipAlt := ATNInvalidAltNumber - for _, cfg := range closure.GetItems() { - currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) - if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { + for _, cfg := range closure.configs { + currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt + if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision { continue } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { - fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) } for _, trans := range cfg.GetState().GetTransitions() { target := l.getReachableTarget(trans, t) if target != nil { - lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor + lexerActionExecutor := cfg.lexerActionExecutor if lexerActionExecutor != nil { lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) } - treatEOFAsEpsilon := (t == TokenEOF) - config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) + treatEOFAsEpsilon := t == TokenEOF + config := NewLexerATNConfig3(cfg, target, lexerActionExecutor) if l.closure(input, config, reach, currentAltReachedAcceptState, true, treatEOFAsEpsilon) { // any remaining configs for l alt have a lower priority @@ -305,7 +308,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNC } func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Printf("ACTION %v\n", lexerActionExecutor) } // seek to after last char in token @@ -325,7 +328,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState return nil } -func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet { configs := NewOrderedATNConfigSet() for i := 0; i < len(p.GetTransitions()); i++ { target := p.GetTransitions()[i].getTarget() @@ -336,25 +339,24 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord return configs } -// Since the alternatives within any lexer decision are ordered by -// preference, l method stops pursuing the closure as soon as an accept +// closure since the alternatives within any lexer decision are ordered by +// preference, this method stops pursuing the closure as soon as an accept // state is reached. After the first accept state is reached by depth-first -// search from {@code config}, all other (potentially reachable) states for -// l rule would have a lower priority. +// search from runtimeConfig, all other (potentially reachable) states for +// this rule would have a lower priority. // -// @return {@code true} if an accept state is reached, otherwise -// {@code false}. -func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, +// The func returns true if an accept state is reached. 
+func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - if LexerATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") } _, ok := config.state.(*RuleStopState) if ok { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { if l.recog != nil { fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) } else { @@ -401,10 +403,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, co } // side-effect: can alter configs.hasSemanticContext -func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, - configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition, + configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig { - var cfg *LexerATNConfig + var cfg *ATNConfig if trans.getSerializationType() == TransitionRULE { @@ -435,10 +437,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC pt := trans.(*PredicateTransition) - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) } - configs.SetHasSemanticContext(true) + configs.hasSemanticContext = true if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { cfg = NewLexerATNConfig4(config, trans.getTarget()) } @@ -449,7 +451,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC // TODO: if the entry rule is invoked recursively, some // actions may be executed during the recursive call. The // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In l case, the config needs to be + // isEmpty() is false. In this case, the config needs to be // split into two contexts - one with just the empty path // and another with everything but the empty path. // Unfortunately, the current algorithm does not allow @@ -476,26 +478,18 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC return cfg } -// Evaluate a predicate specified in the lexer. +// evaluatePredicate eEvaluates a predicate specified in the lexer. // -//

-// If {@code speculative} is {@code true}, l method was called before
-// {@link //consume} for the Matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
-// to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}.

+// If speculative is true, this method was called before +// [consume] for the Matched character. This method should call +// [consume] before evaluating the predicate to ensure position +// sensitive values, including [GetText], [GetLine], +// and [GetColumn], properly reflect the current +// lexer state. This method should restore input and the simulator +// to the original state before returning, i.e. undo the actions made by the +// call to [Consume]. // -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / +// The func returns true if the specified predicate evaluates to true. func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { // assume true if no recognizer was provided if l.recog == nil { @@ -527,7 +521,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream settings.dfaState = dfaState } -func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState { if to == nil && cfgs != nil { // leading to l call, ATNConfigSet.hasSemanticContext is used as a // marker indicating dynamic predicate evaluation makes l edge @@ -539,10 +533,9 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // TJP notes: next time through the DFA, we see a pred again and eval. // If that gets us to a previously created (but dangling) DFA // state, we can continue in pure DFA mode from there. - // / - suppressEdge := cfgs.HasSemanticContext() - cfgs.SetHasSemanticContext(false) - + // + suppressEdge := cfgs.hasSemanticContext + cfgs.hasSemanticContext = false to = l.addDFAState(cfgs, true) if suppressEdge { @@ -554,7 +547,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // Only track edges within the DFA bounds return to } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) } l.atn.edgeMu.Lock() @@ -572,13 +565,12 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // configurations already. This method also detects the first // configuration containing an ATN rule stop state. Later, when // traversing the DFA, we will know which rule to accept. 
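The evaluatePredicate contract described above amounts to: optionally consume one character so position-sensitive getters are accurate, run the predicate, then roll the simulator back. A simplified standalone sketch of speculative evaluation with rollback, not the runtime's actual implementation, might be:

package main

import "fmt"

type cursor struct {
	text []rune
	pos  int
	line int
	col  int
}

func (c *cursor) consume() {
	if c.text[c.pos] == '\n' {
		c.line++
		c.col = 0
	} else {
		c.col++
	}
	c.pos++
}

// evalSpeculative advances past the matched character so pred sees the
// "current" position, then restores pos, line and col before returning.
func evalSpeculative(c *cursor, pred func(*cursor) bool) bool {
	savedPos, savedLine, savedCol := c.pos, c.line, c.col
	defer func() {
		c.pos, c.line, c.col = savedPos, savedLine, savedCol
	}()
	c.consume()
	return pred(c)
}

func main() {
	c := &cursor{text: []rune("ab\nc"), line: 1}
	ok := evalSpeculative(c, func(c *cursor) bool { return c.col == 1 })
	fmt.Println(ok, c.pos, c.line, c.col) // true 0 1 0, state fully restored
}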
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState { +func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState { proposed := NewDFAState(-1, configs) - var firstConfigWithRuleStopState ATNConfig - - for _, cfg := range configs.GetItems() { + var firstConfigWithRuleStopState *ATNConfig + for _, cfg := range configs.configs { _, ok := cfg.GetState().(*RuleStopState) if ok { @@ -588,14 +580,14 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) } if firstConfigWithRuleStopState != nil { proposed.isAcceptState = true - proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor + proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) } dfa := l.decisionToDFA[l.mode] l.atn.stateMu.Lock() defer l.atn.stateMu.Unlock() - existing, present := dfa.states.Get(proposed) + existing, present := dfa.Get(proposed) if present { // This state was already present, so just return it. @@ -605,10 +597,11 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) // We need to add the new state // - proposed.stateNumber = dfa.states.Len() - configs.SetReadOnly(true) + proposed.stateNumber = dfa.Len() + configs.readOnly = true + configs.configLookup = nil // Not needed now proposed.configs = configs - dfa.states.Put(proposed) + dfa.Put(proposed) } if !suppressEdge { dfa.setS0(proposed) @@ -620,7 +613,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA { return l.decisionToDFA[mode] } -// Get the text Matched so far for the current token. +// GetText returns the text [Match]ed so far for the current token. func (l *LexerATNSimulator) GetText(input CharStream) string { // index is first lookahead char, don't include. return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go new file mode 100644 index 000000000..4955ac876 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go @@ -0,0 +1,218 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type LL1Analyzer struct { + atn *ATN +} + +func NewLL1Analyzer(atn *ATN) *LL1Analyzer { + la := new(LL1Analyzer) + la.atn = atn + return la +} + +const ( + // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit + // a predicate during analysis if + // + // seeThruPreds==false + LL1AnalyzerHitPred = TokenInvalidType +) + +// * +// Calculates the SLL(1) expected lookahead set for each outgoing transition +// of an {@link ATNState}. The returned array has one element for each +// outgoing transition in {@code s}. If the closure from transition +// i leads to a semantic predicate before Matching a symbol, the +// element at index i of the result will be {@code nil}. +// +// @param s the ATN state +// @return the expected symbols for each outgoing transition of {@code s}. 
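addDFAState proposes a state, asks the DFA's store whether an equal one already exists, and either reuses it or numbers and inserts the proposal. The same interning pattern can be sketched with a plain map keyed by a canonical string; the real runtime hashes ATN configuration sets instead, so treat this purely as an illustration:

package main

import (
	"fmt"
	"sort"
	"strings"
)

type dfaState struct {
	number  int
	configs []string // stand-in for an ATN configuration set
}

type store struct {
	states map[string]*dfaState
}

// key canonicalises the configuration set so order does not matter.
func key(configs []string) string {
	c := append([]string(nil), configs...)
	sort.Strings(c)
	return strings.Join(c, "|")
}

// intern returns an existing equal state, or numbers and stores the proposal.
func (s *store) intern(proposed *dfaState) *dfaState {
	k := key(proposed.configs)
	if existing, ok := s.states[k]; ok {
		return existing
	}
	proposed.number = len(s.states)
	s.states[k] = proposed
	return proposed
}

func main() {
	s := &store{states: map[string]*dfaState{}}
	a := s.intern(&dfaState{configs: []string{"1:2", "3:1"}})
	b := s.intern(&dfaState{configs: []string{"3:1", "1:2"}}) // same set, different order
	fmt.Println(a.number, b.number, a == b)                   // 0 0 true
}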
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { + if s == nil { + return nil + } + count := len(s.GetTransitions()) + look := make([]*IntervalSet, count) + for alt := 0; alt < count; alt++ { + + look[alt] = NewIntervalSet() + lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy") + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) + + // Wipe out lookahead for la alternative if we found nothing, + // or we had a predicate when we !seeThruPreds + if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { + look[alt] = nil + } + } + return look +} + +// Look computes the set of tokens that can follow s in the [ATN] in the +// specified ctx. +// +// If ctx is nil and the end of the rule containing +// s is reached, [EPSILON] is added to the result set. +// +// If ctx is not nil and the end of the outermost rule is +// reached, [EOF] is added to the result set. +// +// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a +// [BlockEndState] to detect epsilon paths through a closure. +// +// Parameter ctx is the complete parser context, or nil if the context +// should be ignored +// +// The func returns the set of tokens that can follow s in the [ATN] in the +// specified ctx. +func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { + r := NewIntervalSet() + var lookContext *PredictionContext + if ctx != nil { + lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) + } + la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"), + NewBitSet(), true, true) + return r +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//
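getDecisionLookahead above computes one token set per outgoing transition and discards any alternative whose set is empty or was only reachable through a predicate. A toy version of that post-processing step, with token sets modelled as plain maps, might look like:

package main

import "fmt"

const hitPred = -1 // sentinel token meaning "blocked by a semantic predicate"

// lookaheadSets builds one token set per alternative and, like
// getDecisionLookahead, wipes out alternatives whose set is empty or that can
// only be reached through a predicate.
func lookaheadSets(perAlt [][]int) []map[int]bool {
	out := make([]map[int]bool, len(perAlt))
	for i, toks := range perAlt {
		set := map[int]bool{}
		for _, t := range toks {
			set[t] = true
		}
		if len(set) == 0 || set[hitPred] {
			out[i] = nil // no usable SLL(1) lookahead for this alternative
			continue
		}
		out[i] = set
	}
	return out
}

func main() {
	sets := lookaheadSets([][]int{{10, 11}, {}, {12, hitPred}})
	fmt.Println(sets) // [map[10:true 11:true] map[] map[]], the nil sets print as map[]
}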

+// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
+// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
+// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
+// {@code true} and {@code stopState} or the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.

+// +// @param s the ATN state. +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx The outer context, or {@code nil} if the outer context should +// not be used. +// @param look The result lookahead set. +// @param lookBusy A set used for preventing epsilon closures in the ATN +// from causing a stack overflow. Outside code should pass +// {@code NewSet} for la argument. +// @param calledRuleStack A set used for preventing left recursion in the +// ATN from causing a stack overflow. Outside code should pass +// {@code NewBitSet()} for la argument. +// @param seeThruPreds {@code true} to true semantic predicates as +// implicitly {@code true} and "see through them", otherwise {@code false} +// to treat semantic predicates as opaque and add {@link //HitPred} to the +// result if one is encountered. +// @param addEOF Add {@link Token//EOF} to the result if the end of the +// outermost context is reached. This parameter has no effect if {@code ctx} +// is {@code nil}. + +func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { + + returnState := la.atn.states[ctx.getReturnState(i)] + la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} + +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { + + c := NewATNConfig6(s, 0, ctx) + + if lookBusy.Contains(c) { + return + } + + _, present := lookBusy.Put(c) + if present { + return + + } + if s == stopState { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + } + + _, ok := s.(*RuleStopState) + + if ok { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + + if ctx.pcType != PredictionContextEmpty { + removed := calledRuleStack.contains(s.GetRuleIndex()) + defer func() { + if removed { + calledRuleStack.add(s.GetRuleIndex()) + } + }() + calledRuleStack.remove(s.GetRuleIndex()) + // run thru all possible stack tops in ctx + for i := 0; i < ctx.length(); i++ { + returnState := la.atn.states[ctx.getReturnState(i)] + la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) + } + return + } + } + + n := len(s.GetTransitions()) + + for i := 0; i < n; i++ { + t := s.GetTransitions()[i] + + if t1, ok := t.(*RuleTransition); ok { + if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { + continue + } + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) + } else if t2, ok := t.(AbstractPredicateTransition); ok { + if seeThruPreds { + la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else { + look.addOne(LL1AnalyzerHitPred) + } + } else if t.getIsEpsilon() { + la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else if _, ok := t.(*WildcardTransition); ok { + look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) + } else { + set := t.getLabel() + if set != nil { + if _, ok := 
t.(*NotSetTransition); ok { + set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) + } + look.addSet(set) + } + } + } +} + +func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + + defer func() { + calledRuleStack.remove(t1.getTarget().GetRuleIndex()) + }() + + calledRuleStack.add(t1.getTarget().GetRuleIndex()) + la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go new file mode 100644 index 000000000..923c7b52c --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go @@ -0,0 +1,47 @@ +//go:build !antlr.stats + +package antlr + +// This file is compiled when the build configuration antlr.stats is not enabled. +// which then allows the compiler to optimize out all the code that is not used. +const collectStats = false + +// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled. +type goRunStats struct { +} + +var Statistics = &goRunStats{} + +func (s *goRunStats) AddJStatRec(_ *JStatRec) { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) CollectionAnomalies() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Reset() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Report(dir string, prefix string) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func (s *goRunStats) Analyze() { + // Do nothing - compiler will optimize this out (hopefully) +} + +type statsOption func(*goRunStats) error + +func (s *goRunStats) Configure(options ...statsOption) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + return nil + } +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/parser.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/parser.go new file mode 100644 index 000000000..fb57ac15d --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/parser.go @@ -0,0 +1,700 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
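nostatistics.go is selected by the //go:build !antlr.stats constraint, so the no-op Statistics shim is what normally gets compiled; presumably a collecting implementation is built when the tag is supplied. The same compile-time swap can be reproduced in any module with a pair of files; the package, file names and tag below are ours, only the //go:build syntax and the go build -tags flag are standard Go:

// stats_off.go: compiled by default because the tag is absent.

//go:build !mystats

package metrics

// collectStats lets callers guard expensive bookkeeping with a compile-time constant.
const collectStats = false

func Record(event string) {} // no-op shim, analogous to goRunStats above

// stats_on.go: compiled only when the tag is set.

//go:build mystats

package metrics

import "log"

const collectStats = true

func Record(event string) { log.Println("recorded:", event) }

Building with go build -tags mystats picks the logging variant; a plain go build compiles the no-op file, which mirrors how the runtime only pays for statistics when antlr.stats is enabled.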
+ +package antlr + +import ( + "fmt" + "strconv" +) + +type Parser interface { + Recognizer + + GetInterpreter() *ParserATNSimulator + + GetTokenStream() TokenStream + GetTokenFactory() TokenFactory + GetParserRuleContext() ParserRuleContext + SetParserRuleContext(ParserRuleContext) + Consume() Token + GetParseListeners() []ParseTreeListener + + GetErrorHandler() ErrorStrategy + SetErrorHandler(ErrorStrategy) + GetInputStream() IntStream + GetCurrentToken() Token + GetExpectedTokens() *IntervalSet + NotifyErrorListeners(string, Token, RecognitionException) + IsExpectedToken(int) bool + GetPrecedence() int + GetRuleInvocationStack(ParserRuleContext) []string +} + +type BaseParser struct { + *BaseRecognizer + + Interpreter *ParserATNSimulator + BuildParseTrees bool + + input TokenStream + errHandler ErrorStrategy + precedenceStack IntStack + ctx ParserRuleContext + + tracer *TraceListener + parseListeners []ParseTreeListener + _SyntaxErrors int +} + +// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error +// recovery stuff. +// +//goland:noinspection GoUnusedExportedFunction +func NewBaseParser(input TokenStream) *BaseParser { + + p := new(BaseParser) + + p.BaseRecognizer = NewBaseRecognizer() + + // The input stream. + p.input = nil + + // The error handling strategy for the parser. The default value is a new + // instance of {@link DefaultErrorStrategy}. + p.errHandler = NewDefaultErrorStrategy() + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + + // The ParserRuleContext object for the currently executing rule. + // p.is always non-nil during the parsing process. + p.ctx = nil + + // Specifies whether the parser should construct a parse tree during + // the parsing process. The default value is {@code true}. + p.BuildParseTrees = true + + // When setTrace(true) is called, a reference to the + // TraceListener is stored here, so it can be easily removed in a + // later call to setTrace(false). The listener itself is + // implemented as a parser listener so p.field is not directly used by + // other parser methods. + p.tracer = nil + + // The list of ParseTreeListener listeners registered to receive + // events during the parse. + p.parseListeners = nil + + // The number of syntax errors Reported during parsing. p.value is + // incremented each time NotifyErrorListeners is called. + p._SyntaxErrors = 0 + p.SetInputStream(input) + + return p +} + +// This field maps from the serialized ATN string to the deserialized [ATN] with +// bypass alternatives. +// +// [ATNDeserializationOptions.isGenerateRuleBypassTransitions] +// +//goland:noinspection GoUnusedGlobalVariable +var bypassAltsAtnCache = make(map[string]int) + +// reset the parser's state// +func (p *BaseParser) reset() { + if p.input != nil { + p.input.Seek(0) + } + p.errHandler.reset(p) + p.ctx = nil + p._SyntaxErrors = 0 + p.SetTrace(nil) + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + if p.Interpreter != nil { + p.Interpreter.reset() + } +} + +func (p *BaseParser) GetErrorHandler() ErrorStrategy { + return p.errHandler +} + +func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { + p.errHandler = e +} + +// Match current input symbol against {@code ttype}. If the symbol type +// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are +// called to complete the Match process. +// +//

+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

+// +// @param ttype the token type to Match +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// {@code ttype} and the error strategy could not recover from the +// mismatched symbol + +func (p *BaseParser) Match(ttype int) Token { + + t := p.GetCurrentToken() + + if t.GetTokenType() == ttype { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.HasError() { + return nil + } + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + + // we must have conjured up a new token during single token + // insertion if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + + return t +} + +// Match current input symbol as a wildcard. If the symbol type Matches +// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} +// and {@link //consume} are called to complete the Match process. +// +//

+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

+// +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// a wildcard and the error strategy could not recover from the mismatched +// symbol + +func (p *BaseParser) MatchWildcard() Token { + t := p.GetCurrentToken() + if t.GetTokenType() > 0 { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a new token during single token + // insertion if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + return t +} + +func (p *BaseParser) GetParserRuleContext() ParserRuleContext { + return p.ctx +} + +func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { + p.ctx = v +} + +func (p *BaseParser) GetParseListeners() []ParseTreeListener { + if p.parseListeners == nil { + return make([]ParseTreeListener, 0) + } + return p.parseListeners +} + +// AddParseListener registers listener to receive events during the parsing process. +// +// To support output-preserving grammar transformations (including but not +// limited to left-recursion removal, automated left-factoring, and +// optimized code generation), calls to listener methods during the parse +// may differ substantially from calls made by +// [ParseTreeWalker.DEFAULT] used after the parse is complete. In +// particular, rule entry and exit events may occur in a different order +// during the parse than after the parser. In addition, calls to certain +// rule entry methods may be omitted. +// +// With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same. +// +// - Alterations to the grammar used to generate code may change the +// behavior of the listener calls. +// - Alterations to the command line options passed to ANTLR 4 when +// generating the parser may change the behavior of the listener calls. +// - Changing the version of the ANTLR Tool used to generate the parser +// may change the behavior of the listener calls. +func (p *BaseParser) AddParseListener(listener ParseTreeListener) { + if listener == nil { + panic("listener") + } + if p.parseListeners == nil { + p.parseListeners = make([]ParseTreeListener, 0) + } + p.parseListeners = append(p.parseListeners, listener) +} + +// RemoveParseListener removes listener from the list of parse listeners. +// +// If listener is nil or has not been added as a parse +// listener, this func does nothing. +func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { + + if p.parseListeners != nil { + + idx := -1 + for i, v := range p.parseListeners { + if v == listener { + idx = i + break + } + } + + if idx == -1 { + return + } + + // remove the listener from the slice + p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) + + if len(p.parseListeners) == 0 { + p.parseListeners = nil + } + } +} + +// Remove all parse listeners. +func (p *BaseParser) removeParseListeners() { + p.parseListeners = nil +} + +// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event. +func (p *BaseParser) TriggerEnterRuleEvent() { + if p.parseListeners != nil { + ctx := p.ctx + for _, listener := range p.parseListeners { + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) + } + } +} + +// TriggerExitRuleEvent notifies any parse listeners of an exit rule event. 
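AddParseListener above promises deterministic listener calls, and the TriggerExitRuleEvent implementation that follows walks the listener slice in reverse so exit events mirror enter events. A small standalone sketch of that symmetric dispatch, with our own listener interface rather than ANTLR's ParseTreeListener, is:

package main

import "fmt"

type listener interface {
	Enter(rule string)
	Exit(rule string)
}

type printer struct{ name string }

func (p printer) Enter(rule string) { fmt.Println(p.name, "enter", rule) }
func (p printer) Exit(rule string)  { fmt.Println(p.name, "exit", rule) }

type dispatcher struct{ listeners []listener }

func (d *dispatcher) triggerEnter(rule string) {
	for _, l := range d.listeners { // registration order
		l.Enter(rule)
	}
}

func (d *dispatcher) triggerExit(rule string) {
	for i := len(d.listeners) - 1; i >= 0; i-- { // reverse order, mirroring enter
		d.listeners[i].Exit(rule)
	}
}

func main() {
	d := &dispatcher{listeners: []listener{printer{"A"}, printer{"B"}}}
	d.triggerEnter("expr") // A then B
	d.triggerExit("expr")  // B then A
}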
+func (p *BaseParser) TriggerExitRuleEvent() { + if p.parseListeners != nil { + // reverse order walk of listeners + ctx := p.ctx + l := len(p.parseListeners) - 1 + + for i := range p.parseListeners { + listener := p.parseListeners[l-i] + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) + } + } +} + +func (p *BaseParser) GetInterpreter() *ParserATNSimulator { + return p.Interpreter +} + +func (p *BaseParser) GetATN() *ATN { + return p.Interpreter.atn +} + +func (p *BaseParser) GetTokenFactory() TokenFactory { + return p.input.GetTokenSource().GetTokenFactory() +} + +// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens. +func (p *BaseParser) setTokenFactory(factory TokenFactory) { + p.input.GetTokenSource().setTokenFactory(factory) +} + +// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it +// lazily. +func (p *BaseParser) GetATNWithBypassAlts() { + + // TODO - Implement this? + panic("Not implemented!") + + // serializedAtn := p.getSerializedATN() + // if (serializedAtn == nil) { + // panic("The current parser does not support an ATN with bypass alternatives.") + // } + // result := p.bypassAltsAtnCache[serializedAtn] + // if (result == nil) { + // deserializationOptions := NewATNDeserializationOptions(nil) + // deserializationOptions.generateRuleBypassTransitions = true + // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) + // p.bypassAltsAtnCache[serializedAtn] = result + // } + // return result +} + +// The preferred method of getting a tree pattern. For example, here's a +// sample use: +// +//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+//
+ +//goland:noinspection GoUnusedParameter +func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { + + panic("NewParseTreePatternMatcher not implemented!") + // + // if (lexer == nil) { + // if (p.GetTokenStream() != nil) { + // tokenSource := p.GetTokenStream().GetTokenSource() + // if _, ok := tokenSource.(ILexer); ok { + // lexer = tokenSource + // } + // } + // } + // if (lexer == nil) { + // panic("Parser can't discover a lexer to use") + // } + + // m := NewParseTreePatternMatcher(lexer, p) + // return m.compile(pattern, patternRuleIndex) +} + +func (p *BaseParser) GetInputStream() IntStream { + return p.GetTokenStream() +} + +func (p *BaseParser) SetInputStream(input TokenStream) { + p.SetTokenStream(input) +} + +func (p *BaseParser) GetTokenStream() TokenStream { + return p.input +} + +// SetTokenStream installs input as the token stream and resets the parser. +func (p *BaseParser) SetTokenStream(input TokenStream) { + p.input = nil + p.reset() + p.input = input +} + +// GetCurrentToken returns the current token at LT(1). +// +// [Match] needs to return the current input symbol, which gets put +// into the label for the associated token ref e.g., x=ID. +func (p *BaseParser) GetCurrentToken() Token { + return p.input.LT(1) +} + +func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { + if offendingToken == nil { + offendingToken = p.GetCurrentToken() + } + p._SyntaxErrors++ + line := offendingToken.GetLine() + column := offendingToken.GetColumn() + listener := p.GetErrorListenerDispatch() + listener.SyntaxError(p, offendingToken, line, column, msg, err) +} + +func (p *BaseParser) Consume() Token { + o := p.GetCurrentToken() + if o.GetTokenType() != TokenEOF { + p.GetInputStream().Consume() + } + hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 + if p.BuildParseTrees || hasListener { + if p.errHandler.InErrorRecoveryMode(p) { + node := p.ctx.AddErrorNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitErrorNode(node) + } + } + + } else { + node := p.ctx.AddTokenNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitTerminal(node) + } + } + } + // node.invokingState = p.state + } + + return o +} + +func (p *BaseParser) addContextToParseTree() { + // add current context to parent if we have a parent + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) + } +} + +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) { + p.SetState(state) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.BuildParseTrees { + p.addContextToParseTree() + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() + } +} + +func (p *BaseParser) ExitRule() { + p.ctx.SetStop(p.input.LT(-1)) + // trigger event on ctx, before it reverts to parent + if p.parseListeners != nil { + p.TriggerExitRuleEvent() + } + p.SetState(p.ctx.GetInvokingState()) + if p.ctx.GetParent() != nil { + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } else { + p.ctx = nil + } +} + +func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { + localctx.SetAltNumber(altNum) + // if we have a new localctx, make sure we replace existing ctx + // that is previous child of parse tree + if p.BuildParseTrees && p.ctx != localctx { + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() + p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) + } + } + p.ctx = localctx 
+} + +// Get the precedence level for the top-most precedence rule. +// +// @return The precedence level for the top-most precedence rule, or -1 if +// the parser context is not nested within a precedence rule. + +func (p *BaseParser) GetPrecedence() int { + if len(p.precedenceStack) == 0 { + return -1 + } + + return p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) { + p.SetState(state) + p.precedenceStack.Push(precedence) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +// +// Like {@link //EnterRule} but for recursive rules. + +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) { + previous := p.ctx + previous.SetParent(localctx) + previous.SetInvokingState(state) + previous.SetStop(p.input.LT(-1)) + + p.ctx = localctx + p.ctx.SetStart(previous.GetStart()) + if p.BuildParseTrees { + p.ctx.AddChild(previous) + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { + _, _ = p.precedenceStack.Pop() + p.ctx.SetStop(p.input.LT(-1)) + retCtx := p.ctx // save current ctx (return value) + // unroll so ctx is as it was before call to recursive method + if p.parseListeners != nil { + for p.ctx != parentCtx { + p.TriggerExitRuleEvent() + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } + } else { + p.ctx = parentCtx + } + // hook into tree + retCtx.SetParent(parentCtx) + if p.BuildParseTrees && parentCtx != nil { + // add return ctx into invoking rule's tree + parentCtx.AddChild(retCtx) + } +} + +func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { + ctx := p.ctx + for ctx != nil { + if ctx.GetRuleIndex() == ruleIndex { + return ctx + } + ctx = ctx.GetParent().(ParserRuleContext) + } + return nil +} + +func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool { + return precedence >= p.precedenceStack[len(p.precedenceStack)-1] +} + +//goland:noinspection GoUnusedParameter +func (p *BaseParser) inContext(context ParserRuleContext) bool { + // TODO: useful in parser? + return false +} + +// IsExpectedToken checks whether symbol can follow the current state in the +// {ATN}. The behavior of p.method is equivalent to the following, but is +// implemented such that the complete context-sensitive follow set does not +// need to be explicitly constructed. 
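GetPrecedence, EnterRecursionRule and UnrollRecursionContexts above maintain a stack of precedence levels, and Precpred simply asks whether a candidate precedence is at least the value on top of that stack. Stripped of parser contexts, the bookkeeping reduces to a sketch like this:

package main

import "fmt"

type precStack []int

func (s *precStack) push(p int) { *s = append(*s, p) }

func (s *precStack) pop() int {
	old := *s
	p := old[len(old)-1]
	*s = old[:len(old)-1]
	return p
}

func (s precStack) top() int {
	if len(s) == 0 {
		return -1 // no enclosing precedence rule
	}
	return s[len(s)-1]
}

// precpred mirrors Precpred: a sub-expression may be matched only if its
// precedence is not lower than the level we entered the recursion with.
func precpred(s precStack, precedence int) bool { return precedence >= s.top() }

func main() {
	var s precStack
	s.push(0)                   // base parser state
	s.push(2)                   // entering a recursion rule at precedence 2
	fmt.Println(precpred(s, 3)) // true: a tighter-binding operator may extend the rule
	fmt.Println(precpred(s, 1)) // false: a looser operator must be handled higher up
	s.pop()                     // leaving the recursion rule
}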
+// +// return getExpectedTokens().contains(symbol) +func (p *BaseParser) IsExpectedToken(symbol int) bool { + atn := p.Interpreter.atn + ctx := p.ctx + s := atn.states[p.state] + following := atn.NextTokens(s, nil) + if following.contains(symbol) { + return true + } + if !following.contains(TokenEpsilon) { + return false + } + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + following = atn.NextTokens(rt.(*RuleTransition).followState, nil) + if following.contains(symbol) { + return true + } + ctx = ctx.GetParent().(ParserRuleContext) + } + if following.contains(TokenEpsilon) && symbol == TokenEOF { + return true + } + + return false +} + +// GetExpectedTokens and returns the set of input symbols which could follow the current parser +// state and context, as given by [GetState] and [GetContext], +// respectively. +func (p *BaseParser) GetExpectedTokens() *IntervalSet { + return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) +} + +func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { + atn := p.Interpreter.atn + s := atn.states[p.state] + return atn.NextTokens(s, nil) +} + +// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found. +func (p *BaseParser) GetRuleIndex(ruleName string) int { + var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] + if ok { + return ruleIndex + } + + return -1 +} + +// GetRuleInvocationStack returns a list of the rule names in your parser instance +// leading up to a call to the current rule. You could override if +// you want more details such as the file/line info of where +// in the ATN a rule is invoked. +func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { + if c == nil { + c = p.ctx + } + stack := make([]string, 0) + for c != nil { + // compute what follows who invoked us + ruleIndex := c.GetRuleIndex() + if ruleIndex < 0 { + stack = append(stack, "n/a") + } else { + stack = append(stack, p.GetRuleNames()[ruleIndex]) + } + + vp := c.GetParent() + + if vp == nil { + break + } + + c = vp.(ParserRuleContext) + } + return stack +} + +// GetDFAStrings returns a list of all DFA states used for debugging purposes +func (p *BaseParser) GetDFAStrings() string { + return fmt.Sprint(p.Interpreter.decisionToDFA) +} + +// DumpDFA prints the whole of the DFA for debugging +func (p *BaseParser) DumpDFA() { + seenOne := false + for _, dfa := range p.Interpreter.decisionToDFA { + if dfa.Len() > 0 { + if seenOne { + fmt.Println() + } + fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") + fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) + seenOne = true + } + } +} + +func (p *BaseParser) GetSourceName() string { + return p.GrammarFileName +} + +// SetTrace installs a trace listener for the parse. +// +// During a parse it is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. This is for quick and dirty debugging. 
+func (p *BaseParser) SetTrace(trace *TraceListener) { + if trace == nil { + p.RemoveParseListener(p.tracer) + p.tracer = nil + } else { + if p.tracer != nil { + p.RemoveParseListener(p.tracer) + } + p.tracer = NewTraceListener(p) + p.AddParseListener(p.tracer) + } +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go new file mode 100644 index 000000000..ae2869692 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go @@ -0,0 +1,1668 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +var () + +// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over +// a standard JStore so that we can use Lazy instantiation of the JStore, mostly +// to avoid polluting the stats module with a ton of JStore instances with nothing in them. +type ClosureBusy struct { + bMap *JStore[*ATNConfig, Comparator[*ATNConfig]] + desc string +} + +// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules +func NewClosureBusy(desc string) *ClosureBusy { + return &ClosureBusy{ + desc: desc, + } +} + +func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) { + if c.bMap == nil { + c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc) + } + return c.bMap.Put(config) +} + +type ParserATNSimulator struct { + BaseATNSimulator + + parser Parser + predictionMode int + input TokenStream + startIndex int + dfa *DFA + mergeCache *JPCMap + outerContext ParserRuleContext +} + +//goland:noinspection GoUnusedExportedFunction +func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { + + p := &ParserATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } + + p.parser = parser + p.decisionToDFA = decisionToDFA + // SLL, LL, or LL + exact ambig detection?// + p.predictionMode = PredictionModeLL + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + p.input = nil + p.startIndex = 0 + p.outerContext = nil + p.dfa = nil + // Each prediction operation uses a cache for merge of prediction contexts. + // Don't keep around as it wastes huge amounts of memory. [JPCMap] + // isn't Synchronized, but we're ok since two threads shouldn't reuse same + // parser/atn-simulator object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid + // the merge if we ever see a and b again. Note that (b,a) -> c should + // also be examined during cache lookup. 
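The comment in NewParserATNSimulator explains that mergeCache memoises merges of prediction-context pairs within a single prediction, and that the symmetric pair should also hit the cache. A generic standalone version of that pair memoisation, with ints standing in for prediction contexts and a commutative stand-in for the merge itself, could be:

package main

import "fmt"

type mergeCache struct {
	m     map[[2]int]int
	calls int
}

// merge returns the combination of a and b, remembering the result under both
// (a, b) and (b, a) so either ordering of the same pair hits the cache.
func (c *mergeCache) merge(a, b int) int {
	if v, ok := c.m[[2]int{a, b}]; ok {
		return v
	}
	c.calls++
	v := a + b // stand-in for the (commutative) prediction-context merge
	c.m[[2]int{a, b}] = v
	c.m[[2]int{b, a}] = v
	return v
}

func main() {
	c := &mergeCache{m: map[[2]int]int{}}
	c.merge(1, 2)
	c.merge(2, 1) // cache hit via the symmetric key
	fmt.Println("expensive merges performed:", c.calls) // 1
}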
+ // + p.mergeCache = nil + + return p +} + +func (p *ParserATNSimulator) GetPredictionMode() int { + return p.predictionMode +} + +func (p *ParserATNSimulator) SetPredictionMode(v int) { + p.predictionMode = v +} + +func (p *ParserATNSimulator) reset() { +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + + strconv.Itoa(input.LT(1).GetColumn())) + } + p.input = input + p.startIndex = input.Index() + p.outerContext = outerContext + + dfa := p.decisionToDFA[decision] + p.dfa = dfa + m := input.Mark() + index := input.Index() + + defer func() { + p.dfa = nil + p.mergeCache = nil // whack cache after each prediction + // Do not attempt to run a GC now that we're done with the cache as makes the + // GC overhead terrible for badly formed grammars and has little effect on well formed + // grammars. + // I have made some extra effort to try and reduce memory pressure by reusing allocations when + // possible. However, it can only have a limited effect. The real solution is to encourage grammar + // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect + // what is happening at runtime, along with using the error listener to report ambiguities. + + input.Seek(index) + input.Release(m) + }() + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + var s0 *DFAState + p.atn.stateMu.RLock() + if dfa.getPrecedenceDfa() { + p.atn.edgeMu.RLock() + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. + s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) + p.atn.edgeMu.RUnlock() + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.getS0() + } + p.atn.stateMu.RUnlock() + + if s0 == nil { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) + } + fullCtx := false + s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) + + p.atn.stateMu.Lock() + if dfa.getPrecedenceDfa() { + // If p is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. 
+ // + dfa.s0.configs = s0Closure + s0Closure = p.applyPrecedenceFilter(s0Closure) + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + p.atn.edgeMu.Lock() + dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) + p.atn.edgeMu.Unlock() + } else { + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + dfa.setS0(s0) + } + p.atn.stateMu.Unlock() + } + + alt, re := p.execATN(dfa, s0, input, index, outerContext) + parser.SetError(re) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) + } + return alt + +} + +// execATN performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. +// +// There are some key conditions we're looking for after computing a new +// set of ATN configs (proposed DFA state): +// +// - If the set is empty, there is no viable alternative for current symbol +// - Does the state uniquely predict an alternative? +// - Does the state have a conflict that would prevent us from +// putting it on the work list? +// +// We also have some key operations to do: +// +// - Add an edge from previous DFA state to potentially NewDFA state, D, +// - Upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) +// - Collecting predicates and adding semantic context to DFA accept states +// - adding rule context to context-sensitive DFA accept states +// - Consuming an input symbol +// - Reporting a conflict +// - Reporting an ambiguity +// - Reporting a context sensitivity +// - Reporting insufficient predicates +// +// Cover these cases: +// +// - dead end +// - single alt +// - single alt + predicates +// - conflict +// - conflict + predicates +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { + + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + + ", DFA state " + s0.String() + + ", LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) + } + + previousD := s0 + + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("s0 = " + s0.String()) + } + t := input.LA(1) + for { // for more work + D := p.getExistingTargetState(previousD, t) + if D == nil { + D = p.computeTargetState(dfa, previousD, t) + } + if D == ATNSimulatorError { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. 
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt != ATNInvalidAltNumber { + return alt, nil + } + p.parser.SetError(e) + return ATNInvalidAltNumber, e + } + if D.requiresFullContext && p.predictionMode != PredictionModeSLL { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + conflictingAlts := D.configs.conflictingAlts + if D.predicates != nil { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL fail-over") + } + conflictIndex := input.Index() + if conflictIndex != startIndex { + input.Seek(startIndex) + } + conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) + if conflictingAlts.length() == 1 { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("Full LL avoided") + } + return conflictingAlts.minValue(), nil + } + if conflictIndex != startIndex { + // restore the index so Reporting the fallback to full + // context occurs with the index at the correct spot + input.Seek(conflictIndex) + } + } + if runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) + } + fullCtx := true + s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) + p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) + alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt, re + } + if D.isAcceptState { + if D.predicates == nil { + return D.prediction, nil + } + stopIndex := input.Index() + input.Seek(startIndex) + alts := p.evalSemanticContext(D.predicates, outerContext, true) + + switch alts.length() { + case 0: + return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex) + case 1: + return alts.minValue(), nil + default: + // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. + p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) + return alts.minValue(), nil + } + } + previousD = D + + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// p method returns {@code nil}. +// +// @param previousD The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for p edge is not +// already cached + +func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { + if t+1 < 0 { + return nil + } + + p.atn.edgeMu.RLock() + defer p.atn.edgeMu.RUnlock() + edges := previousD.getEdges() + if edges == nil || t+1 >= len(edges) { + return nil + } + return previousD.getIthEdge(t + 1) +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param dfa The DFA +// @param previousD The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, p method +// returns {@link //ERROR}. 
+// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { + reach := p.computeReachSet(previousD.configs, t, false) + + if reach == nil { + p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) + return ATNSimulatorError + } + // create new target state we'll add to DFA after it's complete + D := NewDFAState(-1, reach) + + predictedAlt := p.getUniqueAlt(reach) + + if runtimeConfig.parserATNSimulatorDebug { + altSubSets := PredictionModegetConflictingAltSubsets(reach) + fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + + ", previous=" + previousD.configs.String() + + ", configs=" + reach.String() + + ", predict=" + strconv.Itoa(predictedAlt) + + ", allSubsetsConflict=" + + fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + + ", conflictingAlts=" + p.getConflictingAlts(reach).String()) + } + if predictedAlt != ATNInvalidAltNumber { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true + D.configs.uniqueAlt = predictedAlt + D.setPrediction(predictedAlt) + } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = p.getConflictingAlts(reach) + D.requiresFullContext = true + // in SLL-only mode, we will stop at p state and return the minimum alt + D.isAcceptState = true + D.setPrediction(D.configs.conflictingAlts.minValue()) + } + if D.isAcceptState && D.configs.hasSemanticContext { + p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) + if D.predicates != nil { + D.setPrediction(ATNInvalidAltNumber) + } + } + // all adds to dfa are done after we've created full D state + D = p.addDFAEdge(dfa, previousD, t, D) + return D +} + +func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. + nalts := len(decisionState.GetTransitions()) + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) + altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred != nil { + dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.setPrediction(altsToCollectPredsFrom.minValue()) + } +} + +// comes back with reach.uniqueAlt set to a valid alt +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { + + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("execATNWithFullContext " + s0.String()) + } + + fullCtx := true + foundExactAmbig := false + var reach *ATNConfigSet + previous := s0 + input.Seek(startIndex) + t := input.LA(1) + predictedAlt := -1 + + for { // for more work + reach = p.computeReachSet(previous, t, fullCtx) + if reach == nil { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. 
Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt != ATNInvalidAltNumber { + return alt, nil + } + return alt, p.noViableAlt(input, outerContext, previous, startIndex) + } + altSubSets := PredictionModegetConflictingAltSubsets(reach) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) + } + reach.uniqueAlt = p.getUniqueAlt(reach) + // unique prediction? + if reach.uniqueAlt != ATNInvalidAltNumber { + predictedAlt = reach.uniqueAlt + break + } + if p.predictionMode != PredictionModeLLExactAmbigDetection { + predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATNInvalidAltNumber { + break + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { + foundExactAmbig = true + predictedAlt = PredictionModegetSingleViableAlt(altSubSets) + break + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + previous = reach + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. + if reach.uniqueAlt != ATNInvalidAltNumber { + p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) + return predictedAlt, nil + } + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. It's possible to have non-conflicting alt subsets as in: + // + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + // + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In p case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve p without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. 
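+	// Report the (possibly inexact) ambiguity over the alternatives left in the
+	// final reach set and return the predicted alternative chosen above.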
+ + p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) + + return predictedAlt, nil +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet { + if p.mergeCache == nil { + p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()") + } + intermediate := NewATNConfigSet(fullCtx) + + // Configurations already in a rule stop state indicate reaching the end + // of the decision rule (local context) or end of the start rule (full + // context). Once reached, these configurations are never updated by a + // closure operation, so they are handled separately for the performance + // advantage of having a smaller intermediate set when calling closure. + // + // For full-context reach operations, separate handling is required to + // ensure that the alternative Matching the longest overall sequence is + // chosen when multiple such configurations can Match the input. + + var skippedStopStates []*ATNConfig + + // First figure out where we can reach on input t + for _, c := range closure.configs { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) + } + + if _, ok := c.GetState().(*RuleStopState); ok { + if fullCtx || t == TokenEOF { + skippedStopStates = append(skippedStopStates, c) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("added " + c.String() + " to SkippedStopStates") + } + } + continue + } + + for _, trans := range c.GetState().GetTransitions() { + target := p.getReachableTarget(trans, t) + if target != nil { + cfg := NewATNConfig4(c, target) + intermediate.Add(cfg, p.mergeCache) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("added " + cfg.String() + " to intermediate") + } + } + } + } + + // Now figure out where the reach operation can take us... + var reach *ATNConfigSet + + // This block optimizes the reach operation for intermediate sets which + // trivially indicate a termination state for the overall + // AdaptivePredict operation. + // + // The conditions assume that intermediate + // contains all configurations relevant to the reach set, but p + // condition is not true when one or more configurations have been + // withheld in SkippedStopStates, or when the current symbol is EOF. + // + if skippedStopStates == nil && t != TokenEOF { + if len(intermediate.configs) == 1 { + // Don't pursue the closure if there is just one state. + // It can only have one alternative just add to result + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } + } + // If the reach set could not be trivially determined, perform a closure + // operation on the intermediate set to compute its initial value. 
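+	// The closure follows epsilon edges out of every configuration in the
+	// intermediate set, treating EOF transitions as epsilon when the current
+	// symbol is TokenEOF.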
+ // + if reach == nil { + reach = NewATNConfigSet(fullCtx) + closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy") + treatEOFAsEpsilon := t == TokenEOF + amount := len(intermediate.configs) + for k := 0; k < amount; k++ { + p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) + } + } + if t == TokenEOF { + // After consuming EOF no additional input is possible, so we are + // only interested in configurations which reached the end of the + // decision rule (local context) or end of the start rule (full + // context). Update reach to contain only these configurations. This + // handles both explicit EOF transitions in the grammar and implicit + // EOF transitions following the end of the decision or start rule. + // + // When reach==intermediate, no closure operation was performed. In + // p case, removeAllConfigsNotInRuleStopState needs to check for + // reachable rule stop states as well as configurations already in + // a rule stop state. + // + // This is handled before the configurations in SkippedStopStates, + // because any configurations potentially added from that list are + // already guaranteed to meet this condition whether it's + // required. + // + reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate)) + } + // If SkippedStopStates!=nil, then it contains at least one + // configuration. For full-context reach operations, these + // configurations reached the end of the start rule, in which case we + // only add them back to reach if no configuration during the current + // closure operation reached such a state. This ensures AdaptivePredict + // chooses an alternative Matching the longest overall sequence when + // multiple alternatives are viable. + // + if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { + for l := 0; l < len(skippedStopStates); l++ { + reach.Add(skippedStopStates[l], p.mergeCache) + } + } + + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) + } + + if len(reach.configs) == 0 { + return nil + } + + return reach +} + +// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from +// configs which are in a [RuleStopState]. If all +// configurations in configs are already in a rule stop state, this +// method simply returns configs. +// +// When lookToEndOfRule is true, this method uses +// [ATN].[NextTokens] for each configuration in configs which is +// not already in a rule stop state to see if a rule stop state is reachable +// from the configuration via epsilon-only transitions. +// +// When lookToEndOfRule is true, this method checks for rule stop states +// reachable by epsilon-only transitions from each configuration in +// configs. 
+// +// The func returns configs if all configurations in configs are in a +// rule stop state, otherwise it returns a new configuration set containing only +// the configurations from configs which are in a rule stop state +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet { + if PredictionModeallConfigsInRuleStopStates(configs) { + return configs + } + result := NewATNConfigSet(configs.fullCtx) + for _, config := range configs.configs { + if _, ok := config.GetState().(*RuleStopState); ok { + result.Add(config, p.mergeCache) + continue + } + if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { + NextTokens := p.atn.NextTokens(config.GetState(), nil) + if NextTokens.contains(TokenEpsilon) { + endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] + result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache) + } + } + } + return result +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet { + // always at least the implicit call to start rule + initialContext := predictionContextFromRuleContext(p.atn, ctx) + configs := NewATNConfigSet(fullCtx) + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("computeStartState from ATN state " + a.String() + + " initialContext=" + initialContext.String()) + } + + for i := 0; i < len(a.GetTransitions()); i++ { + target := a.GetTransitions()[i].getTarget() + c := NewATNConfig6(target, i+1, initialContext) + closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy") + p.closure(c, configs, closureBusy, true, fullCtx, false) + } + return configs +} + +// applyPrecedenceFilter transforms the start state computed by +// [computeStartState] to the special start state used by a +// precedence [DFA] for a particular precedence value. The transformation +// process applies the following changes to the start state's configuration +// set. +// +// 1. Evaluate the precedence predicates for each configuration using +// [SemanticContext].evalPrecedence. +// 2. Remove all configurations which predict an alternative greater than +// 1, for which another configuration that predicts alternative 1 is in the +// same ATN state with the same prediction context. +// +// Transformation 2 is valid for the following reasons: +// +// - The closure block cannot contain any epsilon transitions which bypass +// the body of the closure, so all states reachable via alternative 1 are +// part of the precedence alternatives of the transformed left-recursive +// rule. +// - The "primary" portion of a left recursive rule cannot contain an +// epsilon transition, so the only way an alternative other than 1 can exist +// in a state that is also reachable via alternative 1 is by nesting calls +// to the left-recursive rule, with the outer calls not being at the +// preferred precedence level. +// +// The prediction context must be considered by this filter to address +// situations like the following: +// +// grammar TA +// prog: statement* EOF +// statement: letterA | statement letterA 'b' +// letterA: 'a' +// +// In the above grammar, the [ATN] state immediately before the token +// reference 'a' in letterA is reachable from the left edge +// of both the primary and closure blocks of the left-recursive rule +// statement. 
The prediction context associated with each of these +// configurations distinguishes between them, and prevents the alternative +// which stepped out to prog, and then back in to statement +// from being eliminated by the filter. +// +// The func returns the transformed configuration set representing the start state +// for a precedence [DFA] at a particular precedence level (determined by +// calling [Parser].getPrecedence). +func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet { + + statesFromAlt1 := make(map[int]*PredictionContext) + configSet := NewATNConfigSet(configs.fullCtx) + + for _, config := range configs.configs { + // handle alt 1 first + if config.GetAlt() != 1 { + continue + } + updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) + if updatedContext == nil { + // the configuration was eliminated + continue + } + statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() + if updatedContext != config.GetSemanticContext() { + configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache) + } else { + configSet.Add(config, p.mergeCache) + } + } + for _, config := range configs.configs { + + if config.GetAlt() == 1 { + // already handled + continue + } + // In the future, p elimination step could be updated to also + // filter the prediction context for alternatives predicting alt>1 + // (basically a graph subtraction algorithm). + if !config.getPrecedenceFilterSuppressed() { + context := statesFromAlt1[config.GetState().GetStateNumber()] + if context != nil && context.Equals(config.GetContext()) { + // eliminated + continue + } + } + configSet.Add(config, p.mergeCache) + } + return configSet +} + +func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { + if trans.Matches(ttype, 0, p.atn.maxTokenType) { + return trans.getTarget() + } + + return nil +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext { + + altToPred := make([]SemanticContext, nalts+1) + for _, c := range configs.configs { + if ambigAlts.contains(c.GetAlt()) { + altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) + } + } + nPredAlts := 0 + for i := 1; i <= nalts; i++ { + pred := altToPred[i] + if pred == nil { + altToPred[i] = SemanticContextNone + } else if pred != SemanticContextNone { + nPredAlts++ + } + } + // unambiguous alts are nil in altToPred + if nPredAlts == 0 { + altToPred = nil + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) + } + return altToPred +} + +func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { + pairs := make([]*PredPrediction, 0) + containsPredicate := false + for i := 1; i < len(altToPred); i++ { + pred := altToPred[i] + // un-predicated is indicated by SemanticContextNONE + if ambigAlts != nil && ambigAlts.contains(i) { + pairs = append(pairs, NewPredPrediction(pred, i)) + } + if pred != SemanticContextNone { + containsPredicate = true + } + } + if !containsPredicate { + return nil + } + return pairs +} + +// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by +// choosing an alternative rather than panic a NoViableAltException in particular prediction scenarios where the +// Error state was reached during [ATN] 
simulation. +// +// The default implementation of this method uses the following +// algorithm to identify an [ATN] configuration which successfully parsed the +// decision entry rule. Choosing such an alternative ensures that the +// [ParserRuleContext] returned by the calling rule will be complete +// and valid, and the syntax error will be Reported later at a more +// localized location. +// +// - If a syntactically valid path or paths reach the end of the decision rule, and +// they are semantically valid if predicated, return the min associated alt. +// - Else, if a semantically invalid but syntactically valid path exist +// or paths exist, return the minimum associated alt. +// - Otherwise, return [ATNInvalidAltNumber]. +// +// In some scenarios, the algorithm described above could predict an +// alternative which will result in a [FailedPredicateException] in +// the parser. Specifically, this could occur if the only configuration +// capable of successfully parsing to the end of the decision rule is +// blocked by a semantic predicate. By choosing this alternative within +// [AdaptivePredict] instead of panic a [NoViableAltException], the resulting +// [FailedPredicateException] in the parser will identify the specific +// predicate which is preventing the parser from successfully parsing the +// decision rule, which helps developers identify and correct logic errors +// in semantic predicates. +// +// pass in the configs holding ATN configurations which were valid immediately before +// the ERROR state was reached, outerContext as the initial parser context from the paper +// or the parser stack at the instant before prediction commences. +// +// Teh func returns the value to return from [AdaptivePredict], or +// [ATNInvalidAltNumber] if a suitable alternative was not +// identified and [AdaptivePredict] should report an error instead. +func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int { + cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) + semValidConfigs := cfgs[0] + semInvalidConfigs := cfgs[1] + alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) + if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists + return alt + } + // Is there a syntactically valid path with a failed pred? + if len(semInvalidConfigs.configs) > 0 { + alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) + if alt != ATNInvalidAltNumber { // syntactically viable path exists + return alt + } + } + return ATNInvalidAltNumber +} + +func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int { + alts := NewIntervalSet() + + for _, c := range configs.configs { + _, ok := c.GetState().(*RuleStopState) + + if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { + alts.addOne(c.GetAlt()) + } + } + if alts.length() == 0 { + return ATNInvalidAltNumber + } + + return alts.first() +} + +// Walk the list of configurations and split them according to +// those that have preds evaluating to true/false. If no pred, assume +// true pred and include in succeeded set. Returns Pair of sets. +// +// Create a NewSet so as not to alter the incoming parameter. +// +// Assumption: the input stream has been restored to the starting point +// prediction, which is where predicates need to evaluate. 
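+//
+// A rough usage sketch (illustrative only, mirroring the call in
+// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule above):
+//
+//	pair := p.splitAccordingToSemanticValidity(configs, outerContext)
+//	semValid, semInvalid := pair[0], pair[1]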
+ +type ATNConfigSetPair struct { + item0, item1 *ATNConfigSet +} + +func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet { + succeeded := NewATNConfigSet(configs.fullCtx) + failed := NewATNConfigSet(configs.fullCtx) + + for _, c := range configs.configs { + if c.GetSemanticContext() != SemanticContextNone { + predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) + if predicateEvaluationResult { + succeeded.Add(c, nil) + } else { + failed.Add(c, nil) + } + } else { + succeeded.Add(c, nil) + } + } + return []*ATNConfigSet{succeeded, failed} +} + +// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the +// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an +// un-predicated runtimeConfig which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { + predictions := NewBitSet() + for i := 0; i < len(predPredictions); i++ { + pair := predPredictions[i] + if pair.pred == SemanticContextNone { + predictions.add(pair.alt) + if !complete { + break + } + continue + } + + predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) + } + if predicateEvaluationResult { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) + } + predictions.add(pair.alt) + if !complete { + break + } + } + } + return predictions +} + +func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { + initialDepth := 0 + p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEOFAsEpsilon) +} + +func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") + } + + var stack []*ATNConfig + visited := make(map[*ATNConfig]bool) + + stack = append(stack, config) + + for len(stack) > 0 { + currConfig := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + if _, ok := visited[currConfig]; ok { + continue + } + visited[currConfig] = true + + if _, ok := currConfig.GetState().(*RuleStopState); ok { + // We hit rule end. 
If we have context info, use it + // run thru all possible stack tops in ctx + if !currConfig.GetContext().isEmpty() { + for i := 0; i < currConfig.GetContext().length(); i++ { + if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[currConfig.GetContext().getReturnState(i)] + newContext := currConfig.GetContext().GetParent(i) // "pop" return state + + c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. + c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext()) + + stack = append(stack, c) + } + continue + } else if fullCtx { + // reached end of start rule + configs.Add(currConfig, p.mergeCache) + continue + } else { + // else if we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + } + } + + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") + } + + if _, ok := config.GetState().(*RuleStopState); ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !config.GetContext().isEmpty() { + for i := 0; i < config.GetContext().length(); i++ { + if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[config.GetContext().getReturnState(i)] + newContext := config.GetContext().GetParent(i) // "pop" return state + + c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
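+					// Copy the reaches-into-outer-context count onto the popped
+					// configuration, then continue the closure one level shallower
+					// (depth-1).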
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) + p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) + } + return + } else if fullCtx { + // reached end of start rule + configs.Add(config, p.mergeCache) + return + } else { + // else if we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + } + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) +} + +// Do the actual work of walking epsilon edges +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + state := config.GetState() + // optimization + if !state.GetEpsilonOnlyTransitions() { + configs.Add(config, p.mergeCache) + // make sure to not return here, because EOF transitions can act as + // both epsilon transitions and non-epsilon transitions. + } + for i := 0; i < len(state.GetTransitions()); i++ { + if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) { + continue + } + + t := state.GetTransitions()[i] + _, ok := t.(*ActionTransition) + continueCollecting := collectPredicates && !ok + c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) + if c != nil { + newDepth := depth + + if _, ok := config.GetState().(*RuleStopState); ok { + // target fell off end of rule mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if this is > 0. + + if p.dfa != nil && p.dfa.getPrecedenceDfa() { + if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { + c.setPrecedenceFilterSuppressed(true) + } + } + + c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) + + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for right-recursive rules + continue + } + + configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method + newDepth-- + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("dips into outer ctx: " + c.String()) + } + } else { + + if !t.getIsEpsilon() { + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for EOF* and EOF+ + continue + } + } + if _, ok := t.(*RuleTransition); ok { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0 { + newDepth++ + } + } + } + p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) + } + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool { + if !runtimeConfig.lRLoopEntryBranchOpt { + return false + } + + _p := config.GetState() + + // First check to see if we are in StarLoopEntryState generated during + // left-recursion elimination. For efficiency, also check if + // the context has an empty stack case. 
If so, it would mean + // global FOLLOW so we can't perform optimization + if _p.GetStateType() != ATNStateStarLoopEntry { + return false + } + startLoop, ok := _p.(*StarLoopEntryState) + if !ok { + return false + } + if !startLoop.precedenceRuleDecision || + config.GetContext().isEmpty() || + config.GetContext().hasEmptyPath() { + return false + } + + // Require all return states to return back to the same rule + // that p is in. + numCtxs := config.GetContext().length() + for i := 0; i < numCtxs; i++ { + returnState := p.atn.states[config.GetContext().getReturnState(i)] + if returnState.GetRuleIndex() != _p.GetRuleIndex() { + return false + } + } + x := _p.GetTransitions()[0].getTarget() + decisionStartState := x.(BlockStartState) + blockEndStateNum := decisionStartState.getEndState().stateNumber + blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) + + // Verify that the top of each stack context leads to loop entry/exit + // state through epsilon edges and w/o leaving rule. + + for i := 0; i < numCtxs; i++ { // for each stack context + returnStateNumber := config.GetContext().getReturnState(i) + returnState := p.atn.states[returnStateNumber] + + // all states must have single outgoing epsilon edge + if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() { + return false + } + + // Look for prefix op case like 'not expr', (' type ')' expr + returnStateTarget := returnState.GetTransitions()[0].getTarget() + if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p { + continue + } + + // Look for 'expr op expr' or case where expr's return state is block end + // of (...)* internal block; the block end points to loop back + // which points to p but we don't need to check that + if returnState == blockEndState { + continue + } + + // Look for ternary expr ? expr : expr. 
The return state points at block end, + // which points at loop entry state + if returnStateTarget == blockEndState { + continue + } + + // Look for complex prefix 'between expr and expr' case where 2nd expr's + // return state points at block end state of (...)* internal block + if returnStateTarget.GetStateType() == ATNStateBlockEnd && + len(returnStateTarget.GetTransitions()) == 1 && + returnStateTarget.GetTransitions()[0].getIsEpsilon() && + returnStateTarget.GetTransitions()[0].getTarget() == _p { + continue + } + + // anything else ain't conforming + return false + } + + return true +} + +func (p *ParserATNSimulator) getRuleName(index int) string { + if p.parser != nil && index >= 0 { + return p.parser.GetRuleNames()[index] + } + var sb strings.Builder + sb.Grow(32) + + sb.WriteString("') + return sb.String() +} + +func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig { + + switch t.getSerializationType() { + case TransitionRULE: + return p.ruleTransition(config, t.(*RuleTransition)) + case TransitionPRECEDENCE: + return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionPREDICATE: + return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionACTION: + return p.actionTransition(config, t.(*ActionTransition)) + case TransitionEPSILON: + return NewATNConfig4(config, t.getTarget()) + case TransitionATOM, TransitionRANGE, TransitionSET: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEOFAsEpsilon { + if t.Matches(TokenEOF, 0, 1) { + return NewATNConfig4(config, t.getTarget()) + } + } + return nil + default: + return nil + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) + } + return NewATNConfig4(config, t.getTarget()) +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig, + pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { + + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + + strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *ATNConfig + if collectPredicates && inContext { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the runtimeConfig sets. It also obviates the need to test predicates + // later during conflict resolution. 
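+			// Evaluate the precedence predicate against the start of the decision:
+			// seek back to startIndex, evaluate, then restore the stream position.
+			// The configuration is only kept when the predicate succeeds.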
+ currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewATNConfig4(config, pt.getTarget()) + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("runtimeConfig from pred transition=" + c.String()) + } + return c +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { + + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *ATNConfig + if collectPredicates && (!pt.isCtxDependent || inContext) { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. + currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewATNConfig4(config, pt.getTarget()) + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) + } + returnState := t.followState + newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) + return NewATNConfig1(config, t.getTarget(), newContext) +} + +func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet { + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModeGetAlts(altsets) +} + +// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of +// ambiguous states. If we have another state associated with conflicting +// alternatives, we should keep going. For example, the following grammar +// +// s : (ID | ID ID?) ; +// +// When the [ATN] simulation reaches the state before ;, it has a [DFA] +// state that looks like: +// +// [12|1|[], 6|2|[], 12|2|[]]. +// +// Naturally +// +// 12|1|[] and 12|2|[] +// +// conflict, but we cannot stop processing this node +// because alternative to has another way to continue, via +// +// [6|2|[]]. 
+// +// The key is that we have a single state that has config's only associated +// with a single alternative, 2, and crucially the state transitions +// among the configurations are all non-epsilon transitions. That means +// we don't consider any conflicts that include alternative 2. So, we +// ignore the conflict between alts 1 and 2. We ignore a set of +// conflicting alts when there is an intersection with an alternative +// associated with a single alt state in the state config-list map. +// +// It's also the case that we might have two conflicting configurations but +// also a 3rd non-conflicting configuration for a different alternative: +// +// [1|1|[], 1|2|[], 8|3|[]]. +// +// This can come about from grammar: +// +// a : A | A | A B +// +// After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state right before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue, so we do not +// stop working on this state. +// +// In the previous example, we're concerned +// with states associated with the conflicting alternatives. Here alt +// 3 is not associated with the conflicting configs, but since we can continue +// looking for input reasonably, I don't declare the state done. We +// ignore a set of conflicting alts when we have an alternative +// that we still need to pursue. +func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet { + var conflictingAlts *BitSet + if configs.uniqueAlt != ATNInvalidAltNumber { + conflictingAlts = NewBitSet() + conflictingAlts.add(configs.uniqueAlt) + } else { + conflictingAlts = configs.conflictingAlts + } + return conflictingAlts +} + +func (p *ParserATNSimulator) GetTokenName(t int) string { + if t == TokenEOF { + return "EOF" + } + + if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) { + return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" + } + + if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) { + return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">" + } + + return strconv.Itoa(t) +} + +func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { + return p.GetTokenName(input.LA(1)) +} + +// Used for debugging in [AdaptivePredict] around [execATN], but I cut +// it out for clarity now that alg. works well. We can leave this +// "dead" code for a bit. 
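+//
+// The body below is intentionally stubbed out with a panic; the original
+// printing logic is kept only as commented-out reference code.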
+func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) { + + panic("Not implemented") + + // fmt.Println("dead end configs: ") + // var decs = nvae.deadEndConfigs + // + // for i:=0; i0) { + // var t = c.state.GetTransitions()[0] + // if t2, ok := t.(*AtomTransition); ok { + // trans = "Atom "+ p.GetTokenName(t2.label) + // } else if t3, ok := t.(SetTransition); ok { + // _, ok := t.(*NotSetTransition) + // + // var s string + // if (ok){ + // s = "~" + // } + // + // trans = s + "Set " + t3.set + // } + // } + // fmt.Errorf(c.String(p.parser, true) + ":" + trans) + // } +} + +func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException { + return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) +} + +func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int { + alt := ATNInvalidAltNumber + for _, c := range configs.configs { + if alt == ATNInvalidAltNumber { + alt = c.GetAlt() // found first alt + } else if c.GetAlt() != alt { + return ATNInvalidAltNumber + } + } + return alt +} + +// Add an edge to the DFA, if possible. This method calls +// {@link //addDFAState} to ensure the {@code to} state is present in the +// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the +// range of edges that can be represented in the DFA tables, p method +// returns without adding the edge to the DFA. +// +//
+// If {@code to} is {@code nil}, p method returns {@code nil}.
+// Otherwise, p method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.
+// +// @param dfa The DFA +// @param from The source state for the edge +// @param t The input symbol +// @param to The target state for the edge +// +// @return If {@code to} is {@code nil}, p method returns {@code nil} +// otherwise p method returns the result of calling {@link //addDFAState} +// on {@code to} +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) + } + if to == nil { + return nil + } + p.atn.stateMu.Lock() + to = p.addDFAState(dfa, to) // used existing if possible not incoming + p.atn.stateMu.Unlock() + if from == nil || t < -1 || t > p.atn.maxTokenType { + return to + } + p.atn.edgeMu.Lock() + if from.getEdges() == nil { + from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1)) + } + from.setIthEdge(t+1, to) // connect + p.atn.edgeMu.Unlock() + + if runtimeConfig.parserATNSimulatorDebug { + var names []string + if p.parser != nil { + names = p.parser.GetLiteralNames() + } + + fmt.Println("DFA=\n" + dfa.String(names, nil)) + } + return to +} + +// addDFAState adds state D to the [DFA] if it is not already present, and returns +// the actual instance stored in the [DFA]. If a state equivalent to D +// is already in the [DFA], the existing state is returned. Otherwise, this +// method returns D after adding it to the [DFA]. +// +// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and +// does not change the DFA. +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { + if d == ATNSimulatorError { + return d + } + + existing, present := dfa.Get(d) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Print("addDFAState " + d.String() + " exists") + } + return existing + } + + // The state will be added if not already there or we will be given back the existing state struct + // if it is present. 
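+	// Number the new state, freeze its configuration set (optimize it, mark it
+	// read-only and drop the lookup table), and store it in the DFA.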
+ // + d.stateNumber = dfa.Len() + if !d.configs.readOnly { + d.configs.OptimizeConfigs(&p.BaseATNSimulator) + d.configs.readOnly = true + d.configs.configLookup = nil + } + dfa.Put(d) + + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("addDFAState new " + d.String()) + } + + return d +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) + } +} + +// ReportAmbiguity reports and ambiguity in the parse, which shows that the parser will explore a different route. +// +// If context-sensitive parsing, we know it's an ambiguity not a conflict or error, but we can report it to the developer +// so that they can see that this is happening and can take action if they want to. 
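+//
+// As with ReportAttemptingFullContext and ReportContextSensitivity above, the
+// report is forwarded to the parser's error listener dispatch when a parser is
+// attached.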
+// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int, + exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go similarity index 77% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go index 1c8cee747..c249bc138 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go @@ -31,7 +31,9 @@ type ParserRuleContext interface { } type BaseParserRuleContext struct { - *BaseRuleContext + parentCtx RuleContext + invokingState int + RuleIndex int start, stop Token exception RecognitionException @@ -40,8 +42,22 @@ type BaseParserRuleContext struct { func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { prc := new(BaseParserRuleContext) + InitBaseParserRuleContext(prc, parent, invokingStateNumber) + return prc +} + +func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) { + // What context invoked b rule? + prc.parentCtx = parent - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) + // What state invoked the rule associated with b context? + // The "return address" is the followState of invokingState + // If parent is nil, b should be -1. + if parent == nil { + prc.invokingState = -1 + } else { + prc.invokingState = invokingStateNumber + } prc.RuleIndex = -1 // * If we are debugging or building a parse tree for a Visitor, @@ -56,8 +72,6 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) // The exception that forced prc rule to return. If the rule successfully // completed, prc is {@code nil}. prc.exception = nil - - return prc } func (prc *BaseParserRuleContext) SetException(e RecognitionException) { @@ -90,14 +104,15 @@ func (prc *BaseParserRuleContext) GetText() string { return s } -// Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { +// EnterRule is called when any rule is entered. +func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) { } -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { +// ExitRule is called when any rule is exited. 
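+//
+// The base implementation is a deliberate no-op; generated rule contexts
+// typically override it to dispatch to their listener methods.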
+func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) { } -// * Does not set parent link other add methods do that/// +// * Does not set parent link other add methods do that func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { if prc.children == nil { prc.children = make([]Tree, 0) @@ -120,10 +135,9 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { return child } -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove -// generic ruleContext object. -// / +// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as +// we entered a rule. If we have a label, we will need to remove +// the generic ruleContext object. func (prc *BaseParserRuleContext) RemoveLastChild() { if prc.children != nil && len(prc.children) > 0 { prc.children = prc.children[0 : len(prc.children)-1] @@ -293,7 +307,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int { return len(prc.children) } -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { +func (prc *BaseParserRuleContext) GetSourceInterval() Interval { if prc.start == nil || prc.stop == nil { return TreeInvalidInterval } @@ -340,6 +354,50 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s return s } +func (prc *BaseParserRuleContext) SetParent(v Tree) { + if v == nil { + prc.parentCtx = nil + } else { + prc.parentCtx = v.(RuleContext) + } +} + +func (prc *BaseParserRuleContext) GetInvokingState() int { + return prc.invokingState +} + +func (prc *BaseParserRuleContext) SetInvokingState(t int) { + prc.invokingState = t +} + +func (prc *BaseParserRuleContext) GetRuleIndex() int { + return prc.RuleIndex +} + +func (prc *BaseParserRuleContext) GetAltNumber() int { + return ATNInvalidAltNumber +} + +func (prc *BaseParserRuleContext) SetAltNumber(_ int) {} + +// IsEmpty returns true if the context of b is empty. +// +// A context is empty if there is no invoking state, meaning nobody calls +// current context. +func (prc *BaseParserRuleContext) IsEmpty() bool { + return prc.invokingState == -1 +} + +// GetParent returns the combined text of all child nodes. This method only considers +// tokens which have been added to the parse tree. +// +// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of this +// method. +func (prc *BaseParserRuleContext) GetParent() Tree { + return prc.parentCtx +} + var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) type InterpreterRuleContext interface { @@ -350,6 +408,7 @@ type BaseInterpreterRuleContext struct { *BaseParserRuleContext } +//goland:noinspection GoUnusedExportedFunction func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { prc := new(BaseInterpreterRuleContext) diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go new file mode 100644 index 000000000..c1b80cc1f --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go @@ -0,0 +1,727 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "golang.org/x/exp/slices" + "strconv" +) + +var _emptyPredictionContextHash int + +func init() { + _emptyPredictionContextHash = murmurInit(1) + _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) +} + +func calculateEmptyHash() int { + return _emptyPredictionContextHash +} + +const ( + // BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $ + // doesn't mean wildcard: + // + // $ + x = [$,x] + // + // Here, + // + // $ = EmptyReturnState + BasePredictionContextEmptyReturnState = 0x7FFFFFFF +) + +// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here +// +//goland:noinspection GoUnusedGlobalVariable +var ( + BasePredictionContextglobalNodeCount = 1 + BasePredictionContextid = BasePredictionContextglobalNodeCount +) + +const ( + PredictionContextEmpty = iota + PredictionContextSingleton + PredictionContextArray +) + +// PredictionContext is a go idiomatic implementation of PredictionContext that does not rty to +// emulate inheritance from Java, and can be used without an interface definition. An interface +// is not required because no user code will ever need to implement this interface. +type PredictionContext struct { + cachedHash int + pcType int + parentCtx *PredictionContext + returnState int + parents []*PredictionContext + returnStates []int +} + +func NewEmptyPredictionContext() *PredictionContext { + nep := &PredictionContext{} + nep.cachedHash = calculateEmptyHash() + nep.pcType = PredictionContextEmpty + nep.returnState = BasePredictionContextEmptyReturnState + return nep +} + +func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext { + pc := &PredictionContext{} + pc.pcType = PredictionContextSingleton + pc.returnState = returnState + pc.parentCtx = parent + if parent != nil { + pc.cachedHash = calculateHash(parent, returnState) + } else { + pc.cachedHash = calculateEmptyHash() + } + return pc +} + +func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. 
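+	// Pre-compute the MurmurHash over all parents and return states so that
+	// Hash() can simply return the cached value.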
+ hash := murmurInit(1) + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + hash = murmurFinish(hash, len(parents)<<1) + + nec := &PredictionContext{} + nec.cachedHash = hash + nec.pcType = PredictionContextArray + nec.parents = parents + nec.returnStates = returnStates + return nec +} + +func (p *PredictionContext) Hash() int { + return p.cachedHash +} + +func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + switch p.pcType { + case PredictionContextEmpty: + otherP := other.(*PredictionContext) + return other == nil || otherP == nil || otherP.isEmpty() + case PredictionContextSingleton: + return p.SingletonEquals(other) + case PredictionContextArray: + return p.ArrayEquals(other) + } + return false +} + +func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool { + if o == nil { + return false + } + other := o.(*PredictionContext) + if other == nil || other.pcType != PredictionContextArray { + return false + } + if p.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return slices.Equal(p.returnStates, other.returnStates) && + slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool { + return x.Equals(y) + }) +} + +func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { + if other == nil { + return false + } + otherP := other.(*PredictionContext) + if otherP == nil { + return false + } + + if p.cachedHash != otherP.Hash() { + return false // Can't be same if hash is different + } + + if p.returnState != otherP.getReturnState(0) { + return false + } + + // Both parents must be nil if one is + if p.parentCtx == nil { + return otherP.parentCtx == nil + } + + return p.parentCtx.Equals(otherP.parentCtx) +} + +func (p *PredictionContext) GetParent(i int) *PredictionContext { + switch p.pcType { + case PredictionContextEmpty: + return nil + case PredictionContextSingleton: + return p.parentCtx + case PredictionContextArray: + return p.parents[i] + } + return nil +} + +func (p *PredictionContext) getReturnState(i int) int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates[i] + default: + return p.returnState + } +} + +func (p *PredictionContext) GetReturnStates() []int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates + default: + return []int{p.returnState} + } +} + +func (p *PredictionContext) length() int { + switch p.pcType { + case PredictionContextArray: + return len(p.returnStates) + default: + return 1 + } +} + +func (p *PredictionContext) hasEmptyPath() bool { + switch p.pcType { + case PredictionContextSingleton: + return p.returnState == BasePredictionContextEmptyReturnState + } + return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState +} + +func (p *PredictionContext) String() string { + switch p.pcType { + case PredictionContextEmpty: + return "$" + case PredictionContextSingleton: + var up string + + if p.parentCtx == nil { + up = "" + } else { + up = p.parentCtx.String() + } + + if len(up) == 0 { + if p.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(p.returnState) + } + + return strconv.Itoa(p.returnState) + " " + up + case PredictionContextArray: + if p.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < 
len(p.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if p.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(p.returnStates[i]) + if !p.parents[i].isEmpty() { + s = s + " " + p.parents[i].String() + } else { + s = s + "nil" + } + } + return s + "]" + + default: + return "unknown" + } +} + +func (p *PredictionContext) isEmpty() bool { + switch p.pcType { + case PredictionContextEmpty: + return true + case PredictionContextArray: + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return p.returnStates[0] == BasePredictionContextEmptyReturnState + default: + return false + } +} + +func (p *PredictionContext) Type() int { + return p.pcType +} + +func calculateHash(parent *PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.Hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. (if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + + // Share same graph if both same + // + if a == b || a.Equals(b) { + return a + } + + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { + return mergeSingletons(a, b, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as wildcard + if rootIsWildcard { + if a.isEmpty() { + return a + } + if b.isEmpty() { + return b + } + } + + // Convert either Singleton or Empty to arrays, so that we can merge them + // + ara := convertToArray(a) + arb := convertToArray(b) + return mergeArrays(ara, arb, rootIsWildcard, mergeCache) +} + +func convertToArray(pc *PredictionContext) *PredictionContext { + switch pc.Type() { + case PredictionContextEmpty: + return NewArrayPredictionContext([]*PredictionContext{}, []int{}) + case PredictionContextSingleton: + return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)}) + default: + // Already an array + } + return pc +} + +// mergeSingletons merges two Singleton [PredictionContext] instances. +// +// Stack tops equal, parents merge is same return left graph. +//
+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both element in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// @param mergeCache +// / +func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + return previous + } + } + + rootMerge := mergeRoot(a, b, rootIsWildcard) + if rootMerge != nil { + if mergeCache != nil { + mergeCache.Put(a, b, rootMerge) + } + return rootMerge + } + if a.returnState == b.returnState { + parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) + // if parent is same as existing a or b parent or reduced to a parent, + // return it + if parent.Equals(a.parentCtx) { + return a // ax + bx = ax, if a=b + } + if parent.Equals(b.parentCtx) { + return b // ax + bx = bx, if a=b + } + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. dup a, a' points at merged array. + // New joined parent so create a new singleton pointing to it, a' + spc := SingletonBasePredictionContextCreate(parent, a.returnState) + if mergeCache != nil { + mergeCache.Put(a, b, spc) + } + return spc + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + var singleParent *PredictionContext + if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax + + // bx = + // [a,b]x + singleParent = a.parentCtx + } + if singleParent != nil { // parents are same + // sort payloads and use same parent + payloads := []int{a.returnState, b.returnState} + if a.returnState > b.returnState { + payloads[0] = b.returnState + payloads[1] = a.returnState + } + parents := []*PredictionContext{singleParent, singleParent} + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc + } + // parents differ and can't merge them. Just pack together + // into array can't merge. + // ax + by = [ax,by] + payloads := []int{a.returnState, b.returnState} + parents := []*PredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload + payloads[0] = b.returnState + payloads[1] = a.returnState + parents = []*PredictionContext{b.parentCtx, a.parentCtx} + } + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc +} + +// Handle case where at least one of {@code a} or {@code b} is +// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used +// to represent {@link //EMPTY}. +// +//
+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
+// Special case of last merge if local context.
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+// Must keep all contexts {@link //EMPTY} in array is a special value (and
+// nil parent).
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// / +func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext { + if rootIsWildcard { + if a.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // // + b =// + } + if b.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // a +// =// + } + } else { + if a.isEmpty() && b.isEmpty() { + return BasePredictionContextEMPTY // $ + $ = $ + } else if a.isEmpty() { // $ + x = [$,x] + payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{b.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present) + payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{a.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } + } + return nil +} + +// Merge two {@link ArrayBasePredictionContext} instances. +// +//
+// Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+// +//goland:noinspection GoBoolExpressions +func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + } + // merge sorted payloads a + b => M + i := 0 // walks a + j := 0 // walks b + k := 0 // walks target M array + + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates)) + // walk and merge to yield mergedParents, mergedReturnStates + for i < len(a.returnStates) && j < len(b.returnStates) { + aParent := a.parents[i] + bParent := b.parents[j] + if a.returnStates[i] == b.returnStates[j] { + // same payload (stack tops are equal), must yield merged singleton + payload := a.returnStates[i] + // $+$ = $ + bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil + axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax + // -> + // ax + if bothDollars || axAX { + mergedParents[k] = aParent // choose left + mergedReturnStates[k] = payload + } else { // ax+ay -> a'[x,y] + mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) + mergedParents[k] = mergedParent + mergedReturnStates[k] = payload + } + i++ // hop over left one as usual + j++ // but also Skip one in right side since we merge + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M + mergedParents[k] = aParent + mergedReturnStates[k] = a.returnStates[i] + i++ + } else { // b > a, copy b[j] to M + mergedParents[k] = bParent + mergedReturnStates[k] = b.returnStates[j] + j++ + } + k++ + } + // copy over any payloads remaining in either array + if i < len(a.returnStates) { + for p := i; p < len(a.returnStates); p++ { + mergedParents[k] = a.parents[p] + mergedReturnStates[k] = a.returnStates[p] + k++ + } + } else { + for p := j; p < len(b.returnStates); p++ { + mergedParents[k] = b.parents[p] + mergedReturnStates[k] = b.returnStates[p] + k++ + } + } + // trim merged if we combined a few that had same stack tops + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top + pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) + if mergeCache != nil { + mergeCache.Put(a, b, pc) + } + return pc + } + mergedParents = mergedParents[0:k] + mergedReturnStates = mergedReturnStates[0:k] + } + + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) + + // if we created same array as a or b, return that instead + // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation + if M.Equals(a) { + if mergeCache != nil { + mergeCache.Put(a, b, a) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") + } + return a + } + if M.Equals(b) { + if mergeCache != nil { + mergeCache.Put(a, b, b) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") + } + 
return b + } + combineCommonParents(&mergedParents) + + if mergeCache != nil { + mergeCache.Put(a, b, M) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) + } + return M +} + +// Make pass over all M parents and merge any Equals() ones. +// Note that we pass a pointer to the slice as we want to modify it in place. +// +//goland:noinspection GoUnusedFunction +func combineCommonParents(parents *[]*PredictionContext) { + uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext") + + for p := 0; p < len(*parents); p++ { + parent := (*parents)[p] + _, _ = uniqueParents.Put(parent) + } + for q := 0; q < len(*parents); q++ { + pc, _ := uniqueParents.Get((*parents)[q]) + (*parents)[q] = pc + } +} + +func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext { + if context.isEmpty() { + return context + } + existing, present := visited.Get(context) + if present { + return existing + } + + existing, present = contextCache.Get(context) + if present { + visited.Put(context, existing) + return existing + } + changed := false + parents := make([]*PredictionContext, context.length()) + for i := 0; i < len(parents); i++ { + parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) + if changed || !parent.Equals(context.GetParent(i)) { + if !changed { + parents = make([]*PredictionContext, context.length()) + for j := 0; j < context.length(); j++ { + parents[j] = context.GetParent(j) + } + changed = true + } + parents[i] = parent + } + } + if !changed { + contextCache.add(context) + visited.Put(context, context) + return context + } + var updated *PredictionContext + if len(parents) == 0 { + updated = BasePredictionContextEMPTY + } else if len(parents) == 1 { + updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) + } else { + updated = NewArrayPredictionContext(parents, context.GetReturnStates()) + } + contextCache.add(updated) + visited.Put(updated, updated) + visited.Put(context, updated) + + return updated +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go new file mode 100644 index 000000000..25dfb11e8 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go @@ -0,0 +1,48 @@ +package antlr + +var BasePredictionContextEMPTY = &PredictionContext{ + cachedHash: calculateEmptyHash(), + pcType: PredictionContextEmpty, + returnState: BasePredictionContextEmptyReturnState, +} + +// PredictionContextCache is Used to cache [PredictionContext] objects. It is used for the shared +// context cash associated with contexts in DFA states. This cache +// can be used for both lexers and parsers. +type PredictionContextCache struct { + cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]] +} + +func NewPredictionContextCache() *PredictionContextCache { + return &PredictionContextCache{ + cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"), + } +} + +// Add a context to the cache and return it. 
If the context already exists, +// return that one instead and do not add a new context to the cache. +// Protect shared cache from unsafe thread access. +func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext { + if ctx.isEmpty() { + return BasePredictionContextEMPTY + } + + // Put will return the existing entry if it is present (note this is done via Equals, not whether it is + // the same pointer), otherwise it will add the new entry and return that. + // + existing, present := p.cache.Get(ctx) + if present { + return existing + } + p.cache.Put(ctx, ctx) + return ctx +} + +func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) { + pc, exists := p.cache.Get(ctx) + return pc, exists +} + +func (p *PredictionContextCache) length() int { + return p.cache.Len() +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go new file mode 100644 index 000000000..3f85a6a52 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go @@ -0,0 +1,536 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// This enumeration defines the prediction modes available in ANTLR 4 along with +// utility methods for analyzing configuration sets for conflicts and/or +// ambiguities. + +const ( + // PredictionModeSLL represents the SLL(*) prediction mode. + // This prediction mode ignores the current + // parser context when making predictions. This is the fastest prediction + // mode, and provides correct results for many grammars. This prediction + // mode is more powerful than the prediction mode provided by ANTLR 3, but + // may result in syntax errors for grammar and input combinations which are + // not SLL. + // + // When using this prediction mode, the parser will either return a correct + // parse tree (i.e. the same parse tree that would be returned with the + // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a + // syntax error is encountered when using the SLL prediction mode, + // it may be due to either an actual syntax error in the input or indicate + // that the particular combination of grammar and input requires the more + // powerful LL prediction abilities to complete successfully. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeSLL = 0 + + // PredictionModeLL represents the LL(*) prediction mode. + // This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + // When using this prediction mode, the parser will make correct decisions + // for all syntactically-correct grammar and input combinations. However, in + // cases where the grammar is truly ambiguous this prediction mode might not + // report a precise answer for exactly which alternatives are + // ambiguous. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. 
+ // + PredictionModeLL = 1 + + // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode + // with exact ambiguity detection. + // + // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode, + // this prediction mode instructs the prediction algorithm to determine the + // complete and exact set of ambiguous alternatives for every ambiguous + // decision encountered while parsing. + // + // This prediction mode may be used for diagnosing ambiguities during + // grammar development. Due to the performance overhead of calculating sets + // of ambiguous alternatives, this prediction mode should be avoided when + // the exact results are not necessary. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeLLExactAmbigDetection = 2 +) + +// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition. +// +// This method computes the SLL prediction termination condition for both of +// the following cases: +// +// - The usual SLL+LL fallback upon SLL conflict +// - Pure SLL without LL fallback +// +// # Combined SLL+LL Parsing +// +// When LL-fallback is enabled upon SLL conflict, correct predictions are +// ensured regardless of how the termination condition is computed by this +// method. Due to the substantially higher cost of LL prediction, the +// prediction should only fall back to LL when the additional lookahead +// cannot lead to a unique SLL prediction. +// +// Assuming combined SLL+LL parsing, an SLL configuration set with only +// conflicting subsets should fall back to full LL, even if the +// configuration sets don't resolve to the same alternative, e.g. +// +// {1,2} and {3,4} +// +// If there is at least one non-conflicting +// configuration, SLL could continue with the hopes that more lookahead will +// resolve via one of those non-conflicting configurations. +// +// Here's the prediction termination rule them: SLL (for SLL+LL parsing) +// stops when it sees only conflicting configuration subsets. In contrast, +// full LL keeps going when there is uncertainty. +// +// # Heuristic +// +// As a heuristic, we stop prediction when we see any conflicting subset +// unless we see a state that only has one alternative associated with it. +// The single-alt-state thing lets prediction continue upon rules like +// (otherwise, it would admit defeat too soon): +// +// [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ; +// +// When the [ATN] simulation reaches the state before ';', it has a +// [DFA] state that looks like: +// +// [12|1|[], 6|2|[], 12|2|[]] +// +// Naturally +// +// 12|1|[] and 12|2|[] +// +// conflict, but we cannot stop processing this node because alternative to has another way to continue, +// via +// +// [6|2|[]] +// +// It also let's us continue for this rule: +// +// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ; +// +// After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state immediately before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue, and so we do not stop +// working on this state. In the previous example, we're concerned with +// states associated with the conflicting alternatives. Here alt 3 is not +// associated with the conflicting configs, but since we can continue +// looking for input reasonably, don't declare the state done. 
+// +// # Pure SLL Parsing +// +// To handle pure SLL parsing, all we have to do is make sure that we +// combine stack contexts for configurations that differ only by semantic +// predicate. From there, we can do the usual SLL termination heuristic. +// +// # Predicates in SLL+LL Parsing +// +// SLL decisions don't evaluate predicates until after they reach [DFA] stop +// states because they need to create the [DFA] cache that works in all +// semantic situations. In contrast, full LL evaluates predicates collected +// during start state computation, so it can ignore predicates thereafter. +// This means that SLL termination detection can totally ignore semantic +// predicates. +// +// Implementation-wise, [ATNConfigSet] combines stack contexts but not +// semantic predicate contexts, so we might see two configurations like the +// following: +// +// (s, 1, x, {}), (s, 1, x', {p}) +// +// Before testing these configurations against others, we have to merge +// x and x' (without modifying the existing configurations). +// For example, we test (x+x')==x” when looking for conflicts in +// the following configurations: +// +// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {}) +// +// If the configuration set has predicates (as indicated by +// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of +// the configurations to strip out all the predicates so that a standard +// [ATNConfigSet] will merge everything ignoring predicates. +func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool { + + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input, so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.hasSemanticContext { + // dup configs, tossing out semantic predicates + dup := NewATNConfigSet(false) + for _, c := range configs.configs { + + // NewATNConfig({semanticContext:}, c) + c = NewATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar predicates + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// The func returns true if any configuration in the supplied configs is in a [RuleStopState] +func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool { + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). 
+// +// the func returns true if all configurations in configs are in a +// [RuleStopState] +func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool { + + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination. +// +// Can we stop looking ahead during [ATN] simulation or is there some +// uncertainty as to which alternative we will ultimately pick, after +// consuming more input? Even if there are partial conflicts, we might know +// that everything is going to resolve to the same minimum alternative. That +// means we can stop since no more lookahead will change that fact. On the +// other hand, there might be multiple conflicts that resolve to different +// minimums. That means we need more look ahead to decide which of those +// alternatives we should predict. +// +// The basic idea is to split the set of configurations 'C', into +// conflicting subsets (s, _, ctx, _) and singleton subsets with +// non-conflicting configurations. Two configurations conflict if they have +// identical [ATNConfig].state and [ATNConfig].context values +// but a different [ATNConfig].alt value, e.g. +// +// (s, i, ctx, _) +// +// and +// +// (s, j, ctx, _) ; for i != j +// +// Reduce these configuration subsets to the set of possible alternatives. +// You can compute the alternative subsets in one pass as follows: +// +// A_s,ctx = {i | (s, i, ctx, _)} +// +// for each configuration in C holding s and ctx fixed. +// +// Or in pseudo-code: +// +// for each configuration c in C: +// map[c] U = c.ATNConfig.alt alt // map hash/equals uses s and x, not alt and not pred +// +// The values in map are the set of +// +// A_s,ctx +// +// sets. +// +// If +// +// |A_s,ctx| = 1 +// +// then there is no conflict associated with s and ctx. +// +// Reduce the subsets to singletons by choosing a minimum of each subset. If +// the union of these alternative subsets is a singleton, then no amount of +// further lookahead will help us. We will always pick that alternative. If, +// however, there is more than one alternative, then we are uncertain which +// alternative to predict and must continue looking for resolution. We may +// or may not discover an ambiguity in the future, even if there are no +// conflicting subsets this round. +// +// The biggest sin is to terminate early because it means we've made a +// decision but were uncertain as to the eventual outcome. We haven't used +// enough lookahead. On the other hand, announcing a conflict too late is no +// big deal; you will still have the conflict. It's just inefficient. It +// might even look until the end of file. +// +// No special consideration for semantic predicates is required because +// predicates are evaluated on-the-fly for full LL prediction, ensuring that +// no configuration contains a semantic context during the termination +// check. +// +// # Conflicting Configs +// +// Two configurations: +// +// (s, i, x) and (s, j, x') +// +// conflict when i != j but x = x'. Because we merge all +// (s, i, _) configurations together, that means that there are at +// most n configurations associated with state s for +// n possible alternatives in the decision. The merged stacks +// complicate the comparison of configuration contexts x and x'. +// +// Sam checks to see if one is a subset of the other by calling +// merge and checking to see if the merged result is either x or x'. 
+// If the x associated with lowest alternative i +// is the superset, then i is the only possible prediction since the +// others resolve to min(i) as well. However, if x is +// associated with j > i then at least one stack configuration for +// j is not in conflict with alternative i. The algorithm +// should keep going, looking for more lookahead due to the uncertainty. +// +// For simplicity, I'm doing an equality check between x and +// x', which lets the algorithm continue to consume lookahead longer +// than necessary. The reason I like the equality is of course the +// simplicity but also because that is the test you need to detect the +// alternatives that are actually in conflict. +// +// # Continue/Stop Rule +// +// Continue if the union of resolved alternative sets from non-conflicting and +// conflicting alternative subsets has more than one alternative. We are +// uncertain about which alternative to predict. +// +// The complete set of alternatives, +// +// [i for (_, i, _)] +// +// tells us which alternatives are still in the running for the amount of input we've +// consumed at this point. The conflicting sets let us to strip away +// configurations that won't lead to more states because we resolve +// conflicts to the configuration with a minimum alternate for the +// conflicting set. +// +// Cases +// +// - no conflicts and more than 1 alternative in set => continue +// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set +// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s”, 1, z) yields non-conflicting set +// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1 +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets +// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2} +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets +// {1} ∪ {3} = {1,3} => continue +// +// # Exact Ambiguity Detection +// +// If all states report the same conflicting set of alternatives, then we +// know we have the exact ambiguity set: +// +// |A_i| > 1 +// +// and +// +// A_i = A_j ; for all i, j +// +// In other words, we continue examining lookahead until all A_i +// have more than one alternative and all A_i are the same. If +// +// A={{1,2}, {1,3}} +// +// then regular LL prediction would terminate because the resolved set is {1}. +// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and +// two or one and three so we keep going. We can only stop prediction when +// we need exact ambiguity detection when the sets look like: +// +// A={{1,2}} +// +// or +// +// {{1,2},{1,2}}, etc... +func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more +// than one alternative. 
+// +// The func returns true if every [BitSet] in altsets has +// [BitSet].cardinality cardinality > 1 +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains +// exactly one alternative. +// +// The func returns true if altsets contains at least one [BitSet] with +// [BitSet].cardinality cardinality 1 +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains +// more than one alternative. +// +// The func returns true if altsets contains a [BitSet] with +// [BitSet].cardinality cardinality > 1, otherwise false +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent. +// +// The func returns true if every member of altsets is equal to the others. +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in +// altsets. If no such alternative exists, this method returns +// [ATNInvalidAltNumber]. +// +// @param altsets a collection of alternative subsets +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each [BitSet] +// in altsets, being the set of represented alternatives in altsets. +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. +// +// for each configuration c in configs: +// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred +func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet { + configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()") + + for _, c := range configs.configs { + + alts, ok := configToAlts.Get(c) + if !ok { + alts = NewBitSet() + configToAlts.Put(c, alts) + } + alts.add(c.GetAlt()) + } + + return configToAlts.Values() +} + +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. 
+// +// for each configuration c in configs: +// map[c.ATNConfig.state] U= c.ATNConfig.alt} +func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.configs { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets +// if there is one. +// +// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/recognizer.go new file mode 100644 index 000000000..2e0b504fb --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/recognizer.go @@ -0,0 +1,241 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strings" + + "strconv" +) + +type Recognizer interface { + GetLiteralNames() []string + GetSymbolicNames() []string + GetRuleNames() []string + + Sempred(RuleContext, int, int) bool + Precpred(RuleContext, int) bool + + GetState() int + SetState(int) + Action(RuleContext, int, int) + AddErrorListener(ErrorListener) + RemoveErrorListeners() + GetATN() *ATN + GetErrorListenerDispatch() ErrorListener + HasError() bool + GetError() RecognitionException + SetError(RecognitionException) +} + +type BaseRecognizer struct { + listeners []ErrorListener + state int + + RuleNames []string + LiteralNames []string + SymbolicNames []string + GrammarFileName string + SynErr RecognitionException +} + +func NewBaseRecognizer() *BaseRecognizer { + rec := new(BaseRecognizer) + rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} + rec.state = -1 + return rec +} + +//goland:noinspection GoUnusedGlobalVariable +var tokenTypeMapCache = make(map[string]int) + +//goland:noinspection GoUnusedGlobalVariable +var ruleIndexMapCache = make(map[string]int) + +func (b *BaseRecognizer) checkVersion(toolVersion string) { + runtimeVersion := "4.12.0" + if runtimeVersion != toolVersion { + fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) + } +} + +func (b *BaseRecognizer) SetError(err RecognitionException) { + b.SynErr = err +} + +func (b *BaseRecognizer) HasError() bool { + return b.SynErr != nil +} + +func (b *BaseRecognizer) GetError() RecognitionException { + return b.SynErr +} + +func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) { + panic("action not implemented on Recognizer!") +} + +func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { + b.listeners 
= append(b.listeners, listener) +} + +func (b *BaseRecognizer) RemoveErrorListeners() { + b.listeners = make([]ErrorListener, 0) +} + +func (b *BaseRecognizer) GetRuleNames() []string { + return b.RuleNames +} + +func (b *BaseRecognizer) GetTokenNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetSymbolicNames() []string { + return b.SymbolicNames +} + +func (b *BaseRecognizer) GetLiteralNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetState() int { + return b.state +} + +func (b *BaseRecognizer) SetState(v int) { + b.state = v +} + +//func (b *Recognizer) GetTokenTypeMap() { +// var tokenNames = b.GetTokenNames() +// if (tokenNames==nil) { +// panic("The current recognizer does not provide a list of token names.") +// } +// var result = tokenTypeMapCache[tokenNames] +// if(result==nil) { +// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) +// result.EOF = TokenEOF +// tokenTypeMapCache[tokenNames] = result +// } +// return result +//} + +// GetRuleIndexMap Get a map from rule names to rule indexes. +// +// Used for XPath and tree pattern compilation. +// +// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed. +func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { + + panic("Method not defined!") + // var ruleNames = b.GetRuleNames() + // if (ruleNames==nil) { + // panic("The current recognizer does not provide a list of rule names.") + // } + // + // var result = ruleIndexMapCache[ruleNames] + // if(result==nil) { + // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) + // ruleIndexMapCache[ruleNames] = result + // } + // return result +} + +// GetTokenType get the token type based upon its name +func (b *BaseRecognizer) GetTokenType(_ string) int { + panic("Method not defined!") + // var ttype = b.GetTokenTypeMap()[tokenName] + // if (ttype !=nil) { + // return ttype + // } else { + // return TokenInvalidType + // } +} + +//func (b *Recognizer) GetTokenTypeMap() map[string]int { +// Vocabulary vocabulary = getVocabulary() +// +// Synchronized (tokenTypeMapCache) { +// Map result = tokenTypeMapCache.Get(vocabulary) +// if (result == null) { +// result = new HashMap() +// for (int i = 0; i < GetATN().maxTokenType; i++) { +// String literalName = vocabulary.getLiteralName(i) +// if (literalName != null) { +// result.put(literalName, i) +// } +// +// String symbolicName = vocabulary.GetSymbolicName(i) +// if (symbolicName != null) { +// result.put(symbolicName, i) +// } +// } +// +// result.put("EOF", Token.EOF) +// result = Collections.unmodifiableMap(result) +// tokenTypeMapCache.put(vocabulary, result) +// } +// +// return result +// } +//} + +// GetErrorHeader returns the error header, normally line/character position information. +// +// Can be overridden in sub structs embedding BaseRecognizer. +func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { + line := e.GetOffendingToken().GetLine() + column := e.GetOffendingToken().GetColumn() + return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) +} + +// GetTokenErrorDisplay shows how a token should be displayed in an error message. +// +// The default is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). 
This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. +// +// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific +// implementations of [ANTLRErrorStrategy] may provide a similar +// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay() +func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + + return "'" + s + "'" +} + +func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { + return NewProxyErrorListener(b.listeners) +} + +// Sempred embedding structs need to override this if there are sempreds or actions +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool { + return true +} + +// Precpred embedding structs need to override this if there are preceding predicates +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool { + return true +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/rule_context.go new file mode 100644 index 000000000..f2ad04793 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/rule_context.go @@ -0,0 +1,40 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// RuleContext is a record of a single rule invocation. It knows +// which context invoked it, if any. If there is no parent context, then +// naturally the invoking state is not valid. The parent link +// provides a chain upwards from the current rule invocation to the root +// of the invocation tree, forming a stack. +// +// We actually carry no information about the rule associated with this context (except +// when parsing). We keep only the state number of the invoking state from +// the [ATN] submachine that invoked this. Contrast this with the s +// pointer inside [ParserRuleContext] that tracks the current state +// being "executed" for the current rule. +// +// The parent contexts are useful for computing lookahead sets and +// getting error information. +// +// These objects are used during parsing and prediction. +// For the special case of parsers, we use the struct +// [ParserRuleContext], which embeds a RuleContext. 
+// +// @see ParserRuleContext +type RuleContext interface { + RuleNode + + GetInvokingState() int + SetInvokingState(int) + + GetRuleIndex() int + IsEmpty() bool + + GetAltNumber() int + SetAltNumber(altNumber int) + + String([]string, RuleContext) string +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go similarity index 92% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go index a702e99de..68cb9061e 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go @@ -9,14 +9,13 @@ import ( "strconv" ) -// A tree structure used to record the semantic context in which -// an ATN configuration is valid. It's either a single predicate, -// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +// SemanticContext is a tree structure used to record the semantic context in which // -//
-// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
-// {@link SemanticContext} within the scope of this outer class.
+// an ATN configuration is valid. It's either a single predicate, +// a conjunction p1 && p2, or a sum of products p1 || p2. // - +// I have scoped the AND, OR, and Predicate subclasses of +// [SemanticContext] within the scope of this outer ``class'' type SemanticContext interface { Equals(other Collectable[SemanticContext]) bool Hash() int @@ -80,7 +79,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { var SemanticContextNone = NewPredicate(-1, -1, false) -func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { +func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext { return p } @@ -198,7 +197,7 @@ type AND struct { func NewAND(a, b SemanticContext) *AND { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands") if aa, ok := a.(*AND); ok { for _, o := range aa.opnds { operands.Put(o) @@ -230,9 +229,7 @@ func NewAND(a, b SemanticContext) *AND { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } + copy(opnds, vs) and := new(AND) and.opnds = opnds @@ -316,12 +313,12 @@ func (a *AND) Hash() int { return murmurFinish(h, len(a.opnds)) } -func (a *OR) Hash() int { - h := murmurInit(41) // Init with a value different from AND - for _, op := range a.opnds { +func (o *OR) Hash() int { + h := murmurInit(41) // Init with o value different from AND + for _, op := range o.opnds { h = murmurUpdate(h, op.Hash()) } - return murmurFinish(h, len(a.opnds)) + return murmurFinish(h, len(o.opnds)) } func (a *AND) String() string { @@ -349,7 +346,7 @@ type OR struct { func NewOR(a, b SemanticContext) *OR { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands") if aa, ok := a.(*OR); ok { for _, o := range aa.opnds { operands.Put(o) @@ -382,9 +379,7 @@ func NewOR(a, b SemanticContext) *OR { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } + copy(opnds, vs) o := new(OR) o.opnds = opnds diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/statistics.go new file mode 100644 index 000000000..70c0673a0 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/statistics.go @@ -0,0 +1,281 @@ +//go:build antlr.stats + +package antlr + +import ( + "fmt" + "log" + "os" + "path/filepath" + "sort" + "strconv" + "sync" +) + +// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default +// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag. +// + +// Tells various components to collect statistics - because it is only true when this file is included, it will +// allow the compiler to completely eliminate all the code that is only used when collecting statistics. +const collectStats = true + +// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run. +// It is exported so that it can be used by others to look for things that are not already looked for in the +// runtime statistics. 
+type goRunStats struct { + + // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created + // during a run. It is exported so that it can be used by others to look for things that are not already looked for + // within this package. + // + jStats []*JStatRec + jStatsLock sync.RWMutex + topN int + topNByMax []*JStatRec + topNByUsed []*JStatRec + unusedCollections map[CollectionSource]int + counts map[CollectionSource]int +} + +const ( + collectionsFile = "collections" +) + +var ( + Statistics = &goRunStats{ + topN: 10, + } +) + +type statsOption func(*goRunStats) error + +// Configure allows the statistics system to be configured as the user wants and override the defaults +func (s *goRunStats) Configure(options ...statsOption) error { + for _, option := range options { + err := option(s) + if err != nil { + return err + } + } + return nil +} + +// WithTopN sets the number of things to list in the report when we are concerned with the top N things. +// +// For example, if you want to see the top 20 collections by size, you can do: +// +// antlr.Statistics.Configure(antlr.WithTopN(20)) +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + s.topN = topN + return nil + } +} + +// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user. +// +// The function gathers and analyzes a number of statistics about any particular run of +// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only +// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be +// especially useful in tracking down bugs or performance problems when an ANTLR user could +// supply the output from this package, but cannot supply the grammar file(s) they are using, even +// privately to the maintainers. +// +// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user +// must call this function their selves to analyze the statistics. This is because none of the infrastructure is +// extant unless the calling program is built with the antlr.stats tag like so: +// +// go build -tags antlr.stats . +// +// When a program is built with the antlr.stats tag, the Statistics object is created and available outside +// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the +// [Statistics.Report] function to report the statistics. +// +// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send to them to +// me [Jim Idle] directly at jimi@idle.ws +// +// [Jim Idle]: https:://github.com/jim-idle +func (s *goRunStats) Analyze() { + + // Look for anything that looks strange and record it in our local maps etc for the report to present it + // + s.CollectionAnomalies() + s.TopNCollections() +} + +// TopNCollections looks through all the statistical records and gathers the top ten collections by size. 
+func (s *goRunStats) TopNCollections() { + + // Let's sort the stat records by MaxSize + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].MaxSize > s.jStats[j].MaxSize + }) + + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByMax = append(s.topNByMax, s.jStats[i]) + } + + // Sort by the number of times used + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts + }) + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByUsed = append(s.topNByUsed, s.jStats[i]) + } +} + +// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output +// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be +// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix. +func (s *goRunStats) Report(dir string, prefix string) error { + + isDir, err := isDirectory(dir) + switch { + case err != nil: + return err + case !isDir: + return fmt.Errorf("output directory `%s` is not a directory", dir) + } + s.reportCollections(dir, prefix) + + // Clean out any old data in case the user forgets + // + s.Reset() + return nil +} + +func (s *goRunStats) Reset() { + s.jStats = nil + s.topNByUsed = nil + s.topNByMax = nil +} + +func (s *goRunStats) reportCollections(dir, prefix string) { + cname := filepath.Join(dir, ".asciidoctor") + // If the file doesn't exist, create it, or append to the file + f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + _, _ = f.WriteString(`// .asciidoctorconfig +++++ + +++++`) + _ = f.Close() + + fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc") + // If the file doesn't exist, create it, or append to the file + f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + defer func(f *os.File) { + err := f.Close() + if err != nil { + log.Fatal(err) + } + }(f) + _, _ = f.WriteString("= Collections for " + prefix + "\n\n") + + _, _ = f.WriteString("== Summary\n") + + if s.unusedCollections != nil { + _, _ = f.WriteString("=== Unused Collections\n") + _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n") + _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n") + _, _ = f.WriteString(" consider removing it. 
If you are using a collection that is used, but not very often,\n") + _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n") + _, _ = f.WriteString(" actually needed.\n\n") + + _, _ = f.WriteString("\n.Unused collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + + for k, v := range s.unusedCollections { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + f.WriteString("|===\n\n") + } + + _, _ = f.WriteString("\n.Summary of Collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + for k, v := range s.counts { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n") + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n") + for _, c := range s.topNByMax { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n") + for _, c := range s.topNByUsed { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") +} + +// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled. +func (s *goRunStats) AddJStatRec(rec *JStatRec) { + s.jStatsLock.Lock() + defer s.jStatsLock.Unlock() + s.jStats = append(s.jStats, rec) +} + +// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found. 
+func (s *goRunStats) CollectionAnomalies() { + s.jStatsLock.RLock() + defer s.jStatsLock.RUnlock() + s.counts = make(map[CollectionSource]int, len(s.jStats)) + for _, c := range s.jStats { + + // Accumlate raw counts + // + s.counts[c.Source]++ + + // Look for allocated but unused collections and count them + if c.MaxSize == 0 && c.Puts == 0 { + if s.unusedCollections == nil { + s.unusedCollections = make(map[CollectionSource]int) + } + s.unusedCollections[c.Source]++ + } + if c.MaxSize > 6000 { + fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar") + } + } + +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/stats_data.go new file mode 100644 index 000000000..4d9eb94e5 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/stats_data.go @@ -0,0 +1,23 @@ +package antlr + +// A JStatRec is a record of a particular use of a [JStore], [JMap] or JPCMap] collection. Typically, it will be +// used to look for unused collections that wre allocated anyway, problems with hash bucket clashes, and anomalies +// such as huge numbers of Gets with no entries found GetNoEnt. You can refer to the CollectionAnomalies() function +// for ideas on what can be gleaned from these statistics about collections. +type JStatRec struct { + Source CollectionSource + MaxSize int + CurSize int + Gets int + GetHits int + GetMisses int + GetHashConflicts int + GetNoEnt int + Puts int + PutHits int + PutMisses int + PutHashConflicts int + MaxSlotSize int + Description string + CreateStack []byte +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/token.go similarity index 86% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/token.go index f73b06bc6..9670efb82 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/token.go @@ -35,6 +35,8 @@ type Token interface { GetTokenSource() TokenSource GetInputStream() CharStream + + String() string } type BaseToken struct { @@ -53,7 +55,7 @@ type BaseToken struct { const ( TokenInvalidType = 0 - // During lookahead operations, this "token" signifies we hit rule end ATN state + // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state // and did not follow it despite needing to. TokenEpsilon = -2 @@ -61,15 +63,16 @@ const ( TokenEOF = -1 - // All tokens go to the parser (unless Skip() is called in that rule) + // TokenDefaultChannel is the default channel upon which tokens are sent to the parser. + // + // All tokens go to the parser (unless [Skip] is called in the lexer rule) // on a particular "channel". The parser tunes to a particular channel // so that whitespace etc... can go to the parser on a "hidden" channel. - TokenDefaultChannel = 0 - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. - + // TokenHiddenChannel defines the normal hidden channel - the parser wil not see tokens that are not on [TokenDefaultChannel]. + // + // Anything on a different channel than TokenDefaultChannel is not parsed by parser. 
TokenHiddenChannel = 1 ) @@ -118,21 +121,22 @@ func (b *BaseToken) GetInputStream() CharStream { } type CommonToken struct { - *BaseToken + BaseToken } func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - t := new(CommonToken) - - t.BaseToken = new(BaseToken) + t := &CommonToken{ + BaseToken: BaseToken{ + source: source, + tokenType: tokenType, + channel: channel, + start: start, + stop: stop, + tokenIndex: -1, + }, + } - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 if t.source.tokenSource != nil { t.line = source.tokenSource.GetLine() t.column = source.tokenSource.GetCharPositionInLine() diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/token_source.go similarity index 100% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/token_source.go diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/token_stream.go similarity index 90% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/token_stream.go index 1527d43f6..bf4ff6633 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/token_stream.go @@ -8,13 +8,14 @@ type TokenStream interface { IntStream LT(k int) Token + Reset() Get(index int) Token GetTokenSource() TokenSource SetTokenSource(TokenSource) GetAllText() string - GetTextFromInterval(*Interval) string + GetTextFromInterval(Interval) string GetTextFromRuleContext(RuleContext) string GetTextFromTokens(Token, Token) string } diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go new file mode 100644 index 000000000..ccf59b465 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go @@ -0,0 +1,662 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "fmt" +) + +// +// Useful for rewriting out a buffered input token stream after doing some +// augmentation or other manipulations on it. + +//

+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily: only if you convert the buffer to a string by calling
+// GetText. This is very efficient because you are not moving data around
+// all the time. As the buffer of tokens is converted to strings, the GetText
+// method(s) scan the input token stream and check to see if there is an
+// operation at the current index. If so, the operation is done and then normal
+// string rendering continues on the buffer. This is like having multiple Turing
+// machine instruction streams (programs) operating on a single input tape. :)
+//
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream index will return the same value before and after any GetText call.
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// GetText halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+//
+// Since the operations are done lazily at GetText-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index i does not change the index values for tokens i+1..n-1.
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example:
+//
+//	CharStream input = new ANTLRFileStream("input");
+//	TLexer lex = new TLexer(input);
+//	CommonTokenStream tokens = new CommonTokenStream(lex);
+//	T parser = new T(tokens);
+//	TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+//	parser.startRule();
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+//
+//	Token t,u;
+//	...
+//	rewriter.insertAfter(t, "text to put after t");
+//	rewriter.insertAfter(u, "text after u");
+//	System.out.println(rewriter.getText());
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file, all from the same buffer:
+//
+//	rewriter.insertAfter("pass1", t, "text to put after t");
+//	rewriter.insertAfter("pass2", u, "text after u");
+//	System.out.println(rewriter.getText("pass1"));
+//	System.out.println(rewriter.getText("pass2"));
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
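The code snippets in the comment above are carried over from the Java runtime that this package was ported from. Purely as a rough sketch, the same flow against the Go API declared in this file might look as follows; the package and function names are invented for illustration, and `tokens` is assumed to be a token stream already filled by a generated lexer with at least eight tokens:

	package rewrite

	import "github.com/antlr4-go/antlr/v4"

	// rewriteSketch queues a few lazy edit instructions against an already-filled
	// token stream and then renders the edited text. The underlying stream is never
	// modified; the instructions are only applied when GetTextDefault walks the buffer.
	func rewriteSketch(tokens antlr.TokenStream) string {
		rewriter := antlr.NewTokenStreamRewriter(tokens)

		rewriter.InsertBeforeDefault(0, "// header\n") // insert text before the first token
		rewriter.ReplaceDefault(3, 5, "replacement")   // replace tokens 3..5 (inclusive)
		rewriter.DeleteDefault(7, 7)                   // delete token 7 (a replace with empty text)

		// Named instruction streams work the same way, e.g.
		// rewriter.InsertBefore("pass2", 0, "...") followed by
		// rewriter.GetText("pass2", antlr.NewInterval(0, tokens.Size()-1)).
		return rewriter.GetTextDefault()
	}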
+ +const ( + DefaultProgramName = "default" + ProgramInitSize = 100 + MinTokenIndex = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instructionIndex int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + opName string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation) GetInstructionIndex() int { + return op.instructionIndex +} + +func (op *BaseRewriteOperation) GetIndex() int { + return op.index +} + +func (op *BaseRewriteOperation) GetText() string { + return op.text +} + +func (op *BaseRewriteOperation) GetOpName() string { + return op.opName +} + +func (op *BaseRewriteOperation) GetTokens() TokenStream { + return op.tokens +} + +func (op *BaseRewriteOperation) SetInstructionIndex(val int) { + op.instructionIndex = val +} + +func (op *BaseRewriteOperation) SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation) SetText(val string) { + op.text = val +} + +func (op *BaseRewriteOperation) SetOpName(val string) { + op.opName = val +} + +func (op *BaseRewriteOperation) SetTokens(val TokenStream) { + op.tokens = val +} + +func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int { + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.opName, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { + return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index, + text: text, + opName: "InsertBeforeOp", + tokens: stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions +// first and then the "insert before" instructions at same index. Implementation +// of "insert after" is "insert before index+1". +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { + return &InsertAfterOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, + }, + } +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp +// instructions. 
+type ReplaceOp struct { + BaseRewriteOperation + LastIndex int +} + +func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { + return &ReplaceOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: from, + text: text, + opName: "ReplaceOp", + tokens: stream, + }, + LastIndex: to, + } +} + +func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int { + if op.text != "" { + buffer.WriteString(op.text) + } + return op.LastIndex + 1 +} + +func (op *ReplaceOp) String() string { + if op.text == "" { + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) + } + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) +} + +type TokenStreamRewriter struct { + //Our source stream + tokens TokenStream + // You may have multiple, named streams of rewrite operations. + // I'm calling these things "programs." + // Maps String (name) → rewrite (List) + programs map[string][]RewriteOperation + lastRewriteTokenIndexes map[string]int +} + +func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter { + return &TokenStreamRewriter{ + tokens: tokens, + programs: map[string][]RewriteOperation{ + DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize), + }, + lastRewriteTokenIndexes: map[string]int{}, + } +} + +func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream { + return tsr.tokens +} + +// Rollback the instruction stream for a program so that +// the indicated instruction (via instructionIndex) is no +// longer in the stream. UNTESTED! +func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) { + is, ok := tsr.programs[programName] + if ok { + tsr.programs[programName] = is[MinTokenIndex:instructionIndex] + } +} + +func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) { + tsr.Rollback(DefaultProgramName, instructionIndex) +} + +// DeleteProgram Reset the program so that no instructions exist +func (tsr *TokenStreamRewriter) DeleteProgram(programName string) { + tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included +} + +func (tsr *TokenStreamRewriter) DeleteProgramDefault() { + tsr.DeleteProgram(DefaultProgramName) +} + +func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) { + // to insert after, just insert before next index (even if past end) + var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(programName) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(programName, op) +} + +func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) { + tsr.InsertAfter(DefaultProgramName, index, text) +} + +func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) { + tsr.InsertAfter(programName, token.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) { + var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(programName) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(programName, op) +} + +func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) { + tsr.InsertBefore(DefaultProgramName, index, text) +} + +func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) { + tsr.InsertBefore(programName, token.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text 
string) { + if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { + panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", + from, to, tsr.tokens.Size())) + } + var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) + rewrites := tsr.GetProgram(programName) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(programName, op) +} + +func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { + tsr.Replace(DefaultProgramName, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { + tsr.ReplaceDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) { + tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { + tsr.ReplaceToken(DefaultProgramName, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { + tsr.ReplaceTokenDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) { + tsr.Replace(programName, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { + tsr.Delete(DefaultProgramName, from, to) +} + +func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { + tsr.DeleteDefault(index, index) +} + +func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) { + tsr.ReplaceToken(programName, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { + tsr.DeleteToken(DefaultProgramName, from, to) +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int { + i, ok := tsr.lastRewriteTokenIndexes[programName] + if !ok { + return -1 + } + return i +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { + return tsr.GetLastRewriteTokenIndex(DefaultProgramName) +} + +func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) { + tsr.lastRewriteTokenIndexes[programName] = i +} + +func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { + is := make([]RewriteOperation, 0, ProgramInitSize) + tsr.programs[name] = is + return is +} + +func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) { + is := tsr.GetProgram(name) + is = append(is, op) + tsr.programs[name] = is +} + +func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { + is, ok := tsr.programs[name] + if !ok { + is = tsr.InitializeProgram(name) + } + return is +} + +// GetTextDefault returns the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter) GetTextDefault() string { + return tsr.GetText( + DefaultProgramName, + NewInterval(0, tsr.tokens.Size()-1)) +} + +// GetText returns the text from the original tokens altered per the +// instructions given to this rewriter. 
+func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string { + rewrites := tsr.programs[programName] + start := interval.Start + stop := interval.Stop + // ensure start/end are in range + stop = min(stop, tsr.tokens.Size()-1) + start = max(start, 0) + if len(rewrites) == 0 { + return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute + } + buf := bytes.Buffer{} + // First, optimize instruction stream + indexToOp := reduceToSingleOperationPerIndex(rewrites) + // Walk buffer, executing instructions and emitting tokens + for i := start; i <= stop && i < tsr.tokens.Size(); { + op := indexToOp[i] + delete(indexToOp, i) // remove so any left have index size-1 + t := tsr.tokens.Get(i) + if op == nil { + // no operation at that index, just dump token + if t.GetTokenType() != TokenEOF { + buf.WriteString(t.GetText()) + } + i++ // move to next token + } else { + i = op.Execute(&buf) // execute operation and skip + } + } + // include stuff after end if it's last index in buffer + // So, if they did an insertAfter(lastValidIndex, "foo"), include + // foo if end==lastValidIndex. + if stop == tsr.tokens.Size()-1 { + // Scan any remaining operations after last token + // should be included (they will be inserts). + for _, op := range indexToOp { + if op.GetIndex() >= tsr.tokens.Size()-1 { + buf.WriteString(op.GetText()) + } + } + } + return buf.String() +} + +// reduceToSingleOperationPerIndex combines operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... +// +// Here are the cases: +// +// I.i.u I.j.v leave alone, non-overlapping +// I.i.u I.i.v combine: Iivu +// +// R.i-j.u R.x-y.v | i-j in x-y delete first R +// R.i-j.u R.i-j.v delete first R +// R.i-j.u R.x-y.v | x-y in i-j ERROR +// R.i-j.u R.x-y.v | boundaries overlap ERROR +// +// Delete special case of replace (text==null): +// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) +// +// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping +// R.x-y.v I.i.u | i in x-y ERROR +// R.x-y.v I.x.u R.x-y.uv (combine, delete I) +// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping +// +// I.i.u = insert u before op @ index i +// R.x-y.u = replace x-y indexed tokens with u +// +// First we need to examine replaces. For any replace op: +// +// 1. wipe out any insertions before op within that range. +// 2. Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. +// +// Then we can deal with inserts: +// +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this 'replace'. +// 3. throw exception if index in same range as previous replace +// +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. +// +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the 'replace' range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. +// +// The func returns a map from token index to operation. 
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { + // WALK REPLACES + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + rop, ok := op.(*ReplaceOp) + if !ok { + continue + } + // Wipe prior inserts within range + for j := 0; j < i && j < len(rewrites); j++ { + if iop, ok := rewrites[j].(*InsertBeforeOp); ok { + if iop.index == rop.index { + // E.g., insert before 2, delete 2..2; update replace + // text to include insert before, kill insert + rewrites[iop.instructionIndex] = nil + if rop.text != "" { + rop.text = iop.text + rop.text + } else { + rop.text = iop.text + } + } else if iop.index > rop.index && iop.index <= rop.LastIndex { + // delete insert as it's a no-op. + rewrites[iop.instructionIndex] = nil + } + } + } + // Drop any prior replaces contained within + for j := 0; j < i && j < len(rewrites); j++ { + if prevop, ok := rewrites[j].(*ReplaceOp); ok { + if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { + // delete replace as it's a no-op. + rewrites[prevop.instructionIndex] = nil + continue + } + // throw exception unless disjoint or identical + disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex + // Delete special case of replace (text==null): + // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) + if prevop.text == "" && rop.text == "" && !disjoint { + rewrites[prevop.instructionIndex] = nil + rop.index = min(prevop.index, rop.index) + rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) + } else if !disjoint { + panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) + } + } + } + } + // WALK INSERTS + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + //hack to replicate inheritance in composition + _, iok := rewrites[i].(*InsertBeforeOp) + _, aok := rewrites[i].(*InsertAfterOp) + if !iok && !aok { + continue + } + iop := rewrites[i] + // combine current insert with prior if any at same index + // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic + for j := 0; j < i && j < len(rewrites); j++ { + if nextIop, ok := rewrites[j].(*InsertAfterOp); ok { + if nextIop.index == iop.GetIndex() { + iop.SetText(nextIop.text + iop.GetText()) + rewrites[j] = nil + } + } + if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { + if prevIop.index == iop.GetIndex() { + iop.SetText(iop.GetText() + prevIop.text) + rewrites[prevIop.instructionIndex] = nil + } + } + } + // look for replaces where iop.index is in range; error + for j := 0; j < i && j < len(rewrites); j++ { + if rop, ok := rewrites[j].(*ReplaceOp); ok { + if iop.GetIndex() == rop.index { + rop.text = iop.GetText() + rop.text + rewrites[i] = nil + continue + } + if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex { + panic("insert op " + iop.String() + " within boundaries of previous " + rop.String()) + } + } + } + } + m := map[int]RewriteOperation{} + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + if _, ok := m[op.GetIndex()]; ok { + panic("should only be one op per index") + } + m[op.GetIndex()] = op + } + return m +} + +/* + Quick fixing Go lack of overloads +*/ + +func max(a, b int) int { + if a > b { + return a + } else { + return b + } +} +func min(a, b int) int { + if a < b { + return a + } else { + return b + } +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go 
b/constraint/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go similarity index 100% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/transition.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/transition.go new file mode 100644 index 000000000..313b0fc12 --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/transition.go @@ -0,0 +1,439 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +// atom, set, epsilon, action, predicate, rule transitions. +// +//

+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct the ATN,
+// these transitions can be fixed as specific classes. The DFA transitions, on the
+// other hand, need to update their labels as edges are added to the states.
+// We'll use the term Edge for the DFA to distinguish them from ATN transitions.
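As a small, hypothetical illustration of the Matches contract described above (not part of the vendored file): transitions that carry a label, such as a range transition, match concrete input symbols, while epsilon-style transitions never consume input. The nil targets are tolerable here only because Matches never dereferences the target state:

	package transitions

	import "github.com/antlr4-go/antlr/v4"

	// matchesDemo contrasts a labelled transition with an epsilon transition.
	func matchesDemo() (inRange, outOfRange, epsilon bool) {
		r := antlr.NewRangeTransition(nil, 'a', 'z') // matches any symbol in 'a'..'z'
		e := antlr.NewEpsilonTransition(nil, -1)     // epsilon edges never match a symbol

		inRange = r.Matches('m', 0, 0x10FFFF)    // true
		outOfRange = r.Matches('0', 0, 0x10FFFF) // false
		epsilon = e.Matches('m', 0, 0x10FFFF)    // false
		return
	}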
+ +type Transition interface { + getTarget() ATNState + setTarget(ATNState) + getIsEpsilon() bool + getLabel() *IntervalSet + getSerializationType() int + Matches(int, int, int) bool +} + +type BaseTransition struct { + target ATNState + isEpsilon bool + label int + intervalSet *IntervalSet + serializationType int +} + +func NewBaseTransition(target ATNState) *BaseTransition { + + if target == nil { + panic("target cannot be nil.") + } + + t := new(BaseTransition) + + t.target = target + // Are we epsilon, action, sempred? + t.isEpsilon = false + t.intervalSet = nil + + return t +} + +func (t *BaseTransition) getTarget() ATNState { + return t.target +} + +func (t *BaseTransition) setTarget(s ATNState) { + t.target = s +} + +func (t *BaseTransition) getIsEpsilon() bool { + return t.isEpsilon +} + +func (t *BaseTransition) getLabel() *IntervalSet { + return t.intervalSet +} + +func (t *BaseTransition) getSerializationType() int { + return t.serializationType +} + +func (t *BaseTransition) Matches(_, _, _ int) bool { + panic("Not implemented") +} + +const ( + TransitionEPSILON = 1 + TransitionRANGE = 2 + TransitionRULE = 3 + TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? + TransitionATOM = 5 + TransitionACTION = 6 + TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 + TransitionNOTSET = 8 + TransitionWILDCARD = 9 + TransitionPRECEDENCE = 10 +) + +//goland:noinspection GoUnusedGlobalVariable +var TransitionserializationNames = []string{ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE", +} + +//var TransitionserializationTypes struct { +// EpsilonTransition int +// RangeTransition int +// RuleTransition int +// PredicateTransition int +// AtomTransition int +// ActionTransition int +// SetTransition int +// NotSetTransition int +// WildcardTransition int +// PrecedencePredicateTransition int +//}{ +// TransitionEPSILON, +// TransitionRANGE, +// TransitionRULE, +// TransitionPREDICATE, +// TransitionATOM, +// TransitionACTION, +// TransitionSET, +// TransitionNOTSET, +// TransitionWILDCARD, +// TransitionPRECEDENCE +//} + +// AtomTransition +// TODO: make all transitions sets? 
no, should remove set edges +type AtomTransition struct { + BaseTransition +} + +func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { + t := &AtomTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionATOM, + label: intervalSet, + isEpsilon: false, + }, + } + t.intervalSet = t.makeLabel() + + return t +} + +func (t *AtomTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addOne(t.label) + return s +} + +func (t *AtomTransition) Matches(symbol, _, _ int) bool { + return t.label == symbol +} + +func (t *AtomTransition) String() string { + return strconv.Itoa(t.label) +} + +type RuleTransition struct { + BaseTransition + followState ATNState + ruleIndex, precedence int +} + +func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { + return &RuleTransition{ + BaseTransition: BaseTransition{ + target: ruleStart, + isEpsilon: true, + serializationType: TransitionRULE, + }, + ruleIndex: ruleIndex, + precedence: precedence, + followState: followState, + } +} + +func (t *RuleTransition) Matches(_, _, _ int) bool { + return false +} + +type EpsilonTransition struct { + BaseTransition + outermostPrecedenceReturn int +} + +func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { + return &EpsilonTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionEPSILON, + isEpsilon: true, + }, + outermostPrecedenceReturn: outermostPrecedenceReturn, + } +} + +func (t *EpsilonTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *EpsilonTransition) String() string { + return "epsilon" +} + +type RangeTransition struct { + BaseTransition + start, stop int +} + +func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { + t := &RangeTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionRANGE, + isEpsilon: false, + }, + start: start, + stop: stop, + } + t.intervalSet = t.makeLabel() + return t +} + +func (t *RangeTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addRange(t.start, t.stop) + return s +} + +func (t *RangeTransition) Matches(symbol, _, _ int) bool { + return symbol >= t.start && symbol <= t.stop +} + +func (t *RangeTransition) String() string { + var sb strings.Builder + sb.WriteByte('\'') + sb.WriteRune(rune(t.start)) + sb.WriteString("'..'") + sb.WriteRune(rune(t.stop)) + sb.WriteByte('\'') + return sb.String() +} + +type AbstractPredicateTransition interface { + Transition + IAbstractPredicateTransitionFoo() +} + +type BaseAbstractPredicateTransition struct { + BaseTransition +} + +func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { + return &BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + }, + } +} + +func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} + +type PredicateTransition struct { + BaseAbstractPredicateTransition + isCtxDependent bool + ruleIndex, predIndex int +} + +func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { + return &PredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPREDICATE, + isEpsilon: true, + }, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + predIndex: predIndex, + } +} + +func (t 
*PredicateTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *PredicateTransition) getPredicate() *Predicate { + return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) +} + +func (t *PredicateTransition) String() string { + return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) +} + +type ActionTransition struct { + BaseTransition + isCtxDependent bool + ruleIndex, actionIndex, predIndex int +} + +func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { + return &ActionTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionACTION, + isEpsilon: true, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + actionIndex: actionIndex, + } +} + +func (t *ActionTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *ActionTransition) String() string { + return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) +} + +type SetTransition struct { + BaseTransition +} + +func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + t := &SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionSET, + }, + } + + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + return t +} + +func (t *SetTransition) Matches(symbol, _, _ int) bool { + return t.intervalSet.contains(symbol) +} + +func (t *SetTransition) String() string { + return t.intervalSet.String() +} + +type NotSetTransition struct { + SetTransition +} + +func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { + t := &NotSetTransition{ + SetTransition: SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionNOTSET, + }, + }, + } + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + + return t +} + +func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) +} + +func (t *NotSetTransition) String() string { + return "~" + t.intervalSet.String() +} + +type WildcardTransition struct { + BaseTransition +} + +func NewWildcardTransition(target ATNState) *WildcardTransition { + return &WildcardTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionWILDCARD, + }, + } +} + +func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol +} + +func (t *WildcardTransition) String() string { + return "." 
+} + +type PrecedencePredicateTransition struct { + BaseAbstractPredicateTransition + precedence int +} + +func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { + return &PrecedencePredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPRECEDENCE, + isEpsilon: true, + }, + }, + precedence: precedence, + } +} + +func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { + return NewPrecedencePredicate(t.precedence) +} + +func (t *PrecedencePredicateTransition) String() string { + return fmt.Sprint(t.precedence) + " >= _p" +} diff --git a/constraint/vendor/github.com/antlr4-go/antlr/v4/tree.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/tree.go new file mode 100644 index 000000000..c288420fb --- /dev/null +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/tree.go @@ -0,0 +1,304 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The basic notion of a tree has a parent, a payload, and a list of children. +// It is the most abstract interface for all the trees used by ANTLR. +/// + +var TreeInvalidInterval = NewInterval(-1, -2) + +type Tree interface { + GetParent() Tree + SetParent(Tree) + GetPayload() interface{} + GetChild(i int) Tree + GetChildCount() int + GetChildren() []Tree +} + +type SyntaxTree interface { + Tree + GetSourceInterval() Interval +} + +type ParseTree interface { + SyntaxTree + Accept(Visitor ParseTreeVisitor) interface{} + GetText() string + ToStringTree([]string, Recognizer) string +} + +type RuleNode interface { + ParseTree + GetRuleContext() RuleContext +} + +type TerminalNode interface { + ParseTree + GetSymbol() Token +} + +type ErrorNode interface { + TerminalNode + + errorNode() +} + +type ParseTreeVisitor interface { + Visit(tree ParseTree) interface{} + VisitChildren(node RuleNode) interface{} + VisitTerminal(node TerminalNode) interface{} + VisitErrorNode(node ErrorNode) interface{} +} + +type BaseParseTreeVisitor struct{} + +var _ ParseTreeVisitor = &BaseParseTreeVisitor{} + +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } +func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil } + +// TODO: Implement this? 
+//func (this ParseTreeVisitor) Visit(ctx) { +// if (Utils.isArray(ctx)) { +// self := this +// return ctx.map(function(child) { return VisitAtom(self, child)}) +// } else { +// return VisitAtom(this, ctx) +// } +//} +// +//func VisitAtom(Visitor, ctx) { +// if (ctx.parser == nil) { //is terminal +// return +// } +// +// name := ctx.parser.ruleNames[ctx.ruleIndex] +// funcName := "Visit" + Utils.titleCase(name) +// +// return Visitor[funcName](ctx) +//} + +type ParseTreeListener interface { + VisitTerminal(node TerminalNode) + VisitErrorNode(node ErrorNode) + EnterEveryRule(ctx ParserRuleContext) + ExitEveryRule(ctx ParserRuleContext) +} + +type BaseParseTreeListener struct{} + +var _ ParseTreeListener = &BaseParseTreeListener{} + +func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {} +func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {} +func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {} +func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {} + +type TerminalNodeImpl struct { + parentCtx RuleContext + symbol Token +} + +var _ TerminalNode = &TerminalNodeImpl{} + +func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { + tn := new(TerminalNodeImpl) + + tn.parentCtx = nil + tn.symbol = symbol + + return tn +} + +func (t *TerminalNodeImpl) GetChild(_ int) Tree { + return nil +} + +func (t *TerminalNodeImpl) GetChildren() []Tree { + return nil +} + +func (t *TerminalNodeImpl) SetChildren(_ []Tree) { + panic("Cannot set children on terminal node") +} + +func (t *TerminalNodeImpl) GetSymbol() Token { + return t.symbol +} + +func (t *TerminalNodeImpl) GetParent() Tree { + return t.parentCtx +} + +func (t *TerminalNodeImpl) SetParent(tree Tree) { + t.parentCtx = tree.(RuleContext) +} + +func (t *TerminalNodeImpl) GetPayload() interface{} { + return t.symbol +} + +func (t *TerminalNodeImpl) GetSourceInterval() Interval { + if t.symbol == nil { + return TreeInvalidInterval + } + tokenIndex := t.symbol.GetTokenIndex() + return NewInterval(tokenIndex, tokenIndex) +} + +func (t *TerminalNodeImpl) GetChildCount() int { + return 0 +} + +func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { + return v.VisitTerminal(t) +} + +func (t *TerminalNodeImpl) GetText() string { + return t.symbol.GetText() +} + +func (t *TerminalNodeImpl) String() string { + if t.symbol.GetTokenType() == TokenEOF { + return "" + } + + return t.symbol.GetText() +} + +func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string { + return t.String() +} + +// Represents a token that was consumed during reSynchronization +// rather than during a valid Match operation. For example, +// we will create this kind of a node during single token insertion +// and deletion as well as during "consume until error recovery set" +// upon no viable alternative exceptions. + +type ErrorNodeImpl struct { + *TerminalNodeImpl +} + +var _ ErrorNode = &ErrorNodeImpl{} + +func NewErrorNodeImpl(token Token) *ErrorNodeImpl { + en := new(ErrorNodeImpl) + en.TerminalNodeImpl = NewTerminalNodeImpl(token) + return en +} + +func (e *ErrorNodeImpl) errorNode() {} + +func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} { + return v.VisitErrorNode(e) +} + +type ParseTreeWalker struct { +} + +func NewParseTreeWalker() *ParseTreeWalker { + return new(ParseTreeWalker) +} + +// Walk performs a walk on the given parse tree starting at the root and going down recursively +// with depth-first search. 
On each node, [EnterRule] is called before +// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up. +func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + switch tt := t.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + p.EnterRule(listener, t.(RuleNode)) + for i := 0; i < t.GetChildCount(); i++ { + child := t.GetChild(i) + p.Walk(listener, child) + } + p.ExitRule(listener, t.(RuleNode)) + } +} + +// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule] +// then by triggering the event specific to the given parse tree node +func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) +} + +// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node +// then by triggering the generic event [ParseTreeListener].ExitEveryRule +func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) +} + +//goland:noinspection GoUnusedGlobalVariable +var ParseTreeWalkerDefault = NewParseTreeWalker() + +type IterativeParseTreeWalker struct { + *ParseTreeWalker +} + +//goland:noinspection GoUnusedExportedFunction +func NewIterativeParseTreeWalker() *IterativeParseTreeWalker { + return new(IterativeParseTreeWalker) +} + +func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + var stack []Tree + var indexStack []int + currentNode := t + currentIndex := 0 + + for currentNode != nil { + // pre-order visit + switch tt := currentNode.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + i.EnterRule(listener, currentNode.(RuleNode)) + } + // Move down to first child, if exists + if currentNode.GetChildCount() > 0 { + stack = append(stack, currentNode) + indexStack = append(indexStack, currentIndex) + currentIndex = 0 + currentNode = currentNode.GetChild(0) + continue + } + + for { + // post-order visit + if ruleNode, ok := currentNode.(RuleNode); ok { + i.ExitRule(listener, ruleNode) + } + // No parent, so no siblings + if len(stack) == 0 { + currentNode = nil + currentIndex = 0 + break + } + // Move to next sibling if possible + currentIndex++ + if stack[len(stack)-1].GetChildCount() > currentIndex { + currentNode = stack[len(stack)-1].GetChild(currentIndex) + break + } + // No next, sibling, so move up + currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1] + currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1] + } + } +} diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/trees.go similarity index 81% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/trees.go index d7dbb0322..f44c05d81 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/trees.go @@ -8,10 +8,8 @@ import "fmt" /** A set of utility routines useful for all kinds of ANTLR trees. */ -// Print out a whole tree in LISP form. 
{@link //getNodeText} is used on the -// -// node payloads to get the text for the nodes. Detect -// parse trees and extract data appropriately. +// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the +// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately. func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { if recog != nil { @@ -32,7 +30,7 @@ func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { } for i := 1; i < c; i++ { s = TreesStringTree(tree.GetChild(i), ruleNames, nil) - res += (" " + s) + res += " " + s } res += ")" return res @@ -62,7 +60,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { } } - // no recog for rule names + // no recognition for rule names payload := t.GetPayload() if p2, ok := payload.(Token); ok { return p2.GetText() @@ -71,7 +69,9 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { return fmt.Sprint(t.GetPayload()) } -// Return ordered list of all children of this node +// TreesGetChildren returns am ordered list of all children of this node +// +//goland:noinspection GoUnusedExportedFunction func TreesGetChildren(t Tree) []Tree { list := make([]Tree, 0) for i := 0; i < t.GetChildCount(); i++ { @@ -80,9 +80,10 @@ func TreesGetChildren(t Tree) []Tree { return list } -// Return a list of all ancestors of this node. The first node of +// TreesgetAncestors returns a list of all ancestors of this node. The first node of list is the root +// and the last node is the parent of this node. // -// list is the root and the last is the parent of this node. +//goland:noinspection GoUnusedExportedFunction func TreesgetAncestors(t Tree) []Tree { ancestors := make([]Tree, 0) t = t.GetParent() @@ -94,10 +95,12 @@ func TreesgetAncestors(t Tree) []Tree { return ancestors } +//goland:noinspection GoUnusedExportedFunction func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { return TreesfindAllNodes(t, ttype, true) } +//goland:noinspection GoUnusedExportedFunction func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { return TreesfindAllNodes(t, ruleIndex, false) } @@ -129,6 +132,7 @@ func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTr } } +//goland:noinspection GoUnusedExportedFunction func TreesDescendants(t ParseTree) []ParseTree { nodes := []ParseTree{t} for i := 0; i < t.GetChildCount(); i++ { diff --git a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go b/constraint/vendor/github.com/antlr4-go/antlr/v4/utils.go similarity index 85% rename from constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go rename to constraint/vendor/github.com/antlr4-go/antlr/v4/utils.go index 9fad5d916..733d7df9d 100644 --- a/constraint/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go +++ b/constraint/vendor/github.com/antlr4-go/antlr/v4/utils.go @@ -9,8 +9,10 @@ import ( "errors" "fmt" "math/bits" + "os" "strconv" "strings" + "syscall" ) func intMin(a, b int) int { @@ -31,7 +33,7 @@ func intMax(a, b int) int { type IntStack []int -var ErrEmptyStack = errors.New("Stack is empty") +var ErrEmptyStack = errors.New("stack is empty") func (s *IntStack) Pop() (int, error) { l := len(*s) - 1 @@ -47,33 +49,13 @@ func (s *IntStack) Push(e int) { *s = append(*s, e) } -type comparable interface { - Equals(other Collectable[any]) bool -} - -func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool { - - return 
a.Equals(b) -} - -func standardHashFunction(a interface{}) int { - if h, ok := a.(hasher); ok { - return h.Hash() - } - - panic("Not Hasher") -} - -type hasher interface { - Hash() int -} - const bitsPerWord = 64 func indexForBit(bit int) int { return bit / bitsPerWord } +//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction func wordForBit(data []uint64, bit int) uint64 { idx := indexForBit(bit) if idx >= len(data) { @@ -94,6 +76,8 @@ type BitSet struct { data []uint64 } +// NewBitSet creates a new bitwise set +// TODO: See if we can replace with the standard library's BitSet func NewBitSet() *BitSet { return &BitSet{} } @@ -123,7 +107,7 @@ func (b *BitSet) or(set *BitSet) { setLen := set.minLen() maxLen := intMax(bLen, setLen) if maxLen > len(b.data) { - // Increase the size of len(b.data) to repesent the bits in both sets. + // Increase the size of len(b.data) to represent the bits in both sets. data := make([]uint64, maxLen) copy(data, b.data) b.data = data @@ -246,37 +230,6 @@ func (a *AltDict) values() []interface{} { return vs } -type DoubleDict struct { - data map[int]map[int]interface{} -} - -func NewDoubleDict() *DoubleDict { - dd := new(DoubleDict) - dd.data = make(map[int]map[int]interface{}) - return dd -} - -func (d *DoubleDict) Get(a, b int) interface{} { - data := d.data[a] - - if data == nil { - return nil - } - - return data[b] -} - -func (d *DoubleDict) set(a, b int, o interface{}) { - data := d.data[a] - - if data == nil { - data = make(map[int]interface{}) - d.data[a] = data - } - - data[b] = o -} - func EscapeWhitespace(s string, escapeSpaces bool) string { s = strings.Replace(s, "\t", "\\t", -1) @@ -288,6 +241,7 @@ func EscapeWhitespace(s string, escapeSpaces bool) string { return s } +//goland:noinspection GoUnusedExportedFunction func TerminalNodeToStringArray(sa []TerminalNode) []string { st := make([]string, len(sa)) @@ -298,6 +252,7 @@ func TerminalNodeToStringArray(sa []TerminalNode) []string { return st } +//goland:noinspection GoUnusedExportedFunction func PrintArrayJavaStyle(sa []string) string { var buffer bytes.Buffer @@ -350,3 +305,24 @@ func murmurFinish(h int, numberOfWords int) int { return int(hash) } + +func isDirectory(dir string) (bool, error) { + fileInfo, err := os.Stat(dir) + if err != nil { + switch { + case errors.Is(err, syscall.ENOENT): + // The given directory does not exist, so we will try to create it + // + err = os.MkdirAll(dir, 0755) + if err != nil { + return false, err + } + + return true, nil + case err != nil: + return false, err + default: + } + } + return fileInfo.IsDir(), err +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/constraint/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b..f4e7dbf37 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/constraint/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... 
+ - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/.editorconfig b/constraint/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851..000000000 --- a/constraint/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/.gitattributes b/constraint/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be..000000000 --- a/constraint/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/.gitignore b/constraint/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b..daea9dd6d 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/constraint/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/constraint/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e575754..fa854785d 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/constraint/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. +1.8.0 2023-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/constraint/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d..e4ac2a2ff 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/constraint/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. 
You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. +Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). 
+ [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/constraint/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd..c349c326c 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/constraint/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. 
If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
-func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. 
-func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. 
the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/constraint/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e4..36c311694 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/constraint/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. 
-// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. 
+ // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. + if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. 
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. 
watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. 
+ if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. + ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/constraint/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a..d8de5ab76 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/constraint/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). 
-// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. 
A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). 
An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. +func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. 
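The kqueue backend above folds what used to be several parallel maps guarded by the watcher's mutex (watches, watchesByDir, userWatches, dirFlags, paths, fileExists) into a single watches registry behind one RWMutex. The standalone Go sketch below is illustrative only, not the vendored code; the names registry, entry, byFd, byKey, and byDir are invented. It shows the same bookkeeping idea: every index is updated under the same lock, so the path-to-fd, fd-to-metadata, and directory-to-fd views cannot drift apart.

// Illustrative sketch only; not the vendored fsnotify code.
package main

import (
	"fmt"
	"path/filepath"
	"sync"
)

type entry struct {
	fd    int
	path  string
	isDir bool
}

type registry struct {
	mu    sync.RWMutex
	byFd  map[int]entry           // fd -> metadata
	byKey map[string]int          // path -> fd
	byDir map[string]map[int]bool // dirname(path) -> set of fds
}

func newRegistry() *registry {
	return &registry{
		byFd:  make(map[int]entry),
		byKey: make(map[string]int),
		byDir: make(map[string]map[int]bool),
	}
}

func (r *registry) add(path string, fd int, isDir bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.byFd[fd] = entry{fd: fd, path: path, isDir: isDir}
	r.byKey[path] = fd
	dir := filepath.Dir(path)
	if r.byDir[dir] == nil {
		r.byDir[dir] = make(map[int]bool)
	}
	r.byDir[dir][fd] = true
}

func (r *registry) remove(path string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	fd, ok := r.byKey[path]
	if !ok {
		return
	}
	delete(r.byKey, path)
	delete(r.byFd, fd)
	dir := filepath.Dir(path)
	delete(r.byDir[dir], fd)
	if len(r.byDir[dir]) == 0 {
		delete(r.byDir, dir)
	}
}

func main() {
	r := newRegistry()
	r.add("/tmp/dir/file", 7, false)
	r.remove("/tmp/dir/file")
	fmt.Println(len(r.byFd), len(r.byKey), len(r.byDir)) // 0 0 0
}

Keeping all of the indexes behind one lock also removes the unlock/relock dance the old Remove and Close paths needed when they copied map contents before calling other locking methods.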
-func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) 
+func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). 
// // Returns the real path to the file which was added, with symlinks resolved. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. 
for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. 
For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". 
+ e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. +func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. 
+ path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/backend_other.go b/constraint/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c01..5eb5dbc66 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/constraint/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
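The Watcher documentation being removed here (it reappears on the new Watcher wrapper in fsnotify.go later in this patch) lists the five portable event kinds: Create, Write, Remove, Rename, and Chmod. As a hedged usage sketch built only on the exported API shown in this patch (NewWatcher, Add, Events, Errors, Event.Has), a consumer normally dispatches on the operation bits rather than comparing Op with ==; the watched path /tmp/watched is a placeholder.

// Sketch of consuming fsnotify events by operation.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp/watched"); err != nil { // placeholder path
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return // channel closed by Close()
			}
			switch {
			case ev.Has(fsnotify.Create):
				log.Println("created:", ev.Name)
			case ev.Has(fsnotify.Write):
				log.Println("written:", ev.Name)
			case ev.Has(fsnotify.Remove), ev.Has(fsnotify.Rename):
				log.Println("gone, watch dropped:", ev.Name)
			case ev.Has(fsnotify.Chmod):
				log.Println("attributes changed:", ev.Name)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}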
+type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. 
For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/constraint/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d6..c54a63083 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/constraint/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. 
-// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). 
An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. 
When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := 
windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. 
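Earlier in this Windows hunk, AddWith rejects buffers smaller than 4096 bytes, and the package documentation keeps the 64K ReadDirectoryChangesW default because that is the largest size guaranteed to work over SMB. A minimal sketch of raising that buffer through the exported WithBufferSize option follows; the directory path is a placeholder, WithBufferSize is a no-op on non-Windows platforms, and the usual event loop is omitted.

// Sketch: request a larger ReadDirectoryChangesW buffer for a bursty directory.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// 256K instead of the 64K default; anything below 4096 bytes is rejected.
	err = w.AddWith(`C:\some\busy\dir`, fsnotify.WithBufferSize(256*1024))
	if err != nil {
		log.Fatal(err)
	}

	// ... consume w.Events and w.Errors as usual (omitted).
}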
-func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/constraint/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc49..0760efe91 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/constraint/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG 
environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. 
+ // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. 
+ xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. 
+// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. 
+// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." { return filepath.Dir(path), true } diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 000000000..b0eab1009 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 000000000..928319fb0 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + 
{"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 000000000..3186b0c34 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 000000000..f69fdb930 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", 
unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 000000000..607e683bd --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 000000000..35c734be4 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git 
a/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 000000000..e5b3b6f69 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 000000000..1dd455bc5 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 000000000..f1b2e73bd --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go 
b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 000000000..52bf4ce53 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 000000000..547df1df8 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 000000000..7daa45e19 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. 
+package internal diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 000000000..30976ce97 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 000000000..37dfeddc2 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/constraint/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 000000000..a72c64954 --- /dev/null +++ b/constraint/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. +var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/constraint/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae65..000000000 --- a/constraint/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. 
- -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/constraint/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b88..f65e8fe3e 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/constraint/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/constraint/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/constraint/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78..a29fc7aab 100644 --- a/constraint/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/constraint/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/.gitignore b/constraint/vendor/github.com/fxamacker/cbor/v2/.gitignore new file mode 100644 index 000000000..f1c181ec9 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/constraint/vendor/github.com/fxamacker/cbor/v2/.golangci.yml new file mode 100644 index 000000000..38cb9ae10 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -0,0 +1,104 @@ +# Do not delete linter settings. Linters like gocritic can be enabled on the command line. 
+ +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + ignore-tests: true + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + gofmt: + simplify: false + goimports: + local-prefixes: github.com/fxamacker/cbor + golint: + min-confidence: 0 + govet: + check-shadowing: true + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + staticcheck: + checks: ["all"] + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - depguard + - errcheck + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nilerr + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + +issues: + # max-issues-per-linter default is 50. Set to 0 to disable limit. + max-issues-per-linter: 0 + # max-same-issues default is 3. Set to 0 to disable limit. + max-same-issues: 0 + + exclude-rules: + - path: decode.go + text: "string ` overflows ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `, ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string ` for type ` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string `cbor: ` has (\\d+) occurrences, make it a constant" diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/constraint/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..c794b2b0c --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +faye.github@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/constraint/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md new file mode 100644 index 000000000..de0965e12 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md @@ -0,0 +1,41 @@ +# How to contribute + +You can contribute by using the library, opening issues, or opening pull requests. + +## Bug reports and security vulnerabilities + +Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues). + +To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy). + +Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me. + +## Pull requests + +Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc. + +Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts. + +See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details. + +Pull requests have a greater chance of being approved if: +- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature. +- it has > 97% code coverage. + +## Describe your issue + +Clearly describe the issue: +* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error. 
+* If you propose a change or addition, try to give an example how the improved code could look like or how to use it. +* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message. + +## Please don't + +Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me. + +Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me. + +## Credits + +- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22. +- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements. diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/LICENSE b/constraint/vendor/github.com/fxamacker/cbor/v2/LICENSE new file mode 100644 index 000000000..eaa850492 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019-present Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/README.md b/constraint/vendor/github.com/fxamacker/cbor/v2/README.md new file mode 100644 index 000000000..af0a79507 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/README.md @@ -0,0 +1,691 @@ +# CBOR Codec in Go + + + +[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). + +CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. + +`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). 
+ +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. + +## fxamacker/cbor + +[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) +[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) +[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. + +Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. + +
+<details><summary>Highlights</summary><p>

+ +__🚀  Speed__ + +Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data. + +__🔒  Security__ + +Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation. + +__🗜️  Data Size__ + +Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. + +__:jigsaw:  Usability__ + +API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines. + +Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc. + +__📆  Extensibility__ + +Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library. + +


+</p></details>
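The Data Size highlight above mentions optional float shrinking. A minimal sketch, assuming the `EncOptions.ShortestFloat` option and the `ShortestFloat16` constant from the v2 API (the specific value encoded is arbitrary):

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Encoding mode that shrinks float64 → float32 → float16 when the value
	// still round-trips exactly (assumed option name: ShortestFloat).
	em, err := cbor.EncOptions{ShortestFloat: cbor.ShortestFloat16}.EncMode()
	if err != nil {
		panic(err)
	}

	b, _ := em.Marshal(1.5) // 1.5 fits in a float16, so the encoding is 3 bytes
	fmt.Println(hex.EncodeToString(b))
}
```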
+ +### Secure Decoding with Configurable Settings + +`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. + +By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +
+<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p>

+ +```Go +// Example of encoding/gob having "fatal error: runtime: out of memory" +// while decoding 181 bytes. +package main +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" +) + +// Example data is from https://github.com/golang/go/issues/24446 +// (shortened to 181 bytes). +const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + + "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + + "860001013001ff860001013001ffb80000001eff850401010e3030303030" + + "30303030303030303001ff3000010c0104000016ffb70201010830303030" + + "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + + "303030303030303030303030303030303030303030303030303030303030" + + "30" + +type X struct { + J *X + K map[string]int +} + +func main() { + raw, _ := hex.DecodeString(data) + decoder := gob.NewDecoder(bytes.NewReader(raw)) + + var x X + decoder.Decode(&x) // fatal error: runtime: out of memory + fmt.Println("Decoding finished.") +} +``` + +


+</p></details>
+ +`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to +decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | + +
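To make the comparison above concrete: a minimal sketch that feeds the same 10-byte input (listed under Benchmark details below) to a decoder with tightened limits, assuming the documented `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels` options; the limit values here are arbitrary illustrations.

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// 10 bytes of malformed CBOR that claims an enormous array.
	malicious := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}

	// Tighten the default decoder limits (illustrative values).
	dm, err := cbor.DecOptions{
		MaxArrayElements: 1024,
		MaxMapPairs:      1024,
		MaxNestedLevels:  16,
	}.DecMode()
	if err != nil {
		panic(err)
	}

	var v []byte
	if err := dm.Unmarshal(malicious, &v); err != nil {
		fmt.Println("rejected:", err) // fails fast instead of allocating a huge array
	}
}
```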
+<details><summary>Benchmark details</summary><p>

+ +Latest comparison used: +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) +- go test -bench=. -benchmem -count=20 + +#### Prior comparisons + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | + +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.6, linux/amd64, i5-13600K (DDR4) +- go test -bench=. -benchmem -count=20 + +


+</p></details>
+ +### Smaller Encodings with Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
+<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p>

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+</p></details>
+ +Example using different struct tags together: + +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. + +## Quick Start + +__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. + +### Key Points + +This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). + +- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items. +- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items. + +Configurable limits and options can be used to balance trade-offs. + +- Encoding and decoding modes are created from options (settings). +- Modes can be created at startup and reused. +- Modes are safe for concurrent use. + +### Default Mode + +Package level functions only use this library's default settings. +They provide the "default mode" of encoding and decoding. + +```go +// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc. +b, err = cbor.Marshal(v) // encode v to []byte b +err = cbor.Unmarshal(b, &v) // decode []byte b to v +decoder = cbor.NewDecoder(r) // create decoder with io.Reader r +err = decoder.Decode(&v) // decode a CBOR data item to v + +// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface. +err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool. + +// v2.5.0 added new functions that return remaining bytes. + +// UnmarshalFirst decodes first CBOR data item and returns remaining bytes. +rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v + +// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. +text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text + +// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, +// but new funcs UnmarshalFirst and DiagnoseFirst do not. +``` + +__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. + +- Different CBOR libraries may use different default settings. +- CBOR-based formats or protocols usually require specific settings. + +For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. + +### Presets + +Presets can be used as-is or as a starting point for custom settings. + +```go +// EncOptions is a struct of encoder settings. +func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding +func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization +func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR +func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR +``` + +Presets are used to create custom modes. + +### Custom Modes + +Modes are created from settings. Once created, modes have immutable settings. + +💡 Create the mode at startup and reuse it. It is safe for concurrent use. + +```Go +// Create encoding mode. +opts := cbor.CoreDetEncOptions() // use preset options as a starting point +opts.Time = cbor.TimeUnix // change any settings if needed +em, err := opts.EncMode() // create an immutable encoding mode + +// Reuse the encoding mode. It is safe for concurrent use. + +// API matches encoding/json. 
+b, err := em.Marshal(v) // encode v to []byte b +encoder := em.NewEncoder(w) // create encoder with io.Writer w +err := encoder.Encode(v) // encode v to io.Writer w +``` + +Default mode and custom modes automatically apply struct tags. + +### User Specified Buffer for Encoding (v2.7.0) + +`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool. + +```Go +em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode + +var buf bytes.Buffer +err = em.MarshalToBuffer(v, &buf) // encode v to provided buf +``` + +### Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
+<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p>

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+</p></details>
+<details><summary>Example using several struct tags</summary><p>

+ +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +
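As a textual counterpart to the diagram above, a minimal sketch using the documented `toarray` and `keyasint` tags; the type and field names are invented for illustration.

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// toarray: encode the struct as a fixed-length CBOR array instead of a map.
type Coordinates struct {
	_   struct{} `cbor:",toarray"`
	Lat float64
	Lng float64
}

// keyasint: encode fields as integer map keys (common in COSE/CWT-style protocols).
type Claims struct {
	Issuer  string `cbor:"1,keyasint,omitempty"`
	Subject string `cbor:"2,keyasint,omitempty"`
}

func main() {
	a, _ := cbor.Marshal(Coordinates{Lat: 1, Lng: 2}) // CBOR array of two floats
	c, _ := cbor.Marshal(Claims{Issuer: "example"})   // CBOR map keyed by integer 1
	fmt.Println(hex.EncodeToString(a))
	fmt.Println(hex.EncodeToString(c))
}
```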
+</p></details>
+ +Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. + +### CBOR Tags + +CBOR tags are specified in a `TagSet`. + +Custom modes can be created with a `TagSet` to handle CBOR tags. + +```go +em, err := opts.EncMode() // no CBOR tags +em, err := opts.EncModeWithTags(ts) // immutable CBOR tags +em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags +``` + +`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. + +
+<details><summary>Example using TagSet and TagOptions</summary><p>

+ +```go +// Use signedCWT struct defined in "Decoding CWT" example. + +// Create TagSet (safe for concurrency). +tags := cbor.NewTagSet() +// Register tag COSE_Sign1 18 with signedCWT type. +tags.Add( + cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired}, + reflect.TypeOf(signedCWT{}), + 18) + +// Create DecMode with immutable tags. +dm, _ := cbor.DecOptions{}.DecModeWithTags(tags) + +// Unmarshal to signedCWT with tag support. +var v signedCWT +if err := dm.Unmarshal(data, &v); err != nil { + return err +} + +// Create EncMode with immutable tags. +em, _ := cbor.EncOptions{}.EncModeWithTags(tags) + +// Marshal signedCWT with tag number, using the EncMode created above. +if data, err := em.Marshal(v); err != nil { + return err +} +``` + +
+</p></details>
+ +### Functions and Interfaces + +
+<details><summary>Functions and interfaces at a glance</summary><p>

+ +Common functions with same API as `encoding/json`: +- `Marshal`, `Unmarshal` +- `NewEncoder`, `(*Encoder).Encode` +- `NewDecoder`, `(*Decoder).Decode` + +NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes +because RFC 8949 treats a CBOR data item with remaining bytes as malformed. +- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes. + +Other useful functions: +- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data. +- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes. +- `Wellformed` returns true if the CBOR data item is well-formed. + +Interfaces identical or comparable to Go `encoding` packages include: +`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`. + +The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding. + +
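`UnmarshalFirst` is listed above without a usage example; a minimal sketch walking a two-item CBOR Sequence (the values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Build a tiny CBOR Sequence by concatenating two encoded data items.
	a, _ := cbor.Marshal("hello")
	b, _ := cbor.Marshal(42)
	seq := append(a, b...)

	// Decode the items one at a time; rest holds the not-yet-decoded bytes.
	var s string
	rest, err := cbor.UnmarshalFirst(seq, &s)
	if err != nil {
		panic(err)
	}

	var n int
	if _, err := cbor.UnmarshalFirst(rest, &n); err != nil {
		panic(err)
	}
	fmt.Println(s, n) // hello 42
}
```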

+ +### Security Tips + +🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data. + +Default limits may need to be increased for systems handling very large data (e.g. blockchains). + +`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`. + +## Status + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. + +For more details, see [release notes](https://github.com/fxamacker/cbor/releases). + +### Prior Release + +[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. + +v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. + +See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes. + +See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. + + + +## Who uses fxamacker/cbor + +`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others. + +`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope. + +## Standards + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Notable CBOR features include: + +| CBOR Feature | Description | +| :--- | :--- | +| CBOR tags | API supports built-in and user-defined tags. | +| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. | +| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). 
| +| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. | +| Indefinite length data | Option to allow/forbid for encoding and decoding. | +| Well-formedness | Always checked and enforced. | +| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. | +| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). | + +Known limitations are noted in the [Limitations section](#limitations). + +Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps. + +Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data. + +After well-formedness is verified, basic validity errors are handled as follows: + +* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default. +* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys. + +When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future. + +By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined. + +__Click to expand topic:__ + +
+ Duplicate Map Keys

+ +This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct. + +`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type. + +`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number. + +APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol. + +
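A minimal sketch of the enforced mode, using the option and error names above (the sample bytes encode the CBOR map `{"a": 1, "a": 2}` with a duplicate key):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// 0xa2 = map of 2 pairs; each pair is text string "a" followed by an integer.
	data := []byte{0xa2, 0x61, 'a', 0x01, 0x61, 'a', 0x02}

	// Create a DecMode that detects and rejects duplicate map keys.
	dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
	if err != nil {
		panic(err)
	}

	var m map[string]int
	err = dm.Unmarshal(data, &m)

	var dupErr *cbor.DupMapKeyError
	if errors.As(err, &dupErr) {
		// The destination may be partially filled ("Allow Partial Fill");
		// discard it if the protocol requires rejecting the whole message.
		fmt.Printf("duplicate key %v at map element index %d\n", dupErr.Key, dupErr.Index)
	} else if err != nil {
		fmt.Println(err)
	}
}
```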

+ +
+ Tag Validity

+ +This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799): + +* Inadmissible type for tag content +* Inadmissible value for tag content + +Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways: + +* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type. +* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified. + +Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR. + +For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options). + +
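To make the unknown-tag behavior concrete, here is a short sketch; it assumes the exported fields of `cbor.Tag` are `Number` and `Content`, and uses tag number 100 purely as an example of an unregistered tag:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Tag number 100 (0xd8 0x64) wrapping the text string "x" (0x61 0x78).
	data := []byte{0xd8, 0x64, 0x61, 0x78}

	// Decoding into an empty interface yields a cbor.Tag holding the
	// tag number and the decoded tag content.
	var v interface{}
	if err := cbor.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v)

	// A decoder can also forbid tag data items entirely,
	// as required by protocols such as CTAP2 Canonical CBOR.
	dm, _ := cbor.DecOptions{TagsMd: cbor.TagsForbidden}.DecMode()
	fmt.Println(dm.Unmarshal(data, &v)) // error: tags are forbidden
}
```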

+ +## Limitations + +If any of these limitations prevent you from using this library, please open an issue along with a link to your project. + +* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`. +* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items. +* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation. + +## Fuzzing and Code Coverage + +__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release. + +__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project. + +
+ +## Versions and API Changes +This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes. + +These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases: +`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`. + +Exclusions from SemVer: +- Newly added API documented as "subject to change". +- Newly added API in the master branch that has never been tagged in non-beta release. +- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions. + +## Code of Conduct + +This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments. + +## Contributing + +Please open an issue before beginning work on a PR. The improvement may have already been considered, etc. + +For more info, see [How to Contribute](CONTRIBUTING.md). + +## Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +For the full text of the Security Policy, see [SECURITY.md](SECURITY.md). + +## Acknowledgements + +Many thanks to all the contributors on this project! + +I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more. + +I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days. + +Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0. + +This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs. + +Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis). + +Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included! + +This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well. + +## License + +Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker). + +fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text. + +
diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/constraint/vendor/github.com/fxamacker/cbor/v2/SECURITY.md new file mode 100644 index 000000000..9c05146d1 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +If the security vulnerability is already known to the public, then you can open an issue as a bug report. + +To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public. diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/constraint/vendor/github.com/fxamacker/cbor/v2/bytestring.go new file mode 100644 index 000000000..823bff12c --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/bytestring.go @@ -0,0 +1,63 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "errors" +) + +// ByteString represents CBOR byte string (major type 2). ByteString can be used +// when using a Go []byte is not possible or convenient. For example, Go doesn't +// allow []byte as map key, so ByteString can be used to support data formats +// having CBOR map with byte string keys. ByteString can also be used to +// encode invalid UTF-8 string as CBOR byte string. +// See DecOption.MapKeyByteStringMode for more details. +type ByteString string + +// Bytes returns bytes representing ByteString. +func (bs ByteString) Bytes() []byte { + return []byte(bs) +} + +// MarshalCBOR encodes ByteString as CBOR byte string (major type 2). +func (bs ByteString) MarshalCBOR() ([]byte, error) { + e := getEncodeBuffer() + defer putEncodeBuffer(e) + + // Encode length + encodeHead(e, byte(cborTypeByteString), uint64(len(bs))) + + // Encode data + buf := make([]byte, e.Len()+len(bs)) + n := copy(buf, e.Bytes()) + copy(buf[n:], bs) + + return buf, nil +} + +// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString. +// Decoding CBOR null and CBOR undefined sets ByteString to be empty. +func (bs *ByteString) UnmarshalCBOR(data []byte) error { + if bs == nil { + return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and CBOR undefined to ByteString resets data. + // This behavior is similar to decoding CBOR null and CBOR undefined to []byte. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + *bs = "" + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Check if CBOR data type is byte string + if typ := d.nextCBORType(); typ != cborTypeByteString { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()} + } + + b, _ := d.parseByteString() + *bs = ByteString(b) + return nil +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/cache.go b/constraint/vendor/github.com/fxamacker/cbor/v2/cache.go new file mode 100644 index 000000000..ea0f39e24 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -0,0 +1,363 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +type encodeFuncs struct { + ef encodeFunc + ief isEmptyFunc +} + +var ( + decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType + encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType + encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs + typeInfoCache sync.Map // map[reflect.Type]*typeInfo +) + +type specialType int + +const ( + specialTypeNone specialType = iota + specialTypeUnmarshalerIface + specialTypeEmptyIface + specialTypeIface + specialTypeTag + specialTypeTime +) + +type typeInfo struct { + elemTypeInfo *typeInfo + keyTypeInfo *typeInfo + typ reflect.Type + kind reflect.Kind + nonPtrType reflect.Type + nonPtrKind reflect.Kind + spclType specialType +} + +func newTypeInfo(t reflect.Type) *typeInfo { + tInfo := typeInfo{typ: t, kind: t.Kind()} + + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + k := t.Kind() + + tInfo.nonPtrType = t + tInfo.nonPtrKind = k + + if k == reflect.Interface { + if t.NumMethod() == 0 { + tInfo.spclType = specialTypeEmptyIface + } else { + tInfo.spclType = specialTypeIface + } + } else if t == typeTag { + tInfo.spclType = specialTypeTag + } else if t == typeTime { + tInfo.spclType = specialTypeTime + } else if reflect.PtrTo(t).Implements(typeUnmarshaler) { + tInfo.spclType = specialTypeUnmarshalerIface + } + + switch k { + case reflect.Array, reflect.Slice: + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + case reflect.Map: + tInfo.keyTypeInfo = getTypeInfo(t.Key()) + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + } + + return &tInfo +} + +type decodingStructType struct { + fields fields + fieldIndicesByName map[string]int + err error + toArray bool +} + +// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, +// here's a very basic implementation of an aggregated error. 
+type multierror []error + +func (m multierror) Error() string { + var sb strings.Builder + for i, err := range m { + sb.WriteString(err.Error()) + if i < len(m)-1 { + sb.WriteString(", ") + } + } + return sb.String() +} + +func getDecodingStructType(t reflect.Type) *decodingStructType { + if v, _ := decodingStructTypeCache.Load(t); v != nil { + return v.(*decodingStructType) + } + + flds, structOptions := getFields(t) + + toArray := hasToArrayOption(structOptions) + + var errs []error + for i := 0; i < len(flds); i++ { + if flds[i].keyAsInt { + nameAsInt, numErr := strconv.Atoi(flds[i].name) + if numErr != nil { + errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) + break + } + flds[i].nameAsInt = int64(nameAsInt) + } + + flds[i].typInfo = getTypeInfo(flds[i].typ) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + for i, fld := range flds { + if _, ok := fieldIndicesByName[fld.name]; ok { + errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) + continue + } + fieldIndicesByName[fld.name] = i + } + + var err error + { + var multi multierror + for _, each := range errs { + if each != nil { + multi = append(multi, each) + } + } + if len(multi) == 1 { + err = multi[0] + } else if len(multi) > 1 { + err = multi + } + } + + structType := &decodingStructType{ + fields: flds, + fieldIndicesByName: fieldIndicesByName, + err: err, + toArray: toArray, + } + decodingStructTypeCache.Store(t, structType) + return structType +} + +type encodingStructType struct { + fields fields + bytewiseFields fields + lengthFirstFields fields + omitEmptyFieldsIdx []int + err error + toArray bool +} + +func (st *encodingStructType) getFields(em *encMode) fields { + switch em.sort { + case SortNone, SortFastShuffle: + return st.fields + case SortLengthFirst: + return st.lengthFirstFields + default: + return st.bytewiseFields + } +} + +type bytewiseFieldSorter struct { + fields fields +} + +func (x *bytewiseFieldSorter) Len() int { + return len(x.fields) +} + +func (x *bytewiseFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *bytewiseFieldSorter) Less(i, j int) bool { + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +type lengthFirstFieldSorter struct { + fields fields +} + +func (x *lengthFirstFieldSorter) Len() int { + return len(x.fields) +} + +func (x *lengthFirstFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *lengthFirstFieldSorter) Less(i, j int) bool { + if len(x.fields[i].cborName) != len(x.fields[j].cborName) { + return len(x.fields[i].cborName) < len(x.fields[j].cborName) + } + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { + if v, _ := encodingStructTypeCache.Load(t); v != nil { + structType := v.(*encodingStructType) + return structType, structType.err + } + + flds, structOptions := getFields(t) + + if hasToArrayOption(structOptions) { + return getEncodingStructToArrayType(t, flds) + } + + var err error + var hasKeyAsInt bool + var hasKeyAsStr bool + var omitEmptyIdx []int + e := getEncodeBuffer() + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + err = &UnsupportedTypeError{t} + break + } + + // Encode field name + if flds[i].keyAsInt { + nameAsInt, numErr := 
strconv.Atoi(flds[i].name) + if numErr != nil { + err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") + break + } + flds[i].nameAsInt = int64(nameAsInt) + if nameAsInt >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + } else { + n := nameAsInt*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + } + flds[i].cborName = make([]byte, e.Len()) + copy(flds[i].cborName, e.Bytes()) + e.Reset() + + hasKeyAsInt = true + } else { + encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) + flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) + n := copy(flds[i].cborName, e.Bytes()) + copy(flds[i].cborName[n:], flds[i].name) + e.Reset() + + // If cborName contains a text string, then cborNameByteString contains a + // string that has the byte string major type but is otherwise identical to + // cborName. + flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) + copy(flds[i].cborNameByteString, flds[i].cborName) + // Reset encoded CBOR type to byte string, preserving the "additional + // information" bits: + flds[i].cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(flds[i].cborNameByteString[0]) + + hasKeyAsStr = true + } + + // Check if field can be omitted when empty + if flds[i].omitEmpty { + omitEmptyIdx = append(omitEmptyIdx, i) + } + } + putEncodeBuffer(e) + + if err != nil { + structType := &encodingStructType{err: err} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + + // Sort fields by canonical order + bytewiseFields := make(fields, len(flds)) + copy(bytewiseFields, flds) + sort.Sort(&bytewiseFieldSorter{bytewiseFields}) + + lengthFirstFields := bytewiseFields + if hasKeyAsInt && hasKeyAsStr { + lengthFirstFields = make(fields, len(flds)) + copy(lengthFirstFields, flds) + sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) + } + + structType := &encodingStructType{ + fields: flds, + bytewiseFields: bytewiseFields, + lengthFirstFields: lengthFirstFields, + omitEmptyFieldsIdx: omitEmptyIdx, + } + + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + } + + structType := &encodingStructType{ + fields: flds, + toArray: true, + } + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) { + if v, _ := encodeFuncCache.Load(t); v != nil { + fs := v.(encodeFuncs) + return fs.ef, fs.ief + } + ef, ief := getEncodeFuncInternal(t) + encodeFuncCache.Store(t, encodeFuncs{ef, ief}) + return ef, ief +} + +func getTypeInfo(t reflect.Type) *typeInfo { + if v, _ := typeInfoCache.Load(t); v != nil { + return v.(*typeInfo) + } + tInfo := newTypeInfo(t) + typeInfoCache.Store(t, tInfo) + return tInfo +} + +func hasToArrayOption(tag string) bool { + s := ",toarray" + idx := strings.Index(tag, s) + return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',') +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/common.go b/constraint/vendor/github.com/fxamacker/cbor/v2/common.go new file mode 100644 index 
000000000..ec038a49e --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/common.go @@ -0,0 +1,182 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "fmt" + "strconv" +) + +type cborType uint8 + +const ( + cborTypePositiveInt cborType = 0x00 + cborTypeNegativeInt cborType = 0x20 + cborTypeByteString cborType = 0x40 + cborTypeTextString cborType = 0x60 + cborTypeArray cborType = 0x80 + cborTypeMap cborType = 0xa0 + cborTypeTag cborType = 0xc0 + cborTypePrimitives cborType = 0xe0 +) + +func (t cborType) String() string { + switch t { + case cborTypePositiveInt: + return "positive integer" + case cborTypeNegativeInt: + return "negative integer" + case cborTypeByteString: + return "byte string" + case cborTypeTextString: + return "UTF-8 text string" + case cborTypeArray: + return "array" + case cborTypeMap: + return "map" + case cborTypeTag: + return "tag" + case cborTypePrimitives: + return "primitives" + default: + return "Invalid type " + strconv.Itoa(int(t)) + } +} + +type additionalInformation uint8 + +const ( + maxAdditionalInformationWithoutArgument = 23 + additionalInformationWith1ByteArgument = 24 + additionalInformationWith2ByteArgument = 25 + additionalInformationWith4ByteArgument = 26 + additionalInformationWith8ByteArgument = 27 + + // For major type 7. + additionalInformationAsFalse = 20 + additionalInformationAsTrue = 21 + additionalInformationAsNull = 22 + additionalInformationAsUndefined = 23 + additionalInformationAsFloat16 = 25 + additionalInformationAsFloat32 = 26 + additionalInformationAsFloat64 = 27 + + // For major type 2, 3, 4, 5. + additionalInformationAsIndefiniteLengthFlag = 31 +) + +const ( + maxSimpleValueInAdditionalInformation = 23 + minSimpleValueIn1ByteArgument = 32 +) + +func (ai additionalInformation) isIndefiniteLength() bool { + return ai == additionalInformationAsIndefiniteLengthFlag +} + +const ( + // From RFC 8949 Section 3: + // "The initial byte of each encoded data item contains both information about the major type + // (the high-order 3 bits, described in Section 3.1) and additional information + // (the low-order 5 bits)." + + // typeMask is used to extract major type in initial byte of encoded data item. + typeMask = 0xe0 + + // additionalInformationMask is used to extract additional information in initial byte of encoded data item. 
+ additionalInformationMask = 0x1f +) + +func getType(raw byte) cborType { + return cborType(raw & typeMask) +} + +func getAdditionalInformation(raw byte) byte { + return raw & additionalInformationMask +} + +func isBreakFlag(raw byte) bool { + return raw == cborBreakFlag +} + +func parseInitialByte(b byte) (t cborType, ai byte) { + return getType(b), getAdditionalInformation(b) +} + +const ( + tagNumRFC3339Time = 0 + tagNumEpochTime = 1 + tagNumUnsignedBignum = 2 + tagNumNegativeBignum = 3 + tagNumExpectedLaterEncodingBase64URL = 21 + tagNumExpectedLaterEncodingBase64 = 22 + tagNumExpectedLaterEncodingBase16 = 23 + tagNumSelfDescribedCBOR = 55799 +) + +const ( + cborBreakFlag = byte(0xff) + cborByteStringWithIndefiniteLengthHead = byte(0x5f) + cborTextStringWithIndefiniteLengthHead = byte(0x7f) + cborArrayWithIndefiniteLengthHead = byte(0x9f) + cborMapWithIndefiniteLengthHead = byte(0xbf) +) + +var ( + cborFalse = []byte{0xf4} + cborTrue = []byte{0xf5} + cborNil = []byte{0xf6} + cborNaN = []byte{0xf9, 0x7e, 0x00} + cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00} + cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00} +) + +// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types. +func validBuiltinTag(tagNum uint64, contentHead byte) error { + t := getType(contentHead) + switch tagNum { + case tagNumRFC3339Time: + // Tag content (date/time text string in RFC 3339 format) must be string type. + if t != cborTypeTextString { + return newInadmissibleTagContentTypeError( + tagNumRFC3339Time, + "text string", + t.String()) + } + return nil + + case tagNumEpochTime: + // Tag content (epoch date/time) must be uint, int, or float type. + if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) { + return newInadmissibleTagContentTypeError( + tagNumEpochTime, + "integer or floating-point number", + t.String()) + } + return nil + + case tagNumUnsignedBignum, tagNumNegativeBignum: + // Tag content (bignum) must be byte type. + if t != cborTypeByteString { + return newInadmissibleTagContentTypeErrorf( + fmt.Sprintf( + "tag number %d or %d must be followed by byte string, got %s", + tagNumUnsignedBignum, + tagNumNegativeBignum, + t.String(), + )) + } + return nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // From RFC 8949 3.4.5.2: + // The data item tagged can be a byte string or any other data item. In the latter + // case, the tag applies to all of the byte string data items contained in the data + // item, except for those contained in a nested data item tagged with an expected + // conversion. + return nil + } + + return nil +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/decode.go b/constraint/vendor/github.com/fxamacker/cbor/v2/decode.go new file mode 100644 index 000000000..85842ac73 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -0,0 +1,3187 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/x448/float16" +) + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using default decoding options. 
If v is nil, not a pointer, or +// a nil pointer, Unmarshal returns an error. +// +// To unmarshal CBOR into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalCBOR method with a valid +// CBOR value. +// +// To unmarshal CBOR byte string into a value implementing the +// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's +// UnmarshalBinary method with decoded CBOR byte string. +// +// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil +// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal +// unmarshals CBOR into the value pointed to by the pointer. If the +// pointer is nil, Unmarshal creates a new value for it to point to. +// +// To unmarshal CBOR into an empty interface value, Unmarshal uses the +// following rules: +// +// CBOR booleans decode to bool. +// CBOR positive integers decode to uint64. +// CBOR negative integers decode to int64 (big.Int if value overflows). +// CBOR floating points decode to float64. +// CBOR byte strings decode to []byte. +// CBOR text strings decode to string. +// CBOR arrays decode to []interface{}. +// CBOR maps decode to map[interface{}]interface{}. +// CBOR null and undefined values decode to nil. +// CBOR times (tag 0 and 1) decode to time.Time. +// CBOR bignums (tag 2 and 3) decode to big.Int. +// CBOR tags with an unrecognized number decode to cbor.Tag +// +// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice +// if the CBOR array is empty or slice capacity is less than CBOR array length. +// Otherwise Unmarshal overwrites existing elements, and sets slice length +// to CBOR array length. +// +// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array +// elements into Go array elements. If the Go array is smaller than the +// CBOR array, the extra CBOR array elements are discarded. If the CBOR +// array is smaller than the Go array, the extra Go array elements are +// set to zero values. +// +// To unmarshal a CBOR array into a struct, struct must have a special field "_" +// with struct tag `cbor:",toarray"`. Go array elements are decoded into struct +// fields. Any "omitempty" struct field tag option is ignored in this case. +// +// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the +// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing +// entries. Unmarshal stores key-value pairs from the CBOR map into Go map. +// See DecOptions.DupMapKey to enable duplicate map key detection. +// +// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the +// keys in the following priority: +// +// 1. "cbor" key in struct field tag, +// 2. "json" key in struct field tag, +// 3. struct field name. +// +// Unmarshal tries an exact match for field name, then a case-insensitive match. +// Map key-value pairs without corresponding struct fields are ignored. See +// DecOptions.ExtraReturnErrors to return error at unknown field. +// +// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text +// string formatted in RFC3339. To unmarshal a CBOR integer/float into a +// time.Time value, Unmarshal creates an unix time with integer/float as seconds +// and fractional seconds since January 1, 1970 UTC. As a special case, Infinite +// and NaN float values decode to time.Time's zero value. +// +// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a +// slice/map/pointer, Unmarshal sets Go value to nil. 
Because null is often +// used to mean "not present", unmarshalling CBOR null and undefined value +// into any other Go type has no effect and returns no error. +// +// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time), +// and tag 2 and 3 (bignum). +// +// Unmarshal returns ExtraneousDataError error (without decoding into v) +// if there are any remaining bytes following the first valid CBOR data item. +// See UnmarshalFirst, if you want to unmarshal only the first +// CBOR data item without ExtraneousDataError caused by remaining bytes. +func Unmarshal(data []byte, v interface{}) error { + return defaultDecMode.Unmarshal(data, v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using default decoding options. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + return defaultDecMode.UnmarshalFirst(data, v) +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func Valid(data []byte) error { + return defaultDecMode.Valid(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func Wellformed(data []byte) error { + return defaultDecMode.Wellformed(data) +} + +// Unmarshaler is the interface implemented by types that wish to unmarshal +// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR +// must copy the CBOR data if it needs to use it after returning. +type Unmarshaler interface { + UnmarshalCBOR([]byte) error +} + +// InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +type InvalidUnmarshalError struct { + s string +} + +func (e *InvalidUnmarshalError) Error() string { + return e.s +} + +// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type. +type UnmarshalTypeError struct { + CBORType string // type of CBOR value + GoType string // type of Go value it could not be decoded into + StructFieldName string // name of the struct field holding the Go value (optional) + errorMsg string // additional error message (optional) +} + +func (e *UnmarshalTypeError) Error() string { + var s string + if e.StructFieldName != "" { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType + } else { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType + } + if e.errorMsg != "" { + s += " (" + e.errorMsg + ")" + } + return s +} + +// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map. +// For example, Go doesn't allow slice as map key. 
+type InvalidMapKeyTypeError struct { + GoType string +} + +func (e *InvalidMapKeyTypeError) Error() string { + return "cbor: invalid map key type: " + e.GoType +} + +// DupMapKeyError describes detected duplicate map key in CBOR map. +type DupMapKeyError struct { + Key interface{} + Index int +} + +func (e *DupMapKeyError) Error() string { + return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index) +} + +// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct. +type UnknownFieldError struct { + Index int +} + +func (e *UnknownFieldError) Error() string { + return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index) +} + +// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item +// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as +// described in RFC 8949 Section 5 Paragraph 3). +type UnacceptableDataItemError struct { + CBORType string + Message string +} + +func (e UnacceptableDataItemError) Error() string { + return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message) +} + +// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when +// using non-default ByteStringExpectedFormat decoding option that makes decoder expect +// a specified format such as base64, hex, etc. +type ByteStringExpectedFormatError struct { + expectedFormatOption ByteStringExpectedFormatMode + err error +} + +func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError { + return &ByteStringExpectedFormatError{expectedFormatOption, err} +} + +func (e *ByteStringExpectedFormatError) Error() string { + switch e.expectedFormatOption { + case ByteStringExpectedBase64URL: + return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err) + + case ByteStringExpectedBase64: + return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err) + + case ByteStringExpectedBase16: + return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err) + + default: + return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err) + } +} + +func (e *ByteStringExpectedFormatError) Unwrap() error { + return e.err +} + +// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags +// fails because of inadmissible type for tag content. Currently, the built-in +// CBOR tags in this codec are tags 0-3 and 21-23. +// See "Tag validity" in RFC 8949 Section 5.3.2. 
+type InadmissibleTagContentTypeError struct { + s string + tagNum int + expectedTagContentType string + gotTagContentType string +} + +func newInadmissibleTagContentTypeError( + tagNum int, + expectedTagContentType string, + gotTagContentType string, +) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{ + tagNum: tagNum, + expectedTagContentType: expectedTagContentType, + gotTagContentType: gotTagContentType, + } +} + +func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor" +} + +func (e *InadmissibleTagContentTypeError) Error() string { + if e.s == "" { + return fmt.Sprintf( + "cbor: tag number %d must be followed by %s, got %s", + e.tagNum, + e.expectedTagContentType, + e.gotTagContentType, + ) + } + return e.s +} + +// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if: +// 1. When decoding into a struct, both keys match the same struct field. The keys are also +// considered duplicates if neither matches any field and decoding to interface{} would produce +// equal (==) values for both keys. +// 2. When decoding into a map, both keys are equal (==) when decoded into values of the +// destination map's key type. +type DupMapKeyMode int + +const ( + // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error) + // uses faster of "keep first" or "keep last" depending on Go data type and other factors. + DupMapKeyQuiet DupMapKeyMode = iota + + // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys. + // APF means "Allow Partial Fill" and the destination map or struct can be partially filled. + // If a duplicate map key is detected, DupMapKeyError is returned without further decoding + // of the map. It's the caller's responsibility to respond to DupMapKeyError by + // discarding the partially filled result if their protocol requires it. + // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use. + DupMapKeyEnforcedAPF + + maxDupMapKeyMode +) + +func (dmkm DupMapKeyMode) valid() bool { + return dmkm >= 0 && dmkm < maxDupMapKeyMode +} + +// IndefLengthMode specifies whether to allow indefinite length items. +type IndefLengthMode int + +const ( + // IndefLengthAllowed allows indefinite length items. + IndefLengthAllowed IndefLengthMode = iota + + // IndefLengthForbidden disallows indefinite length items. + IndefLengthForbidden + + maxIndefLengthMode +) + +func (m IndefLengthMode) valid() bool { + return m >= 0 && m < maxIndefLengthMode +} + +// TagsMode specifies whether to allow CBOR tags. +type TagsMode int + +const ( + // TagsAllowed allows CBOR tags. + TagsAllowed TagsMode = iota + + // TagsForbidden disallows CBOR tags. + TagsForbidden + + maxTagsMode +) + +func (tm TagsMode) valid() bool { + return tm >= 0 && tm < maxTagsMode +} + +// IntDecMode specifies which Go type (int64, uint64, or big.Int) should +// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}. +type IntDecMode int + +const ( + // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}. 
+ // It decodes CBOR unsigned integer (major type 0) to: + // - uint64 + // It decodes CBOR negative integer (major type 1) to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64 + IntDecConvertNone IntDecMode = iota + + // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It decodes CBOR integers (major type 0 and 1) to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64 + // - return UnmarshalTypeError if value > math.MaxInt64 + // Deprecated: IntDecConvertSigned should not be used. + // Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone. + IntDecConvertSigned + + // IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It decodes CBOR integers (major type 0 and 1) to: + // - int64 if value fits + // - return UnmarshalTypeError if value doesn't fit into int64 + IntDecConvertSignedOrFail + + // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It makes CBOR integers (major type 0 and 1) decode to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64 + IntDecConvertSignedOrBigInt + + maxIntDec +) + +func (idm IntDecMode) valid() bool { + return idm >= 0 && idm < maxIntDec +} + +// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2) +// as Go map key when decoding CBOR map key into an empty Go interface value. +// Specifically, this option applies when decoding CBOR map into +// - Go empty interface, or +// - Go map with empty interface as key type. +// The CBOR map key types handled by this option are +// - byte string +// - tagged byte string +// - nested tagged byte string +type MapKeyByteStringMode int + +const ( + // MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key. + // Since Go doesn't allow []byte as map key, CBOR byte string is decoded to + // ByteString which has underlying string type. + // This is the default setting. + MapKeyByteStringAllowed MapKeyByteStringMode = iota + + // MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key. + // Attempting to decode CBOR byte string as map key into empty interface value + // returns a decoding error. + MapKeyByteStringForbidden + + maxMapKeyByteStringMode +) + +func (mkbsm MapKeyByteStringMode) valid() bool { + return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode +} + +// ExtraDecErrorCond specifies extra conditions that should be treated as errors. +type ExtraDecErrorCond uint + +// ExtraDecErrorNone indicates no extra error condition. +const ExtraDecErrorNone ExtraDecErrorCond = 0 + +const ( + // ExtraDecErrorUnknownField indicates error condition when destination + // Go struct doesn't have a field matching a CBOR map key. + ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota + + maxExtraDecError +) + +func (ec ExtraDecErrorCond) valid() bool { + return ec < maxExtraDecError +} + +// UTF8Mode option specifies if decoder should +// decode CBOR Text containing invalid UTF-8 string. +type UTF8Mode int + +const ( + // UTF8RejectInvalid rejects CBOR Text containing + // invalid UTF-8 string. + UTF8RejectInvalid UTF8Mode = iota + + // UTF8DecodeInvalid allows decoding CBOR Text containing + // invalid UTF-8 string. 
+ UTF8DecodeInvalid + + maxUTF8Mode +) + +func (um UTF8Mode) valid() bool { + return um >= 0 && um < maxUTF8Mode +} + +// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names. +type FieldNameMatchingMode int + +const ( + // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag + // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose + // name is a case-insensitive match for the item's key. + FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota + + // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an + // exact match for the item's key. + FieldNameMatchingCaseSensitive + + maxFieldNameMatchingMode +) + +func (fnmm FieldNameMatchingMode) valid() bool { + return fnmm >= 0 && fnmm < maxFieldNameMatchingMode +} + +// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}. +type BigIntDecMode int + +const ( + // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int) + // when unmarshalling into a Go interface{}. + BigIntDecodeValue BigIntDecMode = iota + + // BigIntDecodePointer makes CBOR bignum decode to *big.Int when + // unmarshalling into a Go interface{}. + BigIntDecodePointer + + maxBigIntDecMode +) + +func (bidm BigIntDecMode) valid() bool { + return bidm >= 0 && bidm < maxBigIntDecMode +} + +// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string. +type ByteStringToStringMode int + +const ( + // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string. + ByteStringToStringForbidden ByteStringToStringMode = iota + + // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string. + ByteStringToStringAllowed + + // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string + // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of + // the "expected later encoding" tags (numbers 21 through 23), the destination string will + // be populated by applying the designated text encoding to the contents of the input byte + // string. + ByteStringToStringAllowedWithExpectedLaterEncoding + + maxByteStringToStringMode +) + +func (bstsm ByteStringToStringMode) valid() bool { + return bstsm >= 0 && bstsm < maxByteStringToStringMode +} + +// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name. +type FieldNameByteStringMode int + +const ( + // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name. + FieldNameByteStringForbidden FieldNameByteStringMode = iota + + // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names. + FieldNameByteStringAllowed + + maxFieldNameByteStringMode +) + +func (fnbsm FieldNameByteStringMode) valid() bool { + return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode +} + +// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any). +// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. +type UnrecognizedTagToAnyMode int + +const ( + // UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag + // when decoding unrecognized CBOR tag into an empty interface. 
+ UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota + + // UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type) + // when decoding unrecognized CBOR tag into an empty interface. + UnrecognizedTagContentToAny + + maxUnrecognizedTagToAny +) + +func (uttam UnrecognizedTagToAnyMode) valid() bool { + return uttam >= 0 && uttam < maxUnrecognizedTagToAny +} + +// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any). +// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. +type TimeTagToAnyMode int + +const ( + // TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value + // when decoding tag 0 or 1 into an empty interface. + TimeTagToTime TimeTagToAnyMode = iota + + // TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339 + + // TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339Nano + + maxTimeTagToAnyMode +) + +func (tttam TimeTagToAnyMode) valid() bool { + return tttam >= 0 && tttam < maxTimeTagToAnyMode +} + +// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value +// number (0...23 and 32...255). +type SimpleValueRegistry struct { + rejected [256]bool +} + +// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is +// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned. +func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error { + return func(r *SimpleValueRegistry) error { + if sv >= 24 && sv <= 31 { + return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv) + } + r.rejected[sv] = true + return nil + } +} + +// Creates a new SimpleValueRegistry. The registry state is initialized by executing the provided +// functions in order against a registry that is pre-populated with the defaults for all well-formed +// simple value numbers. +func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) { + var r SimpleValueRegistry + for _, fn := range fns { + if err := fn(&r); err != nil { + return nil, err + } + } + return &r, nil +} + +// NaNMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing NaN (not-a-number). +type NaNMode int + +const ( + // NaNDecodeAllowed will decode NaN values to Go float32 or float64. + NaNDecodeAllowed NaNMode = iota + + // NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value. + NaNDecodeForbidden + + maxNaNDecode +) + +func (ndm NaNMode) valid() bool { + return ndm >= 0 && ndm < maxNaNDecode +} + +// InfMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing positive or negative infinity. +type InfMode int + +const ( + // InfDecodeAllowed will decode infinite values to Go float32 or float64. + InfDecodeAllowed InfMode = iota + + // InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an + // infinite value. 
+ InfDecodeForbidden + + maxInfDecode +) + +func (idm InfMode) valid() bool { + return idm >= 0 && idm < maxInfDecode +} + +// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time. +type ByteStringToTimeMode int + +const ( + // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time. + ByteStringToTimeForbidden ByteStringToTimeMode = iota + + // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time. + ByteStringToTimeAllowed + + maxByteStringToTimeMode +) + +func (bttm ByteStringToTimeMode) valid() bool { + return bttm >= 0 && bttm < maxByteStringToTimeMode +} + +// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice +// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if +// the CBOR byte string does not contain the expected format (e.g. base64) specified. +// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" +// in RFC 8949 Section 3.4.5.2. +type ByteStringExpectedFormatMode int + +const ( + // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice + // if the byte string is not tagged by CBOR tag 21-23. + ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota + + // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64url-encoded bytes into Go slice. + ByteStringExpectedBase64URL + + // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64-encoded bytes into Go slice. + ByteStringExpectedBase64 + + // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base16-encoded bytes into Go slice. + ByteStringExpectedBase16 + + maxByteStringExpectedFormatMode +) + +func (bsefm ByteStringExpectedFormatMode) valid() bool { + return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode +} + +// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be +// decoded. +type BignumTagMode int + +const ( + // BignumTagAllowed allows bignum tags to be decoded. + BignumTagAllowed BignumTagMode = iota + + // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag + // is encountered in the input. + BignumTagForbidden + + maxBignumTag +) + +func (btm BignumTagMode) valid() bool { + return btm >= 0 && btm < maxBignumTag +} + +// BinaryUnmarshalerMode specifies how to decode into types that implement +// encoding.BinaryUnmarshaler. +type BinaryUnmarshalerMode int + +const ( + // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte + // string when decoding into a value that implements BinaryUnmarshaler. + BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota + + // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode. + BinaryUnmarshalerNone + + maxBinaryUnmarshalerMode +) + +func (bum BinaryUnmarshalerMode) valid() bool { + return bum >= 0 && bum < maxBinaryUnmarshalerMode +} + +// DecOptions specifies decoding options. +type DecOptions struct { + // DupMapKey specifies whether to enforce duplicate map key. 
+ DupMapKey DupMapKeyMode + + // TimeTag specifies whether or not untagged data items, or tags other + // than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1 + // appears in an input, the type of its content is always validated as + // specified in RFC 8949. That behavior is not controlled by this + // option. The behavior of the supported modes are: + // + // DecTagIgnored (default): Untagged text strings and text strings + // enclosed in tags other than 0 and 1 are decoded as though enclosed + // in tag 0. Untagged unsigned integers, negative integers, and + // floating-point numbers (or those enclosed in tags other than 0 and + // 1) are decoded as though enclosed in tag 1. Decoding a tag other + // than 0 or 1 enclosing simple values null or undefined into a + // time.Time does not modify the destination value. + // + // DecTagOptional: Untagged text strings are decoded as though + // enclosed in tag 0. Untagged unsigned integers, negative integers, + // and floating-point numbers are decoded as though enclosed in tag + // 1. Tags other than 0 and 1 will produce an error on attempts to + // decode them into a time.Time. + // + // DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any + // other input will produce an error. + TimeTag DecTagMode + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // IntDec specifies which Go integer type (int64 or uint64) to use + // when decoding CBOR int (major type 0 and 1) to Go interface{}. + IntDec IntDecMode + + // MapKeyByteString specifies how to decode CBOR byte string as map key + // when decoding CBOR map with byte string key into an empty interface value. + // By default, an error is returned when attempting to decode CBOR byte string + // as map key because Go doesn't allow []byte as map key. + MapKeyByteString MapKeyByteStringMode + + // ExtraReturnErrors specifies extra conditions that should be treated as errors. + ExtraReturnErrors ExtraDecErrorCond + + // DefaultMapType specifies Go map type to create and decode to + // when unmarshalling CBOR into an empty interface value. + // By default, unmarshal uses map[interface{}]interface{}. + DefaultMapType reflect.Type + + // UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8. + // By default, unmarshal rejects CBOR text containing invalid UTF-8. + UTF8 UTF8Mode + + // FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names. + FieldNameMatching FieldNameMatchingMode + + // BigIntDec specifies how to decode CBOR bignum to Go interface{}. + BigIntDec BigIntDecMode + + // DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte + // string into an empty interface value. 
Types to which a []byte is convertible are valid + // for this option, except for array and pointer-to-array types. If nil, the default is + // []byte. + DefaultByteStringType reflect.Type + + // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string. + ByteStringToString ByteStringToStringMode + + // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a + // Go struct field name. + FieldNameByteString FieldNameByteStringMode + + // UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface. + // Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. + UnrecognizedTagToAny UnrecognizedTagToAnyMode + + // TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any). + // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. + TimeTagToAny TimeTagToAnyMode + + // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding + // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped + // to the Go analog values false, true, nil, and nil, respectively, and all other simple + // values N (except the reserved simple values 24 through 31) are mapped to + // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded. + // + // Users may provide a custom SimpleValueRegistry constructed via + // NewSimpleValueRegistryFromDefaults. + SimpleValues *SimpleValueRegistry + + // NaN specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing NaN (not-a-number). + NaN NaNMode + + // Inf specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing positive or negative infinity. + Inf InfMode + + // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time. + ByteStringToTime ByteStringToTimeMode + + // ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice + // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if + // the CBOR byte string does not contain the expected format (e.g. base64) specified. + // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" + // in RFC 8949 Section 3.4.5.2. + ByteStringExpectedFormat ByteStringExpectedFormatMode + + // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can + // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a + // CBOR input, independent of the type of the destination value of a particular Unmarshal + // operation. + BignumTag BignumTagMode + + // BinaryUnmarshaler specifies how to decode into types that implement + // encoding.BinaryUnmarshaler. + BinaryUnmarshaler BinaryUnmarshalerMode +} + +// DecMode returns DecMode with immutable options and no tags (safe for concurrency). +func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam + return opts.decMode() +} + +// validForTags checks that the provided tag set is compatible with these options and returns a +// non-nil error if and only if the provided tag set is incompatible. 
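+//
+// For example (illustrative note): a TagSet that registers tag 22 for a
+// caller-defined content type cannot be combined with a DecOptions value that
+// sets ByteStringExpectedFormat to ByteStringExpectedBase64, because tag 22
+// would then carry two conflicting meanings for enclosed byte strings.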
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return errors.New("cbor: cannot create DecMode with nil value as TagSet") + } + if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone { + for _, tagNum := range []uint64{ + tagNumExpectedLaterEncodingBase64URL, + tagNumExpectedLaterEncodingBase64, + tagNumExpectedLaterEncodingBase16, + } { + if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil { + return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt) + } + } + + } + return nil +} + +// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency). +func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.DecTag != DecTagIgnored { + ts[contentType] = tag + } + } + syncTags.RUnlock() + + if len(ts) > 0 { + dm.tags = ts + } + + return dm, nil +} + +// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency). +func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + dm.tags = tags + return dm, nil +} + +const ( + defaultMaxArrayElements = 131072 + minMaxArrayElements = 16 + maxMaxArrayElements = 2147483647 + + defaultMaxMapPairs = 131072 + minMaxMapPairs = 16 + maxMaxMapPairs = 2147483647 + + defaultMaxNestedLevels = 32 + minMaxNestedLevels = 4 + maxMaxNestedLevels = 65535 +) + +var defaultSimpleValues = func() *SimpleValueRegistry { + registry, err := NewSimpleValueRegistryFromDefaults() + if err != nil { + panic(err) + } + return registry +}() + +//nolint:gocyclo // Each option comes with some manageable boilerplate +func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.DupMapKey.valid() { + return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey))) + } + + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + + if !opts.IntDec.valid() { + return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec))) + } + + if !opts.MapKeyByteString.valid() { + return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString))) + } + + if opts.MaxNestedLevels == 0 { + opts.MaxNestedLevels = defaultMaxNestedLevels + } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels 
> maxMaxNestedLevels { + return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) + + " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])") + } + + if opts.MaxArrayElements == 0 { + opts.MaxArrayElements = defaultMaxArrayElements + } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements { + return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) + + " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])") + } + + if opts.MaxMapPairs == 0 { + opts.MaxMapPairs = defaultMaxMapPairs + } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs { + return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) + + " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])") + } + + if !opts.ExtraReturnErrors.valid() { + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + } + + if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { + return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType) + } + + if !opts.UTF8.valid() { + return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8))) + } + + if !opts.FieldNameMatching.valid() { + return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching))) + } + + if !opts.BigIntDec.valid() { + return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec))) + } + + if opts.DefaultByteStringType != nil && + opts.DefaultByteStringType.Kind() != reflect.String && + (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) { + return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType) + } + + if !opts.ByteStringToString.valid() { + return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString))) + } + + if !opts.FieldNameByteString.valid() { + return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString))) + } + + if !opts.UnrecognizedTagToAny.valid() { + return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny))) + } + simpleValues := opts.SimpleValues + if simpleValues == nil { + simpleValues = defaultSimpleValues + } + + if !opts.TimeTagToAny.valid() { + return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny))) + } + + if !opts.NaN.valid() { + return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN))) + } + + if !opts.Inf.valid() { + return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf))) + } + + if !opts.ByteStringToTime.valid() { + return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime))) + } + + if !opts.ByteStringExpectedFormat.valid() { + return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat))) + } + + if !opts.BignumTag.valid() { + return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag))) + } + + if !opts.BinaryUnmarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler))) + } + + 
dm := decMode{ + dupMapKey: opts.DupMapKey, + timeTag: opts.TimeTag, + maxNestedLevels: opts.MaxNestedLevels, + maxArrayElements: opts.MaxArrayElements, + maxMapPairs: opts.MaxMapPairs, + indefLength: opts.IndefLength, + tagsMd: opts.TagsMd, + intDec: opts.IntDec, + mapKeyByteString: opts.MapKeyByteString, + extraReturnErrors: opts.ExtraReturnErrors, + defaultMapType: opts.DefaultMapType, + utf8: opts.UTF8, + fieldNameMatching: opts.FieldNameMatching, + bigIntDec: opts.BigIntDec, + defaultByteStringType: opts.DefaultByteStringType, + byteStringToString: opts.ByteStringToString, + fieldNameByteString: opts.FieldNameByteString, + unrecognizedTagToAny: opts.UnrecognizedTagToAny, + timeTagToAny: opts.TimeTagToAny, + simpleValues: simpleValues, + nanDec: opts.NaN, + infDec: opts.Inf, + byteStringToTime: opts.ByteStringToTime, + byteStringExpectedFormat: opts.ByteStringExpectedFormat, + bignumTag: opts.BignumTag, + binaryUnmarshaler: opts.BinaryUnmarshaler, + } + + return &dm, nil +} + +// DecMode is the main interface for CBOR decoding. +type DecMode interface { + // Unmarshal parses the CBOR-encoded data into the value pointed to by v + // using the decoding mode. If v is nil, not a pointer, or a nil pointer, + // Unmarshal returns an error. + // + // See the documentation for Unmarshal for details. + Unmarshal(data []byte, v interface{}) error + + // UnmarshalFirst parses the first CBOR data item into the value pointed to by v + // using the decoding mode. Any remaining bytes are returned in rest. + // + // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. + // + // See the documentation for Unmarshal for details. + UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) + + // Valid checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + // + // WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) + // and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". + // + // Deprecated: Valid is kept for compatibility and should not be used. + // Use Wellformed instead because it has a more appropriate name. + Valid(data []byte) error + + // Wellformed checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + Wellformed(data []byte) error + + // NewDecoder returns a new decoder that reads from r using dm DecMode. + NewDecoder(r io.Reader) *Decoder + + // DecOptions returns user specified options used to create this DecMode. 
+ DecOptions() DecOptions +} + +type decMode struct { + tags tagProvider + dupMapKey DupMapKeyMode + timeTag DecTagMode + maxNestedLevels int + maxArrayElements int + maxMapPairs int + indefLength IndefLengthMode + tagsMd TagsMode + intDec IntDecMode + mapKeyByteString MapKeyByteStringMode + extraReturnErrors ExtraDecErrorCond + defaultMapType reflect.Type + utf8 UTF8Mode + fieldNameMatching FieldNameMatchingMode + bigIntDec BigIntDecMode + defaultByteStringType reflect.Type + byteStringToString ByteStringToStringMode + fieldNameByteString FieldNameByteStringMode + unrecognizedTagToAny UnrecognizedTagToAnyMode + timeTagToAny TimeTagToAnyMode + simpleValues *SimpleValueRegistry + nanDec NaNMode + infDec InfMode + byteStringToTime ByteStringToTimeMode + byteStringExpectedFormat ByteStringExpectedFormatMode + bignumTag BignumTagMode + binaryUnmarshaler BinaryUnmarshalerMode +} + +var defaultDecMode, _ = DecOptions{}.decMode() + +// DecOptions returns user specified options used to create this DecMode. +func (dm *decMode) DecOptions() DecOptions { + simpleValues := dm.simpleValues + if simpleValues == defaultSimpleValues { + // Users can't explicitly set this to defaultSimpleValues. It must have been nil in + // the original DecOptions. + simpleValues = nil + } + + return DecOptions{ + DupMapKey: dm.dupMapKey, + TimeTag: dm.timeTag, + MaxNestedLevels: dm.maxNestedLevels, + MaxArrayElements: dm.maxArrayElements, + MaxMapPairs: dm.maxMapPairs, + IndefLength: dm.indefLength, + TagsMd: dm.tagsMd, + IntDec: dm.intDec, + MapKeyByteString: dm.mapKeyByteString, + ExtraReturnErrors: dm.extraReturnErrors, + DefaultMapType: dm.defaultMapType, + UTF8: dm.utf8, + FieldNameMatching: dm.fieldNameMatching, + BigIntDec: dm.bigIntDec, + DefaultByteStringType: dm.defaultByteStringType, + ByteStringToString: dm.byteStringToString, + FieldNameByteString: dm.fieldNameByteString, + UnrecognizedTagToAny: dm.unrecognizedTagToAny, + TimeTagToAny: dm.timeTagToAny, + SimpleValues: simpleValues, + NaN: dm.nanDec, + Inf: dm.infDec, + ByteStringToTime: dm.byteStringToTime, + ByteStringExpectedFormat: dm.byteStringExpectedFormat, + BignumTag: dm.bignumTag, + BinaryUnmarshaler: dm.binaryUnmarshaler, + } +} + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using dm decoding mode. If v is nil, not a pointer, or a nil pointer, +// Unmarshal returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) Unmarshal(data []byte, v interface{}) error { + d := decoder{data: data, dm: dm} + + // Check well-formedness. + off := d.off // Save offset before data validation + err := d.wellformed(false, false) // don't allow any extra data after valid data item. + d.off = off // Restore offset + if err != nil { + return err + } + + return d.value(v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using dm decoding mode. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + d := decoder{data: data, dm: dm} + + // check well-formedness. + off := d.off // Save offset before data validation + err = d.wellformed(true, false) // allow extra data after well-formed data item + d.off = off // Restore offset + + // If it is well-formed, parse the value. 
This is structured like this to allow + // better test coverage + if err == nil { + err = d.value(v) + } + + // If either wellformed or value returned an error, do not return rest bytes + if err != nil { + return nil, err + } + + // Return the rest of the data slice (which might be len 0) + return d.data[d.off:], nil +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func (dm *decMode) Valid(data []byte) error { + return dm.Wellformed(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func (dm *decMode) Wellformed(data []byte) error { + d := decoder{data: data, dm: dm} + return d.wellformed(false, false) +} + +// NewDecoder returns a new decoder that reads from r using dm DecMode. +func (dm *decMode) NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r, d: decoder{dm: dm}} +} + +type decoder struct { + data []byte + off int // next read offset in data + dm *decMode + + // expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags, + // if any. + // + // The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding + // byte strings, the effective encoding comes from the tag nearest to the byte string being + // decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be + // controlled by tag 22,and in the data item 23(h'42', 22([21(h'43')])]) the effective + // encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21, + // respectively. + expectedLaterEncodingTags []uint64 +} + +// value decodes CBOR data item into the value pointed to by v. +// If CBOR data item fails to be decoded into v, +// error is returned and offset is moved to the next CBOR data item. +// Precondition: d.data contains at least one well-formed CBOR data item. +func (d *decoder) value(v interface{}) error { + // v can't be nil, non-pointer, or nil pointer value. + if v == nil { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"} + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"} + } else if rv.IsNil() { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"} + } + rv = rv.Elem() + return d.parseToValue(rv, getTypeInfo(rv.Type())) +} + +// parseToValue decodes CBOR data to value. It assumes data is well-formed, +// and does not perform bounds checking. +func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + + // Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil. 
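+	// For example (illustrative note), decoding the CBOR simple value null (0xf6)
+	// into a non-nil *int destination sets that pointer to nil.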
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr { + d.skip() + v.Set(reflect.Zero(v.Type())) + return nil + } + + if tInfo.spclType == specialTypeIface { + if !v.IsNil() { + // Use value type + v = v.Elem() + tInfo = getTypeInfo(v.Type()) + } else { //nolint:gocritic + // Create and use registered type if CBOR data is registered tag + if d.dm.tags != nil && d.nextCBORType() == cborTypeTag { + + off := d.off + var tagNums []uint64 + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + tagNums = append(tagNums, tagNum) + } + d.off = off + + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + if registeredType.Implements(tInfo.nonPtrType) || + reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) { + v.Set(reflect.New(registeredType)) + v = v.Elem() + tInfo = getTypeInfo(registeredType) + } + } + } + } + } + + // Create new value for the pointer v to point to. + // At this point, CBOR value is not nil/undefined if v is a pointer. + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + d.skip() + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + // Strip self-described CBOR tag number. + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return err + } + } + d.off = off + + if tInfo.spclType != specialTypeNone { + switch tInfo.spclType { + case specialTypeEmptyIface: + iv, err := d.parse(false) // Skipped self-described CBOR tag number already. + if iv != nil { + v.Set(reflect.ValueOf(iv)) + } + return err + + case specialTypeTag: + return d.parseToTag(v) + + case specialTypeTime: + if d.nextCBORNil() { + // Decoding CBOR null and undefined to time.Time is no-op. + d.skip() + return nil + } + tm, ok, err := d.parseToTime() + if err != nil { + return err + } + if ok { + v.Set(reflect.ValueOf(tm)) + } + return nil + + case specialTypeUnmarshalerIface: + return d.parseToUnmarshaler(v) + } + } + + // Check registered tag number + if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil { + t := d.nextCBORType() + if t != cborTypeTag { + if tagItem.opts.DecTag == DecTagRequired { + d.skip() // Required tag number is absent, skip entire tag + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.typ.String(), + errorMsg: "expect CBOR tag value"} + } + } else if err := d.validRegisteredTagNums(tagItem); err != nil { + d.skip() // Skip tag content + return err + } + } + + t := d.nextCBORType() + + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + return fillPositiveInt(t, val, v) + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
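+			// For example (illustrative note), the encoding 0x3bffffffffffffffff is the
+			// CBOR negative integer -18446744073709551616 (-2^64), which only fits in a
+			// big.Int.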
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + nValue := int64(-1) ^ int64(val) + return fillNegativeInt(t, nValue, v) + + case cborTypeByteString: + b, copied := d.parseByteString() + b, converted, err := d.applyByteStringTextConversion(b, v.Type()) + if err != nil { + return err + } + copied = copied || converted + return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler) + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return err + } + return fillTextString(t, b, v) + + case cborTypePrimitives: + _, ai, val := d.getHead() + switch ai { + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return fillFloat(t, f, v) + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return fillFloat(t, f, v) + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return fillFloat(t, f, v) + + default: // ai <= 24 + if d.dm.simpleValues.rejected[SimpleValue(val)] { + return &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return fillBool(t, ai == additionalInformationAsTrue, v) + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return fillNil(t, v) + + default: + return fillPositiveInt(t, val, v) + } + } + + case cborTypeTag: + _, _, tagNum := d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsUint64() { + return fillPositiveInt(t, bi.Uint64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumNegativeBignum: + // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsInt64() { + return fillNegativeInt(t, bi.Int64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. 
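+			// Only the tag nearest to the byte string being decoded takes effect; the
+			// top of the expectedLaterEncodingTags stack pushed here is what
+			// applyByteStringTextConversion inspects (illustrative note).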
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + } + } + + return d.parseToValue(v, tInfo) + + case cborTypeArray: + if tInfo.nonPtrKind == reflect.Slice { + return d.parseArrayToSlice(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Array { + return d.parseArrayToArray(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Struct { + return d.parseArrayToStruct(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + + case cborTypeMap: + if tInfo.nonPtrKind == reflect.Struct { + return d.parseMapToStruct(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Map { + return d.parseMapToMap(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + } + + return nil +} + +func (d *decoder) parseToTag(v reflect.Value) error { + if d.nextCBORNil() { + // Decoding CBOR null and undefined to cbor.Tag is no-op. + d.skip() + return nil + } + + t := d.nextCBORType() + if t != cborTypeTag { + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()} + } + + // Unmarshal tag number + _, _, num := d.getHead() + + // Unmarshal tag content + content, err := d.parse(false) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(Tag{num, content})) + return nil +} + +// parseToTime decodes the current data item as a time.Time. The bool return value is false if and +// only if the destination value should remain unmodified. +func (d *decoder) parseToTime() (time.Time, bool, error) { + // Verify that tag number or absence of tag number is acceptable to specified timeTag. 
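+	// For example (illustrative note), with DecTagRequired an untagged text string
+	// "2013-03-21T20:04:00Z" is rejected, while the same string enclosed in tag 0
+	// decodes successfully.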
+ if t := d.nextCBORType(); t == cborTypeTag { + if d.dm.timeTag == DecTagIgnored { + // Skip all enclosing tags + for t == cborTypeTag { + d.getHead() + t = d.nextCBORType() + } + if d.nextCBORNil() { + d.skip() + return time.Time{}, false, nil + } + } else { + // Read tag number + _, _, tagNum := d.getHead() + if tagNum != 0 && tagNum != 1 { + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + } + } + } else { + if d.dm.timeTag == DecTagRequired { + d.skip() + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"} + } + } + + switch t := d.nextCBORType(); t { + case cborTypeByteString: + if d.dm.byteStringToTime == ByteStringToTimeAllowed { + b, _ := d.parseByteString() + t, err := time.Parse(time.RFC3339, string(b)) + if err != nil { + return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err) + } + return t, true, nil + } + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + + case cborTypeTextString: + s, err := d.parseTextString() + if err != nil { + return time.Time{}, false, err + } + t, err := time.Parse(time.RFC3339, string(s)) + if err != nil { + return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error()) + } + return t, true, nil + + case cborTypePositiveInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%d overflows Go's int64", val), + } + } + return time.Unix(int64(val), 0), true, nil + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + if val == math.MaxUint64 { + // Maximum absolute value representable by negative integer is 2^64, + // not 2^64-1, so it overflows uint64. + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: "-18446744073709551616 overflows Go's int64", + } + } + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1), + } + } + return time.Unix(int64(-1)^int64(val), 0), true, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + var f float64 + switch ai { + case additionalInformationAsFloat16: + f = float64(float16.Frombits(uint16(val)).Float32()) + + case additionalInformationAsFloat32: + f = float64(math.Float32frombits(uint32(val))) + + case additionalInformationAsFloat64: + f = math.Float64frombits(val) + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } + + if math.IsNaN(f) || math.IsInf(f, 0) { + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6 + return time.Time{}, true, nil + } + seconds, fractional := math.Modf(f) + return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } +} + +// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface. +// It assumes data is well-formed, and does not perform bounds checking. 
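+//
+// A minimal caller-side sketch (hypothetical type, for illustration only):
+//
+//	type rawItem []byte
+//
+//	func (r *rawItem) UnmarshalCBOR(data []byte) error {
+//		// data spans exactly one well-formed CBOR data item.
+//		*r = append((*r)[:0], data...)
+//		return nil
+//	}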
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error { + if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() { + d.skip() + return nil + } + + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + if u, ok := v.Interface().(Unmarshaler); ok { + start := d.off + d.skip() + return u.UnmarshalCBOR(d.data[start:d.off]) + } + d.skip() + return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler") +} + +// parse parses CBOR data and returns value in default Go type. +// It assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo + // Strip self-described CBOR tag number. + if skipSelfDescribedTag { + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return nil, err + } + } + d.off = off + + t := d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + + switch d.dm.intDec { + case IntDecConvertNone: + return val, nil + + case IntDecConvertSigned, IntDecConvertSignedOrFail: + if val > math.MaxInt64 { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + + return int64(val), nil + + case IntDecConvertSignedOrBigInt: + if val > math.MaxInt64 { + bi := new(big.Int).SetUint64(val) + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + return int64(val), nil + + default: + // not reachable + } + + case cborTypeNegativeInt: + _, _, val := d.getHead() + + if val > math.MaxInt64 { + // CBOR negative integer value overflows Go int64, use big.Int instead. + bi := new(big.Int).SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.intDec == IntDecConvertSignedOrFail { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + nValue := int64(-1) ^ int64(val) + return nValue, nil + + case cborTypeByteString: + b, copied := d.parseByteString() + var effectiveByteStringType = d.dm.defaultByteStringType + if effectiveByteStringType == nil { + effectiveByteStringType = typeByteSlice + } + b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType) + if err != nil { + return nil, err + } + copied = copied || converted + + switch effectiveByteStringType { + case typeByteSlice: + if copied { + return b, nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return clone, nil + + case typeString: + return string(b), nil + + default: + if copied || d.dm.defaultByteStringType.Kind() == reflect.String { + // Avoid an unnecessary copy since the conversion to string must + // copy the underlying bytes. 
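+				// (Converting []byte to a string kind copies; converting to a named byte
+				// slice type aliases the backing array, hence the copied check above.)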
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil + } + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return nil, err + } + return string(b), nil + + case cborTypeTag: + tagOff := d.off + _, _, tagNum := d.getHead() + contentOff := d.off + + switch tagNum { + case tagNumRFC3339Time, tagNumEpochTime: + d.off = tagOff + tm, _, err := d.parseToTime() + if err != nil { + return nil, err + } + + switch d.dm.timeTagToAny { + case TimeTagToTime: + return tm, nil + + case TimeTagToRFC3339: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format. E.g. year cannot exceed 9999, etc. + text, err := tm.Truncate(time.Second).MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err) + } + return string(text), nil + + case TimeTagToRFC3339Nano: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format with sub-second precision. + text, err := tm.MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err) + } + return string(text), nil + + default: + // not reachable + } + + case tagNumUnsignedBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumNegativeBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. + if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + return d.parse(false) + } + } + + if d.dm.tags != nil { + // Parse to specified type if tag number is registered. 
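+			// For example (hypothetical tag number, for illustration), a TagSet entry
+			// mapping tag 1000 to a caller-defined type makes 1000(<content>) decode
+			// into that type instead of a cbor.Tag.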
+ tagNums := []uint64{tagNum} + for d.nextCBORType() == cborTypeTag { + _, _, num := d.getHead() + tagNums = append(tagNums, num) + } + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + d.off = tagOff + rv := reflect.New(registeredType) + if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil { + return nil, err + } + return rv.Elem().Interface(), nil + } + } + + // Parse tag content + d.off = contentOff + content, err := d.parse(false) + if err != nil { + return nil, err + } + if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny { + return content, nil + } + return Tag{tagNum, content}, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + return nil, &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + if ai < 20 || ai == 24 { + return SimpleValue(val), nil + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return (ai == additionalInformationAsTrue), nil + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return nil, nil + + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return f, nil + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return f, nil + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return f, nil + } + + case cborTypeArray: + return d.parseArray() + + case cborTypeMap: + if d.dm.defaultMapType != nil { + m := reflect.New(d.dm.defaultMapType) + err := d.parseToValue(m, getTypeInfo(m.Elem().Type())) + if err != nil { + return nil, err + } + return m.Elem().Interface(), nil + } + return d.parseMap() + } + + return nil, nil +} + +// parseByteString parses a CBOR encoded byte string. The returned byte slice +// may be backed directly by the input. The second return value will be true if +// and only if the slice is backed by a copy of the input. Callers are +// responsible for making a copy if necessary. +func (d *decoder) parseByteString() ([]byte, bool) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + return b, false + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + b = append(b, d.data[d.off:d.off+int(val)]...) + d.off += int(val) + } + return b, true +} + +// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text +// encoding. If no transformation was performed (because it was not required), the original byte +// slice is returned and the bool return value is false. Otherwise, a new slice containing the +// converted bytes is returned along with the bool value true. 
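+//
+// For example (illustrative note), decoding 22(h'48656c6c6f') into a Go string
+// with ByteStringToStringAllowedWithExpectedLaterEncoding yields "SGVsbG8=", the
+// base64 encoding of the enclosed bytes "Hello".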
+func (d *decoder) applyByteStringTextConversion( + src []byte, + dstType reflect.Type, +) ( + dst []byte, + transformed bool, + err error, +) { + switch dstType.Kind() { + case reflect.String: + if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 { + return src, false, nil + } + + switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] { + case tagNumExpectedLaterEncodingBase64URL: + encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src))) + base64.RawURLEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase64: + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase16: + encoded := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(encoded, src) + return encoded, true, nil + + default: + // If this happens, there is a bug: the decoder has pushed an invalid + // "expected later encoding" tag to the stack. + panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags)) + } + + case reflect.Slice: + if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 { + // Either the destination is not a slice of bytes, or the encoder that + // produced the input indicated an expected text encoding tag and therefore + // the content of the byte string has NOT been text encoded. + return src, false, nil + } + + switch d.dm.byteStringExpectedFormat { + case ByteStringExpectedBase64URL: + decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + n, err := base64.RawURLEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase64: + decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src))) + n, err := base64.StdEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase16: + decoded := make([]byte, hex.DecodedLen(len(src))) + n, err := hex.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err) + } + return decoded[:n], true, nil + } + } + + return src, false, nil +} + +// parseTextString parses CBOR encoded text string. It returns a byte slice +// to prevent creating an extra copy of string. Caller should wrap returned +// byte slice as string when needed. +func (d *decoder) parseTextString() ([]byte, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + return b, nil + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + x := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { + for !d.foundBreak() { + d.skip() // Skip remaining chunk on error + } + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + b = append(b, x...) 
+ } + return b, nil +} + +func (d *decoder) parseArray() ([]interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + v := make([]interface{}, count) + var e interface{} + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + v[i] = e + } + return v, err +} + +func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + if v.IsNil() || v.Cap() < count || count == 0 { + v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count)) + } + v.SetLen(count) + var err error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + } + return err +} + +func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + gi := 0 + vLen := v.Len() + var err error + for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ { + if gi < vLen { + // Read CBOR array element and set array element + if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + gi++ + } else { + d.skip() // Skip remaining CBOR array element + } + } + // Set remaining Go array elements to zero values. + if gi < vLen { + zeroV := reflect.Zero(tInfo.elemTypeInfo.typ) + for ; gi < vLen; gi++ { + v.Index(gi).Set(zeroV) + } + } + return err +} + +func (d *decoder) parseMap() (interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + m := make(map[interface{}]interface{}) + var k, e interface{} + var err, lastErr error + keyCount := 0 + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if k, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + rv := reflect.ValueOf(k) + if !isHashableValue(rv) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + k, converted = convertByteSliceToByteString(k) + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{rv.Type().String()} + } + d.skip() + continue + } + } + + // Parse CBOR map value. + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + m[k] = e + + // Detect duplicate map key. 
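+		// With DupMapKeyEnforcedAPF the first duplicate key stops decoding: the
+		// duplicate key's value is reset to nil, the remaining entries are skipped,
+		// and a DupMapKeyError is returned together with the partially filled map
+		// (illustrative note).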
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := len(m) + if newKeyCount == keyCount { + m[k] = nil + err = &DupMapKeyError{k, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // Skip map key + d.skip() // Skip map value + } + return m, err + } + keyCount = newKeyCount + } + } + return m, err +} + +func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if v.IsNil() { + mapsize := count + if !hasSize { + mapsize = 0 + } + v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize)) + } + keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ + reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind) + var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value + keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable. + var err, lastErr error + keyCount := v.Len() + var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + existingKeys = make(map[interface{}]bool, keyCount) + if keyCount > 0 { + vKeys := v.MapKeys() + for i := 0; i < len(vKeys); i++ { + existingKeys[vKeys[i].Interface()] = true + } + } + } + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if !keyValue.IsValid() { + keyValue = reflect.New(keyType).Elem() + } else if !reuseKey { + if !zeroKeyValue.IsValid() { + zeroKeyValue = reflect.Zero(keyType) + } + keyValue.Set(zeroKeyValue) + } + if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + if keyIsInterfaceType && keyValue.Elem().IsValid() { + if !isHashableValue(keyValue.Elem()) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + var k interface{} + k, converted = convertByteSliceToByteString(keyValue.Elem().Interface()) + if converted { + keyValue.Set(reflect.ValueOf(k)) + } + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()} + } + d.skip() + continue + } + } + } + + // Parse CBOR map value. + if !eleValue.IsValid() { + eleValue = reflect.New(eleType).Elem() + } else if !reuseEle { + if !zeroEleValue.IsValid() { + zeroEleValue = reflect.Zero(eleType) + } + eleValue.Set(zeroEleValue) + } + if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + v.SetMapIndex(keyValue, eleValue) + + // Detect duplicate map key. 
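+		// Unlike parseMap above, the destination Go map may already contain entries,
+		// so existingKeys distinguishes a duplicate within the CBOR map from a key
+		// that was merely present in the destination before decoding (illustrative
+		// note).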
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := v.Len() + if newKeyCount == keyCount { + kvi := keyValue.Interface() + if !existingKeys[kvi] { + v.SetMapIndex(keyValue, reflect.New(eleType).Elem()) + err = &DupMapKeyError{kvi, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // skip map key + d.skip() // skip map value + } + return err + } + delete(existingKeys, kvi) + } + keyCount = newKeyCount + } + } + return err +} + +func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if !structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR array to struct without toarray option", + } + } + + start := d.off + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size + } + if count != len(structType.fields) { + d.off = start + d.skip() + return &UnmarshalTypeError{ + CBORType: cborTypeArray.String(), + GoType: tInfo.typ.String(), + errorMsg: "cannot decode CBOR array to struct with different number of elements", + } + } + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + f := structType.fields[i] + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.typ.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR map to struct with toarray option", + } + } + + var err, lastErr error + + // Get CBOR map size + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + + // Keeps track of matched struct fields + var foundFldIdx []bool + { + const maxStackFields = 128 + if nfields := len(structType.fields); nfields <= maxStackFields { + // For structs with typical field counts, expect that this can be + // stack-allocated. 
+ var a [maxStackFields]bool + foundFldIdx = a[:nfields] + } else { + foundFldIdx = make([]bool, len(structType.fields)) + } + } + + // Keeps track of CBOR map keys to detect duplicate map key + keyCount := 0 + var mapKeys map[interface{}]struct{} + + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + +MapEntryLoop: + for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + var f *field + + // If duplicate field detection is enabled and the key at index j did not match any + // field, k will hold the map key. + var k interface{} + + t := d.nextCBORType() + if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + var keyBytes []byte + if t == cborTypeTextString { + keyBytes, lastErr = d.parseTextString() + if lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() // skip value + continue + } + } else { // cborTypeByteString + keyBytes, _ = d.parseByteString() + } + + // Check for exact match on field name. + if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + fld := structType.fields[i] + + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{fld.name, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + } + + // Find field with case-insensitive match + if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { + keyLen := len(keyBytes) + keyString := string(keyBytes) + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{keyString, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = string(keyBytes) + } + } else if t <= cborTypeNegativeInt { // uint/int + var nameAsInt int64 + + if t == cborTypePositiveInt { + _, _, val := d.getHead() + nameAsInt = int64(val) + } else { + _, _, val := d.getHead() + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(-1) ^ int64(val) + } + + // Find field + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if fld.keyAsInt && fld.nameAsInt == nameAsInt { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{nameAsInt, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = nameAsInt + } + } else { + if err == nil { + 
err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf("").String(), + errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", + } + } + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + // parse key + k, lastErr = d.parse(true) + if lastErr != nil { + d.skip() // skip value + continue + } + // Detect if CBOR map key can be used as Go map key. + if !isHashableValue(reflect.ValueOf(k)) { + d.skip() // skip value + continue + } + } else { + d.skip() // skip key + } + } + + if f == nil { + if errOnUnknownField { + err = &UnknownFieldError{j} + d.skip() // Skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + + // Two map keys that match the same struct field are immediately considered + // duplicates. This check detects duplicates between two map keys that do + // not match a struct field. If unknown field errors are enabled, then this + // check is never reached. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if mapKeys == nil { + mapKeys = make(map[interface{}]struct{}, 1) + } + mapKeys[k] = struct{}{} + newKeyCount := len(mapKeys) + if newKeyCount == keyCount { + err = &DupMapKeyError{k, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + keyCount = newKeyCount + } + + d.skip() // Skip value + continue + } + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// validRegisteredTagNums verifies that tag numbers match registered tag numbers of type t. +// validRegisteredTagNums assumes next CBOR data type is tag. It scans all tag numbers, and stops at tag content. +func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error { + // Scan until next cbor data is tag content. + tagNums := make([]uint64, 0, 1) + for d.nextCBORType() == cborTypeTag { + _, _, val := d.getHead() + tagNums = append(tagNums, val) + } + + if !registeredTag.equalTagNum(tagNums) { + return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums} + } + return nil +} + +func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem { + if d.dm.tags != nil { + return d.dm.tags.getTagItemFromType(vt) + } + return nil +} + +// skip moves data offset to the next item. skip assumes data is well-formed, +// and does not perform bounds checking. 
+func (d *decoder) skip() { + t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + + if indefiniteLength { + switch t { + case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap: + for { + if isBreakFlag(d.data[d.off]) { + d.off++ + return + } + d.skip() + } + } + } + + switch t { + case cborTypeByteString, cborTypeTextString: + d.off += int(val) + + case cborTypeArray: + for i := 0; i < int(val); i++ { + d.skip() + } + + case cborTypeMap: + for i := 0; i < int(val)*2; i++ { + d.skip() + } + + case cborTypeTag: + d.skip() + } +} + +func (d *decoder) getHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, +) { + t, ai, val = d.getHead() + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +// getHead assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) getHead() (t cborType, ai byte, val uint64) { + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + + if ai <= maxAdditionalInformationWithoutArgument { + return + } + + if ai == additionalInformationWith1ByteArgument { + val = uint64(d.data[d.off]) + d.off++ + return + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + return + } + return +} + +func (d *decoder) numOfItemsUntilBreak() int { + savedOff := d.off + i := 0 + for !d.foundBreak() { + d.skip() + i++ + } + d.off = savedOff + return i +} + +// foundBreak returns true if next byte is CBOR break code and moves cursor by 1, +// otherwise it returns false. +// foundBreak assumes data is well-formed, and does not perform bounds checking. 
+func (d *decoder) foundBreak() bool { + if isBreakFlag(d.data[d.off]) { + d.off++ + return true + } + return false +} + +func (d *decoder) reset(data []byte) { + d.data = data + d.off = 0 + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0] +} + +func (d *decoder) nextCBORType() cborType { + return getType(d.data[d.off]) +} + +func (d *decoder) nextCBORNil() bool { + return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7 +} + +var ( + typeIntf = reflect.TypeOf([]interface{}(nil)).Elem() + typeTime = reflect.TypeOf(time.Time{}) + typeBigInt = reflect.TypeOf(big.Int{}) + typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + typeString = reflect.TypeOf("") + typeByteSlice = reflect.TypeOf([]byte(nil)) +) + +func fillNil(_ cborType, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr: + v.Set(reflect.Zero(v.Type())) + return nil + } + return nil +} + +func fillPositiveInt(t cborType, val uint64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if val > math.MaxInt64 { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + if v.OverflowInt(int64(val)) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(int64(val)) + return nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if v.OverflowUint(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetUint(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + + if v.Type() == typeBigInt { + i := new(big.Int).SetUint64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillNegativeInt(t cborType, val int64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.OverflowInt(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + if v.Type() == typeBigInt { + i := new(big.Int).SetInt64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillBool(t cborType, val bool, v reflect.Value) error { + if v.Kind() == reflect.Bool { + v.SetBool(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillFloat(t cborType, val float64, v reflect.Value) error { + switch v.Kind() { + case reflect.Float32, reflect.Float64: + if v.OverflowFloat(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(), + } + } + v.SetFloat(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: 
v.Type().String()} +} + +func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error { + if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) { + if v.CanAddr() { + v = v.Addr() + if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok { + // The contract of BinaryUnmarshaler forbids + // retaining the input bytes, so no copying is + // required even if val is shared. + return u.UnmarshalBinary(val) + } + } + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + src := val + if shared { + // SetBytes shares the underlying bytes of the source slice. + src = make([]byte, len(val)) + copy(src, val) + } + v.SetBytes(src) + return nil + } + if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { + vLen := v.Len() + i := 0 + for ; i < vLen && i < len(val); i++ { + v.Index(i).SetUint(uint64(val[i])) + } + // Set remaining Go array elements to zero values. + if i < vLen { + zeroV := reflect.Zero(reflect.TypeOf(byte(0))) + for ; i < vLen; i++ { + v.Index(i).Set(zeroV) + } + } + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillTextString(t cborType, val []byte, v reflect.Value) error { + if v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func isImmutableKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + return true + + default: + return false + } +} + +func isHashableValue(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Slice, reflect.Map, reflect.Func: + return false + + case reflect.Struct: + switch rv.Type() { + case typeTag: + tag := rv.Interface().(Tag) + return isHashableValue(reflect.ValueOf(tag.Content)) + case typeBigInt: + return false + } + } + return true +} + +// convertByteSliceToByteString converts []byte to ByteString if +// - v is []byte type, or +// - v is Tag type and tag content type is []byte +// This function also handles nested tags. +// CBOR data is already verified to be well-formed before this function is used, +// so the recursion won't exceed max nested levels. +func convertByteSliceToByteString(v interface{}) (interface{}, bool) { + switch v := v.(type) { + case []byte: + return ByteString(v), true + + case Tag: + content, converted := convertByteSliceToByteString(v.Content) + if converted { + return Tag{Number: v.Number, Content: content}, true + } + } + return v, false +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/constraint/vendor/github.com/fxamacker/cbor/v2/diagnose.go new file mode 100644 index 000000000..44afb8660 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -0,0 +1,724 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "strconv" + "unicode/utf16" + "unicode/utf8" + + "github.com/x448/float16" +) + +// DiagMode is the main interface for CBOR diagnostic notation. +type DiagMode interface { + // Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode. + Diagnose([]byte) (string, error) + + // DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. + DiagnoseFirst([]byte) (string, []byte, error) + + // DiagOptions returns user specified options used to create this DiagMode. + DiagOptions() DiagOptions +} + +// ByteStringEncoding specifies the base encoding that byte strings are notated. +type ByteStringEncoding uint8 + +const ( + // ByteStringBase16Encoding encodes byte strings in base16, without padding. + ByteStringBase16Encoding ByteStringEncoding = iota + + // ByteStringBase32Encoding encodes byte strings in base32, without padding. + ByteStringBase32Encoding + + // ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding. + ByteStringBase32HexEncoding + + // ByteStringBase64Encoding encodes byte strings in base64url, without padding. + ByteStringBase64Encoding + + maxByteStringEncoding +) + +func (bse ByteStringEncoding) valid() error { + if bse >= maxByteStringEncoding { + return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) + } + return nil +} + +// DiagOptions specifies Diag options. +type DiagOptions struct { + // ByteStringEncoding specifies the base encoding that byte strings are notated. + // Default is ByteStringBase16Encoding. + ByteStringEncoding ByteStringEncoding + + // ByteStringHexWhitespace specifies notating with whitespace in byte string + // when ByteStringEncoding is ByteStringBase16Encoding. + ByteStringHexWhitespace bool + + // ByteStringText specifies notating with text in byte string + // if it is a valid UTF-8 text. + ByteStringText bool + + // ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string + // if it is a valid CBOR bytes. + ByteStringEmbeddedCBOR bool + + // CBORSequence specifies notating CBOR sequences. + // otherwise, it returns an error if there are more bytes after the first CBOR. + CBORSequence bool + + // FloatPrecisionIndicator specifies appending a suffix to indicate float precision. + // Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators. + FloatPrecisionIndicator bool + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int +} + +// DiagMode returns a DiagMode with immutable options. 
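+//
+// A minimal usage sketch (hedged; the option value and variable names below
+// are illustrative, not defaults):
+//
+//	dm, err := cbor.DiagOptions{ByteStringText: true}.DiagMode()
+//	if err != nil {
+//		// handle invalid options
+//	}
+//	edn, err := dm.Diagnose(data)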
+func (opts DiagOptions) DiagMode() (DiagMode, error) { + return opts.diagMode() +} + +func (opts DiagOptions) diagMode() (*diagMode, error) { + if err := opts.ByteStringEncoding.valid(); err != nil { + return nil, err + } + + decMode, err := DecOptions{ + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.decMode() + if err != nil { + return nil, err + } + + return &diagMode{ + byteStringEncoding: opts.ByteStringEncoding, + byteStringHexWhitespace: opts.ByteStringHexWhitespace, + byteStringText: opts.ByteStringText, + byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR, + cborSequence: opts.CBORSequence, + floatPrecisionIndicator: opts.FloatPrecisionIndicator, + decMode: decMode, + }, nil +} + +type diagMode struct { + byteStringEncoding ByteStringEncoding + byteStringHexWhitespace bool + byteStringText bool + byteStringEmbeddedCBOR bool + cborSequence bool + floatPrecisionIndicator bool + decMode *decMode +} + +// DiagOptions returns user specified options used to create this DiagMode. +func (dm *diagMode) DiagOptions() DiagOptions { + return DiagOptions{ + ByteStringEncoding: dm.byteStringEncoding, + ByteStringHexWhitespace: dm.byteStringHexWhitespace, + ByteStringText: dm.byteStringText, + ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR, + CBORSequence: dm.cborSequence, + FloatPrecisionIndicator: dm.floatPrecisionIndicator, + MaxNestedLevels: dm.decMode.maxNestedLevels, + MaxArrayElements: dm.decMode.maxArrayElements, + MaxMapPairs: dm.decMode.maxMapPairs, + } +} + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode. +func (dm *diagMode) Diagnose(data []byte) (string, error) { + return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence) +} + +// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. +func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return newDiagnose(data, dm.decMode, dm).diagFirst() +} + +var defaultDiagMode, _ = DiagOptions{}.diagMode() + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items +// using the default diagnostic mode. +// +// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation. +func Diagnose(data []byte) (string, error) { + return defaultDiagMode.Diagnose(data) +} + +// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. 
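+//
+// A minimal sketch of diagnosing only the first item of a CBOR sequence
+// (variable names are illustrative):
+//
+//	edn, rest, err := cbor.DiagnoseFirst(data)
+//	// edn holds the EDN of the first item; rest holds the remaining bytes.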
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return defaultDiagMode.DiagnoseFirst(data) +} + +type diagnose struct { + dm *diagMode + d *decoder + w *bytes.Buffer +} + +func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose { + return &diagnose{ + dm: diagm, + d: &decoder{data: data, dm: decm}, + w: &bytes.Buffer{}, + } +} + +func (di *diagnose) diag(cborSequence bool) (string, error) { + // CBOR Sequence + firstItem := true + for { + switch err := di.wellformed(cborSequence); err { + case nil: + if !firstItem { + di.w.WriteString(", ") + } + firstItem = false + if itemErr := di.item(); itemErr != nil { + return di.w.String(), itemErr + } + + case io.EOF: + if firstItem { + return di.w.String(), err + } + return di.w.String(), nil + + default: + return di.w.String(), err + } + } +} + +func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) { + err = di.wellformed(true) + if err == nil { + err = di.item() + } + + if err == nil { + // Return EDN and the rest of the data slice (which might be len 0) + return di.w.String(), di.d.data[di.d.off:], nil + } + + return di.w.String(), nil, err +} + +func (di *diagnose) wellformed(allowExtraData bool) error { + off := di.d.off + err := di.d.wellformed(allowExtraData, false) + di.d.off = off + return err +} + +func (di *diagnose) item() error { //nolint:gocyclo + initialByte := di.d.data[di.d.off] + switch initialByte { + case cborByteStringWithIndefiniteLengthHead, + cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string + di.d.off++ + if isBreakFlag(di.d.data[di.d.off]) { + di.d.off++ + switch initialByte { + case cborByteStringWithIndefiniteLengthHead: + // indefinite-length bytes with no chunks. + di.w.WriteString(`''_`) + return nil + case cborTextStringWithIndefiniteLengthHead: + // indefinite-length text with no chunks. + di.w.WriteString(`""_`) + return nil + } + } + + di.w.WriteString("(_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // wellformedIndefiniteString() already checked that the next item is a byte/text string. + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(')') + return nil + + case cborArrayWithIndefiniteLengthHead: // indefinite-length array + di.d.off++ + di.w.WriteString("[_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(']') + return nil + + case cborMapWithIndefiniteLengthHead: // indefinite-length map + di.d.off++ + di.w.WriteString("{_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // key + if err := di.item(); err != nil { + return err + } + + di.w.WriteString(": ") + + // value + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte('}') + return nil + } + + t := di.d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := di.d.getHead() + di.w.WriteString(strconv.FormatUint(val, 10)) + return nil + + case cborTypeNegativeInt: + _, _, val := di.d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + } + + nValue := int64(-1) ^ int64(val) + di.w.WriteString(strconv.FormatInt(nValue, 10)) + return nil + + case cborTypeByteString: + b, _ := di.d.parseByteString() + return di.encodeByteString(b) + + case cborTypeTextString: + b, err := di.d.parseTextString() + if err != nil { + return err + } + return di.encodeTextString(string(b), '"') + + case cborTypeArray: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('[') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte(']') + return nil + + case cborTypeMap: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('{') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + // key + if err := di.item(); err != nil { + return err + } + di.w.WriteString(": ") + // value + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte('}') + return nil + + case cborTypeTag: + _, _, tagNum := di.d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumUnsignedBignum, + "byte string", + nt.String()) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + di.w.WriteString(bi.String()) + return nil + + case tagNumNegativeBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumNegativeBignum, + "byte string", + nt.String(), + ) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + + default: + di.w.WriteString(strconv.FormatUint(tagNum, 10)) + di.w.WriteByte('(') + if err := di.item(); err != nil { + return err + } + di.w.WriteByte(')') + return nil + } + + case cborTypePrimitives: + _, ai, val := di.d.getHead() + switch ai { + case additionalInformationAsFalse: + di.w.WriteString("false") + return nil + + case additionalInformationAsTrue: + di.w.WriteString("true") + return nil + + case additionalInformationAsNull: + di.w.WriteString("null") + return nil + + case additionalInformationAsUndefined: + di.w.WriteString("undefined") + return nil + + case additionalInformationAsFloat16, + additionalInformationAsFloat32, + additionalInformationAsFloat64: + return di.encodeFloat(ai, val) + + default: + di.w.WriteString("simple(") + di.w.WriteString(strconv.FormatUint(val, 10)) + di.w.WriteByte(')') + return nil + } + } + + return nil +} + +// writeU16 format a rune as "\uxxxx" +func (di *diagnose) writeU16(val rune) { + di.w.WriteString("\\u") + var in [2]byte + in[0] = byte(val >> 8) + in[1] = byte(val) + sz := hex.EncodedLen(len(in)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, in[:]) + di.w.Write(dst) +} + +var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding) +var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func (di *diagnose) encodeByteString(val []byte) error { + if len(val) > 0 { + if di.dm.byteStringText && utf8.Valid(val) { + return di.encodeTextString(string(val), '\'') + } + + if di.dm.byteStringEmbeddedCBOR { + di2 := newDiagnose(val, di.dm.decMode, di.dm) + // should always notating embedded CBOR sequence. 
+ if str, err := di2.diag(true); err == nil { + di.w.WriteString("<<") + di.w.WriteString(str) + di.w.WriteString(">>") + return nil + } + } + } + + switch di.dm.byteStringEncoding { + case ByteStringBase16Encoding: + di.w.WriteString("h'") + if di.dm.byteStringHexWhitespace { + sz := hex.EncodedLen(len(val)) + if len(val) > 0 { + sz += len(val) - 1 + } + di.w.Grow(sz) + + dst := di.w.Bytes()[di.w.Len():] + for i := range val { + if i > 0 { + dst = append(dst, ' ') + } + hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1]) + dst = dst[:len(dst)+2] + } + di.w.Write(dst) + } else { + sz := hex.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, val) + di.w.Write(dst) + } + di.w.WriteByte('\'') + return nil + + case ByteStringBase32Encoding: + di.w.WriteString("b32'") + sz := rawBase32Encoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32Encoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase32HexEncoding: + di.w.WriteString("h32'") + sz := rawBase32HexEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32HexEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase64Encoding: + di.w.WriteString("b64'") + sz := base64.RawURLEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + base64.RawURLEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + default: + // It should not be possible for users to construct a *diagMode with an invalid byte + // string encoding. + panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding)) + } +} + +const utf16SurrSelf = rune(0x10000) + +// quote should be either `'` or `"` +func (di *diagnose) encodeTextString(val string, quote byte) error { + di.w.WriteByte(quote) + + for i := 0; i < len(val); { + if b := val[i]; b < utf8.RuneSelf { + switch { + case b == '\t', b == '\n', b == '\r', b == '\\', b == quote: + di.w.WriteByte('\\') + + switch b { + case '\t': + b = 't' + case '\n': + b = 'n' + case '\r': + b = 'r' + } + di.w.WriteByte(b) + + case b >= ' ' && b <= '~': + di.w.WriteByte(b) + + default: + di.writeU16(rune(b)) + } + + i++ + continue + } + + c, size := utf8.DecodeRuneInString(val[i:]) + switch { + case c == utf8.RuneError: + return &SemanticError{"cbor: invalid UTF-8 string"} + + case c < utf16SurrSelf: + di.writeU16(c) + + default: + c1, c2 := utf16.EncodeRune(c) + di.writeU16(c1) + di.writeU16(c2) + } + + i += size + } + + di.w.WriteByte(quote) + return nil +} + +func (di *diagnose) encodeFloat(ai byte, val uint64) error { + f64 := float64(0) + switch ai { + case additionalInformationAsFloat16: + f16 := float16.Frombits(uint16(val)) + switch { + case f16.IsNaN(): + di.w.WriteString("NaN") + return nil + case f16.IsInf(1): + di.w.WriteString("Infinity") + return nil + case f16.IsInf(-1): + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f16.Float32()) + } + + case additionalInformationAsFloat32: + f32 := math.Float32frombits(uint32(val)) + switch { + case f32 != f32: + di.w.WriteString("NaN") + return nil + case f32 > math.MaxFloat32: + di.w.WriteString("Infinity") + return nil + case f32 < -math.MaxFloat32: + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f32) + } + + case additionalInformationAsFloat64: + f64 = math.Float64frombits(val) + switch { + case f64 
!= f64: + di.w.WriteString("NaN") + return nil + case f64 > math.MaxFloat64: + di.w.WriteString("Infinity") + return nil + case f64 < -math.MaxFloat64: + di.w.WriteString("-Infinity") + return nil + } + } + // Use ES6 number to string conversion which should match most JSON generators. + // Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585 + const bitSize = 64 + b := make([]byte, 0, 32) + if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) { + b = strconv.AppendFloat(b, f64, 'e', -1, bitSize) + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && string(b[n-4:n-1]) == "e-0" { + b = append(b[:n-2], b[n-1]) + } + } else { + b = strconv.AppendFloat(b, f64, 'f', -1, bitSize) + } + + // add decimal point and trailing zero if needed + if bytes.IndexByte(b, '.') < 0 { + if i := bytes.IndexByte(b, 'e'); i < 0 { + b = append(b, '.', '0') + } else { + b = append(b[:i+2], b[i:]...) + b[i] = '.' + b[i+1] = '0' + } + } + + di.w.WriteString(string(b)) + + if di.dm.floatPrecisionIndicator { + switch ai { + case additionalInformationAsFloat16: + di.w.WriteString("_1") + return nil + + case additionalInformationAsFloat32: + di.w.WriteString("_2") + return nil + + case additionalInformationAsFloat64: + di.w.WriteString("_3") + return nil + } + } + + return nil +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/doc.go b/constraint/vendor/github.com/fxamacker/cbor/v2/doc.go new file mode 100644 index 000000000..23f68b984 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/doc.go @@ -0,0 +1,129 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +/* +Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags, +Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding, +CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection. + +Encoding options allow "preferred serialization" by encoding integers and floats +to their smallest forms (e.g. float16) when values fit. + +Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller +and easier to use with structs. + +For example, "toarray" tag makes struct fields encode to CBOR array elements. And +"keyasint" makes a field encode to an element of CBOR map with specified int key. + +Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go + +# Basics + +The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start + +Function signatures identical to encoding/json include: + + Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode. + +Standard interfaces include: + + BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler. + +Custom encoding and decoding is possible by implementing standard interfaces for +user-defined Go types. + +Codec functions are available at package-level (using defaults options) or by +creating modes from options at runtime. + +"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode). + +EncMode and DecMode interfaces are created from EncOptions or DecOptions structs. + + em, err := cbor.EncOptions{...}.EncMode() + em, err := cbor.CanonicalEncOptions().EncMode() + em, err := cbor.CTAP2EncOptions().EncMode() + +Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of +modes won't accidentally change at runtime after they're created. 
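+
+For example, a DecMode that rejects duplicate map keys can be created from
+DecOptions (a sketch; DupMapKey is one of several documented decoding options):
+
+	dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+	if err != nil {
+		// handle invalid options
+	}
+	err = dm.Unmarshal(data, &v)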
+ +Modes are intended to be reused and are safe for concurrent use. + +EncMode and DecMode Interfaces + + // EncMode interface uses immutable options and is safe for concurrent use. + type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions // returns copy of options + } + + // DecMode interface uses immutable options and is safe for concurrent use. + type DecMode interface { + Unmarshal(data []byte, v interface{}) error + NewDecoder(r io.Reader) *Decoder + DecOptions() DecOptions // returns copy of options + } + +Using Default Encoding Mode + + b, err := cbor.Marshal(v) + + encoder := cbor.NewEncoder(w) + err = encoder.Encode(v) + +Using Default Decoding Mode + + err := cbor.Unmarshal(b, &v) + + decoder := cbor.NewDecoder(r) + err = decoder.Decode(&v) + +Creating and Using Encoding Modes + + // Create EncOptions using either struct literal or a function. + opts := cbor.CanonicalEncOptions() + + // If needed, modify encoding options + opts.Time = cbor.TimeUnix + + // Create reusable EncMode interface with immutable options, safe for concurrent use. + em, err := opts.EncMode() + + // Use EncMode like encoding/json, with same function signatures. + b, err := em.Marshal(v) + // or + encoder := em.NewEncoder(w) + err := encoder.Encode(v) + + // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options + // specified during creation of em (encoding mode). + +# CBOR Options + +Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options + +Encoding Options: https://github.com/fxamacker/cbor#encoding-options + +Decoding Options: https://github.com/fxamacker/cbor#decoding-options + +# Struct Tags + +Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected. +If both struct tags are specified then `cbor` is used. + +Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use +very compact formats like COSE and CWT (CBOR Web Tokens) with structs. + +For example, "toarray" makes struct fields encode to array elements. And "keyasint" +makes struct fields encode to elements of CBOR map with int keys. + +https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png + +Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1 + +# Tests and Fuzzing + +Over 375 tests are included in this package. Cover-guided fuzzing is handled by +a private fuzzer that replaced fxamacker/cbor-fuzz years ago. +*/ +package cbor diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/encode.go b/constraint/vendor/github.com/fxamacker/cbor/v2/encode.go new file mode 100644 index 000000000..6508e291d --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -0,0 +1,1989 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/x448/float16" +) + +// Marshal returns the CBOR encoding of v using default encoding options. +// See EncOptions for encoding options. +// +// Marshal uses the following encoding rules: +// +// If value implements the Marshaler interface, Marshal calls its +// MarshalCBOR method. 
+// +// If value implements encoding.BinaryMarshaler, Marhsal calls its +// MarshalBinary method and encode it as CBOR byte string. +// +// Boolean values encode as CBOR booleans (type 7). +// +// Positive integer values encode as CBOR positive integers (type 0). +// +// Negative integer values encode as CBOR negative integers (type 1). +// +// Floating point values encode as CBOR floating points (type 7). +// +// String values encode as CBOR text strings (type 3). +// +// []byte values encode as CBOR byte strings (type 2). +// +// Array and slice values encode as CBOR arrays (type 4). +// +// Map values encode as CBOR maps (type 5). +// +// Struct values encode as CBOR maps (type 5). Each exported struct field +// becomes a pair with field name encoded as CBOR text string (type 3) and +// field value encoded based on its type. See struct tag option "keyasint" +// to encode field name as CBOR integer (type 0 and 1). Also see struct +// tag option "toarray" for special field "_" to encode struct values as +// CBOR array (type 4). +// +// Marshal supports format string stored under the "cbor" key in the struct +// field's tag. CBOR format string can specify the name of the field, +// "omitempty" and "keyasint" options, and special case "-" for field omission. +// If "cbor" key is absent, Marshal uses "json" key. +// +// Struct field name is treated as integer if it has "keyasint" option in +// its format string. The format string must specify an integer as its +// field name. +// +// Special struct field "_" is used to specify struct level options, such as +// "toarray". "toarray" option enables Go struct to be encoded as CBOR array. +// "omitempty" is disabled by "toarray" to ensure that the same number +// of elements are encoded every time. +// +// Anonymous struct fields are marshaled as if their exported fields +// were fields in the outer struct. Marshal follows the same struct fields +// visibility rules used by JSON encoding package. +// +// time.Time values encode as text strings specified in RFC3339 or numerical +// representation of seconds since January 1, 1970 UTC depending on +// EncOptions.Time setting. Also See EncOptions.TimeTag to encode +// time.Time as CBOR tag with tag number 0 or 1. +// +// big.Int values encode as CBOR integers (type 0 and 1) if values fit. +// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See +// EncOptions.BigIntConvert to always encode big.Int values as CBOR +// bignums. +// +// Pointer values encode as the value pointed to. +// +// Interface values encode as the value stored in the interface. +// +// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7). +// +// Values of other types cannot be encoded in CBOR. Attempting +// to encode such a value causes Marshal to return an UnsupportedTypeError. +func Marshal(v interface{}) ([]byte, error) { + return defaultEncMode.Marshal(v) +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses default encoding options. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + return defaultEncMode.MarshalToBuffer(v, buf) +} + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid CBOR. 
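+//
+// A minimal sketch of a type implementing Marshaler (the type and its
+// CBOR representation are illustrative):
+//
+//	type Celsius float64
+//
+//	func (c Celsius) MarshalCBOR() ([]byte, error) {
+//		return cbor.Marshal(float64(c))
+//	}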
+type Marshaler interface { + MarshalCBOR() ([]byte, error) +} + +// MarshalerError represents error from checking encoded CBOR data item +// returned from MarshalCBOR for well-formedness and some very limited tag validation. +type MarshalerError struct { + typ reflect.Type + err error +} + +func (e *MarshalerError) Error() string { + return "cbor: error calling MarshalCBOR for type " + + e.typ.String() + + ": " + e.err.Error() +} + +func (e *MarshalerError) Unwrap() error { + return e.err +} + +// UnsupportedTypeError is returned by Marshal when attempting to encode value +// of an unsupported type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "cbor: unsupported type: " + e.Type.String() +} + +// UnsupportedValueError is returned by Marshal when attempting to encode an +// unsupported value. +type UnsupportedValueError struct { + msg string +} + +func (e *UnsupportedValueError) Error() string { + return "cbor: unsupported value: " + e.msg +} + +// SortMode identifies supported sorting order. +type SortMode int + +const ( + // SortNone encodes map pairs and struct fields in an arbitrary order. + SortNone SortMode = 0 + + // SortLengthFirst causes map keys or struct fields to be sorted such that: + // - If two keys have different lengths, the shorter one sorts earlier; + // - If two keys have the same length, the one with the lower value in + // (byte-wise) lexical order sorts earlier. + // It is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortLengthFirst SortMode = 1 + + // SortBytewiseLexical causes map keys or struct fields to be sorted in the + // bytewise lexicographic order of their deterministic CBOR encodings. + // It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding" + // in RFC 7049bis. + SortBytewiseLexical SortMode = 2 + + // SortShuffle encodes map pairs and struct fields in a shuffled + // order. This mode does not guarantee an unbiased permutation, but it + // does guarantee that the runtime of the shuffle algorithm used will be + // constant. + SortFastShuffle SortMode = 3 + + // SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortCanonical SortMode = SortLengthFirst + + // SortCTAP2 is used in "CTAP2 Canonical CBOR". + SortCTAP2 SortMode = SortBytewiseLexical + + // SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis. + SortCoreDeterministic SortMode = SortBytewiseLexical + + maxSortMode SortMode = 4 +) + +func (sm SortMode) valid() bool { + return sm >= 0 && sm < maxSortMode +} + +// StringMode specifies how to encode Go string values. +type StringMode int + +const ( + // StringToTextString encodes Go string to CBOR text string (major type 3). + StringToTextString StringMode = iota + + // StringToByteString encodes Go string to CBOR byte string (major type 2). + StringToByteString +) + +func (st StringMode) cborType() (cborType, error) { + switch st { + case StringToTextString: + return cborTypeTextString, nil + + case StringToByteString: + return cborTypeByteString, nil + } + return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st))) +} + +// ShortestFloatMode specifies which floating-point format should +// be used as the shortest possible format for CBOR encoding. +// It is not used for encoding Infinity and NaN values. +type ShortestFloatMode int + +const ( + // ShortestFloatNone makes float values encode without any conversion. + // This is the default for ShortestFloatMode in v1. + // E.g. 
a float32 in Go will encode to CBOR float32. And + // a float64 in Go will encode to CBOR float64. + ShortestFloatNone ShortestFloatMode = iota + + // ShortestFloat16 specifies float16 as the shortest form that preserves value. + // E.g. if float64 can convert to float32 while preserving value, then + // encoding will also try to convert float32 to float16. So a float64 might + // encode as CBOR float64, float32 or float16 depending on the value. + ShortestFloat16 + + maxShortestFloat +) + +func (sfm ShortestFloatMode) valid() bool { + return sfm >= 0 && sfm < maxShortestFloat +} + +// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type NaNConvertMode int + +const ( + // NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00). + NaNConvert7e00 NaNConvertMode = iota + + // NaNConvertNone never modifies or converts NaN to other representations + // (float64 NaN stays float64, etc. even if it can use float16 without losing + // any bits). + NaNConvertNone + + // NaNConvertPreserveSignal converts NaN to the smallest form that preserves + // value (quiet bit + payload) as described in RFC 7049bis Draft 12. + NaNConvertPreserveSignal + + // NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves + // NaN payload. + NaNConvertQuiet + + // NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value. + NaNConvertReject + + maxNaNConvert +) + +func (ncm NaNConvertMode) valid() bool { + return ncm >= 0 && ncm < maxNaNConvert +} + +// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type InfConvertMode int + +const ( + // InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16). + InfConvertFloat16 InfConvertMode = iota + + // InfConvertNone never converts (used by CTAP2 Canonical CBOR). + InfConvertNone + + // InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value. + InfConvertReject + + maxInfConvert +) + +func (icm InfConvertMode) valid() bool { + return icm >= 0 && icm < maxInfConvert +} + +// TimeMode specifies how to encode time.Time values. +type TimeMode int + +const ( + // TimeUnix causes time.Time to be encoded as epoch time in integer with second precision. + TimeUnix TimeMode = iota + + // TimeUnixMicro causes time.Time to be encoded as epoch time in float-point rounded to microsecond precision. + TimeUnixMicro + + // TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds, + // otherwise float-point rounded to microsecond precision. + TimeUnixDynamic + + // TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision. + TimeRFC3339 + + // TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision. + TimeRFC3339Nano + + maxTimeMode +) + +func (tm TimeMode) valid() bool { + return tm >= 0 && tm < maxTimeMode +} + +// BigIntConvertMode specifies how to encode big.Int values. +type BigIntConvertMode int + +const ( + // BigIntConvertShortest makes big.Int encode to CBOR integer if value fits. + // E.g. if big.Int value can be converted to CBOR integer while preserving + // value, encoder will encode it to CBOR integer (major type 0 or 1). 
+ BigIntConvertShortest BigIntConvertMode = iota + + // BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without + // converting it to another CBOR type. + BigIntConvertNone + + // BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int. + BigIntConvertReject + + maxBigIntConvert +) + +func (bim BigIntConvertMode) valid() bool { + return bim >= 0 && bim < maxBigIntConvert +} + +// NilContainersMode specifies how to encode nil slices and maps. +type NilContainersMode int + +const ( + // NilContainerAsNull encodes nil slices and maps as CBOR null. + // This is the default. + NilContainerAsNull NilContainersMode = iota + + // NilContainerAsEmpty encodes nil slices and maps as + // empty container (CBOR bytestring, array, or map). + NilContainerAsEmpty + + maxNilContainersMode +) + +func (m NilContainersMode) valid() bool { + return m >= 0 && m < maxNilContainersMode +} + +// OmitEmptyMode specifies how to encode struct fields with omitempty tag. +// The default behavior omits if field value would encode as empty CBOR value. +type OmitEmptyMode int + +const ( + // OmitEmptyCBORValue specifies that struct fields tagged with "omitempty" + // should be omitted from encoding if the field would be encoded as an empty + // CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string, + // empty array, or empty map. + OmitEmptyCBORValue OmitEmptyMode = iota + + // OmitEmptyGoValue specifies that struct fields tagged with "omitempty" + // should be omitted from encoding if the field has an empty Go value, + // defined as false, 0, 0.0, a nil pointer, a nil interface value, and + // any empty array, slice, map, or string. + // This behavior is the same as the current (aka v1) encoding/json package + // included in Go. + OmitEmptyGoValue + + maxOmitEmptyMode +) + +func (om OmitEmptyMode) valid() bool { + return om >= 0 && om < maxOmitEmptyMode +} + +// FieldNameMode specifies the CBOR type to use when encoding struct field names. +type FieldNameMode int + +const ( + // FieldNameToTextString encodes struct fields to CBOR text string (major type 3). + FieldNameToTextString FieldNameMode = iota + + // FieldNameToTextString encodes struct fields to CBOR byte string (major type 2). + FieldNameToByteString + + maxFieldNameMode +) + +func (fnm FieldNameMode) valid() bool { + return fnm >= 0 && fnm < maxFieldNameMode +} + +// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23) +// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will +// always encode unmodified bytes from the byte slice and just wrap it within +// CBOR tag 21, 22, or 23 if specified. +// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. +type ByteSliceLaterFormatMode int + +const ( + // ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // without adding CBOR tag 21, 22, or 23. + ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota + + // ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase64URL + + // ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2). 
+ ByteSliceLaterFormatBase64 + + // ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase16 +) + +func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) { + switch bsefm { + case ByteSliceLaterFormatNone: + return 0, nil + + case ByteSliceLaterFormatBase64URL: + return tagNumExpectedLaterEncodingBase64URL, nil + + case ByteSliceLaterFormatBase64: + return tagNumExpectedLaterEncodingBase64, nil + + case ByteSliceLaterFormatBase16: + return tagNumExpectedLaterEncodingBase16, nil + } + return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm))) +} + +// ByteArrayMode specifies how to encode byte arrays. +type ByteArrayMode int + +const ( + // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical + // length and contents is encoded. + ByteArrayToByteSlice ByteArrayMode = iota + + // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer + // item for each byte in the array. + ByteArrayToArray + + maxByteArrayMode +) + +func (bam ByteArrayMode) valid() bool { + return bam >= 0 && bam < maxByteArrayMode +} + +// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler. +type BinaryMarshalerMode int + +const ( + // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string. + BinaryMarshalerByteString BinaryMarshalerMode = iota + + // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode. + BinaryMarshalerNone + + maxBinaryMarshalerMode +) + +func (bmm BinaryMarshalerMode) valid() bool { + return bmm >= 0 && bmm < maxBinaryMarshalerMode +} + +// EncOptions specifies encoding options. +type EncOptions struct { + // Sort specifies sorting order. + Sort SortMode + + // ShortestFloat specifies the shortest floating-point encoding that preserves + // the value being encoded. + ShortestFloat ShortestFloatMode + + // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode. + NaNConvert NaNConvertMode + + // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode. + InfConvert InfConvertMode + + // BigIntConvert specifies how to encode big.Int values. + BigIntConvert BigIntConvertMode + + // Time specifies how to encode time.Time. + Time TimeMode + + // TimeTag allows time.Time to be encoded with a tag number. + // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. + TimeTag EncTagMode + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // NilContainers specifies how to encode nil slices and maps. + NilContainers NilContainersMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // OmitEmptyMode specifies how to encode struct fields with omitempty tag. + OmitEmpty OmitEmptyMode + + // String specifies which CBOR type to use when encoding Go strings. + // - CBOR text string (major type 3) is default + // - CBOR byte string (major type 2) + String StringMode + + // FieldName specifies the CBOR type to use when encoding struct field names. + FieldName FieldNameMode + + // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23) + // to include (if any) when encoding Go byte slice to CBOR byte string. 
The encoder will + // always encode unmodified bytes from the byte slice and just wrap it within + // CBOR tag 21, 22, or 23 if specified. + // See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. + ByteSliceLaterFormat ByteSliceLaterFormatMode + + // ByteArray specifies how to encode byte arrays. + ByteArray ByteArrayMode + + // BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler. + BinaryMarshaler BinaryMarshalerMode +} + +// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding, +// defined in RFC 7049 Section 3.9 with the following rules: +// +// 1. "Integers must be as small as possible." +// 2. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 3. The keys in every map must be sorted in length-first sorting order. +// See SortLengthFirst for details. +// 4. "Indefinite-length items must be made into definite-length items." +// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might +// need to be added. One example rule might be to have all floats start as a 64-bit +// float, then do a test conversion to a 32-bit float; if the result is the same numeric +// value, use the shorter value and repeat the process with a test conversion to a +// 16-bit float. (This rule selects 16-bit float for positive and negative Infinity +// as well.) Also, there are many representations for NaN. If NaN is an allowed value, +// it must always be represented as 0xf97e00." +func CanonicalEncOptions() EncOptions { + return EncOptions{ + Sort: SortCanonical, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding, +// defined in CTAP specification, with the following rules: +// +// 1. "Integers must be encoded as small as possible." +// 2. "The representations of any floating-point values are not changed." +// 3. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 4. "Indefinite-length items must be made into definite-length items."" +// 5. The keys in every map must be sorted in bytewise lexicographic order. +// See SortBytewiseLexical for details. +// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present." +func CTAP2EncOptions() EncOptions { + return EncOptions{ + Sort: SortCTAP2, + ShortestFloat: ShortestFloatNone, + NaNConvert: NaNConvertNone, + InfConvert: InfConvertNone, + IndefLength: IndefLengthForbidden, + TagsMd: TagsForbidden, + } +} + +// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "Preferred serialization MUST be used. In particular, this means that arguments +// (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST +// be as short as possible" +// "Floating point values also MUST use the shortest form that preserves the value" +// 2. "Indefinite-length items MUST NOT appear." +// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of +// their deterministic encodings." 
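+//
+// A minimal usage sketch:
+//
+//	em, err := cbor.CoreDetEncOptions().EncMode()
+//	if err != nil {
+//		// handle invalid options
+//	}
+//	b, err := em.Marshal(v)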
+func CoreDetEncOptions() EncOptions { + return EncOptions{ + Sort: SortCoreDeterministic, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "The preferred serialization always uses the shortest form of representing the argument +// (Section 3);" +// 2. "it also uses the shortest floating-point encoding that preserves the value being +// encoded (see Section 5.5)." +// "The preferred encoding for a floating-point value is the shortest floating-point encoding +// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the +// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter +// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding +// the shorter significand towards the right reconstitutes the original NaN value (for many +// applications, the single NaN encoding 0xf97e00 will suffice)." +// 3. "Definite length encoding is preferred whenever the length is known at the time the +// serialization of the item starts." +func PreferredUnsortedEncOptions() EncOptions { + return EncOptions{ + Sort: SortNone, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + } +} + +// EncMode returns EncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithTags(tags) +} + +// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.EncTag != EncTagNone { + ts[contentType] = tag + } + } + syncTags.RUnlock() + if len(ts) > 0 { + em.tags = ts + } + return em, nil +} + +// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency). +func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithSharedTags(tags) +} + +// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency). 
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + em.tags = tags + return em, nil +} + +func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.Sort.valid() { + return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort))) + } + if !opts.ShortestFloat.valid() { + return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat))) + } + if !opts.NaNConvert.valid() { + return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert))) + } + if !opts.InfConvert.valid() { + return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert))) + } + if !opts.BigIntConvert.valid() { + return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert))) + } + if !opts.Time.valid() { + return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time))) + } + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + if !opts.NilContainers.valid() { + return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers))) + } + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired { + return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired") + } + if !opts.OmitEmpty.valid() { + return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty))) + } + stringMajorType, err := opts.String.cborType() + if err != nil { + return nil, err + } + if !opts.FieldName.valid() { + return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName))) + } + byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag() + if err != nil { + return nil, err + } + if !opts.ByteArray.valid() { + return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray))) + } + if !opts.BinaryMarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler))) + } + em := encMode{ + sort: opts.Sort, + shortestFloat: opts.ShortestFloat, + nanConvert: opts.NaNConvert, + infConvert: opts.InfConvert, + bigIntConvert: opts.BigIntConvert, + time: opts.Time, + timeTag: opts.TimeTag, + indefLength: opts.IndefLength, + nilContainers: opts.NilContainers, + tagsMd: opts.TagsMd, + omitEmpty: opts.OmitEmpty, + stringType: opts.String, + stringMajorType: stringMajorType, + fieldName: opts.FieldName, + byteSliceLaterFormat: opts.ByteSliceLaterFormat, + byteSliceLaterEncodingTag: byteSliceLaterEncodingTag, + byteArray: opts.ByteArray, + binaryMarshaler: opts.BinaryMarshaler, + } + return &em, nil +} + +// EncMode is the main interface for CBOR encoding. 
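The validation in encMode() above reports configuration mistakes when the mode is built rather than at Marshal time; for example, combining TagsForbidden with a required time tag is rejected. A minimal sketch, assuming the vendored package:

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// TimeTag = EncTagRequired cannot be combined with TagsMd = TagsForbidden.
	_, err := cbor.EncOptions{
		TimeTag: cbor.EncTagRequired,
		TagsMd:  cbor.TagsForbidden,
	}.EncMode()
	fmt.Println(err)
	// cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired
}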
+type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions +} + +// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by +// adding MarshalToBuffer to support user specified buffer rather than encoding +// into the built-in buffer pool. +type UserBufferEncMode interface { + EncMode + MarshalToBuffer(v interface{}, buf *bytes.Buffer) error + + // This private method is to prevent users implementing + // this interface and so future additions to it will + // not be breaking changes. + // See https://go.dev/blog/module-compatibility + unexport() +} + +type encMode struct { + tags tagProvider + sort SortMode + shortestFloat ShortestFloatMode + nanConvert NaNConvertMode + infConvert InfConvertMode + bigIntConvert BigIntConvertMode + time TimeMode + timeTag EncTagMode + indefLength IndefLengthMode + nilContainers NilContainersMode + tagsMd TagsMode + omitEmpty OmitEmptyMode + stringType StringMode + stringMajorType cborType + fieldName FieldNameMode + byteSliceLaterFormat ByteSliceLaterFormatMode + byteSliceLaterEncodingTag uint64 + byteArray ByteArrayMode + binaryMarshaler BinaryMarshalerMode +} + +var defaultEncMode, _ = EncOptions{}.encMode() + +// These four decoding modes are used by getMarshalerDecMode. +// maxNestedLevels, maxArrayElements, and maxMapPairs are +// set to max allowed limits to avoid rejecting Marshaler +// output that would have been the allowable output of a +// non-Marshaler object that exceeds default limits. +var ( + marshalerForbidIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsForbidden, + } + + marshalerAllowIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsForbidden, + } + + marshalerForbidIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsAllowed, + } + + marshalerAllowIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsAllowed, + } +) + +// getMarshalerDecMode returns one of four existing decoding modes +// which can be reused (safe for parallel use) for the purpose of +// checking if data returned by Marshaler is well-formed. +func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode { + switch { + case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed: + return &marshalerAllowIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden: + return &marshalerAllowIndefLengthForbidTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed: + return &marshalerForbidIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden: + return &marshalerForbidIndefLengthForbidTagsDecMode + + default: + // This should never happen, unless we add new options to + // IndefLengthMode or TagsMode without updating this function. 
+ return &decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: indefLength, + tagsMd: tagsMd, + } + } +} + +// EncOptions returns user specified options used to create this EncMode. +func (em *encMode) EncOptions() EncOptions { + return EncOptions{ + Sort: em.sort, + ShortestFloat: em.shortestFloat, + NaNConvert: em.nanConvert, + InfConvert: em.infConvert, + BigIntConvert: em.bigIntConvert, + Time: em.time, + TimeTag: em.timeTag, + IndefLength: em.indefLength, + NilContainers: em.nilContainers, + TagsMd: em.tagsMd, + OmitEmpty: em.omitEmpty, + String: em.stringType, + FieldName: em.fieldName, + ByteSliceLaterFormat: em.byteSliceLaterFormat, + ByteArray: em.byteArray, + BinaryMarshaler: em.binaryMarshaler, + } +} + +func (em *encMode) unexport() {} + +func (em *encMode) encTagBytes(t reflect.Type) []byte { + if em.tags != nil { + if tagItem := em.tags.getTagItemFromType(t); tagItem != nil { + return tagItem.cborTagNum + } + } + return nil +} + +// Marshal returns the CBOR encoding of v using em encoding mode. +// +// See the documentation for Marshal for details. +func (em *encMode) Marshal(v interface{}) ([]byte, error) { + e := getEncodeBuffer() + + if err := encode(e, em, reflect.ValueOf(v)); err != nil { + putEncodeBuffer(e) + return nil, err + } + + buf := make([]byte, e.Len()) + copy(buf, e.Bytes()) + + putEncodeBuffer(e) + return buf, nil +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses em encoding mode. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + if buf == nil { + return fmt.Errorf("cbor: encoding buffer provided by user is nil") + } + return encode(buf, em, reflect.ValueOf(v)) +} + +// NewEncoder returns a new encoder that writes to w using em EncMode. +func (em *encMode) NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, em: em} +} + +// encodeBufferPool caches unused bytes.Buffer objects for later reuse. 
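A sketch of MarshalToBuffer above, which encodes into a caller-owned bytes.Buffer instead of the internal buffer pool described next; the value being encoded is illustrative:

package main

import (
	"bytes"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	em, err := cbor.EncOptions{}.UserBufferEncMode()
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := em.MarshalToBuffer([]int{1, 2, 3}, &buf); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf.Bytes()) // 83010203: definite-length array [1, 2, 3]
}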
+var encodeBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(32) // TODO: make this configurable + return e + }, +} + +func getEncodeBuffer() *bytes.Buffer { + return encodeBufferPool.Get().(*bytes.Buffer) +} + +func putEncodeBuffer(e *bytes.Buffer) { + e.Reset() + encodeBufferPool.Put(e) +} + +type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error +type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error) + +func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if !v.IsValid() { + // v is zero value + e.Write(cborNil) + return nil + } + vt := v.Type() + f, _ := getEncodeFunc(vt) + if f == nil { + return &UnsupportedTypeError{vt} + } + + return f(e, em, v) +} + +func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + b := cborFalse + if v.Bool() { + b = cborTrue + } + e.Write(b) + return nil +} + +func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + i := v.Int() + if i >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(i)) + return nil + } + i = i*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(i)) + return nil +} + +func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypePositiveInt), v.Uint()) + return nil +} + +func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + f64 := v.Float() + if math.IsNaN(f64) { + return encodeNaN(e, em, v) + } + if math.IsInf(f64, 0) { + return encodeInf(e, em, v) + } + fopt := em.shortestFloat + if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) { + // Encode float64 + // Don't use encodeFloat64() because it cannot be inlined. + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64) + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil + } + + f32 := float32(f64) + if fopt == ShortestFloat16 { + var f16 float16.Float16 + p := float16.PrecisionFromfloat32(f32) + if p == float16.PrecisionExact { + // Roundtrip float32->float16->float32 test isn't needed. + f16 = float16.Fromfloat32(f32) + } else if p == float16.PrecisionUnknown { + // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. + f16 = float16.Fromfloat32(f32) + if f16.Float32() == f32 { + p = float16.PrecisionExact + } + } + if p == float16.PrecisionExact { + // Encode float16 + // Don't use encodeFloat16() because it cannot be inlined. + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil + } + } + + // Encode float32 + // Don't use encodeFloat32() because it cannot be inlined. 
+ const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + f64 := v.Float() + switch em.infConvert { + case InfConvertReject: + return &UnsupportedValueError{msg: "floating-point infinity"} + + case InfConvertFloat16: + if f64 > 0 { + e.Write(cborPositiveInfinity) + } else { + e.Write(cborNegativeInfinity) + } + return nil + } + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, f64) + } + return encodeFloat32(e, float32(f64)) +} + +func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error { + switch em.nanConvert { + case NaNConvert7e00: + e.Write(cborNaN) + return nil + + case NaNConvertNone: + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, v.Float()) + } + f32 := float32NaNFromReflectValue(v) + return encodeFloat32(e, f32) + + case NaNConvertReject: + return &UnsupportedValueError{msg: "floating-point NaN"} + + default: // NaNConvertPreserveSignal, NaNConvertQuiet + if v.Kind() == reflect.Float64 { + f64 := v.Float() + f64bits := math.Float64bits(f64) + if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 { + f64bits |= 1 << 51 // Set quiet bit = 1 + f64 = math.Float64frombits(f64bits) + } + // The lower 29 bits are dropped when converting from float64 to float32. + if f64bits&0x1fffffff != 0 { + // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s. + return encodeFloat64(e, f64) + } + // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits. + sign := uint32(f64bits>>32) & (1 << 31) + exp := uint32(0x7f800000) + coef := uint32((f64bits & 0xfffffffffffff) >> 29) + f32bits := sign | exp | coef + f32 := math.Float32frombits(f32bits) + // The lower 13 bits are dropped when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + // Encode NaN as float16 + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. + return encodeFloat16(e, f16) + } + + f32 := float32NaNFromReflectValue(v) + f32bits := math.Float32bits(f32) + if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 { + f32bits |= 1 << 22 // Set quiet bit = 1 + f32 = math.Float32frombits(f32bits) + } + // The lower 13 bits are dropped coef bits when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. 
+ return encodeFloat16(e, f16) + } +} + +func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error { + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat32(e *bytes.Buffer, f32 float32) error { + const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat64(e *bytes.Buffer, f64 float64) error { + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64 + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil +} + +func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + vk := v.Kind() + if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 { + encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag) + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + slen := v.Len() + if slen == 0 { + return e.WriteByte(byte(cborTypeByteString)) + } + encodeHead(e, byte(cborTypeByteString), uint64(slen)) + if vk == reflect.Array { + for i := 0; i < slen; i++ { + e.WriteByte(byte(v.Index(i).Uint())) + } + return nil + } + e.Write(v.Bytes()) + return nil +} + +func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + s := v.String() + encodeHead(e, byte(em.stringMajorType), uint64(len(s))) + e.WriteString(s) + return nil +} + +type arrayEncodeFunc struct { + f encodeFunc +} + +func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 { + return encodeByteString(e, em, v) + } + if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + alen := v.Len() + if alen == 0 { + return e.WriteByte(byte(cborTypeArray)) + } + encodeHead(e, byte(cborTypeArray), uint64(alen)) + for i := 0; i < alen; i++ { + if err := ae.f(e, em, v.Index(i)); err != nil { + return err + } + } + return nil +} + +// encodeKeyValueFunc encodes key/value pairs in map (v). +// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs. +// kvs is used for canonical encoding of map. 
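The float helpers above implement the preferred float encoding quoted earlier: with ShortestFloat16, a float64 that survives a round trip through a narrower width is emitted in that width. A sketch assuming the vendored package; the sample values come from the RFC text quoted in PreferredUnsortedEncOptions:

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	em, err := cbor.PreferredUnsortedEncOptions().EncMode()
	if err != nil {
		panic(err)
	}
	a, _ := em.Marshal(5.5)     // representable exactly as float16
	b, _ := em.Marshal(5555.5)  // needs float32
	fmt.Printf("%x %x\n", a, b) // f94580 fa45ad9c00
}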
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error + +type mapEncodeFunc struct { + e encodeKeyValueFunc +} + +func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + mlen := v.Len() + if mlen == 0 { + return e.WriteByte(byte(cborTypeMap)) + } + + encodeHead(e, byte(cborTypeMap), uint64(mlen)) + if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { + return me.e(e, em, v, nil) + } + + kvsp := getKeyValues(v.Len()) // for sorting keys + defer putKeyValues(kvsp) + kvs := *kvsp + + kvBeginOffset := e.Len() + if err := me.e(e, em, v, kvs); err != nil { + return err + } + kvTotalLen := e.Len() - kvBeginOffset + + // Use the capacity at the tail of the encode buffer as a staging area to rearrange the + // encoded pairs into sorted order. + e.Grow(kvTotalLen) + tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+. + dst := e.Bytes()[kvBeginOffset:] + + if em.sort == SortBytewiseLexical { + sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst}) + } else { + sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst}) + } + + // This is where the encoded bytes are actually rearranged in the output buffer to reflect + // the desired order. + sortedOffset := 0 + for _, kv := range kvs { + copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset]) + sortedOffset += kv.nextOffset - kv.offset + } + copy(dst, tmp[:kvTotalLen]) + + return nil + +} + +// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative +// to the first byte of the first encoded pair. +type keyValue struct { + offset int + valueOffset int + nextOffset int +} + +type bytewiseKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *bytewiseKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *bytewiseKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *bytewiseKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +type lengthFirstKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *lengthFirstKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *lengthFirstKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { + return keyLengthDifference < 0 + } + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +var keyValuePool = sync.Pool{} + +func getKeyValues(length int) *[]keyValue { + v := keyValuePool.Get() + if v == nil { + y := make([]keyValue, length) + return &y + } + x := v.(*[]keyValue) + if cap(*x) >= length { + *x = (*x)[:length] + return x + } + // []keyValue from the pool does not have enough capacity. + // Return it back to the pool and create a new one. 
+ keyValuePool.Put(x) + y := make([]keyValue, length) + return &y +} + +func putKeyValues(x *[]keyValue) { + *x = (*x)[:0] + keyValuePool.Put(x) +} + +func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + flds := structType.fields + + encodeHead(e, byte(cborTypeArray), uint64(len(flds))) + for i := 0; i < len(flds); i++ { + f := flds[i] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Write CBOR nil for null pointer to embedded struct + e.Write(cborNil) + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + } + return nil +} + +func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + flds := structType.getFields(em) + + start := 0 + if em.sort == SortFastShuffle && len(flds) > 0 { + start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting. + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + // Encode head with struct field count. + // Head is rewritten later if actual encoded field count is different from struct field count. + encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) + + kvbegin := e.Len() + kvcount := 0 + for offset := 0; offset < len(flds); offset++ { + f := flds[(start+offset)%len(flds)] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + if f.omitEmpty { + empty, err := f.ief(em, fv) + if err != nil { + return err + } + if empty { + continue + } + } + + if !f.keyAsInt && em.fieldName == FieldNameToByteString { + e.Write(f.cborNameByteString) + } else { // int or text string + e.Write(f.cborName) + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + + kvcount++ + } + + if len(flds) == kvcount { + // Encoded element count in head is the same as actual element count. + return nil + } + + // Overwrite the bytes that were reserved for the head before encoding the map entries. + var actualHeadLen int + { + headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + } + + if actualHeadLen == encodedHeadLen { + // The bytes reserved for the encoded head were exactly the right size, so the + // encoded entries are already in their final positions. + return nil + } + + // We reserved more bytes than needed for the encoded head, based on the number of fields + // encoded. The encoded entries are offset to the right by the number of excess reserved + // bytes. Shift the entries left to remove the gap. 
+ excessReservedBytes := encodedHeadLen - actualHeadLen + dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvbegin:e.Len()] + copy(dst, src) + + // After shifting, the excess bytes are at the end of the output buffer and they are + // garbage. + e.Truncate(e.Len() - excessReservedBytes) + return nil +} + +func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() { + e.Write(cborNil) + return nil + } + return encode(e, em, v.Elem()) +} + +func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { + t := v.Interface().(time.Time) + if t.IsZero() { + e.Write(cborNil) // Even if tag is required, encode as CBOR null. + return nil + } + if em.timeTag == EncTagRequired { + tagNumber := 1 + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + tagNumber = 0 + } + encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) + } + switch em.time { + case TimeUnix: + secs := t.Unix() + return encodeInt(e, em, reflect.ValueOf(secs)) + + case TimeUnixMicro: + t = t.UTC().Round(time.Microsecond) + f := float64(t.UnixNano()) / 1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeUnixDynamic: + t = t.UTC().Round(time.Microsecond) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + if nsecs == 0 { + return encodeInt(e, em, reflect.ValueOf(secs)) + } + f := float64(secs) + float64(nsecs)/1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeRFC3339: + s := t.Format(time.RFC3339) + return encodeString(e, em, reflect.ValueOf(s)) + + default: // TimeRFC3339Nano + s := t.Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + } +} + +func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.bigIntConvert == BigIntConvertReject { + return &UnsupportedTypeError{Type: typeBigInt} + } + + vbi := v.Interface().(big.Int) + sign := vbi.Sign() + bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v + if sign < 0 { + // For negative number, convert to CBOR encoded number (-v-1). 
+ bi.Sub(bi, big.NewInt(1)) + } + + if em.bigIntConvert == BigIntConvertShortest { + if bi.IsUint64() { + if sign >= 0 { + // Encode as CBOR pos int (major type 0) + encodeHead(e, byte(cborTypePositiveInt), bi.Uint64()) + return nil + } + // Encode as CBOR neg int (major type 1) + encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64()) + return nil + } + } + + tagNum := 2 + if sign < 0 { + tagNum = 3 + } + // Write tag number + encodeHead(e, byte(cborTypeTag), uint64(tagNum)) + // Write bignum byte string + b := bi.Bytes() + encodeHead(e, byte(cborTypeByteString), uint64(len(b))) + e.Write(b) + return nil +} + +type binaryMarshalerEncoder struct { + alternateEncode encodeFunc + alternateIsEmpty isEmptyFunc +} + +func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateEncode(e, em, v) + } + + vt := v.Type() + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(vt) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return err + } + if b := em.encTagBytes(vt); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypeByteString), uint64(len(data))) + e.Write(data) + return nil +} + +func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateIsEmpty(em, v) + } + + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return false, err + } + return len(data) == 0, nil +} + +func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden && v.Type() == typeRawTag { + return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden") + } + m, ok := v.Interface().(Marshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(Marshaler) + } + data, err := m.MarshalCBOR() + if err != nil { + return err + } + + // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3. + d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)} + err = d.wellformed(false, true) + if err != nil { + return &MarshalerError{typ: v.Type(), err: err} + } + + e.Write(data) + return nil +} + +func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden { + return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden") + } + + t := v.Interface().(Tag) + + if t.Number == 0 && t.Content == nil { + // Marshal uninitialized cbor.Tag + e.Write(cborNil) + return nil + } + + // Marshal tag number + encodeHead(e, byte(cborTypeTag), t.Number) + + vem := *em // shallow copy + + // For built-in tags, disable settings that may introduce tag validity errors when + // marshaling certain Content values. + switch t.Number { + case tagNumRFC3339Time: + vem.stringType = StringToTextString + vem.stringMajorType = cborTypeTextString + case tagNumUnsignedBignum, tagNumNegativeBignum: + vem.byteSliceLaterFormat = ByteSliceLaterFormatNone + vem.byteSliceLaterEncodingTag = 0 + } + + // Marshal tag content + return encode(e, &vem, reflect.ValueOf(t.Content)) +} + +// encodeHead writes CBOR head of specified type t and returns number of bytes written. 
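The head widths produced by encodeHead (defined next) show up directly in encoded unsigned integers: the argument is stored inline for values up to 23 and otherwise in 1, 2, 4, or 8 following bytes. A minimal sketch assuming the vendored package:

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	for _, n := range []uint64{23, 24, 256, 65536, 1 << 32} {
		b, _ := cbor.Marshal(n)
		fmt.Printf("%d -> %d byte(s): %x\n", n, len(b), b)
	}
	// 23 -> 1 byte (17), 24 -> 2 bytes (1818), 256 -> 3 bytes (190100),
	// 65536 -> 5 bytes (1a00010000), 4294967296 -> 9 bytes (1b0000000100000000)
}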
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int { + if n <= maxAdditionalInformationWithoutArgument { + const headSize = 1 + e.WriteByte(t | byte(n)) + return headSize + } + + if n <= math.MaxUint8 { + const headSize = 2 + scratch := [headSize]byte{ + t | byte(additionalInformationWith1ByteArgument), + byte(n), + } + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint16 { + const headSize = 3 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith2ByteArgument) + binary.BigEndian.PutUint16(scratch[1:], uint16(n)) + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint32 { + const headSize = 5 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith4ByteArgument) + binary.BigEndian.PutUint32(scratch[1:], uint32(n)) + e.Write(scratch[:]) + return headSize + } + + const headSize = 9 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith8ByteArgument) + binary.BigEndian.PutUint64(scratch[1:], n) + e.Write(scratch[:]) + return headSize +} + +var ( + typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() + typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + typeRawMessage = reflect.TypeOf(RawMessage(nil)) + typeByteString = reflect.TypeOf(ByteString("")) +) + +func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) { + k := t.Kind() + if k == reflect.Ptr { + return getEncodeIndirectValueFunc(t), isEmptyPtr + } + switch t { + case typeSimpleValue: + return encodeMarshalerType, isEmptyUint + + case typeTag: + return encodeTag, alwaysNotEmpty + + case typeTime: + return encodeTime, alwaysNotEmpty + + case typeBigInt: + return encodeBigInt, alwaysNotEmpty + + case typeRawMessage: + return encodeMarshalerType, isEmptySlice + + case typeByteString: + return encodeMarshalerType, isEmptyString + } + if reflect.PtrTo(t).Implements(typeMarshaler) { + return encodeMarshalerType, alwaysNotEmpty + } + if reflect.PtrTo(t).Implements(typeBinaryMarshaler) { + defer func() { + // capture encoding method used for modes that disable BinaryMarshaler + bme := binaryMarshalerEncoder{ + alternateEncode: ef, + alternateIsEmpty: ief, + } + ef = bme.encode + ief = bme.isEmpty + }() + } + switch k { + case reflect.Bool: + return encodeBool, isEmptyBool + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt, isEmptyInt + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint, isEmptyUint + + case reflect.Float32, reflect.Float64: + return encodeFloat, isEmptyFloat + + case reflect.String: + return encodeString, isEmptyString + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return encodeByteString, isEmptySlice + } + fallthrough + + case reflect.Array: + f, _ := getEncodeFunc(t.Elem()) + if f == nil { + return nil, nil + } + return arrayEncodeFunc{f: f}.encode, isEmptySlice + + case reflect.Map: + f := getEncodeMapFunc(t) + if f == nil { + return nil, nil + } + return f, isEmptyMap + + case reflect.Struct: + // Get struct's special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + if hasToArrayOption(tag) { + return encodeStructToArray, isEmptyStruct + } + } + } + return encodeStruct, isEmptyStruct + + case reflect.Interface: + return encodeIntf, isEmptyIntf + } + return nil, nil +} + +func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + f, _ := 
getEncodeFunc(t) + if f == nil { + return nil + } + return func(e *bytes.Buffer, em *encMode, v reflect.Value) error { + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + if v.Kind() == reflect.Ptr && v.IsNil() { + e.Write(cborNil) + return nil + } + return f(e, em, v) + } +} + +func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) { + return false, nil +} + +func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) { + return !v.Bool(), nil +} + +func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) { + return v.Int() == 0, nil +} + +func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) { + return v.Uint() == 0, nil +} + +func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) { + return v.Float() == 0.0, nil +} + +func isEmptyString(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return false, err + } + + if em.omitEmpty == OmitEmptyGoValue { + return false, nil + } + + if structType.toArray { + return len(structType.fields) == 0, nil + } + + if len(structType.fields) > len(structType.omitEmptyFieldsIdx) { + return false, nil + } + + for _, i := range structType.omitEmptyFieldsIdx { + f := structType.fields[i] + + // Get field value + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + empty, err := f.ief(em, fv) + if err != nil { + return false, err + } + if !empty { + return false, nil + } + } + return true, nil +} + +func cannotFitFloat32(f64 float64) bool { + f32 := float32(f64) + return float64(f32) != f64 +} + +// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit. +func float32NaNFromReflectValue(v reflect.Value) float32 { + // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400 + p := reflect.New(v.Type()) + p.Elem().Set(v) + f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32) + return f32 +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/constraint/vendor/github.com/fxamacker/cbor/v2/encode_map.go new file mode 100644 index 000000000..8b4b4bbc5 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/encode_map.go @@ -0,0 +1,94 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build go1.20 + +package cbor + +import ( + "bytes" + "reflect" + "sync" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc + kpool, vpool sync.Pool +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + iterk := me.kpool.Get().(*reflect.Value) + defer func() { + iterk.SetZero() + me.kpool.Put(iterk) + }() + iterv := me.vpool.Get().(*reflect.Value) + defer func() { + iterv.SetZero() + me.vpool.Put(iterv) + }() + + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + if err := me.kf(e, em, *iterk); err != nil { + return err + } + if err := me.ef(e, em, *iterv); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + offset := e.Len() + if err := me.kf(e, em, *iterk); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, *iterv); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{ + kf: kf, + ef: ef, + kpool: sync.Pool{ + New: func() interface{} { + rk := reflect.New(t.Key()).Elem() + return &rk + }, + }, + vpool: sync.Pool{ + New: func() interface{} { + rv := reflect.New(t.Elem()).Elem() + return &rv + }, + }, + } + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/constraint/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go new file mode 100644 index 000000000..31c39336d --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go @@ -0,0 +1,60 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build !go1.20 + +package cbor + +import ( + "bytes" + "reflect" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + offset := e.Len() + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef} + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/constraint/vendor/github.com/fxamacker/cbor/v2/simplevalue.go new file mode 100644 index 000000000..de175cee4 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -0,0 +1,69 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" +) + +// SimpleValue represents CBOR simple value. +// CBOR simple value is: +// - an extension point like CBOR tag. +// - a subset of CBOR major type 7 that isn't floating-point. +// - "identified by a number between 0 and 255, but distinct from that number itself". +// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key. +// +// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined". +// Other CBOR simple values are currently unassigned/reserved by IANA. +type SimpleValue uint8 + +var ( + typeSimpleValue = reflect.TypeOf(SimpleValue(0)) +) + +// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7). +func (sv SimpleValue) MarshalCBOR() ([]byte, error) { + // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says: + // "An encoder MUST NOT issue two-byte sequences that start with 0xf8 + // (major type 7, additional information 24) and continue with a byte + // less than 0x20 (32 decimal). Such sequences are not well-formed. + // (This implies that an encoder cannot encode false, true, null, or + // undefined in two-byte sequences and that only the one-byte variants + // of these are well-formed; more generally speaking, each simple value + // only has a single representation variant)." + + switch { + case sv <= maxSimpleValueInAdditionalInformation: + return []byte{byte(cborTypePrimitives) | byte(sv)}, nil + + case sv >= minSimpleValueIn1ByteArgument: + return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil + + default: + return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)} + } +} + +// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue. 
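SimpleValue above round-trips CBOR simple values that have no direct Go analogue; its MarshalCBOR emits the one-byte or two-byte form, and the UnmarshalCBOR method that follows restores it. A sketch assuming the vendored package; simple value 16 is an arbitrary unassigned number:

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	b, err := cbor.Marshal(cbor.SimpleValue(16))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", b) // f0: major type 7, additional information 16

	var sv cbor.SimpleValue
	if err := cbor.Unmarshal(b, &sv); err != nil {
		panic(err)
	}
	fmt.Println(sv) // 16
}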
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error { + if sv == nil { + return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer") + } + + d := decoder{data: data, dm: defaultDecMode} + + typ, ai, val := d.getHead() + + if typ != cborTypePrimitives { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"} + } + if ai > additionalInformationWith1ByteArgument { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"} + } + + // It is safe to cast val to uint8 here because + // - data is already verified to be well-formed CBOR simple value and + // - val is <= math.MaxUint8. + *sv = SimpleValue(val) + return nil +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/stream.go b/constraint/vendor/github.com/fxamacker/cbor/v2/stream.go new file mode 100644 index 000000000..507ab6c18 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -0,0 +1,277 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "errors" + "io" + "reflect" +) + +// Decoder reads and decodes CBOR values from io.Reader. +type Decoder struct { + r io.Reader + d decoder + buf []byte + off int // next read offset in buf + bytesRead int +} + +// NewDecoder returns a new decoder that reads and decodes from r using +// the default decoding options. +func NewDecoder(r io.Reader) *Decoder { + return defaultDecMode.NewDecoder(r) +} + +// Decode reads CBOR value and decodes it into the value pointed to by v. +func (dec *Decoder) Decode(v interface{}) error { + _, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.d.reset(dec.buf[dec.off:]) + err = dec.d.value(v) + + // Increment dec.off even if decoding err is not nil because + // dec.d.off points to the next CBOR data item if current + // CBOR data item is valid but failed to be decoded into v. + // This allows next CBOR data item to be decoded in next + // call to this function. + dec.off += dec.d.off + dec.bytesRead += dec.d.off + + return err +} + +// Skip skips to the next CBOR data item (if there is any), +// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc. +func (dec *Decoder) Skip() error { + n, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.off += n + dec.bytesRead += n + return nil +} + +// NumBytesRead returns the number of bytes read. +func (dec *Decoder) NumBytesRead() int { + return dec.bytesRead +} + +// Buffered returns a reader for data remaining in Decoder's buffer. +// Returned reader is valid until the next call to Decode or Skip. +func (dec *Decoder) Buffered() io.Reader { + return bytes.NewReader(dec.buf[dec.off:]) +} + +// readNext() reads next CBOR data item from Reader to buffer. +// It returns the size of next CBOR data item. +// It also returns validation error or read error if any. +func (dec *Decoder) readNext() (int, error) { + var readErr error + var validErr error + + for { + // Process any unread data in dec.buf. 
+ if dec.off < len(dec.buf) { + dec.d.reset(dec.buf[dec.off:]) + off := dec.off // Save offset before data validation + validErr = dec.d.wellformed(true, false) + dec.off = off // Restore offset + + if validErr == nil { + return dec.d.off, nil + } + + if validErr != io.ErrUnexpectedEOF { + return 0, validErr + } + + // Process last read error on io.ErrUnexpectedEOF. + if readErr != nil { + if readErr == io.EOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + return 0, readErr + } + } + + // More data is needed and there was no read error. + var n int + for n == 0 { + n, readErr = dec.read() + if n == 0 && readErr != nil { + // No more data can be read and read error is encountered. + // At this point, validErr is either nil or io.ErrUnexpectedEOF. + if readErr == io.EOF { + if validErr == io.ErrUnexpectedEOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + } + return 0, readErr + } + } + + // At this point, dec.buf contains new data from last read (n > 0). + } +} + +// read() reads data from Reader to buffer. +// It returns number of bytes read and any read error encountered. +// Postconditions: +// - dec.buf contains previously unread data and new data. +// - dec.off is 0. +func (dec *Decoder) read() (int, error) { + // Grow buf if needed. + const minRead = 512 + if cap(dec.buf)-len(dec.buf)+dec.off < minRead { + oldUnreadBuf := dec.buf[dec.off:] + dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead) + dec.overwriteBuf(oldUnreadBuf) + } + + // Copy unread data over read data and reset off to 0. + if dec.off > 0 { + dec.overwriteBuf(dec.buf[dec.off:]) + } + + // Read from reader and reslice buf. + n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) + dec.buf = dec.buf[0 : len(dec.buf)+n] + return n, err +} + +func (dec *Decoder) overwriteBuf(newBuf []byte) { + n := copy(dec.buf, newBuf) + dec.buf = dec.buf[:n] + dec.off = 0 +} + +// Encoder writes CBOR values to io.Writer. +type Encoder struct { + w io.Writer + em *encMode + indefTypes []cborType +} + +// NewEncoder returns a new encoder that writes to w using the default encoding options. +func NewEncoder(w io.Writer) *Encoder { + return defaultEncMode.NewEncoder(w) +} + +// Encode writes the CBOR encoding of v. +func (enc *Encoder) Encode(v interface{}) error { + if len(enc.indefTypes) > 0 && v != nil { + indefType := enc.indefTypes[len(enc.indefTypes)-1] + if indefType == cborTypeTextString { + k := reflect.TypeOf(v).Kind() + if k != reflect.String { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") + } + } else if indefType == cborTypeByteString { + t := reflect.TypeOf(v) + k := t.Kind() + if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string") + } + } + } + + buf := getEncodeBuffer() + + err := encode(buf, enc.em, reflect.ValueOf(v)) + if err == nil { + _, err = enc.w.Write(buf.Bytes()) + } + + putEncodeBuffer(buf) + return err +} + +// StartIndefiniteByteString starts byte string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteByteString() error { + return enc.startIndefinite(cborTypeByteString) +} + +// StartIndefiniteTextString starts text string encoding of indefinite length. 
+// Subsequent calls of (*Encoder).Encode() encodes definite length text strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteTextString() error { + return enc.startIndefinite(cborTypeTextString) +} + +// StartIndefiniteArray starts array encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes elements of the array +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteArray() error { + return enc.startIndefinite(cborTypeArray) +} + +// StartIndefiniteMap starts array encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes elements of the map +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteMap() error { + return enc.startIndefinite(cborTypeMap) +} + +// EndIndefinite closes last opened indefinite length value. +func (enc *Encoder) EndIndefinite() error { + if len(enc.indefTypes) == 0 { + return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") + } + _, err := enc.w.Write([]byte{cborBreakFlag}) + if err == nil { + enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1] + } + return err +} + +var cborIndefHeader = map[cborType][]byte{ + cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, + cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, + cborTypeArray: {cborArrayWithIndefiniteLengthHead}, + cborTypeMap: {cborMapWithIndefiniteLengthHead}, +} + +func (enc *Encoder) startIndefinite(typ cborType) error { + if enc.em.indefLength == IndefLengthForbidden { + return &IndefiniteLengthError{typ} + } + _, err := enc.w.Write(cborIndefHeader[typ]) + if err == nil { + enc.indefTypes = append(enc.indefTypes, typ) + } + return err +} + +// RawMessage is a raw encoded CBOR value. +type RawMessage []byte + +// MarshalCBOR returns m or CBOR nil if m is nil. +func (m RawMessage) MarshalCBOR() ([]byte, error) { + if len(m) == 0 { + return cborNil, nil + } + return m, nil +} + +// UnmarshalCBOR creates a copy of data and saves to *m. +func (m *RawMessage) UnmarshalCBOR(data []byte) error { + if m == nil { + return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer") + } + *m = append((*m)[0:0], data...) + return nil +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/structfields.go b/constraint/vendor/github.com/fxamacker/cbor/v2/structfields.go new file mode 100644 index 000000000..81228acf0 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -0,0 +1,260 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "reflect" + "sort" + "strings" +) + +type field struct { + name string + nameAsInt int64 // used to decoder to match field name with CBOR int + cborName []byte + cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 + idx []int + typ reflect.Type + ef encodeFunc + ief isEmptyFunc + typInfo *typeInfo // used to decoder to reuse type info + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + keyAsInt bool // used to encode/decode field name as int +} + +type fields []*field + +// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth. 
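The streaming Encoder and Decoder from stream.go above support incremental encoding and decoding, including the indefinite-length forms guarded by IndefLengthMode. A sketch assuming the vendored package:

package main

import (
	"bytes"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	var buf bytes.Buffer
	enc := cbor.NewEncoder(&buf)

	if err := enc.StartIndefiniteArray(); err != nil {
		panic(err)
	}
	for i := 1; i <= 3; i++ {
		if err := enc.Encode(i); err != nil {
			panic(err)
		}
	}
	if err := enc.EndIndefinite(); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf.Bytes()) // 9f010203ff

	var out []int
	dec := cbor.NewDecoder(&buf)
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out, dec.NumBytesRead()) // [1 2 3] 5
}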
+type indexFieldSorter struct { + fields fields +} + +func (x *indexFieldSorter) Len() int { + return len(x.fields) +} + +func (x *indexFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *indexFieldSorter) Less(i, j int) bool { + iIdx, jIdx := x.fields[i].idx, x.fields[j].idx + for k := 0; k < len(iIdx) && k < len(jIdx); k++ { + if iIdx[k] != jIdx[k] { + return iIdx[k] < jIdx[k] + } + } + return len(iIdx) <= len(jIdx) +} + +// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. +type nameLevelAndTagFieldSorter struct { + fields fields +} + +func (x *nameLevelAndTagFieldSorter) Len() int { + return len(x.fields) +} + +func (x *nameLevelAndTagFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { + fi, fj := x.fields[i], x.fields[j] + if fi.name != fj.name { + return fi.name < fj.name + } + if len(fi.idx) != len(fj.idx) { + return len(fi.idx) < len(fj.idx) + } + if fi.tagged != fj.tagged { + return fi.tagged + } + return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters. +} + +// getFields returns visible fields of struct type t following visibility rules for JSON encoding. +func getFields(t reflect.Type) (flds fields, structOptions string) { + // Get special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + structOptions = tag + } + } + + // nTypes contains next level anonymous fields' types and indexes + // (there can be multiple fields of the same type at the same level) + flds, nTypes := appendFields(t, nil, nil, nil) + + if len(nTypes) > 0 { + + var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes + vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels + + for len(nTypes) > 0 { + cTypes, nTypes = nTypes, nil + + for t, idx := range cTypes { + // If there are multiple anonymous fields of the same struct type at the same level, all are ignored. + if len(idx) > 1 { + continue + } + + // Anonymous field of the same type at deeper nested level is ignored. + if vTypes[t] { + continue + } + vTypes[t] = true + + flds, nTypes = appendFields(t, idx[0], flds, nTypes) + } + } + } + + sort.Sort(&nameLevelAndTagFieldSorter{flds}) + + // Keep visible fields. + j := 0 // index of next unique field + for i := 0; i < len(flds); { + name := flds[i].name + if i == len(flds)-1 || // last field + name != flds[i+1].name || // field i has unique field name + len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 + (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not + flds[j] = flds[i] + j++ + } + + // Skip fields with the same field name. + for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + } + } + if j != len(flds) { + flds = flds[:j] + } + + // Sort fields by field index + sort.Sort(&indexFieldSorter{flds}) + + return flds, structOptions +} + +// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes . 
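The field-gathering logic in this file honors `cbor` struct tags (falling back to `json` tags), including the keyasint and omitempty options parsed in appendFields below and the special "_" field's toarray option used by encodeStructToArray earlier in the patch. A sketch assuming the vendored package; the record and point types are illustrative:

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// record encodes as a CBOR map with integer keys 1 and 2; Note is omitted when empty.
type record struct {
	ID   uint64 `cbor:"1,keyasint"`
	Name string `cbor:"2,keyasint"`
	Note string `cbor:"note,omitempty"`
}

// point encodes as a fixed-length CBOR array [X, Y] instead of a map.
type point struct {
	_ struct{} `cbor:",toarray"`
	X int
	Y int
}

func main() {
	r, _ := cbor.Marshal(record{ID: 7, Name: "gk"})
	p, _ := cbor.Marshal(point{X: 1, Y: 2})
	fmt.Printf("%x\n%x\n", r, p) // a201070262676b and 820102
}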
+func appendFields( + t reflect.Type, + idx []int, + flds fields, + nTypes map[reflect.Type][][]int, +) ( + _flds fields, + _nTypes map[reflect.Type][][]int, +) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + if !isFieldExportable(f, ft.Kind()) { + continue + } + + tag := f.Tag.Get("cbor") + if tag == "" { + tag = f.Tag.Get("json") + } + if tag == "-" { + continue + } + + tagged := tag != "" + + // Parse field tag options + var tagFieldName string + var omitempty, keyasint bool + for j := 0; tag != ""; j++ { + var token string + idx := strings.IndexByte(tag, ',') + if idx == -1 { + token, tag = tag, "" + } else { + token, tag = tag[:idx], tag[idx+1:] + } + if j == 0 { + tagFieldName = token + } else { + switch token { + case "omitempty": + omitempty = true + case "keyasint": + keyasint = true + } + } + } + + fieldName := tagFieldName + if tagFieldName == "" { + fieldName = f.Name + } + + fIdx := make([]int, len(idx)+1) + copy(fIdx, idx) + fIdx[len(fIdx)-1] = i + + if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" { + flds = append(flds, &field{ + name: fieldName, + idx: fIdx, + typ: f.Type, + omitEmpty: omitempty, + keyAsInt: keyasint, + tagged: tagged}) + } else { + if nTypes == nil { + nTypes = make(map[reflect.Type][][]int) + } + nTypes[ft] = append(nTypes[ft], fIdx) + } + } + + return flds, nTypes +} + +// isFieldExportable returns true if f is an exportable (regular or anonymous) field or +// a nonexportable anonymous field of struct type. +// Nonexportable anonymous field of struct type can contain exportable fields. +func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam + exportable := f.PkgPath == "" + return exportable || (f.Anonymous && fk == reflect.Struct) +} + +type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error) + +// getFieldValue returns field value of struct v by index. When encountering null pointer +// to anonymous (embedded) struct field, f is called with the last traversed field value. +func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) { + fv = v + for i, n := range idx { + fv = fv.Field(n) + + if i < len(idx)-1 { + if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct { + if fv.IsNil() { + // Null pointer to embedded struct field + fv, err = f(fv) + if err != nil || !fv.IsValid() { + return fv, err + } + } + fv = fv.Elem() + } + } + } + return fv, nil +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/tag.go b/constraint/vendor/github.com/fxamacker/cbor/v2/tag.go new file mode 100644 index 000000000..5c4d2b7a4 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/tag.go @@ -0,0 +1,299 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and +// unmarshaling of tag content is subject to any encode and decode options that would apply to +// enclosed data item if it were to appear outside of a tag. +type Tag struct { + Number uint64 + Content interface{} +} + +// RawTag represents CBOR tag data, including tag number and raw tag content. +// RawTag implements Unmarshaler and Marshaler interfaces. +type RawTag struct { + Number uint64 + Content RawMessage +} + +// UnmarshalCBOR sets *t with tag number and raw tag content copied from data. 
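Tag and RawTag above expose tag numbers directly: Tag carries decoded content, while RawTag keeps the content as raw CBOR. A sketch assuming the vendored package; tag number 1000 is arbitrary:

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	b, err := cbor.Marshal(cbor.Tag{Number: 1000, Content: "hi"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", b) // d903e8626869: tag 1000, then the text string "hi"

	var raw cbor.RawTag
	if err := cbor.Unmarshal(b, &raw); err != nil {
		panic(err)
	}
	fmt.Printf("%d %x\n", raw.Number, raw.Content) // 1000 626869
}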
+func (t *RawTag) UnmarshalCBOR(data []byte) error { + if t == nil { + return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and undefined to cbor.RawTag is no-op. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Unmarshal tag number. + typ, _, num := d.getHead() + if typ != cborTypeTag { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()} + } + t.Number = num + + // Unmarshal tag content. + c := d.data[d.off:] + t.Content = make([]byte, len(c)) + copy(t.Content, c) + return nil +} + +// MarshalCBOR returns CBOR encoding of t. +func (t RawTag) MarshalCBOR() ([]byte, error) { + if t.Number == 0 && len(t.Content) == 0 { + // Marshal uninitialized cbor.RawTag + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil + } + + e := getEncodeBuffer() + + encodeHead(e, byte(cborTypeTag), t.Number) + + content := t.Content + if len(content) == 0 { + content = cborNil + } + + buf := make([]byte, len(e.Bytes())+len(content)) + n := copy(buf, e.Bytes()) + copy(buf[n:], content) + + putEncodeBuffer(e) + return buf, nil +} + +// DecTagMode specifies how decoder handles tag number. +type DecTagMode int + +const ( + // DecTagIgnored makes decoder ignore tag number (skips if present). + DecTagIgnored DecTagMode = iota + + // DecTagOptional makes decoder verify tag number if it's present. + DecTagOptional + + // DecTagRequired makes decoder verify tag number and tag number must be present. + DecTagRequired + + maxDecTagMode +) + +func (dtm DecTagMode) valid() bool { + return dtm >= 0 && dtm < maxDecTagMode +} + +// EncTagMode specifies how encoder handles tag number. +type EncTagMode int + +const ( + // EncTagNone makes encoder not encode tag number. + EncTagNone EncTagMode = iota + + // EncTagRequired makes encoder encode tag number. + EncTagRequired + + maxEncTagMode +) + +func (etm EncTagMode) valid() bool { + return etm >= 0 && etm < maxEncTagMode +} + +// TagOptions specifies how encoder and decoder handle tag number. +type TagOptions struct { + DecTag DecTagMode + EncTag EncTagMode +} + +// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode +// to provide CBOR tag support. +type TagSet interface { + // Add adds given tag number(s), content type, and tag options to TagSet. + Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error + + // Remove removes given tag content type from TagSet. 
+	Remove(contentType reflect.Type)
+
+	tagProvider
+}
+
+type tagProvider interface {
+	getTagItemFromType(t reflect.Type) *tagItem
+	getTypeFromTagNum(num []uint64) reflect.Type
+}
+
+type tagItem struct {
+	num         []uint64
+	cborTagNum  []byte
+	contentType reflect.Type
+	opts        TagOptions
+}
+
+func (t *tagItem) equalTagNum(num []uint64) bool {
+	// Fast path to compare 1 tag number
+	if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] {
+		return true
+	}
+
+	if len(t.num) != len(num) {
+		return false
+	}
+
+	for i := 0; i < len(t.num); i++ {
+		if t.num[i] != num[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+type (
+	tagSet map[reflect.Type]*tagItem
+
+	syncTagSet struct {
+		sync.RWMutex
+		t tagSet
+	}
+)
+
+func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem {
+	return t[typ]
+}
+
+func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type {
+	for typ, tag := range t {
+		if tag.equalTagNum(num) {
+			return typ
+		}
+	}
+	return nil
+}
+
+// NewTagSet returns TagSet (safe for concurrency).
+func NewTagSet() TagSet {
+	return &syncTagSet{t: make(map[reflect.Type]*tagItem)}
+}
+
+// Add adds given tag number(s), content type, and tag options to TagSet.
+func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error {
+	if contentType == nil {
+		return errors.New("cbor: cannot add nil content type to TagSet")
+	}
+	for contentType.Kind() == reflect.Ptr {
+		contentType = contentType.Elem()
+	}
+	tag, err := newTagItem(opts, contentType, num, nestedNum...)
+	if err != nil {
+		return err
+	}
+	t.Lock()
+	defer t.Unlock()
+	for typ, ti := range t.t {
+		if typ == contentType {
+			return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet")
+		}
+		if ti.equalTagNum(tag.num) {
+			return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num)
+		}
+	}
+	t.t[contentType] = tag
+	return nil
+}
+
+// Remove removes given tag content type from TagSet.
+func (t *syncTagSet) Remove(contentType reflect.Type) { + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + t.Lock() + delete(t.t, contentType) + t.Unlock() +} + +func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem { + t.RLock() + ti := t.t[typ] + t.RUnlock() + return ti +} + +func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type { + t.RLock() + rt := t.t.getTypeFromTagNum(num) + t.RUnlock() + return rt +} + +func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) { + if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone { + return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet") + } + if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface { + return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String()) + } + if contentType == typeTime { + return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if contentType == typeBigInt { + return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically") + } + if contentType == typeTag { + return nil, errors.New("cbor: cannot add cbor.Tag to TagSet") + } + if contentType == typeRawTag { + return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet") + } + if num == 0 || num == 1 { + return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if num == 2 || num == 3 { + return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically") + } + if num == tagNumSelfDescribedCBOR { + return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically") + } + + te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType} + te.num = append(te.num, nestedNum...) + + // Cache encoded tag numbers + e := getEncodeBuffer() + for _, n := range te.num { + encodeHead(e, byte(cborTypeTag), n) + } + te.cborTagNum = make([]byte, e.Len()) + copy(te.cborTagNum, e.Bytes()) + putEncodeBuffer(e) + + return &te, nil +} + +var ( + typeTag = reflect.TypeOf(Tag{}) + typeRawTag = reflect.TypeOf(RawTag{}) +) + +// WrongTagError describes mismatch between CBOR tag and registered tag. +type WrongTagError struct { + RegisteredType reflect.Type + RegisteredTagNum []uint64 + TagNum []uint64 +} + +func (e *WrongTagError) Error() string { + return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum) +} diff --git a/constraint/vendor/github.com/fxamacker/cbor/v2/valid.go b/constraint/vendor/github.com/fxamacker/cbor/v2/valid.go new file mode 100644 index 000000000..b40793b95 --- /dev/null +++ b/constraint/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -0,0 +1,394 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding/binary" + "errors" + "io" + "math" + "strconv" + + "github.com/x448/float16" +) + +// SyntaxError is a description of a CBOR syntax error. +type SyntaxError struct { + msg string +} + +func (e *SyntaxError) Error() string { return e.msg } + +// SemanticError is a description of a CBOR semantic error. 
+type SemanticError struct {
+	msg string
+}
+
+func (e *SemanticError) Error() string { return e.msg }
+
+// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags.
+type MaxNestedLevelError struct {
+	maxNestedLevels int
+}
+
+func (e *MaxNestedLevelError) Error() string {
+	return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels)
+}
+
+// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays.
+type MaxArrayElementsError struct {
+	maxArrayElements int
+}
+
+func (e *MaxArrayElementsError) Error() string {
+	return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array"
+}
+
+// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps.
+type MaxMapPairsError struct {
+	maxMapPairs int
+}
+
+func (e *MaxMapPairsError) Error() string {
+	return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map"
+}
+
+// IndefiniteLengthError indicates found disallowed indefinite length items.
+type IndefiniteLengthError struct {
+	t cborType
+}
+
+func (e *IndefiniteLengthError) Error() string {
+	return "cbor: indefinite-length " + e.t.String() + " isn't allowed"
+}
+
+// TagsMdError indicates found disallowed CBOR tags.
+type TagsMdError struct {
+}
+
+func (e *TagsMdError) Error() string {
+	return "cbor: CBOR tag isn't allowed"
+}
+
+// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item.
+type ExtraneousDataError struct {
+	numOfBytes int // number of bytes of extraneous data
+	index      int // location of extraneous data
+}
+
+func (e *ExtraneousDataError) Error() string {
+	return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index)
+}
+
+// wellformed checks whether the CBOR data item is well-formed.
+// allowExtraData indicates if extraneous data is allowed after the CBOR data item.
+//   - use allowExtraData = true when using Decoder.Decode()
+//   - use allowExtraData = false when using Unmarshal()
+func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error {
+	if len(d.data) == d.off {
+		return io.EOF
+	}
+	_, err := d.wellformedInternal(0, checkBuiltinTags)
+	if err == nil {
+		if !allowExtraData && d.off != len(d.data) {
+			err = &ExtraneousDataError{len(d.data) - d.off, d.off}
+		}
+	}
+	return err
+}
+
+// wellformedInternal checks data's well-formedness and returns max depth and error.
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo + t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag() + if err != nil { + return 0, err + } + + switch t { + case cborTypeByteString, cborTypeTextString: + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) + } + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") + } + if len(d.data)-d.off < valInt { // valInt+off may overflow integer + return 0, io.ErrUnexpectedEOF + } + d.off += valInt + + case cborTypeArray, cborTypeMap: + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) + } + + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") + } + + if t == cborTypeArray { + if valInt > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if valInt > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + + count := 1 + if t == cborTypeMap { + count = 2 + } + maxDepth := depth + for j := 0; j < count; j++ { + for i := 0; i < valInt; i++ { + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt // Save max depth + } + } + } + depth = maxDepth + + case cborTypeTag: + if d.dm.tagsMd == TagsForbidden { + return 0, &TagsMdError{} + } + + tagNum := val + + // Scan nested tag numbers to avoid recursion. + for { + if len(d.data) == d.off { // Tag number must be followed by tag content. + return 0, io.ErrUnexpectedEOF + } + if checkBuiltinTags { + err = validBuiltinTag(tagNum, d.data[d.off]) + if err != nil { + return 0, err + } + } + if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) { + return 0, &UnacceptableDataItemError{ + CBORType: cborTypeTag.String(), + Message: "bignum", + } + } + if getType(d.data[d.off]) != cborTypeTag { + break + } + if _, _, tagNum, err = d.wellformedHead(); err != nil { + return 0, err + } + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + } + // Check tag content. + return d.wellformedInternal(depth, checkBuiltinTags) + } + + return depth, nil +} + +// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + // Peek ahead to get next type and indefinite length status. 
+ nt, ai := parseInitialByte(d.data[d.off]) + if t != nt { + return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} + } + if additionalInformation(ai).isIndefiniteLength() { + return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"} + } + if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + } + return depth, nil +} + +// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + maxDepth := depth + i := 0 + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt + } + i++ + if t == cborTypeArray { + if i > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if i%2 == 0 && i/2 > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + } + if t == cborTypeMap && i%2 == 1 { + return 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return maxDepth, nil +} + +func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, + err error, +) { + t, ai, val, err = d.wellformedHead() + if err != nil { + return + } + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) { + dataLen := len(d.data) - d.off + if dataLen == 0 { + return 0, 0, 0, io.ErrUnexpectedEOF + } + + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + dataLen-- + + if ai <= maxAdditionalInformationWithoutArgument { + return t, ai, val, nil + } + + if ai == additionalInformationWith1ByteArgument { + const argumentSize = 1 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(d.data[d.off]) + d.off++ + if t == cborTypePrimitives && val < 32 { + return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()} + } + return t, ai, val, nil + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + if t == cborTypePrimitives { + if err 
:= d.acceptableFloat(math.Float64frombits(val)); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if additionalInformation(ai).isIndefiniteLength() { + switch t { + case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag: + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} + case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite(). + return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return t, ai, val, nil + } + + // ai == 28, 29, 30 + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} +} + +func (d *decoder) acceptableFloat(f float64) error { + switch { + case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point NaN", + } + case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point infinity", + } + } + return nil +} diff --git a/constraint/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/constraint/vendor/github.com/go-openapi/jsonpointer/.golangci.yml new file mode 100644 index 000000000..22f8d21cc --- /dev/null +++ b/constraint/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/constraint/vendor/github.com/go-openapi/jsonpointer/README.md b/constraint/vendor/github.com/go-openapi/jsonpointer/README.md index 813788aff..0108f1d57 100644 --- a/constraint/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/constraint/vendor/github.com/go-openapi/jsonpointer/README.md @@ -1,6 +1,10 @@ -# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) 
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) An implementation of JSON Pointer - Go language ## Status diff --git a/constraint/vendor/github.com/go-openapi/jsonpointer/pointer.go b/constraint/vendor/github.com/go-openapi/jsonpointer/pointer.go index 7df9853de..d970c7cf4 100644 --- a/constraint/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/constraint/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -26,6 +26,7 @@ package jsonpointer import ( + "encoding/json" "errors" "fmt" "reflect" @@ -40,6 +41,7 @@ const ( pointerSeparator = `/` invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator + notFound = `Can't find the pointer in the document` ) var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() @@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process type JSONPointable interface { - JSONLookup(string) (interface{}, error) + JSONLookup(string) (any, error) } // JSONSetable is an interface for structs to implement when they need to customize the // json pointer process type JSONSetable interface { - JSONSet(string, interface{}) error + JSONSet(string, any) error } // New creates a new json pointer for the given string @@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error { err = errors.New(invalidStart) } else { referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } + p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) 
} } @@ -91,38 +91,58 @@ func (p *Pointer) parse(jsonPointerString string) error { } // Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { +func (p *Pointer) Get(document any) (any, reflect.Kind, error) { return p.get(document, swag.DefaultJSONNameProvider) } // Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { +func (p *Pointer) Set(document any, value any) (any, error) { return document, p.set(document, value, swag.DefaultJSONNameProvider) } // GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { +func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) } // SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { +func SetForToken(document any, decodedToken string, value any) (any, error) { return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) } -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func isNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + +func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() + if isNil(node) { + return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken) + } - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) + switch typed := node.(type) { + case JSONPointable: + r, err := typed.JSONLookup(decodedToken) if err != nil { return nil, kind, err } return r, kind, nil + case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect + return getSingleImpl(*typed, decodedToken, nameProvider) } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -159,7 +179,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam } -func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { +func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { rValue := reflect.Indirect(reflect.ValueOf(node)) if ns, ok := node.(JSONSetable); ok { // pointer impl @@ -170,7 +190,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw return node.(JSONSetable).JSONSet(decodedToken, data) } - switch rValue.Kind() { + switch rValue.Kind() { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -210,7 +230,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw } -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func (p *Pointer) get(node any, 
nameProvider *swag.NameProvider) (any, reflect.Kind, error) { if nameProvider == nil { nameProvider = swag.DefaultJSONNameProvider @@ -231,8 +251,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf if err != nil { return nil, knd, err } - node, kind = r, knd - + node = r } rValue := reflect.ValueOf(node) @@ -241,11 +260,11 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf return node, kind, nil } -func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { +func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + return errors.New("only structs, pointers, maps and slices are supported for setting values") } if nameProvider == nil { @@ -284,7 +303,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e continue } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -363,6 +382,128 @@ func (p *Pointer) String() string { return pointerString } +func (p *Pointer) Offset(document string) (int64, error) { + dec := json.NewDecoder(strings.NewReader(document)) + var offset int64 + for _, ttk := range p.DecodedTokens() { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + offset, err = offsetSingleObject(dec, ttk) + if err != nil { + return 0, err + } + case '[': + offset, err = offsetSingleArray(dec, ttk) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return offset, nil +} + +func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { + for dec.More() { + offset := dec.InputOffset() + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + case string: + if tk == decodedToken { + return offset, nil + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return 0, fmt.Errorf("token reference %q not found", decodedToken) +} + +func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { + idx, err := strconv.Atoi(decodedToken) + if err != nil { + return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) + } + var i int + for i = 0; i < idx && dec.More(); i++ { + tk, err := dec.Token() + if err != nil { + return 0, err + } + + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + } + } + + if !dec.More() { + return 0, fmt.Errorf("token reference %q not found", decodedToken) + } + return dec.InputOffset(), nil +} + +// drainSingle drains a single level of object or array. +// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. 
+func drainSingle(dec *json.Decoder) error { + for dec.More() { + tk, err := dec.Token() + if err != nil { + return err + } + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return err + } + case '[': + if err = drainSingle(dec); err != nil { + return err + } + } + } + } + + // Consumes the ending delim + if _, err := dec.Token(); err != nil { + return err + } + return nil +} + // Specific JSON pointer encoding here // ~0 => ~ // ~1 => / @@ -377,14 +518,14 @@ const ( // Unescape unescapes a json pointer reference token string to the original representation func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) + step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) return step2 } // Escape escapes a pointer reference token string func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) + step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) return step2 } diff --git a/constraint/vendor/github.com/go-openapi/swag/.gitignore b/constraint/vendor/github.com/go-openapi/swag/.gitignore index d69b53acc..c4b1b64f0 100644 --- a/constraint/vendor/github.com/go-openapi/swag/.gitignore +++ b/constraint/vendor/github.com/go-openapi/swag/.gitignore @@ -2,3 +2,4 @@ secrets.yml vendor Godeps .idea +*.out diff --git a/constraint/vendor/github.com/go-openapi/swag/.golangci.yml b/constraint/vendor/github.com/go-openapi/swag/.golangci.yml index bf503e400..80e2be004 100644 --- a/constraint/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/constraint/vendor/github.com/go-openapi/swag/.golangci.yml @@ -4,14 +4,14 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 25 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 3 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -20,35 +20,41 @@ linters: - lll - gochecknoinits - gochecknoglobals - - nlreturn - - testpackage + - funlen + - godox + - gocognit + - whitespace + - wsl - wrapcheck + - testpackage + - nlreturn - gomnd - - exhaustive - exhaustivestruct - goerr113 - - wsl - - whitespace - - gofumpt - - godot + - errorlint - nestif - - godox - - funlen - - gci - - gocognit + - godot + - gofumpt - paralleltest + - tparallel - thelper - ifshort - - gomoddirectives - - cyclop - - forcetypeassert - - ireturn - - tagliatelle - - varnamelen - - goimports - - tenv - - golint - exhaustruct - - nilnil + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint - nosnakecase diff --git a/constraint/vendor/github.com/go-openapi/swag/BENCHMARK.md b/constraint/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 000000000..e7f28ed6b --- /dev/null +++ b/constraint/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) 
Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/constraint/vendor/github.com/go-openapi/swag/README.md b/constraint/vendor/github.com/go-openapi/swag/README.md index 217f6fa50..a72922299 100644 --- a/constraint/vendor/github.com/go-openapi/swag/README.md +++ b/constraint/vendor/github.com/go-openapi/swag/README.md @@ -1,7 +1,8 @@ -# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) Contains a bunch of helper functions for go-openapi and go-swagger projects. @@ -18,4 +19,5 @@ You may also use it standalone for your projects. 
This repo has only few dependencies outside of the standard library: -* YAML utilities depend on gopkg.in/yaml.v2 +* YAML utilities depend on `gopkg.in/yaml.v3` +* `github.com/mailru/easyjson v0.7.7` diff --git a/constraint/vendor/github.com/go-openapi/swag/initialism_index.go b/constraint/vendor/github.com/go-openapi/swag/initialism_index.go new file mode 100644 index 000000000..20a359bb6 --- /dev/null +++ b/constraint/vendor/github.com/go-openapi/swag/initialism_index.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "sort" + "strings" + "sync" +) + +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. + commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort 
again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. +type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, _ interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byInitialism(result))) + return +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/constraint/vendor/github.com/go-openapi/swag/loading.go b/constraint/vendor/github.com/go-openapi/swag/loading.go index 00038c377..783442fdd 100644 --- a/constraint/vendor/github.com/go-openapi/swag/loading.go +++ b/constraint/vendor/github.com/go-openapi/swag/loading.go @@ -21,6 +21,7 @@ import ( "net/http" "net/url" "os" + "path" "path/filepath" "runtime" "strings" @@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = "" var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +func LoadFromFileOrHTTP(pth string) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) } -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { +// LoadStrategy returns a loader function for a given path or URI. +// +// The load strategy returns the remote load for any path starting with `http`. +// So this works for any URI with a scheme `http` or `https`. +// +// The fallback strategy is to call the local loader. 
+// +// The local loader takes a local file system path (absolute or relative) as argument, +// or alternatively a `file://...` URI, **without host** (see also below for windows). +// +// There are a few liberalities, initially intended to be tolerant regarding the URI syntax, +// especially on windows. +// +// Before the local loader is called, the given path is transformed: +// - percent-encoded characters are unescaped +// - simple paths (e.g. `./folder/file`) are passed as-is +// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too. +// +// For paths provided as URIs with the "file" scheme, please note that: +// - `file://` is simply stripped. +// This means that the host part of the URI is not parsed at all. +// For example, `file:///folder/file" becomes "/folder/file`, +// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems. +// Similarly, `file://./folder/file` yields `./folder/file`. +// - on windows, `file://...` can take a host so as to specify an UNC share location. +// +// Reminder about windows-specifics: +// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) +// - `file:///c:/folder/file` becomes `C:\folder\file` +// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` +func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(pth, "http") { return remote } - return func(pth string) ([]byte, error) { - upth, err := pathUnescape(pth) + + return func(p string) ([]byte, error) { + upth, err := url.PathUnescape(p) if err != nil { return nil, err } - if strings.HasPrefix(pth, `file://`) { - if runtime.GOOS == "windows" { - // support for canonical file URIs on windows. - // Zero tolerance here for dodgy URIs. - u, _ := url.Parse(upth) - if u.Host != "" { - // assume UNC name (volume share) - // file://host/share/folder\... ==> \\host\share\path\folder - // NOTE: UNC port not yet supported - upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) - } else { - // file:///c:/folder/... ==> just remove the leading slash - upth = strings.TrimPrefix(upth, `file:///`) - } - } else { - upth = strings.TrimPrefix(upth, `file://`) + if !strings.HasPrefix(p, `file://`) { + // regular file path provided: just normalize slashes + return local(filepath.FromSlash(upth)) + } + + if runtime.GOOS != "windows" { + // crude processing: this leaves full URIs with a host with a (mostly) unexpected result + upth = strings.TrimPrefix(upth, `file://`) + + return local(filepath.FromSlash(upth)) + } + + // windows-only pre-processing of file://... URIs + + // support for canonical file URIs on windows. + u, err := url.Parse(filepath.ToSlash(upth)) + if err != nil { + return nil, err + } + + if u.Host != "" { + // assume UNC name (volume share) + // NOTE: UNC port not yet supported + + // when the "host" segment is a drive letter: + // file://C:/folder/... => C:\folder + upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`)) + if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' { + // tolerance: if we have a leading dot, this can't be a host + // file://host/share/folder\... 
==> \\host\share\path\folder + upth = "//" + upth + } + } else { + // no host, let's figure out if this is a drive letter + upth = strings.TrimPrefix(upth, `file://`) + first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/") + if strings.HasSuffix(first, ":") { + // drive letter in the first segment: + // file:///c:/folder/... ==> strip the leading slash + upth = strings.TrimPrefix(upth, `/`) } } diff --git a/constraint/vendor/github.com/go-openapi/swag/name_lexem.go b/constraint/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9bb..8bb64ac32 100644 --- a/constraint/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/constraint/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string + kind lexemKind } +) - casualNameLexem struct { - original string - } +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/constraint/vendor/github.com/go-openapi/swag/post_go18.go b/constraint/vendor/github.com/go-openapi/swag/post_go18.go deleted file mode 100644 index f5228b82c..000000000 --- a/constraint/vendor/github.com/go-openapi/swag/post_go18.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.8 -// +build go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.PathUnescape(path) -} diff --git a/constraint/vendor/github.com/go-openapi/swag/post_go19.go b/constraint/vendor/github.com/go-openapi/swag/post_go19.go deleted file mode 100644 index 7c7da9c08..000000000 --- a/constraint/vendor/github.com/go-openapi/swag/post_go19.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.9 -// +build go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Since go1.9, this may be implemented with sync.Map. -type indexOfInitialisms struct { - sortMutex *sync.Mutex - index *sync.Map -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - sortMutex: new(sync.Mutex), - index: new(sync.Map), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - for k, v := range initial { - m.index.Store(k, v) - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - _, ok := m.index.Load(key) - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.index.Store(key, true) - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - m.index.Range(func(key, value interface{}) bool { - k := key.(string) - result = append(result, k) - return true - }) - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/constraint/vendor/github.com/go-openapi/swag/pre_go18.go b/constraint/vendor/github.com/go-openapi/swag/pre_go18.go deleted file mode 100644 index 2757d9b95..000000000 --- a/constraint/vendor/github.com/go-openapi/swag/pre_go18.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !go1.8 -// +build !go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) -} diff --git a/constraint/vendor/github.com/go-openapi/swag/pre_go19.go b/constraint/vendor/github.com/go-openapi/swag/pre_go19.go deleted file mode 100644 index 0565db377..000000000 --- a/constraint/vendor/github.com/go-openapi/swag/pre_go19.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Before go1.9, this may be implemented with a mutex on the map. -type indexOfInitialisms struct { - getMutex *sync.Mutex - index map[string]bool -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - getMutex: new(sync.Mutex), - index: make(map[string]bool, 50), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k, v := range initial { - m.index[k] = v - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - m.getMutex.Lock() - defer m.getMutex.Unlock() - _, ok := m.index[key] - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - m.index[key] = true - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k := range m.index { - result = append(result, k) - } - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/constraint/vendor/github.com/go-openapi/swag/split.go b/constraint/vendor/github.com/go-openapi/swag/split.go index a1825fb7d..274727a86 100644 --- a/constraint/vendor/github.com/go-openapi/swag/split.go +++ b/constraint/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. 
+ + matchesPool struct { + *sync.Pool } - splitterOption func(*splitter) *splitter + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } ) -// split calls the splitter; splitter provides more control and post options +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. +// +// Use newSplitter for more control and options func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) - for _, lexem := range lexems { + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter + return s } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { +func withPostSplitInitialismCheck(s *splitter) { s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) } - initialismMatches []*initialismMatch -) -func (s *splitter) toNameLexems(name string) []nameLexem { + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p 
buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). + newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } } + + match.complete = true + match.end = currentRunePosition } - match.complete = true - match.end = currentRunePosition + *newMatches = append(*newMatches, match) } - - newMatches = append(newMatches, match) } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = 
newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) - } - - if lastAcceptedMatch.end+1 != len(nameRunes) { + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) 
+ s.appendBrokenDownCasualString(nameLexems, rest) } - return nameLexems -} + poolOfMatches.RedeemMatches(matches) -func (s *splitter) initialismRuneEqual(a, b rune) bool { - return a == b + return nameLexems } -func (s *splitter) breakInitialism(original string) nameLexem { +func (s splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s *splitter) breakCasualString(str []rune) []nameLexem { - segments := make([]nameLexem, 0) - currentSegment := "" +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() addCasualNameLexem := func(original string) { - segments = append(segments, newCasualNameLexem(original)) + *segments = append(*segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - segments = append(segments, newInitialismNameLexem(original, match)) + *segments = append(*segments, newInitialismNameLexem(original, match)) } - addNameLexem := func(original string) { - if s.postSplitInitialismCheck { - for _, initialism := range s.initialisms { - if upper(initialism) == upper(original) { - addInitialismNameLexem(original, initialism) + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + return } } - } - addCasualNameLexem(original) + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem } - for _, rn := range string(str) { - if replace, found := nameReplaceTable[rn]; found { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } if replace != "" { @@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem { } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } continue } if unicode.IsUpper(rn) { - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - currentSegment = "" + currentSegment.Reset() } - currentSegment += string(rn) + currentSegment.WriteRune(rn) + } + + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. 
+func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } - if currentSegment != "" { - addNameLexem(currentSegment) + i += size } - return segments + return true } diff --git a/constraint/vendor/github.com/go-openapi/swag/string_bytes.go b/constraint/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 000000000..90745d5ca --- /dev/null +++ b/constraint/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,8 @@ +package swag + +import "unsafe" + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} diff --git a/constraint/vendor/github.com/go-openapi/swag/util.go b/constraint/vendor/github.com/go-openapi/swag/util.go index f78ab684a..5051401c4 100644 --- a/constraint/vendor/github.com/go-openapi/swag/util.go +++ b/constraint/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. -var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. 
to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { - return strings.Trim(str, " ") + return strings.TrimSpace(str) } // Shortcut to strings.ToUpper() @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,33 +162,40 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { - out = append(out, w.GetOriginal()) + out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(in)) - 
for _, w := range in { - original := w.GetOriginal() + out := make([]string, 0, len(*in)) + for _, w := range *in { + original := trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) } else { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -264,7 +209,7 @@ func ToJSONName(name string) string { out = append(out, lower(w)) continue } - out = append(out, Camelize(w)) + out = append(out, Camelize(trim(w))) } return strings.Join(out, "") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() - result := "" - for _, lexem := range lexems { + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } + + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -341,13 +321,22 @@ type zeroable interface { // IsZero returns true when the value passed into the function is a zero value. // This allows for safer checking of interface values. 
func IsZero(data interface{}) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { //nolint:exhaustive + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return true + } + } + // check for things that have an IsZero method instead if vv, ok := data.(zeroable); ok { return vv.IsZero() } + // continue with slightly more complex reflection - v := reflect.ValueOf(data) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.String: return v.Len() == 0 case reflect.Bool: @@ -358,24 +347,13 @@ func IsZero(data interface{}) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() case reflect.Struct, reflect.Array: return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) case reflect.Invalid: return true + default: + return false } - return false -} - -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() } // CommandLineOptionsGroup represents a group of user-defined command line options diff --git a/constraint/vendor/github.com/go-openapi/swag/yaml.go b/constraint/vendor/github.com/go-openapi/swag/yaml.go index f09ee609f..f59e02593 100644 --- a/constraint/vendor/github.com/go-openapi/swag/yaml.go +++ b/constraint/vendor/github.com/go-openapi/swag/yaml.go @@ -16,8 +16,11 @@ package swag import ( "encoding/json" + "errors" "fmt" "path/filepath" + "reflect" + "sort" "strconv" "github.com/mailru/easyjson/jlexer" @@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, fmt.Errorf("only YAML documents that are objects are supported") + return nil, errors.New("only YAML documents that are objects are supported") } return &document, nil } @@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlTimestamp: return node.Value, nil case yamlNull: - return nil, nil + return nil, nil //nolint:nilnil default: return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) } @@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } +func isNil(input interface{}) bool { + if input == nil { + return true + } + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func json2yaml(item interface{}) (*yaml.Node, error) { + if isNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + switch val := item.(type) { case JSONMapSlice: var n yaml.Node @@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) { case map[string]interface{}: var n yaml.Node n.Kind = yaml.MappingNode - for k, v := range val { + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] childNode, err := json2yaml(v) if err != nil { return nil, err @@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlBoolScalar, Value: strconv.FormatBool(val), }, nil + default: + return 
nil, fmt.Errorf("unhandled type: %T", val) } - return nil, nil } // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice diff --git a/constraint/vendor/github.com/golang/glog/glog.go b/constraint/vendor/github.com/golang/glog/glog.go index 8c00e737a..1b632e077 100644 --- a/constraint/vendor/github.com/golang/glog/glog.go +++ b/constraint/vendor/github.com/golang/glog/glog.go @@ -76,7 +76,7 @@ // -log_backtrace_at=gopherflakes.go:234 // A stack trace will be written to the Info log whenever execution // hits one of these statements. (Unlike with -vmodule, the ".go" -// must bepresent.) +// must be present.) // -v=0 // Enable V-leveled logging at the specified level. // -vmodule="" diff --git a/constraint/vendor/github.com/golang/glog/glog_file.go b/constraint/vendor/github.com/golang/glog/glog_file.go index a1551dbc8..b54bd4052 100644 --- a/constraint/vendor/github.com/golang/glog/glog_file.go +++ b/constraint/vendor/github.com/golang/glog/glog_file.go @@ -26,7 +26,6 @@ import ( "fmt" "io" "os" - "os/user" "path/filepath" "runtime" "strings" @@ -68,9 +67,8 @@ func init() { host = shortHostname(h) } - current, err := user.Current() - if err == nil { - userName = current.Username + if u := lookupUser(); u != "" { + userName = u } // Sanitize userName since it is used to construct file paths. userName = strings.Map(func(r rune) rune { @@ -118,32 +116,53 @@ var onceLogDirs sync.Once // contains tag ("INFO", "FATAL", etc.) and t. If the file is created // successfully, create also attempts to update the symlink for that tag, ignoring // errors. -func create(tag string, t time.Time) (f *os.File, filename string, err error) { +func create(tag string, t time.Time, dir string) (f *os.File, filename string, err error) { + if dir != "" { + f, name, err := createInDir(dir, tag, t) + if err == nil { + return f, name, err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", err) + } + onceLogDirs.Do(createLogDirs) if len(logDirs) == 0 { return nil, "", errors.New("log: no log dirs") } - name, link := logName(tag, t) var lastErr error for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) + f, name, err := createInDir(dir, tag, t) if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - if *logLink != "" { - lsymlink := filepath.Join(*logLink, link) - os.Remove(lsymlink) // ignore err - os.Symlink(fname, lsymlink) // ignore err - } - return f, fname, nil + return f, name, err } lastErr = err } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } +func createInDir(dir, tag string, t time.Time) (f *os.File, name string, err error) { + name, link := logName(tag, t) + fname := filepath.Join(dir, name) + // O_EXCL is important here, as it prevents a vulnerability. The general idea is that logs often + // live in an insecure directory (like /tmp), so an unprivileged attacker could create fname in + // advance as a symlink to a file the logging process can access, but the attacker cannot. O_EXCL + // fails the open if it already exists, thus prevent our this code from opening the existing file + // the attacker points us to. 
+ f, err = os.OpenFile(fname, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + if *logLink != "" { + lsymlink := filepath.Join(*logLink, link) + os.Remove(lsymlink) // ignore err + os.Symlink(fname, lsymlink) // ignore err + } + return f, fname, nil + } + return nil, "", err +} + // flushSyncWriter is the interface satisfied by logging destinations. type flushSyncWriter interface { Flush() error @@ -160,7 +179,10 @@ var sinks struct { func init() { // Register stderr first: that way if we crash during file-writing at least // the log will have gone somewhere. - logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file) + if shouldRegisterStderrSink() { + logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr) + } + logsink.TextSinks = append(logsink.TextSinks, &sinks.file) sinks.file.flushChan = make(chan logsink.Severity, 1) go sinks.file.flushDaemon() @@ -247,6 +269,7 @@ type syncBuffer struct { names []string sev logsink.Severity nbytes uint64 // The number of bytes written to this file + madeAt time.Time } func (sb *syncBuffer) Sync() error { @@ -254,9 +277,14 @@ func (sb *syncBuffer) Sync() error { } func (sb *syncBuffer) Write(p []byte) (n int, err error) { + // Rotate the file if it is too large, but ensure we only do so, + // if rotate doesn't create a conflicting filename. if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - return 0, err + now := timeNow() + if now.After(sb.madeAt.Add(1*time.Second)) || now.Second() != sb.madeAt.Second() { + if err := sb.rotateFile(now); err != nil { + return 0, err + } } } n, err = sb.Writer.Write(p) @@ -274,7 +302,8 @@ const footer = "\nCONTINUED IN NEXT FILE\n" func (sb *syncBuffer) rotateFile(now time.Time) error { var err error pn := "" - file, name, err := create(sb.sev.String(), now) + file, name, err := create(sb.sev.String(), now, "") + sb.madeAt = now if sb.file != nil { // The current log file becomes the previous log at the end of diff --git a/constraint/vendor/github.com/golang/glog/glog_file_nonwindows.go b/constraint/vendor/github.com/golang/glog/glog_file_nonwindows.go new file mode 100644 index 000000000..a0089ba4a --- /dev/null +++ b/constraint/vendor/github.com/golang/glog/glog_file_nonwindows.go @@ -0,0 +1,19 @@ +//go:build !windows + +package glog + +import "os/user" + +// shouldRegisterStderrSink determines whether we should register a log sink that writes to stderr. +// Today, this always returns true on non-Windows platforms, as it specifically checks for a +// condition that is only present on Windows. +func shouldRegisterStderrSink() bool { + return true +} + +func lookupUser() string { + if current, err := user.Current(); err == nil { + return current.Username + } + return "" +} diff --git a/constraint/vendor/github.com/golang/glog/glog_file_windows.go b/constraint/vendor/github.com/golang/glog/glog_file_windows.go new file mode 100644 index 000000000..2f032e19b --- /dev/null +++ b/constraint/vendor/github.com/golang/glog/glog_file_windows.go @@ -0,0 +1,43 @@ +//go:build windows + +package glog + +import ( + "os" + "syscall" +) + +// shouldRegisterStderrSink determines whether we should register a log sink that writes to stderr. +// Today, this checks if stderr is "valid", in that it maps to a non-NULL Handle. 
+// Windows Services are spawned without Stdout and Stderr, so any attempt to use them equates to +// referencing an invalid file Handle. +// os.Stderr's FD is derived from a call to `syscall.GetStdHandle(syscall.STD_ERROR_HANDLE)`. +// Documentation[1] for the GetStdHandle function indicates the return value may be NULL if the +// application lacks the standard handle, so consider Stderr valid if its FD is non-NULL. +// [1]: https://learn.microsoft.com/en-us/windows/console/getstdhandle +func shouldRegisterStderrSink() bool { + return os.Stderr.Fd() != 0 +} + +// This follows the logic in the standard library's user.Current() function, except +// that it leaves out the potentially expensive calls required to look up the user's +// display name in Active Directory. +func lookupUser() string { + token, err := syscall.OpenCurrentProcessToken() + if err != nil { + return "" + } + defer token.Close() + tokenUser, err := token.GetTokenUser() + if err != nil { + return "" + } + username, _, accountType, err := tokenUser.User.Sid.LookupAccount("") + if err != nil { + return "" + } + if accountType != syscall.SidTypeUser { + return "" + } + return username +} diff --git a/constraint/vendor/github.com/google/cel-go/cel/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/cel/BUILD.bazel index 0905f6353..81549fb4c 100644 --- a/constraint/vendor/github.com/google/cel-go/cel/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/cel/BUILD.bazel @@ -10,9 +10,12 @@ go_library( "cel.go", "decls.go", "env.go", + "folding.go", "io.go", + "inlining.go", "library.go", "macro.go", + "optimizer.go", "options.go", "program.go", "validator.go", @@ -36,6 +39,7 @@ go_library( "//common/types/traits:go_default_library", "//interpreter:go_default_library", "//parser:go_default_library", + "@dev_cel_expr//:expr", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protodesc:go_default_library", @@ -56,7 +60,11 @@ go_test( "cel_test.go", "decls_test.go", "env_test.go", + "folding_test.go", "io_test.go", + "inlining_test.go", + "optimizer_test.go", + "validator_test.go", ], data = [ "//cel/testdata:gen_test_fds", @@ -70,10 +78,10 @@ go_test( "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", + "//ext:go_default_library", "//test:go_default_library", "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", - "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//encoding/prototext:go_default_library", diff --git a/constraint/vendor/github.com/google/cel-go/cel/decls.go b/constraint/vendor/github.com/google/cel-go/cel/decls.go index 0f9501341..418806021 100644 --- a/constraint/vendor/github.com/google/cel-go/cel/decls.go +++ b/constraint/vendor/github.com/google/cel-go/cel/decls.go @@ -23,6 +23,7 @@ import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + celpb "cel.dev/expr" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -312,20 +313,34 @@ func ExprTypeToType(t *exprpb.Type) (*Type, error) { // ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function. 
func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) { + return AlphaProtoAsDeclaration(d) +} + +// AlphaProtoAsDeclaration converts a v1alpha1.Decl value describing a variable or function into an EnvOption. +func AlphaProtoAsDeclaration(d *exprpb.Decl) (EnvOption, error) { + canonical := &celpb.Decl{} + if err := convertProto(d, canonical); err != nil { + return nil, err + } + return ProtoAsDeclaration(canonical) +} + +// ProtoAsDeclaration converts a canonical celpb.Decl value describing a variable or function into an EnvOption. +func ProtoAsDeclaration(d *celpb.Decl) (EnvOption, error) { switch d.GetDeclKind().(type) { - case *exprpb.Decl_Function: + case *celpb.Decl_Function: overloads := d.GetFunction().GetOverloads() opts := make([]FunctionOpt, len(overloads)) for i, o := range overloads { args := make([]*Type, len(o.GetParams())) for j, p := range o.GetParams() { - a, err := types.ExprTypeToType(p) + a, err := types.ProtoAsType(p) if err != nil { return nil, err } args[j] = a } - res, err := types.ExprTypeToType(o.GetResultType()) + res, err := types.ProtoAsType(o.GetResultType()) if err != nil { return nil, err } @@ -336,15 +351,15 @@ func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) { } } return Function(d.GetName(), opts...), nil - case *exprpb.Decl_Ident: - t, err := types.ExprTypeToType(d.GetIdent().GetType()) + case *celpb.Decl_Ident: + t, err := types.ProtoAsType(d.GetIdent().GetType()) if err != nil { return nil, err } if d.GetIdent().GetValue() == nil { return Variable(d.GetName(), t), nil } - val, err := ast.ConstantToVal(d.GetIdent().GetValue()) + val, err := ast.ProtoConstantAsVal(d.GetIdent().GetValue()) if err != nil { return nil, err } @@ -353,43 +368,3 @@ func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) { return nil, fmt.Errorf("unsupported decl: %v", d) } } - -func typeValueToKind(tv ref.Type) (Kind, error) { - switch tv { - case types.BoolType: - return BoolKind, nil - case types.DoubleType: - return DoubleKind, nil - case types.IntType: - return IntKind, nil - case types.UintType: - return UintKind, nil - case types.ListType: - return ListKind, nil - case types.MapType: - return MapKind, nil - case types.StringType: - return StringKind, nil - case types.BytesType: - return BytesKind, nil - case types.DurationType: - return DurationKind, nil - case types.TimestampType: - return TimestampKind, nil - case types.NullType: - return NullTypeKind, nil - case types.TypeType: - return TypeKind, nil - default: - switch tv.TypeName() { - case "dyn": - return DynKind, nil - case "google.protobuf.Any": - return AnyKind, nil - case "optional": - return OpaqueKind, nil - default: - return 0, fmt.Errorf("no known conversion for type of %s", tv.TypeName()) - } - } -} diff --git a/constraint/vendor/github.com/google/cel-go/cel/env.go b/constraint/vendor/github.com/google/cel-go/cel/env.go index b5c3b4cc5..ab736b776 100644 --- a/constraint/vendor/github.com/google/cel-go/cel/env.go +++ b/constraint/vendor/github.com/google/cel-go/cel/env.go @@ -38,26 +38,42 @@ type Source = common.Source // Ast representing the checked or unchecked expression, its source, and related metadata such as // source position information. type Ast struct { - expr *exprpb.Expr - info *exprpb.SourceInfo - source Source - refMap map[int64]*celast.ReferenceInfo - typeMap map[int64]*types.Type + source Source + impl *celast.AST +} + +// NativeRep converts the AST to a Go-native representation. 
+func (ast *Ast) NativeRep() *celast.AST { + if ast == nil { + return nil + } + return ast.impl } // Expr returns the proto serializable instance of the parsed/checked expression. +// +// Deprecated: prefer cel.AstToCheckedExpr() or cel.AstToParsedExpr() and call GetExpr() +// the result instead. func (ast *Ast) Expr() *exprpb.Expr { - return ast.expr + if ast == nil { + return nil + } + pbExpr, _ := celast.ExprToProto(ast.NativeRep().Expr()) + return pbExpr } // IsChecked returns whether the Ast value has been successfully type-checked. func (ast *Ast) IsChecked() bool { - return ast.typeMap != nil && len(ast.typeMap) > 0 + return ast.NativeRep().IsChecked() } // SourceInfo returns character offset and newline position information about expression elements. func (ast *Ast) SourceInfo() *exprpb.SourceInfo { - return ast.info + if ast == nil { + return nil + } + pbInfo, _ := celast.SourceInfoToProto(ast.NativeRep().SourceInfo()) + return pbInfo } // ResultType returns the output type of the expression if the Ast has been type-checked, else @@ -65,9 +81,6 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo { // // Deprecated: use OutputType func (ast *Ast) ResultType() *exprpb.Type { - if !ast.IsChecked() { - return chkdecls.Dyn - } out := ast.OutputType() t, err := TypeToExprType(out) if err != nil { @@ -79,16 +92,18 @@ func (ast *Ast) ResultType() *exprpb.Type { // OutputType returns the output type of the expression if the Ast has been type-checked, else // returns cel.DynType as the parse step cannot infer types. func (ast *Ast) OutputType() *Type { - t, found := ast.typeMap[ast.expr.GetId()] - if !found { - return DynType + if ast == nil { + return types.ErrorType } - return t + return ast.NativeRep().GetType(ast.NativeRep().Expr().ID()) } // Source returns a view of the input used to create the Ast. This source may be complete or // constructed from the SourceInfo. func (ast *Ast) Source() Source { + if ast == nil { + return nil + } return ast.source } @@ -198,29 +213,28 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) { // It is possible to have both non-nil Ast and Issues values returned from this call: however, // the mere presence of an Ast does not imply that it is valid for use. func (e *Env) Check(ast *Ast) (*Ast, *Issues) { - // Note, errors aren't currently possible on the Ast to ParsedExpr conversion. - pe, _ := AstToParsedExpr(ast) - // Construct the internal checker env, erroring if there is an issue adding the declarations. chk, err := e.initChecker() if err != nil { errs := common.NewErrors(ast.Source()) errs.ReportError(common.NoLocation, err.Error()) - return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo()) + return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) } - res, errs := checker.Check(pe, ast.Source(), chk) + checked, errs := checker.Check(ast.NativeRep(), ast.Source(), chk) if len(errs.GetErrors()) > 0 { - return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo()) + return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) } // Manually create the Ast to ensure that the Ast source information (which may be more // detailed than the information provided by Check), is returned to the caller. ast = &Ast{ - source: ast.Source(), - expr: res.Expr, - info: res.SourceInfo, - refMap: res.ReferenceMap, - typeMap: res.TypeMap} + source: ast.Source(), + impl: checked} + + // Avoid creating a validator config if it's not needed. 
+ if len(e.validators) == 0 { + return ast, nil + } // Generate a validator configuration from the set of configured validators. vConfig := newValidatorConfig() @@ -230,9 +244,9 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) { } } // Apply additional validators on the type-checked result. - iss := NewIssuesWithSourceInfo(errs, ast.SourceInfo()) + iss := NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) for _, v := range e.validators { - v.Validate(e, vConfig, res, iss) + v.Validate(e, vConfig, checked, iss) } if iss.Err() != nil { return nil, iss @@ -295,17 +309,13 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { copy(chkOptsCopy, e.chkOpts) // Copy the declarations if needed. - varsCopy := []*decls.VariableDecl{} if chk != nil { // If the type-checker has already been instantiated, then the e.declarations have been // validated within the chk instance. chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk)) - } else { - // If the type-checker has not been instantiated, ensure the unvalidated declarations are - // provided to the extended Env instance. - varsCopy = make([]*decls.VariableDecl, len(e.variables)) - copy(varsCopy, e.variables) } + varsCopy := make([]*decls.VariableDecl, len(e.variables)) + copy(varsCopy, e.variables) // Copy macros and program options macsCopy := make([]parser.Macro, len(e.macros)) @@ -402,6 +412,17 @@ func (e *Env) Libraries() []string { return libraries } +// HasFunction returns whether a specific function has been configured in the environment +func (e *Env) HasFunction(functionName string) bool { + _, ok := e.functions[functionName] + return ok +} + +// Functions returns map of Functions, keyed by function name, that have been configured in the environment. +func (e *Env) Functions() map[string]*decls.FunctionDecl { + return e.functions +} + // HasValidator returns whether a specific ASTValidator has been configured in the environment. func (e *Env) HasValidator(name string) bool { for _, v := range e.validators { @@ -429,20 +450,21 @@ func (e *Env) Parse(txt string) (*Ast, *Issues) { // It is possible to have both non-nil Ast and Issues values returned from this call; however, // the mere presence of an Ast does not imply that it is valid for use. func (e *Env) ParseSource(src Source) (*Ast, *Issues) { - res, errs := e.prsr.Parse(src) + parsed, errs := e.prsr.Parse(src) if len(errs.GetErrors()) > 0 { return nil, &Issues{errs: errs} } - // Manually create the Ast to ensure that the text source information is propagated on - // subsequent calls to Check. - return &Ast{ - source: src, - expr: res.GetExpr(), - info: res.GetSourceInfo()}, nil + return &Ast{source: src, impl: parsed}, nil } // Program generates an evaluable instance of the Ast within the environment (Env). func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) { + return e.PlanProgram(ast.NativeRep(), opts...) +} + +// PlanProgram generates an evaluable instance of the AST in the go-native representation within +// the environment (Env). +func (e *Env) PlanProgram(a *celast.AST, opts ...ProgramOption) (Program, error) { optSet := e.progOpts if len(opts) != 0 { mergedOpts := []ProgramOption{} @@ -450,7 +472,7 @@ func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) { mergedOpts = append(mergedOpts, opts...) optSet = mergedOpts } - return newProgram(e, ast, optSet) + return newProgram(e, a, optSet) } // CELTypeAdapter returns the `types.Adapter` configured for the environment. 
@@ -534,8 +556,9 @@ func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) { // TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an // Ast format and then Program again. func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) { - pruned := interpreter.PruneAst(a.Expr(), a.SourceInfo().GetMacroCalls(), details.State()) - expr, err := AstToString(ParsedExprToAst(pruned)) + pruned := interpreter.PruneAst(a.impl.Expr(), a.impl.SourceInfo().MacroCalls(), details.State()) + newAST := &Ast{source: a.Source(), impl: pruned} + expr, err := AstToString(newAST) if err != nil { return nil, err } @@ -556,16 +579,10 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) { // EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and // extension functions provided by estimator. func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) { - checked := &celast.CheckedAST{ - Expr: ast.Expr(), - SourceInfo: ast.SourceInfo(), - TypeMap: ast.typeMap, - ReferenceMap: ast.refMap, - } extendedOpts := make([]checker.CostOption, 0, len(e.costOptions)) extendedOpts = append(extendedOpts, opts...) extendedOpts = append(extendedOpts, e.costOptions...) - return checker.Cost(checked, estimator, extendedOpts...) + return checker.Cost(ast.impl, estimator, extendedOpts...) } // configure applies a series of EnvOptions to the current environment. @@ -707,7 +724,7 @@ type Error = common.Error // Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct. type Issues struct { errs *common.Errors - info *exprpb.SourceInfo + info *celast.SourceInfo } // NewIssues returns an Issues struct from a common.Errors object. @@ -718,7 +735,7 @@ func NewIssues(errs *common.Errors) *Issues { // NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metatata // which can be used with the `ReportErrorAtID` method for additional error reports within the context // information that's inferred from an expression id. -func NewIssuesWithSourceInfo(errs *common.Errors, info *exprpb.SourceInfo) *Issues { +func NewIssuesWithSourceInfo(errs *common.Errors, info *celast.SourceInfo) *Issues { return &Issues{ errs: errs, info: info, @@ -749,10 +766,10 @@ func (i *Issues) Append(other *Issues) *Issues { if i == nil { return other } - if other == nil { + if other == nil || i == other { return i } - return NewIssues(i.errs.Append(other.errs.GetErrors())) + return NewIssuesWithSourceInfo(i.errs.Append(other.errs.GetErrors()), i.info) } // String converts the issues to a suitable display string. @@ -768,30 +785,7 @@ func (i *Issues) String() string { // The source metadata for the expression at `id`, if present, is attached to the error report. // To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo. func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) { - i.errs.ReportErrorAtID(id, locationByID(id, i.info), message, args...) -} - -// locationByID returns a common.Location given an expression id. -// -// TODO: move this functionality into the native SourceInfo and an overhaul of the common.Source -// as this implementation relies on the abstractions present in the protobuf SourceInfo object, -// and is replicated in the checker. 
-func locationByID(id int64, sourceInfo *exprpb.SourceInfo) common.Location { - positions := sourceInfo.GetPositions() - var line = 1 - if offset, found := positions[id]; found { - col := int(offset) - for _, lineOffset := range sourceInfo.GetLineOffsets() { - if lineOffset < offset { - line++ - col = int(offset - lineOffset) - } else { - break - } - } - return common.NewLocation(line, col) - } - return common.NoLocation + i.errs.ReportErrorAtID(id, i.info.GetStartLocation(id), message, args...) } // getStdEnv lazy initializes the CEL standard environment. @@ -809,7 +803,7 @@ type interopCELTypeProvider struct { // FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists. // -// This method proxies to the underyling ref.TypeProvider's FindType method and converts protobuf type +// This method proxies to the underlying ref.TypeProvider's FindType method and converts protobuf type // into a native type representation. If the conversion fails, the type is listed as not found. func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) { if et, found := p.FindType(typeName); found { @@ -822,10 +816,17 @@ func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, b return nil, false } +// FindStructFieldNames returns an empty set of field for the interop provider. +// +// To inspect the field names, migrate to a `types.Provider` implementation. +func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) { + return []string{}, false +} + // FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field // name, if one exists. // -// This method proxies to the underyling ref.TypeProvider's FindFieldType method and converts protobuf type +// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts protobuf type // into a native type representation. If the conversion fails, the type is listed as not found. func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) { if ft, found := p.FindFieldType(structType, fieldName); found { diff --git a/constraint/vendor/github.com/google/cel-go/cel/folding.go b/constraint/vendor/github.com/google/cel-go/cel/folding.go new file mode 100644 index 000000000..d7060896d --- /dev/null +++ b/constraint/vendor/github.com/google/cel-go/cel/folding.go @@ -0,0 +1,559 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "fmt" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +// ConstantFoldingOption defines a functional option for configuring constant folding. 
+type ConstantFoldingOption func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) + +// MaxConstantFoldIterations limits the number of times literals may be folding during optimization. +// +// Defaults to 100 if not set. +func MaxConstantFoldIterations(limit int) ConstantFoldingOption { + return func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) { + opt.maxFoldIterations = limit + return opt, nil + } +} + +// NewConstantFoldingOptimizer creates an optimizer which inlines constant scalar an aggregate +// literal values within function calls and select statements with their evaluated result. +func NewConstantFoldingOptimizer(opts ...ConstantFoldingOption) (ASTOptimizer, error) { + folder := &constantFoldingOptimizer{ + maxFoldIterations: defaultMaxConstantFoldIterations, + } + var err error + for _, o := range opts { + folder, err = o(folder) + if err != nil { + return nil, err + } + } + return folder, nil +} + +type constantFoldingOptimizer struct { + maxFoldIterations int +} + +// Optimize queries the expression graph for scalar and aggregate literal expressions within call and +// select statements and then evaluates them and replaces the call site with the literal result. +// +// Note: only values which can be represented as literals in CEL syntax are supported. +func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST { + root := ast.NavigateAST(a) + + // Walk the list of foldable expression and continue to fold until there are no more folds left. + // All of the fold candidates returned by the constantExprMatcher should succeed unless there's + // a logic bug with the selection of expressions. + foldableExprs := ast.MatchDescendants(root, constantExprMatcher) + foldCount := 0 + for len(foldableExprs) != 0 && foldCount < opt.maxFoldIterations { + for _, fold := range foldableExprs { + // If the expression could be folded because it's a non-strict call, and the + // branches are pruned, continue to the next fold. + if fold.Kind() == ast.CallKind && maybePruneBranches(ctx, fold) { + continue + } + // Otherwise, assume all context is needed to evaluate the expression. + err := tryFold(ctx, a, fold) + if err != nil { + ctx.ReportErrorAtID(fold.ID(), "constant-folding evaluation failed: %v", err.Error()) + return a + } + } + foldCount++ + foldableExprs = ast.MatchDescendants(root, constantExprMatcher) + } + // Once all of the constants have been folded, try to run through the remaining comprehensions + // one last time. In this case, there's no guarantee they'll run, so we only update the + // target comprehension node with the literal value if the evaluation succeeds. + for _, compre := range ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) { + tryFold(ctx, a, compre) + } + + // If the output is a list, map, or struct which contains optional entries, then prune it + // to make sure that the optionals, if resolved, do not surface in the output literal. + pruneOptionalElements(ctx, root) + + // Ensure that all intermediate values in the folded expression can be represented as valid + // CEL literals within the AST structure. Use `PostOrderVisit` rather than `MatchDescendents` + // to avoid extra allocations during this final pass through the AST. 
+ ast.PostOrderVisit(root, ast.NewExprVisitor(func(e ast.Expr) { + if e.Kind() != ast.LiteralKind { + return + } + val := e.AsLiteral() + adapted, err := adaptLiteral(ctx, val) + if err != nil { + ctx.ReportErrorAtID(root.ID(), "constant-folding evaluation failed: %v", err.Error()) + return + } + ctx.UpdateExpr(e, adapted) + })) + + return a +} + +// tryFold attempts to evaluate a sub-expression to a literal. +// +// If the evaluation succeeds, the input expr value will be modified to become a literal, otherwise +// the method will return an error. +func tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error { + // Assume all context is needed to evaluate the expression. + subAST := &Ast{ + impl: ast.NewCheckedAST(ast.NewAST(expr, a.SourceInfo()), a.TypeMap(), a.ReferenceMap()), + } + prg, err := ctx.Program(subAST) + if err != nil { + return err + } + out, _, err := prg.Eval(NoVars()) + if err != nil { + return err + } + // Update the fold expression to be a literal. + ctx.UpdateExpr(expr, ctx.NewLiteral(out)) + return nil +} + +// maybePruneBranches inspects the non-strict call expression to determine whether +// a branch can be removed. Evaluation will naturally prune logical and / or calls, +// but conditional will not be pruned cleanly, so this is one small area where the +// constant folding step reimplements a portion of the evaluator. +func maybePruneBranches(ctx *OptimizerContext, expr ast.NavigableExpr) bool { + call := expr.AsCall() + args := call.Args() + switch call.FunctionName() { + case operators.LogicalAnd, operators.LogicalOr: + return maybeShortcircuitLogic(ctx, call.FunctionName(), args, expr) + case operators.Conditional: + cond := args[0] + truthy := args[1] + falsy := args[2] + if cond.Kind() != ast.LiteralKind { + return false + } + if cond.AsLiteral() == types.True { + ctx.UpdateExpr(expr, truthy) + } else { + ctx.UpdateExpr(expr, falsy) + } + return true + case operators.In: + haystack := args[1] + if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 { + ctx.UpdateExpr(expr, ctx.NewLiteral(types.False)) + return true + } + needle := args[0] + if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind { + needleValue := needle.AsLiteral() + list := haystack.AsList() + for _, e := range list.Elements() { + if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True { + ctx.UpdateExpr(expr, ctx.NewLiteral(types.True)) + return true + } + } + } + } + return false +} + +func maybeShortcircuitLogic(ctx *OptimizerContext, function string, args []ast.Expr, expr ast.NavigableExpr) bool { + shortcircuit := types.False + skip := types.True + if function == operators.LogicalOr { + shortcircuit = types.True + skip = types.False + } + newArgs := []ast.Expr{} + for _, arg := range args { + if arg.Kind() != ast.LiteralKind { + newArgs = append(newArgs, arg) + continue + } + if arg.AsLiteral() == skip { + continue + } + if arg.AsLiteral() == shortcircuit { + ctx.UpdateExpr(expr, arg) + return true + } + } + if len(newArgs) == 0 { + newArgs = append(newArgs, args[0]) + ctx.UpdateExpr(expr, newArgs[0]) + return true + } + if len(newArgs) == 1 { + ctx.UpdateExpr(expr, newArgs[0]) + return true + } + ctx.UpdateExpr(expr, ctx.NewCall(function, newArgs...)) + return true +} + +// pruneOptionalElements works from the bottom up to resolve optional elements within +// aggregate literals. 
+// +// Note, many aggregate literals will be resolved as arguments to functions or select +// statements, so this method exists to handle the case where the literal could not be +// fully resolved or exists outside of a call, select, or comprehension context. +func pruneOptionalElements(ctx *OptimizerContext, root ast.NavigableExpr) { + aggregateLiterals := ast.MatchDescendants(root, aggregateLiteralMatcher) + for _, lit := range aggregateLiterals { + switch lit.Kind() { + case ast.ListKind: + pruneOptionalListElements(ctx, lit) + case ast.MapKind: + pruneOptionalMapEntries(ctx, lit) + case ast.StructKind: + pruneOptionalStructFields(ctx, lit) + } + } +} + +func pruneOptionalListElements(ctx *OptimizerContext, e ast.Expr) { + l := e.AsList() + elems := l.Elements() + optIndices := l.OptionalIndices() + if len(optIndices) == 0 { + return + } + updatedElems := []ast.Expr{} + updatedIndices := []int32{} + newOptIndex := -1 + for _, e := range elems { + newOptIndex++ + if !l.IsOptional(int32(newOptIndex)) { + updatedElems = append(updatedElems, e) + continue + } + if e.Kind() != ast.LiteralKind { + updatedElems = append(updatedElems, e) + updatedIndices = append(updatedIndices, int32(newOptIndex)) + continue + } + optElemVal, ok := e.AsLiteral().(*types.Optional) + if !ok { + updatedElems = append(updatedElems, e) + updatedIndices = append(updatedIndices, int32(newOptIndex)) + continue + } + if !optElemVal.HasValue() { + newOptIndex-- // Skipping causes the list to get smaller. + continue + } + ctx.UpdateExpr(e, ctx.NewLiteral(optElemVal.GetValue())) + updatedElems = append(updatedElems, e) + } + ctx.UpdateExpr(e, ctx.NewList(updatedElems, updatedIndices)) +} + +func pruneOptionalMapEntries(ctx *OptimizerContext, e ast.Expr) { + m := e.AsMap() + entries := m.Entries() + updatedEntries := []ast.EntryExpr{} + modified := false + for _, e := range entries { + entry := e.AsMapEntry() + key := entry.Key() + val := entry.Value() + // If the entry is not optional, or the value-side of the optional hasn't + // been resolved to a literal, then preserve the entry as-is. + if !entry.IsOptional() || val.Kind() != ast.LiteralKind { + updatedEntries = append(updatedEntries, e) + continue + } + optElemVal, ok := val.AsLiteral().(*types.Optional) + if !ok { + updatedEntries = append(updatedEntries, e) + continue + } + // When the key is not a literal, but the value is, then it needs to be + // restored to an optional value. 
+ if key.Kind() != ast.LiteralKind { + undoOptVal, err := adaptLiteral(ctx, optElemVal) + if err != nil { + ctx.ReportErrorAtID(val.ID(), "invalid map value literal %v: %v", optElemVal, err) + } + ctx.UpdateExpr(val, undoOptVal) + updatedEntries = append(updatedEntries, e) + continue + } + modified = true + if !optElemVal.HasValue() { + continue + } + ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue())) + updatedEntry := ctx.NewMapEntry(key, val, false) + updatedEntries = append(updatedEntries, updatedEntry) + } + if modified { + ctx.UpdateExpr(e, ctx.NewMap(updatedEntries)) + } +} + +func pruneOptionalStructFields(ctx *OptimizerContext, e ast.Expr) { + s := e.AsStruct() + fields := s.Fields() + updatedFields := []ast.EntryExpr{} + modified := false + for _, f := range fields { + field := f.AsStructField() + val := field.Value() + if !field.IsOptional() || val.Kind() != ast.LiteralKind { + updatedFields = append(updatedFields, f) + continue + } + optElemVal, ok := val.AsLiteral().(*types.Optional) + if !ok { + updatedFields = append(updatedFields, f) + continue + } + modified = true + if !optElemVal.HasValue() { + continue + } + ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue())) + updatedField := ctx.NewStructField(field.Name(), val, false) + updatedFields = append(updatedFields, updatedField) + } + if modified { + ctx.UpdateExpr(e, ctx.NewStruct(s.TypeName(), updatedFields)) + } +} + +// adaptLiteral converts a runtime CEL value to its equivalent literal expression. +// +// For strongly typed values, the type-provider will be used to reconstruct the fields +// which are present in the literal and their equivalent initialization values. +func adaptLiteral(ctx *OptimizerContext, val ref.Val) (ast.Expr, error) { + switch t := val.Type().(type) { + case *types.Type: + switch t { + case types.BoolType, types.BytesType, types.DoubleType, types.IntType, + types.NullType, types.StringType, types.UintType: + return ctx.NewLiteral(val), nil + case types.DurationType: + return ctx.NewCall( + overloads.TypeConvertDuration, + ctx.NewLiteral(val.ConvertToType(types.StringType)), + ), nil + case types.TimestampType: + return ctx.NewCall( + overloads.TypeConvertTimestamp, + ctx.NewLiteral(val.ConvertToType(types.StringType)), + ), nil + case types.OptionalType: + opt := val.(*types.Optional) + if !opt.HasValue() { + return ctx.NewCall("optional.none"), nil + } + target, err := adaptLiteral(ctx, opt.GetValue()) + if err != nil { + return nil, err + } + return ctx.NewCall("optional.of", target), nil + case types.TypeType: + return ctx.NewIdent(val.(*types.Type).TypeName()), nil + case types.ListType: + l, ok := val.(traits.Lister) + if !ok { + return nil, fmt.Errorf("failed to adapt %v to literal", val) + } + elems := make([]ast.Expr, l.Size().(types.Int)) + idx := 0 + it := l.Iterator() + for it.HasNext() == types.True { + elemVal := it.Next() + elemExpr, err := adaptLiteral(ctx, elemVal) + if err != nil { + return nil, err + } + elems[idx] = elemExpr + idx++ + } + return ctx.NewList(elems, []int32{}), nil + case types.MapType: + m, ok := val.(traits.Mapper) + if !ok { + return nil, fmt.Errorf("failed to adapt %v to literal", val) + } + entries := make([]ast.EntryExpr, m.Size().(types.Int)) + idx := 0 + it := m.Iterator() + for it.HasNext() == types.True { + keyVal := it.Next() + keyExpr, err := adaptLiteral(ctx, keyVal) + if err != nil { + return nil, err + } + valVal := m.Get(keyVal) + valExpr, err := adaptLiteral(ctx, valVal) + if err != nil { + return nil, err + } + entries[idx] = 
ctx.NewMapEntry(keyExpr, valExpr, false) + idx++ + } + return ctx.NewMap(entries), nil + default: + provider := ctx.CELTypeProvider() + fields, found := provider.FindStructFieldNames(t.TypeName()) + if !found { + return nil, fmt.Errorf("failed to adapt %v to literal", val) + } + tester := val.(traits.FieldTester) + indexer := val.(traits.Indexer) + fieldInits := []ast.EntryExpr{} + for _, f := range fields { + field := types.String(f) + if tester.IsSet(field) != types.True { + continue + } + fieldVal := indexer.Get(field) + fieldExpr, err := adaptLiteral(ctx, fieldVal) + if err != nil { + return nil, err + } + fieldInits = append(fieldInits, ctx.NewStructField(f, fieldExpr, false)) + } + return ctx.NewStruct(t.TypeName(), fieldInits), nil + } + } + return nil, fmt.Errorf("failed to adapt %v to literal", val) +} + +// constantExprMatcher matches calls, select statements, and comprehensions whose arguments +// are all constant scalar or aggregate literal values. +// +// Only comprehensions which are not nested are included as possible constant folds, and only +// if all variables referenced in the comprehension stack exist are only iteration or +// accumulation variables. +func constantExprMatcher(e ast.NavigableExpr) bool { + switch e.Kind() { + case ast.CallKind: + return constantCallMatcher(e) + case ast.SelectKind: + sel := e.AsSelect() // guaranteed to be a navigable value + return constantMatcher(sel.Operand().(ast.NavigableExpr)) + case ast.ComprehensionKind: + if isNestedComprehension(e) { + return false + } + vars := map[string]bool{} + constantExprs := true + visitor := ast.NewExprVisitor(func(e ast.Expr) { + if e.Kind() == ast.ComprehensionKind { + nested := e.AsComprehension() + vars[nested.AccuVar()] = true + vars[nested.IterVar()] = true + } + if e.Kind() == ast.IdentKind && !vars[e.AsIdent()] { + constantExprs = false + } + }) + ast.PreOrderVisit(e, visitor) + return constantExprs + default: + return false + } +} + +// constantCallMatcher identifies strict and non-strict calls which can be folded. 
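For reference, a minimal usage sketch of the constant-folding optimizer defined in this file (not part of the vendored patch); the expression, the iteration limit, and the error handling are illustrative assumptions. The constantCallMatcher implementation referenced above continues below.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	// Compile an expression whose sub-expressions are all constant.
	checked, iss := env.Compile(`size("hello" + " world") > 5 && [1, 2, 3].exists(i, i < 2)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	folder, err := cel.NewConstantFoldingOptimizer(cel.MaxConstantFoldIterations(50))
	if err != nil {
		panic(err)
	}
	optimized, iss := cel.NewStaticOptimizer(folder).Optimize(env, checked)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Unparses to the folded form; for this input a single literal, `true`.
	out, _ := cel.AstToString(optimized)
	fmt.Println(out)
}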
+func constantCallMatcher(e ast.NavigableExpr) bool { + call := e.AsCall() + children := e.Children() + fnName := call.FunctionName() + if fnName == operators.LogicalAnd { + for _, child := range children { + if child.Kind() == ast.LiteralKind { + return true + } + } + } + if fnName == operators.LogicalOr { + for _, child := range children { + if child.Kind() == ast.LiteralKind { + return true + } + } + } + if fnName == operators.Conditional { + cond := children[0] + if cond.Kind() == ast.LiteralKind && cond.AsLiteral().Type() == types.BoolType { + return true + } + } + if fnName == operators.In { + haystack := children[1] + if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 { + return true + } + needle := children[0] + if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind { + needleValue := needle.AsLiteral() + list := haystack.AsList() + for _, e := range list.Elements() { + if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True { + return true + } + } + } + } + // convert all other calls with constant arguments + for _, child := range children { + if !constantMatcher(child) { + return false + } + } + return true +} + +func isNestedComprehension(e ast.NavigableExpr) bool { + parent, found := e.Parent() + for found { + if parent.Kind() == ast.ComprehensionKind { + return true + } + parent, found = parent.Parent() + } + return false +} + +func aggregateLiteralMatcher(e ast.NavigableExpr) bool { + return e.Kind() == ast.ListKind || e.Kind() == ast.MapKind || e.Kind() == ast.StructKind +} + +var ( + constantMatcher = ast.ConstantValueMatcher() +) + +const ( + defaultMaxConstantFoldIterations = 100 +) diff --git a/constraint/vendor/github.com/google/cel-go/cel/inlining.go b/constraint/vendor/github.com/google/cel-go/cel/inlining.go new file mode 100644 index 000000000..78d5bea65 --- /dev/null +++ b/constraint/vendor/github.com/google/cel-go/cel/inlining.go @@ -0,0 +1,228 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/traits" +) + +// InlineVariable holds a variable name to be matched and an AST representing +// the expression graph which should be used to replace it. +type InlineVariable struct { + name string + alias string + def *ast.AST +} + +// Name returns the qualified variable or field selection to replace. +func (v *InlineVariable) Name() string { + return v.name +} + +// Alias returns the alias to use when performing cel.bind() calls during inlining. +func (v *InlineVariable) Alias() string { + return v.alias +} + +// Expr returns the inlined expression value. +func (v *InlineVariable) Expr() ast.Expr { + return v.def.Expr() +} + +// Type indicates the inlined expression type. 
+func (v *InlineVariable) Type() *Type { + return v.def.GetType(v.def.Expr().ID()) +} + +// NewInlineVariable declares a variable name to be replaced by a checked expression. +func NewInlineVariable(name string, definition *Ast) *InlineVariable { + return NewInlineVariableWithAlias(name, name, definition) +} + +// NewInlineVariableWithAlias declares a variable name to be replaced by a checked expression. +// If the variable occurs more than once, the provided alias will be used to replace the expressions +// where the variable name occurs. +func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable { + return &InlineVariable{name: name, alias: alias, def: definition.impl} +} + +// NewInliningOptimizer creates and optimizer which replaces variables with expression definitions. +// +// If a variable occurs one time, the variable is replaced by the inline definition. If the +// variable occurs more than once, the variable occurences are replaced by a cel.bind() call. +func NewInliningOptimizer(inlineVars ...*InlineVariable) ASTOptimizer { + return &inliningOptimizer{variables: inlineVars} +} + +type inliningOptimizer struct { + variables []*InlineVariable +} + +func (opt *inliningOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST { + root := ast.NavigateAST(a) + for _, inlineVar := range opt.variables { + matches := ast.MatchDescendants(root, opt.matchVariable(inlineVar.Name())) + // Skip cases where the variable isn't in the expression graph + if len(matches) == 0 { + continue + } + + // For a single match, do a direct replacement of the expression sub-graph. + if len(matches) == 1 || !isBindable(matches, inlineVar.Expr(), inlineVar.Type()) { + for _, match := range matches { + // Copy the inlined AST expr and source info. + copyExpr := ctx.CopyASTAndMetadata(inlineVar.def) + opt.inlineExpr(ctx, match, copyExpr, inlineVar.Type()) + } + continue + } + + // For multiple matches, find the least common ancestor (lca) and insert the + // variable as a cel.bind() macro. + var lca ast.NavigableExpr = root + lcaAncestorCount := 0 + ancestors := map[int64]int{} + for _, match := range matches { + // Update the identifier matches with the provided alias. + parent, found := match, true + for found { + ancestorCount, hasAncestor := ancestors[parent.ID()] + if !hasAncestor { + ancestors[parent.ID()] = 1 + parent, found = parent.Parent() + continue + } + if lcaAncestorCount < ancestorCount || (lcaAncestorCount == ancestorCount && lca.Depth() < parent.Depth()) { + lca = parent + lcaAncestorCount = ancestorCount + } + ancestors[parent.ID()] = ancestorCount + 1 + parent, found = parent.Parent() + } + aliasExpr := ctx.NewIdent(inlineVar.Alias()) + opt.inlineExpr(ctx, match, aliasExpr, inlineVar.Type()) + } + + // Copy the inlined AST expr and source info. + copyExpr := ctx.CopyASTAndMetadata(inlineVar.def) + // Update the least common ancestor by inserting a cel.bind() call to the alias. + inlined, bindMacro := ctx.NewBindMacro(lca.ID(), inlineVar.Alias(), copyExpr, lca) + opt.inlineExpr(ctx, lca, inlined, inlineVar.Type()) + ctx.SetMacroCall(lca.ID(), bindMacro) + } + return a +} + +// inlineExpr replaces the current expression with the inlined one, unless the location of the inlining +// happens within a presence test, e.g. has(a.b.c) -> inline alpha for a.b.c in which case an attempt is +// made to determine whether the inlined value can be presence or existence tested. 
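For reference, a sketch of how the inlining optimizer above substitutes a variable with a checked definition (not part of the vendored patch); the variable names and expressions are illustrative assumptions. The inlineExpr implementation continues below.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("request", cel.MapType(cel.StringType, cel.StringType)),
		cel.Variable("principal", cel.StringType),
	)
	if err != nil {
		panic(err)
	}
	checked, iss := env.Compile(`principal == "admin" || principal == "root"`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Definition to substitute wherever `principal` occurs; because it occurs twice,
	// the optimizer wraps the least common ancestor in a cel.bind() call.
	def, iss := env.Compile(`request["user"]`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	inliner := cel.NewInliningOptimizer(cel.NewInlineVariable("principal", def))
	optimized, iss := cel.NewStaticOptimizer(inliner).Optimize(env, checked)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	out, _ := cel.AstToString(optimized)
	fmt.Println(out)
}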
+func (opt *inliningOptimizer) inlineExpr(ctx *OptimizerContext, prev ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) { + switch prev.Kind() { + case ast.SelectKind: + sel := prev.AsSelect() + if !sel.IsTestOnly() { + ctx.UpdateExpr(prev, inlined) + return + } + opt.rewritePresenceExpr(ctx, prev, inlined, inlinedType) + default: + ctx.UpdateExpr(prev, inlined) + } +} + +// rewritePresenceExpr converts the inlined expression, when it occurs within a has() macro, to type-safe +// expression appropriate for the inlined type, if possible. +// +// If the rewrite is not possible an error is reported at the inline expression site. +func (opt *inliningOptimizer) rewritePresenceExpr(ctx *OptimizerContext, prev, inlined ast.Expr, inlinedType *Type) { + // If the input inlined expression is not a select expression it won't work with the has() + // macro. Attempt to rewrite the presence test in terms of the typed input, otherwise error. + if inlined.Kind() == ast.SelectKind { + presenceTest, hasMacro := ctx.NewHasMacro(prev.ID(), inlined) + ctx.UpdateExpr(prev, presenceTest) + ctx.SetMacroCall(prev.ID(), hasMacro) + return + } + + ctx.ClearMacroCall(prev.ID()) + if inlinedType.IsAssignableType(NullType) { + ctx.UpdateExpr(prev, + ctx.NewCall(operators.NotEquals, + inlined, + ctx.NewLiteral(types.NullValue), + )) + return + } + if inlinedType.HasTrait(traits.SizerType) { + ctx.UpdateExpr(prev, + ctx.NewCall(operators.NotEquals, + ctx.NewMemberCall(overloads.Size, inlined), + ctx.NewLiteral(types.IntZero), + )) + return + } + ctx.ReportErrorAtID(prev.ID(), "unable to inline expression type %v into presence test", inlinedType) +} + +// isBindable indicates whether the inlined type can be used within a cel.bind() if the expression +// being replaced occurs within a presence test. Value types with a size() method or field selection +// support can be bound. +// +// In future iterations, support may also be added for indexer types which can be rewritten as an `in` +// expression; however, this would imply a rewrite of the inlined expression that may not be necessary +// in most cases. +func isBindable(matches []ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) bool { + if inlinedType.IsAssignableType(NullType) || + inlinedType.HasTrait(traits.SizerType) { + return true + } + for _, m := range matches { + if m.Kind() != ast.SelectKind { + continue + } + sel := m.AsSelect() + if sel.IsTestOnly() { + return false + } + } + return true +} + +// matchVariable matches simple identifiers, select expressions, and presence test expressions +// which match the (potentially) qualified variable name provided as input. +// +// Note, this function does not support inlining against select expressions which includes optional +// field selection. This may be a future refinement. +func (opt *inliningOptimizer) matchVariable(varName string) ast.ExprMatcher { + return func(e ast.NavigableExpr) bool { + if e.Kind() == ast.IdentKind && e.AsIdent() == varName { + return true + } + if e.Kind() == ast.SelectKind { + sel := e.AsSelect() + // While the `ToQualifiedName` call could take the select directly, this + // would skip presence tests from possible matches, which we would like + // to include. 
+ qualName, found := containers.ToQualifiedName(sel.Operand()) + return found && qualName+"."+sel.FieldName() == varName + } + return false + } +} diff --git a/constraint/vendor/github.com/google/cel-go/cel/io.go b/constraint/vendor/github.com/google/cel-go/cel/io.go index 80f63140e..7d08d1c81 100644 --- a/constraint/vendor/github.com/google/cel-go/cel/io.go +++ b/constraint/vendor/github.com/google/cel-go/cel/io.go @@ -28,6 +28,7 @@ import ( "github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/parser" + celpb "cel.dev/expr" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" anypb "google.golang.org/protobuf/types/known/anypb" ) @@ -47,17 +48,11 @@ func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast { // // Prefer CheckedExprToAst if loading expressions from storage. func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) { - checkedAST, err := ast.CheckedExprToCheckedAST(checkedExpr) + checked, err := ast.ToAST(checkedExpr) if err != nil { return nil, err } - return &Ast{ - expr: checkedAST.Expr, - info: checkedAST.SourceInfo, - source: src, - refMap: checkedAST.ReferenceMap, - typeMap: checkedAST.TypeMap, - }, nil + return &Ast{source: src, impl: checked}, nil } // AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value. @@ -67,13 +62,7 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) { if !a.IsChecked() { return nil, fmt.Errorf("cannot convert unchecked ast") } - cAst := &ast.CheckedAST{ - Expr: a.expr, - SourceInfo: a.info, - ReferenceMap: a.refMap, - TypeMap: a.typeMap, - } - return ast.CheckedASTToCheckedExpr(cAst) + return ast.ToProto(a.impl) } // ParsedExprToAst converts a parsed expression proto message to an Ast. @@ -89,18 +78,12 @@ func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast { // // Prefer ParsedExprToAst if loading expressions from storage. func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast { - si := parsedExpr.GetSourceInfo() - if si == nil { - si = &exprpb.SourceInfo{} - } + info, _ := ast.ProtoToSourceInfo(parsedExpr.GetSourceInfo()) if src == nil { - src = common.NewInfoSource(si) - } - return &Ast{ - expr: parsedExpr.GetExpr(), - info: si, - source: src, + src = common.NewInfoSource(parsedExpr.GetSourceInfo()) } + e, _ := ast.ProtoToExpr(parsedExpr.GetExpr()) + return &Ast{source: src, impl: ast.NewAST(e, info)} } // AstToParsedExpr converts an Ast to an protobuf ParsedExpr value. @@ -116,80 +99,92 @@ func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) { // Note, the conversion may not be an exact replica of the original expression, but will produce // a string that is semantically equivalent and whose textual representation is stable. func AstToString(a *Ast) (string, error) { - expr := a.Expr() - info := a.SourceInfo() - return parser.Unparse(expr, info) + return parser.Unparse(a.impl.Expr(), a.impl.SourceInfo()) } // RefValueToValue converts between ref.Val and api.expr.Value. // The result Value is the serialized proto form. The ref.Val must not be error or unknown. 
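The ValueAsProto and ValueAsAlphaProto helpers introduced just below convert an evaluation result into either the canonical cel.dev/expr Value or the older v1alpha1 form. A small illustrative sketch, assuming a default environment (not part of the vendored patch):

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, _ := cel.NewEnv()
	checked, iss := env.Compile(`{"a": 1, "b": 2}`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(checked)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	canonical, err := cel.ValueAsProto(out) // canonical cel.dev/expr Value
	if err != nil {
		panic(err)
	}
	alpha, err := cel.ValueAsAlphaProto(out) // v1alpha1 Value with the same content
	if err != nil {
		panic(err)
	}
	fmt.Println(canonical, alpha)
}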
func RefValueToValue(res ref.Val) (*exprpb.Value, error) { + return ValueAsAlphaProto(res) +} + +func ValueAsAlphaProto(res ref.Val) (*exprpb.Value, error) { + canonical, err := ValueAsProto(res) + if err != nil { + return nil, err + } + alpha := &exprpb.Value{} + err = convertProto(canonical, alpha) + return alpha, err +} + +func ValueAsProto(res ref.Val) (*celpb.Value, error) { switch res.Type() { case types.BoolType: - return &exprpb.Value{ - Kind: &exprpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil case types.BytesType: - return &exprpb.Value{ - Kind: &exprpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil case types.DoubleType: - return &exprpb.Value{ - Kind: &exprpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil case types.IntType: - return &exprpb.Value{ - Kind: &exprpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil case types.ListType: l := res.(traits.Lister) sz := l.Size().(types.Int) - elts := make([]*exprpb.Value, 0, int64(sz)) + elts := make([]*celpb.Value, 0, int64(sz)) for i := types.Int(0); i < sz; i++ { - v, err := RefValueToValue(l.Get(i)) + v, err := ValueAsProto(l.Get(i)) if err != nil { return nil, err } elts = append(elts, v) } - return &exprpb.Value{ - Kind: &exprpb.Value_ListValue{ - ListValue: &exprpb.ListValue{Values: elts}}}, nil + return &celpb.Value{ + Kind: &celpb.Value_ListValue{ + ListValue: &celpb.ListValue{Values: elts}}}, nil case types.MapType: mapper := res.(traits.Mapper) sz := mapper.Size().(types.Int) - entries := make([]*exprpb.MapValue_Entry, 0, int64(sz)) + entries := make([]*celpb.MapValue_Entry, 0, int64(sz)) for it := mapper.Iterator(); it.HasNext().(types.Bool); { k := it.Next() v := mapper.Get(k) - kv, err := RefValueToValue(k) + kv, err := ValueAsProto(k) if err != nil { return nil, err } - vv, err := RefValueToValue(v) + vv, err := ValueAsProto(v) if err != nil { return nil, err } - entries = append(entries, &exprpb.MapValue_Entry{Key: kv, Value: vv}) + entries = append(entries, &celpb.MapValue_Entry{Key: kv, Value: vv}) } - return &exprpb.Value{ - Kind: &exprpb.Value_MapValue{ - MapValue: &exprpb.MapValue{Entries: entries}}}, nil + return &celpb.Value{ + Kind: &celpb.Value_MapValue{ + MapValue: &celpb.MapValue{Entries: entries}}}, nil case types.NullType: - return &exprpb.Value{ - Kind: &exprpb.Value_NullValue{}}, nil + return &celpb.Value{ + Kind: &celpb.Value_NullValue{}}, nil case types.StringType: - return &exprpb.Value{ - Kind: &exprpb.Value_StringValue{StringValue: res.Value().(string)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_StringValue{StringValue: res.Value().(string)}}, nil case types.TypeType: typeName := res.(ref.Type).TypeName() - return &exprpb.Value{Kind: &exprpb.Value_TypeValue{TypeValue: typeName}}, nil + return &celpb.Value{Kind: &celpb.Value_TypeValue{TypeValue: typeName}}, nil case types.UintType: - return &exprpb.Value{ - Kind: &exprpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil default: any, err := res.ConvertToNative(anyPbType) if err != nil { return nil, err } - return &exprpb.Value{ - 
Kind: &exprpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil + return &celpb.Value{ + Kind: &celpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil } } @@ -212,55 +207,67 @@ var ( // ValueToRefValue converts between exprpb.Value and ref.Val. func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { + return AlphaProtoAsValue(adapter, v) +} + +func AlphaProtoAsValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { + canonical := &celpb.Value{} + if err := convertProto(v, canonical); err != nil { + return nil, err + } + return ProtoAsValue(adapter, canonical) +} + +func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) { switch v.Kind.(type) { - case *exprpb.Value_NullValue: + case *celpb.Value_NullValue: return types.NullValue, nil - case *exprpb.Value_BoolValue: + case *celpb.Value_BoolValue: return types.Bool(v.GetBoolValue()), nil - case *exprpb.Value_Int64Value: + case *celpb.Value_Int64Value: return types.Int(v.GetInt64Value()), nil - case *exprpb.Value_Uint64Value: + case *celpb.Value_Uint64Value: return types.Uint(v.GetUint64Value()), nil - case *exprpb.Value_DoubleValue: + case *celpb.Value_DoubleValue: return types.Double(v.GetDoubleValue()), nil - case *exprpb.Value_StringValue: + case *celpb.Value_StringValue: return types.String(v.GetStringValue()), nil - case *exprpb.Value_BytesValue: + case *celpb.Value_BytesValue: return types.Bytes(v.GetBytesValue()), nil - case *exprpb.Value_ObjectValue: + case *celpb.Value_ObjectValue: any := v.GetObjectValue() msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true}) if err != nil { return nil, err } return adapter.NativeToValue(msg), nil - case *exprpb.Value_MapValue: + case *celpb.Value_MapValue: m := v.GetMapValue() entries := make(map[ref.Val]ref.Val) for _, entry := range m.Entries { - key, err := ValueToRefValue(adapter, entry.Key) + key, err := ProtoAsValue(adapter, entry.Key) if err != nil { return nil, err } - pb, err := ValueToRefValue(adapter, entry.Value) + pb, err := ProtoAsValue(adapter, entry.Value) if err != nil { return nil, err } entries[key] = pb } return adapter.NativeToValue(entries), nil - case *exprpb.Value_ListValue: + case *celpb.Value_ListValue: l := v.GetListValue() elts := make([]ref.Val, len(l.Values)) for i, e := range l.Values { - rv, err := ValueToRefValue(adapter, e) + rv, err := ProtoAsValue(adapter, e) if err != nil { return nil, err } elts[i] = rv } return adapter.NativeToValue(elts), nil - case *exprpb.Value_TypeValue: + case *celpb.Value_TypeValue: typeName := v.GetTypeValue() tv, ok := typeNameToTypeValue[typeName] if ok { @@ -270,3 +277,12 @@ func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { } return nil, errors.New("unknown value") } + +func convertProto(src, dst proto.Message) error { + pb, err := proto.Marshal(src) + if err != nil { + return err + } + err = proto.Unmarshal(pb, dst) + return err +} diff --git a/constraint/vendor/github.com/google/cel-go/cel/library.go b/constraint/vendor/github.com/google/cel-go/cel/library.go index 4d232085c..be59f1b02 100644 --- a/constraint/vendor/github.com/google/cel-go/cel/library.go +++ b/constraint/vendor/github.com/google/cel-go/cel/library.go @@ -20,6 +20,7 @@ import ( "strings" "time" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/stdlib" @@ -28,8 +29,6 @@ import ( "github.com/google/cel-go/common/types/traits" 
"github.com/google/cel-go/interpreter" "github.com/google/cel-go/parser" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) const ( @@ -313,7 +312,7 @@ func (lib *optionalLib) CompileOptions() []EnvOption { Types(types.OptionalType), // Configure the optMap and optFlatMap macros. - Macros(NewReceiverMacro(optMapMacro, 2, optMap)), + Macros(ReceiverMacro(optMapMacro, 2, optMap)), // Global and member functions for working with optional values. Function(optionalOfFunc, @@ -374,7 +373,7 @@ func (lib *optionalLib) CompileOptions() []EnvOption { Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)), } if lib.version >= 1 { - opts = append(opts, Macros(NewReceiverMacro(optFlatMapMacro, 2, optFlatMap))) + opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap))) } return opts } @@ -386,57 +385,57 @@ func (lib *optionalLib) ProgramOptions() []ProgramOption { } } -func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { +func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) { varIdent := args[0] varName := "" - switch varIdent.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - varName = varIdent.GetIdentExpr().GetName() + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() default: - return nil, meh.NewError(varIdent.GetId(), "optMap() variable name must be a simple identifier") + return nil, meh.NewError(varIdent.ID(), "optMap() variable name must be a simple identifier") } mapExpr := args[1] - return meh.GlobalCall( + return meh.NewCall( operators.Conditional, - meh.ReceiverCall(hasValueFunc, target), - meh.GlobalCall(optionalOfFunc, - meh.Fold( - unusedIterVar, + meh.NewMemberCall(hasValueFunc, target), + meh.NewCall(optionalOfFunc, + meh.NewComprehension( meh.NewList(), + unusedIterVar, varName, - meh.ReceiverCall(valueFunc, target), - meh.LiteralBool(false), - meh.Ident(varName), + meh.NewMemberCall(valueFunc, meh.Copy(target)), + meh.NewLiteral(types.False), + meh.NewIdent(varName), mapExpr, ), ), - meh.GlobalCall(optionalNoneFunc), + meh.NewCall(optionalNoneFunc), ), nil } -func optFlatMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { +func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) { varIdent := args[0] varName := "" - switch varIdent.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - varName = varIdent.GetIdentExpr().GetName() + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() default: - return nil, meh.NewError(varIdent.GetId(), "optFlatMap() variable name must be a simple identifier") + return nil, meh.NewError(varIdent.ID(), "optFlatMap() variable name must be a simple identifier") } mapExpr := args[1] - return meh.GlobalCall( + return meh.NewCall( operators.Conditional, - meh.ReceiverCall(hasValueFunc, target), - meh.Fold( - unusedIterVar, + meh.NewMemberCall(hasValueFunc, target), + meh.NewComprehension( meh.NewList(), + unusedIterVar, varName, - meh.ReceiverCall(valueFunc, target), - meh.LiteralBool(false), - meh.Ident(varName), + meh.NewMemberCall(valueFunc, meh.Copy(target)), + meh.NewLiteral(types.False), + meh.NewIdent(varName), mapExpr, ), - meh.GlobalCall(optionalNoneFunc), + meh.NewCall(optionalNoneFunc), ), nil } @@ -447,6 +446,12 @@ func enableOptionalSyntax() EnvOption { } } +// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field 
+// selection is performed on a primitive type. +func EnableErrorOnBadPresenceTest(value bool) EnvOption { + return features(featureEnableErrorOnBadPresenceTest, value) +} + func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) { call, ok := i.(interpreter.InterpretableCall) if !ok { diff --git a/constraint/vendor/github.com/google/cel-go/cel/macro.go b/constraint/vendor/github.com/google/cel-go/cel/macro.go index 1eb414c8b..4db1fd57a 100644 --- a/constraint/vendor/github.com/google/cel-go/cel/macro.go +++ b/constraint/vendor/github.com/google/cel-go/cel/macro.go @@ -15,6 +15,11 @@ package cel import ( + "fmt" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" "github.com/google/cel-go/parser" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -26,7 +31,14 @@ import ( // a Macro should be created per arg-count or as a var arg macro. type Macro = parser.Macro -// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree. +// MacroFactory defines an expansion function which converts a call and its arguments to a cel.Expr value. +type MacroFactory = parser.MacroExpander + +// MacroExprFactory assists with the creation of Expr values in a manner which is consistent +// the internal semantics and id generation behaviors of the parser and checker libraries. +type MacroExprFactory = parser.ExprHelper + +// MacroExpander converts a call and its associated arguments into a protobuf Expr representation. // // If the MacroExpander determines within the implementation that an expansion is not needed it may return // a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments @@ -36,48 +48,197 @@ type Macro = parser.Macro // and produces as output an Expr ast node. // // Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil. -type MacroExpander = parser.MacroExpander +type MacroExpander func(eh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) // MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree. -type MacroExprHelper = parser.ExprHelper +// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is +// consistent with the source position and expression id generation code leveraged by both +// the parser and type-checker. +type MacroExprHelper interface { + // Copy the input expression with a brand new set of identifiers. + Copy(*exprpb.Expr) *exprpb.Expr + + // LiteralBool creates an Expr value for a bool literal. + LiteralBool(value bool) *exprpb.Expr + + // LiteralBytes creates an Expr value for a byte literal. + LiteralBytes(value []byte) *exprpb.Expr + + // LiteralDouble creates an Expr value for double literal. + LiteralDouble(value float64) *exprpb.Expr + + // LiteralInt creates an Expr value for an int literal. + LiteralInt(value int64) *exprpb.Expr + + // LiteralString creates am Expr value for a string literal. + LiteralString(value string) *exprpb.Expr + + // LiteralUint creates an Expr value for a uint literal. + LiteralUint(value uint64) *exprpb.Expr + + // NewList creates a CreateList instruction where the list is comprised of the optional set + // of elements provided as arguments. 
+ NewList(elems ...*exprpb.Expr) *exprpb.Expr + + // NewMap creates a CreateStruct instruction for a map where the map is comprised of the + // optional set of key, value entries. + NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr + + // NewMapEntry creates a Map Entry for the key, value pair. + NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry + + // NewObject creates a CreateStruct instruction for an object with a given type name and + // optional set of field initializers. + NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr + + // NewObjectFieldInit creates a new Object field initializer from the field name and value. + NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry + + // Fold creates a fold comprehension instruction. + // + // - iterVar is the iteration variable name. + // - iterRange represents the expression that resolves to a list or map where the elements or + // keys (respectively) will be iterated over. + // - accuVar is the accumulation variable name, typically parser.AccumulatorName. + // - accuInit is the initial expression whose value will be set for the accuVar prior to + // folding. + // - condition is the expression to test to determine whether to continue folding. + // - step is the expression to evaluation at the conclusion of a single fold iteration. + // - result is the computation to evaluate at the conclusion of the fold. + // + // The accuVar should not shadow variable names that you would like to reference within the + // environment in the step and condition expressions. Presently, the name __result__ is commonly + // used by built-in macros but this may change in the future. + Fold(iterVar string, + iterRange *exprpb.Expr, + accuVar string, + accuInit *exprpb.Expr, + condition *exprpb.Expr, + step *exprpb.Expr, + result *exprpb.Expr) *exprpb.Expr + + // Ident creates an identifier Expr value. + Ident(name string) *exprpb.Expr + + // AccuIdent returns an accumulator identifier for use with comprehension results. + AccuIdent() *exprpb.Expr + + // GlobalCall creates a function call Expr value for a global (free) function. + GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr + + // ReceiverCall creates a function call Expr value for a receiver-style function. + ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr + + // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics. + PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr + + // Select create a field traversal Expr value. + Select(operand *exprpb.Expr, field string) *exprpb.Expr + + // OffsetLocation returns the Location of the expression identifier. + OffsetLocation(exprID int64) common.Location + + // NewError associates an error message with a given expression id. + NewError(exprID int64, message string) *Error +} + +// GlobalMacro creates a Macro for a global function with the specified arg count. +func GlobalMacro(function string, argCount int, factory MacroFactory) Macro { + return parser.NewGlobalMacro(function, argCount, factory) +} + +// ReceiverMacro creates a Macro for a receiver function matching the specified arg count. +func ReceiverMacro(function string, argCount int, factory MacroFactory) Macro { + return parser.NewReceiverMacro(function, argCount, factory) +} + +// GlobalVarArgMacro creates a Macro for a global function with a variable arg count. 
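For reference, a sketch of a custom macro written against the new MacroFactory and MacroExprFactory API rather than the deprecated proto-based MacroExpander (not part of the vendored patch); the macro name, its expansion, and the environment are illustrative assumptions. The remaining constructors and the legacy adapter continue below.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/operators"
)

// getOrDefaultMacro expands m.getOrDefault(k, d) into (k in m) ? m[k] : d.
func getOrDefaultMacro(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) {
	key, fallback := args[0], args[1]
	return mef.NewCall(operators.Conditional,
		mef.NewCall(operators.In, mef.Copy(key), mef.Copy(target)),
		mef.NewCall(operators.Index, target, key),
		fallback,
	), nil
}

func main() {
	env, err := cel.NewEnv(
		cel.Variable("labels", cel.MapType(cel.StringType, cel.StringType)),
		cel.Macros(cel.ReceiverMacro("getOrDefault", 2, getOrDefaultMacro)),
	)
	if err != nil {
		panic(err)
	}
	checked, iss := env.Compile(`labels.getOrDefault("team", "unknown")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	fmt.Println(checked.OutputType()) // string
}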
+func GlobalVarArgMacro(function string, factory MacroFactory) Macro { + return parser.NewGlobalVarArgMacro(function, factory) +} + +// ReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count. +func ReceiverVarArgMacro(function string, factory MacroFactory) Macro { + return parser.NewReceiverVarArgMacro(function, factory) +} // NewGlobalMacro creates a Macro for a global function with the specified arg count. +// +// Deprecated: use GlobalMacro func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro { - return parser.NewGlobalMacro(function, argCount, expander) + expand := adaptingExpander{expander} + return parser.NewGlobalMacro(function, argCount, expand.Expander) } // NewReceiverMacro creates a Macro for a receiver function matching the specified arg count. +// +// Deprecated: use ReceiverMacro func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro { - return parser.NewReceiverMacro(function, argCount, expander) + expand := adaptingExpander{expander} + return parser.NewReceiverMacro(function, argCount, expand.Expander) } // NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count. +// +// Deprecated: use GlobalVarArgMacro func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro { - return parser.NewGlobalVarArgMacro(function, expander) + expand := adaptingExpander{expander} + return parser.NewGlobalVarArgMacro(function, expand.Expander) } // NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count. +// +// Deprecated: use ReceiverVarArgMacro func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro { - return parser.NewReceiverVarArgMacro(function, expander) + expand := adaptingExpander{expander} + return parser.NewReceiverVarArgMacro(function, expand.Expander) } // HasMacroExpander expands the input call arguments into a presence test, e.g. 
has(.field) func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - return parser.MakeHas(meh, target, args) + ph, err := toParserHelper(meh) + if err != nil { + return nil, err + } + arg, err := adaptToExpr(args[0]) + if err != nil { + return nil, err + } + if arg.Kind() == ast.SelectKind { + s := arg.AsSelect() + return adaptToProto(ph.NewPresenceTest(s.Operand(), s.FieldName())) + } + return nil, ph.NewError(arg.ID(), "invalid argument to has() macro") } // ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the // elements in the range match the predicate expressions: // .exists(, ) func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - return parser.MakeExists(meh, target, args) + ph, err := toParserHelper(meh) + if err != nil { + return nil, err + } + out, err := parser.MakeExists(ph, mustAdaptToExpr(target), mustAdaptToExprs(args)) + if err != nil { + return nil, err + } + return adaptToProto(out) } // ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly // one of the elements in the range match the predicate expressions: // .exists_one(, ) func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - return parser.MakeExistsOne(meh, target, args) + ph, err := toParserHelper(meh) + if err != nil { + return nil, err + } + out, err := parser.MakeExistsOne(ph, mustAdaptToExpr(target), mustAdaptToExprs(args)) + if err != nil { + return nil, err + } + return adaptToProto(out) } // MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the @@ -91,14 +252,30 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex // In the second form only iterVar values which return true when provided to the predicate expression // are transformed. 
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - return parser.MakeMap(meh, target, args) + ph, err := toParserHelper(meh) + if err != nil { + return nil, err + } + out, err := parser.MakeMap(ph, mustAdaptToExpr(target), mustAdaptToExprs(args)) + if err != nil { + return nil, err + } + return adaptToProto(out) } // FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains // only elements which match the provided predicate expression: // .filter(, ) func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - return parser.MakeFilter(meh, target, args) + ph, err := toParserHelper(meh) + if err != nil { + return nil, err + } + out, err := parser.MakeFilter(ph, mustAdaptToExpr(target), mustAdaptToExprs(args)) + if err != nil { + return nil, err + } + return adaptToProto(out) } var ( @@ -142,3 +319,258 @@ var ( // NoMacros provides an alias to an empty list of macros NoMacros = []Macro{} ) + +type adaptingExpander struct { + legacyExpander MacroExpander +} + +func (adapt *adaptingExpander) Expander(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { + var legacyTarget *exprpb.Expr = nil + var err *Error = nil + if target != nil { + legacyTarget, err = adaptToProto(target) + if err != nil { + return nil, err + } + } + legacyArgs := make([]*exprpb.Expr, len(args)) + for i, arg := range args { + legacyArgs[i], err = adaptToProto(arg) + if err != nil { + return nil, err + } + } + ah := &adaptingHelper{modernHelper: eh} + legacyExpr, err := adapt.legacyExpander(ah, legacyTarget, legacyArgs) + if err != nil { + return nil, err + } + ex, err := adaptToExpr(legacyExpr) + if err != nil { + return nil, err + } + return ex, nil +} + +func wrapErr(id int64, message string, err error) *common.Error { + return &common.Error{ + Location: common.NoLocation, + Message: fmt.Sprintf("%s: %v", message, err), + ExprID: id, + } +} + +type adaptingHelper struct { + modernHelper parser.ExprHelper +} + +// Copy the input expression with a brand new set of identifiers. +func (ah *adaptingHelper) Copy(e *exprpb.Expr) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.Copy(mustAdaptToExpr(e))) +} + +// LiteralBool creates an Expr value for a bool literal. +func (ah *adaptingHelper) LiteralBool(value bool) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bool(value))) +} + +// LiteralBytes creates an Expr value for a byte literal. +func (ah *adaptingHelper) LiteralBytes(value []byte) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bytes(value))) +} + +// LiteralDouble creates an Expr value for double literal. +func (ah *adaptingHelper) LiteralDouble(value float64) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Double(value))) +} + +// LiteralInt creates an Expr value for an int literal. +func (ah *adaptingHelper) LiteralInt(value int64) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Int(value))) +} + +// LiteralString creates am Expr value for a string literal. +func (ah *adaptingHelper) LiteralString(value string) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.String(value))) +} + +// LiteralUint creates an Expr value for a uint literal. 
+func (ah *adaptingHelper) LiteralUint(value uint64) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Uint(value))) +} + +// NewList creates a CreateList instruction where the list is comprised of the optional set +// of elements provided as arguments. +func (ah *adaptingHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewList(mustAdaptToExprs(elems)...)) +} + +// NewMap creates a CreateStruct instruction for a map where the map is comprised of the +// optional set of key, value entries. +func (ah *adaptingHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { + adaptedEntries := make([]ast.EntryExpr, len(entries)) + for i, e := range entries { + adaptedEntries[i] = mustAdaptToEntryExpr(e) + } + return mustAdaptToProto(ah.modernHelper.NewMap(adaptedEntries...)) +} + +// NewMapEntry creates a Map Entry for the key, value pair. +func (ah *adaptingHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { + return mustAdaptToProtoEntry( + ah.modernHelper.NewMapEntry(mustAdaptToExpr(key), mustAdaptToExpr(val), optional)) +} + +// NewObject creates a CreateStruct instruction for an object with a given type name and +// optional set of field initializers. +func (ah *adaptingHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { + adaptedEntries := make([]ast.EntryExpr, len(fieldInits)) + for i, e := range fieldInits { + adaptedEntries[i] = mustAdaptToEntryExpr(e) + } + return mustAdaptToProto(ah.modernHelper.NewStruct(typeName, adaptedEntries...)) +} + +// NewObjectFieldInit creates a new Object field initializer from the field name and value. +func (ah *adaptingHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { + return mustAdaptToProtoEntry( + ah.modernHelper.NewStructField(field, mustAdaptToExpr(init), optional)) +} + +// Fold creates a fold comprehension instruction. +// +// - iterVar is the iteration variable name. +// - iterRange represents the expression that resolves to a list or map where the elements or +// keys (respectively) will be iterated over. +// - accuVar is the accumulation variable name, typically parser.AccumulatorName. +// - accuInit is the initial expression whose value will be set for the accuVar prior to +// folding. +// - condition is the expression to test to determine whether to continue folding. +// - step is the expression to evaluation at the conclusion of a single fold iteration. +// - result is the computation to evaluate at the conclusion of the fold. +// +// The accuVar should not shadow variable names that you would like to reference within the +// environment in the step and condition expressions. Presently, the name __result__ is commonly +// used by built-in macros but this may change in the future. +func (ah *adaptingHelper) Fold(iterVar string, + iterRange *exprpb.Expr, + accuVar string, + accuInit *exprpb.Expr, + condition *exprpb.Expr, + step *exprpb.Expr, + result *exprpb.Expr) *exprpb.Expr { + return mustAdaptToProto( + ah.modernHelper.NewComprehension( + mustAdaptToExpr(iterRange), + iterVar, + accuVar, + mustAdaptToExpr(accuInit), + mustAdaptToExpr(condition), + mustAdaptToExpr(step), + mustAdaptToExpr(result), + ), + ) +} + +// Ident creates an identifier Expr value. 
+func (ah *adaptingHelper) Ident(name string) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewIdent(name)) +} + +// AccuIdent returns an accumulator identifier for use with comprehension results. +func (ah *adaptingHelper) AccuIdent() *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewAccuIdent()) +} + +// GlobalCall creates a function call Expr value for a global (free) function. +func (ah *adaptingHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr { + return mustAdaptToProto(ah.modernHelper.NewCall(function, mustAdaptToExprs(args)...)) +} + +// ReceiverCall creates a function call Expr value for a receiver-style function. +func (ah *adaptingHelper) ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr { + return mustAdaptToProto( + ah.modernHelper.NewMemberCall(function, mustAdaptToExpr(target), mustAdaptToExprs(args)...)) +} + +// PresenceTest creates a Select TestOnly Expr value for modelling has() semantics. +func (ah *adaptingHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr { + op := mustAdaptToExpr(operand) + return mustAdaptToProto(ah.modernHelper.NewPresenceTest(op, field)) +} + +// Select create a field traversal Expr value. +func (ah *adaptingHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr { + op := mustAdaptToExpr(operand) + return mustAdaptToProto(ah.modernHelper.NewSelect(op, field)) +} + +// OffsetLocation returns the Location of the expression identifier. +func (ah *adaptingHelper) OffsetLocation(exprID int64) common.Location { + return ah.modernHelper.OffsetLocation(exprID) +} + +// NewError associates an error message with a given expression id. +func (ah *adaptingHelper) NewError(exprID int64, message string) *Error { + return ah.modernHelper.NewError(exprID, message) +} + +func mustAdaptToExprs(exprs []*exprpb.Expr) []ast.Expr { + adapted := make([]ast.Expr, len(exprs)) + for i, e := range exprs { + adapted[i] = mustAdaptToExpr(e) + } + return adapted +} + +func mustAdaptToExpr(e *exprpb.Expr) ast.Expr { + out, _ := adaptToExpr(e) + return out +} + +func adaptToExpr(e *exprpb.Expr) (ast.Expr, *Error) { + if e == nil { + return nil, nil + } + out, err := ast.ProtoToExpr(e) + if err != nil { + return nil, wrapErr(e.GetId(), "proto conversion failure", err) + } + return out, nil +} + +func mustAdaptToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) ast.EntryExpr { + out, _ := ast.ProtoToEntryExpr(e) + return out +} + +func mustAdaptToProto(e ast.Expr) *exprpb.Expr { + out, _ := adaptToProto(e) + return out +} + +func adaptToProto(e ast.Expr) (*exprpb.Expr, *Error) { + if e == nil { + return nil, nil + } + out, err := ast.ExprToProto(e) + if err != nil { + return nil, wrapErr(e.ID(), "expr conversion failure", err) + } + return out, nil +} + +func mustAdaptToProtoEntry(e ast.EntryExpr) *exprpb.Expr_CreateStruct_Entry { + out, _ := ast.EntryExprToProto(e) + return out +} + +func toParserHelper(meh MacroExprHelper) (parser.ExprHelper, *Error) { + ah, ok := meh.(*adaptingHelper) + if !ok { + return nil, common.NewError(0, + fmt.Sprintf("unsupported macro helper: %v (%T)", meh, meh), + common.NoLocation) + } + return ah.modernHelper, nil +} diff --git a/constraint/vendor/github.com/google/cel-go/cel/optimizer.go b/constraint/vendor/github.com/google/cel-go/cel/optimizer.go new file mode 100644 index 000000000..c149abb70 --- /dev/null +++ b/constraint/vendor/github.com/google/cel-go/cel/optimizer.go @@ -0,0 +1,535 @@ +// Copyright 2023 Google LLC +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "sort" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// StaticOptimizer contains a sequence of ASTOptimizer instances which will be applied in order. +// +// The static optimizer normalizes expression ids and type-checking run between optimization +// passes to ensure that the final optimized output is a valid expression with metadata consistent +// with what would have been generated from a parsed and checked expression. +// +// Note: source position information is best-effort and likely wrong, but optimized expressions +// should be suitable for calls to parser.Unparse. +type StaticOptimizer struct { + optimizers []ASTOptimizer +} + +// NewStaticOptimizer creates a StaticOptimizer with a sequence of ASTOptimizer's to be applied +// to a checked expression. +func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer { + return &StaticOptimizer{ + optimizers: optimizers, + } +} + +// Optimize applies a sequence of optimizations to an Ast within a given environment. +// +// If issues are encountered, the Issues.Err() return value will be non-nil. +func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) { + // Make a copy of the AST to be optimized. + optimized := ast.Copy(a.impl) + ids := newIDGenerator(ast.MaxID(a.impl)) + + // Create the optimizer context, could be pooled in the future. + issues := NewIssues(common.NewErrors(a.Source())) + baseFac := ast.NewExprFactory() + exprFac := &optimizerExprFactory{ + idGenerator: ids, + fac: baseFac, + sourceInfo: optimized.SourceInfo(), + } + ctx := &OptimizerContext{ + optimizerExprFactory: exprFac, + Env: env, + Issues: issues, + } + + // Apply the optimizations sequentially. + for _, o := range opt.optimizers { + optimized = o.Optimize(ctx, optimized) + if issues.Err() != nil { + return nil, issues + } + // Normalize expression id metadata including coordination with macro call metadata. + freshIDGen := newIDGenerator(0) + info := optimized.SourceInfo() + expr := optimized.Expr() + normalizeIDs(freshIDGen.renumberStable, expr, info) + cleanupMacroRefs(expr, info) + + // Recheck the updated expression for any possible type-agreement or validation errors. + parsed := &Ast{ + source: a.Source(), + impl: ast.NewAST(expr, info)} + checked, iss := ctx.Check(parsed) + if iss.Err() != nil { + return nil, iss + } + optimized = checked.impl + } + + // Return the optimized result. + return &Ast{ + source: a.Source(), + impl: optimized, + }, nil +} + +// normalizeIDs ensures that the metadata present with an AST is reset in a manner such +// that the ids within the expression correspond to the ids within macros. 
+func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) { + optimized.RenumberIDs(idGen) + if len(info.MacroCalls()) == 0 { + return + } + + // Sort the macro ids to make sure that the renumbering of macro-specific variables + // is stable across normalization calls. + sortedMacroIDs := []int64{} + for id := range info.MacroCalls() { + sortedMacroIDs = append(sortedMacroIDs, id) + } + sort.Slice(sortedMacroIDs, func(i, j int) bool { return sortedMacroIDs[i] < sortedMacroIDs[j] }) + + // First, update the macro call ids themselves. + callIDMap := map[int64]int64{} + for _, id := range sortedMacroIDs { + callIDMap[id] = idGen(id) + } + // Then update the macro call definitions which refer to these ids, but + // ensure that the updates don't collide and remove macro entries which haven't + // been visited / updated yet. + type macroUpdate struct { + id int64 + call ast.Expr + } + macroUpdates := []macroUpdate{} + for _, oldID := range sortedMacroIDs { + newID := callIDMap[oldID] + call, found := info.GetMacroCall(oldID) + if !found { + continue + } + call.RenumberIDs(idGen) + macroUpdates = append(macroUpdates, macroUpdate{id: newID, call: call}) + info.ClearMacroCall(oldID) + } + for _, u := range macroUpdates { + info.SetMacroCall(u.id, u.call) + } +} + +func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) { + if len(info.MacroCalls()) == 0 { + return + } + + // Sanitize the macro call references once the optimized expression has been computed + // and the ids normalized between the expression and the macros. + exprRefMap := make(map[int64]struct{}) + ast.PostOrderVisit(expr, ast.NewExprVisitor(func(e ast.Expr) { + if e.ID() == 0 { + return + } + exprRefMap[e.ID()] = struct{}{} + })) + // Update the macro call id references to ensure that macro pointers are + // updated consistently across macros. + for _, call := range info.MacroCalls() { + ast.PostOrderVisit(call, ast.NewExprVisitor(func(e ast.Expr) { + if e.ID() == 0 { + return + } + exprRefMap[e.ID()] = struct{}{} + })) + } + for id := range info.MacroCalls() { + if _, found := exprRefMap[id]; !found { + info.ClearMacroCall(id) + } + } +} + +// newIDGenerator ensures that new ids are only created the first time they are encountered. +func newIDGenerator(seed int64) *idGenerator { + return &idGenerator{ + idMap: make(map[int64]int64), + seed: seed, + } +} + +type idGenerator struct { + idMap map[int64]int64 + seed int64 +} + +func (gen *idGenerator) nextID() int64 { + gen.seed++ + return gen.seed +} + +func (gen *idGenerator) renumberStable(id int64) int64 { + if id == 0 { + return 0 + } + if newID, found := gen.idMap[id]; found { + return newID + } + nextID := gen.nextID() + gen.idMap[id] = nextID + return nextID +} + +// OptimizerContext embeds Env and Issues instances to make it easy to type-check and evaluate +// subexpressions and report any errors encountered along the way. The context also embeds the +// optimizerExprFactory which can be used to generate new sub-expressions with expression ids +// consistent with the expectations of a parsed expression. +type OptimizerContext struct { + *Env + *optimizerExprFactory + *Issues +} + +// ExtendEnv auguments the context's environment with the additional options. +func (opt *OptimizerContext) ExtendEnv(opts ...EnvOption) error { + e, err := opt.Env.Extend(opts...) + if err != nil { + return err + } + opt.Env = e + return nil +} + +// ASTOptimizer applies an optimization over an AST and returns the optimized result. 
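For reference, a minimal custom pass implementing the ASTOptimizer interface declared immediately below (not part of the vendored patch); the banned-function rule is a hypothetical example.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/ast"
)

// bannedCallOptimizer reports an error for every call to the configured function name.
type bannedCallOptimizer struct {
	fn string
}

func (o bannedCallOptimizer) Optimize(ctx *cel.OptimizerContext, a *ast.AST) *ast.AST {
	root := ast.NavigateAST(a)
	matches := ast.MatchDescendants(root, func(e ast.NavigableExpr) bool {
		return e.Kind() == ast.CallKind && e.AsCall().FunctionName() == o.fn
	})
	for _, m := range matches {
		ctx.ReportErrorAtID(m.ID(), "calls to %s() are not permitted", o.fn)
	}
	return a
}

func main() {
	env, _ := cel.NewEnv()
	checked, iss := env.Compile(`duration("1h") < duration("2h")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	_, iss = cel.NewStaticOptimizer(bannedCallOptimizer{fn: "duration"}).Optimize(env, checked)
	if iss.Err() != nil {
		fmt.Println(iss.Err()) // both duration() calls are reported
	}
}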
+type ASTOptimizer interface { + // Optimize optimizes a type-checked AST within an Environment and accumulates any issues. + Optimize(*OptimizerContext, *ast.AST) *ast.AST +} + +type optimizerExprFactory struct { + *idGenerator + fac ast.ExprFactory + sourceInfo *ast.SourceInfo +} + +// NewAST creates an AST from the current expression using the tracked source info which +// is modified and managed by the OptimizerContext. +func (opt *optimizerExprFactory) NewAST(expr ast.Expr) *ast.AST { + return ast.NewAST(expr, opt.sourceInfo) +} + +// CopyAST creates a renumbered copy of `Expr` and `SourceInfo` values of the input AST, where the +// renumbering uses the same scheme as the core optimizer logic ensuring there are no collisions +// between copies. +// +// Use this method before attempting to merge the expression from AST into another. +func (opt *optimizerExprFactory) CopyAST(a *ast.AST) (ast.Expr, *ast.SourceInfo) { + idGen := newIDGenerator(opt.nextID()) + defer func() { opt.seed = idGen.nextID() }() + copyExpr := opt.fac.CopyExpr(a.Expr()) + copyInfo := ast.CopySourceInfo(a.SourceInfo()) + normalizeIDs(idGen.renumberStable, copyExpr, copyInfo) + return copyExpr, copyInfo +} + +// CopyASTAndMetadata copies the input AST and propagates the macro metadata into the AST being +// optimized. +func (opt *optimizerExprFactory) CopyASTAndMetadata(a *ast.AST) ast.Expr { + copyExpr, copyInfo := opt.CopyAST(a) + for macroID, call := range copyInfo.MacroCalls() { + opt.SetMacroCall(macroID, call) + } + return copyExpr +} + +// ClearMacroCall clears the macro at the given expression id. +func (opt *optimizerExprFactory) ClearMacroCall(id int64) { + opt.sourceInfo.ClearMacroCall(id) +} + +// SetMacroCall sets the macro call metadata for the given macro id within the tracked source info +// metadata. +func (opt *optimizerExprFactory) SetMacroCall(id int64, expr ast.Expr) { + opt.sourceInfo.SetMacroCall(id, expr) +} + +// MacroCalls returns the map of macro calls currently in the context. +func (opt *optimizerExprFactory) MacroCalls() map[int64]ast.Expr { + return opt.sourceInfo.MacroCalls() +} + +// NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression +// representing the unexpanded call signature to be inserted into the source info macro call metadata. +func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) { + varID := opt.nextID() + remainingID := opt.nextID() + remaining = opt.fac.CopyExpr(remaining) + remaining.RenumberIDs(func(id int64) int64 { + if id == macroID { + return remainingID + } + return id + }) + if call, exists := opt.sourceInfo.GetMacroCall(macroID); exists { + opt.SetMacroCall(remainingID, opt.fac.CopyExpr(call)) + } + + astExpr = opt.fac.NewComprehension(macroID, + opt.fac.NewList(opt.nextID(), []ast.Expr{}, []int32{}), + "#unused", + varName, + opt.fac.CopyExpr(varInit), + opt.fac.NewLiteral(opt.nextID(), types.False), + opt.fac.NewIdent(varID, varName), + remaining) + + macroExpr = opt.fac.NewMemberCall(0, "bind", + opt.fac.NewIdent(opt.nextID(), "cel"), + opt.fac.NewIdent(varID, varName), + opt.fac.CopyExpr(varInit), + opt.fac.CopyExpr(remaining)) + opt.sanitizeMacro(macroID, macroExpr) + return +} + +// NewCall creates a global function call invocation expression. 
+// +// Example: +// +// countByField(list, fieldName) +// - function: countByField +// - args: [list, fieldName] +func (opt *optimizerExprFactory) NewCall(function string, args ...ast.Expr) ast.Expr { + return opt.fac.NewCall(opt.nextID(), function, args...) +} + +// NewMemberCall creates a member function call invocation expression where 'target' is the receiver of the call. +// +// Example: +// +// list.countByField(fieldName) +// - function: countByField +// - target: list +// - args: [fieldName] +func (opt *optimizerExprFactory) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return opt.fac.NewMemberCall(opt.nextID(), function, target, args...) +} + +// NewIdent creates a new identifier expression. +// +// Examples: +// +// - simple_var_name +// - qualified.subpackage.var_name +func (opt *optimizerExprFactory) NewIdent(name string) ast.Expr { + return opt.fac.NewIdent(opt.nextID(), name) +} + +// NewLiteral creates a new literal expression value. +// +// The range of valid values for a literal generated during optimization is different than for expressions +// generated via parsing / type-checking, as the ref.Val may be _any_ CEL value so long as the value can +// be converted back to a literal-like form. +func (opt *optimizerExprFactory) NewLiteral(value ref.Val) ast.Expr { + return opt.fac.NewLiteral(opt.nextID(), value) +} + +// NewList creates a list expression with a set of optional indices. +// +// Examples: +// +// [a, b] +// - elems: [a, b] +// - optIndices: [] +// +// [a, ?b, ?c] +// - elems: [a, b, c] +// - optIndices: [1, 2] +func (opt *optimizerExprFactory) NewList(elems []ast.Expr, optIndices []int32) ast.Expr { + return opt.fac.NewList(opt.nextID(), elems, optIndices) +} + +// NewMap creates a map from a set of entry expressions which contain a key and value expression. +func (opt *optimizerExprFactory) NewMap(entries []ast.EntryExpr) ast.Expr { + return opt.fac.NewMap(opt.nextID(), entries) +} + +// NewMapEntry creates a map entry with a key and value expression and a flag to indicate whether the +// entry is optional. +// +// Examples: +// +// {a: b} +// - key: a +// - value: b +// - optional: false +// +// {?a: ?b} +// - key: a +// - value: b +// - optional: true +func (opt *optimizerExprFactory) NewMapEntry(key, value ast.Expr, isOptional bool) ast.EntryExpr { + return opt.fac.NewMapEntry(opt.nextID(), key, value, isOptional) +} + +// NewHasMacro generates a test-only select expression to be included within an AST and an unexpanded +// has() macro call signature to be inserted into the source info macro call metadata. +func (opt *optimizerExprFactory) NewHasMacro(macroID int64, s ast.Expr) (astExpr, macroExpr ast.Expr) { + sel := s.AsSelect() + astExpr = opt.fac.NewPresenceTest(macroID, sel.Operand(), sel.FieldName()) + macroExpr = opt.fac.NewCall(0, "has", + opt.NewSelect(opt.fac.CopyExpr(sel.Operand()), sel.FieldName())) + opt.sanitizeMacro(macroID, macroExpr) + return +} + +// NewSelect creates a select expression where a field value is selected from an operand. +// +// Example: +// +// msg.field_name +// - operand: msg +// - field: field_name +func (opt *optimizerExprFactory) NewSelect(operand ast.Expr, field string) ast.Expr { + return opt.fac.NewSelect(opt.nextID(), operand, field) +} + +// NewStruct creates a new typed struct value with an set of field initializations. 
+// +// Example: +// +// pkg.TypeName{field: value} +// - typeName: pkg.TypeName +// - fields: [{field: value}] +func (opt *optimizerExprFactory) NewStruct(typeName string, fields []ast.EntryExpr) ast.Expr { + return opt.fac.NewStruct(opt.nextID(), typeName, fields) +} + +// NewStructField creates a struct field initialization. +// +// Examples: +// +// {count: 3u} +// - field: count +// - value: 3u +// - optional: false +// +// {?count: x} +// - field: count +// - value: x +// - optional: true +func (opt *optimizerExprFactory) NewStructField(field string, value ast.Expr, isOptional bool) ast.EntryExpr { + return opt.fac.NewStructField(opt.nextID(), field, value, isOptional) +} + +// UpdateExpr updates the target expression with the updated content while preserving macro metadata. +// +// There are four scenarios during the update to consider: +// 1. target is not macro, updated is not macro +// 2. target is macro, updated is not macro +// 3. target is macro, updated is macro +// 4. target is not macro, updated is macro +// +// When the target is a macro already, it may either be updated to a new macro function +// body if the update is also a macro, or it may be removed altogether if the update is +// a macro. +// +// When the update is a macro, then the target references within other macros must be +// updated to point to the new updated macro. Otherwise, other macros which pointed to +// the target body must be replaced with copies of the updated expression body. +func (opt *optimizerExprFactory) UpdateExpr(target, updated ast.Expr) { + // Update the expression + target.SetKindCase(updated) + + // Early return if there's no macros present sa the source info reflects the + // macro set from the target and updated expressions. + if len(opt.sourceInfo.MacroCalls()) == 0 { + return + } + // Determine whether the target expression was a macro. + _, targetIsMacro := opt.sourceInfo.GetMacroCall(target.ID()) + + // Determine whether the updated expression was a macro. + updatedMacro, updatedIsMacro := opt.sourceInfo.GetMacroCall(updated.ID()) + + if updatedIsMacro { + // If the updated call was a macro, then updated id maps to target id, + // and the updated macro moves into the target id slot. + opt.sourceInfo.ClearMacroCall(updated.ID()) + opt.sourceInfo.SetMacroCall(target.ID(), updatedMacro) + } else if targetIsMacro { + // Otherwise if the target expr was a macro, but is no longer, clear + // the macro reference. + opt.sourceInfo.ClearMacroCall(target.ID()) + } + + // Punch holes in the updated value where macros references exist. + macroExpr := opt.fac.CopyExpr(target) + macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) { + if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists { + e.SetKindCase(nil) + } + }) + ast.PostOrderVisit(macroExpr, macroRefVisitor) + + // Update any references to the expression within a macro + macroVisitor := ast.NewExprVisitor(func(call ast.Expr) { + // Update the target expression to point to the macro expression which + // will be empty if the updated expression was a macro. + if call.ID() == target.ID() { + call.SetKindCase(opt.fac.CopyExpr(macroExpr)) + } + // Update the macro call expression if it refers to the updated expression + // id which has since been remapped to the target id. + if call.ID() == updated.ID() { + // Either ensure the expression is a macro reference or a populated with + // the relevant sub-expression if the updated expr was not a macro. 
+ if updatedIsMacro { + call.SetKindCase(nil) + } else { + call.SetKindCase(opt.fac.CopyExpr(macroExpr)) + } + // Since SetKindCase does not renumber the id, ensure the references to + // the old 'updated' id are mapped to the target id. + call.RenumberIDs(func(id int64) int64 { + if id == updated.ID() { + return target.ID() + } + return id + }) + } + }) + for _, call := range opt.sourceInfo.MacroCalls() { + ast.PostOrderVisit(call, macroVisitor) + } +} + +func (opt *optimizerExprFactory) sanitizeMacro(macroID int64, macroExpr ast.Expr) { + macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) { + if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists && e.ID() != macroID { + e.SetKindCase(nil) + } + }) + ast.PostOrderVisit(macroExpr, macroRefVisitor) +} diff --git a/constraint/vendor/github.com/google/cel-go/cel/options.go b/constraint/vendor/github.com/google/cel-go/cel/options.go index 05867730d..69c694263 100644 --- a/constraint/vendor/github.com/google/cel-go/cel/options.go +++ b/constraint/vendor/github.com/google/cel-go/cel/options.go @@ -61,6 +61,10 @@ const ( // compressing the logic graph to a single call when multiple like-operator // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d]) featureVariadicLogicalASTs + + // Enable error generation when a presence test or optional field selection is + // performed on a primitive type. + featureEnableErrorOnBadPresenceTest ) // EnvOption is a functional interface for configuring the environment. @@ -243,6 +247,13 @@ func Abbrevs(qualifiedNames ...string) EnvOption { } } +// customTypeRegistry is an internal-only interface containing the minimum methods required to support +// custom types. It is a subset of methods from ref.TypeRegistry. +type customTypeRegistry interface { + RegisterDescriptor(protoreflect.FileDescriptor) error + RegisterType(...ref.Type) error +} + // Types adds one or more type declarations to the environment, allowing for construction of // type-literals whose definitions are included in the common expression built-in set. // @@ -255,12 +266,7 @@ func Abbrevs(qualifiedNames ...string) EnvOption { // Note: This option must be specified after the CustomTypeProvider option when used together. func Types(addTypes ...any) EnvOption { return func(e *Env) (*Env, error) { - var reg ref.TypeRegistry - var isReg bool - reg, isReg = e.provider.(*types.Registry) - if !isReg { - reg, isReg = e.provider.(ref.TypeRegistry) - } + reg, isReg := e.provider.(customTypeRegistry) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -297,7 +303,7 @@ func Types(addTypes ...any) EnvOption { // extension or by re-using the same EnvOption with another NewEnv() call. 
func TypeDescs(descs ...any) EnvOption { return func(e *Env) (*Env, error) { - reg, isReg := e.provider.(ref.TypeRegistry) + reg, isReg := e.provider.(customTypeRegistry) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -345,7 +351,7 @@ func TypeDescs(descs ...any) EnvOption { } } -func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) error { +func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error { files, err := protodesc.NewFiles(fileSet) if err != nil { return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err) @@ -353,7 +359,7 @@ func registerFileSet(reg ref.TypeRegistry, fileSet *descpb.FileDescriptorSet) er return registerFiles(reg, files) } -func registerFiles(reg ref.TypeRegistry, files *protoregistry.Files) error { +func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error { var err error files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { err = reg.RegisterDescriptor(fd) @@ -448,6 +454,8 @@ const ( OptTrackCost EvalOption = 1 << iota // OptCheckStringFormat enables compile-time checking of string.format calls for syntax/cardinality. + // + // Deprecated: use ext.StringsValidateFormatCalls() as this option is now a no-op. OptCheckStringFormat EvalOption = 1 << iota ) diff --git a/constraint/vendor/github.com/google/cel-go/cel/program.go b/constraint/vendor/github.com/google/cel-go/cel/program.go index 2dd72f750..6f477afc9 100644 --- a/constraint/vendor/github.com/google/cel-go/cel/program.go +++ b/constraint/vendor/github.com/google/cel-go/cel/program.go @@ -19,7 +19,7 @@ import ( "fmt" "sync" - celast "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" @@ -152,7 +152,7 @@ func (p *prog) clone() *prog { // ProgramOption values. // // If the program cannot be configured the prog will be nil, with a non-nil error response. -func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { +func newProgram(e *Env, a *ast.AST, opts []ProgramOption) (Program, error) { // Build the dispatcher, interpreter, and default program value. disp := interpreter.NewDispatcher() @@ -188,10 +188,13 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { // Set the attribute factory after the options have been set. var attrFactory interpreter.AttributeFactory + attrFactorOpts := []interpreter.AttrFactoryOption{ + interpreter.EnableErrorOnBadPresenceTest(p.HasFeature(featureEnableErrorOnBadPresenceTest)), + } if p.evalOpts&OptPartialEval == OptPartialEval { - attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider) + attrFactory = interpreter.NewPartialAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) } else { - attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider) + attrFactory = interpreter.NewAttributeFactory(e.Container, e.adapter, e.provider, attrFactorOpts...) } interp := interpreter.NewInterpreter(disp, e.Container, e.provider, e.adapter, attrFactory) p.interpreter = interp @@ -213,34 +216,6 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { if len(p.regexOptimizations) > 0 { decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...)) } - // Enable compile-time checking of syntax/cardinality for string.format calls. 
- if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat { - var isValidType func(id int64, validTypes ...ref.Type) (bool, error) - if ast.IsChecked() { - isValidType = func(id int64, validTypes ...ref.Type) (bool, error) { - t := ast.typeMap[id] - if t.Kind() == DynKind { - return true, nil - } - for _, vt := range validTypes { - k, err := typeValueToKind(vt) - if err != nil { - return false, err - } - if t.Kind() == k { - return true, nil - } - } - return false, nil - } - } else { - // if the AST isn't type-checked, short-circuit validation - isValidType = func(id int64, validTypes ...ref.Type) (bool, error) { - return true, nil - } - } - decorators = append(decorators, interpreter.InterpolateFormattedString(isValidType)) - } // Enable exhaustive eval, state tracking and cost tracking last since they require a factory. if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 { @@ -274,33 +249,16 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { decs = append(decs, interpreter.Observe(observers...)) } - return p.clone().initInterpretable(ast, decs) + return p.clone().initInterpretable(a, decs) } return newProgGen(factory) } - return p.initInterpretable(ast, decorators) + return p.initInterpretable(a, decorators) } -func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) { - // Unchecked programs do not contain type and reference information and may be slower to execute. - if !ast.IsChecked() { - interpretable, err := - p.interpreter.NewUncheckedInterpretable(ast.Expr(), decs...) - if err != nil { - return nil, err - } - p.interpretable = interpretable - return p, nil - } - - // When the AST has been checked it contains metadata that can be used to speed up program execution. - checked := &celast.CheckedAST{ - Expr: ast.Expr(), - SourceInfo: ast.SourceInfo(), - TypeMap: ast.typeMap, - ReferenceMap: ast.refMap, - } - interpretable, err := p.interpreter.NewInterpretable(checked, decs...) +func (p *prog) initInterpretable(a *ast.AST, decs []interpreter.InterpretableDecorator) (*prog, error) { + // When the AST has been exprAST it contains metadata that can be used to speed up program execution. + interpretable, err := p.interpreter.NewInterpretable(a, decs...) if err != nil { return nil, err } @@ -580,8 +538,6 @@ func (p *evalActivationPool) Put(value any) { } var ( - emptyEvalState = interpreter.NewEvalState() - // activationPool is an internally managed pool of Activation values that wrap map[string]any inputs activationPool = newEvalActivationPool() diff --git a/constraint/vendor/github.com/google/cel-go/cel/validator.go b/constraint/vendor/github.com/google/cel-go/cel/validator.go index 78b311381..b50c67452 100644 --- a/constraint/vendor/github.com/google/cel-go/cel/validator.go +++ b/constraint/vendor/github.com/google/cel-go/cel/validator.go @@ -21,8 +21,6 @@ import ( "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/overloads" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) const ( @@ -69,7 +67,7 @@ type ASTValidator interface { // // See individual validators for more information on their configuration keys and configuration // properties. - Validate(*Env, ValidatorConfig, *ast.CheckedAST, *Issues) + Validate(*Env, ValidatorConfig, *ast.AST, *Issues) } // ValidatorConfig provides an accessor method for querying validator configuration state. 
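For context on the ASTValidator change above: Validate now receives the unified *ast.AST instead of *ast.CheckedAST, so node types are read from the AST's type map rather than from NavigableExpr.Type(). The following is only an illustrative sketch of a custom validator written against the new signature; it is not part of the vendored diff, the validator name and package are hypothetical, and it relies on APIs visible elsewhere in this change (NavigateAST, MatchDescendants, KindMatcher, GetType) plus Issues.ReportErrorAtID, which is assumed to behave as in current cel-go.

    package example

    import (
    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/common/ast"
    	"github.com/google/cel-go/common/types"
    )

    // noDynListValidator is a hypothetical validator that flags dyn-typed list elements.
    type noDynListValidator struct{}

    func (noDynListValidator) Name() string { return "example.validate.no_dyn_list" }

    // Validate reads element types from the AST's type map (a.GetType) rather than
    // from the navigable expression nodes, matching the new *ast.AST signature.
    func (noDynListValidator) Validate(_ *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) {
    	root := ast.NavigateAST(a)
    	for _, listExpr := range ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind)) {
    		for _, elem := range listExpr.AsList().Elements() {
    			if a.GetType(elem.ID()).Kind() == types.DynKind {
    				iss.ReportErrorAtID(elem.ID(), "dyn-typed list element")
    			}
    		}
    	}
    }

Such a validator would typically be attached via the cel.ASTValidators(...) environment option; nothing in this diff depends on the sketch.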
@@ -180,7 +178,7 @@ func ValidateComprehensionNestingLimit(limit int) ASTValidator { return nestingLimitValidator{limit: limit} } -type argChecker func(env *Env, call, arg ast.NavigableExpr) error +type argChecker func(env *Env, call, arg ast.Expr) error func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator { return formatValidator{ @@ -203,8 +201,8 @@ func (v formatValidator) Name() string { // Validate searches the AST for uses of a given function name with a constant argument and performs a check // on whether the argument is a valid literal value. -func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) { - root := ast.NavigateCheckedAST(a) +func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) { + root := ast.NavigateAST(a) funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName)) for _, call := range funcCalls { callArgs := call.AsCall().Args() @@ -221,8 +219,8 @@ func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, } } -func evalCall(env *Env, call, arg ast.NavigableExpr) error { - ast := ParsedExprToAst(&exprpb.ParsedExpr{Expr: call.ToExpr()}) +func evalCall(env *Env, call, arg ast.Expr) error { + ast := &Ast{impl: ast.NewAST(call, ast.NewSourceInfo(nil))} prg, err := env.Program(ast) if err != nil { return err @@ -231,7 +229,7 @@ func evalCall(env *Env, call, arg ast.NavigableExpr) error { return err } -func compileRegex(_ *Env, _, arg ast.NavigableExpr) error { +func compileRegex(_ *Env, _, arg ast.Expr) error { pattern := arg.AsLiteral().Value().(string) _, err := regexp.Compile(pattern) return err @@ -244,25 +242,14 @@ func (homogeneousAggregateLiteralValidator) Name() string { return homogeneousValidatorName } -// Configure implements the ASTValidatorConfigurer interface and currently sets the list of standard -// and exempt functions from homogeneous aggregate literal checks. -// -// TODO: Move this call into the string.format() ASTValidator once ported. -func (homogeneousAggregateLiteralValidator) Configure(c MutableValidatorConfig) error { - emptyList := []string{} - exemptFunctions := c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, emptyList).([]string) - exemptFunctions = append(exemptFunctions, "format") - return c.Set(HomogeneousAggregateLiteralExemptFunctions, exemptFunctions) -} - // Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types. // // This validator makes an exception for list and map literals which occur at any level of nesting within // string format calls. 
-func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.CheckedAST, iss *Issues) { +func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.AST, iss *Issues) { var exemptedFunctions []string exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string) - root := ast.NavigateCheckedAST(a) + root := ast.NavigateAST(a) listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind)) for _, listExpr := range listExprs { if inExemptFunction(listExpr, exemptedFunctions) { @@ -273,7 +260,7 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig optIndices := l.OptionalIndices() var elemType *Type for i, e := range elements { - et := e.Type() + et := a.GetType(e.ID()) if isOptionalIndex(i, optIndices) { et = et.Parameters()[0] } @@ -296,9 +283,10 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig entries := m.Entries() var keyType, valType *Type for _, e := range entries { - key, val := e.Key(), e.Value() - kt, vt := key.Type(), val.Type() - if e.IsOptional() { + mapEntry := e.AsMapEntry() + key, val := mapEntry.Key(), mapEntry.Value() + kt, vt := a.GetType(key.ID()), a.GetType(val.ID()) + if mapEntry.IsOptional() { vt = vt.Parameters()[0] } if keyType == nil && valType == nil { @@ -316,7 +304,8 @@ func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig } func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool { - if parent, found := e.Parent(); found { + parent, found := e.Parent() + for found { if parent.Kind() == ast.CallKind { fnName := parent.AsCall().FunctionName() for _, exempt := range exemptFunctions { @@ -325,9 +314,7 @@ func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool { } } } - if parent.Kind() == ast.ListKind || parent.Kind() == ast.MapKind { - return inExemptFunction(parent, exemptFunctions) - } + parent, found = parent.Parent() } return false } @@ -353,8 +340,8 @@ func (v nestingLimitValidator) Name() string { return "cel.lib.std.validate.comprehension_nesting_limit" } -func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) { - root := ast.NavigateCheckedAST(a) +func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) { + root := ast.NavigateAST(a) comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) if len(comprehensions) <= v.limit { return diff --git a/constraint/vendor/github.com/google/cel-go/checker/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/checker/BUILD.bazel index 0459d3523..678b412a9 100644 --- a/constraint/vendor/github.com/google/cel-go/checker/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/checker/BUILD.bazel @@ -16,7 +16,6 @@ go_library( "options.go", "printer.go", "scopes.go", - "standard.go", "types.go", ], importpath = "github.com/google/cel-go/checker", @@ -60,7 +59,6 @@ go_test( "//test:go_default_library", "//test/proto2pb:go_default_library", "//test/proto3pb:go_default_library", - "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", ], ) diff --git a/constraint/vendor/github.com/google/cel-go/checker/checker.go b/constraint/vendor/github.com/google/cel-go/checker/checker.go index 720e4fa96..0603cfa30 100644 --- a/constraint/vendor/github.com/google/cel-go/checker/checker.go +++ 
b/constraint/vendor/github.com/google/cel-go/checker/checker.go @@ -18,6 +18,7 @@ package checker import ( "fmt" + "reflect" "github.com/google/cel-go/common" "github.com/google/cel-go/common/ast" @@ -25,139 +26,98 @@ import ( "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/types" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/types/ref" ) type checker struct { + *ast.AST + ast.ExprFactory env *Env errors *typeErrors mappings *mapping freeTypeVarCounter int - sourceInfo *exprpb.SourceInfo - types map[int64]*types.Type - references map[int64]*ast.ReferenceInfo } // Check performs type checking, giving a typed AST. -// The input is a ParsedExpr proto and an env which encapsulates -// type binding of variables, declarations of built-in functions, -// descriptions of protocol buffers, and a registry for errors. -// Returns a CheckedExpr proto, which might not be usable if -// there are errors in the error registry. -func Check(parsedExpr *exprpb.ParsedExpr, source common.Source, env *Env) (*ast.CheckedAST, *common.Errors) { +// +// The input is a parsed AST and an env which encapsulates type binding of variables, +// declarations of built-in functions, descriptions of protocol buffers, and a registry for +// errors. +// +// Returns a type-checked AST, which might not be usable if there are errors in the error +// registry. +func Check(parsed *ast.AST, source common.Source, env *Env) (*ast.AST, *common.Errors) { errs := common.NewErrors(source) + typeMap := make(map[int64]*types.Type) + refMap := make(map[int64]*ast.ReferenceInfo) c := checker{ + AST: ast.NewCheckedAST(parsed, typeMap, refMap), + ExprFactory: ast.NewExprFactory(), env: env, errors: &typeErrors{errs: errs}, mappings: newMapping(), freeTypeVarCounter: 0, - sourceInfo: parsedExpr.GetSourceInfo(), - types: make(map[int64]*types.Type), - references: make(map[int64]*ast.ReferenceInfo), } - c.check(parsedExpr.GetExpr()) + c.check(c.Expr()) - // Walk over the final type map substituting any type parameters either by their bound value or - // by DYN. - m := make(map[int64]*types.Type) - for id, t := range c.types { - m[id] = substitute(c.mappings, t, true) + // Walk over the final type map substituting any type parameters either by their bound value + // or by DYN. 
+ for id, t := range c.TypeMap() { + c.SetType(id, substitute(c.mappings, t, true)) } - - return &ast.CheckedAST{ - Expr: parsedExpr.GetExpr(), - SourceInfo: parsedExpr.GetSourceInfo(), - TypeMap: m, - ReferenceMap: c.references, - }, errs + return c.AST, errs } -func (c *checker) check(e *exprpb.Expr) { +func (c *checker) check(e ast.Expr) { if e == nil { return } - switch e.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - literal := e.GetConstExpr() - switch literal.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - c.checkBoolLiteral(e) - case *exprpb.Constant_BytesValue: - c.checkBytesLiteral(e) - case *exprpb.Constant_DoubleValue: - c.checkDoubleLiteral(e) - case *exprpb.Constant_Int64Value: - c.checkInt64Literal(e) - case *exprpb.Constant_NullValue: - c.checkNullLiteral(e) - case *exprpb.Constant_StringValue: - c.checkStringLiteral(e) - case *exprpb.Constant_Uint64Value: - c.checkUint64Literal(e) + switch e.Kind() { + case ast.LiteralKind: + literal := ref.Val(e.AsLiteral()) + switch literal.Type() { + case types.BoolType, types.BytesType, types.DoubleType, types.IntType, + types.NullType, types.StringType, types.UintType: + c.setType(e, literal.Type().(*types.Type)) + default: + c.errors.unexpectedASTType(e.ID(), c.location(e), "literal", literal.Type().TypeName()) } - case *exprpb.Expr_IdentExpr: + case ast.IdentKind: c.checkIdent(e) - case *exprpb.Expr_SelectExpr: + case ast.SelectKind: c.checkSelect(e) - case *exprpb.Expr_CallExpr: + case ast.CallKind: c.checkCall(e) - case *exprpb.Expr_ListExpr: + case ast.ListKind: c.checkCreateList(e) - case *exprpb.Expr_StructExpr: + case ast.MapKind: + c.checkCreateMap(e) + case ast.StructKind: c.checkCreateStruct(e) - case *exprpb.Expr_ComprehensionExpr: + case ast.ComprehensionKind: c.checkComprehension(e) default: - c.errors.unexpectedASTType(e.GetId(), c.location(e), e) + c.errors.unexpectedASTType(e.ID(), c.location(e), "unspecified", reflect.TypeOf(e).Name()) } } -func (c *checker) checkInt64Literal(e *exprpb.Expr) { - c.setType(e, types.IntType) -} - -func (c *checker) checkUint64Literal(e *exprpb.Expr) { - c.setType(e, types.UintType) -} - -func (c *checker) checkStringLiteral(e *exprpb.Expr) { - c.setType(e, types.StringType) -} - -func (c *checker) checkBytesLiteral(e *exprpb.Expr) { - c.setType(e, types.BytesType) -} - -func (c *checker) checkDoubleLiteral(e *exprpb.Expr) { - c.setType(e, types.DoubleType) -} - -func (c *checker) checkBoolLiteral(e *exprpb.Expr) { - c.setType(e, types.BoolType) -} - -func (c *checker) checkNullLiteral(e *exprpb.Expr) { - c.setType(e, types.NullType) -} - -func (c *checker) checkIdent(e *exprpb.Expr) { - identExpr := e.GetIdentExpr() +func (c *checker) checkIdent(e ast.Expr) { + identName := e.AsIdent() // Check to see if the identifier is declared. - if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil { + if ident := c.env.LookupIdent(identName); ident != nil { c.setType(e, ident.Type()) c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) // Overwrite the identifier with its fully qualified name. 
- identExpr.Name = ident.Name() + e.SetKindCase(c.NewIdent(e.ID(), ident.Name())) return } c.setType(e, types.ErrorType) - c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), identExpr.GetName()) + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), identName) } -func (c *checker) checkSelect(e *exprpb.Expr) { - sel := e.GetSelectExpr() +func (c *checker) checkSelect(e ast.Expr) { + sel := e.AsSelect() // Before traversing down the tree, try to interpret as qualified name. qname, found := containers.ToQualifiedName(e) if found { @@ -170,31 +130,26 @@ func (c *checker) checkSelect(e *exprpb.Expr) { // variable name. c.setType(e, ident.Type()) c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) - identName := ident.Name() - e.ExprKind = &exprpb.Expr_IdentExpr{ - IdentExpr: &exprpb.Expr_Ident{ - Name: identName, - }, - } + e.SetKindCase(c.NewIdent(e.ID(), ident.Name())) return } } - resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false) - if sel.TestOnly { + resultType := c.checkSelectField(e, sel.Operand(), sel.FieldName(), false) + if sel.IsTestOnly() { resultType = types.BoolType } c.setType(e, substitute(c.mappings, resultType, false)) } -func (c *checker) checkOptSelect(e *exprpb.Expr) { +func (c *checker) checkOptSelect(e ast.Expr) { // Collect metadata related to the opt select call packaged by the parser. - call := e.GetCallExpr() - operand := call.GetArgs()[0] - field := call.GetArgs()[1] + call := e.AsCall() + operand := call.Args()[0] + field := call.Args()[1] fieldName, isString := maybeUnwrapString(field) if !isString { - c.errors.notAnOptionalFieldSelection(field.GetId(), c.location(field), field) + c.errors.notAnOptionalFieldSelection(field.ID(), c.location(field), field) return } @@ -204,7 +159,7 @@ func (c *checker) checkOptSelect(e *exprpb.Expr) { c.setReference(e, ast.NewFunctionReference("select_optional_field")) } -func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *types.Type { +func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional bool) *types.Type { // Interpret as field selection, first traversing down the operand. c.check(operand) operandType := substitute(c.mappings, c.getType(operand), false) @@ -222,7 +177,7 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option // Objects yield their field type declaration as the selection result type, but only if // the field is defined. messageType := targetType - if fieldType, found := c.lookupFieldType(e.GetId(), messageType.TypeName(), field); found { + if fieldType, found := c.lookupFieldType(e.ID(), messageType.TypeName(), field); found { resultType = fieldType } case types.TypeParamKind: @@ -236,7 +191,7 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option // Dynamic / error values are treated as DYN type. Errors are handled this way as well // in order to allow forward progress on the check. if !isDynOrError(targetType) { - c.errors.typeDoesNotSupportFieldSelection(e.GetId(), c.location(e), targetType) + c.errors.typeDoesNotSupportFieldSelection(e.ID(), c.location(e), targetType) } resultType = types.DynType } @@ -248,35 +203,34 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option return resultType } -func (c *checker) checkCall(e *exprpb.Expr) { +func (c *checker) checkCall(e ast.Expr) { // Note: similar logic exists within the `interpreter/planner.go`. 
If making changes here // please consider the impact on planner.go and consolidate implementations or mirror code // as appropriate. - call := e.GetCallExpr() - fnName := call.GetFunction() + call := e.AsCall() + fnName := call.FunctionName() if fnName == operators.OptSelect { c.checkOptSelect(e) return } - args := call.GetArgs() + args := call.Args() // Traverse arguments. for _, arg := range args { c.check(arg) } - target := call.GetTarget() // Regular static call with simple name. - if target == nil { + if !call.IsMemberFunction() { // Check for the existence of the function. fn := c.env.LookupFunction(fnName) if fn == nil { - c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName) + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName) c.setType(e, types.ErrorType) return } // Overwrite the function name with its fully qualified resolved name. - call.Function = fn.Name() + e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...)) // Check to see whether the overload resolves. c.resolveOverloadOrError(e, fn, nil, args) return @@ -287,6 +241,7 @@ func (c *checker) checkCall(e *exprpb.Expr) { // target a.b. // // Check whether the target is a namespaced function name. + target := call.Target() qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target) if maybeQualified { maybeQualifiedName := qualifiedPrefix + "." + fnName @@ -295,15 +250,14 @@ func (c *checker) checkCall(e *exprpb.Expr) { // The function name is namespaced and so preserving the target operand would // be an inaccurate representation of the desired evaluation behavior. // Overwrite with fully-qualified resolved function name sans receiver target. - call.Target = nil - call.Function = fn.Name() + e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...)) c.resolveOverloadOrError(e, fn, nil, args) return } } // Regular instance call. - c.check(call.Target) + c.check(target) fn := c.env.LookupFunction(fnName) // Function found, attempt overload resolution. if fn != nil { @@ -312,11 +266,11 @@ func (c *checker) checkCall(e *exprpb.Expr) { } // Function name not declared, record error. c.setType(e, types.ErrorType) - c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName) + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName) } func (c *checker) resolveOverloadOrError( - e *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) { + e ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) { // Attempt to resolve the overload. resolution := c.resolveOverload(e, fn, target, args) // No such overload, error noted in the resolveOverload call, type recorded here. 
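As the checker hunks above show, Check now accepts a parsed *ast.AST and returns a type-checked *ast.AST plus *common.Errors, with types and references stored on the AST itself instead of in separate CheckedExpr proto maps. A rough sketch of the new calling pattern follows; it is illustrative only, the helper name and variables are placeholders, and the GetErrors/ToDisplayString error accessors are assumed to match the vendored common.Errors API.

    package example

    import (
    	"fmt"

    	"github.com/google/cel-go/checker"
    	"github.com/google/cel-go/common"
    	"github.com/google/cel-go/common/ast"
    	"github.com/google/cel-go/common/types"
    )

    // typeCheck is a hypothetical helper: it feeds a parsed AST to the proto-free
    // Check entry point and reads the top-level result type off the returned AST.
    func typeCheck(parsedAST *ast.AST, src common.Source, env *checker.Env) (*types.Type, error) {
    	checked, errs := checker.Check(parsedAST, src, env)
    	if len(errs.GetErrors()) > 0 {
    		return nil, fmt.Errorf("type-check failed: %s", errs.ToDisplayString())
    	}
    	return checked.GetType(checked.Expr().ID()), nil
    }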
@@ -330,7 +284,7 @@ func (c *checker) resolveOverloadOrError( } func (c *checker) resolveOverload( - call *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution { + call ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) *overloadResolution { var argTypes []*types.Type if target != nil { @@ -362,8 +316,8 @@ func (c *checker) resolveOverload( for i, argType := range argTypes { if !c.isAssignable(argType, types.BoolType) { c.errors.typeMismatch( - args[i].GetId(), - c.locationByID(args[i].GetId()), + args[i].ID(), + c.locationByID(args[i].ID()), types.BoolType, argType) resultType = types.ErrorType @@ -408,29 +362,29 @@ func (c *checker) resolveOverload( for i, argType := range argTypes { argTypes[i] = substitute(c.mappings, argType, true) } - c.errors.noMatchingOverload(call.GetId(), c.location(call), fn.Name(), argTypes, target != nil) + c.errors.noMatchingOverload(call.ID(), c.location(call), fn.Name(), argTypes, target != nil) return nil } return newResolution(checkedRef, resultType) } -func (c *checker) checkCreateList(e *exprpb.Expr) { - create := e.GetListExpr() +func (c *checker) checkCreateList(e ast.Expr) { + create := e.AsList() var elemsType *types.Type - optionalIndices := create.GetOptionalIndices() + optionalIndices := create.OptionalIndices() optionals := make(map[int32]bool, len(optionalIndices)) for _, optInd := range optionalIndices { optionals[optInd] = true } - for i, e := range create.GetElements() { + for i, e := range create.Elements() { c.check(e) elemType := c.getType(e) if optionals[int32(i)] { var isOptional bool elemType, isOptional = maybeUnwrapOptional(elemType) if !isOptional && !isDyn(elemType) { - c.errors.typeMismatch(e.GetId(), c.location(e), types.NewOptionalType(elemType), elemType) + c.errors.typeMismatch(e.ID(), c.location(e), types.NewOptionalType(elemType), elemType) } } elemsType = c.joinTypes(e, elemsType, elemType) @@ -442,32 +396,24 @@ func (c *checker) checkCreateList(e *exprpb.Expr) { c.setType(e, types.NewListType(elemsType)) } -func (c *checker) checkCreateStruct(e *exprpb.Expr) { - str := e.GetStructExpr() - if str.GetMessageName() != "" { - c.checkCreateMessage(e) - } else { - c.checkCreateMap(e) - } -} - -func (c *checker) checkCreateMap(e *exprpb.Expr) { - mapVal := e.GetStructExpr() +func (c *checker) checkCreateMap(e ast.Expr) { + mapVal := e.AsMap() var mapKeyType *types.Type var mapValueType *types.Type - for _, ent := range mapVal.GetEntries() { - key := ent.GetMapKey() + for _, e := range mapVal.Entries() { + entry := e.AsMapEntry() + key := entry.Key() c.check(key) mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key)) - val := ent.GetValue() + val := entry.Value() c.check(val) valType := c.getType(val) - if ent.GetOptionalEntry() { + if entry.IsOptional() { var isOptional bool valType, isOptional = maybeUnwrapOptional(valType) if !isOptional && !isDyn(valType) { - c.errors.typeMismatch(val.GetId(), c.location(val), types.NewOptionalType(valType), valType) + c.errors.typeMismatch(val.ID(), c.location(val), types.NewOptionalType(valType), valType) } } mapValueType = c.joinTypes(val, mapValueType, valType) @@ -480,25 +426,28 @@ func (c *checker) checkCreateMap(e *exprpb.Expr) { c.setType(e, types.NewMapType(mapKeyType, mapValueType)) } -func (c *checker) checkCreateMessage(e *exprpb.Expr) { - msgVal := e.GetStructExpr() +func (c *checker) checkCreateStruct(e ast.Expr) { + msgVal := e.AsStruct() // Determine the type of the message. 
resultType := types.ErrorType - ident := c.env.LookupIdent(msgVal.GetMessageName()) + ident := c.env.LookupIdent(msgVal.TypeName()) if ident == nil { c.errors.undeclaredReference( - e.GetId(), c.location(e), c.env.container.Name(), msgVal.GetMessageName()) + e.ID(), c.location(e), c.env.container.Name(), msgVal.TypeName()) c.setType(e, types.ErrorType) return } // Ensure the type name is fully qualified in the AST. typeName := ident.Name() - msgVal.MessageName = typeName - c.setReference(e, ast.NewIdentReference(ident.Name(), nil)) + if msgVal.TypeName() != typeName { + e.SetKindCase(c.NewStruct(e.ID(), typeName, msgVal.Fields())) + msgVal = e.AsStruct() + } + c.setReference(e, ast.NewIdentReference(typeName, nil)) identKind := ident.Type().Kind() if identKind != types.ErrorKind { if identKind != types.TypeKind { - c.errors.notAType(e.GetId(), c.location(e), ident.Type().DeclaredTypeName()) + c.errors.notAType(e.ID(), c.location(e), ident.Type().DeclaredTypeName()) } else { resultType = ident.Type().Parameters()[0] // Backwards compatibility test between well-known types and message types @@ -509,7 +458,7 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) { } else if resultType.Kind() == types.StructKind { typeName = resultType.DeclaredTypeName() } else { - c.errors.notAMessageType(e.GetId(), c.location(e), resultType.DeclaredTypeName()) + c.errors.notAMessageType(e.ID(), c.location(e), resultType.DeclaredTypeName()) resultType = types.ErrorType } } @@ -517,45 +466,62 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) { c.setType(e, resultType) // Check the field initializers. - for _, ent := range msgVal.GetEntries() { - field := ent.GetFieldKey() - value := ent.GetValue() + for _, f := range msgVal.Fields() { + field := f.AsStructField() + fieldName := field.Name() + value := field.Value() c.check(value) fieldType := types.ErrorType - ft, found := c.lookupFieldType(ent.GetId(), typeName, field) + ft, found := c.lookupFieldType(f.ID(), typeName, fieldName) if found { fieldType = ft } valType := c.getType(value) - if ent.GetOptionalEntry() { + if field.IsOptional() { var isOptional bool valType, isOptional = maybeUnwrapOptional(valType) if !isOptional && !isDyn(valType) { - c.errors.typeMismatch(value.GetId(), c.location(value), types.NewOptionalType(valType), valType) + c.errors.typeMismatch(value.ID(), c.location(value), types.NewOptionalType(valType), valType) } } if !c.isAssignable(fieldType, valType) { - c.errors.fieldTypeMismatch(ent.GetId(), c.locationByID(ent.GetId()), field, fieldType, valType) + c.errors.fieldTypeMismatch(f.ID(), c.locationByID(f.ID()), fieldName, fieldType, valType) } } } -func (c *checker) checkComprehension(e *exprpb.Expr) { - comp := e.GetComprehensionExpr() - c.check(comp.GetIterRange()) - c.check(comp.GetAccuInit()) - accuType := c.getType(comp.GetAccuInit()) - rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false) - var varType *types.Type +func (c *checker) checkComprehension(e ast.Expr) { + comp := e.AsComprehension() + c.check(comp.IterRange()) + c.check(comp.AccuInit()) + rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false) + // Create a scope for the comprehension since it has a local accumulation variable. + // This scope will contain the accumulation variable used to compute the result. 
+ accuType := c.getType(comp.AccuInit()) + c.env = c.env.enterScope() + c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType)) + + var varType, var2Type *types.Type switch rangeType.Kind() { case types.ListKind: + // varType represents the list element type for one-variable comprehensions. varType = rangeType.Parameters()[0] + if comp.HasIterVar2() { + // varType represents the list index (int) for two-variable comprehensions, + // and var2Type represents the list element type. + var2Type = varType + varType = types.IntType + } case types.MapKind: - // Ranges over the keys. + // varType represents the map entry key for all comprehension types. varType = rangeType.Parameters()[0] + if comp.HasIterVar2() { + // var2Type represents the map entry value for two-variable comprehensions. + var2Type = rangeType.Parameters()[1] + } case types.DynKind, types.ErrorKind, types.TypeParamKind: // Set the range type to DYN to prevent assignment to a potentially incorrect type // at a later point in type-checking. The isAssignable call will update the type @@ -564,32 +530,31 @@ func (c *checker) checkComprehension(e *exprpb.Expr) { // Set the range iteration variable to type DYN as well. varType = types.DynType default: - c.errors.notAComprehensionRange(comp.GetIterRange().GetId(), c.location(comp.GetIterRange()), rangeType) + c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType) varType = types.ErrorType } - // Create a scope for the comprehension since it has a local accumulation variable. - // This scope will contain the accumulation variable used to compute the result. - c.env = c.env.enterScope() - c.env.AddIdents(decls.NewVariable(comp.GetAccuVar(), accuType)) // Create a block scope for the loop. c.env = c.env.enterScope() - c.env.AddIdents(decls.NewVariable(comp.GetIterVar(), varType)) + c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType)) + if comp.HasIterVar2() { + c.env.AddIdents(decls.NewVariable(comp.IterVar2(), var2Type)) + } // Check the variable references in the condition and step. - c.check(comp.GetLoopCondition()) - c.assertType(comp.GetLoopCondition(), types.BoolType) - c.check(comp.GetLoopStep()) - c.assertType(comp.GetLoopStep(), accuType) + c.check(comp.LoopCondition()) + c.assertType(comp.LoopCondition(), types.BoolType) + c.check(comp.LoopStep()) + c.assertType(comp.LoopStep(), accuType) // Exit the loop's block scope before checking the result. c.env = c.env.exitScope() - c.check(comp.GetResult()) + c.check(comp.Result()) // Exit the comprehension scope. c.env = c.env.exitScope() - c.setType(e, substitute(c.mappings, c.getType(comp.GetResult()), false)) + c.setType(e, substitute(c.mappings, c.getType(comp.Result()), false)) } // Checks compatibility of joined types, and returns the most general common type. 
-func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *types.Type { +func (c *checker) joinTypes(e ast.Expr, previous, current *types.Type) *types.Type { if previous == nil { return current } @@ -599,7 +564,7 @@ func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *type if c.dynAggregateLiteralElementTypesEnabled() { return types.DynType } - c.errors.typeMismatch(e.GetId(), c.location(e), previous, current) + c.errors.typeMismatch(e.ID(), c.location(e), previous, current) return types.ErrorType } @@ -633,41 +598,41 @@ func (c *checker) isAssignableList(l1, l2 []*types.Type) bool { return false } -func maybeUnwrapString(e *exprpb.Expr) (string, bool) { - switch e.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - literal := e.GetConstExpr() - switch literal.GetConstantKind().(type) { - case *exprpb.Constant_StringValue: - return literal.GetStringValue(), true +func maybeUnwrapString(e ast.Expr) (string, bool) { + switch e.Kind() { + case ast.LiteralKind: + literal := e.AsLiteral() + switch v := literal.(type) { + case types.String: + return string(v), true } } return "", false } -func (c *checker) setType(e *exprpb.Expr, t *types.Type) { - if old, found := c.types[e.GetId()]; found && !old.IsExactType(t) { - c.errors.incompatibleType(e.GetId(), c.location(e), e, old, t) +func (c *checker) setType(e ast.Expr, t *types.Type) { + if old, found := c.TypeMap()[e.ID()]; found && !old.IsExactType(t) { + c.errors.incompatibleType(e.ID(), c.location(e), e, old, t) return } - c.types[e.GetId()] = t + c.SetType(e.ID(), t) } -func (c *checker) getType(e *exprpb.Expr) *types.Type { - return c.types[e.GetId()] +func (c *checker) getType(e ast.Expr) *types.Type { + return c.TypeMap()[e.ID()] } -func (c *checker) setReference(e *exprpb.Expr, r *ast.ReferenceInfo) { - if old, found := c.references[e.GetId()]; found && !old.Equals(r) { - c.errors.referenceRedefinition(e.GetId(), c.location(e), e, old, r) +func (c *checker) setReference(e ast.Expr, r *ast.ReferenceInfo) { + if old, found := c.ReferenceMap()[e.ID()]; found && !old.Equals(r) { + c.errors.referenceRedefinition(e.ID(), c.location(e), e, old, r) return } - c.references[e.GetId()] = r + c.SetReference(e.ID(), r) } -func (c *checker) assertType(e *exprpb.Expr, t *types.Type) { +func (c *checker) assertType(e ast.Expr, t *types.Type) { if !c.isAssignable(t, c.getType(e)) { - c.errors.typeMismatch(e.GetId(), c.location(e), t, c.getType(e)) + c.errors.typeMismatch(e.ID(), c.location(e), t, c.getType(e)) } } @@ -683,26 +648,12 @@ func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution { } } -func (c *checker) location(e *exprpb.Expr) common.Location { - return c.locationByID(e.GetId()) +func (c *checker) location(e ast.Expr) common.Location { + return c.locationByID(e.ID()) } func (c *checker) locationByID(id int64) common.Location { - positions := c.sourceInfo.GetPositions() - var line = 1 - if offset, found := positions[id]; found { - col := int(offset) - for _, lineOffset := range c.sourceInfo.GetLineOffsets() { - if lineOffset < offset { - line++ - col = int(offset - lineOffset) - } else { - break - } - } - return common.NewLocation(line, col) - } - return common.NoLocation + return c.SourceInfo().GetStartLocation(id) } func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) { diff --git a/constraint/vendor/github.com/google/cel-go/checker/cost.go b/constraint/vendor/github.com/google/cel-go/checker/cost.go index fd3f73505..04244694d 100644 
--- a/constraint/vendor/github.com/google/cel-go/checker/cost.go +++ b/constraint/vendor/github.com/google/cel-go/checker/cost.go @@ -22,8 +22,6 @@ import ( "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" "github.com/google/cel-go/parser" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // WARNING: Any changes to cost calculations in this file require a corresponding change in interpreter/runtimecost.go @@ -58,7 +56,7 @@ type AstNode interface { // Type returns the deduced type of the AstNode. Type() *types.Type // Expr returns the expression of the AstNode. - Expr() *exprpb.Expr + Expr() ast.Expr // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression. // For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings // and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no @@ -69,7 +67,7 @@ type AstNode interface { type astNode struct { path []string t *types.Type - expr *exprpb.Expr + expr ast.Expr derivedSize *SizeEstimate } @@ -81,7 +79,7 @@ func (e astNode) Type() *types.Type { return e.t } -func (e astNode) Expr() *exprpb.Expr { +func (e astNode) Expr() ast.Expr { return e.expr } @@ -90,29 +88,27 @@ func (e astNode) ComputedSize() *SizeEstimate { return e.derivedSize } var v uint64 - switch ek := e.expr.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - switch ck := ek.ConstExpr.GetConstantKind().(type) { - case *exprpb.Constant_StringValue: + switch e.expr.Kind() { + case ast.LiteralKind: + switch ck := e.expr.AsLiteral().(type) { + case types.String: // converting to runes here is an O(n) operation, but // this is consistent with how size is computed at runtime, // and how the language definition defines string size - v = uint64(len([]rune(ck.StringValue))) - case *exprpb.Constant_BytesValue: - v = uint64(len(ck.BytesValue)) - case *exprpb.Constant_BoolValue, *exprpb.Constant_DoubleValue, *exprpb.Constant_DurationValue, - *exprpb.Constant_Int64Value, *exprpb.Constant_TimestampValue, *exprpb.Constant_Uint64Value, - *exprpb.Constant_NullValue: + v = uint64(len([]rune(ck))) + case types.Bytes: + v = uint64(len(ck)) + case types.Bool, types.Double, types.Duration, + types.Int, types.Timestamp, types.Uint, + types.Null: v = uint64(1) default: return nil } - case *exprpb.Expr_ListExpr: - v = uint64(len(ek.ListExpr.GetElements())) - case *exprpb.Expr_StructExpr: - if ek.StructExpr.GetMessageName() == "" { - v = uint64(len(ek.StructExpr.GetEntries())) - } + case ast.ListKind: + v = uint64(e.expr.AsList().Size()) + case ast.MapKind: + v = uint64(e.expr.AsMap().Size()) default: return nil } @@ -265,7 +261,7 @@ type coster struct { iterRanges iterRangeScopes // computedSizes tracks the computed sizes of call results. computedSizes map[int64]SizeEstimate - checkedAST *ast.CheckedAST + checkedAST *ast.AST estimator CostEstimator overloadEstimators map[string]FunctionEstimator // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations. @@ -275,8 +271,8 @@ type coster struct { // Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names. 
type iterRangeScopes map[string][]int64 -func (vs iterRangeScopes) push(varName string, expr *exprpb.Expr) { - vs[varName] = append(vs[varName], expr.GetId()) +func (vs iterRangeScopes) push(varName string, expr ast.Expr) { + vs[varName] = append(vs[varName], expr.ID()) } func (vs iterRangeScopes) pop(varName string) { @@ -324,9 +320,9 @@ func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) C } // Cost estimates the cost of the parsed and type checked CEL expression. -func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { +func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { c := &coster{ - checkedAST: checker, + checkedAST: checked, estimator: estimator, overloadEstimators: map[string]FunctionEstimator{}, exprPath: map[int64][]string{}, @@ -340,28 +336,30 @@ func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) return CostEstimate{}, err } } - return c.cost(checker.Expr), nil + return c.cost(checked.Expr()), nil } -func (c *coster) cost(e *exprpb.Expr) CostEstimate { +func (c *coster) cost(e ast.Expr) CostEstimate { if e == nil { return CostEstimate{} } var cost CostEstimate - switch e.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: + switch e.Kind() { + case ast.LiteralKind: cost = constCost - case *exprpb.Expr_IdentExpr: + case ast.IdentKind: cost = c.costIdent(e) - case *exprpb.Expr_SelectExpr: + case ast.SelectKind: cost = c.costSelect(e) - case *exprpb.Expr_CallExpr: + case ast.CallKind: cost = c.costCall(e) - case *exprpb.Expr_ListExpr: + case ast.ListKind: cost = c.costCreateList(e) - case *exprpb.Expr_StructExpr: + case ast.MapKind: + cost = c.costCreateMap(e) + case ast.StructKind: cost = c.costCreateStruct(e) - case *exprpb.Expr_ComprehensionExpr: + case ast.ComprehensionKind: cost = c.costComprehension(e) default: return CostEstimate{} @@ -369,53 +367,51 @@ func (c *coster) cost(e *exprpb.Expr) CostEstimate { return cost } -func (c *coster) costIdent(e *exprpb.Expr) CostEstimate { - identExpr := e.GetIdentExpr() - +func (c *coster) costIdent(e ast.Expr) CostEstimate { + identName := e.AsIdent() // build and track the field path - if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok { - switch c.checkedAST.TypeMap[iterRange].Kind() { + if iterRange, ok := c.iterRanges.peek(identName); ok { + switch c.checkedAST.GetType(iterRange).Kind() { case types.ListKind: c.addPath(e, append(c.exprPath[iterRange], "@items")) case types.MapKind: c.addPath(e, append(c.exprPath[iterRange], "@keys")) } } else { - c.addPath(e, []string{identExpr.GetName()}) + c.addPath(e, []string{identName}) } return selectAndIdentCost } -func (c *coster) costSelect(e *exprpb.Expr) CostEstimate { - sel := e.GetSelectExpr() +func (c *coster) costSelect(e ast.Expr) CostEstimate { + sel := e.AsSelect() var sum CostEstimate - if sel.GetTestOnly() { + if sel.IsTestOnly() { // recurse, but do not add any cost // this is equivalent to how evalTestOnly increments the runtime cost counter // but does not add any additional cost for the qualifier, except here we do // the reverse (ident adds cost) sum = sum.Add(c.presenceTestCost) - sum = sum.Add(c.cost(sel.GetOperand())) + sum = sum.Add(c.cost(sel.Operand())) return sum } - sum = sum.Add(c.cost(sel.GetOperand())) - targetType := c.getType(sel.GetOperand()) + sum = sum.Add(c.cost(sel.Operand())) + targetType := c.getType(sel.Operand()) switch targetType.Kind() { case types.MapKind, types.StructKind, 
types.TypeParamKind: sum = sum.Add(selectAndIdentCost) } // build and track the field path - c.addPath(e, append(c.getPath(sel.GetOperand()), sel.GetField())) + c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName())) return sum } -func (c *coster) costCall(e *exprpb.Expr) CostEstimate { - call := e.GetCallExpr() - target := call.GetTarget() - args := call.GetArgs() +func (c *coster) costCall(e ast.Expr) CostEstimate { + call := e.AsCall() + args := call.Args() var sum CostEstimate @@ -426,22 +422,20 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate { argTypes[i] = c.newAstNode(arg) } - ref := c.checkedAST.ReferenceMap[e.GetId()] - if ref == nil || len(ref.OverloadIDs) == 0 { + overloadIDs := c.checkedAST.GetOverloadIDs(e.ID()) + if len(overloadIDs) == 0 { return CostEstimate{} } var targetType AstNode - if target != nil { - if call.Target != nil { - sum = sum.Add(c.cost(call.GetTarget())) - targetType = c.newAstNode(call.GetTarget()) - } + if call.IsMemberFunction() { + sum = sum.Add(c.cost(call.Target())) + targetType = c.newAstNode(call.Target()) } // Pick a cost estimate range that covers all the overload cost estimation ranges fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0} var resultSize *SizeEstimate - for _, overload := range ref.OverloadIDs { - overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts) + for _, overload := range overloadIDs { + overloadCost := c.functionCost(call.FunctionName(), overload, &targetType, argTypes, argCosts) fnCost = fnCost.Union(overloadCost.CostEstimate) if overloadCost.ResultSize != nil { if resultSize == nil { @@ -464,64 +458,56 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate { } } if resultSize != nil { - c.computedSizes[e.GetId()] = *resultSize + c.computedSizes[e.ID()] = *resultSize } return sum.Add(fnCost) } -func (c *coster) costCreateList(e *exprpb.Expr) CostEstimate { - create := e.GetListExpr() +func (c *coster) costCreateList(e ast.Expr) CostEstimate { + create := e.AsList() var sum CostEstimate - for _, e := range create.GetElements() { + for _, e := range create.Elements() { sum = sum.Add(c.cost(e)) } return sum.Add(createListBaseCost) } -func (c *coster) costCreateStruct(e *exprpb.Expr) CostEstimate { - str := e.GetStructExpr() - if str.MessageName != "" { - return c.costCreateMessage(e) - } - return c.costCreateMap(e) -} - -func (c *coster) costCreateMap(e *exprpb.Expr) CostEstimate { - mapVal := e.GetStructExpr() +func (c *coster) costCreateMap(e ast.Expr) CostEstimate { + mapVal := e.AsMap() var sum CostEstimate - for _, ent := range mapVal.GetEntries() { - key := ent.GetMapKey() - sum = sum.Add(c.cost(key)) - - sum = sum.Add(c.cost(ent.GetValue())) + for _, ent := range mapVal.Entries() { + entry := ent.AsMapEntry() + sum = sum.Add(c.cost(entry.Key())) + sum = sum.Add(c.cost(entry.Value())) } return sum.Add(createMapBaseCost) } -func (c *coster) costCreateMessage(e *exprpb.Expr) CostEstimate { - msgVal := e.GetStructExpr() +func (c *coster) costCreateStruct(e ast.Expr) CostEstimate { + msgVal := e.AsStruct() var sum CostEstimate - for _, ent := range msgVal.GetEntries() { - sum = sum.Add(c.cost(ent.GetValue())) + for _, ent := range msgVal.Fields() { + field := ent.AsStructField() + sum = sum.Add(c.cost(field.Value())) } return sum.Add(createMessageBaseCost) } -func (c *coster) costComprehension(e *exprpb.Expr) CostEstimate { - comp := e.GetComprehensionExpr() +func (c *coster) costComprehension(e ast.Expr) CostEstimate { + comp := e.AsComprehension() var sum 
CostEstimate - sum = sum.Add(c.cost(comp.GetIterRange())) - sum = sum.Add(c.cost(comp.GetAccuInit())) + sum = sum.Add(c.cost(comp.IterRange())) + sum = sum.Add(c.cost(comp.AccuInit())) // Track the iterRange of each IterVar for field path construction - c.iterRanges.push(comp.GetIterVar(), comp.GetIterRange()) - loopCost := c.cost(comp.GetLoopCondition()) - stepCost := c.cost(comp.GetLoopStep()) - c.iterRanges.pop(comp.GetIterVar()) - sum = sum.Add(c.cost(comp.Result)) - rangeCnt := c.sizeEstimate(c.newAstNode(comp.GetIterRange())) + c.iterRanges.push(comp.IterVar(), comp.IterRange()) + loopCost := c.cost(comp.LoopCondition()) + stepCost := c.cost(comp.LoopStep()) + c.iterRanges.pop(comp.IterVar()) + sum = sum.Add(c.cost(comp.Result())) + rangeCnt := c.sizeEstimate(c.newAstNode(comp.IterRange())) - c.computedSizes[e.GetId()] = rangeCnt + c.computedSizes[e.ID()] = rangeCnt rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost)) sum = sum.Add(rangeCost) @@ -674,26 +660,26 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())} } -func (c *coster) getType(e *exprpb.Expr) *types.Type { - return c.checkedAST.TypeMap[e.GetId()] +func (c *coster) getType(e ast.Expr) *types.Type { + return c.checkedAST.GetType(e.ID()) } -func (c *coster) getPath(e *exprpb.Expr) []string { - return c.exprPath[e.GetId()] +func (c *coster) getPath(e ast.Expr) []string { + return c.exprPath[e.ID()] } -func (c *coster) addPath(e *exprpb.Expr, path []string) { - c.exprPath[e.GetId()] = path +func (c *coster) addPath(e ast.Expr, path []string) { + c.exprPath[e.ID()] = path } -func (c *coster) newAstNode(e *exprpb.Expr) *astNode { +func (c *coster) newAstNode(e ast.Expr) *astNode { path := c.getPath(e) if len(path) > 0 && path[0] == parser.AccumulatorName { // only provide paths to root vars; omit accumulator vars path = nil } var derivedSize *SizeEstimate - if size, ok := c.computedSizes[e.GetId()]; ok { + if size, ok := c.computedSizes[e.ID()]; ok { derivedSize = &size } return &astNode{ diff --git a/constraint/vendor/github.com/google/cel-go/checker/decls/decls.go b/constraint/vendor/github.com/google/cel-go/checker/decls/decls.go index 0d91bef51..c0e5de469 100644 --- a/constraint/vendor/github.com/google/cel-go/checker/decls/decls.go +++ b/constraint/vendor/github.com/google/cel-go/checker/decls/decls.go @@ -67,7 +67,7 @@ func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type { // NewOptionalType constructs an abstract type indicating that the parameterized type // may be contained within the object. 
func NewOptionalType(paramType *exprpb.Type) *exprpb.Type { - return NewAbstractType("optional", paramType) + return NewAbstractType("optional_type", paramType) } // NewFunctionType creates a function invocation contract, typically only used diff --git a/constraint/vendor/github.com/google/cel-go/checker/env.go b/constraint/vendor/github.com/google/cel-go/checker/env.go index 70682b17c..d5ac05014 100644 --- a/constraint/vendor/github.com/google/cel-go/checker/env.go +++ b/constraint/vendor/github.com/google/cel-go/checker/env.go @@ -146,6 +146,14 @@ func (e *Env) LookupIdent(name string) *decls.VariableDecl { return decl } + if i, found := e.provider.FindIdent(candidate); found { + if t, ok := i.(*types.Type); ok { + decl := decls.NewVariable(candidate, types.NewTypeTypeWithParam(t)) + e.declarations.AddIdent(decl) + return decl + } + } + // Next try to import this as an enum value by splitting the name in a type prefix and // the enum inside. if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType { diff --git a/constraint/vendor/github.com/google/cel-go/checker/errors.go b/constraint/vendor/github.com/google/cel-go/checker/errors.go index c2b96498d..8b3bf0b8b 100644 --- a/constraint/vendor/github.com/google/cel-go/checker/errors.go +++ b/constraint/vendor/github.com/google/cel-go/checker/errors.go @@ -15,13 +15,9 @@ package checker import ( - "reflect" - "github.com/google/cel-go/common" "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // typeErrors is a specialization of Errors. @@ -34,9 +30,9 @@ func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, name, FormatCELType(field), FormatCELType(value)) } -func (e *typeErrors) incompatibleType(id int64, l common.Location, ex *exprpb.Expr, prev, next *types.Type) { +func (e *typeErrors) incompatibleType(id int64, l common.Location, ex ast.Expr, prev, next *types.Type) { e.errs.ReportErrorAtID(id, l, - "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next) + "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next) } func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) { @@ -49,7 +45,7 @@ func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *type FormatCELType(t)) } -func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field *exprpb.Expr) { +func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field ast.Expr) { e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field) } @@ -61,9 +57,9 @@ func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName strin e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName) } -func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex *exprpb.Expr, prev, next *ast.ReferenceInfo) { +func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex ast.Expr, prev, next *ast.ReferenceInfo) { e.errs.ReportErrorAtID(id, l, - "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next) + "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next) } func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) { @@ -87,6 +83,6 @@ func (e *typeErrors) 
unexpectedFailedResolution(id int64, l common.Location, typ e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName) } -func (e *typeErrors) unexpectedASTType(id int64, l common.Location, ex *exprpb.Expr) { - e.errs.ReportErrorAtID(id, l, "unrecognized ast type: %v", reflect.TypeOf(ex)) +func (e *typeErrors) unexpectedASTType(id int64, l common.Location, kind, typeName string) { + e.errs.ReportErrorAtID(id, l, "unexpected %s type: %v", kind, typeName) } diff --git a/constraint/vendor/github.com/google/cel-go/checker/printer.go b/constraint/vendor/github.com/google/cel-go/checker/printer.go index 15cba06ee..7a3984f02 100644 --- a/constraint/vendor/github.com/google/cel-go/checker/printer.go +++ b/constraint/vendor/github.com/google/cel-go/checker/printer.go @@ -17,40 +17,40 @@ package checker import ( "sort" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/debug" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) type semanticAdorner struct { - checks *exprpb.CheckedExpr + checked *ast.AST } var _ debug.Adorner = &semanticAdorner{} func (a *semanticAdorner) GetMetadata(elem any) string { result := "" - e, isExpr := elem.(*exprpb.Expr) + e, isExpr := elem.(ast.Expr) if !isExpr { return result } - t := a.checks.TypeMap[e.GetId()] + t := a.checked.TypeMap()[e.ID()] if t != nil { result += "~" - result += FormatCheckedType(t) + result += FormatCELType(t) } - switch e.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr, - *exprpb.Expr_CallExpr, - *exprpb.Expr_StructExpr, - *exprpb.Expr_SelectExpr: - if ref, found := a.checks.ReferenceMap[e.GetId()]; found { - if len(ref.GetOverloadId()) == 0 { + switch e.Kind() { + case ast.IdentKind, + ast.CallKind, + ast.ListKind, + ast.StructKind, + ast.SelectKind: + if ref, found := a.checked.ReferenceMap()[e.ID()]; found { + if len(ref.OverloadIDs) == 0 { result += "^" + ref.Name } else { - sort.Strings(ref.GetOverloadId()) - for i, overload := range ref.GetOverloadId() { + sort.Strings(ref.OverloadIDs) + for i, overload := range ref.OverloadIDs { if i == 0 { result += "^" } else { @@ -68,7 +68,7 @@ func (a *semanticAdorner) GetMetadata(elem any) string { // Print returns a string representation of the Expr message, // annotated with types from the CheckedExpr. The Expr must // be a sub-expression embedded in the CheckedExpr. -func Print(e *exprpb.Expr, checks *exprpb.CheckedExpr) string { - a := &semanticAdorner{checks: checks} +func Print(e ast.Expr, checked *ast.AST) string { + a := &semanticAdorner{checked: checked} return debug.ToAdornedDebugString(e, a) } diff --git a/constraint/vendor/github.com/google/cel-go/checker/standard.go b/constraint/vendor/github.com/google/cel-go/checker/standard.go deleted file mode 100644 index 11b35b80e..000000000 --- a/constraint/vendor/github.com/google/cel-go/checker/standard.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package checker - -import ( - "github.com/google/cel-go/common/stdlib" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// StandardFunctions returns the Decls for all functions in the evaluator. -// -// Deprecated: prefer stdlib.FunctionExprDecls() -func StandardFunctions() []*exprpb.Decl { - return stdlib.FunctionExprDecls() -} - -// StandardTypes returns the set of type identifiers for standard library types. -// -// Deprecated: prefer stdlib.TypeExprDecls() -func StandardTypes() []*exprpb.Decl { - return stdlib.TypeExprDecls() -} diff --git a/constraint/vendor/github.com/google/cel-go/checker/types.go b/constraint/vendor/github.com/google/cel-go/checker/types.go index e2373d1b7..4c65b2737 100644 --- a/constraint/vendor/github.com/google/cel-go/checker/types.go +++ b/constraint/vendor/github.com/google/cel-go/checker/types.go @@ -41,7 +41,7 @@ func isError(t *types.Type) bool { func isOptional(t *types.Type) bool { if t.Kind() == types.OpaqueKind { - return t.TypeName() == "optional" + return t.TypeName() == "optional_type" } return false } @@ -137,7 +137,11 @@ func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool { case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind, types.AnyKind, types.DurationKind, types.TimestampKind, types.StructKind: - return t1.IsAssignableType(t2) + // Test whether t2 is assignable from t1. The order of this check won't usually matter; + // however, there may be cases where type capabilities are expanded beyond what is supported + // in the current common/types package. For example, an interface designation for a group of + // Struct types. + return t2.IsAssignableType(t1) case types.TypeKind: return kind2 == types.TypeKind case types.OpaqueKind, types.ListKind, types.MapKind: @@ -256,7 +260,7 @@ func notReferencedIn(m *mapping, t, withinType *types.Type) bool { return true } return notReferencedIn(m, t, wtSub) - case types.OpaqueKind, types.ListKind, types.MapKind: + case types.OpaqueKind, types.ListKind, types.MapKind, types.TypeKind: for _, pt := range withinType.Parameters() { if !notReferencedIn(m, t, pt) { return false @@ -288,7 +292,8 @@ func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type { substitute(m, t.Parameters()[1], typeParamToDyn)) case types.TypeKind: if len(t.Parameters()) > 0 { - return types.NewTypeTypeWithParam(substitute(m, t.Parameters()[0], typeParamToDyn)) + tParam := t.Parameters()[0] + return types.NewTypeTypeWithParam(substitute(m, tParam, typeParamToDyn)) } return t default: diff --git a/constraint/vendor/github.com/google/cel-go/common/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/common/BUILD.bazel index d6165b13a..eef7f281b 100644 --- a/constraint/vendor/github.com/google/cel-go/common/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/common/BUILD.bazel @@ -18,7 +18,6 @@ go_library( deps = [ "//common/runes:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", - "@org_golang_x_text//width:go_default_library", ], ) diff --git a/constraint/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/common/ast/BUILD.bazel index 7269cdff5..9824f57a9 100644 --- a/constraint/vendor/github.com/google/cel-go/common/ast/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/common/ast/BUILD.bazel @@ -1,12 +1,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") package( - default_visibility = [ - 
"//cel:__subpackages__", - "//checker:__subpackages__", - "//common:__subpackages__", - "//interpreter:__subpackages__", - ], + default_visibility = ["//visibility:public"], licenses = ["notice"], # Apache 2.0 ) @@ -14,13 +9,19 @@ go_library( name = "go_default_library", srcs = [ "ast.go", + "conversion.go", "expr.go", + "factory.go", + "navigable.go", ], importpath = "github.com/google/cel-go/common/ast", - deps = [ + deps = [ + "//common:go_default_library", "//common/types:go_default_library", "//common/types/ref:go_default_library", + "@dev_cel_expr//:expr", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", ], ) @@ -29,17 +30,20 @@ go_test( name = "go_default_test", srcs = [ "ast_test.go", + "conversion_test.go", "expr_test.go", + "navigable_test.go", ], embed = [ ":go_default_library", ], - deps = [ + deps = [ "//checker:go_default_library", "//checker/decls:go_default_library", "//common:go_default_library", "//common/containers:go_default_library", "//common/decls:go_default_library", + "//common/operators:go_default_library", "//common/overloads:go_default_library", "//common/stdlib:go_default_library", "//common/types:go_default_library", @@ -48,5 +52,6 @@ go_test( "//test/proto3pb:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//encoding/prototext:go_default_library", ], -) \ No newline at end of file +) diff --git a/constraint/vendor/github.com/google/cel-go/common/ast/ast.go b/constraint/vendor/github.com/google/cel-go/common/ast/ast.go index b3c150793..b807669d4 100644 --- a/constraint/vendor/github.com/google/cel-go/common/ast/ast.go +++ b/constraint/vendor/github.com/google/cel-go/common/ast/ast.go @@ -16,74 +16,362 @@ package ast import ( - "fmt" - + "github.com/google/cel-go/common" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" +) - structpb "google.golang.org/protobuf/types/known/structpb" +// AST contains a protobuf expression and source info along with CEL-native type and reference information. +type AST struct { + expr Expr + sourceInfo *SourceInfo + typeMap map[int64]*types.Type + refMap map[int64]*ReferenceInfo +} - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) +// Expr returns the root ast.Expr value in the AST. +func (a *AST) Expr() Expr { + if a == nil { + return nilExpr + } + return a.expr +} -// CheckedAST contains a protobuf expression and source info along with CEL-native type and reference information. -type CheckedAST struct { - Expr *exprpb.Expr - SourceInfo *exprpb.SourceInfo - TypeMap map[int64]*types.Type - ReferenceMap map[int64]*ReferenceInfo -} - -// CheckedASTToCheckedExpr converts a CheckedAST to a CheckedExpr protobouf. -func CheckedASTToCheckedExpr(ast *CheckedAST) (*exprpb.CheckedExpr, error) { - refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap)) - for id, ref := range ast.ReferenceMap { - r, err := ReferenceInfoToReferenceExpr(ref) - if err != nil { - return nil, err - } - refMap[id] = r +// SourceInfo returns the source metadata associated with the parse / type-check passes. +func (a *AST) SourceInfo() *SourceInfo { + if a == nil { + return nil + } + return a.sourceInfo +} + +// GetType returns the type for the expression at the given id, if one exists, else types.DynType. 
+func (a *AST) GetType(id int64) *types.Type { + if t, found := a.TypeMap()[id]; found { + return t + } + return types.DynType +} + +// SetType sets the type of the expression node at the given id. +func (a *AST) SetType(id int64, t *types.Type) { + if a == nil { + return + } + a.typeMap[id] = t +} + +// TypeMap returns the map of expression ids to type-checked types. +// +// If the AST is not type-checked, the map will be empty. +func (a *AST) TypeMap() map[int64]*types.Type { + if a == nil { + return map[int64]*types.Type{} + } + return a.typeMap +} + +// GetOverloadIDs returns the set of overload function names for a given expression id. +// +// If the expression id is not a function call, or the AST is not type-checked, the result will be empty. +func (a *AST) GetOverloadIDs(id int64) []string { + if ref, found := a.ReferenceMap()[id]; found { + return ref.OverloadIDs } - typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap)) - for id, typ := range ast.TypeMap { - t, err := types.TypeToExprType(typ) - if err != nil { - return nil, err + return []string{} +} + +// ReferenceMap returns the map of expression id to identifier, constant, and function references. +func (a *AST) ReferenceMap() map[int64]*ReferenceInfo { + if a == nil { + return map[int64]*ReferenceInfo{} + } + return a.refMap +} + +// SetReference adds a reference to the checked AST type map. +func (a *AST) SetReference(id int64, r *ReferenceInfo) { + if a == nil { + return + } + a.refMap[id] = r +} + +// IsChecked returns whether the AST is type-checked. +func (a *AST) IsChecked() bool { + return a != nil && len(a.TypeMap()) > 0 +} + +// NewAST creates a base AST instance with an ast.Expr and ast.SourceInfo value. +func NewAST(e Expr, sourceInfo *SourceInfo) *AST { + if e == nil { + e = nilExpr + } + return &AST{ + expr: e, + sourceInfo: sourceInfo, + typeMap: make(map[int64]*types.Type), + refMap: make(map[int64]*ReferenceInfo), + } +} + +// NewCheckedAST wraps an parsed AST and augments it with type and reference metadata. +func NewCheckedAST(parsed *AST, typeMap map[int64]*types.Type, refMap map[int64]*ReferenceInfo) *AST { + return &AST{ + expr: parsed.Expr(), + sourceInfo: parsed.SourceInfo(), + typeMap: typeMap, + refMap: refMap, + } +} + +// Copy creates a deep copy of the Expr and SourceInfo values in the input AST. +// +// Copies of the Expr value are generated using an internal default ExprFactory. +func Copy(a *AST) *AST { + if a == nil { + return nil + } + e := defaultFactory.CopyExpr(a.expr) + if !a.IsChecked() { + return NewAST(e, CopySourceInfo(a.SourceInfo())) + } + typesCopy := make(map[int64]*types.Type, len(a.typeMap)) + for id, t := range a.typeMap { + typesCopy[id] = t + } + refsCopy := make(map[int64]*ReferenceInfo, len(a.refMap)) + for id, r := range a.refMap { + refsCopy[id] = r + } + return NewCheckedAST(NewAST(e, CopySourceInfo(a.SourceInfo())), typesCopy, refsCopy) +} + +// MaxID returns the upper-bound, non-inclusive, of ids present within the AST's Expr value. +func MaxID(a *AST) int64 { + visitor := &maxIDVisitor{maxID: 1} + PostOrderVisit(a.Expr(), visitor) + for id, call := range a.SourceInfo().MacroCalls() { + PostOrderVisit(call, visitor) + if id > visitor.maxID { + visitor.maxID = id + 1 } - typeMap[id] = t - } - return &exprpb.CheckedExpr{ - Expr: ast.Expr, - SourceInfo: ast.SourceInfo, - ReferenceMap: refMap, - TypeMap: typeMap, - }, nil -} - -// CheckedExprToCheckedAST converts a CheckedExpr protobuf to a CheckedAST instance. 
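The exported CheckedAST struct fields are replaced above by an *ast.AST whose metadata is read through accessor methods. A small sketch (not part of the vendored code) of inspecting a checked AST through only the accessors visible in this hunk:

// Minimal sketch, not part of the vendored diff: reading check metadata via the
// new accessor methods instead of the old exported CheckedAST fields.
package astexample

import (
	"fmt"

	"github.com/google/cel-go/common/ast"
)

// describe prints basic metadata for the root expression of a checked AST.
func describe(a *ast.AST) {
	root := a.Expr()
	fmt.Println("type-checked:", a.IsChecked())
	// GetType falls back to types.DynType when no type was recorded for the id.
	fmt.Println("root type:", a.GetType(root.ID()))
	// GetOverloadIDs is empty unless the id refers to a resolved function call.
	fmt.Println("root overloads:", a.GetOverloadIDs(root.ID()))
}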
-func CheckedExprToCheckedAST(checked *exprpb.CheckedExpr) (*CheckedAST, error) { - refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap())) - for id, ref := range checked.GetReferenceMap() { - r, err := ReferenceExprToReferenceInfo(ref) - if err != nil { - return nil, err + } + return visitor.maxID + 1 +} + +// NewSourceInfo creates a simple SourceInfo object from an input common.Source value. +func NewSourceInfo(src common.Source) *SourceInfo { + var lineOffsets []int32 + var desc string + baseLine := int32(0) + baseCol := int32(0) + if src != nil { + desc = src.Description() + lineOffsets = src.LineOffsets() + // Determine whether the source metadata should be computed relative + // to a base line and column value. This can be determined by requesting + // the location for offset 0 from the source object. + if loc, found := src.OffsetLocation(0); found { + baseLine = int32(loc.Line()) - 1 + baseCol = int32(loc.Column()) } - refMap[id] = r } - typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap())) - for id, typ := range checked.GetTypeMap() { - t, err := types.ExprTypeToType(typ) - if err != nil { - return nil, err + return &SourceInfo{ + desc: desc, + lines: lineOffsets, + baseLine: baseLine, + baseCol: baseCol, + offsetRanges: make(map[int64]OffsetRange), + macroCalls: make(map[int64]Expr), + } +} + +// CopySourceInfo creates a deep copy of the MacroCalls within the input SourceInfo. +// +// Copies of macro Expr values are generated using an internal default ExprFactory. +func CopySourceInfo(info *SourceInfo) *SourceInfo { + if info == nil { + return nil + } + rangesCopy := make(map[int64]OffsetRange, len(info.offsetRanges)) + for id, off := range info.offsetRanges { + rangesCopy[id] = off + } + callsCopy := make(map[int64]Expr, len(info.macroCalls)) + for id, call := range info.macroCalls { + callsCopy[id] = defaultFactory.CopyExpr(call) + } + return &SourceInfo{ + syntax: info.syntax, + desc: info.desc, + lines: info.lines, + baseLine: info.baseLine, + baseCol: info.baseCol, + offsetRanges: rangesCopy, + macroCalls: callsCopy, + } +} + +// SourceInfo records basic information about the expression as a textual input and +// as a parsed expression value. +type SourceInfo struct { + syntax string + desc string + lines []int32 + baseLine int32 + baseCol int32 + offsetRanges map[int64]OffsetRange + macroCalls map[int64]Expr +} + +// SyntaxVersion returns the syntax version associated with the text expression. +func (s *SourceInfo) SyntaxVersion() string { + if s == nil { + return "" + } + return s.syntax +} + +// Description provides information about where the expression came from. +func (s *SourceInfo) Description() string { + if s == nil { + return "" + } + return s.desc +} + +// LineOffsets returns a list of the 0-based character offsets in the input text where newlines appear. +func (s *SourceInfo) LineOffsets() []int32 { + if s == nil { + return []int32{} + } + return s.lines +} + +// MacroCalls returns a map of expression id to ast.Expr value where the id represents the expression +// node where the macro was inserted into the AST, and the ast.Expr value represents the original call +// signature which was replaced. +func (s *SourceInfo) MacroCalls() map[int64]Expr { + if s == nil { + return map[int64]Expr{} + } + return s.macroCalls +} + +// GetMacroCall returns the original ast.Expr value for the given expression if it was generated via +// a macro replacement. 
+// +// Note, parsing options must be enabled to track macro calls before this method will return a value. +func (s *SourceInfo) GetMacroCall(id int64) (Expr, bool) { + e, found := s.MacroCalls()[id] + return e, found +} + +// SetMacroCall records a macro call at a specific location. +func (s *SourceInfo) SetMacroCall(id int64, e Expr) { + if s != nil { + s.macroCalls[id] = e + } +} + +// ClearMacroCall removes the macro call at the given expression id. +func (s *SourceInfo) ClearMacroCall(id int64) { + if s != nil { + delete(s.macroCalls, id) + } +} + +// OffsetRanges returns a map of expression id to OffsetRange values where the range indicates either: +// the start and end position in the input stream where the expression occurs, or the start position +// only. If the range only captures start position, the stop position of the range will be equal to +// the start. +func (s *SourceInfo) OffsetRanges() map[int64]OffsetRange { + if s == nil { + return map[int64]OffsetRange{} + } + return s.offsetRanges +} + +// GetOffsetRange retrieves an OffsetRange for the given expression id if one exists. +func (s *SourceInfo) GetOffsetRange(id int64) (OffsetRange, bool) { + if s == nil { + return OffsetRange{}, false + } + o, found := s.offsetRanges[id] + return o, found +} + +// SetOffsetRange sets the OffsetRange for the given expression id. +func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) { + if s == nil { + return + } + s.offsetRanges[id] = o +} + +// ClearOffsetRange removes the OffsetRange for the given expression id. +func (s *SourceInfo) ClearOffsetRange(id int64) { + if s != nil { + delete(s.offsetRanges, id) + } +} + +// GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character +// of the expression node at the id. +func (s *SourceInfo) GetStartLocation(id int64) common.Location { + if o, found := s.GetOffsetRange(id); found { + return s.GetLocationByOffset(o.Start) + } + return common.NoLocation +} + +// GetStopLocation calculates the human-readable 1-based line and 0-based column of the last character for +// the expression node at the given id. +// +// If the SourceInfo was generated from a serialized protobuf representation, the stop location will +// be identical to the start location for the expression. +func (s *SourceInfo) GetStopLocation(id int64) common.Location { + if o, found := s.GetOffsetRange(id); found { + return s.GetLocationByOffset(o.Stop) + } + return common.NoLocation +} + +// GetLocationByOffset returns the line and column information for a given character offset. +func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location { + line := 1 + col := int(offset) + for _, lineOffset := range s.LineOffsets() { + if lineOffset > offset { + break } - typeMap[id] = t + line++ + col = int(offset - lineOffset) } - return &CheckedAST{ - Expr: checked.GetExpr(), - SourceInfo: checked.GetSourceInfo(), - ReferenceMap: refMap, - TypeMap: typeMap, - }, nil + return common.NewLocation(line, col) +} + +// ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column. +func (s *SourceInfo) ComputeOffset(line, col int32) int32 { + if s != nil { + line = s.baseLine + line + col = s.baseCol + col + } + if line == 1 { + return col + } + if line < 1 || line > int32(len(s.LineOffsets())) { + return -1 + } + offset := s.LineOffsets()[line-2] + return offset + col +} + +// OffsetRange captures the start and stop positions of a section of text in the input expression. 
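The SourceInfo type above records per-expression offset ranges and macro calls natively. As a brief illustration of the offset-to-location helpers, here is a minimal sketch (not part of the vendored code); common.NewTextSource and the Location Line/Column methods are assumed from the existing cel-go common package rather than from this diff.

// Minimal sketch, not part of the vendored diff: tracking an offset range for an
// expression id and mapping it back to a 1-based line / 0-based column location.
package sourceinfoexample

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/ast"
)

func locate() {
	src := common.NewTextSource("a.b\n&& c")
	info := ast.NewSourceInfo(src)
	// Pretend expression id 1 spans the first three characters ("a.b").
	info.SetOffsetRange(1, ast.OffsetRange{Start: 0, Stop: 3})
	loc := info.GetStartLocation(1)
	fmt.Printf("line %d, column %d\n", loc.Line(), loc.Column())
}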
+type OffsetRange struct { + Start int32 + Stop int32 } // ReferenceInfo contains a CEL native representation of an identifier reference which may refer to @@ -149,78 +437,21 @@ func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool { return true } -// ReferenceInfoToReferenceExpr converts a ReferenceInfo instance to a protobuf Reference suitable for serialization. -func ReferenceInfoToReferenceExpr(info *ReferenceInfo) (*exprpb.Reference, error) { - c, err := ValToConstant(info.Value) - if err != nil { - return nil, err - } - return &exprpb.Reference{ - Name: info.Name, - OverloadId: info.OverloadIDs, - Value: c, - }, nil +type maxIDVisitor struct { + maxID int64 + *baseVisitor } -// ReferenceExprToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance. -func ReferenceExprToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) { - v, err := ConstantToVal(ref.GetValue()) - if err != nil { - return nil, err +// VisitExpr updates the max identifier if the incoming expression id is greater than previously observed. +func (v *maxIDVisitor) VisitExpr(e Expr) { + if v.maxID < e.ID() { + v.maxID = e.ID() } - return &ReferenceInfo{ - Name: ref.GetName(), - OverloadIDs: ref.GetOverloadId(), - Value: v, - }, nil } -// ValToConstant converts a CEL-native ref.Val to a protobuf Constant. -// -// Only simple scalar types are supported by this method. -func ValToConstant(v ref.Val) (*exprpb.Constant, error) { - if v == nil { - return nil, nil - } - switch v.Type() { - case types.BoolType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil - case types.BytesType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil - case types.DoubleType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil - case types.IntType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil - case types.NullType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil - case types.StringType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil - case types.UintType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil - } - return nil, fmt.Errorf("unsupported constant kind: %v", v.Type()) -} - -// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. -func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { - if c == nil { - return nil, nil - } - switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - return types.Bool(c.GetBoolValue()), nil - case *exprpb.Constant_BytesValue: - return types.Bytes(c.GetBytesValue()), nil - case *exprpb.Constant_DoubleValue: - return types.Double(c.GetDoubleValue()), nil - case *exprpb.Constant_Int64Value: - return types.Int(c.GetInt64Value()), nil - case *exprpb.Constant_NullValue: - return types.NullValue, nil - case *exprpb.Constant_StringValue: - return types.String(c.GetStringValue()), nil - case *exprpb.Constant_Uint64Value: - return types.Uint(c.GetUint64Value()), nil - } - return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) +// VisitEntryExpr updates the max identifier if the incoming entry id is greater than previously observed. 
+func (v *maxIDVisitor) VisitEntryExpr(e EntryExpr) { + if v.maxID < e.ID() { + v.maxID = e.ID() + } } diff --git a/constraint/vendor/github.com/google/cel-go/common/ast/conversion.go b/constraint/vendor/github.com/google/cel-go/common/ast/conversion.go new file mode 100644 index 000000000..435d8f654 --- /dev/null +++ b/constraint/vendor/github.com/google/cel-go/common/ast/conversion.go @@ -0,0 +1,659 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + + "google.golang.org/protobuf/proto" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + celpb "cel.dev/expr" + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +// ToProto converts an AST to a CheckedExpr protobouf. +func ToProto(ast *AST) (*exprpb.CheckedExpr, error) { + refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap())) + for id, ref := range ast.ReferenceMap() { + r, err := ReferenceInfoToProto(ref) + if err != nil { + return nil, err + } + refMap[id] = r + } + typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap())) + for id, typ := range ast.TypeMap() { + t, err := types.TypeToExprType(typ) + if err != nil { + return nil, err + } + typeMap[id] = t + } + e, err := ExprToProto(ast.Expr()) + if err != nil { + return nil, err + } + info, err := SourceInfoToProto(ast.SourceInfo()) + if err != nil { + return nil, err + } + return &exprpb.CheckedExpr{ + Expr: e, + SourceInfo: info, + ReferenceMap: refMap, + TypeMap: typeMap, + }, nil +} + +// ToAST converts a CheckedExpr protobuf to an AST instance. +func ToAST(checked *exprpb.CheckedExpr) (*AST, error) { + refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap())) + for id, ref := range checked.GetReferenceMap() { + r, err := ProtoToReferenceInfo(ref) + if err != nil { + return nil, err + } + refMap[id] = r + } + typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap())) + for id, typ := range checked.GetTypeMap() { + t, err := types.ExprTypeToType(typ) + if err != nil { + return nil, err + } + typeMap[id] = t + } + info, err := ProtoToSourceInfo(checked.GetSourceInfo()) + if err != nil { + return nil, err + } + root, err := ProtoToExpr(checked.GetExpr()) + if err != nil { + return nil, err + } + ast := NewCheckedAST(NewAST(root, info), typeMap, refMap) + return ast, nil +} + +// ProtoToExpr converts a protobuf Expr value to an ast.Expr value. 
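The new conversion.go file above centralizes protobuf interop that previously lived in ast.go. A short sketch (not part of the vendored code) of the round trip between the native AST and the v1alpha1 CheckedExpr protobuf, using only ToProto and ToAST as defined in this file:

// Minimal sketch, not part of the vendored diff: serializing a checked AST to
// the v1alpha1 CheckedExpr protobuf and converting it back.
package conversionexample

import "github.com/google/cel-go/common/ast"

// roundTrip converts a native AST to its protobuf form and back again.
func roundTrip(checked *ast.AST) (*ast.AST, error) {
	pb, err := ast.ToProto(checked) // *exprpb.CheckedExpr
	if err != nil {
		return nil, err
	}
	return ast.ToAST(pb)
}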
+func ProtoToExpr(e *exprpb.Expr) (Expr, error) { + factory := NewExprFactory() + return exprInternal(factory, e) +} + +// ProtoToEntryExpr converts a protobuf struct/map entry to an ast.EntryExpr +func ProtoToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + factory := NewExprFactory() + switch e.GetKeyKind().(type) { + case *exprpb.Expr_CreateStruct_Entry_FieldKey: + return exprStructField(factory, e.GetId(), e) + case *exprpb.Expr_CreateStruct_Entry_MapKey: + return exprMapEntry(factory, e.GetId(), e) + } + return nil, fmt.Errorf("unsupported expr entry kind: %v", e) +} + +func exprInternal(factory ExprFactory, e *exprpb.Expr) (Expr, error) { + id := e.GetId() + switch e.GetExprKind().(type) { + case *exprpb.Expr_CallExpr: + return exprCall(factory, id, e.GetCallExpr()) + case *exprpb.Expr_ComprehensionExpr: + return exprComprehension(factory, id, e.GetComprehensionExpr()) + case *exprpb.Expr_ConstExpr: + return exprLiteral(factory, id, e.GetConstExpr()) + case *exprpb.Expr_IdentExpr: + return exprIdent(factory, id, e.GetIdentExpr()) + case *exprpb.Expr_ListExpr: + return exprList(factory, id, e.GetListExpr()) + case *exprpb.Expr_SelectExpr: + return exprSelect(factory, id, e.GetSelectExpr()) + case *exprpb.Expr_StructExpr: + s := e.GetStructExpr() + if s.GetMessageName() != "" { + return exprStruct(factory, id, s) + } + return exprMap(factory, id, s) + } + return factory.NewUnspecifiedExpr(id), nil +} + +func exprCall(factory ExprFactory, id int64, call *exprpb.Expr_Call) (Expr, error) { + var err error + args := make([]Expr, len(call.GetArgs())) + for i, a := range call.GetArgs() { + args[i], err = exprInternal(factory, a) + if err != nil { + return nil, err + } + } + if call.GetTarget() == nil { + return factory.NewCall(id, call.GetFunction(), args...), nil + } + + target, err := exprInternal(factory, call.GetTarget()) + if err != nil { + return nil, err + } + return factory.NewMemberCall(id, call.GetFunction(), target, args...), nil +} + +func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehension) (Expr, error) { + iterRange, err := exprInternal(factory, comp.GetIterRange()) + if err != nil { + return nil, err + } + accuInit, err := exprInternal(factory, comp.GetAccuInit()) + if err != nil { + return nil, err + } + loopCond, err := exprInternal(factory, comp.GetLoopCondition()) + if err != nil { + return nil, err + } + loopStep, err := exprInternal(factory, comp.GetLoopStep()) + if err != nil { + return nil, err + } + result, err := exprInternal(factory, comp.GetResult()) + if err != nil { + return nil, err + } + return factory.NewComprehensionTwoVar(id, + iterRange, + comp.GetIterVar(), + comp.GetIterVar2(), + comp.GetAccuVar(), + accuInit, + loopCond, + loopStep, + result), nil +} + +func exprLiteral(factory ExprFactory, id int64, c *exprpb.Constant) (Expr, error) { + val, err := ConstantToVal(c) + if err != nil { + return nil, err + } + return factory.NewLiteral(id, val), nil +} + +func exprIdent(factory ExprFactory, id int64, i *exprpb.Expr_Ident) (Expr, error) { + return factory.NewIdent(id, i.GetName()), nil +} + +func exprList(factory ExprFactory, id int64, l *exprpb.Expr_CreateList) (Expr, error) { + elems := make([]Expr, len(l.GetElements())) + for i, e := range l.GetElements() { + elem, err := exprInternal(factory, e) + if err != nil { + return nil, err + } + elems[i] = elem + } + return factory.NewList(id, elems, l.GetOptionalIndices()), nil +} + +func exprMap(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, 
error) { + entries := make([]EntryExpr, len(s.GetEntries())) + var err error + for i, entry := range s.GetEntries() { + entries[i], err = exprMapEntry(factory, entry.GetId(), entry) + if err != nil { + return nil, err + } + } + return factory.NewMap(id, entries), nil +} + +func exprMapEntry(factory ExprFactory, id int64, e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + k, err := exprInternal(factory, e.GetMapKey()) + if err != nil { + return nil, err + } + v, err := exprInternal(factory, e.GetValue()) + if err != nil { + return nil, err + } + return factory.NewMapEntry(id, k, v, e.GetOptionalEntry()), nil +} + +func exprSelect(factory ExprFactory, id int64, s *exprpb.Expr_Select) (Expr, error) { + op, err := exprInternal(factory, s.GetOperand()) + if err != nil { + return nil, err + } + if s.GetTestOnly() { + return factory.NewPresenceTest(id, op, s.GetField()), nil + } + return factory.NewSelect(id, op, s.GetField()), nil +} + +func exprStruct(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) { + fields := make([]EntryExpr, len(s.GetEntries())) + var err error + for i, field := range s.GetEntries() { + fields[i], err = exprStructField(factory, field.GetId(), field) + if err != nil { + return nil, err + } + } + return factory.NewStruct(id, s.GetMessageName(), fields), nil +} + +func exprStructField(factory ExprFactory, id int64, f *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + v, err := exprInternal(factory, f.GetValue()) + if err != nil { + return nil, err + } + return factory.NewStructField(id, f.GetFieldKey(), v, f.GetOptionalEntry()), nil +} + +// ExprToProto serializes an ast.Expr value to a protobuf Expr representation. +func ExprToProto(e Expr) (*exprpb.Expr, error) { + if e == nil { + return &exprpb.Expr{}, nil + } + switch e.Kind() { + case CallKind: + return protoCall(e.ID(), e.AsCall()) + case ComprehensionKind: + return protoComprehension(e.ID(), e.AsComprehension()) + case IdentKind: + return protoIdent(e.ID(), e.AsIdent()) + case ListKind: + return protoList(e.ID(), e.AsList()) + case LiteralKind: + return protoLiteral(e.ID(), e.AsLiteral()) + case MapKind: + return protoMap(e.ID(), e.AsMap()) + case SelectKind: + return protoSelect(e.ID(), e.AsSelect()) + case StructKind: + return protoStruct(e.ID(), e.AsStruct()) + case UnspecifiedExprKind: + // Handle the case where a macro reference may be getting translated. + // A nested macro 'pointer' is a non-zero expression id with no kind set. 
+ if e.ID() != 0 { + return &exprpb.Expr{Id: e.ID()}, nil + } + return &exprpb.Expr{}, nil + } + return nil, fmt.Errorf("unsupported expr kind: %v", e) +} + +// EntryExprToProto converts an ast.EntryExpr to a protobuf CreateStruct entry +func EntryExprToProto(e EntryExpr) (*exprpb.Expr_CreateStruct_Entry, error) { + switch e.Kind() { + case MapEntryKind: + return protoMapEntry(e.ID(), e.AsMapEntry()) + case StructFieldKind: + return protoStructField(e.ID(), e.AsStructField()) + case UnspecifiedEntryExprKind: + return &exprpb.Expr_CreateStruct_Entry{}, nil + } + return nil, fmt.Errorf("unsupported expr entry kind: %v", e) +} + +func protoCall(id int64, call CallExpr) (*exprpb.Expr, error) { + var err error + var target *exprpb.Expr + if call.IsMemberFunction() { + target, err = ExprToProto(call.Target()) + if err != nil { + return nil, err + } + } + callArgs := call.Args() + args := make([]*exprpb.Expr, len(callArgs)) + for i, a := range callArgs { + args[i], err = ExprToProto(a) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_CallExpr{ + CallExpr: &exprpb.Expr_Call{ + Function: call.FunctionName(), + Target: target, + Args: args, + }, + }, + }, nil +} + +func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) { + iterRange, err := ExprToProto(comp.IterRange()) + if err != nil { + return nil, err + } + accuInit, err := ExprToProto(comp.AccuInit()) + if err != nil { + return nil, err + } + loopCond, err := ExprToProto(comp.LoopCondition()) + if err != nil { + return nil, err + } + loopStep, err := ExprToProto(comp.LoopStep()) + if err != nil { + return nil, err + } + result, err := ExprToProto(comp.Result()) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ComprehensionExpr{ + ComprehensionExpr: &exprpb.Expr_Comprehension{ + IterVar: comp.IterVar(), + IterVar2: comp.IterVar2(), + IterRange: iterRange, + AccuVar: comp.AccuVar(), + AccuInit: accuInit, + LoopCondition: loopCond, + LoopStep: loopStep, + Result: result, + }, + }, + }, nil +} + +func protoIdent(id int64, name string) (*exprpb.Expr, error) { + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_IdentExpr{ + IdentExpr: &exprpb.Expr_Ident{ + Name: name, + }, + }, + }, nil +} + +func protoList(id int64, list ListExpr) (*exprpb.Expr, error) { + var err error + elems := make([]*exprpb.Expr, list.Size()) + for i, e := range list.Elements() { + elems[i], err = ExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ListExpr{ + ListExpr: &exprpb.Expr_CreateList{ + Elements: elems, + OptionalIndices: list.OptionalIndices(), + }, + }, + }, nil +} + +func protoLiteral(id int64, val ref.Val) (*exprpb.Expr, error) { + c, err := ValToConstant(val) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ConstExpr{ + ConstExpr: c, + }, + }, nil +} + +func protoMap(id int64, m MapExpr) (*exprpb.Expr, error) { + entries := make([]*exprpb.Expr_CreateStruct_Entry, len(m.Entries())) + var err error + for i, e := range m.Entries() { + entries[i], err = EntryExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_StructExpr{ + StructExpr: &exprpb.Expr_CreateStruct{ + Entries: entries, + }, + }, + }, nil +} + +func protoMapEntry(id int64, e MapEntry) (*exprpb.Expr_CreateStruct_Entry, error) { + k, err := ExprToProto(e.Key()) + if err != nil { + return nil, err + } + 
v, err := ExprToProto(e.Value()) + if err != nil { + return nil, err + } + return &exprpb.Expr_CreateStruct_Entry{ + Id: id, + KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{ + MapKey: k, + }, + Value: v, + OptionalEntry: e.IsOptional(), + }, nil +} + +func protoSelect(id int64, s SelectExpr) (*exprpb.Expr, error) { + op, err := ExprToProto(s.Operand()) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_SelectExpr{ + SelectExpr: &exprpb.Expr_Select{ + Operand: op, + Field: s.FieldName(), + TestOnly: s.IsTestOnly(), + }, + }, + }, nil +} + +func protoStruct(id int64, s StructExpr) (*exprpb.Expr, error) { + entries := make([]*exprpb.Expr_CreateStruct_Entry, len(s.Fields())) + var err error + for i, e := range s.Fields() { + entries[i], err = EntryExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_StructExpr{ + StructExpr: &exprpb.Expr_CreateStruct{ + MessageName: s.TypeName(), + Entries: entries, + }, + }, + }, nil +} + +func protoStructField(id int64, f StructField) (*exprpb.Expr_CreateStruct_Entry, error) { + v, err := ExprToProto(f.Value()) + if err != nil { + return nil, err + } + return &exprpb.Expr_CreateStruct_Entry{ + Id: id, + KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{ + FieldKey: f.Name(), + }, + Value: v, + OptionalEntry: f.IsOptional(), + }, nil +} + +// SourceInfoToProto serializes an ast.SourceInfo value to a protobuf SourceInfo object. +func SourceInfoToProto(info *SourceInfo) (*exprpb.SourceInfo, error) { + if info == nil { + return &exprpb.SourceInfo{}, nil + } + sourceInfo := &exprpb.SourceInfo{ + SyntaxVersion: info.SyntaxVersion(), + Location: info.Description(), + LineOffsets: info.LineOffsets(), + Positions: make(map[int64]int32, len(info.OffsetRanges())), + MacroCalls: make(map[int64]*exprpb.Expr, len(info.MacroCalls())), + } + for id, offset := range info.OffsetRanges() { + sourceInfo.Positions[id] = offset.Start + } + for id, e := range info.MacroCalls() { + call, err := ExprToProto(e) + if err != nil { + return nil, err + } + sourceInfo.MacroCalls[id] = call + } + return sourceInfo, nil +} + +// ProtoToSourceInfo deserializes the protobuf into a native SourceInfo value. +func ProtoToSourceInfo(info *exprpb.SourceInfo) (*SourceInfo, error) { + sourceInfo := &SourceInfo{ + syntax: info.GetSyntaxVersion(), + desc: info.GetLocation(), + lines: info.GetLineOffsets(), + offsetRanges: make(map[int64]OffsetRange, len(info.GetPositions())), + macroCalls: make(map[int64]Expr, len(info.GetMacroCalls())), + } + for id, offset := range info.GetPositions() { + sourceInfo.SetOffsetRange(id, OffsetRange{Start: offset, Stop: offset}) + } + for id, e := range info.GetMacroCalls() { + call, err := ProtoToExpr(e) + if err != nil { + return nil, err + } + sourceInfo.SetMacroCall(id, call) + } + return sourceInfo, nil +} + +// ReferenceInfoToProto converts a ReferenceInfo instance to a protobuf Reference suitable for serialization. +func ReferenceInfoToProto(info *ReferenceInfo) (*exprpb.Reference, error) { + c, err := ValToConstant(info.Value) + if err != nil { + return nil, err + } + return &exprpb.Reference{ + Name: info.Name, + OverloadId: info.OverloadIDs, + Value: c, + }, nil +} + +// ProtoToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance. 
+func ProtoToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) { + v, err := ConstantToVal(ref.GetValue()) + if err != nil { + return nil, err + } + return &ReferenceInfo{ + Name: ref.GetName(), + OverloadIDs: ref.GetOverloadId(), + Value: v, + }, nil +} + +// ValToConstant converts a CEL-native ref.Val to a protobuf Constant. +// +// Only simple scalar types are supported by this method. +func ValToConstant(v ref.Val) (*exprpb.Constant, error) { + if v == nil { + return nil, nil + } + switch v.Type() { + case types.BoolType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil + case types.BytesType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil + case types.DoubleType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil + case types.IntType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil + case types.NullType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil + case types.StringType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil + case types.UintType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil + } + return nil, fmt.Errorf("unsupported constant kind: %v", v.Type()) +} + +// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. +func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { + return AlphaProtoConstantAsVal(c) +} + +// AlphaProtoConstantAsVal converts a v1alpha1.Constant protobuf to a CEL-native ref.Val. +func AlphaProtoConstantAsVal(c *exprpb.Constant) (ref.Val, error) { + if c == nil { + return nil, nil + } + canonical := &celpb.Constant{} + if err := convertProto(c, canonical); err != nil { + return nil, err + } + return ProtoConstantAsVal(canonical) +} + +// ProtoConstantAsVal converts a canonical celpb.Constant protobuf to a CEL-native ref.Val. 
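The constant helpers above now route through the canonical cel.dev/expr protos. A small sketch (not part of the vendored code) of converting a scalar value to a protobuf Constant and back, using only ValToConstant and ConstantToVal as defined in this file:

// Minimal sketch, not part of the vendored diff: round-tripping a scalar ref.Val
// through the protobuf Constant representation. Only simple scalar types are
// supported by ValToConstant, per its doc comment.
package constantexample

import (
	"fmt"

	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/types"
)

func constantRoundTrip() error {
	c, err := ast.ValToConstant(types.String("hello"))
	if err != nil {
		return err
	}
	v, err := ast.ConstantToVal(c)
	if err != nil {
		return err
	}
	fmt.Println(v.Value()) // "hello"
	return nil
}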
+func ProtoConstantAsVal(c *celpb.Constant) (ref.Val, error) { + switch c.GetConstantKind().(type) { + case *celpb.Constant_BoolValue: + return types.Bool(c.GetBoolValue()), nil + case *celpb.Constant_BytesValue: + return types.Bytes(c.GetBytesValue()), nil + case *celpb.Constant_DoubleValue: + return types.Double(c.GetDoubleValue()), nil + case *celpb.Constant_Int64Value: + return types.Int(c.GetInt64Value()), nil + case *celpb.Constant_NullValue: + return types.NullValue, nil + case *celpb.Constant_StringValue: + return types.String(c.GetStringValue()), nil + case *celpb.Constant_Uint64Value: + return types.Uint(c.GetUint64Value()), nil + } + return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) +} + +func convertProto(src, dst proto.Message) error { + pb, err := proto.Marshal(src) + if err != nil { + return err + } + err = proto.Unmarshal(pb, dst) + return err +} diff --git a/constraint/vendor/github.com/google/cel-go/common/ast/expr.go b/constraint/vendor/github.com/google/cel-go/common/ast/expr.go index b63884a60..9f55cb3b9 100644 --- a/constraint/vendor/github.com/google/cel-go/common/ast/expr.go +++ b/constraint/vendor/github.com/google/cel-go/common/ast/expr.go @@ -15,168 +15,61 @@ package ast import ( - "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // ExprKind represents the expression node kind. type ExprKind int const ( - // UnspecifiedKind represents an unset expression with no specified properties. - UnspecifiedKind ExprKind = iota + // UnspecifiedExprKind represents an unset expression with no specified properties. + UnspecifiedExprKind ExprKind = iota - // LiteralKind represents a primitive scalar literal. - LiteralKind + // CallKind represents a function call. + CallKind + + // ComprehensionKind represents a comprehension expression generated by a macro. + ComprehensionKind // IdentKind represents a simple variable, constant, or type identifier. IdentKind - // SelectKind represents a field selection expression. - SelectKind - - // CallKind represents a function call. - CallKind - // ListKind represents a list literal expression. ListKind + // LiteralKind represents a primitive scalar literal. + LiteralKind + // MapKind represents a map literal expression. MapKind + // SelectKind represents a field selection expression. + SelectKind + // StructKind represents a struct literal expression. StructKind - - // ComprehensionKind represents a comprehension expression generated by a macro. - ComprehensionKind ) -// NavigateCheckedAST converts a CheckedAST to a NavigableExpr -func NavigateCheckedAST(ast *CheckedAST) NavigableExpr { - return newNavigableExpr(nil, ast.Expr, ast.TypeMap) -} - -// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match. -// -// This function type should be use with the `Match` and `MatchList` calls. -type ExprMatcher func(NavigableExpr) bool - -// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr -// is comprised of all constant values, such as a simple literal or even list and map literal. -func ConstantValueMatcher() ExprMatcher { - return matchIsConstantValue -} - -// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches -// the specified `kind`. 
-func KindMatcher(kind ExprKind) ExprMatcher { - return func(e NavigableExpr) bool { - return e.Kind() == kind - } -} - -// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose -// function name is equal to `funcName`. -func FunctionMatcher(funcName string) ExprMatcher { - return func(e NavigableExpr) bool { - if e.Kind() != CallKind { - return false - } - return e.AsCall().FunctionName() == funcName - } -} - -// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list. -// -// Such a result would work well with subsequent MatchList calls. -func AllMatcher() ExprMatcher { - return func(NavigableExpr) bool { - return true - } -} - -// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values of the -// descendants which match. -func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr { - return matchListInternal([]NavigableExpr{expr}, matcher, true) -} - -// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a -// subset of NavigableExpr values which match. -func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr { - visit := make([]NavigableExpr, len(exprs)) - copy(visit, exprs) - return matchListInternal(visit, matcher, false) -} - -func matchListInternal(visit []NavigableExpr, matcher ExprMatcher, visitDescendants bool) []NavigableExpr { - var matched []NavigableExpr - for len(visit) != 0 { - e := visit[0] - if matcher(e) { - matched = append(matched, e) - } - if visitDescendants { - visit = append(visit[1:], e.Children()...) - } else { - visit = visit[1:] - } - } - return matched -} - -func matchIsConstantValue(e NavigableExpr) bool { - if e.Kind() == LiteralKind { - return true - } - if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind { - for _, child := range e.Children() { - if !matchIsConstantValue(child) { - return false - } - } - return true - } - return false -} - -// NavigableExpr represents the base navigable expression value. +// Expr represents the base expression node in a CEL abstract syntax tree. // -// Depending on the `Kind()` value, the NavigableExpr may be converted to a concrete expression types +// Depending on the `Kind()` value, the Expr may be converted to a concrete expression types // as indicated by the `As` methods. -// -// NavigableExpr values and their concrete expression types should be nil-safe. Conversion of an expr -// to the wrong kind should produce a nil value. -type NavigableExpr interface { +type Expr interface { // ID of the expression as it appears in the AST ID() int64 // Kind of the expression node. See ExprKind for the valid enum values. Kind() ExprKind - // Type of the expression node. - Type() *types.Type - - // Parent returns the parent expression node, if one exists. - Parent() (NavigableExpr, bool) - - // Children returns a list of child expression nodes. - Children() []NavigableExpr - - // ToExpr adapts this NavigableExpr to a protobuf representation. - ToExpr() *exprpb.Expr - - // AsCall adapts the expr into a NavigableCallExpr + // AsCall adapts the expr into a CallExpr // // The Kind() must be equal to a CallKind for the conversion to be well-defined. - AsCall() NavigableCallExpr + AsCall() CallExpr - // AsComprehension adapts the expr into a NavigableComprehensionExpr. + // AsComprehension adapts the expr into a ComprehensionExpr. 
// // The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined. - AsComprehension() NavigableComprehensionExpr + AsComprehension() ComprehensionExpr // AsIdent adapts the expr into an identifier string. // @@ -188,67 +81,123 @@ type NavigableExpr interface { // The Kind() must be equal to a LiteralKind for the conversion to be well-defined. AsLiteral() ref.Val - // AsList adapts the expr into a NavigableListExpr. + // AsList adapts the expr into a ListExpr. // // The Kind() must be equal to a ListKind for the conversion to be well-defined. - AsList() NavigableListExpr + AsList() ListExpr - // AsMap adapts the expr into a NavigableMapExpr. + // AsMap adapts the expr into a MapExpr. // // The Kind() must be equal to a MapKind for the conversion to be well-defined. - AsMap() NavigableMapExpr + AsMap() MapExpr - // AsSelect adapts the expr into a NavigableSelectExpr. + // AsSelect adapts the expr into a SelectExpr. // // The Kind() must be equal to a SelectKind for the conversion to be well-defined. - AsSelect() NavigableSelectExpr + AsSelect() SelectExpr - // AsStruct adapts the expr into a NavigableStructExpr. + // AsStruct adapts the expr into a StructExpr. // // The Kind() must be equal to a StructKind for the conversion to be well-defined. - AsStruct() NavigableStructExpr + AsStruct() StructExpr - // marker interface method - isNavigable() + // RenumberIDs performs an in-place update of the expression and all of its descendents numeric ids. + RenumberIDs(IDGenerator) + + // SetKindCase replaces the contents of the current expression with the contents of the other. + // + // The SetKindCase takes ownership of any expression instances references within the input Expr. + // A shallow copy is made of the Expr value itself, but not a deep one. + // + // This method should only be used during AST rewrites using temporary Expr values. + SetKindCase(Expr) + + // isExpr is a marker interface. + isExpr() } -// NavigableCallExpr defines an interface for inspecting a function call and its arugments. -type NavigableCallExpr interface { +// EntryExprKind represents the possible EntryExpr kinds. +type EntryExprKind int + +const ( + // UnspecifiedEntryExprKind indicates that the entry expr is not set. + UnspecifiedEntryExprKind EntryExprKind = iota + + // MapEntryKind indicates that the entry is a MapEntry type with key and value expressions. + MapEntryKind + + // StructFieldKind indicates that the entry is a StructField with a field name and initializer + // expression. + StructFieldKind +) + +// EntryExpr represents the base entry expression in a CEL map or struct literal. +type EntryExpr interface { + // ID of the entry as it appears in the AST. + ID() int64 + + // Kind of the entry expression node. See EntryExprKind for valid enum values. + Kind() EntryExprKind + + // AsMapEntry casts the EntryExpr to a MapEntry. + // + // The Kind() must be equal to MapEntryKind for the conversion to be well-defined. + AsMapEntry() MapEntry + + // AsStructField casts the EntryExpr to a StructField + // + // The Kind() must be equal to StructFieldKind for the conversion to be well-defined. + AsStructField() StructField + + // RenumberIDs performs an in-place update of the expression and all of its descendents numeric ids. + RenumberIDs(IDGenerator) + + isEntryExpr() +} + +// IDGenerator produces unique ids suitable for tagging expression nodes +type IDGenerator func(originalID int64) int64 + +// CallExpr defines an interface for inspecting a function call and its arguments. 
+type CallExpr interface { // FunctionName returns the name of the function. FunctionName() string + // IsMemberFunction returns whether the call has a non-nil target indicating it is a member function + IsMemberFunction() bool + // Target returns the target of the expression if one is present. - Target() NavigableExpr + Target() Expr // Args returns the list of call arguments, excluding the target. - Args() []NavigableExpr - - // ReturnType returns the result type of the call. - ReturnType() *types.Type + Args() []Expr // marker interface method - isNavigable() + isExpr() } -// NavigableListExpr defines an interface for inspecting a list literal expression. -type NavigableListExpr interface { +// ListExpr defines an interface for inspecting a list literal expression. +type ListExpr interface { // Elements returns the list elements as navigable expressions. - Elements() []NavigableExpr + Elements() []Expr // OptionalIndicies returns the list of optional indices in the list literal. OptionalIndices() []int32 + // IsOptional indicates whether the given element index is optional. + IsOptional(int32) bool + // Size returns the number of elements in the list. Size() int // marker interface method - isNavigable() + isExpr() } -// NavigableSelectExpr defines an interface for inspecting a select expression. -type NavigableSelectExpr interface { +// SelectExpr defines an interface for inspecting a select expression. +type SelectExpr interface { // Operand returns the selection operand expression. - Operand() NavigableExpr + Operand() Expr // FieldName returns the field name being selected from the operand. FieldName() string @@ -257,453 +206,679 @@ type NavigableSelectExpr interface { IsTestOnly() bool // marker interface method - isNavigable() + isExpr() } -// NavigableMapExpr defines an interface for inspecting a map expression. -type NavigableMapExpr interface { - // Entries returns the map key value pairs as NavigableEntry values. - Entries() []NavigableEntry +// MapExpr defines an interface for inspecting a map expression. +type MapExpr interface { + // Entries returns the map key value pairs as EntryExpr values. + Entries() []EntryExpr // Size returns the number of entries in the map. Size() int // marker interface method - isNavigable() + isExpr() } -// NavigableEntry defines an interface for inspecting a map entry. -type NavigableEntry interface { +// MapEntry defines an interface for inspecting a map entry. +type MapEntry interface { // Key returns the map entry key expression. - Key() NavigableExpr + Key() Expr // Value returns the map entry value expression. - Value() NavigableExpr + Value() Expr // IsOptional returns whether the entry is optional. IsOptional() bool // marker interface method - isNavigable() + isEntryExpr() } -// NavigableStructExpr defines an interfaces for inspecting a struct and its field initializers. -type NavigableStructExpr interface { +// StructExpr defines an interfaces for inspecting a struct and its field initializers. +type StructExpr interface { // TypeName returns the struct type name. TypeName() string - // Fields returns the set of field initializers in the struct expression as NavigableField values. - Fields() []NavigableField + // Fields returns the set of field initializers in the struct expression as EntryExpr values. + Fields() []EntryExpr // marker interface method - isNavigable() + isExpr() } -// NavigableField defines an interface for inspecting a struct field initialization. 
-type NavigableField interface { - // FieldName returns the name of the field. - FieldName() string +// StructField defines an interface for inspecting a struct field initialization. +type StructField interface { + // Name returns the name of the field. + Name() string // Value returns the field initialization expression. - Value() NavigableExpr + Value() Expr // IsOptional returns whether the field is optional. IsOptional() bool // marker interface method - isNavigable() + isEntryExpr() } -// NavigableComprehensionExpr defines an interface for inspecting a comprehension expression. -type NavigableComprehensionExpr interface { +// ComprehensionExpr defines an interface for inspecting a comprehension expression. +type ComprehensionExpr interface { // IterRange returns the iteration range expression. - IterRange() NavigableExpr + IterRange() Expr // IterVar returns the iteration variable name. + // + // For one-variable comprehensions, the iter var refers to the element value + // when iterating over a list, or the map key when iterating over a map. + // + // For two-variable comprehneions, the iter var refers to the list index or the + // map key. IterVar() string + // IterVar2 returns the second iteration variable name. + // + // When the value is non-empty, the comprehension is a two-variable comprehension. + IterVar2() string + + // HasIterVar2 returns true if the second iteration variable is non-empty. + HasIterVar2() bool + // AccuVar returns the accumulation variable name. AccuVar() string // AccuInit returns the accumulation variable initialization expression. - AccuInit() NavigableExpr + AccuInit() Expr // LoopCondition returns the loop condition expression. - LoopCondition() NavigableExpr + LoopCondition() Expr // LoopStep returns the loop step expression. - LoopStep() NavigableExpr + LoopStep() Expr // Result returns the comprehension result expression. 
- Result() NavigableExpr + Result() Expr // marker interface method - isNavigable() + isExpr() +} + +var _ Expr = &expr{} + +type expr struct { + id int64 + exprKindCase +} + +type exprKindCase interface { + Kind() ExprKind + + renumberIDs(IDGenerator) + + isExpr() } -func newNavigableExpr(parent NavigableExpr, expr *exprpb.Expr, typeMap map[int64]*types.Type) NavigableExpr { - kind, factory := kindOf(expr) - nav := &navigableExprImpl{ - parent: parent, - kind: kind, - expr: expr, - typeMap: typeMap, - createChildren: factory, +func (e *expr) ID() int64 { + if e == nil { + return 0 } - return nav + return e.id } -type navigableExprImpl struct { - parent NavigableExpr - kind ExprKind - expr *exprpb.Expr - typeMap map[int64]*types.Type - createChildren childFactory +func (e *expr) Kind() ExprKind { + if e == nil || e.exprKindCase == nil { + return UnspecifiedExprKind + } + return e.exprKindCase.Kind() } -func (nav *navigableExprImpl) ID() int64 { - return nav.ToExpr().GetId() +func (e *expr) AsCall() CallExpr { + if e.Kind() != CallKind { + return nilCall + } + return e.exprKindCase.(CallExpr) } -func (nav *navigableExprImpl) Kind() ExprKind { - return nav.kind +func (e *expr) AsComprehension() ComprehensionExpr { + if e.Kind() != ComprehensionKind { + return nilCompre + } + return e.exprKindCase.(ComprehensionExpr) } -func (nav *navigableExprImpl) Type() *types.Type { - if t, found := nav.typeMap[nav.ID()]; found { - return t +func (e *expr) AsIdent() string { + if e.Kind() != IdentKind { + return "" } - return types.DynType + return string(e.exprKindCase.(baseIdentExpr)) } -func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) { - if nav.parent != nil { - return nav.parent, true +func (e *expr) AsLiteral() ref.Val { + if e.Kind() != LiteralKind { + return nil } - return nil, false + return e.exprKindCase.(*baseLiteral).Val } -func (nav *navigableExprImpl) Children() []NavigableExpr { - return nav.createChildren(nav) +func (e *expr) AsList() ListExpr { + if e.Kind() != ListKind { + return nilList + } + return e.exprKindCase.(ListExpr) } -func (nav *navigableExprImpl) ToExpr() *exprpb.Expr { - return nav.expr +func (e *expr) AsMap() MapExpr { + if e.Kind() != MapKind { + return nilMap + } + return e.exprKindCase.(MapExpr) } -func (nav *navigableExprImpl) AsCall() NavigableCallExpr { - return navigableCallImpl{navigableExprImpl: nav} +func (e *expr) AsSelect() SelectExpr { + if e.Kind() != SelectKind { + return nilSel + } + return e.exprKindCase.(SelectExpr) } -func (nav *navigableExprImpl) AsComprehension() NavigableComprehensionExpr { - return navigableComprehensionImpl{navigableExprImpl: nav} +func (e *expr) AsStruct() StructExpr { + if e.Kind() != StructKind { + return nilStruct + } + return e.exprKindCase.(StructExpr) } -func (nav *navigableExprImpl) AsIdent() string { - return nav.ToExpr().GetIdentExpr().GetName() +func (e *expr) SetKindCase(other Expr) { + if e == nil { + return + } + if other == nil { + e.exprKindCase = nil + return + } + switch other.Kind() { + case CallKind: + c := other.AsCall() + e.exprKindCase = &baseCallExpr{ + function: c.FunctionName(), + target: c.Target(), + args: c.Args(), + isMember: c.IsMemberFunction(), + } + case ComprehensionKind: + c := other.AsComprehension() + e.exprKindCase = &baseComprehensionExpr{ + iterRange: c.IterRange(), + iterVar: c.IterVar(), + iterVar2: c.IterVar2(), + accuVar: c.AccuVar(), + accuInit: c.AccuInit(), + loopCond: c.LoopCondition(), + loopStep: c.LoopStep(), + result: c.Result(), + } + case IdentKind: + e.exprKindCase 
= baseIdentExpr(other.AsIdent()) + case ListKind: + l := other.AsList() + optIndexMap := make(map[int32]struct{}, len(l.OptionalIndices())) + for _, idx := range l.OptionalIndices() { + optIndexMap[idx] = struct{}{} + } + e.exprKindCase = &baseListExpr{ + elements: l.Elements(), + optIndices: l.OptionalIndices(), + optIndexMap: optIndexMap, + } + case LiteralKind: + e.exprKindCase = &baseLiteral{Val: other.AsLiteral()} + case MapKind: + e.exprKindCase = &baseMapExpr{ + entries: other.AsMap().Entries(), + } + case SelectKind: + s := other.AsSelect() + e.exprKindCase = &baseSelectExpr{ + operand: s.Operand(), + field: s.FieldName(), + testOnly: s.IsTestOnly(), + } + case StructKind: + s := other.AsStruct() + e.exprKindCase = &baseStructExpr{ + typeName: s.TypeName(), + fields: s.Fields(), + } + case UnspecifiedExprKind: + e.exprKindCase = nil + } } -func (nav *navigableExprImpl) AsLiteral() ref.Val { - if nav.Kind() != LiteralKind { - return nil +func (e *expr) RenumberIDs(idGen IDGenerator) { + if e == nil { + return } - val, err := ConstantToVal(nav.ToExpr().GetConstExpr()) - if err != nil { - panic(err) + e.id = idGen(e.id) + if e.exprKindCase != nil { + e.exprKindCase.renumberIDs(idGen) } - return val } -func (nav *navigableExprImpl) AsList() NavigableListExpr { - return navigableListImpl{navigableExprImpl: nav} +type baseCallExpr struct { + function string + target Expr + args []Expr + isMember bool } -func (nav *navigableExprImpl) AsMap() NavigableMapExpr { - return navigableMapImpl{navigableExprImpl: nav} +func (*baseCallExpr) Kind() ExprKind { + return CallKind } -func (nav *navigableExprImpl) AsSelect() NavigableSelectExpr { - return navigableSelectImpl{navigableExprImpl: nav} +func (e *baseCallExpr) FunctionName() string { + if e == nil { + return "" + } + return e.function } -func (nav *navigableExprImpl) AsStruct() NavigableStructExpr { - return navigableStructImpl{navigableExprImpl: nav} +func (e *baseCallExpr) IsMemberFunction() bool { + if e == nil { + return false + } + return e.isMember } -func (nav *navigableExprImpl) createChild(e *exprpb.Expr) NavigableExpr { - return newNavigableExpr(nav, e, nav.typeMap) +func (e *baseCallExpr) Target() Expr { + if e == nil || !e.IsMemberFunction() { + return nilExpr + } + return e.target } -func (nav *navigableExprImpl) isNavigable() {} +func (e *baseCallExpr) Args() []Expr { + if e == nil { + return []Expr{} + } + return e.args +} -type navigableCallImpl struct { - *navigableExprImpl +func (e *baseCallExpr) renumberIDs(idGen IDGenerator) { + if e.IsMemberFunction() { + e.Target().RenumberIDs(idGen) + } + for _, arg := range e.Args() { + arg.RenumberIDs(idGen) + } } -func (call navigableCallImpl) FunctionName() string { - return call.ToExpr().GetCallExpr().GetFunction() +func (*baseCallExpr) isExpr() {} + +var _ ComprehensionExpr = &baseComprehensionExpr{} + +type baseComprehensionExpr struct { + iterRange Expr + iterVar string + iterVar2 string + accuVar string + accuInit Expr + loopCond Expr + loopStep Expr + result Expr } -func (call navigableCallImpl) Target() NavigableExpr { - t := call.ToExpr().GetCallExpr().GetTarget() - if t != nil { - return call.createChild(t) - } - return nil +func (*baseComprehensionExpr) Kind() ExprKind { + return ComprehensionKind } -func (call navigableCallImpl) Args() []NavigableExpr { - args := call.ToExpr().GetCallExpr().GetArgs() - navArgs := make([]NavigableExpr, len(args)) - for i, a := range args { - navArgs[i] = call.createChild(a) +func (e *baseComprehensionExpr) IterRange() Expr { + if e == nil 
{ + return nilExpr } - return navArgs + return e.iterRange } -func (call navigableCallImpl) ReturnType() *types.Type { - return call.Type() +func (e *baseComprehensionExpr) IterVar() string { + return e.iterVar } -type navigableComprehensionImpl struct { - *navigableExprImpl +func (e *baseComprehensionExpr) IterVar2() string { + return e.iterVar2 } -func (comp navigableComprehensionImpl) IterRange() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetIterRange()) +func (e *baseComprehensionExpr) HasIterVar2() bool { + return e.iterVar2 != "" } -func (comp navigableComprehensionImpl) IterVar() string { - return comp.ToExpr().GetComprehensionExpr().GetIterVar() +func (e *baseComprehensionExpr) AccuVar() string { + return e.accuVar } -func (comp navigableComprehensionImpl) AccuVar() string { - return comp.ToExpr().GetComprehensionExpr().GetAccuVar() +func (e *baseComprehensionExpr) AccuInit() Expr { + if e == nil { + return nilExpr + } + return e.accuInit } -func (comp navigableComprehensionImpl) AccuInit() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetAccuInit()) +func (e *baseComprehensionExpr) LoopCondition() Expr { + if e == nil { + return nilExpr + } + return e.loopCond } -func (comp navigableComprehensionImpl) LoopCondition() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopCondition()) +func (e *baseComprehensionExpr) LoopStep() Expr { + if e == nil { + return nilExpr + } + return e.loopStep } -func (comp navigableComprehensionImpl) LoopStep() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopStep()) +func (e *baseComprehensionExpr) Result() Expr { + if e == nil { + return nilExpr + } + return e.result } -func (comp navigableComprehensionImpl) Result() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetResult()) +func (e *baseComprehensionExpr) renumberIDs(idGen IDGenerator) { + e.IterRange().RenumberIDs(idGen) + e.AccuInit().RenumberIDs(idGen) + e.LoopCondition().RenumberIDs(idGen) + e.LoopStep().RenumberIDs(idGen) + e.Result().RenumberIDs(idGen) } -type navigableListImpl struct { - *navigableExprImpl +func (*baseComprehensionExpr) isExpr() {} + +var _ exprKindCase = baseIdentExpr("") + +type baseIdentExpr string + +func (baseIdentExpr) Kind() ExprKind { + return IdentKind } -func (l navigableListImpl) Elements() []NavigableExpr { - return l.Children() +func (e baseIdentExpr) renumberIDs(IDGenerator) {} + +func (baseIdentExpr) isExpr() {} + +var _ exprKindCase = &baseLiteral{} +var _ ref.Val = &baseLiteral{} + +type baseLiteral struct { + ref.Val } -func (l navigableListImpl) OptionalIndices() []int32 { - return l.ToExpr().GetListExpr().GetOptionalIndices() +func (*baseLiteral) Kind() ExprKind { + return LiteralKind } -func (l navigableListImpl) Size() int { - return len(l.ToExpr().GetListExpr().GetElements()) +func (l *baseLiteral) renumberIDs(IDGenerator) {} + +func (*baseLiteral) isExpr() {} + +var _ ListExpr = &baseListExpr{} + +type baseListExpr struct { + elements []Expr + optIndices []int32 + optIndexMap map[int32]struct{} } -type navigableMapImpl struct { - *navigableExprImpl +func (*baseListExpr) Kind() ExprKind { + return ListKind } -func (m navigableMapImpl) Entries() []NavigableEntry { - mapExpr := m.ToExpr().GetStructExpr() - entries := make([]NavigableEntry, len(mapExpr.GetEntries())) - for i, e := range mapExpr.GetEntries() { - entries[i] = navigableEntryImpl{ - key: m.createChild(e.GetMapKey()), - val: 
m.createChild(e.GetValue()), - isOpt: e.GetOptionalEntry(), - } +func (e *baseListExpr) Elements() []Expr { + if e == nil { + return []Expr{} } - return entries + return e.elements } -func (m navigableMapImpl) Size() int { - return len(m.ToExpr().GetStructExpr().GetEntries()) +func (e *baseListExpr) IsOptional(index int32) bool { + _, found := e.optIndexMap[index] + return found } -type navigableEntryImpl struct { - key NavigableExpr - val NavigableExpr - isOpt bool +func (e *baseListExpr) OptionalIndices() []int32 { + if e == nil { + return []int32{} + } + return e.optIndices } -func (e navigableEntryImpl) Key() NavigableExpr { - return e.key +func (e *baseListExpr) Size() int { + return len(e.Elements()) } -func (e navigableEntryImpl) Value() NavigableExpr { - return e.val +func (e *baseListExpr) renumberIDs(idGen IDGenerator) { + for _, elem := range e.Elements() { + elem.RenumberIDs(idGen) + } } -func (e navigableEntryImpl) IsOptional() bool { - return e.isOpt +func (*baseListExpr) isExpr() {} + +type baseMapExpr struct { + entries []EntryExpr } -func (e navigableEntryImpl) isNavigable() {} +func (*baseMapExpr) Kind() ExprKind { + return MapKind +} -type navigableSelectImpl struct { - *navigableExprImpl +func (e *baseMapExpr) Entries() []EntryExpr { + if e == nil { + return []EntryExpr{} + } + return e.entries } -func (sel navigableSelectImpl) FieldName() string { - return sel.ToExpr().GetSelectExpr().GetField() +func (e *baseMapExpr) Size() int { + return len(e.Entries()) } -func (sel navigableSelectImpl) IsTestOnly() bool { - return sel.ToExpr().GetSelectExpr().GetTestOnly() +func (e *baseMapExpr) renumberIDs(idGen IDGenerator) { + for _, entry := range e.Entries() { + entry.RenumberIDs(idGen) + } } -func (sel navigableSelectImpl) Operand() NavigableExpr { - return sel.createChild(sel.ToExpr().GetSelectExpr().GetOperand()) +func (*baseMapExpr) isExpr() {} + +type baseSelectExpr struct { + operand Expr + field string + testOnly bool } -type navigableStructImpl struct { - *navigableExprImpl +func (*baseSelectExpr) Kind() ExprKind { + return SelectKind } -func (s navigableStructImpl) TypeName() string { - return s.ToExpr().GetStructExpr().GetMessageName() +func (e *baseSelectExpr) Operand() Expr { + if e == nil || e.operand == nil { + return nilExpr + } + return e.operand } -func (s navigableStructImpl) Fields() []NavigableField { - fieldInits := s.ToExpr().GetStructExpr().GetEntries() - fields := make([]NavigableField, len(fieldInits)) - for i, f := range fieldInits { - fields[i] = navigableFieldImpl{ - name: f.GetFieldKey(), - val: s.createChild(f.GetValue()), - isOpt: f.GetOptionalEntry(), - } +func (e *baseSelectExpr) FieldName() string { + if e == nil { + return "" + } + return e.field +} + +func (e *baseSelectExpr) IsTestOnly() bool { + if e == nil { + return false } - return fields + return e.testOnly } -type navigableFieldImpl struct { - name string - val NavigableExpr - isOpt bool +func (e *baseSelectExpr) renumberIDs(idGen IDGenerator) { + e.Operand().RenumberIDs(idGen) } -func (f navigableFieldImpl) FieldName() string { - return f.name +func (*baseSelectExpr) isExpr() {} + +type baseStructExpr struct { + typeName string + fields []EntryExpr } -func (f navigableFieldImpl) Value() NavigableExpr { - return f.val +func (*baseStructExpr) Kind() ExprKind { + return StructKind } -func (f navigableFieldImpl) IsOptional() bool { - return f.isOpt +func (e *baseStructExpr) TypeName() string { + if e == nil { + return "" + } + return e.typeName } -func (f navigableFieldImpl) 
isNavigable() {} +func (e *baseStructExpr) Fields() []EntryExpr { + if e == nil { + return []EntryExpr{} + } + return e.fields +} -func kindOf(expr *exprpb.Expr) (ExprKind, childFactory) { - switch expr.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - return LiteralKind, noopFactory - case *exprpb.Expr_IdentExpr: - return IdentKind, noopFactory - case *exprpb.Expr_SelectExpr: - return SelectKind, selectFactory - case *exprpb.Expr_CallExpr: - return CallKind, callArgFactory - case *exprpb.Expr_ListExpr: - return ListKind, listElemFactory - case *exprpb.Expr_StructExpr: - if expr.GetStructExpr().GetMessageName() != "" { - return StructKind, structEntryFactory - } - return MapKind, mapEntryFactory - case *exprpb.Expr_ComprehensionExpr: - return ComprehensionKind, comprehensionFactory - default: - return UnspecifiedKind, noopFactory +func (e *baseStructExpr) renumberIDs(idGen IDGenerator) { + for _, f := range e.Fields() { + f.RenumberIDs(idGen) } } -type childFactory func(*navigableExprImpl) []NavigableExpr +func (*baseStructExpr) isExpr() {} -func noopFactory(*navigableExprImpl) []NavigableExpr { - return nil +type entryExprKindCase interface { + Kind() EntryExprKind + + renumberIDs(IDGenerator) + + isEntryExpr() +} + +var _ EntryExpr = &entryExpr{} + +type entryExpr struct { + id int64 + entryExprKindCase } -func selectFactory(nav *navigableExprImpl) []NavigableExpr { - return []NavigableExpr{ - nav.createChild(nav.ToExpr().GetSelectExpr().GetOperand()), +func (e *entryExpr) ID() int64 { + return e.id +} + +func (e *entryExpr) AsMapEntry() MapEntry { + if e.Kind() != MapEntryKind { + return nilMapEntry } + return e.entryExprKindCase.(MapEntry) } -func callArgFactory(nav *navigableExprImpl) []NavigableExpr { - call := nav.ToExpr().GetCallExpr() - argCount := len(call.GetArgs()) - if call.GetTarget() != nil { - argCount++ +func (e *entryExpr) AsStructField() StructField { + if e.Kind() != StructFieldKind { + return nilStructField } - navExprs := make([]NavigableExpr, argCount) - i := 0 - if call.GetTarget() != nil { - navExprs[i] = nav.createChild(call.GetTarget()) - i++ + return e.entryExprKindCase.(StructField) +} + +func (e *entryExpr) RenumberIDs(idGen IDGenerator) { + e.id = idGen(e.id) + e.entryExprKindCase.renumberIDs(idGen) +} + +type baseMapEntry struct { + key Expr + value Expr + isOptional bool +} + +func (e *baseMapEntry) Kind() EntryExprKind { + return MapEntryKind +} + +func (e *baseMapEntry) Key() Expr { + if e == nil { + return nilExpr } - for _, arg := range call.GetArgs() { - navExprs[i] = nav.createChild(arg) - i++ + return e.key +} + +func (e *baseMapEntry) Value() Expr { + if e == nil { + return nilExpr } - return navExprs + return e.value } -func listElemFactory(nav *navigableExprImpl) []NavigableExpr { - l := nav.ToExpr().GetListExpr() - navExprs := make([]NavigableExpr, len(l.GetElements())) - for i, e := range l.GetElements() { - navExprs[i] = nav.createChild(e) +func (e *baseMapEntry) IsOptional() bool { + if e == nil { + return false } - return navExprs + return e.isOptional } -func structEntryFactory(nav *navigableExprImpl) []NavigableExpr { - s := nav.ToExpr().GetStructExpr() - entries := make([]NavigableExpr, len(s.GetEntries())) - for i, e := range s.GetEntries() { +func (e *baseMapEntry) renumberIDs(idGen IDGenerator) { + e.Key().RenumberIDs(idGen) + e.Value().RenumberIDs(idGen) +} + +func (*baseMapEntry) isEntryExpr() {} + +type baseStructField struct { + field string + value Expr + isOptional bool +} + +func (f *baseStructField) Kind() EntryExprKind { 
+ return StructFieldKind +} - entries[i] = nav.createChild(e.GetValue()) +func (f *baseStructField) Name() string { + if f == nil { + return "" } - return entries + return f.field } -func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr { - s := nav.ToExpr().GetStructExpr() - entries := make([]NavigableExpr, len(s.GetEntries())*2) - j := 0 - for _, e := range s.GetEntries() { - entries[j] = nav.createChild(e.GetMapKey()) - entries[j+1] = nav.createChild(e.GetValue()) - j += 2 +func (f *baseStructField) Value() Expr { + if f == nil { + return nilExpr } - return entries + return f.value } -func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr { - compre := nav.ToExpr().GetComprehensionExpr() - return []NavigableExpr{ - nav.createChild(compre.GetIterRange()), - nav.createChild(compre.GetAccuInit()), - nav.createChild(compre.GetLoopCondition()), - nav.createChild(compre.GetLoopStep()), - nav.createChild(compre.GetResult()), +func (f *baseStructField) IsOptional() bool { + if f == nil { + return false } + return f.isOptional } + +func (f *baseStructField) renumberIDs(idGen IDGenerator) { + f.Value().RenumberIDs(idGen) +} + +func (*baseStructField) isEntryExpr() {} + +var ( + nilExpr *expr = nil + nilCall *baseCallExpr = nil + nilCompre *baseComprehensionExpr = nil + nilList *baseListExpr = nil + nilMap *baseMapExpr = nil + nilMapEntry *baseMapEntry = nil + nilSel *baseSelectExpr = nil + nilStruct *baseStructExpr = nil + nilStructField *baseStructField = nil +) diff --git a/constraint/vendor/github.com/google/cel-go/common/ast/factory.go b/constraint/vendor/github.com/google/cel-go/common/ast/factory.go new file mode 100644 index 000000000..994806b79 --- /dev/null +++ b/constraint/vendor/github.com/google/cel-go/common/ast/factory.go @@ -0,0 +1,313 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "github.com/google/cel-go/common/types/ref" + +// ExprFactory interfaces defines a set of methods necessary for building native expression values. +type ExprFactory interface { + // CopyExpr creates a deep copy of the input Expr value. + CopyExpr(Expr) Expr + + // CopyEntryExpr creates a deep copy of the input EntryExpr value. + CopyEntryExpr(EntryExpr) EntryExpr + + // NewCall creates an Expr value representing a global function call. + NewCall(id int64, function string, args ...Expr) Expr + + // NewComprehension creates an Expr value representing a one-variable comprehension over a value range. + NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr + + // NewComprehensionTwoVar creates an Expr value representing a two-variable comprehension over a value range. + NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr + + // NewMemberCall creates an Expr value representing a member function call. 
+ NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr + + // NewIdent creates an Expr value representing an identifier. + NewIdent(id int64, name string) Expr + + // NewAccuIdent creates an Expr value representing an accumulator identifier within a + //comprehension. + NewAccuIdent(id int64) Expr + + // NewLiteral creates an Expr value representing a literal value, such as a string or integer. + NewLiteral(id int64, value ref.Val) Expr + + // NewList creates an Expr value representing a list literal expression with optional indices. + // + // Optional indicies will typically be empty unless the CEL optional types are enabled. + NewList(id int64, elems []Expr, optIndices []int32) Expr + + // NewMap creates an Expr value representing a map literal expression + NewMap(id int64, entries []EntryExpr) Expr + + // NewMapEntry creates a MapEntry with a given key, value, and a flag indicating whether + // the key is optionally set. + NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr + + // NewPresenceTest creates an Expr representing a field presence test on an operand expression. + NewPresenceTest(id int64, operand Expr, field string) Expr + + // NewSelect creates an Expr representing a field selection on an operand expression. + NewSelect(id int64, operand Expr, field string) Expr + + // NewStruct creates an Expr value representing a struct literal with a given type name and a + // set of field initializers. + NewStruct(id int64, typeName string, fields []EntryExpr) Expr + + // NewStructField creates a StructField with a given field name, value, and a flag indicating + // whether the field is optionally set. + NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr + + // NewUnspecifiedExpr creates an empty expression node. + NewUnspecifiedExpr(id int64) Expr + + isExprFactory() +} + +type baseExprFactory struct{} + +// NewExprFactory creates an ExprFactory instance. 
+func NewExprFactory() ExprFactory { + return &baseExprFactory{} +} + +func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr { + if len(args) == 0 { + args = []Expr{} + } + return fac.newExpr( + id, + &baseCallExpr{ + function: function, + target: nilExpr, + args: args, + isMember: false, + }) +} + +func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr, args ...Expr) Expr { + if len(args) == 0 { + args = []Expr{} + } + return fac.newExpr( + id, + &baseCallExpr{ + function: function, + target: target, + args: args, + isMember: true, + }) +} + +func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { + // Set the iter_var2 to empty string to indicate the second variable is omitted + return fac.NewComprehensionTwoVar(id, iterRange, iterVar, "", accuVar, accuInit, loopCond, loopStep, result) +} + +func (fac *baseExprFactory) NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { + return fac.newExpr( + id, + &baseComprehensionExpr{ + iterRange: iterRange, + iterVar: iterVar, + iterVar2: iterVar2, + accuVar: accuVar, + accuInit: accuInit, + loopCond: loopCond, + loopStep: loopStep, + result: result, + }) +} + +func (fac *baseExprFactory) NewIdent(id int64, name string) Expr { + return fac.newExpr(id, baseIdentExpr(name)) +} + +func (fac *baseExprFactory) NewAccuIdent(id int64) Expr { + return fac.NewIdent(id, "__result__") +} + +func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr { + return fac.newExpr(id, &baseLiteral{Val: value}) +} + +func (fac *baseExprFactory) NewList(id int64, elems []Expr, optIndices []int32) Expr { + optIndexMap := make(map[int32]struct{}, len(optIndices)) + for _, idx := range optIndices { + optIndexMap[idx] = struct{}{} + } + return fac.newExpr(id, + &baseListExpr{ + elements: elems, + optIndices: optIndices, + optIndexMap: optIndexMap, + }) +} + +func (fac *baseExprFactory) NewMap(id int64, entries []EntryExpr) Expr { + return fac.newExpr(id, &baseMapExpr{entries: entries}) +} + +func (fac *baseExprFactory) NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr { + return fac.newEntryExpr( + id, + &baseMapEntry{ + key: key, + value: value, + isOptional: isOptional, + }) +} + +func (fac *baseExprFactory) NewPresenceTest(id int64, operand Expr, field string) Expr { + return fac.newExpr( + id, + &baseSelectExpr{ + operand: operand, + field: field, + testOnly: true, + }) +} + +func (fac *baseExprFactory) NewSelect(id int64, operand Expr, field string) Expr { + return fac.newExpr( + id, + &baseSelectExpr{ + operand: operand, + field: field, + }) +} + +func (fac *baseExprFactory) NewStruct(id int64, typeName string, fields []EntryExpr) Expr { + return fac.newExpr( + id, + &baseStructExpr{ + typeName: typeName, + fields: fields, + }) +} + +func (fac *baseExprFactory) NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr { + return fac.newEntryExpr( + id, + &baseStructField{ + field: field, + value: value, + isOptional: isOptional, + }) +} + +func (fac *baseExprFactory) NewUnspecifiedExpr(id int64) Expr { + return fac.newExpr(id, nil) +} + +func (fac *baseExprFactory) CopyExpr(e Expr) Expr { + // unwrap navigable expressions to avoid unnecessary allocations during copying. 
+ if nav, ok := e.(*navigableExprImpl); ok { + e = nav.Expr + } + switch e.Kind() { + case CallKind: + c := e.AsCall() + argsCopy := make([]Expr, len(c.Args())) + for i, arg := range c.Args() { + argsCopy[i] = fac.CopyExpr(arg) + } + if !c.IsMemberFunction() { + return fac.NewCall(e.ID(), c.FunctionName(), argsCopy...) + } + return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...) + case ComprehensionKind: + compre := e.AsComprehension() + return fac.NewComprehensionTwoVar(e.ID(), + fac.CopyExpr(compre.IterRange()), + compre.IterVar(), + compre.IterVar2(), + compre.AccuVar(), + fac.CopyExpr(compre.AccuInit()), + fac.CopyExpr(compre.LoopCondition()), + fac.CopyExpr(compre.LoopStep()), + fac.CopyExpr(compre.Result())) + case IdentKind: + return fac.NewIdent(e.ID(), e.AsIdent()) + case ListKind: + l := e.AsList() + elemsCopy := make([]Expr, l.Size()) + for i, elem := range l.Elements() { + elemsCopy[i] = fac.CopyExpr(elem) + } + return fac.NewList(e.ID(), elemsCopy, l.OptionalIndices()) + case LiteralKind: + return fac.NewLiteral(e.ID(), e.AsLiteral()) + case MapKind: + m := e.AsMap() + entriesCopy := make([]EntryExpr, m.Size()) + for i, entry := range m.Entries() { + entriesCopy[i] = fac.CopyEntryExpr(entry) + } + return fac.NewMap(e.ID(), entriesCopy) + case SelectKind: + s := e.AsSelect() + if s.IsTestOnly() { + return fac.NewPresenceTest(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName()) + } + return fac.NewSelect(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName()) + case StructKind: + s := e.AsStruct() + fieldsCopy := make([]EntryExpr, len(s.Fields())) + for i, field := range s.Fields() { + fieldsCopy[i] = fac.CopyEntryExpr(field) + } + return fac.NewStruct(e.ID(), s.TypeName(), fieldsCopy) + default: + return fac.NewUnspecifiedExpr(e.ID()) + } +} + +func (fac *baseExprFactory) CopyEntryExpr(e EntryExpr) EntryExpr { + switch e.Kind() { + case MapEntryKind: + entry := e.AsMapEntry() + return fac.NewMapEntry(e.ID(), + fac.CopyExpr(entry.Key()), fac.CopyExpr(entry.Value()), entry.IsOptional()) + case StructFieldKind: + field := e.AsStructField() + return fac.NewStructField(e.ID(), + field.Name(), fac.CopyExpr(field.Value()), field.IsOptional()) + default: + return fac.newEntryExpr(e.ID(), nil) + } +} + +func (*baseExprFactory) isExprFactory() {} + +func (fac *baseExprFactory) newExpr(id int64, e exprKindCase) Expr { + return &expr{ + id: id, + exprKindCase: e, + } +} + +func (fac *baseExprFactory) newEntryExpr(id int64, e entryExprKindCase) EntryExpr { + return &entryExpr{ + id: id, + entryExprKindCase: e, + } +} + +var ( + defaultFactory = &baseExprFactory{} +) diff --git a/constraint/vendor/github.com/google/cel-go/common/ast/navigable.go b/constraint/vendor/github.com/google/cel-go/common/ast/navigable.go new file mode 100644 index 000000000..d7a90fb7c --- /dev/null +++ b/constraint/vendor/github.com/google/cel-go/common/ast/navigable.go @@ -0,0 +1,660 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ast + +import ( + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// NavigableExpr represents the base navigable expression value with methods to inspect the +// parent and child expressions. +type NavigableExpr interface { + Expr + + // Type of the expression. + // + // If the expression is type-checked, the type check metadata is returned. If the expression + // has not been type-checked, the types.DynType value is returned. + Type() *types.Type + + // Parent returns the parent expression node, if one exists. + Parent() (NavigableExpr, bool) + + // Children returns a list of child expression nodes. + Children() []NavigableExpr + + // Depth indicates the depth in the expression tree. + // + // The root expression has depth 0. + Depth() int +} + +// NavigateAST converts an AST to a NavigableExpr +func NavigateAST(ast *AST) NavigableExpr { + return NavigateExpr(ast, ast.Expr()) +} + +// NavigateExpr creates a NavigableExpr whose type information is backed by the input AST. +// +// If the expression is already a NavigableExpr, the parent and depth information will be +// propagated on the new NavigableExpr value; otherwise, the expr value will be treated +// as though it is the root of the expression graph with a depth of 0. +func NavigateExpr(ast *AST, expr Expr) NavigableExpr { + depth := 0 + var parent NavigableExpr = nil + if nav, ok := expr.(NavigableExpr); ok { + depth = nav.Depth() + parent, _ = nav.Parent() + } + return newNavigableExpr(ast, parent, expr, depth) +} + +// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match. +// +// This function type should be use with the `Match` and `MatchList` calls. +type ExprMatcher func(NavigableExpr) bool + +// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr +// is comprised of all constant values, such as a simple literal or even list and map literal. +func ConstantValueMatcher() ExprMatcher { + return matchIsConstantValue +} + +// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches +// the specified `kind`. +func KindMatcher(kind ExprKind) ExprMatcher { + return func(e NavigableExpr) bool { + return e.Kind() == kind + } +} + +// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose +// function name is equal to `funcName`. +func FunctionMatcher(funcName string) ExprMatcher { + return func(e NavigableExpr) bool { + if e.Kind() != CallKind { + return false + } + return e.AsCall().FunctionName() == funcName + } +} + +// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list. +// +// Such a result would work well with subsequent MatchList calls. +func AllMatcher() ExprMatcher { + return func(NavigableExpr) bool { + return true + } +} + +// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values +// matching the input criteria in post-order (bottom up). +func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr { + matches := []NavigableExpr{} + navVisitor := &baseVisitor{ + visitExpr: func(e Expr) { + nav := e.(NavigableExpr) + if matcher(nav) { + matches = append(matches, nav) + } + }, + } + visit(expr, navVisitor, postOrder, 0, 0) + return matches +} + +// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a +// subset of NavigableExpr values which match. 
+func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr { + matches := []NavigableExpr{} + navVisitor := &baseVisitor{ + visitExpr: func(e Expr) { + nav := e.(NavigableExpr) + if matcher(nav) { + matches = append(matches, nav) + } + }, + } + for _, expr := range exprs { + visit(expr, navVisitor, postOrder, 0, 1) + } + return matches +} + +// Visitor defines an object for visiting Expr and EntryExpr nodes within an expression graph. +type Visitor interface { + // VisitExpr visits the input expression. + VisitExpr(Expr) + + // VisitEntryExpr visits the input entry expression, i.e. a struct field or map entry. + VisitEntryExpr(EntryExpr) +} + +type baseVisitor struct { + visitExpr func(Expr) + visitEntryExpr func(EntryExpr) +} + +// VisitExpr visits the Expr if the internal expr visitor has been configured. +func (v *baseVisitor) VisitExpr(e Expr) { + if v.visitExpr != nil { + v.visitExpr(e) + } +} + +// VisitEntryExpr visits the entry if the internal expr entry visitor has been configured. +func (v *baseVisitor) VisitEntryExpr(e EntryExpr) { + if v.visitEntryExpr != nil { + v.visitEntryExpr(e) + } +} + +// NewExprVisitor creates a visitor which only visits expression nodes. +func NewExprVisitor(v func(Expr)) Visitor { + return &baseVisitor{ + visitExpr: v, + visitEntryExpr: nil, + } +} + +// PostOrderVisit walks the expression graph and calls the visitor in post-order (bottom-up). +func PostOrderVisit(expr Expr, visitor Visitor) { + visit(expr, visitor, postOrder, 0, 0) +} + +// PreOrderVisit walks the expression graph and calls the visitor in pre-order (top-down). +func PreOrderVisit(expr Expr, visitor Visitor) { + visit(expr, visitor, preOrder, 0, 0) +} + +type visitOrder int + +const ( + preOrder = iota + 1 + postOrder +) + +// TODO: consider exposing a way to configure a limit for the max visit depth. +// It's possible that we could want to configure this on the NewExprVisitor() +// and through MatchDescendents() / MaxID(). 
+func visit(expr Expr, visitor Visitor, order visitOrder, depth, maxDepth int) { + if maxDepth > 0 && depth == maxDepth { + return + } + if order == preOrder { + visitor.VisitExpr(expr) + } + switch expr.Kind() { + case CallKind: + c := expr.AsCall() + if c.IsMemberFunction() { + visit(c.Target(), visitor, order, depth+1, maxDepth) + } + for _, arg := range c.Args() { + visit(arg, visitor, order, depth+1, maxDepth) + } + case ComprehensionKind: + c := expr.AsComprehension() + visit(c.IterRange(), visitor, order, depth+1, maxDepth) + visit(c.AccuInit(), visitor, order, depth+1, maxDepth) + visit(c.LoopCondition(), visitor, order, depth+1, maxDepth) + visit(c.LoopStep(), visitor, order, depth+1, maxDepth) + visit(c.Result(), visitor, order, depth+1, maxDepth) + case ListKind: + l := expr.AsList() + for _, elem := range l.Elements() { + visit(elem, visitor, order, depth+1, maxDepth) + } + case MapKind: + m := expr.AsMap() + for _, e := range m.Entries() { + if order == preOrder { + visitor.VisitEntryExpr(e) + } + entry := e.AsMapEntry() + visit(entry.Key(), visitor, order, depth+1, maxDepth) + visit(entry.Value(), visitor, order, depth+1, maxDepth) + if order == postOrder { + visitor.VisitEntryExpr(e) + } + } + case SelectKind: + visit(expr.AsSelect().Operand(), visitor, order, depth+1, maxDepth) + case StructKind: + s := expr.AsStruct() + for _, f := range s.Fields() { + visitor.VisitEntryExpr(f) + visit(f.AsStructField().Value(), visitor, order, depth+1, maxDepth) + } + } + if order == postOrder { + visitor.VisitExpr(expr) + } +} + +func matchIsConstantValue(e NavigableExpr) bool { + if e.Kind() == LiteralKind { + return true + } + if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind { + for _, child := range e.Children() { + if !matchIsConstantValue(child) { + return false + } + } + return true + } + return false +} + +func newNavigableExpr(ast *AST, parent NavigableExpr, expr Expr, depth int) NavigableExpr { + // Reduce navigable expression nesting by unwrapping the embedded Expr value. 
+ if nav, ok := expr.(*navigableExprImpl); ok { + expr = nav.Expr + } + nav := &navigableExprImpl{ + Expr: expr, + depth: depth, + ast: ast, + parent: parent, + createChildren: getChildFactory(expr), + } + return nav +} + +type navigableExprImpl struct { + Expr + depth int + ast *AST + parent NavigableExpr + createChildren childFactory +} + +func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) { + if nav.parent != nil { + return nav.parent, true + } + return nil, false +} + +func (nav *navigableExprImpl) ID() int64 { + return nav.Expr.ID() +} + +func (nav *navigableExprImpl) Kind() ExprKind { + return nav.Expr.Kind() +} + +func (nav *navigableExprImpl) Type() *types.Type { + return nav.ast.GetType(nav.ID()) +} + +func (nav *navigableExprImpl) Children() []NavigableExpr { + return nav.createChildren(nav) +} + +func (nav *navigableExprImpl) Depth() int { + return nav.depth +} + +func (nav *navigableExprImpl) AsCall() CallExpr { + return navigableCallImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsComprehension() ComprehensionExpr { + return navigableComprehensionImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsIdent() string { + return nav.Expr.AsIdent() +} + +func (nav *navigableExprImpl) AsList() ListExpr { + return navigableListImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsLiteral() ref.Val { + return nav.Expr.AsLiteral() +} + +func (nav *navigableExprImpl) AsMap() MapExpr { + return navigableMapImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsSelect() SelectExpr { + return navigableSelectImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsStruct() StructExpr { + return navigableStructImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) createChild(e Expr) NavigableExpr { + return newNavigableExpr(nav.ast, nav, e, nav.depth+1) +} + +func (nav *navigableExprImpl) isExpr() {} + +type navigableCallImpl struct { + *navigableExprImpl +} + +func (call navigableCallImpl) FunctionName() string { + return call.Expr.AsCall().FunctionName() +} + +func (call navigableCallImpl) IsMemberFunction() bool { + return call.Expr.AsCall().IsMemberFunction() +} + +func (call navigableCallImpl) Target() Expr { + t := call.Expr.AsCall().Target() + if t != nil { + return call.createChild(t) + } + return nil +} + +func (call navigableCallImpl) Args() []Expr { + args := call.Expr.AsCall().Args() + navArgs := make([]Expr, len(args)) + for i, a := range args { + navArgs[i] = call.createChild(a) + } + return navArgs +} + +type navigableComprehensionImpl struct { + *navigableExprImpl +} + +func (comp navigableComprehensionImpl) IterRange() Expr { + return comp.createChild(comp.Expr.AsComprehension().IterRange()) +} + +func (comp navigableComprehensionImpl) IterVar() string { + return comp.Expr.AsComprehension().IterVar() +} + +func (comp navigableComprehensionImpl) IterVar2() string { + return comp.Expr.AsComprehension().IterVar2() +} + +func (comp navigableComprehensionImpl) HasIterVar2() bool { + return comp.Expr.AsComprehension().HasIterVar2() +} + +func (comp navigableComprehensionImpl) AccuVar() string { + return comp.Expr.AsComprehension().AccuVar() +} + +func (comp navigableComprehensionImpl) AccuInit() Expr { + return comp.createChild(comp.Expr.AsComprehension().AccuInit()) +} + +func (comp navigableComprehensionImpl) LoopCondition() Expr { + return comp.createChild(comp.Expr.AsComprehension().LoopCondition()) +} + +func (comp navigableComprehensionImpl) LoopStep() Expr { + return 
comp.createChild(comp.Expr.AsComprehension().LoopStep()) +} + +func (comp navigableComprehensionImpl) Result() Expr { + return comp.createChild(comp.Expr.AsComprehension().Result()) +} + +type navigableListImpl struct { + *navigableExprImpl +} + +func (l navigableListImpl) Elements() []Expr { + pbElems := l.Expr.AsList().Elements() + elems := make([]Expr, len(pbElems)) + for i := 0; i < len(pbElems); i++ { + elems[i] = l.createChild(pbElems[i]) + } + return elems +} + +func (l navigableListImpl) IsOptional(index int32) bool { + return l.Expr.AsList().IsOptional(index) +} + +func (l navigableListImpl) OptionalIndices() []int32 { + return l.Expr.AsList().OptionalIndices() +} + +func (l navigableListImpl) Size() int { + return l.Expr.AsList().Size() +} + +type navigableMapImpl struct { + *navigableExprImpl +} + +func (m navigableMapImpl) Entries() []EntryExpr { + mapExpr := m.Expr.AsMap() + entries := make([]EntryExpr, len(mapExpr.Entries())) + for i, e := range mapExpr.Entries() { + entry := e.AsMapEntry() + entries[i] = &entryExpr{ + id: e.ID(), + entryExprKindCase: navigableEntryImpl{ + key: m.createChild(entry.Key()), + val: m.createChild(entry.Value()), + isOpt: entry.IsOptional(), + }, + } + } + return entries +} + +func (m navigableMapImpl) Size() int { + return m.Expr.AsMap().Size() +} + +type navigableEntryImpl struct { + key NavigableExpr + val NavigableExpr + isOpt bool +} + +func (e navigableEntryImpl) Kind() EntryExprKind { + return MapEntryKind +} + +func (e navigableEntryImpl) Key() Expr { + return e.key +} + +func (e navigableEntryImpl) Value() Expr { + return e.val +} + +func (e navigableEntryImpl) IsOptional() bool { + return e.isOpt +} + +func (e navigableEntryImpl) renumberIDs(IDGenerator) {} + +func (e navigableEntryImpl) isEntryExpr() {} + +type navigableSelectImpl struct { + *navigableExprImpl +} + +func (sel navigableSelectImpl) FieldName() string { + return sel.Expr.AsSelect().FieldName() +} + +func (sel navigableSelectImpl) IsTestOnly() bool { + return sel.Expr.AsSelect().IsTestOnly() +} + +func (sel navigableSelectImpl) Operand() Expr { + return sel.createChild(sel.Expr.AsSelect().Operand()) +} + +type navigableStructImpl struct { + *navigableExprImpl +} + +func (s navigableStructImpl) TypeName() string { + return s.Expr.AsStruct().TypeName() +} + +func (s navigableStructImpl) Fields() []EntryExpr { + fieldInits := s.Expr.AsStruct().Fields() + fields := make([]EntryExpr, len(fieldInits)) + for i, f := range fieldInits { + field := f.AsStructField() + fields[i] = &entryExpr{ + id: f.ID(), + entryExprKindCase: navigableFieldImpl{ + name: field.Name(), + val: s.createChild(field.Value()), + isOpt: field.IsOptional(), + }, + } + } + return fields +} + +type navigableFieldImpl struct { + name string + val NavigableExpr + isOpt bool +} + +func (f navigableFieldImpl) Kind() EntryExprKind { + return StructFieldKind +} + +func (f navigableFieldImpl) Name() string { + return f.name +} + +func (f navigableFieldImpl) Value() Expr { + return f.val +} + +func (f navigableFieldImpl) IsOptional() bool { + return f.isOpt +} + +func (f navigableFieldImpl) renumberIDs(IDGenerator) {} + +func (f navigableFieldImpl) isEntryExpr() {} + +func getChildFactory(expr Expr) childFactory { + if expr == nil { + return noopFactory + } + switch expr.Kind() { + case LiteralKind: + return noopFactory + case IdentKind: + return noopFactory + case SelectKind: + return selectFactory + case CallKind: + return callArgFactory + case ListKind: + return listElemFactory + case MapKind: + return 
mapEntryFactory + case StructKind: + return structEntryFactory + case ComprehensionKind: + return comprehensionFactory + default: + return noopFactory + } +} + +type childFactory func(*navigableExprImpl) []NavigableExpr + +func noopFactory(*navigableExprImpl) []NavigableExpr { + return nil +} + +func selectFactory(nav *navigableExprImpl) []NavigableExpr { + return []NavigableExpr{nav.createChild(nav.AsSelect().Operand())} +} + +func callArgFactory(nav *navigableExprImpl) []NavigableExpr { + call := nav.Expr.AsCall() + argCount := len(call.Args()) + if call.IsMemberFunction() { + argCount++ + } + navExprs := make([]NavigableExpr, argCount) + i := 0 + if call.IsMemberFunction() { + navExprs[i] = nav.createChild(call.Target()) + i++ + } + for _, arg := range call.Args() { + navExprs[i] = nav.createChild(arg) + i++ + } + return navExprs +} + +func listElemFactory(nav *navigableExprImpl) []NavigableExpr { + l := nav.Expr.AsList() + navExprs := make([]NavigableExpr, len(l.Elements())) + for i, e := range l.Elements() { + navExprs[i] = nav.createChild(e) + } + return navExprs +} + +func structEntryFactory(nav *navigableExprImpl) []NavigableExpr { + s := nav.Expr.AsStruct() + entries := make([]NavigableExpr, len(s.Fields())) + for i, e := range s.Fields() { + f := e.AsStructField() + entries[i] = nav.createChild(f.Value()) + } + return entries +} + +func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr { + m := nav.Expr.AsMap() + entries := make([]NavigableExpr, len(m.Entries())*2) + j := 0 + for _, e := range m.Entries() { + mapEntry := e.AsMapEntry() + entries[j] = nav.createChild(mapEntry.Key()) + entries[j+1] = nav.createChild(mapEntry.Value()) + j += 2 + } + return entries +} + +func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr { + compre := nav.Expr.AsComprehension() + return []NavigableExpr{ + nav.createChild(compre.IterRange()), + nav.createChild(compre.AccuInit()), + nav.createChild(compre.LoopCondition()), + nav.createChild(compre.LoopStep()), + nav.createChild(compre.Result()), + } +} diff --git a/constraint/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/common/containers/BUILD.bazel index 3f3f07887..81197f064 100644 --- a/constraint/vendor/github.com/google/cel-go/common/containers/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/common/containers/BUILD.bazel @@ -12,7 +12,7 @@ go_library( ], importpath = "github.com/google/cel-go/common/containers", deps = [ - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "//common/ast:go_default_library", ], ) @@ -26,6 +26,6 @@ go_test( ":go_default_library", ], deps = [ - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "//common/ast:go_default_library", ], ) diff --git a/constraint/vendor/github.com/google/cel-go/common/containers/container.go b/constraint/vendor/github.com/google/cel-go/common/containers/container.go index d46698d3c..3097a3f78 100644 --- a/constraint/vendor/github.com/google/cel-go/common/containers/container.go +++ b/constraint/vendor/github.com/google/cel-go/common/containers/container.go @@ -19,8 +19,9 @@ package containers import ( "fmt" "strings" + "unicode" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/ast" ) var ( @@ -212,6 +213,13 @@ type ContainerOption func(*Container) (*Container, error) func Abbrevs(qualifiedNames ...string) ContainerOption { return func(c *Container) (*Container, error) { for _, qn := 
range qualifiedNames { + qn = strings.TrimSpace(qn) + for _, r := range qn { + if !isIdentifierChar(r) { + return nil, fmt.Errorf( + "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn) + } + } ind := strings.LastIndex(qn, ".") if ind <= 0 || ind >= len(qn)-1 { return nil, fmt.Errorf( @@ -278,6 +286,10 @@ func aliasAs(kind, qualifiedName, alias string) ContainerOption { } } +func isIdentifierChar(r rune) bool { + return r <= unicode.MaxASCII && (r == '.' || r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r)) +} + // Name sets the fully-qualified name of the Container. func Name(name string) ContainerOption { return func(c *Container) (*Container, error) { @@ -297,19 +309,19 @@ func Name(name string) ContainerOption { // ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean // 'found' value that indicates if the conversion is successful. -func ToQualifiedName(e *exprpb.Expr) (string, bool) { - switch e.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - id := e.GetIdentExpr() - return id.GetName(), true - case *exprpb.Expr_SelectExpr: - sel := e.GetSelectExpr() +func ToQualifiedName(e ast.Expr) (string, bool) { + switch e.Kind() { + case ast.IdentKind: + id := e.AsIdent() + return id, true + case ast.SelectKind: + sel := e.AsSelect() // Test only expressions are not valid as qualified names. - if sel.GetTestOnly() { + if sel.IsTestOnly() { return "", false } - if qual, found := ToQualifiedName(sel.GetOperand()); found { - return qual + "." + sel.GetField(), true + if qual, found := ToQualifiedName(sel.Operand()); found { + return qual + "." + sel.FieldName(), true } } return "", false diff --git a/constraint/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/common/debug/BUILD.bazel index 1f029839c..724ed3404 100644 --- a/constraint/vendor/github.com/google/cel-go/common/debug/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/common/debug/BUILD.bazel @@ -13,6 +13,8 @@ go_library( importpath = "github.com/google/cel-go/common/debug", deps = [ "//common:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "//common/ast:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", ], ) diff --git a/constraint/vendor/github.com/google/cel-go/common/debug/debug.go b/constraint/vendor/github.com/google/cel-go/common/debug/debug.go index 5dab156ef..25d2e3d71 100644 --- a/constraint/vendor/github.com/google/cel-go/common/debug/debug.go +++ b/constraint/vendor/github.com/google/cel-go/common/debug/debug.go @@ -22,7 +22,9 @@ import ( "strconv" "strings" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" ) // Adorner returns debug metadata that will be tacked on to the string @@ -38,7 +40,7 @@ type Writer interface { // Buffer pushes an expression into an internal queue of expressions to // write to a string. - Buffer(e *exprpb.Expr) + Buffer(e ast.Expr) } type emptyDebugAdorner struct { @@ -51,12 +53,12 @@ func (a *emptyDebugAdorner) GetMetadata(e any) string { } // ToDebugString gives the unadorned string representation of the Expr. 
-func ToDebugString(e *exprpb.Expr) string { +func ToDebugString(e ast.Expr) string { return ToAdornedDebugString(e, emptyAdorner) } // ToAdornedDebugString gives the adorned string representation of the Expr. -func ToAdornedDebugString(e *exprpb.Expr, adorner Adorner) string { +func ToAdornedDebugString(e ast.Expr, adorner Adorner) string { w := newDebugWriter(adorner) w.Buffer(e) return w.String() @@ -78,49 +80,51 @@ func newDebugWriter(a Adorner) *debugWriter { } } -func (w *debugWriter) Buffer(e *exprpb.Expr) { +func (w *debugWriter) Buffer(e ast.Expr) { if e == nil { return } - switch e.ExprKind.(type) { - case *exprpb.Expr_ConstExpr: - w.append(formatLiteral(e.GetConstExpr())) - case *exprpb.Expr_IdentExpr: - w.append(e.GetIdentExpr().Name) - case *exprpb.Expr_SelectExpr: - w.appendSelect(e.GetSelectExpr()) - case *exprpb.Expr_CallExpr: - w.appendCall(e.GetCallExpr()) - case *exprpb.Expr_ListExpr: - w.appendList(e.GetListExpr()) - case *exprpb.Expr_StructExpr: - w.appendStruct(e.GetStructExpr()) - case *exprpb.Expr_ComprehensionExpr: - w.appendComprehension(e.GetComprehensionExpr()) + switch e.Kind() { + case ast.LiteralKind: + w.append(formatLiteral(e.AsLiteral())) + case ast.IdentKind: + w.append(e.AsIdent()) + case ast.SelectKind: + w.appendSelect(e.AsSelect()) + case ast.CallKind: + w.appendCall(e.AsCall()) + case ast.ListKind: + w.appendList(e.AsList()) + case ast.MapKind: + w.appendMap(e.AsMap()) + case ast.StructKind: + w.appendStruct(e.AsStruct()) + case ast.ComprehensionKind: + w.appendComprehension(e.AsComprehension()) } w.adorn(e) } -func (w *debugWriter) appendSelect(sel *exprpb.Expr_Select) { - w.Buffer(sel.GetOperand()) +func (w *debugWriter) appendSelect(sel ast.SelectExpr) { + w.Buffer(sel.Operand()) w.append(".") - w.append(sel.GetField()) - if sel.TestOnly { + w.append(sel.FieldName()) + if sel.IsTestOnly() { w.append("~test-only~") } } -func (w *debugWriter) appendCall(call *exprpb.Expr_Call) { - if call.Target != nil { - w.Buffer(call.GetTarget()) +func (w *debugWriter) appendCall(call ast.CallExpr) { + if call.IsMemberFunction() { + w.Buffer(call.Target()) w.append(".") } - w.append(call.GetFunction()) + w.append(call.FunctionName()) w.append("(") - if len(call.GetArgs()) > 0 { + if len(call.Args()) > 0 { w.addIndent() w.appendLine() - for i, arg := range call.GetArgs() { + for i, arg := range call.Args() { if i > 0 { w.append(",") w.appendLine() @@ -133,12 +137,12 @@ func (w *debugWriter) appendCall(call *exprpb.Expr_Call) { w.append(")") } -func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) { +func (w *debugWriter) appendList(list ast.ListExpr) { w.append("[") - if len(list.GetElements()) > 0 { + if len(list.Elements()) > 0 { w.appendLine() w.addIndent() - for i, elem := range list.GetElements() { + for i, elem := range list.Elements() { if i > 0 { w.append(",") w.appendLine() @@ -151,32 +155,25 @@ func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) { w.append("]") } -func (w *debugWriter) appendStruct(obj *exprpb.Expr_CreateStruct) { - if obj.MessageName != "" { - w.appendObject(obj) - } else { - w.appendMap(obj) - } -} - -func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) { - w.append(obj.GetMessageName()) +func (w *debugWriter) appendStruct(obj ast.StructExpr) { + w.append(obj.TypeName()) w.append("{") - if len(obj.GetEntries()) > 0 { + if len(obj.Fields()) > 0 { w.appendLine() w.addIndent() - for i, entry := range obj.GetEntries() { + for i, f := range obj.Fields() { + field := f.AsStructField() if i > 0 { w.append(",") 
w.appendLine() } - if entry.GetOptionalEntry() { + if field.IsOptional() { w.append("?") } - w.append(entry.GetFieldKey()) + w.append(field.Name()) w.append(":") - w.Buffer(entry.GetValue()) - w.adorn(entry) + w.Buffer(field.Value()) + w.adorn(f) } w.removeIndent() w.appendLine() @@ -184,23 +181,24 @@ func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) { w.append("}") } -func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) { +func (w *debugWriter) appendMap(m ast.MapExpr) { w.append("{") - if len(obj.GetEntries()) > 0 { + if m.Size() > 0 { w.appendLine() w.addIndent() - for i, entry := range obj.GetEntries() { + for i, e := range m.Entries() { + entry := e.AsMapEntry() if i > 0 { w.append(",") w.appendLine() } - if entry.GetOptionalEntry() { + if entry.IsOptional() { w.append("?") } - w.Buffer(entry.GetMapKey()) + w.Buffer(entry.Key()) w.append(":") - w.Buffer(entry.GetValue()) - w.adorn(entry) + w.Buffer(entry.Value()) + w.adorn(e) } w.removeIndent() w.appendLine() @@ -208,62 +206,67 @@ func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) { w.append("}") } -func (w *debugWriter) appendComprehension(comprehension *exprpb.Expr_Comprehension) { +func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) { w.append("__comprehension__(") w.addIndent() w.appendLine() w.append("// Variable") w.appendLine() - w.append(comprehension.GetIterVar()) + w.append(comprehension.IterVar()) w.append(",") w.appendLine() + if comprehension.HasIterVar2() { + w.append(comprehension.IterVar2()) + w.append(",") + w.appendLine() + } w.append("// Target") w.appendLine() - w.Buffer(comprehension.GetIterRange()) + w.Buffer(comprehension.IterRange()) w.append(",") w.appendLine() w.append("// Accumulator") w.appendLine() - w.append(comprehension.GetAccuVar()) + w.append(comprehension.AccuVar()) w.append(",") w.appendLine() w.append("// Init") w.appendLine() - w.Buffer(comprehension.GetAccuInit()) + w.Buffer(comprehension.AccuInit()) w.append(",") w.appendLine() w.append("// LoopCondition") w.appendLine() - w.Buffer(comprehension.GetLoopCondition()) + w.Buffer(comprehension.LoopCondition()) w.append(",") w.appendLine() w.append("// LoopStep") w.appendLine() - w.Buffer(comprehension.GetLoopStep()) + w.Buffer(comprehension.LoopStep()) w.append(",") w.appendLine() w.append("// Result") w.appendLine() - w.Buffer(comprehension.GetResult()) + w.Buffer(comprehension.Result()) w.append(")") w.removeIndent() } -func formatLiteral(c *exprpb.Constant) string { - switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - return fmt.Sprintf("%t", c.GetBoolValue()) - case *exprpb.Constant_BytesValue: - return fmt.Sprintf("b\"%s\"", string(c.GetBytesValue())) - case *exprpb.Constant_DoubleValue: - return fmt.Sprintf("%v", c.GetDoubleValue()) - case *exprpb.Constant_Int64Value: - return fmt.Sprintf("%d", c.GetInt64Value()) - case *exprpb.Constant_StringValue: - return strconv.Quote(c.GetStringValue()) - case *exprpb.Constant_Uint64Value: - return fmt.Sprintf("%du", c.GetUint64Value()) - case *exprpb.Constant_NullValue: +func formatLiteral(c ref.Val) string { + switch v := c.(type) { + case types.Bool: + return fmt.Sprintf("%t", v) + case types.Bytes: + return fmt.Sprintf("b\"%s\"", string(v)) + case types.Double: + return fmt.Sprintf("%v", float64(v)) + case types.Int: + return fmt.Sprintf("%d", int64(v)) + case types.String: + return strconv.Quote(string(v)) + case types.Uint: + return fmt.Sprintf("%du", uint64(v)) + case types.Null: return "null" default: 
panic("Unknown constant type") diff --git a/constraint/vendor/github.com/google/cel-go/common/decls/decls.go b/constraint/vendor/github.com/google/cel-go/common/decls/decls.go index 734ebe57e..f67808feb 100644 --- a/constraint/vendor/github.com/google/cel-go/common/decls/decls.go +++ b/constraint/vendor/github.com/google/cel-go/common/decls/decls.go @@ -162,7 +162,9 @@ func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error { if oID == overload.ID() { if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() { // Allow redefinition of an overload implementation so long as the signatures match. - f.overloads[oID] = overload + if overload.hasBinding() { + f.overloads[oID] = overload + } return nil } return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID) @@ -249,15 +251,15 @@ func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { // are preserved in order to assist with the function resolution step. switch len(args) { case 1: - if o.unaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + if o.unaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { return o.unaryOp(args[0]) } case 2: - if o.binaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + if o.binaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { return o.binaryOp(args[0], args[1]) } } - if o.functionOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + if o.functionOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { return o.functionOp(args...) } // eventually this will fall through to the noSuchOverload below. @@ -775,8 +777,13 @@ func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool { return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type()) } -// VariableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. -func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { +// TypeVariable creates a new type identifier for use within a types.Provider +func TypeVariable(t *types.Type) *VariableDecl { + return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) +} + +// variableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. +func variableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { varType, err := types.TypeToExprType(v.Type()) if err != nil { return nil, err @@ -784,13 +791,8 @@ func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { return chkdecls.NewVar(v.Name(), varType), nil } -// TypeVariable creates a new type identifier for use within a types.Provider -func TypeVariable(t *types.Type) *VariableDecl { - return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) -} - -// FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. -func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { +// functionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. 
+func functionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads)) for i, oID := range f.overloadOrdinals { o := f.overloads[oID] diff --git a/constraint/vendor/github.com/google/cel-go/common/error.go b/constraint/vendor/github.com/google/cel-go/common/error.go index 774dcb5b4..0cf21345e 100644 --- a/constraint/vendor/github.com/google/cel-go/common/error.go +++ b/constraint/vendor/github.com/google/cel-go/common/error.go @@ -18,8 +18,6 @@ import ( "fmt" "strings" "unicode/utf8" - - "golang.org/x/text/width" ) // NewError creates an error associated with an expression id with the given message at the given location. @@ -35,18 +33,15 @@ type Error struct { } const ( - dot = "." - ind = "^" + dot = "." + ind = "^" + wideDot = "\uff0e" + wideInd = "\uff3e" // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet. maxSnippetLength = 16384 ) -var ( - wideDot = width.Widen.String(dot) - wideInd = width.Widen.String(ind) -) - // ToDisplayString decorates the error message with the source location. func (e *Error) ToDisplayString(source Source) string { var result = fmt.Sprintf("ERROR: %s:%d:%d: %s", diff --git a/constraint/vendor/github.com/google/cel-go/common/errors.go b/constraint/vendor/github.com/google/cel-go/common/errors.go index 63919714e..25adc73d8 100644 --- a/constraint/vendor/github.com/google/cel-go/common/errors.go +++ b/constraint/vendor/github.com/google/cel-go/common/errors.go @@ -64,7 +64,7 @@ func (e *Errors) GetErrors() []*Error { // Append creates a new Errors object with the current and input errors. func (e *Errors) Append(errs []*Error) *Errors { return &Errors{ - errors: append(e.errors, errs...), + errors: append(e.errors[:], errs...), source: e.source, numErrors: e.numErrors + len(errs), maxErrorsToReport: e.maxErrorsToReport, diff --git a/constraint/vendor/github.com/google/cel-go/common/runes/buffer.go b/constraint/vendor/github.com/google/cel-go/common/runes/buffer.go index 50aac0b27..021198224 100644 --- a/constraint/vendor/github.com/google/cel-go/common/runes/buffer.go +++ b/constraint/vendor/github.com/google/cel-go/common/runes/buffer.go @@ -127,20 +127,48 @@ var nilBuffer = &emptyBuffer{} // elements of the byte or uint16 array, and continue. The underlying storage is an rune array // containing any Unicode character. func NewBuffer(data string) Buffer { + buf, _ := newBuffer(data, false) + return buf +} + +// NewBufferAndLineOffsets returns an efficient implementation of Buffer for the given text based on +// the ranges of the encoded code points contained within, as well as returning the line offsets. +// +// Code points are represented as an array of byte, uint16, or rune. This approach ensures that +// each index represents a code point by itself without needing to use an array of rune. At first +// we assume all code points are less than or equal to '\u007f'. If this holds true, the +// underlying storage is a byte array containing only ASCII characters. If we encountered a code +// point above this range but less than or equal to '\uffff' we allocate a uint16 array, copy the +// elements of previous byte array to the uint16 array, and continue. If this holds true, the +// underlying storage is a uint16 array containing only Unicode characters in the Basic Multilingual +// Plane. If we encounter a code point above '\uffff' we allocate an rune array, copy the previous +// elements of the byte or uint16 array, and continue. 
The underlying storage is an rune array +// containing any Unicode character. +func NewBufferAndLineOffsets(data string) (Buffer, []int32) { + return newBuffer(data, true) +} + +func newBuffer(data string, lines bool) (Buffer, []int32) { if len(data) == 0 { - return nilBuffer + return nilBuffer, []int32{0} } var ( - idx = 0 - buf8 = make([]byte, 0, len(data)) + idx = 0 + off int32 = 0 + buf8 = make([]byte, 0, len(data)) buf16 []uint16 buf32 []rune + offs []int32 ) for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s + if lines && r == '\n' { + offs = append(offs, off+1) + } if r < utf8.RuneSelf { buf8 = append(buf8, byte(r)) + off++ continue } if r <= 0xffff { @@ -150,6 +178,7 @@ func NewBuffer(data string) Buffer { } buf8 = nil buf16 = append(buf16, uint16(r)) + off++ goto copy16 } buf32 = make([]rune, len(buf8), len(data)) @@ -158,17 +187,25 @@ func NewBuffer(data string) Buffer { } buf8 = nil buf32 = append(buf32, r) + off++ goto copy32 } + if lines { + offs = append(offs, off+1) + } return &asciiBuffer{ arr: buf8, - } + }, offs copy16: for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s + if lines && r == '\n' { + offs = append(offs, off+1) + } if r <= 0xffff { buf16 = append(buf16, uint16(r)) + off++ continue } buf32 = make([]rune, len(buf16), len(data)) @@ -177,18 +214,29 @@ copy16: } buf16 = nil buf32 = append(buf32, r) + off++ goto copy32 } + if lines { + offs = append(offs, off+1) + } return &basicBuffer{ arr: buf16, - } + }, offs copy32: for idx < len(data) { r, s := utf8.DecodeRuneInString(data[idx:]) idx += s + if lines && r == '\n' { + offs = append(offs, off+1) + } buf32 = append(buf32, r) + off++ + } + if lines { + offs = append(offs, off+1) } return &supplementalBuffer{ arr: buf32, - } + }, offs } diff --git a/constraint/vendor/github.com/google/cel-go/common/source.go b/constraint/vendor/github.com/google/cel-go/common/source.go index acf22bdf1..ec79cb545 100644 --- a/constraint/vendor/github.com/google/cel-go/common/source.go +++ b/constraint/vendor/github.com/google/cel-go/common/source.go @@ -15,9 +15,6 @@ package common import ( - "strings" - "unicode/utf8" - "github.com/google/cel-go/common/runes" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -80,17 +77,11 @@ func NewTextSource(text string) Source { // NewStringSource creates a new Source from the given contents and description. func NewStringSource(contents string, description string) Source { // Compute line offsets up front as they are referred to frequently. 
- lines := strings.Split(contents, "\n") - offsets := make([]int32, len(lines)) - var offset int32 - for i, line := range lines { - offset = offset + int32(utf8.RuneCountInString(line)) + 1 - offsets[int32(i)] = offset - } + buf, offs := runes.NewBufferAndLineOffsets(contents) return &sourceImpl{ - Buffer: runes.NewBuffer(contents), + Buffer: buf, description: description, - lineOffsets: offsets, + lineOffsets: offs, } } @@ -172,9 +163,8 @@ func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) { for _, lineOffset := range s.lineOffsets { if lineOffset > characterOffset { break - } else { - line++ } + line++ } if line == 1 { return line, 0 diff --git a/constraint/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel index c130a93f6..b55f45215 100644 --- a/constraint/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel @@ -12,7 +12,6 @@ go_library( ], importpath = "github.com/google/cel-go/common/stdlib", deps = [ - "//checker/decls:go_default_library", "//common/decls:go_default_library", "//common/functions:go_default_library", "//common/operators:go_default_library", @@ -20,6 +19,5 @@ go_library( "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", ], ) \ No newline at end of file diff --git a/constraint/vendor/github.com/google/cel-go/common/stdlib/standard.go b/constraint/vendor/github.com/google/cel-go/common/stdlib/standard.go index d02cb64bf..1550c1786 100644 --- a/constraint/vendor/github.com/google/cel-go/common/stdlib/standard.go +++ b/constraint/vendor/github.com/google/cel-go/common/stdlib/standard.go @@ -23,15 +23,11 @@ import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) var ( stdFunctions []*decls.FunctionDecl - stdFnDecls []*exprpb.Decl stdTypes []*decls.VariableDecl - stdTypeDecls []*exprpb.Decl ) func init() { @@ -55,15 +51,6 @@ func init() { decls.TypeVariable(types.UintType), } - stdTypeDecls = make([]*exprpb.Decl, 0, len(stdTypes)) - for _, stdType := range stdTypes { - typeVar, err := decls.VariableDeclToExprDecl(stdType) - if err != nil { - panic(err) - } - stdTypeDecls = append(stdTypeDecls, typeVar) - } - stdFunctions = []*decls.FunctionDecl{ // Logical operators. Special-cased within the interpreter. // Note, the singleton binding prevents extensions from overriding the operator behavior. @@ -576,18 +563,6 @@ func init() { decls.MemberOverload(overloads.DurationToMilliseconds, argTypes(types.DurationType), types.IntType)), } - - stdFnDecls = make([]*exprpb.Decl, 0, len(stdFunctions)) - for _, fn := range stdFunctions { - if fn.IsDeclarationDisabled() { - continue - } - ed, err := decls.FunctionDeclToExprDecl(fn) - if err != nil { - panic(err) - } - stdFnDecls = append(stdFnDecls, ed) - } } // Functions returns the set of standard library function declarations and definitions for CEL. @@ -595,27 +570,11 @@ func Functions() []*decls.FunctionDecl { return stdFunctions } -// FunctionExprDecls returns the legacy style protobuf-typed declarations for all functions and overloads -// in the CEL standard environment. 
-// -// Deprecated: use Functions -func FunctionExprDecls() []*exprpb.Decl { - return stdFnDecls -} - // Types returns the set of standard library types for CEL. func Types() []*decls.VariableDecl { return stdTypes } -// TypeExprDecls returns the legacy style protobuf-typed declarations for all types in the CEL -// standard environment. -// -// Deprecated: use Types -func TypeExprDecls() []*exprpb.Decl { - return stdTypeDecls -} - func notStrictlyFalse(value ref.Val) ref.Val { if types.IsBool(value) { return value diff --git a/constraint/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/common/types/BUILD.bazel index b5e44ffbf..8f010fae4 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/common/types/BUILD.bazel @@ -40,10 +40,12 @@ go_library( "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "@com_github_stoewer_go_strcase//:go_default_library", + "@dev_cel_expr//:expr", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + "@org_golang_google_protobuf//types/dynamicpb:go_default_library", "@org_golang_google_protobuf//types/known/anypb:go_default_library", "@org_golang_google_protobuf//types/known/durationpb:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", diff --git a/constraint/vendor/github.com/google/cel-go/common/types/bytes.go b/constraint/vendor/github.com/google/cel-go/common/types/bytes.go index 5838755f8..7e813e291 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/bytes.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/bytes.go @@ -58,7 +58,17 @@ func (b Bytes) Compare(other ref.Val) ref.Val { // ConvertToNative implements the ref.Val interface method. func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) { switch typeDesc.Kind() { - case reflect.Array, reflect.Slice: + case reflect.Array: + if len(b) != typeDesc.Len() { + return nil, fmt.Errorf("[%d]byte not assignable to [%d]byte array", len(b), typeDesc.Len()) + } + refArrPtr := reflect.New(reflect.ArrayOf(len(b), typeDesc.Elem())) + refArr := refArrPtr.Elem() + for i, byt := range b { + refArr.Index(i).Set(reflect.ValueOf(byt).Convert(typeDesc.Elem())) + } + return refArr.Interface(), nil + case reflect.Slice: return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil case reflect.Ptr: switch typeDesc { diff --git a/constraint/vendor/github.com/google/cel-go/common/types/err.go b/constraint/vendor/github.com/google/cel-go/common/types/err.go index aa8f94b4f..9c9d9e21e 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/err.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/err.go @@ -31,6 +31,7 @@ type Error interface { // Err type which extends the built-in go error and implements ref.Val. type Err struct { error + id int64 } var ( @@ -58,7 +59,24 @@ var ( // NewErr creates a new Err described by the format string and args. // TODO: Audit the use of this function and standardize the error messages and codes. 
func NewErr(format string, args ...any) ref.Val { - return &Err{fmt.Errorf(format, args...)} + return &Err{error: fmt.Errorf(format, args...)} +} + +// NewErrWithNodeID creates a new Err described by the format string and args. +// TODO: Audit the use of this function and standardize the error messages and codes. +func NewErrWithNodeID(id int64, format string, args ...any) ref.Val { + return &Err{error: fmt.Errorf(format, args...), id: id} +} + +// LabelErrNode returns val unaltered if it is not an Err or if the error has a non-zero +// AST node ID already present. Otherwise the id is added to the error for +// recovery with the Err.NodeID method. +func LabelErrNode(id int64, val ref.Val) ref.Val { + if err, ok := val.(*Err); ok && err.id == 0 { + err.id = id + return err + } + return val +} // NoSuchOverloadErr returns a new types.Err instance with a no such overload message. @@ -124,6 +142,11 @@ func (e *Err) Value() any { return e.error } +// NodeID returns the AST node ID of the expression that returned the error. +func (e *Err) NodeID() int64 { + return e.id +} + // Is implements errors.Is. func (e *Err) Is(target error) bool { return e.error.Error() == target.Error() diff --git a/constraint/vendor/github.com/google/cel-go/common/types/int.go b/constraint/vendor/github.com/google/cel-go/common/types/int.go index 940772aed..0ae9507c3 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/int.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/int.go @@ -90,6 +90,18 @@ func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) { return nil, err } return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Int8: + v, err := int64ToInt8Checked(int64(i)) + if err != nil { + return nil, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Int16: + v, err := int64ToInt16Checked(int64(i)) + if err != nil { + return nil, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil case reflect.Int64: return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil case reflect.Ptr: diff --git a/constraint/vendor/github.com/google/cel-go/common/types/list.go b/constraint/vendor/github.com/google/cel-go/common/types/list.go index d4932b4a9..ca47d39fe 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/list.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/list.go @@ -190,7 +190,13 @@ func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) { // Allow the element ConvertToNative() function to determine whether conversion is possible. otherElemType := typeDesc.Elem() elemCount := l.size - nativeList := reflect.MakeSlice(typeDesc, elemCount, elemCount) + var nativeList reflect.Value + if typeDesc.Kind() == reflect.Array { + nativeList = reflect.New(reflect.ArrayOf(elemCount, typeDesc)).Elem().Index(0) + } else { + nativeList = reflect.MakeSlice(typeDesc, elemCount, elemCount) + + } for i := 0; i < elemCount; i++ { elem := l.NativeToValue(l.get(i)) nativeElemVal, err := elem.ConvertToNative(otherElemType) @@ -250,6 +256,15 @@ func (l *baseList) IsZeroValue() bool { return l.size == 0 } +// Fold calls the FoldEntry method for each (index, value) pair in the list. +func (l *baseList) Fold(f traits.Folder) { + for i := 0; i < l.size; i++ { + if !f.FoldEntry(i, l.get(i)) { + break + } + } +} + // Iterator implements the traits.Iterable interface method.
func (l *baseList) Iterator() traits.Iterator { return newListIterator(l) @@ -427,6 +442,15 @@ func (l *concatList) IsZeroValue() bool { return l.Size().(Int) == 0 } +// Fold calls the FoldEntry method for each (index, value) pair in the list. +func (l *concatList) Fold(f traits.Folder) { + for i := Int(0); i < l.Size().(Int); i++ { + if !f.FoldEntry(i, l.Get(i)) { + break + } + } +} + // Iterator implements the traits.Iterable interface method. func (l *concatList) Iterator() traits.Iterator { return newListIterator(l) @@ -521,3 +545,30 @@ func IndexOrError(index ref.Val) (int, error) { return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type()) } } + +// ToFoldableList will create a Foldable version of a list suitable for key-value pair iteration. +// +// For values which are already Foldable, this call is a no-op. For all other values, the fold is +// driven via the Size() and Get() calls which means that the folding will function, but take a +// performance hit. +func ToFoldableList(l traits.Lister) traits.Foldable { + if f, ok := l.(traits.Foldable); ok { + return f + } + return interopFoldableList{Lister: l} +} + +type interopFoldableList struct { + traits.Lister +} + +// Fold implements the traits.Foldable interface method and performs an iteration over the +// range of elements of the list. +func (l interopFoldableList) Fold(f traits.Folder) { + sz := l.Size().(Int) + for i := Int(0); i < sz; i++ { + if !f.FoldEntry(i, l.Get(i)) { + break + } + } +} diff --git a/constraint/vendor/github.com/google/cel-go/common/types/map.go b/constraint/vendor/github.com/google/cel-go/common/types/map.go index 739b7aab0..cb6cce78b 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/map.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/map.go @@ -94,6 +94,24 @@ func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper { } } +// NewMutableMap constructs a mutable map from an adapter and a set of map values. +func NewMutableMap(adapter Adapter, mutableValues map[ref.Val]ref.Val) traits.MutableMapper { + mutableCopy := make(map[ref.Val]ref.Val, len(mutableValues)) + for k, v := range mutableValues { + mutableCopy[k] = v + } + m := &mutableMap{ + baseMap: &baseMap{ + Adapter: adapter, + mapAccessor: newRefValMapAccessor(mutableCopy), + value: mutableCopy, + size: len(mutableCopy), + }, + mutableValues: mutableCopy, + } + return m +} + // mapAccessor is a private interface for finding values within a map and iterating over the keys. // This interface implements portions of the API surface area required by the traits.Mapper // interface. @@ -105,6 +123,9 @@ type mapAccessor interface { // Iterator returns an Iterator over the map key set. Iterator() traits.Iterator + + // Fold calls the FoldEntry method for each (key, value) pair in the map. + Fold(traits.Folder) } // baseMap is a reflection based map implementation designed to handle a variety of map-like types. @@ -307,6 +328,28 @@ func (m *baseMap) Value() any { return m.value } +// mutableMap holds onto a set of mutable values which are used for intermediate computations. +type mutableMap struct { + *baseMap + mutableValues map[ref.Val]ref.Val +} + +// Insert implements the traits.MutableMapper interface method, returning true if the key insertion +// succeeds. 
+func (m *mutableMap) Insert(k, v ref.Val) ref.Val { + if _, found := m.Find(k); found { + return NewErr("insert failed: key %v already exists", k) + } + m.mutableValues[k] = v + return m +} + +// ToImmutableMap implements the traits.MutableMapper interface method, converting a mutable map +// an immutable map implementation. +func (m *mutableMap) ToImmutableMap() traits.Mapper { + return NewRefValMap(m.Adapter, m.mutableValues) +} + func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor { return &jsonStructAccessor{ Adapter: adapter, @@ -350,6 +393,15 @@ func (a *jsonStructAccessor) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *jsonStructAccessor) Fold(f traits.Folder) { + for k, v := range a.st { + if !f.FoldEntry(k, v) { + break + } + } +} + func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor { keyType := value.Type().Key() return &reflectMapAccessor{ @@ -424,6 +476,16 @@ func (m *reflectMapAccessor) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (m *reflectMapAccessor) Fold(f traits.Folder) { + mapRange := m.refValue.MapRange() + for mapRange.Next() { + if !f.FoldEntry(mapRange.Key().Interface(), mapRange.Value().Interface()) { + break + } + } +} + func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor { return &refValMapAccessor{mapVal: mapVal} } @@ -477,6 +539,15 @@ func (a *refValMapAccessor) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *refValMapAccessor) Fold(f traits.Folder) { + for k, v := range a.mapVal { + if !f.FoldEntry(k, v) { + break + } + } +} + func newStringMapAccessor(strMap map[string]string) mapAccessor { return &stringMapAccessor{mapVal: strMap} } @@ -515,6 +586,15 @@ func (a *stringMapAccessor) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *stringMapAccessor) Fold(f traits.Folder) { + for k, v := range a.mapVal { + if !f.FoldEntry(k, v) { + break + } + } +} + func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor { return &stringIfaceMapAccessor{ Adapter: adapter, @@ -557,6 +637,15 @@ func (a *stringIfaceMapAccessor) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *stringIfaceMapAccessor) Fold(f traits.Folder) { + for k, v := range a.mapVal { + if !f.FoldEntry(k, v) { + break + } + } +} + // protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to // accessing protoreflect.Map values. type protoMap struct { @@ -769,6 +858,13 @@ func (m *protoMap) Iterator() traits.Iterator { } } +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (m *protoMap) Fold(f traits.Folder) { + m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + return f.FoldEntry(k.Interface(), v.Interface()) + }) +} + // Size returns the number of entries in the protoreflect.Map. func (m *protoMap) Size() ref.Val { return Int(m.value.Len()) @@ -852,3 +948,55 @@ func (it *stringKeyIterator) Next() ref.Val { } return nil } + +// ToFoldableMap will create a Foldable version of a map suitable for key-value pair iteration. +// +// For values which are already Foldable, this call is a no-op. 
For all other values, the fold +// is driven via the Iterator HasNext() and Next() calls as well as the map's Get() method +// which means that the folding will function, but take a performance hit. +func ToFoldableMap(m traits.Mapper) traits.Foldable { + if f, ok := m.(traits.Foldable); ok { + return f + } + return interopFoldableMap{Mapper: m} +} + +type interopFoldableMap struct { + traits.Mapper +} + +func (m interopFoldableMap) Fold(f traits.Folder) { + it := m.Iterator() + for it.HasNext() == True { + k := it.Next() + if !f.FoldEntry(k, m.Get(k)) { + break + } + } +} + +// InsertMapKeyValue inserts a key, value pair into the target map if the target map does not +// already contain the given key. +// +// If the map is mutable, it is modified in-place per the MutableMapper contract. +// If the map is not mutable, a copy containing the new key, value pair is made. +func InsertMapKeyValue(m traits.Mapper, k, v ref.Val) ref.Val { + if mutable, ok := m.(traits.MutableMapper); ok { + return mutable.Insert(k, v) + } + + // Otherwise perform the slow version of the insertion which makes a copy of the incoming map. + if _, found := m.Find(k); !found { + size := m.Size().(Int) + copy := make(map[ref.Val]ref.Val, size+1) + copy[k] = v + it := m.Iterator() + for it.HasNext() == True { + nextK := it.Next() + nextV := m.Get(nextK) + copy[nextK] = nextV + } + return DefaultTypeAdapter.NativeToValue(copy) + } + return NewErr("insert failed: key %v already exists", k) +} diff --git a/constraint/vendor/github.com/google/cel-go/common/types/null.go b/constraint/vendor/github.com/google/cel-go/common/types/null.go index 926ca3dc9..36514ff20 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/null.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/null.go @@ -35,6 +35,8 @@ var ( // golang reflect type for Null values. nullReflectType = reflect.TypeOf(NullValue) + + protoIfaceType = reflect.TypeOf((*proto.Message)(nil)).Elem() ) // ConvertToNative implements ref.Val.ConvertToNative. @@ -61,8 +63,14 @@ func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) { return structpb.NewNullValue(), nil case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType, int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType, - uint64WrapperType: + uint64WrapperType, durationValueType, timestampValueType, protoIfaceType: return nil, nil + case jsonListValueType, jsonStructType: + // skip handling + default: + if typeDesc.Implements(protoIfaceType) { + return nil, nil + } } case reflect.Interface: nv := n.Value() diff --git a/constraint/vendor/github.com/google/cel-go/common/types/optional.go b/constraint/vendor/github.com/google/cel-go/common/types/optional.go index a9f30aed0..97845a740 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/optional.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/optional.go @@ -24,7 +24,7 @@ import ( var ( // OptionalType indicates the runtime type of an optional value. - OptionalType = NewOpaqueType("optional") + OptionalType = NewOpaqueType("optional_type") // OptionalNone is a sentinel value which is used to indicate an empty optional value. 
OptionalNone = &Optional{} diff --git a/constraint/vendor/github.com/google/cel-go/common/types/overflow.go b/constraint/vendor/github.com/google/cel-go/common/types/overflow.go index c68a92182..dcb66ef59 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/overflow.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/overflow.go @@ -326,6 +326,26 @@ func int64ToUint64Checked(v int64) (uint64, error) { return uint64(v), nil } +// int64ToInt8Checked converts an int64 to an int8 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func int64ToInt8Checked(v int64) (int8, error) { + if v < math.MinInt8 || v > math.MaxInt8 { + return 0, errIntOverflow + } + return int8(v), nil +} + +// int64ToInt16Checked converts an int64 to an int16 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func int64ToInt16Checked(v int64) (int16, error) { + if v < math.MinInt16 || v > math.MaxInt16 { + return 0, errIntOverflow + } + return int16(v), nil +} + // int64ToInt32Checked converts an int64 to an int32 value. // // If the conversion fails due to overflow the error return value will be non-nil. @@ -336,6 +356,26 @@ func int64ToInt32Checked(v int64) (int32, error) { return int32(v), nil } +// uint64ToUint8Checked converts a uint64 to a uint8 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToUint8Checked(v uint64) (uint8, error) { + if v > math.MaxUint8 { + return 0, errUintOverflow + } + return uint8(v), nil +} + +// uint64ToUint16Checked converts a uint64 to a uint16 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToUint16Checked(v uint64) (uint16, error) { + if v > math.MaxUint16 { + return 0, errUintOverflow + } + return uint16(v), nil +} + // uint64ToUint32Checked converts a uint64 to a uint32 value. // // If the conversion fails due to overflow the error return value will be non-nil. 
diff --git a/constraint/vendor/github.com/google/cel-go/common/types/pb/type.go b/constraint/vendor/github.com/google/cel-go/common/types/pb/type.go index 6cc95c276..bdd474c95 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/pb/type.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/pb/type.go @@ -427,22 +427,49 @@ func unwrap(desc description, msg proto.Message) (any, bool, error) { return structpb.NullValue_NULL_VALUE, true, nil } case *wrapperspb.BoolValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.BytesValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.DoubleValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.FloatValue: + if v == nil { + return nil, true, nil + } return float64(v.GetValue()), true, nil case *wrapperspb.Int32Value: + if v == nil { + return nil, true, nil + } return int64(v.GetValue()), true, nil case *wrapperspb.Int64Value: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.StringValue: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil case *wrapperspb.UInt32Value: + if v == nil { + return nil, true, nil + } return uint64(v.GetValue()), true, nil case *wrapperspb.UInt64Value: + if v == nil { + return nil, true, nil + } return v.GetValue(), true, nil } return msg, false, nil diff --git a/constraint/vendor/github.com/google/cel-go/common/types/provider.go b/constraint/vendor/github.com/google/cel-go/common/types/provider.go index e80b4622e..936a4e28b 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/provider.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/provider.go @@ -54,6 +54,10 @@ type Provider interface { // Returns false if not found. FindStructType(structType string) (*Type, bool) + // FindStructFieldNames returns the field names associated with the type, if the type + // is found. + FindStructFieldNames(structType string) ([]string, bool) + // FieldStructFieldType returns the field type for a checked type value. Returns // false if the field could not be found. FindStructFieldType(structType, fieldName string) (*FieldType, bool) @@ -154,7 +158,7 @@ func (p *Registry) EnumValue(enumName string) ref.Val { return Int(enumVal.Value()) } -// FieldFieldType returns the field type for a checked type value. Returns false if +// FindFieldType returns the field type for a checked type value. Returns false if // the field could not be found. // // Deprecated: use FindStructFieldType @@ -173,7 +177,24 @@ func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, GetFrom: field.GetFrom}, true } -// FieldStructFieldType returns the field type for a checked type value. Returns +// FindStructFieldNames returns the set of field names for the given struct type, +// if the type exists in the registry. +func (p *Registry) FindStructFieldNames(structType string) ([]string, bool) { + msgType, found := p.pbdb.DescribeType(structType) + if !found { + return []string{}, false + } + fieldMap := msgType.FieldMap() + fields := make([]string, len(fieldMap)) + idx := 0 + for f := range fieldMap { + fields[idx] = f + idx++ + } + return fields, true +} + +// FindStructFieldType returns the field type for a checked type value. Returns +// false if the field could not be found.
func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) { msgType, found := p.pbdb.DescribeType(structType) @@ -255,7 +276,7 @@ func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Va } err := msgSetField(msg, field, value) if err != nil { - return &Err{err} + return &Err{error: err} } } return p.NativeToValue(msg.Interface()) @@ -564,17 +585,46 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) { refKind := refValue.Kind() switch refKind { case reflect.Array, reflect.Slice: + if refValue.Type().Elem() == reflect.TypeOf(byte(0)) { + if refValue.CanAddr() { + return Bytes(refValue.Bytes()), true + } + tmp := reflect.New(refValue.Type()) + tmp.Elem().Set(refValue) + return Bytes(tmp.Elem().Bytes()), true + } return NewDynamicList(a, v), true case reflect.Map: return NewDynamicMap(a, v), true // type aliases of primitive types cannot be asserted as that type, but rather need // to be downcast to int32 before being converted to a CEL representation. + case reflect.Bool: + boolTupe := reflect.TypeOf(false) + return Bool(refValue.Convert(boolTupe).Interface().(bool)), true + case reflect.Int: + intType := reflect.TypeOf(int(0)) + return Int(refValue.Convert(intType).Interface().(int)), true + case reflect.Int8: + intType := reflect.TypeOf(int8(0)) + return Int(refValue.Convert(intType).Interface().(int8)), true + case reflect.Int16: + intType := reflect.TypeOf(int16(0)) + return Int(refValue.Convert(intType).Interface().(int16)), true case reflect.Int32: intType := reflect.TypeOf(int32(0)) return Int(refValue.Convert(intType).Interface().(int32)), true case reflect.Int64: intType := reflect.TypeOf(int64(0)) return Int(refValue.Convert(intType).Interface().(int64)), true + case reflect.Uint: + uintType := reflect.TypeOf(uint(0)) + return Uint(refValue.Convert(uintType).Interface().(uint)), true + case reflect.Uint8: + uintType := reflect.TypeOf(uint8(0)) + return Uint(refValue.Convert(uintType).Interface().(uint8)), true + case reflect.Uint16: + uintType := reflect.TypeOf(uint16(0)) + return Uint(refValue.Convert(uintType).Interface().(uint16)), true case reflect.Uint32: uintType := reflect.TypeOf(uint32(0)) return Uint(refValue.Convert(uintType).Interface().(uint32)), true @@ -587,6 +637,9 @@ func nativeToValue(a Adapter, value any) (ref.Val, bool) { case reflect.Float64: doubleType := reflect.TypeOf(float64(0)) return Double(refValue.Convert(doubleType).Interface().(float64)), true + case reflect.String: + stringType := reflect.TypeOf("") + return String(refValue.Convert(stringType).Interface().(string)), true } } return nil, false diff --git a/constraint/vendor/github.com/google/cel-go/common/types/string.go b/constraint/vendor/github.com/google/cel-go/common/types/string.go index 028e6824d..3a93743f2 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/string.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/string.go @@ -66,10 +66,7 @@ func (s String) Compare(other ref.Val) ref.Val { func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) { switch typeDesc.Kind() { case reflect.String: - if reflect.TypeOf(s).AssignableTo(typeDesc) { - return s, nil - } - return s.Value(), nil + return reflect.ValueOf(s).Convert(typeDesc).Interface(), nil case reflect.Ptr: switch typeDesc { case anyValueType: @@ -158,7 +155,7 @@ func (s String) Match(pattern ref.Val) ref.Val { } matched, err := regexp.MatchString(pat.Value().(string), s.Value().(string)) if err != nil { - return &Err{err} + return 
&Err{error: err} } return Bool(matched) } diff --git a/constraint/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/constraint/vendor/github.com/google/cel-go/common/types/traits/iterator.go index 42dd371aa..91c10f08f 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/traits/iterator.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/traits/iterator.go @@ -34,3 +34,16 @@ type Iterator interface { // Next returns the next element. Next() ref.Val } + +// Foldable aggregate types support iteration over (key, value) or (index, value) pairs. +type Foldable interface { + // Fold invokes the Folder.FoldEntry for all entries in the type + Fold(Folder) +} + +// Folder performs a fold on a given entry and indicates whether to continue folding. +type Folder interface { + // FoldEntry indicates the key, value pair associated with the entry. + // If the output is true, continue folding. Otherwise, terminate the fold. + FoldEntry(key, val any) bool +} diff --git a/constraint/vendor/github.com/google/cel-go/common/types/traits/lister.go b/constraint/vendor/github.com/google/cel-go/common/types/traits/lister.go index 5cf2593f3..e54781a60 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/traits/lister.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/traits/lister.go @@ -27,6 +27,9 @@ type Lister interface { } // MutableLister interface which emits an immutable result after an intermediate computation. +// +// Note, this interface is intended only to be used within Comprehensions where the mutable +// value is not directly observable within the user-authored CEL expression. type MutableLister interface { Lister ToImmutableList() Lister diff --git a/constraint/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/constraint/vendor/github.com/google/cel-go/common/types/traits/mapper.go index 2f7c919a8..d13333f3f 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/traits/mapper.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/traits/mapper.go @@ -31,3 +31,18 @@ type Mapper interface { // (Unknown|Err, false). Find(key ref.Val) (ref.Val, bool) } + +// MutableMapper interface which emits an immutable result after an intermediate computation. +// +// Note, this interface is intended only to be used within Comprehensions where the mutable +// value is not directly observable within the user-authored CEL expression. +type MutableMapper interface { + Mapper + + // Insert a key, value pair into the map, returning the map if the insert is successful + // and an error if key already exists in the mutable map. + Insert(k, v ref.Val) ref.Val + + // ToImmutableMap converts a mutable map into an immutable map. + ToImmutableMap() Mapper +} diff --git a/constraint/vendor/github.com/google/cel-go/common/types/traits/traits.go b/constraint/vendor/github.com/google/cel-go/common/types/traits/traits.go index 6da3e6a3e..51a09df56 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/traits/traits.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/traits/traits.go @@ -59,6 +59,21 @@ const ( // SizerType types support the size() method. SizerType - // SubtractorType type support '-' operations. + // SubtractorType types support '-' operations. SubtractorType + + // FoldableType types support comprehensions v2 macros which iterate over (key, value) pairs. + FoldableType +) + +const ( + // ListerType supports a set of traits necessary for list operations. 
+ // + // The ListerType is syntactic sugar and not intended to be a perfect reflection of all List operators. + ListerType = AdderType | ContainerType | IndexerType | IterableType | SizerType + + // MapperType supports a set of traits necessary for map operations. + // + // The MapperType is syntactic sugar and not intended to be a perfect reflection of all Map operators. + MapperType = ContainerType | IndexerType | IterableType | SizerType ) diff --git a/constraint/vendor/github.com/google/cel-go/common/types/types.go b/constraint/vendor/github.com/google/cel-go/common/types/types.go index 76624eefd..1c5b6c40c 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/types.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/types.go @@ -19,10 +19,13 @@ import ( "reflect" "strings" + "google.golang.org/protobuf/proto" + chkdecls "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + celpb "cel.dev/expr" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -373,6 +376,23 @@ func (t *Type) TypeName() string { return t.runtimeTypeName } +// WithTraits creates a copy of the current Type and sets the trait mask to the traits parameter. +// +// This method should be used with Opaque types where the type acts like a container, e.g. vector. +func (t *Type) WithTraits(traits int) *Type { + if t == nil { + return nil + } + return &Type{ + kind: t.kind, + parameters: t.parameters, + runtimeTypeName: t.runtimeTypeName, + isAssignableType: t.isAssignableType, + isAssignableRuntimeType: t.isAssignableRuntimeType, + traitMask: traits, + } +} + // String returns a human-readable definition of the type name. func (t *Type) String() string { if len(t.Parameters()) == 0 { @@ -496,7 +516,7 @@ func NewNullableType(wrapped *Type) *Type { // NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional. func NewOptionalType(param *Type) *Type { - return NewOpaqueType("optional", param) + return NewOpaqueType("optional_type", param) } // NewOpaqueType creates an abstract parameterized type with a given name. @@ -649,85 +669,99 @@ func TypeToExprType(t *Type) (*exprpb.Type, error) { // ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. func ExprTypeToType(t *exprpb.Type) (*Type, error) { + return AlphaProtoAsType(t) +} + +// AlphaProtoAsType converts a CEL v1alpha1.Type protobuf type to a CEL-native type representation. +func AlphaProtoAsType(t *exprpb.Type) (*Type, error) { + canonical := &celpb.Type{} + if err := convertProto(t, canonical); err != nil { + return nil, err + } + return ProtoAsType(canonical) +} + +// ProtoAsType converts a canonical CEL celpb.Type protobuf type to a CEL-native type representation. 
+func ProtoAsType(t *celpb.Type) (*Type, error) { switch t.GetTypeKind().(type) { - case *exprpb.Type_Dyn: + case *celpb.Type_Dyn: return DynType, nil - case *exprpb.Type_AbstractType_: + case *celpb.Type_AbstractType_: paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) for i, p := range t.GetAbstractType().GetParameterTypes() { - pt, err := ExprTypeToType(p) + pt, err := ProtoAsType(p) if err != nil { return nil, err } paramTypes[i] = pt } return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil - case *exprpb.Type_ListType_: - et, err := ExprTypeToType(t.GetListType().GetElemType()) + case *celpb.Type_ListType_: + et, err := ProtoAsType(t.GetListType().GetElemType()) if err != nil { return nil, err } return NewListType(et), nil - case *exprpb.Type_MapType_: - kt, err := ExprTypeToType(t.GetMapType().GetKeyType()) + case *celpb.Type_MapType_: + kt, err := ProtoAsType(t.GetMapType().GetKeyType()) if err != nil { return nil, err } - vt, err := ExprTypeToType(t.GetMapType().GetValueType()) + vt, err := ProtoAsType(t.GetMapType().GetValueType()) if err != nil { return nil, err } return NewMapType(kt, vt), nil - case *exprpb.Type_MessageType: + case *celpb.Type_MessageType: return NewObjectType(t.GetMessageType()), nil - case *exprpb.Type_Null: + case *celpb.Type_Null: return NullType, nil - case *exprpb.Type_Primitive: + case *celpb.Type_Primitive: switch t.GetPrimitive() { - case exprpb.Type_BOOL: + case celpb.Type_BOOL: return BoolType, nil - case exprpb.Type_BYTES: + case celpb.Type_BYTES: return BytesType, nil - case exprpb.Type_DOUBLE: + case celpb.Type_DOUBLE: return DoubleType, nil - case exprpb.Type_INT64: + case celpb.Type_INT64: return IntType, nil - case exprpb.Type_STRING: + case celpb.Type_STRING: return StringType, nil - case exprpb.Type_UINT64: + case celpb.Type_UINT64: return UintType, nil default: return nil, fmt.Errorf("unsupported primitive type: %v", t) } - case *exprpb.Type_TypeParam: + case *celpb.Type_TypeParam: return NewTypeParamType(t.GetTypeParam()), nil - case *exprpb.Type_Type: + case *celpb.Type_Type: if t.GetType().GetTypeKind() != nil { - p, err := ExprTypeToType(t.GetType()) + p, err := ProtoAsType(t.GetType()) if err != nil { return nil, err } return NewTypeTypeWithParam(p), nil } return TypeType, nil - case *exprpb.Type_WellKnown: + case *celpb.Type_WellKnown: switch t.GetWellKnown() { - case exprpb.Type_ANY: + case celpb.Type_ANY: return AnyType, nil - case exprpb.Type_DURATION: + case celpb.Type_DURATION: return DurationType, nil - case exprpb.Type_TIMESTAMP: + case celpb.Type_TIMESTAMP: return TimestampType, nil default: return nil, fmt.Errorf("unsupported well-known type: %v", t) } - case *exprpb.Type_Wrapper: - t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}}) + case *celpb.Type_Wrapper: + t, err := ProtoAsType(&celpb.Type{TypeKind: &celpb.Type_Primitive{Primitive: t.GetWrapper()}}) if err != nil { return nil, err } return NewNullableType(t), nil - case *exprpb.Type_Error: + case *celpb.Type_Error: return ErrorType, nil default: return nil, fmt.Errorf("unsupported type: %v", t) @@ -759,6 +793,23 @@ func maybeForeignType(t ref.Type) *Type { return NewObjectType(t.TypeName(), traitMask) } +func convertProto(src, dst proto.Message) error { + pb, err := proto.Marshal(src) + if err != nil { + return err + } + err = proto.Unmarshal(pb, dst) + return err +} + +func primitiveType(primitive celpb.Type_PrimitiveType) *celpb.Type { + return &celpb.Type{ + TypeKind: 
&celpb.Type_Primitive{ + Primitive: primitive, + }, + } +} + var ( checkedWellKnowns = map[string]*Type{ // Wrapper types. @@ -803,4 +854,11 @@ var ( } structTypeTraitMask = traits.FieldTesterType | traits.IndexerType + + boolType = primitiveType(celpb.Type_BOOL) + bytesType = primitiveType(celpb.Type_BYTES) + doubleType = primitiveType(celpb.Type_DOUBLE) + intType = primitiveType(celpb.Type_INT64) + stringType = primitiveType(celpb.Type_STRING) + uintType = primitiveType(celpb.Type_UINT64) ) diff --git a/constraint/vendor/github.com/google/cel-go/common/types/uint.go b/constraint/vendor/github.com/google/cel-go/common/types/uint.go index 3257f9ade..6d74f30d8 100644 --- a/constraint/vendor/github.com/google/cel-go/common/types/uint.go +++ b/constraint/vendor/github.com/google/cel-go/common/types/uint.go @@ -80,6 +80,18 @@ func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) { return 0, err } return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Uint8: + v, err := uint64ToUint8Checked(uint64(i)) + if err != nil { + return 0, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Uint16: + v, err := uint64ToUint16Checked(uint64(i)) + if err != nil { + return 0, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil case reflect.Uint64: return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil case reflect.Ptr: diff --git a/constraint/vendor/github.com/google/cel-go/ext/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/ext/BUILD.bazel index 6fdcc60c6..1fece7006 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/ext/BUILD.bazel @@ -7,7 +7,9 @@ package( go_library( name = "go_default_library", srcs = [ + "bindings.go", "encoders.go", + "formatting.go", "guards.go", "lists.go", "math.go", @@ -21,14 +23,16 @@ go_library( deps = [ "//cel:go_default_library", "//checker:go_default_library", - "//checker/decls:go_default_library", + "//common/ast:go_default_library", + "//common/decls:go_default_library", "//common/overloads:go_default_library", + "//common/operators:go_default_library", "//common/types:go_default_library", "//common/types/pb:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "//interpreter:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "//parser:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", "@org_golang_google_protobuf//types/known/structpb", @@ -59,9 +63,8 @@ go_test( "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "//test:go_default_library", - "//test/proto2pb:go_default_library", - "//test/proto3pb:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "//test/proto2pb:go_default_library", + "//test/proto3pb:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", diff --git a/constraint/vendor/github.com/google/cel-go/ext/README.md b/constraint/vendor/github.com/google/cel-go/ext/README.md index 6f621ac4a..07e544d0d 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/README.md +++ b/constraint/vendor/github.com/google/cel-go/ext/README.md @@ -3,12 
+3,12 @@ CEL extensions are a related set of constants, functions, macros, or other features which may not be covered by the core CEL spec. -## Bindings +## Bindings Returns a cel.EnvOption to configure support for local variable bindings in expressions. -# Cel.Bind +### Cel.Bind Binds a simple identifier to an initialization expression which may be used in a subsequenct result expression. Bindings may also be nested within each @@ -19,11 +19,11 @@ other. Examples: cel.bind(a, 'hello', - cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello" + cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello" // Avoid a list allocation within the exists comprehension. cel.bind(valid_values, [a, b, c], - [d, e, f].exists(elem, elem in valid_values)) + [d, e, f].exists(elem, elem in valid_values)) Local bindings are not guaranteed to be evaluated before use. @@ -100,7 +100,8 @@ argument. Simple numeric and list literals are supported as valid argument types; however, other literals will be flagged as errors during macro expansion. If the argument expression does not resolve to a numeric or list(numeric) type during type-checking, or during runtime then an error -will be produced. If a list argument is empty, this too will produce an error. +will be produced. If a list argument is empty, this too will produce an +error. math.least(, ...) -> @@ -117,6 +118,244 @@ Examples: math.least(a, b) // check-time error if a or b is non-numeric math.least(dyn('string')) // runtime error +### Math.BitOr + +Introduced at version: 1 + +Performs a bitwise-OR operation over two int or uint values. + + math.bitOr(, ) -> + math.bitOr(, ) -> + +Examples: + + math.bitOr(1u, 2u) // returns 3u + math.bitOr(-2, -4) // returns -2 + +### Math.BitAnd + +Introduced at version: 1 + +Performs a bitwise-AND operation over two int or uint values. + + math.bitAnd(, ) -> + math.bitAnd(, ) -> + +Examples: + + math.bitAnd(3u, 2u) // return 2u + math.bitAnd(3, 5) // returns 3 + math.bitAnd(-3, -5) // returns -7 + +### Math.BitXor + +Introduced at version: 1 + + math.bitXor(, ) -> + math.bitXor(, ) -> + +Performs a bitwise-XOR operation over two int or uint values. + +Examples: + + math.bitXor(3u, 5u) // returns 6u + math.bitXor(1, 3) // returns 2 + +### Math.BitNot + +Introduced at version: 1 + +Function which accepts a single int or uint and performs a bitwise-NOT +ones-complement of the given binary value. + + math.bitNot() -> + math.bitNot() -> + +Examples + + math.bitNot(1) // returns -1 + math.bitNot(-1) // return 0 + math.bitNot(0u) // returns 18446744073709551615u + +### Math.BitShiftLeft + +Introduced at version: 1 + +Perform a left shift of bits on the first parameter, by the amount of bits +specified in the second parameter. The first parameter is either a uint or +an int. The second parameter must be an int. + +When the second parameter is 64 or greater, 0 will be always be returned +since the number of bits shifted is greater than or equal to the total bit +length of the number being shifted. Negative valued bit shifts will result +in a runtime error. + + math.bitShiftLeft(, ) -> + math.bitShiftLeft(, ) -> + +Examples + + math.bitShiftLeft(1, 2) // returns 4 + math.bitShiftLeft(-1, 2) // returns -4 + math.bitShiftLeft(1u, 2) // return 4u + math.bitShiftLeft(1u, 200) // returns 0u + +### Math.BitShiftRight + +Introduced at version: 1 + +Perform a right shift of bits on the first parameter, by the amount of bits +specified in the second parameter. The first parameter is either a uint or +an int. 
The second parameter must be an int. + +When the second parameter is 64 or greater, 0 will always be returned since +the number of bits shifted is greater than or equal to the total bit length +of the number being shifted. Negative valued bit shifts will result in a +runtime error. + +The sign bit extension will not be preserved for this operation: vacant bits +on the left are filled with 0. + + math.bitShiftRight(, ) -> + math.bitShiftRight(, ) -> + +Examples + + math.bitShiftRight(1024, 2) // returns 256 + math.bitShiftRight(1024u, 2) // returns 256u + math.bitShiftRight(1024u, 64) // returns 0u + +### Math.Ceil + +Introduced at version: 1 + +Compute the ceiling of a double value. + + math.ceil() -> + +Examples: + + math.ceil(1.2) // returns 2.0 + math.ceil(-1.2) // returns -1.0 + +### Math.Floor + +Introduced at version: 1 + +Compute the floor of a double value. + + math.floor() -> + +Examples: + + math.floor(1.2) // returns 1.0 + math.floor(-1.2) // returns -2.0 + +### Math.Round + +Introduced at version: 1 + +Rounds the double value to the nearest whole number with ties rounding away +from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0. + + math.round() -> + +Examples: + + math.round(1.2) // returns 1.0 + math.round(1.5) // returns 2.0 + math.round(-1.5) // returns -2.0 + +### Math.Trunc + +Introduced at version: 1 + +Truncates the fractional portion of the double value. + + math.trunc() -> + +Examples: + + math.trunc(-1.3) // returns -1.0 + math.trunc(1.3) // returns 1.0 + +### Math.Abs + +Introduced at version: 1 + +Returns the absolute value of the numeric type provided as input. If the +value is NaN, the output is NaN. If the input is int64 min, the function +will result in an overflow error. + + math.abs() -> + math.abs() -> + math.abs() -> + +Examples: + + math.abs(-1) // returns 1 + math.abs(1) // returns 1 + math.abs(-9223372036854775808) // overlflow error + +### Math.Sign + +Introduced at version: 1 + +Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or +uint depending on the overload. For floating point values, if NaN is +provided as input, the output is also NaN. The implementation does not +differentiate between positive and negative zero. + + math.sign() -> + math.sign() -> + math.sign() -> + +Examples: + + math.sign(-42) // returns -1 + math.sign(0) // returns 0 + math.sign(42) // returns 1 + +### Math.IsInf + +Introduced at version: 1 + +Returns true if the input double value is -Inf or +Inf. + + math.isInf() -> + +Examples: + + math.isInf(1.0/0.0) // returns true + math.isInf(1.2) // returns false + +### Math.IsNaN + +Introduced at version: 1 + +Returns true if the input double value is NaN, false otherwise. + + math.isNaN() -> + +Examples: + + math.isNaN(0.0/0.0) // returns true + math.isNaN(1.2) // returns false + +### Math.IsFinite + +Introduced at version: 1 + +Returns true if the value is a finite number. Equivalent in behavior to: +!math.isNaN(double) && !math.isInf(double) + + math.isFinite() -> + +Examples: + + math.isFinite(0.0/0.0) // returns false + math.isFinite(1.2) // returns true + ## Protos Protos configure extended macros and functions for proto manipulation. @@ -154,6 +393,65 @@ Example: Extended functions for list manipulation. As a general note, all indices are zero-based. +### Distinct + +**Introduced in version 2** + +Returns the distinct elements of a list. 
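The math helpers documented above (the bitwise operators plus ceil/floor/round/trunc/abs/sign and the isInf/isNaN/isFinite predicates) are all exposed through the `Math()` library option. The following is an illustrative sketch, not part of the vendored change, assuming the exported cel-go API (`cel.NewEnv`, `Compile`, `Program`, `Eval`) and the default (latest) math extension version:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    func main() {
        // ext.Math() enables the math.* namespace, including the version 1
        // bitwise and floating-point helpers documented above.
        env, err := cel.NewEnv(ext.Math())
        if err != nil {
            panic(err)
        }
        ast, iss := env.Compile(`math.bitShiftLeft(1u, 2) == 4u
            && math.ceil(1.2) == 2.0
            && math.isFinite(1.5)`)
        if iss != nil && iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // true
    }
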
+ + .distinct() -> + +Examples: + + [1, 2, 2, 3, 3, 3].distinct() // return [1, 2, 3] + ["b", "b", "c", "a", "c"].distinct() // return ["b", "c", "a"] + [1, "b", 2, "b"].distinct() // return [1, "b", 2] + +### Flatten + +**Introduced in version 1** + +Flattens a list recursively. +If an optional depth is provided, the list is flattened to a the specificied level. +A negative depth value will result in an error. + + .flatten() -> + .flatten(, ) -> + +Examples: + + [1,[2,3],[4]].flatten() // return [1, 2, 3, 4] + [1,[2,[3,4]]].flatten() // return [1, 2, [3, 4]] + [1,2,[],[],[3,4]].flatten() // return [1, 2, 3, 4] + [1,[2,[3,[4]]]].flatten(2) // return [1, 2, 3, [4]] + [1,[2,[3,[4]]]].flatten(-1) // error + +### Range + +**Introduced in version 2** + +Returns a list of integers from 0 to n-1. + + lists.range() -> + +Examples: + + lists.range(5) -> [0, 1, 2, 3, 4] + + +### Reverse + +**Introduced in version 2** + +Returns the elements of a list in reverse order. + + .reverse() -> + +Examples: + + [5, 3, 1, 2].reverse() // return [2, 1, 3, 5] + + ### Slice @@ -164,7 +462,43 @@ Returns a new sub-list using the indexes provided. Examples: [1,2,3,4].slice(1, 3) // return [2, 3] - [1,2,3,4].slice(2, 4) // return [3 ,4] + [1,2,3,4].slice(2, 4) // return [3, 4] + +### Sort + +**Introduced in version 2** + +Sorts a list with comparable elements. If the element type is not comparable +or the element types are not the same, the function will produce an error. + + .sort() -> + T in {int, uint, double, bool, duration, timestamp, string, bytes} + +Examples: + + [3, 2, 1].sort() // return [1, 2, 3] + ["b", "c", "a"].sort() // return ["a", "b", "c"] + [1, "b"].sort() // error + [[1, 2, 3]].sort() // error + +### SortBy + +**Introduced in version 2** + +Sorts a list by a key value, i.e., the order is determined by the result of +an expression applied to each element of the list. + + .sortBy(, ) -> + keyExpr returns a value in {int, uint, double, bool, duration, timestamp, string, bytes} + +Examples: + + [ + Player { name: "foo", score: 0 }, + Player { name: "bar", score: -10 }, + Player { name: "baz", score: 1000 }, + ].sortBy(e, e.score).map(e, e.name) + == ["bar", "foo", "baz"] ## Sets @@ -259,7 +593,8 @@ Examples: 'hello mellow'.indexOf('jello') // returns -1 'hello mellow'.indexOf('', 2) // returns 2 'hello mellow'.indexOf('ello', 2) // returns 7 - 'hello mellow'.indexOf('ello', 20) // error + 'hello mellow'.indexOf('ello', 20) // returns -1 + 'hello mellow'.indexOf('ello', -1) // error ### Join @@ -273,10 +608,10 @@ elements in the resulting string. Examples: - ['hello', 'mellow'].join() // returns 'hellomellow' - ['hello', 'mellow'].join(' ') // returns 'hello mellow' - [].join() // returns '' - [].join('/') // returns '' + ['hello', 'mellow'].join() // returns 'hellomellow' + ['hello', 'mellow'].join(' ') // returns 'hello mellow' + [].join() // returns '' + [].join('/') // returns '' ### LastIndexOf @@ -297,6 +632,7 @@ Examples: 'hello mellow'.lastIndexOf('ello') // returns 7 'hello mellow'.lastIndexOf('jello') // returns -1 'hello mellow'.lastIndexOf('ello', 6) // returns 1 + 'hello mellow'.lastIndexOf('ello', 20) // returns -1 'hello mellow'.lastIndexOf('ello', -1) // error ### LowerAscii @@ -414,3 +750,150 @@ Examples: 'TacoCat'.upperAscii() // returns 'TACOCAT' 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII' + +### Reverse + +Returns a new string whose characters are the same as the target string, only formatted in +reverse order. 
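The list and string helpers documented above can be exercised together. This is a sketch under the assumption that `ext.Lists()` and `ext.Strings()` default to their latest versions (so `distinct`, `sort`, and `reverse` are available) and reflects the updated out-of-range `indexOf` behavior; it is illustrative only:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    func main() {
        env, err := cel.NewEnv(ext.Lists(), ext.Strings())
        if err != nil {
            panic(err)
        }
        // distinct() and sort() require lists version 2; reverse() requires
        // strings version 3. Both libraries enable their latest version by default.
        ast, iss := env.Compile(`[3, 2, 2, 1].distinct().sort() == [1, 2, 3]
            && 'hello mellow'.indexOf('ello', 20) == -1
            && 'gums'.reverse() == 'smug'`)
        if iss != nil && iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // true
    }
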
+This function relies on converting strings to rune arrays in order to reverse. +It can be located in Version 3 of strings. + + .reverse() -> + +Examples: + + 'gums'.reverse() // returns 'smug' + 'John Smith'.reverse() // returns 'htimS nhoJ' + +## TwoVarComprehensions + +TwoVarComprehensions introduces support for two-variable comprehensions. + +The two-variable form of comprehensions looks similar to the one-variable +counterparts. Where possible, the same macro names were used and additional +macro signatures added. The notable distinction for two-variable comprehensions +is the introduction of `transformList`, `transformMap`, and `transformMapEntry` +support for list and map types rather than the more traditional `map` and +`filter` macros. + +### All + +Comprehension which tests whether all elements in the list or map satisfy a +given predicate. The `all` macro evaluates in a manner consistent with logical +AND and will short-circuit when encountering a `false` value. + + .all(indexVar, valueVar, ) -> bool + .all(keyVar, valueVar, ) -> bool + +Examples: + + [1, 2, 3].all(i, j, i < j) // returns true + {'hello': 'world', 'taco': 'taco'}.all(k, v, k != v) // returns false + + // Combines two-variable comprehension with single variable + {'h': ['hello', 'hi'], 'j': ['joke', 'jog']} + .all(k, vals, vals.all(v, v.startsWith(k))) // returns true + +### Exists + +Comprehension which tests whether any element in a list or map exists which +satisfies a given predicate. The `exists` macro evaluates in a manner consistent +with logical OR and will short-circuit when encountering a `true` value. + + .exists(indexVar, valueVar, ) -> bool + .exists(keyVar, valueVar, ) -> bool + +Examples: + + {'greeting': 'hello', 'farewell': 'goodbye'} + .exists(k, v, k.startsWith('good') || v.endsWith('bye')) // returns true + [1, 2, 4, 8, 16].exists(i, v, v == 1024 && i == 10) // returns false + +### ExistsOne + +Comprehension which tests whether exactly one element in a list or map exists +which satisfies a given predicate expression. This comprehension does not +short-circuit in keeping with the one-variable exists one macro semantics. + + .existsOne(indexVar, valueVar, ) + .existsOne(keyVar, valueVar, ) + +This macro may also be used with the `exists_one` function name, for +compatibility with the one-variable macro of the same name. + +Examples: + + [1, 2, 1, 3, 1, 4].existsOne(i, v, i == 1 || v == 1) // returns false + [1, 1, 2, 2, 3, 3].existsOne(i, v, i == 2 && v == 2) // returns true + {'i': 0, 'j': 1, 'k': 2}.existsOne(i, v, i == 'l' || v == 1) // returns true + +### TransformList + +Comprehension which converts a map or a list into a list value. The output +expression of the comprehension determines the contents of the output list. +Elements in the list may optionally be filtered according to a predicate +expression, where elements that satisfy the predicate are transformed. 
+ + .transformList(indexVar, valueVar, ) + .transformList(indexVar, valueVar, , ) + .transformList(keyVar, valueVar, ) + .transformList(keyVar, valueVar, , ) + +Examples: + + [1, 2, 3].transformList(indexVar, valueVar, + (indexVar * valueVar) + valueVar) // returns [1, 4, 9] + [1, 2, 3].transformList(indexVar, valueVar, indexVar % 2 == 0 + (indexVar * valueVar) + valueVar) // returns [1, 9] + {'greeting': 'hello', 'farewell': 'goodbye'} + .transformList(k, _, k) // returns ['greeting', 'farewell'] + {'greeting': 'hello', 'farewell': 'goodbye'} + .transformList(_, v, v) // returns ['hello', 'goodbye'] + +### TransformMap + +Comprehension which converts a map or a list into a map value. The output +expression of the comprehension determines the value of the output map entry; +however, the key remains fixed. Elements in the map may optionally be filtered +according to a predicate expression, where elements that satisfy the predicate +are transformed. + + .transformMap(indexVar, valueVar, ) + .transformMap(indexVar, valueVar, , ) + .transformMap(keyVar, valueVar, ) + .transformMap(keyVar, valueVar, , ) + +Examples: + + [1, 2, 3].transformMap(indexVar, valueVar, + (indexVar * valueVar) + valueVar) // returns {0: 1, 1: 4, 2: 9} + [1, 2, 3].transformMap(indexVar, valueVar, indexVar % 2 == 0 + (indexVar * valueVar) + valueVar) // returns {0: 1, 2: 9} + {'greeting': 'hello'}.transformMap(k, v, v + '!') // returns {'greeting': 'hello!'} + +### TransformMapEntry + +Comprehension which converts a map or a list into a map value; however, this +transform expects the entry expression be a map literal. If the transform +produces an entry which duplicates a key in the target map, the comprehension +will error. Note, that key equality is determined using CEL equality which +asserts that numeric values which are equal, even if they don't have the same +type will cause a key collision. + +Elements in the map may optionally be filtered according to a predicate +expression, where elements that satisfy the predicate are transformed. 
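The two-variable macros above are all registered by a single environment option. A minimal sketch, assuming `ext.TwoVarComprehensions()` composes with the default CEL macros and that the expected results match the README examples; illustrative only:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    func main() {
        env, err := cel.NewEnv(ext.TwoVarComprehensions())
        if err != nil {
            panic(err)
        }
        // index/value form on a list, key/value form on a map.
        ast, iss := env.Compile(`[1, 2, 3].all(i, v, i < v)
            && {'greeting': 'hello'}.transformMapEntry(k, v, {v: k}) == {'hello': 'greeting'}`)
        if iss != nil && iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // true
    }
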
+ + .transformMap(indexVar, valueVar, ) + .transformMap(indexVar, valueVar, , ) + .transformMap(keyVar, valueVar, ) + .transformMap(keyVar, valueVar, , ) + +Examples: + + // returns {'hello': 'greeting'} + {'greeting': 'hello'}.transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) + // reverse lookup, require all values in list be unique + [1, 2, 3].transformMapEntry(indexVar, valueVar, {valueVar: indexVar}) + + {'greeting': 'aloha', 'farewell': 'aloha'} + .transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) // error, duplicate key diff --git a/constraint/vendor/github.com/google/cel-go/ext/bindings.go b/constraint/vendor/github.com/google/cel-go/ext/bindings.go index 4ac9a7f07..50cf4fb3d 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/bindings.go +++ b/constraint/vendor/github.com/google/cel-go/ext/bindings.go @@ -15,9 +15,19 @@ package ext import ( - "github.com/google/cel-go/cel" + "errors" + "fmt" + "math" + "strconv" + "strings" + "sync" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/interpreter" ) // Bindings returns a cel.EnvOption to configure support for local variable @@ -41,56 +51,286 @@ import ( // [d, e, f].exists(elem, elem in valid_values)) // // Local bindings are not guaranteed to be evaluated before use. -func Bindings() cel.EnvOption { - return cel.Lib(celBindings{}) +func Bindings(options ...BindingsOption) cel.EnvOption { + b := &celBindings{version: math.MaxUint32} + for _, o := range options { + b = o(b) + } + return cel.Lib(b) } const ( celNamespace = "cel" bindMacro = "bind" + blockFunc = "@block" unusedIterVar = "#unused" ) -type celBindings struct{} +// BindingsOption declares a functional operator for configuring the Bindings library behavior. +type BindingsOption func(*celBindings) *celBindings + +// BindingsVersion sets the version of the bindings library to an explicit version. +func BindingsVersion(version uint32) BindingsOption { + return func(lib *celBindings) *celBindings { + lib.version = version + return lib + } +} + +type celBindings struct { + version uint32 +} -func (celBindings) LibraryName() string { +func (*celBindings) LibraryName() string { return "cel.lib.ext.cel.bindings" } -func (celBindings) CompileOptions() []cel.EnvOption { - return []cel.EnvOption{ +func (lib *celBindings) CompileOptions() []cel.EnvOption { + opts := []cel.EnvOption{ cel.Macros( // cel.bind(var, , ) - cel.NewReceiverMacro(bindMacro, 3, celBind), + cel.ReceiverMacro(bindMacro, 3, celBind), ), } + if lib.version >= 1 { + // The cel.@block signature takes a list of subexpressions and a typed expression which is + // used as the output type. 
+ paramType := cel.TypeParamType("T") + opts = append(opts, + cel.Function("cel.@block", + cel.Overload("cel_block_list", + []*cel.Type{cel.ListType(cel.DynType), paramType}, paramType)), + ) + opts = append(opts, cel.ASTValidators(blockValidationExemption{})) + } + return opts } -func (celBindings) ProgramOptions() []cel.ProgramOption { +func (lib *celBindings) ProgramOptions() []cel.ProgramOption { + if lib.version >= 1 { + celBlockPlan := func(i interpreter.Interpretable) (interpreter.Interpretable, error) { + call, ok := i.(interpreter.InterpretableCall) + if !ok { + return i, nil + } + switch call.Function() { + case "cel.@block": + args := call.Args() + if len(args) != 2 { + return nil, fmt.Errorf("cel.@block expects two arguments, but got %d", len(args)) + } + expr := args[1] + // Non-empty block + if block, ok := args[0].(interpreter.InterpretableConstructor); ok { + slotExprs := block.InitVals() + return newDynamicBlock(slotExprs, expr), nil + } + // Constant valued block which can happen during runtime optimization. + if cons, ok := args[0].(interpreter.InterpretableConst); ok { + if cons.Value().Type() == types.ListType { + l := cons.Value().(traits.Lister) + if l.Size().Equal(types.IntZero) == types.True { + return args[1], nil + } + return newConstantBlock(l, expr), nil + } + } + return nil, errors.New("cel.@block expects a list constructor as the first argument") + default: + return i, nil + } + } + return []cel.ProgramOption{cel.CustomDecorator(celBlockPlan)} + } return []cel.ProgramOption{} } -func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +type blockValidationExemption struct{} + +// Name returns the name of the validator. +func (blockValidationExemption) Name() string { + return "cel.lib.ext.validate.functions.cel.block" +} + +// Configure implements the ASTValidatorConfigurer interface and augments the list of functions to skip +// during homogeneous aggregate literal type-checks. +func (blockValidationExemption) Configure(config cel.MutableValidatorConfig) error { + functions := config.GetOrDefault(cel.HomogeneousAggregateLiteralExemptFunctions, []string{}).([]string) + functions = append(functions, "cel.@block") + return config.Set(cel.HomogeneousAggregateLiteralExemptFunctions, functions) +} + +// Validate is a no-op as the intent is to simply disable strong type-checks for list literals during +// when they occur within cel.@block calls as the arg types have already been validated. 
+func (blockValidationExemption) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) { +} + +func celBind(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(celNamespace, target) { return nil, nil } varIdent := args[0] varName := "" - switch varIdent.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - varName = varIdent.GetIdentExpr().GetName() + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() default: - return nil, meh.NewError(varIdent.GetId(), "cel.bind() variable names must be simple identifiers") + return nil, mef.NewError(varIdent.ID(), "cel.bind() variable names must be simple identifiers") } varInit := args[1] resultExpr := args[2] - return meh.Fold( + return mef.NewComprehension( + mef.NewList(), unusedIterVar, - meh.NewList(), varName, varInit, - meh.LiteralBool(false), - meh.Ident(varName), + mef.NewLiteral(types.False), + mef.NewIdent(varName), resultExpr, ), nil } + +func newDynamicBlock(slotExprs []interpreter.Interpretable, expr interpreter.Interpretable) interpreter.Interpretable { + bs := &dynamicBlock{ + slotExprs: slotExprs, + expr: expr, + } + bs.slotActivationPool = &sync.Pool{ + New: func() any { + slotCount := len(slotExprs) + sa := &dynamicSlotActivation{ + slotExprs: slotExprs, + slotCount: slotCount, + slotVals: make([]*slotVal, slotCount), + } + for i := 0; i < slotCount; i++ { + sa.slotVals[i] = &slotVal{} + } + return sa + }, + } + return bs +} + +type dynamicBlock struct { + slotExprs []interpreter.Interpretable + expr interpreter.Interpretable + slotActivationPool *sync.Pool +} + +// ID implements the Interpretable interface method. +func (b *dynamicBlock) ID() int64 { + return b.expr.ID() +} + +// Eval implements the Interpretable interface method. +func (b *dynamicBlock) Eval(activation interpreter.Activation) ref.Val { + sa := b.slotActivationPool.Get().(*dynamicSlotActivation) + sa.Activation = activation + defer b.clearSlots(sa) + return b.expr.Eval(sa) +} + +func (b *dynamicBlock) clearSlots(sa *dynamicSlotActivation) { + sa.reset() + b.slotActivationPool.Put(sa) +} + +type slotVal struct { + value *ref.Val + visited bool +} + +type dynamicSlotActivation struct { + interpreter.Activation + slotExprs []interpreter.Interpretable + slotCount int + slotVals []*slotVal +} + +// ResolveName implements the Activation interface method but handles variables prefixed with `@index` +// as special variables which exist within the slot-based memory of the cel.@block() where each slot +// refers to an expression which must be computed only once. 
+func (sa *dynamicSlotActivation) ResolveName(name string) (any, bool) { + if idx, found := matchSlot(name, sa.slotCount); found { + v := sa.slotVals[idx] + if v.visited { + // Return not found if the index expression refers to itself + if v.value == nil { + return nil, false + } + return *v.value, true + } + v.visited = true + val := sa.slotExprs[idx].Eval(sa) + v.value = &val + return val, true + } + return sa.Activation.ResolveName(name) +} + +func (sa *dynamicSlotActivation) reset() { + sa.Activation = nil + for _, sv := range sa.slotVals { + sv.visited = false + sv.value = nil + } +} + +func newConstantBlock(slots traits.Lister, expr interpreter.Interpretable) interpreter.Interpretable { + count := slots.Size().(types.Int) + return &constantBlock{slots: slots, slotCount: int(count), expr: expr} +} + +type constantBlock struct { + slots traits.Lister + slotCount int + expr interpreter.Interpretable +} + +// ID implements the interpreter.Interpretable interface method. +func (b *constantBlock) ID() int64 { + return b.expr.ID() +} + +// Eval implements the interpreter.Interpretable interface method, and will proxy @index prefixed variable +// lookups into a set of constant slots determined from the plan step. +func (b *constantBlock) Eval(activation interpreter.Activation) ref.Val { + vars := constantSlotActivation{Activation: activation, slots: b.slots, slotCount: b.slotCount} + return b.expr.Eval(vars) +} + +type constantSlotActivation struct { + interpreter.Activation + slots traits.Lister + slotCount int +} + +// ResolveName implements Activation interface method and proxies @index prefixed lookups into the slot +// activation associated with the block scope. +func (sa constantSlotActivation) ResolveName(name string) (any, bool) { + if idx, found := matchSlot(name, sa.slotCount); found { + return sa.slots.Get(types.Int(idx)), true + } + return sa.Activation.ResolveName(name) +} + +func matchSlot(name string, slotCount int) (int, bool) { + if idx, found := strings.CutPrefix(name, indexPrefix); found { + idx, err := strconv.Atoi(idx) + // Return not found if the index is not numeric + if err != nil { + return -1, false + } + // Return not found if the index is not a valid slot + if idx < 0 || idx >= slotCount { + return -1, false + } + return idx, true + } + return -1, false +} + +var ( + indexPrefix = "@index" +) diff --git a/constraint/vendor/github.com/google/cel-go/ext/comprehensions.go b/constraint/vendor/github.com/google/cel-go/ext/comprehensions.go new file mode 100644 index 000000000..1428558d8 --- /dev/null +++ b/constraint/vendor/github.com/google/cel-go/ext/comprehensions.go @@ -0,0 +1,410 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
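For the bindings library above, `cel.bind()` remains the user-facing entry point; `cel.@block` and the dynamic/constant slot activations are primarily an internal form targeted by the planner and optimizer rather than something written by hand. A usage sketch, assuming the default (latest) bindings version; the variable names are illustrative:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    func main() {
        env, err := cel.NewEnv(ext.Bindings())
        if err != nil {
            panic(err)
        }
        // Bind the list once and reuse it inside the comprehension; per the
        // README, this avoids re-allocating the list for each element.
        ast, iss := env.Compile(
            `cel.bind(valid, ['a', 'b', 'c'], ['c', 'd'].exists(e, e in valid))`)
        if iss != nil && iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // true
    }
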
+ +package ext + +import ( + "fmt" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/parser" +) + +const ( + mapInsert = "cel.@mapInsert" + mapInsertOverloadMap = "@mapInsert_map_map" + mapInsertOverloadKeyValue = "@mapInsert_map_key_value" +) + +// TwoVarComprehensions introduces support for two-variable comprehensions. +// +// The two-variable form of comprehensions looks similar to the one-variable counterparts. +// Where possible, the same macro names were used and additional macro signatures added. +// The notable distinction for two-variable comprehensions is the introduction of +// `transformList`, `transformMap`, and `transformMapEntry` support for list and map types +// rather than the more traditional `map` and `filter` macros. +// +// # All +// +// Comprehension which tests whether all elements in the list or map satisfy a given +// predicate. The `all` macro evaluates in a manner consistent with logical AND and will +// short-circuit when encountering a `false` value. +// +// .all(indexVar, valueVar, ) -> bool +// .all(keyVar, valueVar, ) -> bool +// +// Examples: +// +// [1, 2, 3].all(i, j, i < j) // returns true +// {'hello': 'world', 'taco': 'taco'}.all(k, v, k != v) // returns false +// +// // Combines two-variable comprehension with single variable +// {'h': ['hello', 'hi'], 'j': ['joke', 'jog']} +// .all(k, vals, vals.all(v, v.startsWith(k))) // returns true +// +// # Exists +// +// Comprehension which tests whether any element in a list or map exists which satisfies +// a given predicate. The `exists` macro evaluates in a manner consistent with logical OR +// and will short-circuit when encountering a `true` value. +// +// .exists(indexVar, valueVar, ) -> bool +// .exists(keyVar, valueVar, ) -> bool +// +// Examples: +// +// {'greeting': 'hello', 'farewell': 'goodbye'} +// .exists(k, v, k.startsWith('good') || v.endsWith('bye')) // returns true +// [1, 2, 4, 8, 16].exists(i, v, v == 1024 && i == 10) // returns false +// +// # ExistsOne +// +// Comprehension which tests whether exactly one element in a list or map exists which +// satisfies a given predicate expression. This comprehension does not short-circuit in +// keeping with the one-variable exists one macro semantics. +// +// .existsOne(indexVar, valueVar, ) +// .existsOne(keyVar, valueVar, ) +// +// This macro may also be used with the `exists_one` function name, for compatibility +// with the one-variable macro of the same name. +// +// Examples: +// +// [1, 2, 1, 3, 1, 4].existsOne(i, v, i == 1 || v == 1) // returns false +// [1, 1, 2, 2, 3, 3].existsOne(i, v, i == 2 && v == 2) // returns true +// {'i': 0, 'j': 1, 'k': 2}.existsOne(i, v, i == 'l' || v == 1) // returns true +// +// # TransformList +// +// Comprehension which converts a map or a list into a list value. The output expression +// of the comprehension determines the contents of the output list. Elements in the list +// may optionally be filtered according to a predicate expression, where elements that +// satisfy the predicate are transformed. 
+// +// .transformList(indexVar, valueVar, ) +// .transformList(indexVar, valueVar, , ) +// .transformList(keyVar, valueVar, ) +// .transformList(keyVar, valueVar, , ) +// +// Examples: +// +// [1, 2, 3].transformList(indexVar, valueVar, +// (indexVar * valueVar) + valueVar) // returns [1, 4, 9] +// [1, 2, 3].transformList(indexVar, valueVar, indexVar % 2 == 0 +// (indexVar * valueVar) + valueVar) // returns [1, 9] +// {'greeting': 'hello', 'farewell': 'goodbye'} +// .transformList(k, _, k) // returns ['greeting', 'farewell'] +// {'greeting': 'hello', 'farewell': 'goodbye'} +// .transformList(_, v, v) // returns ['hello', 'goodbye'] +// +// # TransformMap +// +// Comprehension which converts a map or a list into a map value. The output expression +// of the comprehension determines the value of the output map entry; however, the key +// remains fixed. Elements in the map may optionally be filtered according to a predicate +// expression, where elements that satisfy the predicate are transformed. +// +// .transformMap(indexVar, valueVar, ) +// .transformMap(indexVar, valueVar, , ) +// .transformMap(keyVar, valueVar, ) +// .transformMap(keyVar, valueVar, , ) +// +// Examples: +// +// [1, 2, 3].transformMap(indexVar, valueVar, +// (indexVar * valueVar) + valueVar) // returns {0: 1, 1: 4, 2: 9} +// [1, 2, 3].transformMap(indexVar, valueVar, indexVar % 2 == 0 +// (indexVar * valueVar) + valueVar) // returns {0: 1, 2: 9} +// {'greeting': 'hello'}.transformMap(k, v, v + '!') // returns {'greeting': 'hello!'} +// +// # TransformMapEntry +// +// Comprehension which converts a map or a list into a map value; however, this transform +// expects the entry expression be a map literal. If the tranform produces an entry which +// duplicates a key in the target map, the comprehension will error. Note, that key +// equality is determined using CEL equality which asserts that numeric values which are +// equal, even if they don't have the same type will cause a key collision. +// +// Elements in the map may optionally be filtered according to a predicate expression, where +// elements that satisfy the predicate are transformed. +// +// .transformMap(indexVar, valueVar, ) +// .transformMap(indexVar, valueVar, , ) +// .transformMap(keyVar, valueVar, ) +// .transformMap(keyVar, valueVar, , ) +// +// Examples: +// +// // returns {'hello': 'greeting'} +// {'greeting': 'hello'}.transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) +// // reverse lookup, require all values in list be unique +// [1, 2, 3].transformMapEntry(indexVar, valueVar, {valueVar: indexVar}) +// +// {'greeting': 'aloha', 'farewell': 'aloha'} +// .transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) // error, duplicate key +func TwoVarComprehensions() cel.EnvOption { + return cel.Lib(compreV2Lib{}) +} + +type compreV2Lib struct{} + +// LibraryName implements that SingletonLibrary interface method. +func (compreV2Lib) LibraryName() string { + return "cel.lib.ext.comprev2" +} + +// CompileOptions implements the cel.Library interface method. 
+func (compreV2Lib) CompileOptions() []cel.EnvOption { + kType := cel.TypeParamType("K") + vType := cel.TypeParamType("V") + mapKVType := cel.MapType(kType, vType) + opts := []cel.EnvOption{ + cel.Macros( + cel.ReceiverMacro("all", 3, quantifierAll), + cel.ReceiverMacro("exists", 3, quantifierExists), + cel.ReceiverMacro("existsOne", 3, quantifierExistsOne), + cel.ReceiverMacro("exists_one", 3, quantifierExistsOne), + cel.ReceiverMacro("transformList", 3, transformList), + cel.ReceiverMacro("transformList", 4, transformList), + cel.ReceiverMacro("transformMap", 3, transformMap), + cel.ReceiverMacro("transformMap", 4, transformMap), + cel.ReceiverMacro("transformMapEntry", 3, transformMapEntry), + cel.ReceiverMacro("transformMapEntry", 4, transformMapEntry), + ), + cel.Function(mapInsert, + cel.Overload(mapInsertOverloadKeyValue, []*cel.Type{mapKVType, kType, vType}, mapKVType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + m := args[0].(traits.Mapper) + k := args[1] + v := args[2] + return types.InsertMapKeyValue(m, k, v) + })), + cel.Overload(mapInsertOverloadMap, []*cel.Type{mapKVType, mapKVType}, mapKVType, + cel.BinaryBinding(func(targetMap, updateMap ref.Val) ref.Val { + tm := targetMap.(traits.Mapper) + um := updateMap.(traits.Mapper) + umIt := um.Iterator() + for umIt.HasNext() == types.True { + k := umIt.Next() + updateOrErr := types.InsertMapKeyValue(tm, k, um.Get(k)) + if types.IsError(updateOrErr) { + return updateOrErr + } + tm = updateOrErr.(traits.Mapper) + } + return tm + })), + ), + } + return opts +} + +// ProgramOptions implements the cel.Library interface method +func (compreV2Lib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func quantifierAll(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewLiteral(types.True), + /*condition=*/ mef.NewCall(operators.NotStrictlyFalse, mef.NewAccuIdent()), + /*step=*/ mef.NewCall(operators.LogicalAnd, mef.NewAccuIdent(), args[2]), + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func quantifierExists(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewLiteral(types.False), + /*condition=*/ mef.NewCall(operators.NotStrictlyFalse, mef.NewCall(operators.LogicalNot, mef.NewAccuIdent())), + /*step=*/ mef.NewCall(operators.LogicalOr, mef.NewAccuIdent(), args[2]), + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func quantifierExistsOne(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewLiteral(types.Int(0)), + /*condition=*/ mef.NewLiteral(types.True), + /*step=*/ mef.NewCall(operators.Conditional, args[2], + mef.NewCall(operators.Add, mef.NewAccuIdent(), mef.NewLiteral(types.Int(1))), + mef.NewAccuIdent()), + /*result=*/ mef.NewCall(operators.Equals, mef.NewAccuIdent(), mef.NewLiteral(types.Int(1))), + ), nil +} + +func 
transformList(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + var transform ast.Expr + var filter ast.Expr + if len(args) == 4 { + filter = args[2] + transform = args[3] + } else { + filter = nil + transform = args[2] + } + + // __result__ = __result__ + [transform] + step := mef.NewCall(operators.Add, mef.NewAccuIdent(), mef.NewList(transform)) + if filter != nil { + // __result__ = (filter) ? __result__ + [transform] : __result__ + step = mef.NewCall(operators.Conditional, filter, step, mef.NewAccuIdent()) + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewList(), + /*condition=*/ mef.NewLiteral(types.True), + step, + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func transformMap(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + var transform ast.Expr + var filter ast.Expr + if len(args) == 4 { + filter = args[2] + transform = args[3] + } else { + filter = nil + transform = args[2] + } + + // __result__ = cel.@mapInsert(__result__, iterVar1, transform) + step := mef.NewCall(mapInsert, mef.NewAccuIdent(), mef.NewIdent(iterVar1), transform) + if filter != nil { + // __result__ = (filter) ? cel.@mapInsert(__result__, iterVar1, transform) : __result__ + step = mef.NewCall(operators.Conditional, filter, step, mef.NewAccuIdent()) + } + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewMap(), + /*condition=*/ mef.NewLiteral(types.True), + step, + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func transformMapEntry(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + var transform ast.Expr + var filter ast.Expr + if len(args) == 4 { + filter = args[2] + transform = args[3] + } else { + filter = nil + transform = args[2] + } + + // __result__ = cel.@mapInsert(__result__, transform) + step := mef.NewCall(mapInsert, mef.NewAccuIdent(), transform) + if filter != nil { + // __result__ = (filter) ? 
cel.@mapInsert(__result__, transform) : __result__ + step = mef.NewCall(operators.Conditional, filter, step, mef.NewAccuIdent()) + } + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewMap(), + /*condition=*/ mef.NewLiteral(types.True), + step, + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func extractIterVars(mef cel.MacroExprFactory, arg0, arg1 ast.Expr) (string, string, *cel.Error) { + iterVar1, err := extractIterVar(mef, arg0) + if err != nil { + return "", "", err + } + iterVar2, err := extractIterVar(mef, arg1) + if err != nil { + return "", "", err + } + if iterVar1 == iterVar2 { + return "", "", mef.NewError(arg1.ID(), fmt.Sprintf("duplicate variable name: %s", iterVar1)) + } + if iterVar1 == parser.AccumulatorName { + return "", "", mef.NewError(arg0.ID(), "iteration variable overwrites accumulator variable") + } + if iterVar2 == parser.AccumulatorName { + return "", "", mef.NewError(arg1.ID(), "iteration variable overwrites accumulator variable") + } + return iterVar1, iterVar2, nil +} + +func extractIterVar(mef cel.MacroExprFactory, target ast.Expr) (string, *cel.Error) { + iterVar, found := extractIdent(target) + if !found { + return "", mef.NewError(target.ID(), "argument must be a simple name") + } + return iterVar, nil +} diff --git a/constraint/vendor/github.com/google/cel-go/ext/encoders.go b/constraint/vendor/github.com/google/cel-go/ext/encoders.go index 61ac0b777..ac04b1a7b 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/encoders.go +++ b/constraint/vendor/github.com/google/cel-go/ext/encoders.go @@ -36,7 +36,7 @@ import ( // Examples: // // base64.decode('aGVsbG8=') // return b'hello' -// base64.decode('aGVsbG8') // error +// base64.decode('aGVsbG8') // return b'hello' // // # Base64.Encode // @@ -79,7 +79,14 @@ func (encoderLib) ProgramOptions() []cel.ProgramOption { } func base64DecodeString(str string) ([]byte, error) { - return base64.StdEncoding.DecodeString(str) + b, err := base64.StdEncoding.DecodeString(str) + if err == nil { + return b, nil + } + if _, tryAltEncoding := err.(base64.CorruptInputError); tryAltEncoding { + return base64.RawStdEncoding.DecodeString(str) + } + return nil, err } func base64EncodeBytes(bytes []byte) (string, error) { diff --git a/constraint/vendor/github.com/google/cel-go/ext/formatting.go b/constraint/vendor/github.com/google/cel-go/ext/formatting.go new file mode 100644 index 000000000..dbff613b2 --- /dev/null +++ b/constraint/vendor/github.com/google/cel-go/ext/formatting.go @@ -0,0 +1,904 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
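The `base64DecodeString` change above adds a fallback to the raw (unpadded) standard encoding, so inputs without padding now decode instead of erroring. A sketch assuming `ext.Encoders()`; illustrative only:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/ext"
    )

    func main() {
        env, err := cel.NewEnv(ext.Encoders())
        if err != nil {
            panic(err)
        }
        // 'aGVsbG8' lacks padding; StdEncoding rejects it, and the decoder now
        // retries with RawStdEncoding, matching the updated doc comment.
        ast, iss := env.Compile(`base64.decode('aGVsbG8') == b'hello'
            && base64.encode(b'hello') == 'aGVsbG8='`)
        if iss != nil && iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{})
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // true
    }
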
+ +package ext + +import ( + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" + "unicode" + + "golang.org/x/text/language" + "golang.org/x/text/message" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +type clauseImpl func(ref.Val, string) (string, error) + +func clauseForType(argType ref.Type) (clauseImpl, error) { + switch argType { + case types.IntType, types.UintType: + return formatDecimal, nil + case types.StringType, types.BytesType, types.BoolType, types.NullType, types.TypeType: + return FormatString, nil + case types.TimestampType, types.DurationType: + // special case to ensure timestamps/durations get printed as CEL literals + return func(arg ref.Val, locale string) (string, error) { + argStrVal := arg.ConvertToType(types.StringType) + argStr := argStrVal.Value().(string) + if arg.Type() == types.TimestampType { + return fmt.Sprintf("timestamp(%q)", argStr), nil + } + if arg.Type() == types.DurationType { + return fmt.Sprintf("duration(%q)", argStr), nil + } + return "", fmt.Errorf("cannot convert argument of type %s to timestamp/duration", arg.Type().TypeName()) + }, nil + case types.ListType: + return formatList, nil + case types.MapType: + return formatMap, nil + case types.DoubleType: + // avoid formatFixed so we can output a period as the decimal separator in order + // to always be a valid CEL literal + return func(arg ref.Val, locale string) (string, error) { + argDouble, ok := arg.Value().(float64) + if !ok { + return "", fmt.Errorf("couldn't convert %s to float64", arg.Type().TypeName()) + } + fmtStr := fmt.Sprintf("%%.%df", defaultPrecision) + return fmt.Sprintf(fmtStr, argDouble), nil + }, nil + case types.TypeType: + return func(arg ref.Val, locale string) (string, error) { + return fmt.Sprintf("type(%s)", arg.Value().(string)), nil + }, nil + default: + return nil, fmt.Errorf("no formatting function for %s", argType.TypeName()) + } +} + +func formatList(arg ref.Val, locale string) (string, error) { + argList := arg.(traits.Lister) + argIterator := argList.Iterator() + var listStrBuilder strings.Builder + _, err := listStrBuilder.WriteRune('[') + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + for argIterator.HasNext() == types.True { + member := argIterator.Next() + memberFormat, err := clauseForType(member.Type()) + if err != nil { + return "", err + } + unquotedStr, err := memberFormat(member, locale) + if err != nil { + return "", err + } + str := quoteForCEL(member, unquotedStr) + _, err = listStrBuilder.WriteString(str) + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + if argIterator.HasNext() == types.True { + _, err = listStrBuilder.WriteString(", ") + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + } + } + _, err = listStrBuilder.WriteRune(']') + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + return listStrBuilder.String(), nil +} + +func formatMap(arg ref.Val, locale string) (string, error) { + argMap := arg.(traits.Mapper) + argIterator := argMap.Iterator() + type mapPair struct { + key string + value string + } + argPairs := make([]mapPair, argMap.Size().Value().(int64)) + i := 0 + for argIterator.HasNext() == types.True { + key := argIterator.Next() + var keyFormat clauseImpl + 
switch key.Type() { + case types.StringType, types.BoolType: + keyFormat = FormatString + case types.IntType, types.UintType: + keyFormat = formatDecimal + default: + return "", fmt.Errorf("no formatting function for map key of type %s", key.Type().TypeName()) + } + unquotedKeyStr, err := keyFormat(key, locale) + if err != nil { + return "", err + } + keyStr := quoteForCEL(key, unquotedKeyStr) + value, found := argMap.Find(key) + if !found { + return "", fmt.Errorf("could not find key: %q", key) + } + valueFormat, err := clauseForType(value.Type()) + if err != nil { + return "", err + } + unquotedValueStr, err := valueFormat(value, locale) + if err != nil { + return "", err + } + valueStr := quoteForCEL(value, unquotedValueStr) + argPairs[i] = mapPair{keyStr, valueStr} + i++ + } + sort.SliceStable(argPairs, func(x, y int) bool { + return argPairs[x].key < argPairs[y].key + }) + var mapStrBuilder strings.Builder + _, err := mapStrBuilder.WriteRune('{') + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + for i, entry := range argPairs { + _, err = mapStrBuilder.WriteString(fmt.Sprintf("%s:%s", entry.key, entry.value)) + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + if i < len(argPairs)-1 { + _, err = mapStrBuilder.WriteString(", ") + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + } + } + _, err = mapStrBuilder.WriteRune('}') + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + return mapStrBuilder.String(), nil +} + +// quoteForCEL takes a formatted, unquoted value and quotes it in a manner suitable +// for embedding directly in CEL. +func quoteForCEL(refVal ref.Val, unquotedValue string) string { + switch refVal.Type() { + case types.StringType: + return fmt.Sprintf("%q", unquotedValue) + case types.BytesType: + return fmt.Sprintf("b%q", unquotedValue) + case types.DoubleType: + // special case to handle infinity/NaN + num := refVal.Value().(float64) + if math.IsInf(num, 1) || math.IsInf(num, -1) || math.IsNaN(num) { + return fmt.Sprintf("%q", unquotedValue) + } + return unquotedValue + default: + return unquotedValue + } +} + +// FormatString returns the string representation of a CEL value. +// +// It is used to implement the %s specifier in the (string).format() extension function. 
+func FormatString(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.ListType: + return formatList(arg, locale) + case types.MapType: + return formatMap(arg, locale) + case types.IntType, types.UintType, types.DoubleType, + types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType: + argStrVal := arg.ConvertToType(types.StringType) + argStr, ok := argStrVal.Value().(string) + if !ok { + return "", fmt.Errorf("could not convert argument %q to string", argStrVal) + } + return argStr, nil + case types.NullType: + return "null", nil + default: + return "", stringFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func formatDecimal(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt, ok := arg.ConvertToType(types.IntType).Value().(int64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) + } + return fmt.Sprintf("%d", argInt), nil + case types.UintType: + argInt, ok := arg.ConvertToType(types.UintType).Value().(uint64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) + } + return fmt.Sprintf("%d", argInt), nil + default: + return "", decimalFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func matchLanguage(locale string) (language.Tag, error) { + matcher, err := makeMatcher(locale) + if err != nil { + return language.Und, err + } + tag, _ := language.MatchStrings(matcher, locale) + return tag, nil +} + +func makeMatcher(locale string) (language.Matcher, error) { + tags := make([]language.Tag, 0) + tag, err := language.Parse(locale) + if err != nil { + return nil, err + } + tags = append(tags, tag) + return language.NewMatcher(tags), nil +} + +type stringFormatter struct{} + +func (c *stringFormatter) String(arg ref.Val, locale string) (string, error) { + return FormatString(arg, locale) +} + +func (c *stringFormatter) Decimal(arg ref.Val, locale string) (string, error) { + return formatDecimal(arg, locale) +} + +func (c *stringFormatter) Fixed(precision *int) func(ref.Val, string) (string, error) { + if precision == nil { + precision = new(int) + *precision = defaultPrecision + } + return func(arg ref.Val, locale string) (string, error) { + strException := false + if arg.Type() == types.StringType { + argStr := arg.Value().(string) + if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { + strException = true + } + } + if arg.Type() != types.DoubleType && !strException { + return "", fixedPointFormatError(runtimeID, arg.Type().TypeName()) + } + argFloatVal := arg.ConvertToType(types.DoubleType) + argFloat, ok := argFloatVal.Value().(float64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value()) + } + fmtStr := fmt.Sprintf("%%.%df", *precision) + + matchedLocale, err := matchLanguage(locale) + if err != nil { + return "", fmt.Errorf("error matching locale: %w", err) + } + return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil + } +} + +func (c *stringFormatter) Scientific(precision *int) func(ref.Val, string) (string, error) { + if precision == nil { + precision = new(int) + *precision = defaultPrecision + } + return func(arg ref.Val, locale string) (string, error) { + strException := false + if arg.Type() == types.StringType { + argStr := arg.Value().(string) + if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { + strException = true + } + } + if arg.Type() != types.DoubleType && !strException { + 
return "", scientificFormatError(runtimeID, arg.Type().TypeName()) + } + argFloatVal := arg.ConvertToType(types.DoubleType) + argFloat, ok := argFloatVal.Value().(float64) + if !ok { + return "", fmt.Errorf("could not convert \"%v\" to float64", argFloatVal.Value()) + } + matchedLocale, err := matchLanguage(locale) + if err != nil { + return "", fmt.Errorf("error matching locale: %w", err) + } + fmtStr := fmt.Sprintf("%%%de", *precision) + return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil + } +} + +func (c *stringFormatter) Binary(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt := arg.Value().(int64) + // locale is intentionally unused as integers formatted as binary + // strings are locale-independent + return fmt.Sprintf("%b", argInt), nil + case types.UintType: + argInt := arg.Value().(uint64) + return fmt.Sprintf("%b", argInt), nil + case types.BoolType: + argBool := arg.Value().(bool) + if argBool { + return "1", nil + } + return "0", nil + default: + return "", binaryFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func (c *stringFormatter) Hex(useUpper bool) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) (string, error) { + fmtStr := "%x" + if useUpper { + fmtStr = "%X" + } + switch arg.Type() { + case types.StringType, types.BytesType: + if arg.Type() == types.BytesType { + return fmt.Sprintf(fmtStr, arg.Value().([]byte)), nil + } + return fmt.Sprintf(fmtStr, arg.Value().(string)), nil + case types.IntType: + argInt, ok := arg.Value().(int64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) + } + return fmt.Sprintf(fmtStr, argInt), nil + case types.UintType: + argInt, ok := arg.Value().(uint64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) + } + return fmt.Sprintf(fmtStr, argInt), nil + default: + return "", hexFormatError(runtimeID, arg.Type().TypeName()) + } + } +} + +func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt := arg.Value().(int64) + return fmt.Sprintf("%o", argInt), nil + case types.UintType: + argInt := arg.Value().(uint64) + return fmt.Sprintf("%o", argInt), nil + default: + return "", octalFormatError(runtimeID, arg.Type().TypeName()) + } +} + +// stringFormatValidator implements the cel.ASTValidator interface allowing for static validation +// of string.format calls. +type stringFormatValidator struct{} + +// Name returns the name of the validator. +func (stringFormatValidator) Name() string { + return "cel.lib.ext.validate.functions.string.format" +} + +// Configure implements the ASTValidatorConfigurer interface and augments the list of functions to skip +// during homogeneous aggregate literal type-checks. +func (stringFormatValidator) Configure(config cel.MutableValidatorConfig) error { + functions := config.GetOrDefault(cel.HomogeneousAggregateLiteralExemptFunctions, []string{}).([]string) + functions = append(functions, "format") + return config.Set(cel.HomogeneousAggregateLiteralExemptFunctions, functions) +} + +// Validate parses all literal format strings and type checks the format clause against the argument +// at the corresponding ordinal within the list literal argument to the function, if one is specified. 
+func (stringFormatValidator) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) { + root := ast.NavigateAST(a) + formatCallExprs := ast.MatchDescendants(root, matchConstantFormatStringWithListLiteralArgs(a)) + for _, e := range formatCallExprs { + call := e.AsCall() + formatStr := call.Target().AsLiteral().Value().(string) + args := call.Args()[0].AsList().Elements() + formatCheck := &stringFormatChecker{ + args: args, + ast: a, + } + // use a placeholder locale, since locale doesn't affect syntax + _, err := parseFormatString(formatStr, formatCheck, formatCheck, "en_US") + if err != nil { + iss.ReportErrorAtID(getErrorExprID(e.ID(), err), err.Error()) + continue + } + seenArgs := formatCheck.argsRequested + if len(args) > seenArgs { + iss.ReportErrorAtID(e.ID(), + "too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(args)) + } + } +} + +// getErrorExprID determines which list literal argument triggered a type-disagreement for the +// purposes of more accurate error message reports. +func getErrorExprID(id int64, err error) int64 { + fmtErr, ok := err.(formatError) + if ok { + return fmtErr.id + } + wrapped := errors.Unwrap(err) + if wrapped != nil { + return getErrorExprID(id, wrapped) + } + return id +} + +// matchConstantFormatStringWithListLiteralArgs matches all valid expression nodes for string +// format checking. +func matchConstantFormatStringWithListLiteralArgs(a *ast.AST) ast.ExprMatcher { + return func(e ast.NavigableExpr) bool { + if e.Kind() != ast.CallKind { + return false + } + call := e.AsCall() + if !call.IsMemberFunction() || call.FunctionName() != "format" { + return false + } + overloadIDs := a.GetOverloadIDs(e.ID()) + if len(overloadIDs) != 0 { + found := false + for _, overload := range overloadIDs { + if overload == overloads.ExtFormatString { + found = true + break + } + } + if !found { + return false + } + } + formatString := call.Target() + if formatString.Kind() != ast.LiteralKind || formatString.AsLiteral().Type() != cel.StringType { + return false + } + args := call.Args() + if len(args) != 1 { + return false + } + formatArgs := args[0] + return formatArgs.Kind() == ast.ListKind + } +} + +// stringFormatChecker implements the formatStringInterpolater interface +type stringFormatChecker struct { + args []ast.Expr + argsRequested int + currArgIndex int64 + ast *ast.AST +} + +func (c *stringFormatChecker) String(arg ref.Val, locale string) (string, error) { + formatArg := c.args[c.currArgIndex] + valid, badID := c.verifyString(formatArg) + if !valid { + return "", stringFormatError(badID, c.typeOf(badID).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Decimal(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType) + if !valid { + return "", decimalFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Fixed(precision *int) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + // we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values + valid := c.verifyTypeOneOf(id, types.DoubleType, types.StringType) + if !valid { + return "", fixedPointFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil + } +} + +func (c *stringFormatChecker) Scientific(precision *int) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) 
(string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.DoubleType, types.StringType) + if !valid { + return "", scientificFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil + } +} + +func (c *stringFormatChecker) Binary(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType, types.BoolType) + if !valid { + return "", binaryFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Hex(useUpper bool) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType, types.StringType, types.BytesType) + if !valid { + return "", hexFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil + } +} + +func (c *stringFormatChecker) Octal(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType) + if !valid { + return "", octalFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Arg(index int64) (ref.Val, error) { + c.argsRequested++ + c.currArgIndex = index + // return a dummy value - this is immediately passed to back to us + // through one of the FormatCallback functions, so anything will do + return types.Int(0), nil +} + +func (c *stringFormatChecker) Size() int64 { + return int64(len(c.args)) +} + +func (c *stringFormatChecker) typeOf(id int64) *cel.Type { + return c.ast.GetType(id) +} + +func (c *stringFormatChecker) verifyTypeOneOf(id int64, validTypes ...*cel.Type) bool { + t := c.typeOf(id) + if t == cel.DynType { + return true + } + for _, vt := range validTypes { + // Only check runtime type compatibility without delving deeper into parameterized types + if t.Kind() == vt.Kind() { + return true + } + } + return false +} + +func (c *stringFormatChecker) verifyString(sub ast.Expr) (bool, int64) { + paramA := cel.TypeParamType("A") + paramB := cel.TypeParamType("B") + subVerified := c.verifyTypeOneOf(sub.ID(), + cel.ListType(paramA), cel.MapType(paramA, paramB), + cel.IntType, cel.UintType, cel.DoubleType, cel.BoolType, cel.StringType, + cel.TimestampType, cel.BytesType, cel.DurationType, cel.TypeType, cel.NullType) + if !subVerified { + return false, sub.ID() + } + switch sub.Kind() { + case ast.ListKind: + for _, e := range sub.AsList().Elements() { + // recursively verify if we're dealing with a list/map + verified, id := c.verifyString(e) + if !verified { + return false, id + } + } + return true, sub.ID() + case ast.MapKind: + for _, e := range sub.AsMap().Entries() { + // recursively verify if we're dealing with a list/map + entry := e.AsMapEntry() + verified, id := c.verifyString(entry.Key()) + if !verified { + return false, id + } + verified, id = c.verifyString(entry.Value()) + if !verified { + return false, id + } + } + return true, sub.ID() + default: + return true, sub.ID() + } +} + +// helper routines for reporting common errors during string formatting static validation and +// runtime execution. 
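
To make the division of labor concrete, the `stringFormatChecker` above rejects clause/argument type mismatches statically when the format-call validator is registered, while the runtime error helpers catch the same misuse during evaluation. The sketch below (illustrative only, not part of this change) handles both paths so it does not assume whether validation is enabled in a given `Strings()` configuration:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Strings())
	if err != nil {
		panic(err)
	}
	// "%d" only accepts ints and uints (see Decimal above); a string argument
	// is rejected either at compile time, when the stringFormatValidator runs,
	// or at evaluation time via decimalFormatError.
	ast, iss := env.Compile(`"%d".format(["not a number"])`)
	if iss != nil && iss.Err() != nil {
		fmt.Println("rejected at compile time:", iss.Err())
		return
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	if _, _, err := prg.Eval(map[string]any{}); err != nil {
		fmt.Println("rejected at evaluation time:", err)
	}
}
```
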
+ +func binaryFormatError(id int64, badType string) error { + return newFormatError(id, "only integers and bools can be formatted as binary, was given %s", badType) +} + +func decimalFormatError(id int64, badType string) error { + return newFormatError(id, "decimal clause can only be used on integers, was given %s", badType) +} + +func fixedPointFormatError(id int64, badType string) error { + return newFormatError(id, "fixed-point clause can only be used on doubles, was given %s", badType) +} + +func hexFormatError(id int64, badType string) error { + return newFormatError(id, "only integers, byte buffers, and strings can be formatted as hex, was given %s", badType) +} + +func octalFormatError(id int64, badType string) error { + return newFormatError(id, "octal clause can only be used on integers, was given %s", badType) +} + +func scientificFormatError(id int64, badType string) error { + return newFormatError(id, "scientific clause can only be used on doubles, was given %s", badType) +} + +func stringFormatError(id int64, badType string) error { + return newFormatError(id, "string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps, was given %s", badType) +} + +type formatError struct { + id int64 + msg string +} + +func newFormatError(id int64, msg string, args ...any) error { + return formatError{ + id: id, + msg: fmt.Sprintf(msg, args...), + } +} + +func (e formatError) Error() string { + return e.msg +} + +func (e formatError) Is(target error) bool { + return e.msg == target.Error() +} + +// stringArgList implements the formatListArgs interface. +type stringArgList struct { + args traits.Lister +} + +func (c *stringArgList) Arg(index int64) (ref.Val, error) { + if index >= c.args.Size().Value().(int64) { + return nil, fmt.Errorf("index %d out of range", index) + } + return c.args.Get(types.Int(index)), nil +} + +func (c *stringArgList) Size() int64 { + return c.args.Size().Value().(int64) +} + +// formatStringInterpolator is an interface that allows user-defined behavior +// for formatting clause implementations, as well as argument retrieval. +// Each function is expected to support the appropriate types as laid out in +// the string.format documentation, and to return an error if given an inappropriate type. +type formatStringInterpolator interface { + // String takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a string, or an error if one occurred. + String(ref.Val, string) (string, error) + + // Decimal takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a decimal integer, or an error if one occurred. + Decimal(ref.Val, string) (string, error) + + // Fixed takes an int pointer representing precision (or nil if none was given) and + // returns a function operating in a similar manner to String and Decimal, taking a + // ref.Val and locale and returning the appropriate string. A closure is returned + // so precision can be set without needing an additional function call/configuration. + Fixed(*int) func(ref.Val, string) (string, error) + + // Scientific functions identically to Fixed, except the string returned from the closure + // is expected to be in scientific notation. + Scientific(*int) func(ref.Val, string) (string, error) + + // Binary takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a binary integer, or an error if one occurred. 
+ Binary(ref.Val, string) (string, error) + + // Hex takes a boolean that, if true, indicates the hex string output by the returned + // closure should use uppercase letters for A-F. + Hex(bool) func(ref.Val, string) (string, error) + + // Octal takes a ref.Val and a string representing the current locale identifier and + // returns the Val formatted in octal, or an error if one occurred. + Octal(ref.Val, string) (string, error) +} + +// formatListArgs is an interface that allows user-defined list-like datatypes to be used +// for formatting clause implementations. +type formatListArgs interface { + // Arg returns the ref.Val at the given index, or an error if one occurred. + Arg(int64) (ref.Val, error) + + // Size returns the length of the argument list. + Size() int64 +} + +// parseFormatString formats a string according to the string.format syntax, taking the clause implementations +// from the provided FormatCallback and the args from the given FormatList. +func parseFormatString(formatStr string, callback formatStringInterpolator, list formatListArgs, locale string) (string, error) { + i := 0 + argIndex := 0 + var builtStr strings.Builder + for i < len(formatStr) { + if formatStr[i] == '%' { + if i+1 < len(formatStr) && formatStr[i+1] == '%' { + err := builtStr.WriteByte('%') + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i += 2 + continue + } else { + argAny, err := list.Arg(int64(argIndex)) + if err != nil { + return "", err + } + if i+1 >= len(formatStr) { + return "", errors.New("unexpected end of string") + } + if int64(argIndex) >= list.Size() { + return "", fmt.Errorf("index %d out of range", argIndex) + } + numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale) + if refErr != nil { + return "", refErr + } + _, err = builtStr.WriteString(val) + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i += numRead + argIndex++ + } + } else { + err := builtStr.WriteByte(formatStr[i]) + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i++ + } + } + return builtStr.String(), nil +} + +// parseAndFormatClause parses the format clause at the start of the given string with val, and returns +// how many characters were consumed and the substituted string form of val, or an error if one occurred. 
+func parseAndFormatClause(formatStr string, val ref.Val, callback formatStringInterpolator, list formatListArgs, locale string) (int, string, error) { + i := 1 + read, formatter, err := parseFormattingClause(formatStr[i:], callback) + i += read + if err != nil { + return -1, "", newParseFormatError("could not parse formatting clause", err) + } + + valStr, err := formatter(val, locale) + if err != nil { + return -1, "", newParseFormatError("error during formatting", err) + } + return i, valStr, nil +} + +func parseFormattingClause(formatStr string, callback formatStringInterpolator) (int, clauseImpl, error) { + i := 0 + read, precision, err := parsePrecision(formatStr[i:]) + i += read + if err != nil { + return -1, nil, fmt.Errorf("error while parsing precision: %w", err) + } + r := rune(formatStr[i]) + i++ + switch r { + case 's': + return i, callback.String, nil + case 'd': + return i, callback.Decimal, nil + case 'f': + return i, callback.Fixed(precision), nil + case 'e': + return i, callback.Scientific(precision), nil + case 'b': + return i, callback.Binary, nil + case 'x', 'X': + return i, callback.Hex(unicode.IsUpper(r)), nil + case 'o': + return i, callback.Octal, nil + default: + return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r) + } +} + +func parsePrecision(formatStr string) (int, *int, error) { + i := 0 + if formatStr[i] != '.' { + return i, nil, nil + } + i++ + var buffer strings.Builder + for { + if i >= len(formatStr) { + return -1, nil, errors.New("could not find end of precision specifier") + } + if !isASCIIDigit(rune(formatStr[i])) { + break + } + buffer.WriteByte(formatStr[i]) + i++ + } + precision, err := strconv.Atoi(buffer.String()) + if err != nil { + return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err) + } + return i, &precision, nil +} + +func isASCIIDigit(r rune) bool { + return r <= unicode.MaxASCII && unicode.IsDigit(r) +} + +type parseFormatError struct { + msg string + wrapped error +} + +func newParseFormatError(msg string, wrapped error) error { + return parseFormatError{msg: msg, wrapped: wrapped} +} + +func (e parseFormatError) Error() string { + return fmt.Sprintf("%s: %s", e.msg, e.wrapped.Error()) +} + +func (e parseFormatError) Is(target error) bool { + return e.Error() == target.Error() +} + +func (e parseFormatError) Unwrap() error { + return e.wrapped +} + +const ( + runtimeID = int64(-1) +) diff --git a/constraint/vendor/github.com/google/cel-go/ext/guards.go b/constraint/vendor/github.com/google/cel-go/ext/guards.go index 785c8675b..ccede289f 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/guards.go +++ b/constraint/vendor/github.com/google/cel-go/ext/guards.go @@ -15,10 +15,9 @@ package ext import ( + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // function invocation guards for common call signatures within extension functions. 
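
Two details of the parser above are easy to miss: `parseFormatString` treats `%%` as a literal percent sign without consuming an argument, and `parsePrecision` only reads digits after a `.`, so a bare `%f` falls back to the library's default precision. A small sketch of both behaviors (illustrative, not part of this diff; the `evalFormat` helper is invented for brevity):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

// evalFormat compiles and evaluates a variable-free expression; helper for the demo only.
func evalFormat(env *cel.Env, expr string) any {
	ast, iss := env.Compile(expr)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{})
	if err != nil {
		panic(err)
	}
	return out.Value()
}

func main() {
	env, err := cel.NewEnv(ext.Strings())
	if err != nil {
		panic(err)
	}
	// "%%" is emitted verbatim and does not advance the argument index.
	fmt.Println(evalFormat(env, `"%d%% done".format([75])`)) // 75% done
	// "%.3f" formats with three fractional digits via parsePrecision + Fixed.
	fmt.Println(evalFormat(env, `"%.3f".format([2.0])`)) // 2.000
}
```
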
@@ -51,14 +50,18 @@ func listStringOrError(strs []string, err error) ref.Val { return types.DefaultTypeAdapter.NativeToValue(strs) } -func macroTargetMatchesNamespace(ns string, target *exprpb.Expr) bool { - switch target.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - if target.GetIdentExpr().GetName() != ns { - return false - } - return true +func extractIdent(target ast.Expr) (string, bool) { + switch target.Kind() { + case ast.IdentKind: + return target.AsIdent(), true default: - return false + return "", false + } +} + +func macroTargetMatchesNamespace(ns string, target ast.Expr) bool { + if id, found := extractIdent(target); found { + return id == ns } + return false } diff --git a/constraint/vendor/github.com/google/cel-go/ext/lists.go b/constraint/vendor/github.com/google/cel-go/ext/lists.go index 08751d08a..d0b90ea92 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/lists.go +++ b/constraint/vendor/github.com/google/cel-go/ext/lists.go @@ -16,15 +16,70 @@ package ext import ( "fmt" + "math" + "sort" "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/parser" ) +var comparableTypes = []*cel.Type{ + cel.IntType, + cel.UintType, + cel.DoubleType, + cel.BoolType, + cel.DurationType, + cel.TimestampType, + cel.StringType, + cel.BytesType, +} + // Lists returns a cel.EnvOption to configure extended functions for list manipulation. // As a general note, all indices are zero-based. +// +// # Distinct +// +// Introduced in version: 2 +// +// Returns the distinct elements of a list. +// +// .distinct() -> +// +// Examples: +// +// [1, 2, 2, 3, 3, 3].distinct() // return [1, 2, 3] +// ["b", "b", "c", "a", "c"].distinct() // return ["b", "c", "a"] +// [1, "b", 2, "b"].distinct() // return [1, "b", 2] +// +// # Range +// +// Introduced in version: 2 +// +// Returns a list of integers from 0 to n-1. +// +// lists.range() -> +// +// Examples: +// +// lists.range(5) -> [0, 1, 2, 3, 4] +// +// # Reverse +// +// Introduced in version: 2 +// +// Returns the elements of a list in reverse order. +// +// .reverse() -> +// +// Examples: +// +// [5, 3, 1, 2].reverse() // return [2, 1, 3, 5] +// // # Slice // // Returns a new sub-list using the indexes provided. @@ -35,21 +90,105 @@ import ( // // [1,2,3,4].slice(1, 3) // return [2, 3] // [1,2,3,4].slice(2, 4) // return [3 ,4] -func Lists() cel.EnvOption { - return cel.Lib(listsLib{}) +// +// # Flatten +// +// Flattens a list recursively. +// If an optional depth is provided, the list is flattened to a the specificied level. +// A negative depth value will result in an error. +// +// .flatten() -> +// .flatten(, ) -> +// +// Examples: +// +// [1,[2,3],[4]].flatten() // return [1, 2, 3, 4] +// [1,[2,[3,4]]].flatten() // return [1, 2, [3, 4]] +// [1,2,[],[],[3,4]].flatten() // return [1, 2, 3, 4] +// [1,[2,[3,[4]]]].flatten(2) // return [1, 2, 3, [4]] +// [1,[2,[3,[4]]]].flatten(-1) // error +// +// # Sort +// +// Introduced in version: 2 +// +// Sorts a list with comparable elements. If the element type is not comparable +// or the element types are not the same, the function will produce an error. 
+// +// .sort() -> +// T in {int, uint, double, bool, duration, timestamp, string, bytes} +// +// Examples: +// +// [3, 2, 1].sort() // return [1, 2, 3] +// ["b", "c", "a"].sort() // return ["a", "b", "c"] +// [1, "b"].sort() // error +// [[1, 2, 3]].sort() // error +// +// # SortBy +// +// Sorts a list by a key value, i.e., the order is determined by the result of +// an expression applied to each element of the list. +// The output of the key expression must be a comparable type, otherwise the +// function will return an error. +// +// .sortBy(, ) -> +// keyExpr returns a value in {int, uint, double, bool, duration, timestamp, string, bytes} + +// Examples: +// +// [ +// Player { name: "foo", score: 0 }, +// Player { name: "bar", score: -10 }, +// Player { name: "baz", score: 1000 }, +// ].sortBy(e, e.score).map(e, e.name) +// == ["bar", "foo", "baz"] + +func Lists(options ...ListsOption) cel.EnvOption { + l := &listsLib{ + version: math.MaxUint32, + } + for _, o := range options { + l = o(l) + } + + return cel.Lib(l) } -type listsLib struct{} +type listsLib struct { + version uint32 +} // LibraryName implements the SingletonLibrary interface method. func (listsLib) LibraryName() string { return "cel.lib.ext.lists" } +// ListsOption is a functional interface for configuring the strings library. +type ListsOption func(*listsLib) *listsLib + +// ListsVersion configures the version of the string library. +// +// The version limits which functions are available. Only functions introduced +// below or equal to the given version included in the library. If this option +// is not set, all functions are available. +// +// See the library documentation to determine which version a function was introduced. +// If the documentation does not state which version a function was introduced, it can +// be assumed to be introduced at version 0, when the library was first created. +func ListsVersion(version uint32) ListsOption { + return func(lib *listsLib) *listsLib { + lib.version = version + return lib + } +} + // CompileOptions implements the Library interface method. 
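
For orientation, the version-gated functions documented above are reached through the `Lists()` option and its new `ListsVersion` knob. A minimal usage sketch (not part of this diff); the expected results in the comments restate the documentation rather than claiming anything new:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	// ListsVersion(2) enables distinct, range, reverse, sort and sortBy in
	// addition to slice and the version-1 flatten overloads.
	env, err := cel.NewEnv(ext.Lists(ext.ListsVersion(2)))
	if err != nil {
		panic(err)
	}
	exprs := []string{
		`[3, 2, 1].sort()`,              // [1, 2, 3]
		`lists.range(5)`,                // [0, 1, 2, 3, 4]
		`[5, 3, 1, 2].reverse()`,        // [2, 1, 3, 5]
		`[1, 2, 2, 3, 3, 3].distinct()`, // [1, 2, 3]
		`[-3, 1, -2].sortBy(i, i * i)`,  // keys are the squares, so [1, -2, -3]
	}
	for _, src := range exprs {
		ast, iss := env.Compile(src)
		if iss != nil && iss.Err() != nil {
			panic(iss.Err())
		}
		prg, err := env.Program(ast)
		if err != nil {
			panic(err)
		}
		out, _, err := prg.Eval(map[string]any{})
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s => %v\n", src, out.Value())
	}
}
```
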
-func (listsLib) CompileOptions() []cel.EnvOption { +func (lib listsLib) CompileOptions() []cel.EnvOption { listType := cel.ListType(cel.TypeParamType("T")) - return []cel.EnvOption{ + listListType := cel.ListType(listType) + listDyn := cel.ListType(cel.DynType) + opts := []cel.EnvOption{ cel.Function("slice", cel.MemberOverload("list_slice", []*cel.Type{listType, cel.IntType, cel.IntType}, listType, @@ -66,6 +205,151 @@ func (listsLib) CompileOptions() []cel.EnvOption { ), ), } + if lib.version >= 1 { + opts = append(opts, + cel.Function("flatten", + cel.MemberOverload("list_flatten", + []*cel.Type{listListType}, listType, + cel.UnaryBinding(func(arg ref.Val) ref.Val { + list, ok := arg.(traits.Lister) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + flatList, err := flatten(list, 1) + if err != nil { + return types.WrapErr(err) + } + + return types.DefaultTypeAdapter.NativeToValue(flatList) + }), + ), + cel.MemberOverload("list_flatten_int", + []*cel.Type{listDyn, types.IntType}, listDyn, + cel.BinaryBinding(func(arg1, arg2 ref.Val) ref.Val { + list, ok := arg1.(traits.Lister) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + depth, ok := arg2.(types.Int) + if !ok { + return types.MaybeNoSuchOverloadErr(arg2) + } + flatList, err := flatten(list, int64(depth)) + if err != nil { + return types.WrapErr(err) + } + + return types.DefaultTypeAdapter.NativeToValue(flatList) + }), + ), + // To handle the case where a variable of just `list(T)` is provided at runtime + // with a graceful failure more, disable the type guards since the implementation + // can handle lists which are already flat. + decls.DisableTypeGuards(true), + ), + ) + } + if lib.version >= 2 { + sortDecl := cel.Function("sort", + append( + templatedOverloads(comparableTypes, func(t *cel.Type) cel.FunctionOpt { + return cel.MemberOverload( + fmt.Sprintf("list_%s_sort", t.TypeName()), + []*cel.Type{cel.ListType(t)}, cel.ListType(t), + ) + }), + cel.SingletonUnaryBinding( + func(arg ref.Val) ref.Val { + list, ok := arg.(traits.Lister) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + sorted, err := sortList(list) + if err != nil { + return types.WrapErr(err) + } + + return sorted + }, + // List traits + traits.ListerType, + ), + )..., + ) + opts = append(opts, sortDecl) + opts = append(opts, cel.Macros(cel.ReceiverMacro("sortBy", 2, sortByMacro))) + opts = append(opts, cel.Function("@sortByAssociatedKeys", + append( + templatedOverloads(comparableTypes, func(u *cel.Type) cel.FunctionOpt { + return cel.MemberOverload( + fmt.Sprintf("list_%s_sortByAssociatedKeys", u.TypeName()), + []*cel.Type{listType, cel.ListType(u)}, listType, + ) + }), + cel.SingletonBinaryBinding( + func(arg1 ref.Val, arg2 ref.Val) ref.Val { + list, ok := arg1.(traits.Lister) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + keys, ok := arg2.(traits.Lister) + if !ok { + return types.MaybeNoSuchOverloadErr(arg2) + } + sorted, err := sortListByAssociatedKeys(list, keys) + if err != nil { + return types.WrapErr(err) + } + + return sorted + }, + // List traits + traits.ListerType, + ), + )..., + )) + + opts = append(opts, cel.Function("lists.range", + cel.Overload("lists_range", + []*cel.Type{cel.IntType}, cel.ListType(cel.IntType), + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + n := args[0].(types.Int) + result, err := genRange(n) + if err != nil { + return types.WrapErr(err) + } + return result + }), + ), + )) + opts = append(opts, cel.Function("reverse", + cel.MemberOverload("list_reverse", + 
[]*cel.Type{listType}, listType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + list := args[0].(traits.Lister) + result, err := reverseList(list) + if err != nil { + return types.WrapErr(err) + } + return result + }), + ), + )) + opts = append(opts, cel.Function("distinct", + cel.MemberOverload("list_distinct", + []*cel.Type{listType}, listType, + cel.UnaryBinding(func(list ref.Val) ref.Val { + result, err := distinctList(list.(traits.Lister)) + if err != nil { + return types.WrapErr(err) + } + return result + }), + ), + )) + } + + return opts } // ProgramOptions implements the Library interface method. @@ -73,6 +357,24 @@ func (listsLib) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } +func genRange(n types.Int) (ref.Val, error) { + var newList []ref.Val + for i := types.Int(0); i < n; i++ { + newList = append(newList, i) + } + return types.DefaultTypeAdapter.NativeToValue(newList), nil +} + +func reverseList(list traits.Lister) (ref.Val, error) { + var newList []ref.Val + listLength := list.Size().(types.Int) + for i := types.Int(0); i < listLength; i++ { + val := list.Get(listLength - i - 1) + newList = append(newList, val) + } + return types.DefaultTypeAdapter.NativeToValue(newList), nil +} + func slice(list traits.Lister, start, end types.Int) (ref.Val, error) { listLength := list.Size().(types.Int) if start < 0 || end < 0 { @@ -92,3 +394,167 @@ func slice(list traits.Lister, start, end types.Int) (ref.Val, error) { } return types.DefaultTypeAdapter.NativeToValue(newList), nil } + +func flatten(list traits.Lister, depth int64) ([]ref.Val, error) { + if depth < 0 { + return nil, fmt.Errorf("level must be non-negative") + } + + var newList []ref.Val + iter := list.Iterator() + + for iter.HasNext() == types.True { + val := iter.Next() + nestedList, isList := val.(traits.Lister) + + if !isList || depth == 0 { + newList = append(newList, val) + continue + } else { + flattenedList, err := flatten(nestedList, depth-1) + if err != nil { + return nil, err + } + + newList = append(newList, flattenedList...) + } + } + + return newList, nil +} + +func sortList(list traits.Lister) (ref.Val, error) { + return sortListByAssociatedKeys(list, list) +} + +// Internal function used for the implementation of sort() and sortBy(). +// +// Sorts a list of arbitrary elements, according to the order produced by sorting +// another list of comparable elements. If the element type of the keys is not +// comparable or the element types are not the same, the function will produce an error. 
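
As a quick illustration of the `flatten` implementation above (one level by default, an explicit depth with the two-argument overload, and an error from the non-negative guard), the following sketch is not part of this diff and simply mirrors the documented examples:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Lists())
	if err != nil {
		panic(err)
	}
	for _, src := range []string{
		`[1, [2, [3, [4]]]].flatten()`,   // [1, 2, [3, [4]]]
		`[1, [2, [3, [4]]]].flatten(2)`,  // [1, 2, 3, [4]]
		`[1, [2, [3, [4]]]].flatten(-1)`, // evaluation error: level must be non-negative
	} {
		ast, iss := env.Compile(src)
		if iss != nil && iss.Err() != nil {
			panic(iss.Err())
		}
		prg, err := env.Program(ast)
		if err != nil {
			panic(err)
		}
		out, _, err := prg.Eval(map[string]any{})
		if err != nil {
			fmt.Printf("%s => error: %v\n", src, err)
			continue
		}
		fmt.Printf("%s => %v\n", src, out.Value())
	}
}
```
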
+// +// .@sortByAssociatedKeys() -> +// U in {int, uint, double, bool, duration, timestamp, string, bytes} +// +// Example: +// +// ["foo", "bar", "baz"].@sortByAssociatedKeys([3, 1, 2]) // return ["bar", "baz", "foo"] +func sortListByAssociatedKeys(list, keys traits.Lister) (ref.Val, error) { + listLength := list.Size().(types.Int) + keysLength := keys.Size().(types.Int) + if listLength != keysLength { + return nil, fmt.Errorf( + "@sortByAssociatedKeys() expected a list of the same size as the associated keys list, but got %d and %d elements respectively", + listLength, + keysLength, + ) + } + if listLength == 0 { + return list, nil + } + elem := keys.Get(types.IntZero) + if _, ok := elem.(traits.Comparer); !ok { + return nil, fmt.Errorf("list elements must be comparable") + } + + sortedIndices := make([]ref.Val, 0, listLength) + for i := types.IntZero; i < listLength; i++ { + if keys.Get(i).Type() != elem.Type() { + return nil, fmt.Errorf("list elements must have the same type") + } + sortedIndices = append(sortedIndices, i) + } + + sort.Slice(sortedIndices, func(i, j int) bool { + iKey := keys.Get(sortedIndices[i]) + jKey := keys.Get(sortedIndices[j]) + return iKey.(traits.Comparer).Compare(jKey) == types.IntNegOne + }) + + sorted := make([]ref.Val, 0, listLength) + + for _, sortedIdx := range sortedIndices { + sorted = append(sorted, list.Get(sortedIdx)) + } + return types.DefaultTypeAdapter.NativeToValue(sorted), nil +} + +// sortByMacro transforms an expression like: +// +// mylistExpr.sortBy(e, -math.abs(e)) +// +// into something equivalent to: +// +// cel.bind( +// __sortBy_input__, +// myListExpr, +// __sortBy_input__.@sortByAssociatedKeys(__sortBy_input__.map(e, -math.abs(e)) +// ) +func sortByMacro(meh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + varIdent := meh.NewIdent("@__sortBy_input__") + varName := varIdent.AsIdent() + + targetKind := target.Kind() + if targetKind != ast.ListKind && + targetKind != ast.SelectKind && + targetKind != ast.IdentKind && + targetKind != ast.ComprehensionKind && targetKind != ast.CallKind { + return nil, meh.NewError(target.ID(), fmt.Sprintf("sortBy can only be applied to a list, identifier, comprehension, call or select expression")) + } + + mapCompr, err := parser.MakeMap(meh, meh.Copy(varIdent), args) + if err != nil { + return nil, err + } + callExpr := meh.NewMemberCall("@sortByAssociatedKeys", + meh.Copy(varIdent), + mapCompr, + ) + + bindExpr := meh.NewComprehension( + meh.NewList(), + "#unused", + varName, + target, + meh.NewLiteral(types.False), + varIdent, + callExpr, + ) + + return bindExpr, nil +} + +func distinctList(list traits.Lister) (ref.Val, error) { + listLength := list.Size().(types.Int) + if listLength == 0 { + return list, nil + } + uniqueList := make([]ref.Val, 0, listLength) + for i := types.IntZero; i < listLength; i++ { + val := list.Get(i) + seen := false + for j := types.IntZero; j < types.Int(len(uniqueList)); j++ { + if i == j { + continue + } + other := uniqueList[j] + if val.Equal(other) == types.True { + seen = true + break + } + } + if !seen { + uniqueList = append(uniqueList, val) + } + } + + return types.DefaultTypeAdapter.NativeToValue(uniqueList), nil +} + +func templatedOverloads(types []*cel.Type, template func(t *cel.Type) cel.FunctionOpt) []cel.FunctionOpt { + overloads := make([]cel.FunctionOpt, len(types)) + for i, t := range types { + overloads[i] = template(t) + } + return overloads +} diff --git a/constraint/vendor/github.com/google/cel-go/ext/math.go 
b/constraint/vendor/github.com/google/cel-go/ext/math.go index 0b9a36103..250246db1 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/math.go +++ b/constraint/vendor/github.com/google/cel-go/ext/math.go @@ -16,14 +16,14 @@ package ext import ( "fmt" + "math" "strings" "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // Math returns a cel.EnvOption to configure namespaced math helper macros and @@ -87,33 +87,317 @@ import ( // math.least('string') // parse error // math.least(a, b) // check-time error if a or b is non-numeric // math.least(dyn('string')) // runtime error -func Math() cel.EnvOption { - return cel.Lib(mathLib{}) +// +// # Math.BitOr +// +// Introduced at version: 1 +// +// Performs a bitwise-OR operation over two int or uint values. +// +// math.bitOr(, ) -> +// math.bitOr(, ) -> +// +// Examples: +// +// math.bitOr(1u, 2u) // returns 3u +// math.bitOr(-2, -4) // returns -2 +// +// # Math.BitAnd +// +// Introduced at version: 1 +// +// Performs a bitwise-AND operation over two int or uint values. +// +// math.bitAnd(, ) -> +// math.bitAnd(, ) -> +// +// Examples: +// +// math.bitAnd(3u, 2u) // return 2u +// math.bitAnd(3, 5) // returns 3 +// math.bitAnd(-3, -5) // returns -7 +// +// # Math.BitXor +// +// Introduced at version: 1 +// +// math.bitXor(, ) -> +// math.bitXor(, ) -> +// +// Performs a bitwise-XOR operation over two int or uint values. +// +// Examples: +// +// math.bitXor(3u, 5u) // returns 6u +// math.bitXor(1, 3) // returns 2 +// +// # Math.BitNot +// +// Introduced at version: 1 +// +// Function which accepts a single int or uint and performs a bitwise-NOT +// ones-complement of the given binary value. +// +// math.bitNot() -> +// math.bitNot() -> +// +// Examples +// +// math.bitNot(1) // returns -1 +// math.bitNot(-1) // return 0 +// math.bitNot(0u) // returns 18446744073709551615u +// +// # Math.BitShiftLeft +// +// Introduced at version: 1 +// +// Perform a left shift of bits on the first parameter, by the amount of bits +// specified in the second parameter. The first parameter is either a uint or +// an int. The second parameter must be an int. +// +// When the second parameter is 64 or greater, 0 will be always be returned +// since the number of bits shifted is greater than or equal to the total bit +// length of the number being shifted. Negative valued bit shifts will result +// in a runtime error. +// +// math.bitShiftLeft(, ) -> +// math.bitShiftLeft(, ) -> +// +// Examples +// +// math.bitShiftLeft(1, 2) // returns 4 +// math.bitShiftLeft(-1, 2) // returns -4 +// math.bitShiftLeft(1u, 2) // return 4u +// math.bitShiftLeft(1u, 200) // returns 0u +// +// # Math.BitShiftRight +// +// Introduced at version: 1 +// +// Perform a right shift of bits on the first parameter, by the amount of bits +// specified in the second parameter. The first parameter is either a uint or +// an int. The second parameter must be an int. +// +// When the second parameter is 64 or greater, 0 will always be returned since +// the number of bits shifted is greater than or equal to the total bit length +// of the number being shifted. Negative valued bit shifts will result in a +// runtime error. +// +// The sign bit extension will not be preserved for this operation: vacant bits +// on the left are filled with 0. 
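
A brief illustrative sketch of the bitwise helpers described above (not part of this diff); the expected values in the comment restate the documented examples, and `MathVersion(1)` is the version option introduced later in this file:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	// The bitwise functions are gated behind math extension version 1.
	env, err := cel.NewEnv(ext.Math(ext.MathVersion(1)))
	if err != nil {
		panic(err)
	}
	src := `[math.bitOr(1u, 2u), math.bitAnd(3u, 2u), math.bitXor(3u, 5u),
	         math.bitNot(0u), math.bitShiftLeft(1u, 2), math.bitShiftRight(1024u, 64)]`
	ast, iss := env.Compile(src)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{})
	if err != nil {
		panic(err)
	}
	// Per the documentation above: [3u, 2u, 6u, 18446744073709551615u, 4u, 0u]
	fmt.Println(out.Value())
}
```
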
+// +// math.bitShiftRight(, ) -> +// math.bitShiftRight(, ) -> +// +// Examples +// +// math.bitShiftRight(1024, 2) // returns 256 +// math.bitShiftRight(1024u, 2) // returns 256u +// math.bitShiftRight(1024u, 64) // returns 0u +// +// # Math.Ceil +// +// Introduced at version: 1 +// +// Compute the ceiling of a double value. +// +// math.ceil() -> +// +// Examples: +// +// math.ceil(1.2) // returns 2.0 +// math.ceil(-1.2) // returns -1.0 +// +// # Math.Floor +// +// Introduced at version: 1 +// +// Compute the floor of a double value. +// +// math.floor() -> +// +// Examples: +// +// math.floor(1.2) // returns 1.0 +// math.floor(-1.2) // returns -2.0 +// +// # Math.Round +// +// Introduced at version: 1 +// +// Rounds the double value to the nearest whole number with ties rounding away +// from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0. +// +// math.round() -> +// +// Examples: +// +// math.round(1.2) // returns 1.0 +// math.round(1.5) // returns 2.0 +// math.round(-1.5) // returns -2.0 +// +// # Math.Trunc +// +// Introduced at version: 1 +// +// Truncates the fractional portion of the double value. +// +// math.trunc() -> +// +// Examples: +// +// math.trunc(-1.3) // returns -1.0 +// math.trunc(1.3) // returns 1.0 +// +// # Math.Abs +// +// Introduced at version: 1 +// +// Returns the absolute value of the numeric type provided as input. If the +// value is NaN, the output is NaN. If the input is int64 min, the function +// will result in an overflow error. +// +// math.abs() -> +// math.abs() -> +// math.abs() -> +// +// Examples: +// +// math.abs(-1) // returns 1 +// math.abs(1) // returns 1 +// math.abs(-9223372036854775808) // overflow error +// +// # Math.Sign +// +// Introduced at version: 1 +// +// Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or +// uint depending on the overload. For floating point values, if NaN is +// provided as input, the output is also NaN. The implementation does not +// differentiate between positive and negative zero. +// +// math.sign() -> +// math.sign() -> +// math.sign() -> +// +// Examples: +// +// math.sign(-42) // returns -1 +// math.sign(0) // returns 0 +// math.sign(42) // returns 1 +// +// # Math.IsInf +// +// Introduced at version: 1 +// +// Returns true if the input double value is -Inf or +Inf. +// +// math.isInf() -> +// +// Examples: +// +// math.isInf(1.0/0.0) // returns true +// math.isInf(1.2) // returns false +// +// # Math.IsNaN +// +// Introduced at version: 1 +// +// Returns true if the input double value is NaN, false otherwise. +// +// math.isNaN() -> +// +// Examples: +// +// math.isNaN(0.0/0.0) // returns true +// math.isNaN(1.2) // returns false +// +// # Math.IsFinite +// +// Introduced at version: 1 +// +// Returns true if the value is a finite number. 
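
The rounding, sign and floating-point predicates documented above can be exercised the same way. A minimal sketch (illustrative only, not part of this diff) that checks the documented semantics in one expression:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Math(ext.MathVersion(1)))
	if err != nil {
		panic(err)
	}
	// Ties round away from zero, sign() returns -1/0/1, and isNaN/isFinite
	// follow the IEEE behavior of CEL doubles.
	src := `math.round(-1.5) == -2.0 &&
	        math.ceil(1.2) == 2.0 &&
	        math.trunc(-1.3) == -1.0 &&
	        math.abs(-7) == 7 &&
	        math.sign(-42) == -1 &&
	        math.isNaN(0.0 / 0.0) &&
	        !math.isFinite(1.0 / 0.0)`
	ast, iss := env.Compile(src)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // true
}
```
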
Equivalent in behavior to: +// !math.isNaN(double) && !math.isInf(double) +// +// math.isFinite() -> +// +// Examples: +// +// math.isFinite(0.0/0.0) // returns false +// math.isFinite(1.2) // returns true +func Math(options ...MathOption) cel.EnvOption { + m := &mathLib{version: math.MaxUint32} + for _, o := range options { + m = o(m) + } + return cel.Lib(m) } const ( mathNamespace = "math" leastMacro = "least" greatestMacro = "greatest" - minFunc = "math.@min" - maxFunc = "math.@max" + + // Min-max functions + minFunc = "math.@min" + maxFunc = "math.@max" + + // Rounding functions + ceilFunc = "math.ceil" + floorFunc = "math.floor" + roundFunc = "math.round" + truncFunc = "math.trunc" + + // Floating point helper functions + isInfFunc = "math.isInf" + isNanFunc = "math.isNaN" + isFiniteFunc = "math.isFinite" + + // Signedness functions + absFunc = "math.abs" + signFunc = "math.sign" + + // Bitwise functions + bitAndFunc = "math.bitAnd" + bitOrFunc = "math.bitOr" + bitXorFunc = "math.bitXor" + bitNotFunc = "math.bitNot" + bitShiftLeftFunc = "math.bitShiftLeft" + bitShiftRightFunc = "math.bitShiftRight" ) -type mathLib struct{} +var ( + errIntOverflow = types.NewErr("integer overflow") +) + +// MathOption declares a functional operator for configuring math extensions. +type MathOption func(*mathLib) *mathLib + +// MathVersion sets the library version for math extensions. +func MathVersion(version uint32) MathOption { + return func(lib *mathLib) *mathLib { + lib.version = version + return lib + } +} + +type mathLib struct { + version uint32 +} // LibraryName implements the SingletonLibrary interface method. -func (mathLib) LibraryName() string { +func (*mathLib) LibraryName() string { return "cel.lib.ext.math" } // CompileOptions implements the Library interface method. -func (mathLib) CompileOptions() []cel.EnvOption { - return []cel.EnvOption{ +func (lib *mathLib) CompileOptions() []cel.EnvOption { + opts := []cel.EnvOption{ cel.Macros( // math.least(num, ...) - cel.NewReceiverVarArgMacro(leastMacro, mathLeast), + cel.ReceiverVarArgMacro(leastMacro, mathLeast), // math.greatest(num, ...) 
- cel.NewReceiverVarArgMacro(greatestMacro, mathGreatest), + cel.ReceiverVarArgMacro(greatestMacro, mathGreatest), ), cel.Function(minFunc, cel.Overload("math_@min_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, @@ -180,64 +464,149 @@ func (mathLib) CompileOptions() []cel.EnvOption { cel.UnaryBinding(maxList)), ), } + if lib.version >= 1 { + opts = append(opts, + // Rounding function declarations + cel.Function(ceilFunc, + cel.Overload("math_ceil_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(ceil))), + cel.Function(floorFunc, + cel.Overload("math_floor_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(floor))), + cel.Function(roundFunc, + cel.Overload("math_round_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(round))), + cel.Function(truncFunc, + cel.Overload("math_trunc_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(trunc))), + + // Floating point helpers + cel.Function(isInfFunc, + cel.Overload("math_isInf_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isInf))), + cel.Function(isNanFunc, + cel.Overload("math_isNaN_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isNaN))), + cel.Function(isFiniteFunc, + cel.Overload("math_isFinite_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isFinite))), + + // Signedness functions + cel.Function(absFunc, + cel.Overload("math_abs_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(absDouble)), + cel.Overload("math_abs_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(absInt)), + cel.Overload("math_abs_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(identity)), + ), + cel.Function(signFunc, + cel.Overload("math_sign_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(sign)), + cel.Overload("math_sign_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(sign)), + cel.Overload("math_sign_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(sign)), + ), + + // Bitwise operator declarations + cel.Function(bitAndFunc, + cel.Overload("math_bitAnd_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitAndPairInt)), + cel.Overload("math_bitAnd_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitAndPairUint)), + ), + cel.Function(bitOrFunc, + cel.Overload("math_bitOr_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitOrPairInt)), + cel.Overload("math_bitOr_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitOrPairUint)), + ), + cel.Function(bitXorFunc, + cel.Overload("math_bitXor_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitXorPairInt)), + cel.Overload("math_bitXor_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitXorPairUint)), + ), + cel.Function(bitNotFunc, + cel.Overload("math_bitNot_int_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(bitNotInt)), + cel.Overload("math_bitNot_uint_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(bitNotUint)), + ), + cel.Function(bitShiftLeftFunc, + cel.Overload("math_bitShiftLeft_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitShiftLeftIntInt)), + cel.Overload("math_bitShiftLeft_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType, + 
cel.BinaryBinding(bitShiftLeftUintInt)), + ), + cel.Function(bitShiftRightFunc, + cel.Overload("math_bitShiftRight_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitShiftRightIntInt)), + cel.Overload("math_bitShiftRight_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType, + cel.BinaryBinding(bitShiftRightUintInt)), + ), + ) + } + return opts } // ProgramOptions implements the Library interface method. -func (mathLib) ProgramOptions() []cel.ProgramOption { +func (*mathLib) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } -func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func mathLeast(meh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(mathNamespace, target) { return nil, nil } switch len(args) { case 0: - return nil, meh.NewError(target.GetId(), "math.least() requires at least one argument") + return nil, meh.NewError(target.ID(), "math.least() requires at least one argument") case 1: - if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { - return meh.GlobalCall(minFunc, args[0]), nil + if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) { + return meh.NewCall(minFunc, args[0]), nil } - return nil, meh.NewError(args[0].GetId(), "math.least() invalid single argument value") + return nil, meh.NewError(args[0].ID(), "math.least() invalid single argument value") case 2: err := checkInvalidArgs(meh, "math.least()", args) if err != nil { return nil, err } - return meh.GlobalCall(minFunc, args...), nil + return meh.NewCall(minFunc, args...), nil default: err := checkInvalidArgs(meh, "math.least()", args) if err != nil { return nil, err } - return meh.GlobalCall(minFunc, meh.NewList(args...)), nil + return meh.NewCall(minFunc, meh.NewList(args...)), nil } } -func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func mathGreatest(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(mathNamespace, target) { return nil, nil } switch len(args) { case 0: - return nil, meh.NewError(target.GetId(), "math.greatest() requires at least one argument") + return nil, mef.NewError(target.ID(), "math.greatest() requires at least one argument") case 1: - if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { - return meh.GlobalCall(maxFunc, args[0]), nil + if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) { + return mef.NewCall(maxFunc, args[0]), nil } - return nil, meh.NewError(args[0].GetId(), "math.greatest() invalid single argument value") + return nil, mef.NewError(args[0].ID(), "math.greatest() invalid single argument value") case 2: - err := checkInvalidArgs(meh, "math.greatest()", args) + err := checkInvalidArgs(mef, "math.greatest()", args) if err != nil { return nil, err } - return meh.GlobalCall(maxFunc, args...), nil + return mef.NewCall(maxFunc, args...), nil default: - err := checkInvalidArgs(meh, "math.greatest()", args) + err := checkInvalidArgs(mef, "math.greatest()", args) if err != nil { return nil, err } - return meh.GlobalCall(maxFunc, meh.NewList(args...)), nil + return mef.NewCall(maxFunc, mef.NewList(args...)), nil } } @@ -245,6 +614,165 @@ func identity(val ref.Val) ref.Val { return val } +func ceil(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Ceil(float64(v))) +} + +func 
floor(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Floor(float64(v))) +} + +func round(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Round(float64(v))) +} + +func trunc(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Trunc(float64(v))) +} + +func isInf(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Bool(math.IsInf(float64(v), 0)) +} + +func isFinite(val ref.Val) ref.Val { + v := float64(val.(types.Double)) + return types.Bool(!math.IsInf(v, 0) && !math.IsNaN(v)) +} + +func isNaN(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Bool(math.IsNaN(float64(v))) +} + +func absDouble(val ref.Val) ref.Val { + v := float64(val.(types.Double)) + return types.Double(math.Abs(v)) +} + +func absInt(val ref.Val) ref.Val { + v := int64(val.(types.Int)) + if v == math.MinInt64 { + return errIntOverflow + } + if v >= 0 { + return val + } + return -types.Int(v) +} + +func sign(val ref.Val) ref.Val { + switch v := val.(type) { + case types.Double: + if isNaN(v) == types.True { + return v + } + zero := types.Double(0) + if v > zero { + return types.Double(1) + } + if v < zero { + return types.Double(-1) + } + return zero + case types.Int: + return v.Compare(types.IntZero) + case types.Uint: + if v == types.Uint(0) { + return types.Uint(0) + } + return types.Uint(1) + default: + return maybeSuffixError(val, "math.sign") + } +} + +func bitAndPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l & r +} + +func bitAndPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l & r +} + +func bitOrPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l | r +} + +func bitOrPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l | r +} + +func bitXorPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l ^ r +} + +func bitXorPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l ^ r +} + +func bitNotInt(value ref.Val) ref.Val { + v := value.(types.Int) + return ^v +} + +func bitNotUint(value ref.Val) ref.Val { + v := value.(types.Uint) + return ^v +} + +func bitShiftLeftIntInt(value, bits ref.Val) ref.Val { + v := value.(types.Int) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftLeft() negative offset: %d", bs) + } + return v << bs +} + +func bitShiftLeftUintInt(value, bits ref.Val) ref.Val { + v := value.(types.Uint) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftLeft() negative offset: %d", bs) + } + return v << bs +} + +func bitShiftRightIntInt(value, bits ref.Val) ref.Val { + v := value.(types.Int) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftRight() negative offset: %d", bs) + } + return types.Int(types.Uint(v) >> bs) +} + +func bitShiftRightUintInt(value, bits ref.Val) ref.Val { + v := value.(types.Uint) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftRight() negative offset: %d", bs) + } + return v >> bs +} + func minPair(first, second ref.Val) ref.Val { cmp, ok := first.(traits.Comparer) if !ok { @@ -311,49 +839,49 @@ func maxList(numList ref.Val) ref.Val { } } -func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) 
*cel.Error { +func checkInvalidArgs(meh cel.MacroExprFactory, funcName string, args []ast.Expr) *cel.Error { for _, arg := range args { err := checkInvalidArgLiteral(funcName, arg) if err != nil { - return meh.NewError(arg.GetId(), err.Error()) + return meh.NewError(arg.ID(), err.Error()) } } return nil } -func checkInvalidArgLiteral(funcName string, arg *exprpb.Expr) error { - if !isValidArgType(arg) { +func checkInvalidArgLiteral(funcName string, arg ast.Expr) error { + if !isNumericArgType(arg) { return fmt.Errorf("%s simple literal arguments must be numeric", funcName) } return nil } -func isValidArgType(arg *exprpb.Expr) bool { - switch arg.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - c := arg.GetConstExpr() - switch c.GetConstantKind().(type) { - case *exprpb.Constant_DoubleValue, *exprpb.Constant_Int64Value, *exprpb.Constant_Uint64Value: +func isNumericArgType(arg ast.Expr) bool { + switch arg.Kind() { + case ast.LiteralKind: + c := ref.Val(arg.AsLiteral()) + switch c.(type) { + case types.Double, types.Int, types.Uint: return true default: return false } - case *exprpb.Expr_ListExpr, *exprpb.Expr_StructExpr: + case ast.ListKind, ast.MapKind, ast.StructKind: return false default: return true } } -func isListLiteralWithValidArgs(arg *exprpb.Expr) bool { - switch arg.GetExprKind().(type) { - case *exprpb.Expr_ListExpr: - list := arg.GetListExpr() - if len(list.GetElements()) == 0 { +func isListLiteralWithNumericArgs(arg ast.Expr) bool { + switch arg.Kind() { + case ast.ListKind: + list := arg.AsList() + if list.Size() == 0 { return false } - for _, e := range list.GetElements() { - if !isValidArgType(e) { + for _, e := range list.Elements() { + if !isNumericArgType(e) { return false } } diff --git a/constraint/vendor/github.com/google/cel-go/ext/native.go b/constraint/vendor/github.com/google/cel-go/ext/native.go index 0b5fc38ca..36ab4a7ae 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/native.go +++ b/constraint/vendor/github.com/google/cel-go/ext/native.go @@ -15,6 +15,7 @@ package ext import ( + "errors" "fmt" "reflect" "strings" @@ -77,12 +78,45 @@ var ( // same advice holds if you are using custom type adapters and type providers. The native type // provider composes over whichever type adapter and provider is configured in the cel.Env at // the time that it is invoked. -func NativeTypes(refTypes ...any) cel.EnvOption { +// +// There is also the possibility to rename the fields of native structs by setting the `cel` tag +// for fields you want to override. In order to enable this feature, pass in the `EnableStructTag` +// option. Here is an example to see it in action: +// +// ```go +// package identity +// +// type Account struct { +// ID int +// OwnerName string `cel:"owner"` +// } +// +// ``` +// +// The `OwnerName` field is now accessible in CEL via `owner`, e.g. `identity.Account{owner: 'bob'}`. +// In case there are duplicated field names in the struct, an error will be returned. +func NativeTypes(args ...any) cel.EnvOption { return func(env *cel.Env) (*cel.Env, error) { - tp, err := newNativeTypeProvider(env.CELTypeAdapter(), env.CELTypeProvider(), refTypes...) + nativeTypes := make([]any, 0, len(args)) + tpOptions := nativeTypeOptions{} + + for _, v := range args { + switch v := v.(type) { + case NativeTypesOption: + err := v(&tpOptions) + if err != nil { + return nil, err + } + default: + nativeTypes = append(nativeTypes, v) + } + } + + tp, err := newNativeTypeProvider(tpOptions, env.CELTypeAdapter(), env.CELTypeProvider(), nativeTypes...) 
if err != nil { return nil, err } + env, err = cel.CustomTypeAdapter(tp)(env) if err != nil { return nil, err @@ -91,22 +125,93 @@ func NativeTypes(refTypes ...any) cel.EnvOption { } } -func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) { +// NativeTypesOption is a functional interface for configuring handling of native types. +type NativeTypesOption func(*nativeTypeOptions) error + +// NativeTypesFieldNameHandler is a handler for mapping a reflect.StructField to a CEL field name. +// This can be used to override the default Go struct field to CEL field name mapping. +type NativeTypesFieldNameHandler = func(field reflect.StructField) string + +func fieldNameByTag(structTagToParse string) func(field reflect.StructField) string { + return func(field reflect.StructField) string { + tag, found := field.Tag.Lookup(structTagToParse) + if found { + splits := strings.Split(tag, ",") + if len(splits) > 0 { + // We make the assumption that the leftmost entry in the tag is the name. + // This seems to be true for most tags that have the concept of a name/key, such as: + // https://pkg.go.dev/encoding/xml#Marshal + // https://pkg.go.dev/encoding/json#Marshal + // https://pkg.go.dev/go.mongodb.org/mongo-driver/bson#hdr-Structs + // https://pkg.go.dev/gopkg.in/yaml.v2#Marshal + name := splits[0] + return name + } + } + + return field.Name + } +} + +type nativeTypeOptions struct { + // fieldNameHandler controls how CEL should perform struct field renames. + // This is most commonly used for switching to parsing based off the struct field tag, + // such as "cel" or "json". + fieldNameHandler NativeTypesFieldNameHandler +} + +// ParseStructTags configures if native types field names should be overridable by CEL struct tags. +// This is equivalent to ParseStructTag("cel") +func ParseStructTags(enabled bool) NativeTypesOption { + return func(ntp *nativeTypeOptions) error { + if enabled { + ntp.fieldNameHandler = fieldNameByTag("cel") + } else { + ntp.fieldNameHandler = nil + } + return nil + } +} + +// ParseStructTag configures the struct tag to parse. The 0th item in the tag is used as the name of the CEL field. +// For example: +// If the tag to parse is "cel" and the struct field has tag cel:"foo", the CEL struct field will be "foo". +// If the tag to parse is "json" and the struct field has tag json:"foo,omitempty", the CEL struct field will be "foo". +func ParseStructTag(tag string) NativeTypesOption { + return func(ntp *nativeTypeOptions) error { + ntp.fieldNameHandler = fieldNameByTag(tag) + return nil + } +} + +// ParseStructField configures how to parse Go struct fields. It can be used to customize struct field parsing. 
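
To show how the struct-tag options above fit together, here is a self-contained sketch (not part of this diff). It assumes the native provider exposes the struct under its package's short name, `main.Account` in this case, and that an untagged field keeps its Go name:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

// Account mirrors the doc-comment example: the `cel` tag renames OwnerName
// to "owner" on the CEL side once tag parsing is enabled.
type Account struct {
	ID        int
	OwnerName string `cel:"owner"`
}

func main() {
	env, err := cel.NewEnv(
		// ParseStructTags(true) is equivalent to ParseStructTag("cel").
		ext.NativeTypes(reflect.TypeOf(Account{}), ext.ParseStructTags(true)),
		// Assumed type name; the provider registers the struct under the
		// package's short name.
		cel.Variable("account", cel.ObjectType("main.Account")),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`account.owner == 'bob' && account.ID > 0`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{
		"account": Account{ID: 1, OwnerName: "bob"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // true
}
```
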
+func ParseStructField(handler NativeTypesFieldNameHandler) NativeTypesOption { + return func(ntp *nativeTypeOptions) error { + ntp.fieldNameHandler = handler + return nil + } +} + +func newNativeTypeProvider(tpOptions nativeTypeOptions, adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) { nativeTypes := make(map[string]*nativeType, len(refTypes)) for _, refType := range refTypes { switch rt := refType.(type) { case reflect.Type: - t, err := newNativeType(rt) + result, err := newNativeTypes(tpOptions.fieldNameHandler, rt) if err != nil { return nil, err } - nativeTypes[t.TypeName()] = t + for idx := range result { + nativeTypes[result[idx].TypeName()] = result[idx] + } case reflect.Value: - t, err := newNativeType(rt.Type()) + result, err := newNativeTypes(tpOptions.fieldNameHandler, rt.Type()) if err != nil { return nil, err } - nativeTypes[t.TypeName()] = t + for idx := range result { + nativeTypes[result[idx].TypeName()] = result[idx] + } default: return nil, fmt.Errorf("unsupported native type: %v (%T) must be reflect.Type or reflect.Value", rt, rt) } @@ -115,6 +220,7 @@ func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTy nativeTypes: nativeTypes, baseAdapter: adapter, baseProvider: provider, + options: tpOptions, }, nil } @@ -122,6 +228,7 @@ type nativeTypeProvider struct { nativeTypes map[string]*nativeType baseAdapter types.Adapter baseProvider types.Provider + options nativeTypeOptions } // EnumValue proxies to the types.Provider configured at the times the NativeTypes @@ -151,6 +258,32 @@ func (tp *nativeTypeProvider) FindStructType(typeName string) (*types.Type, bool return tp.baseProvider.FindStructType(typeName) } +func toFieldName(fieldNameHandler NativeTypesFieldNameHandler, f reflect.StructField) string { + if fieldNameHandler == nil { + return f.Name + } + + return fieldNameHandler(f) +} + +// FindStructFieldNames looks up the type definition first from the native types, then from +// the backing provider type set. If found, a set of field names corresponding to the type +// will be returned. 
+func (tp *nativeTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) { + if t, found := tp.nativeTypes[typeName]; found { + fieldCount := t.refType.NumField() + fields := make([]string, fieldCount) + for i := 0; i < fieldCount; i++ { + fields[i] = toFieldName(tp.options.fieldNameHandler, t.refType.Field(i)) + } + return fields, true + } + if celTypeFields, found := tp.baseProvider.FindStructFieldNames(typeName); found { + return celTypeFields, true + } + return tp.baseProvider.FindStructFieldNames(typeName) +} + // FindStructFieldType looks up a native type's field definition, and if the type name is not a native // type then proxies to the composed types.Provider func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) { @@ -170,13 +303,13 @@ func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (* Type: celType, IsSet: func(obj any) bool { refVal := reflect.Indirect(reflect.ValueOf(obj)) - refField := refVal.FieldByName(fieldName) + refField := refVal.FieldByName(refField.Name) return !refField.IsZero() }, GetFrom: func(obj any) (any, error) { refVal := reflect.Indirect(reflect.ValueOf(obj)) - refField := refVal.FieldByName(fieldName) - return getFieldValue(tp, refField), nil + refField := refVal.FieldByName(refField.Name) + return getFieldValue(refField), nil }, }, true } @@ -227,6 +360,9 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val { case []byte: return tp.baseAdapter.NativeToValue(val) default: + if refVal.Type().Elem() == reflect.TypeOf(byte(0)) { + return tp.baseAdapter.NativeToValue(val) + } return types.NewDynamicList(tp, val) } case reflect.Map: @@ -237,7 +373,7 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val { time.Time: return tp.baseAdapter.NativeToValue(val) default: - return newNativeObject(tp, val, rawVal) + return tp.newNativeObject(val, rawVal) } default: return tp.baseAdapter.NativeToValue(val) @@ -297,13 +433,13 @@ func convertToCelType(refType reflect.Type) (*cel.Type, bool) { return nil, false } -func newNativeObject(adapter types.Adapter, val any, refValue reflect.Value) ref.Val { - valType, err := newNativeType(refValue.Type()) +func (tp *nativeTypeProvider) newNativeObject(val any, refValue reflect.Value) ref.Val { + valType, err := newNativeType(tp.options.fieldNameHandler, refValue.Type()) if err != nil { return types.NewErr(err.Error()) } return &nativeObj{ - Adapter: adapter, + Adapter: tp, val: val, valType: valType, refValue: refValue, @@ -350,12 +486,13 @@ func (o *nativeObj) ConvertToNative(typeDesc reflect.Type) (any, error) { if !fieldValue.IsValid() || fieldValue.IsZero() { continue } + fieldName := toFieldName(o.valType.fieldNameHandler, fieldType) fieldCELVal := o.NativeToValue(fieldValue.Interface()) fieldJSONVal, err := fieldCELVal.ConvertToNative(jsonValueType) if err != nil { return nil, err } - fields[fieldType.Name] = fieldJSONVal.(*structpb.Value) + fields[fieldName] = fieldJSONVal.(*structpb.Value) } return &structpb.Struct{Fields: fields}, nil } @@ -447,7 +584,47 @@ func (o *nativeObj) Value() any { return o.val } -func newNativeType(rawType reflect.Type) (*nativeType, error) { +func newNativeTypes(fieldNameHandler NativeTypesFieldNameHandler, rawType reflect.Type) ([]*nativeType, error) { + nt, err := newNativeType(fieldNameHandler, rawType) + if err != nil { + return nil, err + } + result := []*nativeType{nt} + + alreadySeen := make(map[string]struct{}) + var iterateStructMembers func(reflect.Type) + iterateStructMembers 
= func(t reflect.Type) { + if k := t.Kind(); k == reflect.Pointer || k == reflect.Slice || k == reflect.Array || k == reflect.Map { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return + } + if _, seen := alreadySeen[t.String()]; seen { + return + } + alreadySeen[t.String()] = struct{}{} + nt, ntErr := newNativeType(fieldNameHandler, t) + if ntErr != nil { + err = ntErr + return + } + result = append(result, nt) + + for idx := 0; idx < t.NumField(); idx++ { + iterateStructMembers(t.Field(idx).Type) + } + } + iterateStructMembers(rawType) + + return result, err +} + +var ( + errDuplicatedFieldName = errors.New("field name already exists in struct") +) + +func newNativeType(fieldNameHandler NativeTypesFieldNameHandler, rawType reflect.Type) (*nativeType, error) { refType := rawType if refType.Kind() == reflect.Pointer { refType = refType.Elem() @@ -455,15 +632,34 @@ func newNativeType(rawType reflect.Type) (*nativeType, error) { if !isValidObjectType(refType) { return nil, fmt.Errorf("unsupported reflect.Type %v, must be reflect.Struct", rawType) } + + // Since naming collisions can only happen with struct tag parsing, we only check for them if it is enabled. + if fieldNameHandler != nil { + fieldNames := make(map[string]struct{}) + + for idx := 0; idx < refType.NumField(); idx++ { + field := refType.Field(idx) + fieldName := toFieldName(fieldNameHandler, field) + + if _, found := fieldNames[fieldName]; found { + return nil, fmt.Errorf("invalid field name `%s` in struct `%s`: %w", fieldName, refType.Name(), errDuplicatedFieldName) + } else { + fieldNames[fieldName] = struct{}{} + } + } + } + return &nativeType{ - typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), - refType: refType, + typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), + refType: refType, + fieldNameHandler: fieldNameHandler, }, nil } type nativeType struct { - typeName string - refType reflect.Type + typeName string + refType reflect.Type + fieldNameHandler NativeTypesFieldNameHandler } // ConvertToNative implements ref.Val.ConvertToNative. @@ -511,9 +707,26 @@ func (t *nativeType) Value() any { return t.typeName } +// fieldByName returns the corresponding reflect.StructField for the give name either by matching +// field tag or field name. 
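Because the duplicate-name check above only runs when tag parsing is enabled, colliding tags surface as an error at environment construction time. A small sketch under that assumption (the Bad type is hypothetical):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

// Bad maps two Go fields to the same CEL name once `cel` tags are parsed.
type Bad struct {
	A string `cel:"name"`
	B string `cel:"name"`
}

func main() {
	_, err := cel.NewEnv(ext.NativeTypes(reflect.TypeOf(Bad{}), ext.ParseStructTags(true)))
	// Expected: invalid field name `name` in struct `Bad`: field name already exists in struct
	fmt.Println(err)
}
```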
+func (t *nativeType) fieldByName(fieldName string) (reflect.StructField, bool) { + if t.fieldNameHandler == nil { + return t.refType.FieldByName(fieldName) + } + + for i := 0; i < t.refType.NumField(); i++ { + f := t.refType.Field(i) + if toFieldName(t.fieldNameHandler, f) == fieldName { + return f, true + } + } + + return reflect.StructField{}, false +} + // hasField returns whether a field name has a corresponding Golang reflect.StructField func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) { - f, found := t.refType.FieldByName(fieldName) + f, found := t.fieldByName(fieldName) if !found || !f.IsExported() || !isSupportedType(f.Type) { return reflect.StructField{}, false } @@ -521,21 +734,16 @@ func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) { } func adaptFieldValue(adapter types.Adapter, refField reflect.Value) ref.Val { - return adapter.NativeToValue(getFieldValue(adapter, refField)) + return adapter.NativeToValue(getFieldValue(refField)) } -func getFieldValue(adapter types.Adapter, refField reflect.Value) any { +func getFieldValue(refField reflect.Value) any { if refField.IsZero() { switch refField.Kind() { - case reflect.Array, reflect.Slice: - return types.NewDynamicList(adapter, []ref.Val{}) - case reflect.Map: - return types.NewDynamicMap(adapter, map[ref.Val]ref.Val{}) case reflect.Struct: if refField.Type() == timestampType { - return types.Timestamp{Time: time.Unix(0, 0)} + return time.Unix(0, 0) } - return reflect.New(refField.Type()).Elem().Interface() case reflect.Pointer: return reflect.New(refField.Type().Elem()).Interface() } diff --git a/constraint/vendor/github.com/google/cel-go/ext/protos.go b/constraint/vendor/github.com/google/cel-go/ext/protos.go index a7ca27a6a..68796f60a 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/protos.go +++ b/constraint/vendor/github.com/google/cel-go/ext/protos.go @@ -16,8 +16,7 @@ package ext import ( "github.com/google/cel-go/cel" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/ast" ) // Protos returns a cel.EnvOption to configure extended macros and functions for @@ -72,9 +71,9 @@ func (protoLib) CompileOptions() []cel.EnvOption { return []cel.EnvOption{ cel.Macros( // proto.getExt(msg, select_expression) - cel.NewReceiverMacro(getExtension, 2, getProtoExt), + cel.ReceiverMacro(getExtension, 2, getProtoExt), // proto.hasExt(msg, select_expression) - cel.NewReceiverMacro(hasExtension, 2, hasProtoExt), + cel.ReceiverMacro(hasExtension, 2, hasProtoExt), ), } } @@ -85,56 +84,56 @@ func (protoLib) ProgramOptions() []cel.ProgramOption { } // hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message. -func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func hasProtoExt(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(protoNamespace, target) { return nil, nil } - extensionField, err := getExtFieldName(meh, args[1]) + extensionField, err := getExtFieldName(mef, args[1]) if err != nil { return nil, err } - return meh.PresenceTest(args[0], extensionField), nil + return mef.NewPresenceTest(args[0], extensionField), nil } // getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message. 
-func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func getProtoExt(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { if !macroTargetMatchesNamespace(protoNamespace, target) { return nil, nil } - extFieldName, err := getExtFieldName(meh, args[1]) + extFieldName, err := getExtFieldName(mef, args[1]) if err != nil { return nil, err } - return meh.Select(args[0], extFieldName), nil + return mef.NewSelect(args[0], extFieldName), nil } -func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *cel.Error) { +func getExtFieldName(mef cel.MacroExprFactory, expr ast.Expr) (string, *cel.Error) { isValid := false extensionField := "" - switch expr.GetExprKind().(type) { - case *exprpb.Expr_SelectExpr: + switch expr.Kind() { + case ast.SelectKind: extensionField, isValid = validateIdentifier(expr) } if !isValid { - return "", meh.NewError(expr.GetId(), "invalid extension field") + return "", mef.NewError(expr.ID(), "invalid extension field") } return extensionField, nil } -func validateIdentifier(expr *exprpb.Expr) (string, bool) { - switch expr.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - return expr.GetIdentExpr().GetName(), true - case *exprpb.Expr_SelectExpr: - sel := expr.GetSelectExpr() - if sel.GetTestOnly() { +func validateIdentifier(expr ast.Expr) (string, bool) { + switch expr.Kind() { + case ast.IdentKind: + return expr.AsIdent(), true + case ast.SelectKind: + sel := expr.AsSelect() + if sel.IsTestOnly() { return "", false } - opStr, isIdent := validateIdentifier(sel.GetOperand()) + opStr, isIdent := validateIdentifier(sel.Operand()) if !isIdent { return "", false } - return opStr + "." + sel.GetField(), true + return opStr + "." + sel.FieldName(), true default: return "", false } diff --git a/constraint/vendor/github.com/google/cel-go/ext/sets.go b/constraint/vendor/github.com/google/cel-go/ext/sets.go index 833c15f61..7e9416655 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/sets.go +++ b/constraint/vendor/github.com/google/cel-go/ext/sets.go @@ -19,6 +19,8 @@ import ( "github.com/google/cel-go/cel" "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" @@ -119,6 +121,68 @@ func (setsLib) ProgramOptions() []cel.ProgramOption { } } +// NewSetMembershipOptimizer rewrites set membership tests using the `in` operator against a list +// of constant values of enum, int, uint, string, or boolean type into a set membership test against +// a map where the map keys are the elements of the list. 
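The set-membership optimizer documented above is applied to a checked AST through cel's static optimizer. A sketch of that wiring, assuming cel.NewStaticOptimizer and cel.AstToString behave as in this cel-go release:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Sets(), cel.Variable("x", cel.StringType))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`x in ['a', 'b', 'c', 'd']`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	setsOpt, err := ext.NewSetMembershipOptimizer()
	if err != nil {
		panic(err)
	}
	// The optimizer rewrites the constant list into a map literal so the `in`
	// test becomes a map-key membership check.
	optimized, iss := cel.NewStaticOptimizer(setsOpt).Optimize(env, ast)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	fmt.Println(cel.AstToString(optimized))
}
```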
+func NewSetMembershipOptimizer() (cel.ASTOptimizer, error) { + return setsLib{}, nil +} + +func (setsLib) Optimize(ctx *cel.OptimizerContext, a *ast.AST) *ast.AST { + root := ast.NavigateAST(a) + matches := ast.MatchDescendants(root, matchInConstantList(a)) + for _, match := range matches { + call := match.AsCall() + listArg := call.Args()[1] + entries := make([]ast.EntryExpr, len(listArg.AsList().Elements())) + for i, elem := range listArg.AsList().Elements() { + var entry ast.EntryExpr + if r, found := a.ReferenceMap()[elem.ID()]; found && r.Value != nil { + entry = ctx.NewMapEntry(ctx.NewLiteral(r.Value), ctx.NewLiteral(types.True), false) + } else { + entry = ctx.NewMapEntry(elem, ctx.NewLiteral(types.True), false) + } + entries[i] = entry + } + mapArg := ctx.NewMap(entries) + ctx.UpdateExpr(listArg, mapArg) + } + return a +} + +func matchInConstantList(a *ast.AST) ast.ExprMatcher { + return func(e ast.NavigableExpr) bool { + if e.Kind() != ast.CallKind { + return false + } + call := e.AsCall() + if call.FunctionName() != operators.In { + return false + } + aggregateVal := call.Args()[1] + if aggregateVal.Kind() != ast.ListKind { + return false + } + listVal := aggregateVal.AsList() + for _, elem := range listVal.Elements() { + if r, found := a.ReferenceMap()[elem.ID()]; found { + if r.Value != nil { + continue + } + } + if elem.Kind() != ast.LiteralKind { + return false + } + lit := elem.AsLiteral() + if !(lit.Type() == cel.StringType || lit.Type() == cel.IntType || + lit.Type() == cel.UintType || lit.Type() == cel.BoolType) { + return false + } + } + return true + } +} + func setsIntersects(listA, listB ref.Val) ref.Val { lA := listA.(traits.Lister) lB := listB.(traits.Lister) diff --git a/constraint/vendor/github.com/google/cel-go/ext/strings.go b/constraint/vendor/github.com/google/cel-go/ext/strings.go index 88c119f2b..2e590a4c5 100644 --- a/constraint/vendor/github.com/google/cel-go/ext/strings.go +++ b/constraint/vendor/github.com/google/cel-go/ext/strings.go @@ -21,19 +21,16 @@ import ( "fmt" "math" "reflect" - "sort" "strings" "unicode" "unicode/utf8" "golang.org/x/text/language" - "golang.org/x/text/message" "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - "github.com/google/cel-go/interpreter" ) const ( @@ -99,7 +96,7 @@ const ( // "a map inside a list: %s".format([[1, 2, 3, {"a": "x", "b": "y", "c": "z"}]]) // returns "a map inside a list: [1, 2, 3, {"a":"x", "b":"y", "c":"d"}]" // "true bool: %s - false bool: %s\nbinary bool: %b".format([true, false, true]) // returns "true bool: true - false bool: false\nbinary bool: 1" // -// Passing an incorrect type (an integer to `%s`) is considered an error, as well as attempting +// Passing an incorrect type (a string to `%b`) is considered an error, as well as attempting // to use more formatting clauses than there are arguments (`%d %d %d` while passing two ints, for instance). // If compile-time checking is enabled, and the formatting string is a constant, and the argument list is a literal, // then letting any arguments go unused/unformatted is also considered an error. 
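The format() behavior described above is easier to follow next to a concrete call. A minimal sketch using the strings extension with its defaults:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Strings())
	if err != nil {
		panic(err)
	}
	// %s, %d, and %.2f clauses must match the argument types; a mismatch
	// (e.g. a string passed to %b) is reported as an error.
	ast, iss := env.Compile(`'%s bought %d items for %.2f euros'.format(['Alice', 3, 9.5])`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	fmt.Println(out, err) // Alice bought 3 items for 9.50 euros <nil>
}
```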
@@ -122,7 +119,8 @@ const ( // 'hello mellow'.indexOf('jello') // returns -1 // 'hello mellow'.indexOf('', 2) // returns 2 // 'hello mellow'.indexOf('ello', 2) // returns 7 -// 'hello mellow'.indexOf('ello', 20) // error +// 'hello mellow'.indexOf('ello', 20) // returns -1 +// 'hello mellow'.indexOf('ello', -1) // error // // # Join // @@ -158,6 +156,7 @@ const ( // 'hello mellow'.lastIndexOf('ello') // returns 7 // 'hello mellow'.lastIndexOf('jello') // returns -1 // 'hello mellow'.lastIndexOf('ello', 6) // returns 1 +// 'hello mellow'.lastIndexOf('ello', 20) // returns -1 // 'hello mellow'.lastIndexOf('ello', -1) // error // // # LowerAscii @@ -205,6 +204,8 @@ const ( // 'hello hello'.replace('he', 'we', -1) // returns 'wello wello' // 'hello hello'.replace('he', 'we', 1) // returns 'wello hello' // 'hello hello'.replace('he', 'we', 0) // returns 'hello hello' +// 'hello hello'.replace('', '_') // returns '_h_e_l_l_o_ _h_e_l_l_o_' +// 'hello hello'.replace('h', '') // returns 'ello ello' // // # Split // @@ -270,8 +271,26 @@ const ( // // 'TacoCat'.upperAscii() // returns 'TACOCAT' // 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII' +// +// # Reverse +// +// Introduced at version: 3 +// +// Returns a new string whose characters are the same as the target string, only formatted in +// reverse order. +// This function relies on converting strings to rune arrays in order to reverse +// +// .reverse() -> +// +// Examples: +// +// 'gums'.reverse() // returns 'smug' +// 'John Smith'.reverse() // returns 'htimS nhoJ' func Strings(options ...StringsOption) cel.EnvOption { - s := &stringLib{version: math.MaxUint32} + s := &stringLib{ + version: math.MaxUint32, + validateFormat: true, + } for _, o := range options { s = o(s) } @@ -279,8 +298,9 @@ func Strings(options ...StringsOption) cel.EnvOption { } type stringLib struct { - locale string - version uint32 + locale string + version uint32 + validateFormat bool } // LibraryName implements the SingletonLibrary interface method. @@ -317,6 +337,17 @@ func StringsVersion(version uint32) StringsOption { } } +// StringsValidateFormatCalls validates type-checked ASTs to ensure that string.format() calls have +// valid formatting clauses and valid argument types for each clause. +// +// Enabled by default. +func StringsValidateFormatCalls(value bool) StringsOption { + return func(s *stringLib) *stringLib { + s.validateFormat = value + return s + } +} + // CompileOptions implements the Library interface method. 
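The new reverse() function, the relaxed out-of-range indexOf()/lastIndexOf() offsets, and the format-call validator option documented above can all be combined when the environment is built. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Strings(
		ext.StringsVersion(3),                // enables reverse()
		ext.StringsValidateFormatCalls(true), // compile-time checking of format() clauses (the default)
	))
	if err != nil {
		panic(err)
	}
	// indexOf with an offset past the end of the string now returns -1 instead of erroring.
	ast, iss := env.Compile(`'John Smith'.reverse() == 'htimS nhoJ' && 'hello mellow'.indexOf('ello', 20) == -1`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	fmt.Println(out, err) // true <nil>
}
```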
func (lib *stringLib) CompileOptions() []cel.EnvOption { formatLocale := "en_US" @@ -440,13 +471,15 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { cel.FunctionBinding(func(args ...ref.Val) ref.Val { s := string(args[0].(types.String)) formatArgs := args[1].(traits.Lister) - return stringOrError(interpreter.ParseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale)) + return stringOrError(parseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale)) }))), cel.Function("strings.quote", cel.Overload("strings_quote", []*cel.Type{cel.StringType}, cel.StringType, cel.UnaryBinding(func(str ref.Val) ref.Val { s := str.(types.String) return stringOrError(quote(string(s))) - })))) + }))), + + cel.ASTValidators(stringFormatValidator{})) } if lib.version >= 2 { @@ -471,7 +504,7 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { cel.UnaryBinding(func(list ref.Val) ref.Val { l, err := list.ConvertToNative(stringListType) if err != nil { - return types.NewErr(err.Error()) + return types.WrapErr(err) } return stringOrError(join(l.([]string))) })), @@ -479,13 +512,26 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { cel.BinaryBinding(func(list, delim ref.Val) ref.Val { l, err := list.ConvertToNative(stringListType) if err != nil { - return types.NewErr(err.Error()) + return types.WrapErr(err) } d := delim.(types.String) return stringOrError(joinSeparator(l.([]string), string(d))) }))), ) } + if lib.version >= 3 { + opts = append(opts, + cel.Function("reverse", + cel.MemberOverload("string_reverse", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(reverse(string(s))) + }))), + ) + } + if lib.validateFormat { + opts = append(opts, cel.ASTValidators(stringFormatValidator{})) + } return opts } @@ -517,9 +563,13 @@ func indexOfOffset(str, substr string, offset int64) (int64, error) { off := int(offset) runes := []rune(str) subrunes := []rune(substr) - if off < 0 || off >= len(runes) { + if off < 0 { return -1, fmt.Errorf("index out of range: %d", off) } + // If the offset exceeds the length, return -1 rather than error. 
+ if off >= len(runes) { + return -1, nil + } for i := off; i < len(runes)-(len(subrunes)-1); i++ { found := true for j := 0; j < len(subrunes); j++ { @@ -550,9 +600,13 @@ func lastIndexOfOffset(str, substr string, offset int64) (int64, error) { off := int(offset) runes := []rune(str) subrunes := []rune(substr) - if off < 0 || off >= len(runes) { + if off < 0 { return -1, fmt.Errorf("index out of range: %d", off) } + // If the offset is far greater than the length return -1 + if off >= len(runes) { + return -1, nil + } if off > len(runes)-len(subrunes) { off = len(runes) - len(subrunes) } @@ -636,6 +690,14 @@ func upperASCII(str string) (string, error) { return string(runes), nil } +func reverse(str string) (string, error) { + chars := []rune(str) + for i, j := 0, len(chars)-1; i < j; i, j = i+1, j-1 { + chars[i], chars[j] = chars[j], chars[i] + } + return string(chars), nil +} + func joinSeparator(strs []string, separator string) (string, error) { return strings.Join(strs, separator), nil } @@ -661,238 +723,6 @@ func joinValSeparator(strs traits.Lister, separator string) (string, error) { return sb.String(), nil } -type clauseImpl func(ref.Val, string) (string, error) - -func clauseForType(argType ref.Type) (clauseImpl, error) { - switch argType { - case types.IntType, types.UintType: - return formatDecimal, nil - case types.StringType, types.BytesType, types.BoolType, types.NullType, types.TypeType: - return FormatString, nil - case types.TimestampType, types.DurationType: - // special case to ensure timestamps/durations get printed as CEL literals - return func(arg ref.Val, locale string) (string, error) { - argStrVal := arg.ConvertToType(types.StringType) - argStr := argStrVal.Value().(string) - if arg.Type() == types.TimestampType { - return fmt.Sprintf("timestamp(%q)", argStr), nil - } - if arg.Type() == types.DurationType { - return fmt.Sprintf("duration(%q)", argStr), nil - } - return "", fmt.Errorf("cannot convert argument of type %s to timestamp/duration", arg.Type().TypeName()) - }, nil - case types.ListType: - return formatList, nil - case types.MapType: - return formatMap, nil - case types.DoubleType: - // avoid formatFixed so we can output a period as the decimal separator in order - // to always be a valid CEL literal - return func(arg ref.Val, locale string) (string, error) { - argDouble, ok := arg.Value().(float64) - if !ok { - return "", fmt.Errorf("couldn't convert %s to float64", arg.Type().TypeName()) - } - fmtStr := fmt.Sprintf("%%.%df", defaultPrecision) - return fmt.Sprintf(fmtStr, argDouble), nil - }, nil - case types.TypeType: - return func(arg ref.Val, locale string) (string, error) { - return fmt.Sprintf("type(%s)", arg.Value().(string)), nil - }, nil - default: - return nil, fmt.Errorf("no formatting function for %s", argType.TypeName()) - } -} - -func formatList(arg ref.Val, locale string) (string, error) { - argList := arg.(traits.Lister) - argIterator := argList.Iterator() - var listStrBuilder strings.Builder - _, err := listStrBuilder.WriteRune('[') - if err != nil { - return "", fmt.Errorf("error writing to list string: %w", err) - } - for argIterator.HasNext() == types.True { - member := argIterator.Next() - memberFormat, err := clauseForType(member.Type()) - if err != nil { - return "", err - } - unquotedStr, err := memberFormat(member, locale) - if err != nil { - return "", err - } - str := quoteForCEL(member, unquotedStr) - _, err = listStrBuilder.WriteString(str) - if err != nil { - return "", fmt.Errorf("error writing to list string: %w", err) - } - 
if argIterator.HasNext() == types.True { - _, err = listStrBuilder.WriteString(", ") - if err != nil { - return "", fmt.Errorf("error writing to list string: %w", err) - } - } - } - _, err = listStrBuilder.WriteRune(']') - if err != nil { - return "", fmt.Errorf("error writing to list string: %w", err) - } - return listStrBuilder.String(), nil -} - -func formatMap(arg ref.Val, locale string) (string, error) { - argMap := arg.(traits.Mapper) - argIterator := argMap.Iterator() - type mapPair struct { - key string - value string - } - argPairs := make([]mapPair, argMap.Size().Value().(int64)) - i := 0 - for argIterator.HasNext() == types.True { - key := argIterator.Next() - var keyFormat clauseImpl - switch key.Type() { - case types.StringType, types.BoolType: - keyFormat = FormatString - case types.IntType, types.UintType: - keyFormat = formatDecimal - default: - return "", fmt.Errorf("no formatting function for map key of type %s", key.Type().TypeName()) - } - unquotedKeyStr, err := keyFormat(key, locale) - if err != nil { - return "", err - } - keyStr := quoteForCEL(key, unquotedKeyStr) - value, found := argMap.Find(key) - if !found { - return "", fmt.Errorf("could not find key: %q", key) - } - valueFormat, err := clauseForType(value.Type()) - if err != nil { - return "", err - } - unquotedValueStr, err := valueFormat(value, locale) - if err != nil { - return "", err - } - valueStr := quoteForCEL(value, unquotedValueStr) - argPairs[i] = mapPair{keyStr, valueStr} - i++ - } - sort.SliceStable(argPairs, func(x, y int) bool { - return argPairs[x].key < argPairs[y].key - }) - var mapStrBuilder strings.Builder - _, err := mapStrBuilder.WriteRune('{') - if err != nil { - return "", fmt.Errorf("error writing to map string: %w", err) - } - for i, entry := range argPairs { - _, err = mapStrBuilder.WriteString(fmt.Sprintf("%s:%s", entry.key, entry.value)) - if err != nil { - return "", fmt.Errorf("error writing to map string: %w", err) - } - if i < len(argPairs)-1 { - _, err = mapStrBuilder.WriteString(", ") - if err != nil { - return "", fmt.Errorf("error writing to map string: %w", err) - } - } - } - _, err = mapStrBuilder.WriteRune('}') - if err != nil { - return "", fmt.Errorf("error writing to map string: %w", err) - } - return mapStrBuilder.String(), nil -} - -// quoteForCEL takes a formatted, unquoted value and quotes it in a manner -// suitable for embedding directly in CEL. -func quoteForCEL(refVal ref.Val, unquotedValue string) string { - switch refVal.Type() { - case types.StringType: - return fmt.Sprintf("%q", unquotedValue) - case types.BytesType: - return fmt.Sprintf("b%q", unquotedValue) - case types.DoubleType: - // special case to handle infinity/NaN - num := refVal.Value().(float64) - if math.IsInf(num, 1) || math.IsInf(num, -1) || math.IsNaN(num) { - return fmt.Sprintf("%q", unquotedValue) - } - return unquotedValue - default: - return unquotedValue - } -} - -// FormatString returns the string representation of a CEL value. -// It is used to implement the %s specifier in the (string).format() extension -// function. 
-func FormatString(arg ref.Val, locale string) (string, error) { - switch arg.Type() { - case types.ListType: - return formatList(arg, locale) - case types.MapType: - return formatMap(arg, locale) - case types.IntType, types.UintType, types.DoubleType, - types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType: - argStrVal := arg.ConvertToType(types.StringType) - argStr, ok := argStrVal.Value().(string) - if !ok { - return "", fmt.Errorf("could not convert argument %q to string", argStrVal) - } - return argStr, nil - case types.NullType: - return "null", nil - default: - return "", fmt.Errorf("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps, was given %s", arg.Type().TypeName()) - } -} - -func formatDecimal(arg ref.Val, locale string) (string, error) { - switch arg.Type() { - case types.IntType: - argInt, ok := arg.ConvertToType(types.IntType).Value().(int64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) - } - return fmt.Sprintf("%d", argInt), nil - case types.UintType: - argInt, ok := arg.ConvertToType(types.UintType).Value().(uint64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) - } - return fmt.Sprintf("%d", argInt), nil - default: - return "", fmt.Errorf("decimal clause can only be used on integers, was given %s", arg.Type().TypeName()) - } -} - -func matchLanguage(locale string) (language.Tag, error) { - matcher, err := makeMatcher(locale) - if err != nil { - return language.Und, err - } - tag, _ := language.MatchStrings(matcher, locale) - return tag, nil -} - -func makeMatcher(locale string) (language.Matcher, error) { - tags := make([]language.Tag, 0) - tag, err := language.Parse(locale) - if err != nil { - return nil, err - } - tags = append(tags, tag) - return language.NewMatcher(tags), nil -} - // quote implements a string quoting function. The string will be wrapped in // double quotes, and all valid CEL escape sequences will be escaped to show up // literally if printed. 
If the input contains any invalid UTF-8, the invalid runes @@ -940,156 +770,6 @@ func sanitize(s string) string { return sanitizedStringBuilder.String() } -type stringFormatter struct{} - -func (c *stringFormatter) String(arg ref.Val, locale string) (string, error) { - return FormatString(arg, locale) -} - -func (c *stringFormatter) Decimal(arg ref.Val, locale string) (string, error) { - return formatDecimal(arg, locale) -} - -func (c *stringFormatter) Fixed(precision *int) func(ref.Val, string) (string, error) { - if precision == nil { - precision = new(int) - *precision = defaultPrecision - } - return func(arg ref.Val, locale string) (string, error) { - strException := false - if arg.Type() == types.StringType { - argStr := arg.Value().(string) - if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { - strException = true - } - } - if arg.Type() != types.DoubleType && !strException { - return "", fmt.Errorf("fixed-point clause can only be used on doubles, was given %s", arg.Type().TypeName()) - } - argFloatVal := arg.ConvertToType(types.DoubleType) - argFloat, ok := argFloatVal.Value().(float64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value()) - } - fmtStr := fmt.Sprintf("%%.%df", *precision) - - matchedLocale, err := matchLanguage(locale) - if err != nil { - return "", fmt.Errorf("error matching locale: %w", err) - } - return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil - } -} - -func (c *stringFormatter) Scientific(precision *int) func(ref.Val, string) (string, error) { - if precision == nil { - precision = new(int) - *precision = defaultPrecision - } - return func(arg ref.Val, locale string) (string, error) { - strException := false - if arg.Type() == types.StringType { - argStr := arg.Value().(string) - if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { - strException = true - } - } - if arg.Type() != types.DoubleType && !strException { - return "", fmt.Errorf("scientific clause can only be used on doubles, was given %s", arg.Type().TypeName()) - } - argFloatVal := arg.ConvertToType(types.DoubleType) - argFloat, ok := argFloatVal.Value().(float64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value()) - } - matchedLocale, err := matchLanguage(locale) - if err != nil { - return "", fmt.Errorf("error matching locale: %w", err) - } - fmtStr := fmt.Sprintf("%%%de", *precision) - return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil - } -} - -func (c *stringFormatter) Binary(arg ref.Val, locale string) (string, error) { - switch arg.Type() { - case types.IntType: - argInt := arg.Value().(int64) - // locale is intentionally unused as integers formatted as binary - // strings are locale-independent - return fmt.Sprintf("%b", argInt), nil - case types.UintType: - argInt := arg.Value().(uint64) - return fmt.Sprintf("%b", argInt), nil - case types.BoolType: - argBool := arg.Value().(bool) - if argBool { - return "1", nil - } - return "0", nil - default: - return "", fmt.Errorf("only integers and bools can be formatted as binary, was given %s", arg.Type().TypeName()) - } -} - -func (c *stringFormatter) Hex(useUpper bool) func(ref.Val, string) (string, error) { - return func(arg ref.Val, locale string) (string, error) { - fmtStr := "%x" - if useUpper { - fmtStr = "%X" - } - switch arg.Type() { - case types.StringType, types.BytesType: - if arg.Type() == types.BytesType { - return fmt.Sprintf(fmtStr, arg.Value().([]byte)), nil - } - return 
fmt.Sprintf(fmtStr, arg.Value().(string)), nil - case types.IntType: - argInt, ok := arg.Value().(int64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) - } - return fmt.Sprintf(fmtStr, argInt), nil - case types.UintType: - argInt, ok := arg.Value().(uint64) - if !ok { - return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) - } - return fmt.Sprintf(fmtStr, argInt), nil - default: - return "", fmt.Errorf("only integers, byte buffers, and strings can be formatted as hex, was given %s", arg.Type().TypeName()) - } - } -} - -func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) { - switch arg.Type() { - case types.IntType: - argInt := arg.Value().(int64) - return fmt.Sprintf("%o", argInt), nil - case types.UintType: - argInt := arg.Value().(uint64) - return fmt.Sprintf("%o", argInt), nil - default: - return "", fmt.Errorf("octal clause can only be used on integers, was given %s", arg.Type().TypeName()) - } -} - -type stringArgList struct { - args traits.Lister -} - -func (c *stringArgList) Arg(index int64) (ref.Val, error) { - if index >= c.args.Size().Value().(int64) { - return nil, fmt.Errorf("index %d out of range", index) - } - return c.args.Get(types.Int(index)), nil -} - -func (c *stringArgList) ArgSize() int64 { - return c.args.Size().Value().(int64) -} - var ( stringListType = reflect.TypeOf([]string{}) ) diff --git a/constraint/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/interpreter/BUILD.bazel index 3a5219eb5..220e23d47 100644 --- a/constraint/vendor/github.com/google/cel-go/interpreter/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/interpreter/BUILD.bazel @@ -14,7 +14,6 @@ go_library( "decorators.go", "dispatcher.go", "evalstate.go", - "formatting.go", "interpretable.go", "interpreter.go", "optimizations.go", diff --git a/constraint/vendor/github.com/google/cel-go/interpreter/activation.go b/constraint/vendor/github.com/google/cel-go/interpreter/activation.go index a80264451..1577f3590 100644 --- a/constraint/vendor/github.com/google/cel-go/interpreter/activation.go +++ b/constraint/vendor/github.com/google/cel-go/interpreter/activation.go @@ -17,7 +17,6 @@ package interpreter import ( "errors" "fmt" - "sync" "github.com/google/cel-go/common/types/ref" ) @@ -167,35 +166,3 @@ type partActivation struct { func (a *partActivation) UnknownAttributePatterns() []*AttributePattern { return a.unknowns } - -// varActivation represents a single mutable variable binding. -// -// This activation type should only be used within folds as the fold loop controls the object -// life-cycle. -type varActivation struct { - parent Activation - name string - val ref.Val -} - -// Parent implements the Activation interface method. -func (v *varActivation) Parent() Activation { - return v.parent -} - -// ResolveName implements the Activation interface method. -func (v *varActivation) ResolveName(name string) (any, bool) { - if name == v.name { - return v.val, true - } - return v.parent.ResolveName(name) -} - -var ( - // pool of var activations to reduce allocations during folds. 
- varActivationPool = &sync.Pool{ - New: func() any { - return &varActivation{} - }, - } -) diff --git a/constraint/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/constraint/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go index 1fbaaf17e..8f19bde7e 100644 --- a/constraint/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go +++ b/constraint/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go @@ -178,10 +178,8 @@ func numericValueEquals(value any, celValue ref.Val) bool { // NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing // AttributePattern matches with PartialActivation inputs. -func NewPartialAttributeFactory(container *containers.Container, - adapter types.Adapter, - provider types.Provider) AttributeFactory { - fac := NewAttributeFactory(container, adapter, provider) +func NewPartialAttributeFactory(container *containers.Container, adapter types.Adapter, provider types.Provider, opts ...AttrFactoryOption) AttributeFactory { + fac := NewAttributeFactory(container, adapter, provider, opts...) return &partialAttributeFactory{ AttributeFactory: fac, container: container, diff --git a/constraint/vendor/github.com/google/cel-go/interpreter/attributes.go b/constraint/vendor/github.com/google/cel-go/interpreter/attributes.go index ca97bdfcf..b1b3aacc8 100644 --- a/constraint/vendor/github.com/google/cel-go/interpreter/attributes.go +++ b/constraint/vendor/github.com/google/cel-go/interpreter/attributes.go @@ -126,21 +126,39 @@ type NamespacedAttribute interface { Qualifiers() []Qualifier } +// AttrFactoryOption specifies a functional option for configuring an attribute factory. +type AttrFactoryOption func(*attrFactory) *attrFactory + +// EnableErrorOnBadPresenceTest error generation when a presence test or optional field selection +// is performed on a primitive type. +func EnableErrorOnBadPresenceTest(value bool) AttrFactoryOption { + return func(fac *attrFactory) *attrFactory { + fac.errorOnBadPresenceTest = value + return fac + } +} + // NewAttributeFactory returns a default AttributeFactory which is produces Attribute values // capable of resolving types by simple names and qualify the values using the supported qualifier // types: bool, int, string, and uint. -func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider) AttributeFactory { - return &attrFactory{ +func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider, opts ...AttrFactoryOption) AttributeFactory { + fac := &attrFactory{ container: cont, adapter: a, provider: p, } + for _, o := range opts { + fac = o(fac) + } + return fac } type attrFactory struct { container *containers.Container adapter types.Adapter provider types.Provider + + errorOnBadPresenceTest bool } // AbsoluteAttribute refers to a variable value and an optional qualifier path. @@ -149,12 +167,13 @@ type attrFactory struct { // resolution rules. 
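EnableErrorOnBadPresenceTest above is a low-level option on the attribute factory rather than a cel.EnvOption. A sketch of constructing a factory with it, assuming containers.DefaultContainer and types.NewRegistry from the same module; the factory is normally consumed by the interpreter's planner rather than used directly:

```go
package main

import (
	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	reg, err := types.NewRegistry()
	if err != nil {
		panic(err)
	}
	// With the option enabled, presence tests against primitive-typed values surface
	// an error instead of quietly evaluating to false.
	fac := interpreter.NewAttributeFactory(
		containers.DefaultContainer, reg, reg,
		interpreter.EnableErrorOnBadPresenceTest(true),
	)
	_ = fac
}
```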
func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute { return &absoluteAttribute{ - id: id, - namespaceNames: names, - qualifiers: []Qualifier{}, - adapter: r.adapter, - provider: r.provider, - fac: r, + id: id, + namespaceNames: names, + qualifiers: []Qualifier{}, + adapter: r.adapter, + provider: r.provider, + fac: r, + errorOnBadPresenceTest: r.errorOnBadPresenceTest, } } @@ -188,11 +207,12 @@ func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute { // RelativeAttribute refers to an expression and an optional qualifier path. func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute { return &relativeAttribute{ - id: id, - operand: operand, - qualifiers: []Qualifier{}, - adapter: r.adapter, - fac: r, + id: id, + operand: operand, + qualifiers: []Qualifier{}, + adapter: r.adapter, + fac: r, + errorOnBadPresenceTest: r.errorOnBadPresenceTest, } } @@ -214,7 +234,7 @@ func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, o }, nil } } - return newQualifier(r.adapter, qualID, val, opt) + return newQualifier(r.adapter, qualID, val, opt, r.errorOnBadPresenceTest) } type absoluteAttribute struct { @@ -226,6 +246,8 @@ type absoluteAttribute struct { adapter types.Adapter provider types.Provider fac AttributeFactory + + errorOnBadPresenceTest bool } // ID implements the Attribute interface method. @@ -287,6 +309,9 @@ func (a *absoluteAttribute) Resolve(vars Activation) (any, error) { // determine whether the type is unknown before returning. obj, found := vars.ResolveName(nm) if found { + if celErr, ok := obj.(*types.Err); ok { + return nil, celErr.Unwrap() + } obj, isOpt, err := applyQualifiers(vars, obj, a.qualifiers) if err != nil { return nil, err @@ -511,6 +536,8 @@ type relativeAttribute struct { qualifiers []Qualifier adapter types.Adapter fac AttributeFactory + + errorOnBadPresenceTest bool } // ID is an implementation of the Attribute interface method. 
@@ -574,7 +601,7 @@ func (a *relativeAttribute) String() string { return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand) } -func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, error) { +func newQualifier(adapter types.Adapter, id int64, v any, opt, errorOnBadPresenceTest bool) (Qualifier, error) { var qual Qualifier switch val := v.(type) { case Attribute: @@ -589,71 +616,138 @@ func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, }, nil case string: qual = &stringQualifier{ - id: id, - value: val, - celValue: types.String(val), - adapter: adapter, - optional: opt, + id: id, + value: val, + celValue: types.String(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case int: qual = &intQualifier{ - id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt, + id: id, + value: int64(val), + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case int32: qual = &intQualifier{ - id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt, + id: id, + value: int64(val), + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case int64: qual = &intQualifier{ - id: id, value: val, celValue: types.Int(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case uint: qual = &uintQualifier{ - id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt, + id: id, + value: uint64(val), + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case uint32: qual = &uintQualifier{ - id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt, + id: id, + value: uint64(val), + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case uint64: qual = &uintQualifier{ - id: id, value: val, celValue: types.Uint(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case bool: qual = &boolQualifier{ - id: id, value: val, celValue: types.Bool(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Bool(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case float32: qual = &doubleQualifier{ - id: id, - value: float64(val), - celValue: types.Double(val), - adapter: adapter, - optional: opt, + id: id, + value: float64(val), + celValue: types.Double(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case float64: qual = &doubleQualifier{ - id: id, value: val, celValue: types.Double(val), adapter: adapter, optional: opt, + id: id, + value: val, + celValue: types.Double(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.String: qual = &stringQualifier{ - id: id, value: string(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: string(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Int: qual = &intQualifier{ - id: id, value: int64(val), celValue: val, adapter: 
adapter, optional: opt, + id: id, + value: int64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Uint: qual = &uintQualifier{ - id: id, value: uint64(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: uint64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Bool: qual = &boolQualifier{ - id: id, value: bool(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: bool(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case types.Double: qual = &doubleQualifier{ - id: id, value: float64(val), celValue: val, adapter: adapter, optional: opt, + id: id, + value: float64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, } case *types.Unknown: qual = &unknownQualifier{id: id, value: val} @@ -684,11 +778,12 @@ func (q *attrQualifier) IsOptional() bool { } type stringQualifier struct { - id int64 - value string - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value string + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -771,7 +866,7 @@ func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest return obj, true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -785,11 +880,12 @@ func (q *stringQualifier) Value() ref.Val { } type intQualifier struct { - id int64 - value int64 - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value int64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -895,7 +991,7 @@ func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, p return o[i], true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -912,11 +1008,12 @@ func (q *intQualifier) Value() ref.Val { } type uintQualifier struct { - id int64 - value uint64 - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value uint64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -963,7 +1060,7 @@ func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest, return obj, true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -977,11 +1074,12 @@ func (q *uintQualifier) Value() ref.Val { } type boolQualifier struct { - id int64 - value bool - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value bool + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. 
@@ -1014,7 +1112,7 @@ func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest, return obj, true, nil } default: - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } if presenceTest { return nil, false, nil @@ -1089,11 +1187,12 @@ func (q *fieldQualifier) Value() ref.Val { // type may not be known ahead of time and may not conform to the standard types supported as valid // protobuf map key types. type doubleQualifier struct { - id int64 - value float64 - celValue ref.Val - adapter types.Adapter - optional bool + id int64 + value float64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool } // ID is an implementation of the Qualifier interface method. @@ -1117,7 +1216,7 @@ func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnl } func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { - return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly) + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) } // Value implements the ConstantQualifier interface @@ -1223,7 +1322,7 @@ func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAt // refQualify attempts to convert the value to a CEL value and then uses reflection methods to try and // apply the qualifier with the option to presence test field accesses before retrieving field values. -func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) { +func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly, errorOnBadPresenceTest bool) (ref.Val, bool, error) { celVal := adapter.NativeToValue(obj) switch v := celVal.(type) { case *types.Unknown: @@ -1280,7 +1379,7 @@ func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, prese } return val, true, nil default: - if presenceTest { + if presenceTest && !errorOnBadPresenceTest { return nil, false, nil } return nil, false, missingKey(idx) diff --git a/constraint/vendor/github.com/google/cel-go/interpreter/formatting.go b/constraint/vendor/github.com/google/cel-go/interpreter/formatting.go deleted file mode 100644 index e3f753374..000000000 --- a/constraint/vendor/github.com/google/cel-go/interpreter/formatting.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package interpreter - -import ( - "errors" - "fmt" - "strconv" - "strings" - "unicode" - - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" -) - -type typeVerifier func(int64, ...ref.Type) (bool, error) - -// InterpolateFormattedString checks the syntax and cardinality of any string.format calls present in the expression and reports -// any errors at compile time. 
-func InterpolateFormattedString(verifier typeVerifier) InterpretableDecorator { - return func(inter Interpretable) (Interpretable, error) { - call, ok := inter.(InterpretableCall) - if !ok { - return inter, nil - } - if call.OverloadID() != "string_format" { - return inter, nil - } - args := call.Args() - if len(args) != 2 { - return nil, fmt.Errorf("wrong number of arguments to string.format (expected 2, got %d)", len(args)) - } - fmtStrInter, ok := args[0].(InterpretableConst) - if !ok { - return inter, nil - } - var fmtArgsInter InterpretableConstructor - fmtArgsInter, ok = args[1].(InterpretableConstructor) - if !ok { - return inter, nil - } - if fmtArgsInter.Type() != types.ListType { - // don't necessarily return an error since the list may be DynType - return inter, nil - } - formatStr := fmtStrInter.Value().Value().(string) - initVals := fmtArgsInter.InitVals() - - formatCheck := &formatCheck{ - args: initVals, - verifier: verifier, - } - // use a placeholder locale, since locale doesn't affect syntax - _, err := ParseFormatString(formatStr, formatCheck, formatCheck, "en_US") - if err != nil { - return nil, err - } - seenArgs := formatCheck.argsRequested - if len(initVals) > seenArgs { - return nil, fmt.Errorf("too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(initVals)) - } - return inter, nil - } -} - -type formatCheck struct { - args []Interpretable - argsRequested int - curArgIndex int64 - enableCheckArgTypes bool - verifier typeVerifier -} - -func (c *formatCheck) String(arg ref.Val, locale string) (string, error) { - valid, err := verifyString(c.args[c.curArgIndex], c.verifier) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps") - } - return "", nil -} - -func (c *formatCheck) Decimal(arg ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - valid, err := c.verifier(id, types.IntType, types.UintType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("integer clause can only be used on integers") - } - return "", nil -} - -func (c *formatCheck) Fixed(precision *int) func(ref.Val, string) (string, error) { - return func(arg ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - // we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values - valid, err := c.verifier(id, types.DoubleType, types.StringType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("fixed-point clause can only be used on doubles") - } - return "", nil - } -} - -func (c *formatCheck) Scientific(precision *int) func(ref.Val, string) (string, error) { - return func(arg ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - valid, err := c.verifier(id, types.DoubleType, types.StringType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("scientific clause can only be used on doubles") - } - return "", nil - } -} - -func (c *formatCheck) Binary(arg ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - valid, err := c.verifier(id, types.IntType, types.UintType, types.BoolType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("only integers and bools can be formatted as binary") - } - return "", nil -} - -func (c *formatCheck) Hex(useUpper bool) func(ref.Val, string) (string, error) { - return func(arg 
ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - valid, err := c.verifier(id, types.IntType, types.UintType, types.StringType, types.BytesType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("only integers, byte buffers, and strings can be formatted as hex") - } - return "", nil - } -} - -func (c *formatCheck) Octal(arg ref.Val, locale string) (string, error) { - id := c.args[c.curArgIndex].ID() - valid, err := c.verifier(id, types.IntType, types.UintType) - if err != nil { - return "", err - } - if !valid { - return "", errors.New("octal clause can only be used on integers") - } - return "", nil -} - -func (c *formatCheck) Arg(index int64) (ref.Val, error) { - c.argsRequested++ - c.curArgIndex = index - // return a dummy value - this is immediately passed to back to us - // through one of the FormatCallback functions, so anything will do - return types.Int(0), nil -} - -func (c *formatCheck) ArgSize() int64 { - return int64(len(c.args)) -} - -func verifyString(sub Interpretable, verifier typeVerifier) (bool, error) { - subVerified, err := verifier(sub.ID(), - types.ListType, types.MapType, types.IntType, types.UintType, types.DoubleType, - types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType, types.NullType) - if err != nil { - return false, err - } - if !subVerified { - return false, nil - } - con, ok := sub.(InterpretableConstructor) - if ok { - members := con.InitVals() - for _, m := range members { - // recursively verify if we're dealing with a list/map - verified, err := verifyString(m, verifier) - if err != nil { - return false, err - } - if !verified { - return false, nil - } - } - } - return true, nil - -} - -// FormatStringInterpolator is an interface that allows user-defined behavior -// for formatting clause implementations, as well as argument retrieval. -// Each function is expected to support the appropriate types as laid out in -// the string.format documentation, and to return an error if given an inappropriate type. -type FormatStringInterpolator interface { - // String takes a ref.Val and a string representing the current locale identifier - // and returns the Val formatted as a string, or an error if one occurred. - String(ref.Val, string) (string, error) - - // Decimal takes a ref.Val and a string representing the current locale identifier - // and returns the Val formatted as a decimal integer, or an error if one occurred. - Decimal(ref.Val, string) (string, error) - - // Fixed takes an int pointer representing precision (or nil if none was given) and - // returns a function operating in a similar manner to String and Decimal, taking a - // ref.Val and locale and returning the appropriate string. A closure is returned - // so precision can be set without needing an additional function call/configuration. - Fixed(*int) func(ref.Val, string) (string, error) - - // Scientific functions identically to Fixed, except the string returned from the closure - // is expected to be in scientific notation. - Scientific(*int) func(ref.Val, string) (string, error) - - // Binary takes a ref.Val and a string representing the current locale identifier - // and returns the Val formatted as a binary integer, or an error if one occurred. - Binary(ref.Val, string) (string, error) - - // Hex takes a boolean that, if true, indicates the hex string output by the returned - // closure should use uppercase letters for A-F. 
- Hex(bool) func(ref.Val, string) (string, error) - - // Octal takes a ref.Val and a string representing the current locale identifier and - // returns the Val formatted in octal, or an error if one occurred. - Octal(ref.Val, string) (string, error) -} - -// FormatList is an interface that allows user-defined list-like datatypes to be used -// for formatting clause implementations. -type FormatList interface { - // Arg returns the ref.Val at the given index, or an error if one occurred. - Arg(int64) (ref.Val, error) - // ArgSize returns the length of the argument list. - ArgSize() int64 -} - -type clauseImpl func(ref.Val, string) (string, error) - -// ParseFormatString formats a string according to the string.format syntax, taking the clause implementations -// from the provided FormatCallback and the args from the given FormatList. -func ParseFormatString(formatStr string, callback FormatStringInterpolator, list FormatList, locale string) (string, error) { - i := 0 - argIndex := 0 - var builtStr strings.Builder - for i < len(formatStr) { - if formatStr[i] == '%' { - if i+1 < len(formatStr) && formatStr[i+1] == '%' { - err := builtStr.WriteByte('%') - if err != nil { - return "", fmt.Errorf("error writing format string: %w", err) - } - i += 2 - continue - } else { - argAny, err := list.Arg(int64(argIndex)) - if err != nil { - return "", err - } - if i+1 >= len(formatStr) { - return "", errors.New("unexpected end of string") - } - if int64(argIndex) >= list.ArgSize() { - return "", fmt.Errorf("index %d out of range", argIndex) - } - numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale) - if refErr != nil { - return "", refErr - } - _, err = builtStr.WriteString(val) - if err != nil { - return "", fmt.Errorf("error writing format string: %w", err) - } - i += numRead - argIndex++ - } - } else { - err := builtStr.WriteByte(formatStr[i]) - if err != nil { - return "", fmt.Errorf("error writing format string: %w", err) - } - i++ - } - } - return builtStr.String(), nil -} - -// parseAndFormatClause parses the format clause at the start of the given string with val, and returns -// how many characters were consumed and the substituted string form of val, or an error if one occurred. 
-func parseAndFormatClause(formatStr string, val ref.Val, callback FormatStringInterpolator, list FormatList, locale string) (int, string, error) { - i := 1 - read, formatter, err := parseFormattingClause(formatStr[i:], callback) - i += read - if err != nil { - return -1, "", fmt.Errorf("could not parse formatting clause: %s", err) - } - - valStr, err := formatter(val, locale) - if err != nil { - return -1, "", fmt.Errorf("error during formatting: %s", err) - } - return i, valStr, nil -} - -func parseFormattingClause(formatStr string, callback FormatStringInterpolator) (int, clauseImpl, error) { - i := 0 - read, precision, err := parsePrecision(formatStr[i:]) - i += read - if err != nil { - return -1, nil, fmt.Errorf("error while parsing precision: %w", err) - } - r := rune(formatStr[i]) - i++ - switch r { - case 's': - return i, callback.String, nil - case 'd': - return i, callback.Decimal, nil - case 'f': - return i, callback.Fixed(precision), nil - case 'e': - return i, callback.Scientific(precision), nil - case 'b': - return i, callback.Binary, nil - case 'x', 'X': - return i, callback.Hex(unicode.IsUpper(r)), nil - case 'o': - return i, callback.Octal, nil - default: - return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r) - } -} - -func parsePrecision(formatStr string) (int, *int, error) { - i := 0 - if formatStr[i] != '.' { - return i, nil, nil - } - i++ - var buffer strings.Builder - for { - if i >= len(formatStr) { - return -1, nil, errors.New("could not find end of precision specifier") - } - if !isASCIIDigit(rune(formatStr[i])) { - break - } - buffer.WriteByte(formatStr[i]) - i++ - } - precision, err := strconv.Atoi(buffer.String()) - if err != nil { - return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err) - } - return i, &precision, nil -} - -func isASCIIDigit(r rune) bool { - return r <= unicode.MaxASCII && unicode.IsDigit(r) -} diff --git a/constraint/vendor/github.com/google/cel-go/interpreter/interpretable.go b/constraint/vendor/github.com/google/cel-go/interpreter/interpretable.go index c4598dfa7..ebc432e9d 100644 --- a/constraint/vendor/github.com/google/cel-go/interpreter/interpretable.go +++ b/constraint/vendor/github.com/google/cel-go/interpreter/interpretable.go @@ -16,6 +16,7 @@ package interpreter import ( "fmt" + "sync" "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/operators" @@ -96,7 +97,7 @@ type InterpretableCall interface { Args() []Interpretable } -// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map +// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map // or struct. 
type InterpretableConstructor interface { Interpretable @@ -125,7 +126,7 @@ func (test *evalTestOnly) Eval(ctx Activation) ref.Val { val, err := test.Resolve(ctx) // Return an error if the resolve step fails if err != nil { - return types.WrapErr(err) + return types.LabelErrNode(test.id, types.WrapErr(err)) } if optVal, isOpt := val.(*types.Optional); isOpt { return types.Bool(optVal.HasValue()) @@ -231,6 +232,7 @@ func (or *evalOr) Eval(ctx Activation) ref.Val { } else { err = types.MaybeNoSuchOverloadErr(val) } + err = types.LabelErrNode(or.id, err) } } } @@ -273,6 +275,7 @@ func (and *evalAnd) Eval(ctx Activation) ref.Val { } else { err = types.MaybeNoSuchOverloadErr(val) } + err = types.LabelErrNode(and.id, err) } } } @@ -377,7 +380,7 @@ func (zero *evalZeroArity) ID() int64 { // Eval implements the Interpretable interface method. func (zero *evalZeroArity) Eval(ctx Activation) ref.Val { - return zero.impl() + return types.LabelErrNode(zero.id, zero.impl()) } // Function implements the InterpretableCall interface method. @@ -421,14 +424,14 @@ func (un *evalUnary) Eval(ctx Activation) ref.Val { // If the implementation is bound and the argument value has the right traits required to // invoke it, then call the implementation. if un.impl != nil && (un.trait == 0 || (!strict && types.IsUnknownOrError(argVal)) || argVal.Type().HasTrait(un.trait)) { - return un.impl(argVal) + return types.LabelErrNode(un.id, un.impl(argVal)) } // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the // operand (arg0). if argVal.Type().HasTrait(traits.ReceiverType) { - return argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{}) + return types.LabelErrNode(un.id, argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{})) } - return types.NewErr("no such overload: %s", un.function) + return types.NewErrWithNodeID(un.id, "no such overload: %s", un.function) } // Function implements the InterpretableCall interface method. @@ -479,14 +482,14 @@ func (bin *evalBinary) Eval(ctx Activation) ref.Val { // If the implementation is bound and the argument value has the right traits required to // invoke it, then call the implementation. if bin.impl != nil && (bin.trait == 0 || (!strict && types.IsUnknownOrError(lVal)) || lVal.Type().HasTrait(bin.trait)) { - return bin.impl(lVal, rVal) + return types.LabelErrNode(bin.id, bin.impl(lVal, rVal)) } // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the // operand (arg0). if lVal.Type().HasTrait(traits.ReceiverType) { - return lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal}) + return types.LabelErrNode(bin.id, lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal})) } - return types.NewErr("no such overload: %s", bin.function) + return types.NewErrWithNodeID(bin.id, "no such overload: %s", bin.function) } // Function implements the InterpretableCall interface method. @@ -545,14 +548,14 @@ func (fn *evalVarArgs) Eval(ctx Activation) ref.Val { // invoke it, then call the implementation. arg0 := argVals[0] if fn.impl != nil && (fn.trait == 0 || (!strict && types.IsUnknownOrError(arg0)) || arg0.Type().HasTrait(fn.trait)) { - return fn.impl(argVals...) + return types.LabelErrNode(fn.id, fn.impl(argVals...)) } // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the // operand (arg0). 
if arg0.Type().HasTrait(traits.ReceiverType) { - return arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:]) + return types.LabelErrNode(fn.id, arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:])) } - return types.NewErr("no such overload: %s", fn.function) + return types.NewErrWithNodeID(fn.id, "no such overload: %s %d", fn.function, fn.id) } // Function implements the InterpretableCall interface method. @@ -595,7 +598,7 @@ func (l *evalList) Eval(ctx Activation) ref.Val { if l.hasOptionals && l.optionals[i] { optVal, ok := elemVal.(*types.Optional) if !ok { - return invalidOptionalElementInit(elemVal) + return types.LabelErrNode(l.id, invalidOptionalElementInit(elemVal)) } if !optVal.HasValue() { continue @@ -645,7 +648,7 @@ func (m *evalMap) Eval(ctx Activation) ref.Val { if m.hasOptionals && m.optionals[i] { optVal, ok := valVal.(*types.Optional) if !ok { - return invalidOptionalEntryInit(keyVal, valVal) + return types.LabelErrNode(m.id, invalidOptionalEntryInit(keyVal, valVal)) } if !optVal.HasValue() { delete(entries, keyVal) @@ -705,7 +708,7 @@ func (o *evalObj) Eval(ctx Activation) ref.Val { if o.hasOptionals && o.optionals[i] { optVal, ok := val.(*types.Optional) if !ok { - return invalidOptionalEntryInit(field, val) + return types.LabelErrNode(o.id, invalidOptionalEntryInit(field, val)) } if !optVal.HasValue() { delete(fieldVals, field) @@ -715,27 +718,34 @@ func (o *evalObj) Eval(ctx Activation) ref.Val { } fieldVals[field] = val } - return o.provider.NewValue(o.typeName, fieldVals) + return types.LabelErrNode(o.id, o.provider.NewValue(o.typeName, fieldVals)) } +// InitVals implements the InterpretableConstructor interface method. func (o *evalObj) InitVals() []Interpretable { return o.vals } +// Type implements the InterpretableConstructor interface method. func (o *evalObj) Type() ref.Type { - return types.NewObjectTypeValue(o.typeName) + return types.NewObjectType(o.typeName) } type evalFold struct { - id int64 - accuVar string - iterVar string - iterRange Interpretable - accu Interpretable - cond Interpretable - step Interpretable - result Interpretable - adapter types.Adapter + id int64 + accuVar string + iterVar string + iterVar2 string + iterRange Interpretable + accu Interpretable + cond Interpretable + step Interpretable + result Interpretable + adapter types.Adapter + + // note an exhaustive fold will ensure that all branches are evaluated + // when using mutable values, these branches will mutate the final result + // rather than make a throw-away computation. exhaustive bool interruptable bool } @@ -747,64 +757,30 @@ func (fold *evalFold) ID() int64 { // Eval implements the Interpretable interface method. func (fold *evalFold) Eval(ctx Activation) ref.Val { - foldRange := fold.iterRange.Eval(ctx) - if !foldRange.Type().HasTrait(traits.IterableType) { - return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange) - } - // Configure the fold activation with the accumulator initial value. - accuCtx := varActivationPool.Get().(*varActivation) - accuCtx.parent = ctx - accuCtx.name = fold.accuVar - accuCtx.val = fold.accu.Eval(ctx) - // If the accumulator starts as an empty list, then the comprehension will build a list - // so create a mutable list to optimize the cost of the inner loop. 
- l, ok := accuCtx.val.(traits.Lister) - buildingList := false - if !fold.exhaustive && ok && l.Size() == types.IntZero { - buildingList = true - accuCtx.val = types.NewMutableList(fold.adapter) - } - iterCtx := varActivationPool.Get().(*varActivation) - iterCtx.parent = accuCtx - iterCtx.name = fold.iterVar - - interrupted := false - it := foldRange.(traits.Iterable).Iterator() - for it.HasNext() == types.True { - // Modify the iter var in the fold activation. - iterCtx.val = it.Next() + // Initialize the folder interface + f := newFolder(fold, ctx) + defer releaseFolder(f) - // Evaluate the condition, terminate the loop if false. - cond := fold.cond.Eval(iterCtx) - condBool, ok := cond.(types.Bool) - if !fold.exhaustive && ok && condBool != types.True { - break - } - // Evaluate the evaluation step into accu var. - accuCtx.val = fold.step.Eval(iterCtx) - if fold.interruptable { - if stop, found := ctx.ResolveName("#interrupted"); found && stop == true { - interrupted = true - break - } + foldRange := fold.iterRange.Eval(ctx) + if fold.iterVar2 != "" { + var foldable traits.Foldable + switch r := foldRange.(type) { + case traits.Mapper: + foldable = types.ToFoldableMap(r) + case traits.Lister: + foldable = types.ToFoldableList(r) + default: + return types.NewErrWithNodeID(fold.ID(), "unsupported comprehension range type: %T", foldRange) } - } - varActivationPool.Put(iterCtx) - if interrupted { - varActivationPool.Put(accuCtx) - return types.NewErr("operation interrupted") + foldable.Fold(f) + return f.evalResult() } - // Compute the result. - res := fold.result.Eval(accuCtx) - varActivationPool.Put(accuCtx) - // Convert a mutable list to an immutable one, if the comprehension has generated a list as a result. - if !types.IsUnknownOrError(res) && buildingList { - if _, ok := res.(traits.MutableLister); ok { - res = res.(traits.MutableLister).ToImmutableList() - } + if !foldRange.Type().HasTrait(traits.IterableType) { + return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange) } - return res + iterable := foldRange.(traits.Iterable) + return f.foldIterable(iterable) } // Optional Interpretable implementations that specialize, subsume, or extend the core evaluation @@ -921,7 +897,7 @@ func (e *evalWatchConstQual) Qualify(vars Activation, obj any) (any, error) { out, err := e.ConstantQualifier.Qualify(vars, obj) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else { val = e.adapter.NativeToValue(out) } @@ -934,7 +910,7 @@ func (e *evalWatchConstQual) QualifyIfPresent(vars Activation, obj any, presence out, present, err := e.ConstantQualifier.QualifyIfPresent(vars, obj, presenceOnly) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else if out != nil { val = e.adapter.NativeToValue(out) } else if presenceOnly { @@ -964,7 +940,7 @@ func (e *evalWatchAttrQual) Qualify(vars Activation, obj any) (any, error) { out, err := e.Attribute.Qualify(vars, obj) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else { val = e.adapter.NativeToValue(out) } @@ -977,7 +953,7 @@ func (e *evalWatchAttrQual) QualifyIfPresent(vars Activation, obj any, presenceO out, present, err := e.Attribute.QualifyIfPresent(vars, obj, presenceOnly) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else if out != nil { val = 
e.adapter.NativeToValue(out) } else if presenceOnly { @@ -1001,7 +977,7 @@ func (e *evalWatchQual) Qualify(vars Activation, obj any) (any, error) { out, err := e.Qualifier.Qualify(vars, obj) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else { val = e.adapter.NativeToValue(out) } @@ -1014,7 +990,7 @@ func (e *evalWatchQual) QualifyIfPresent(vars Activation, obj any, presenceOnly out, present, err := e.Qualifier.QualifyIfPresent(vars, obj, presenceOnly) var val ref.Val if err != nil { - val = types.WrapErr(err) + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) } else if out != nil { val = e.adapter.NativeToValue(out) } else if presenceOnly { @@ -1157,12 +1133,12 @@ func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val { } if cBool { if tErr != nil { - return types.WrapErr(tErr) + return types.LabelErrNode(cond.id, types.WrapErr(tErr)) } return cond.adapter.NativeToValue(tVal) } if fErr != nil { - return types.WrapErr(fErr) + return types.LabelErrNode(cond.id, types.WrapErr(fErr)) } return cond.adapter.NativeToValue(fVal) } @@ -1202,7 +1178,7 @@ func (a *evalAttr) Adapter() types.Adapter { func (a *evalAttr) Eval(ctx Activation) ref.Val { v, err := a.attr.Resolve(ctx) if err != nil { - return types.WrapErr(err) + return types.LabelErrNode(a.ID(), types.WrapErr(err)) } return a.adapter.NativeToValue(v) } @@ -1260,3 +1236,172 @@ func invalidOptionalEntryInit(field any, value ref.Val) ref.Val { func invalidOptionalElementInit(value ref.Val) ref.Val { return types.NewErr("cannot initialize optional list element from non-optional value %v", value) } + +// newFolder creates or initializes a pooled folder instance. +func newFolder(eval *evalFold, ctx Activation) *folder { + f := folderPool.Get().(*folder) + f.evalFold = eval + f.Activation = ctx + return f +} + +// releaseFolder resets and releases a pooled folder instance. +func releaseFolder(f *folder) { + f.reset() + folderPool.Put(f) +} + +// folder tracks the state associated with folding a list or map with a comprehension v2 style macro. +// +// The folder embeds an interpreter.Activation and Interpretable evalFold value as well as implements +// the traits.Folder interface methods. +// +// Instances of a folder are intended to be pooled to minimize allocation overhead with this temporary +// bookkeeping object which supports lazy evaluation of the accumulator init expression which is useful +// in preserving evaluation order semantics which might otherwise be disrupted through the use of +// cel.bind or cel.@block. +type folder struct { + *evalFold + Activation + + // fold state objects. + accuVal ref.Val + iterVar1Val any + iterVar2Val any + + // bookkeeping flags to modify Activation and fold behaviors. + initialized bool + mutableValue bool + interrupted bool + computeResult bool +} + +func (f *folder) foldIterable(iterable traits.Iterable) ref.Val { + it := iterable.Iterator() + for it.HasNext() == types.True { + f.iterVar1Val = it.Next() + + cond := f.cond.Eval(f) + condBool, ok := cond.(types.Bool) + if f.interrupted || (!f.exhaustive && ok && condBool != types.True) { + return f.evalResult() + } + + // Update the accumulation value and check for eval interuption. 
+		f.accuVal = f.step.Eval(f)
+		f.initialized = true
+		if f.interruptable && checkInterrupt(f.Activation) {
+			f.interrupted = true
+			return f.evalResult()
+		}
+	}
+	return f.evalResult()
+}
+
+// FoldEntry will either fold comprehension v1 style macros if iterVar2 is unset, or comprehension v2 style
+// macros if both the iterVar and iterVar2 are set to non-empty strings.
+func (f *folder) FoldEntry(key, val any) bool {
+	// Default to referencing both values.
+	f.iterVar1Val = key
+	f.iterVar2Val = val
+
+	// Terminate evaluation if evaluation is interrupted or the condition is not true and exhaustive
+	// eval is not enabled.
+	cond := f.cond.Eval(f)
+	condBool, ok := cond.(types.Bool)
+	if f.interrupted || (!f.exhaustive && ok && condBool != types.True) {
+		return false
+	}
+
+	// Update the accumulation value and check for eval interuption.
+	f.accuVal = f.step.Eval(f)
+	f.initialized = true
+	if f.interruptable && checkInterrupt(f.Activation) {
+		f.interrupted = true
+		return false
+	}
+	return true
+}
+
+// ResolveName overrides the default Activation lookup to perform lazy initialization of the accumulator
+// and specialized lookups of iteration values with consideration for whether the final result is being
+// computed and the iteration variables should be ignored.
+func (f *folder) ResolveName(name string) (any, bool) {
+	if name == f.accuVar {
+		if !f.initialized {
+			f.initialized = true
+			initVal := f.accu.Eval(f.Activation)
+			if !f.exhaustive {
+				if l, isList := initVal.(traits.Lister); isList && l.Size() == types.IntZero {
+					initVal = types.NewMutableList(f.adapter)
+					f.mutableValue = true
+				}
+				if m, isMap := initVal.(traits.Mapper); isMap && m.Size() == types.IntZero {
+					initVal = types.NewMutableMap(f.adapter, map[ref.Val]ref.Val{})
+					f.mutableValue = true
+				}
+			}
+			f.accuVal = initVal
+		}
+		return f.accuVal, true
+	}
+	if !f.computeResult {
+		if name == f.iterVar {
+			f.iterVar1Val = f.adapter.NativeToValue(f.iterVar1Val)
+			return f.iterVar1Val, true
+		}
+		if name == f.iterVar2 {
+			f.iterVar2Val = f.adapter.NativeToValue(f.iterVar2Val)
+			return f.iterVar2Val, true
+		}
+	}
+	return f.Activation.ResolveName(name)
+}
+
+// evalResult computes the final result of the fold after all entries have been folded and accumulated.
+func (f *folder) evalResult() ref.Val {
+	f.computeResult = true
+	if f.interrupted {
+		return types.NewErr("operation interrupted")
+	}
+	res := f.result.Eval(f)
+	// Convert a mutable list or map to an immutable one if the comprehension has generated a list or
+	// map as a result.
+	if !types.IsUnknownOrError(res) && f.mutableValue {
+		if _, ok := res.(traits.MutableLister); ok {
+			res = res.(traits.MutableLister).ToImmutableList()
+		}
+		if _, ok := res.(traits.MutableMapper); ok {
+			res = res.(traits.MutableMapper).ToImmutableMap()
+		}
+	}
+	return res
+}
+
+// reset clears any state associated with folder evaluation.
+func (f *folder) reset() {
+	f.evalFold = nil
+	f.Activation = nil
+	f.accuVal = nil
+	f.iterVar1Val = nil
+	f.iterVar2Val = nil
+
+	f.initialized = false
+	f.mutableValue = false
+	f.interrupted = false
+	f.computeResult = false
+}
+
+func checkInterrupt(a Activation) bool {
+	stop, found := a.ResolveName("#interrupted")
+	return found && stop == true
+}
+
+var (
+	// pool of var folders to reduce allocations during folds.
+	folderPool = &sync.Pool{
+		New: func() any {
+			return &folder{}
+		},
+	}
+)
diff --git a/constraint/vendor/github.com/google/cel-go/interpreter/interpreter.go b/constraint/vendor/github.com/google/cel-go/interpreter/interpreter.go
index 00fc74732..0aca74d88 100644
--- a/constraint/vendor/github.com/google/cel-go/interpreter/interpreter.go
+++ b/constraint/vendor/github.com/google/cel-go/interpreter/interpreter.go
@@ -22,19 +22,13 @@ import (
 	"github.com/google/cel-go/common/containers"
 	"github.com/google/cel-go/common/types"
 	"github.com/google/cel-go/common/types/ref"
-
-	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 )
 
 // Interpreter generates a new Interpretable from a checked or unchecked expression.
 type Interpreter interface {
 	// NewInterpretable creates an Interpretable from a checked expression and an
 	// optional list of InterpretableDecorator values.
-	NewInterpretable(checked *ast.CheckedAST, decorators ...InterpretableDecorator) (Interpretable, error)
-
-	// NewUncheckedInterpretable returns an Interpretable from a parsed expression
-	// and an optional list of InterpretableDecorator values.
-	NewUncheckedInterpretable(expr *exprpb.Expr, decorators ...InterpretableDecorator) (Interpretable, error)
+	NewInterpretable(exprAST *ast.AST, decorators ...InterpretableDecorator) (Interpretable, error)
 }
 
 // EvalObserver is a functional interface that accepts an expression id and an observed value.
@@ -177,7 +171,7 @@ func NewInterpreter(dispatcher Dispatcher,
 
 // NewIntepretable implements the Interpreter interface method.
 func (i *exprInterpreter) NewInterpretable(
-	checked *ast.CheckedAST,
+	checked *ast.AST,
 	decorators ...InterpretableDecorator) (Interpretable, error) {
 	p := newPlanner(
 		i.dispatcher,
@@ -187,19 +181,5 @@ func (i *exprInterpreter) NewInterpretable(
 		i.container,
 		checked,
 		decorators...)
-	return p.Plan(checked.Expr)
-}
-
-// NewUncheckedIntepretable implements the Interpreter interface method.
-func (i *exprInterpreter) NewUncheckedInterpretable(
-	expr *exprpb.Expr,
-	decorators ...InterpretableDecorator) (Interpretable, error) {
-	p := newUncheckedPlanner(
-		i.dispatcher,
-		i.provider,
-		i.adapter,
-		i.attrFactory,
-		i.container,
-		decorators...)
-	return p.Plan(expr)
+	return p.Plan(checked.Expr())
 }
diff --git a/constraint/vendor/github.com/google/cel-go/interpreter/planner.go b/constraint/vendor/github.com/google/cel-go/interpreter/planner.go
index 757cd080e..3d918ce87 100644
--- a/constraint/vendor/github.com/google/cel-go/interpreter/planner.go
+++ b/constraint/vendor/github.com/google/cel-go/interpreter/planner.go
@@ -23,15 +23,12 @@ import (
 	"github.com/google/cel-go/common/functions"
 	"github.com/google/cel-go/common/operators"
 	"github.com/google/cel-go/common/types"
-	"github.com/google/cel-go/common/types/ref"
-
-	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
 )
 
 // interpretablePlanner creates an Interpretable evaluation plan from a proto Expr value.
 type interpretablePlanner interface {
 	// Plan generates an Interpretable value (or error) from the input proto Expr.
- Plan(expr *exprpb.Expr) (Interpretable, error) + Plan(expr ast.Expr) (Interpretable, error) } // newPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider, @@ -43,28 +40,7 @@ func newPlanner(disp Dispatcher, adapter types.Adapter, attrFactory AttributeFactory, cont *containers.Container, - checked *ast.CheckedAST, - decorators ...InterpretableDecorator) interpretablePlanner { - return &planner{ - disp: disp, - provider: provider, - adapter: adapter, - attrFactory: attrFactory, - container: cont, - refMap: checked.ReferenceMap, - typeMap: checked.TypeMap, - decorators: decorators, - } -} - -// newUncheckedPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider, -// TypeAdapter, and Container to resolve functions and types at plan time. Namespaces present in -// Select expressions are resolved lazily at evaluation time. -func newUncheckedPlanner(disp Dispatcher, - provider types.Provider, - adapter types.Adapter, - attrFactory AttributeFactory, - cont *containers.Container, + exprAST *ast.AST, decorators ...InterpretableDecorator) interpretablePlanner { return &planner{ disp: disp, @@ -72,8 +48,8 @@ func newUncheckedPlanner(disp Dispatcher, adapter: adapter, attrFactory: attrFactory, container: cont, - refMap: make(map[int64]*ast.ReferenceInfo), - typeMap: make(map[int64]*types.Type), + refMap: exprAST.ReferenceMap(), + typeMap: exprAST.TypeMap(), decorators: decorators, } } @@ -95,22 +71,24 @@ type planner struct { // useful for layering functionality into the evaluation that is not natively understood by CEL, // such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of // repeated expressions. -func (p *planner) Plan(expr *exprpb.Expr) (Interpretable, error) { - switch expr.GetExprKind().(type) { - case *exprpb.Expr_CallExpr: +func (p *planner) Plan(expr ast.Expr) (Interpretable, error) { + switch expr.Kind() { + case ast.CallKind: return p.decorate(p.planCall(expr)) - case *exprpb.Expr_IdentExpr: + case ast.IdentKind: return p.decorate(p.planIdent(expr)) - case *exprpb.Expr_SelectExpr: + case ast.LiteralKind: + return p.decorate(p.planConst(expr)) + case ast.SelectKind: return p.decorate(p.planSelect(expr)) - case *exprpb.Expr_ListExpr: + case ast.ListKind: return p.decorate(p.planCreateList(expr)) - case *exprpb.Expr_StructExpr: + case ast.MapKind: + return p.decorate(p.planCreateMap(expr)) + case ast.StructKind: return p.decorate(p.planCreateStruct(expr)) - case *exprpb.Expr_ComprehensionExpr: + case ast.ComprehensionKind: return p.decorate(p.planComprehension(expr)) - case *exprpb.Expr_ConstExpr: - return p.decorate(p.planConst(expr)) } return nil, fmt.Errorf("unsupported expr: %v", expr) } @@ -132,16 +110,16 @@ func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) { } // planIdent creates an Interpretable that resolves an identifier from an Activation. -func (p *planner) planIdent(expr *exprpb.Expr) (Interpretable, error) { +func (p *planner) planIdent(expr ast.Expr) (Interpretable, error) { // Establish whether the identifier is in the reference map. - if identRef, found := p.refMap[expr.GetId()]; found { - return p.planCheckedIdent(expr.GetId(), identRef) + if identRef, found := p.refMap[expr.ID()]; found { + return p.planCheckedIdent(expr.ID(), identRef) } // Create the possible attribute list for the unresolved reference. 
- ident := expr.GetIdentExpr() + ident := expr.AsIdent() return &evalAttr{ adapter: p.adapter, - attr: p.attrFactory.MaybeAttribute(expr.GetId(), ident.Name), + attr: p.attrFactory.MaybeAttribute(expr.ID(), ident), }, nil } @@ -174,20 +152,20 @@ func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Inter // a) selects a field from a map or proto. // b) creates a field presence test for a select within a has() macro. // c) resolves the select expression to a namespaced identifier. -func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) { +func (p *planner) planSelect(expr ast.Expr) (Interpretable, error) { // If the Select id appears in the reference map from the CheckedExpr proto then it is either // a namespaced identifier or enum value. - if identRef, found := p.refMap[expr.GetId()]; found { - return p.planCheckedIdent(expr.GetId(), identRef) + if identRef, found := p.refMap[expr.ID()]; found { + return p.planCheckedIdent(expr.ID(), identRef) } - sel := expr.GetSelectExpr() + sel := expr.AsSelect() // Plan the operand evaluation. - op, err := p.Plan(sel.GetOperand()) + op, err := p.Plan(sel.Operand()) if err != nil { return nil, err } - opType := p.typeMap[sel.GetOperand().GetId()] + opType := p.typeMap[sel.Operand().ID()] // If the Select was marked TestOnly, this is a presence test. // @@ -211,14 +189,14 @@ func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) { } // Build a qualifier for the attribute. - qual, err := p.attrFactory.NewQualifier(opType, expr.GetId(), sel.GetField(), false) + qual, err := p.attrFactory.NewQualifier(opType, expr.ID(), sel.FieldName(), false) if err != nil { return nil, err } // Modify the attribute to be test-only. - if sel.GetTestOnly() { + if sel.IsTestOnly() { attr = &evalTestOnly{ - id: expr.GetId(), + id: expr.ID(), InterpretableAttribute: attr, } } @@ -230,10 +208,10 @@ func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) { // planCall creates a callable Interpretable while specializing for common functions and invocation // patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in // optimized Interpretable values. -func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) { - call := expr.GetCallExpr() +func (p *planner) planCall(expr ast.Expr) (Interpretable, error) { + call := expr.AsCall() target, fnName, oName := p.resolveFunction(expr) - argCount := len(call.GetArgs()) + argCount := len(call.Args()) var offset int if target != nil { argCount++ @@ -248,7 +226,7 @@ func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) { } args[0] = arg } - for i, argExpr := range call.GetArgs() { + for i, argExpr := range call.Args() { arg, err := p.Plan(argExpr) if err != nil { return nil, err @@ -307,7 +285,7 @@ func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) { } // planCallZero generates a zero-arity callable Interpretable. -func (p *planner) planCallZero(expr *exprpb.Expr, +func (p *planner) planCallZero(expr ast.Expr, function string, overload string, impl *functions.Overload) (Interpretable, error) { @@ -315,7 +293,7 @@ func (p *planner) planCallZero(expr *exprpb.Expr, return nil, fmt.Errorf("no such overload: %s()", function) } return &evalZeroArity{ - id: expr.GetId(), + id: expr.ID(), function: function, overload: overload, impl: impl.Function, @@ -323,7 +301,7 @@ func (p *planner) planCallZero(expr *exprpb.Expr, } // planCallUnary generates a unary callable Interpretable. 
-func (p *planner) planCallUnary(expr *exprpb.Expr, +func (p *planner) planCallUnary(expr ast.Expr, function string, overload string, impl *functions.Overload, @@ -340,7 +318,7 @@ func (p *planner) planCallUnary(expr *exprpb.Expr, nonStrict = impl.NonStrict } return &evalUnary{ - id: expr.GetId(), + id: expr.ID(), function: function, overload: overload, arg: args[0], @@ -351,7 +329,7 @@ func (p *planner) planCallUnary(expr *exprpb.Expr, } // planCallBinary generates a binary callable Interpretable. -func (p *planner) planCallBinary(expr *exprpb.Expr, +func (p *planner) planCallBinary(expr ast.Expr, function string, overload string, impl *functions.Overload, @@ -368,7 +346,7 @@ func (p *planner) planCallBinary(expr *exprpb.Expr, nonStrict = impl.NonStrict } return &evalBinary{ - id: expr.GetId(), + id: expr.ID(), function: function, overload: overload, lhs: args[0], @@ -380,7 +358,7 @@ func (p *planner) planCallBinary(expr *exprpb.Expr, } // planCallVarArgs generates a variable argument callable Interpretable. -func (p *planner) planCallVarArgs(expr *exprpb.Expr, +func (p *planner) planCallVarArgs(expr ast.Expr, function string, overload string, impl *functions.Overload, @@ -397,7 +375,7 @@ func (p *planner) planCallVarArgs(expr *exprpb.Expr, nonStrict = impl.NonStrict } return &evalVarArgs{ - id: expr.GetId(), + id: expr.ID(), function: function, overload: overload, args: args, @@ -408,41 +386,41 @@ func (p *planner) planCallVarArgs(expr *exprpb.Expr, } // planCallEqual generates an equals (==) Interpretable. -func (p *planner) planCallEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { +func (p *planner) planCallEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalEq{ - id: expr.GetId(), + id: expr.ID(), lhs: args[0], rhs: args[1], }, nil } // planCallNotEqual generates a not equals (!=) Interpretable. -func (p *planner) planCallNotEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { +func (p *planner) planCallNotEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalNe{ - id: expr.GetId(), + id: expr.ID(), lhs: args[0], rhs: args[1], }, nil } // planCallLogicalAnd generates a logical and (&&) Interpretable. -func (p *planner) planCallLogicalAnd(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { +func (p *planner) planCallLogicalAnd(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalAnd{ - id: expr.GetId(), + id: expr.ID(), terms: args, }, nil } // planCallLogicalOr generates a logical or (||) Interpretable. -func (p *planner) planCallLogicalOr(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { +func (p *planner) planCallLogicalOr(expr ast.Expr, args []Interpretable) (Interpretable, error) { return &evalOr{ - id: expr.GetId(), + id: expr.ID(), terms: args, }, nil } // planCallConditional generates a conditional / ternary (c ? t : f) Interpretable. 
-func (p *planner) planCallConditional(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { +func (p *planner) planCallConditional(expr ast.Expr, args []Interpretable) (Interpretable, error) { cond := args[0] t := args[1] var tAttr Attribute @@ -464,13 +442,13 @@ func (p *planner) planCallConditional(expr *exprpb.Expr, args []Interpretable) ( return &evalAttr{ adapter: p.adapter, - attr: p.attrFactory.ConditionalAttribute(expr.GetId(), cond, tAttr, fAttr), + attr: p.attrFactory.ConditionalAttribute(expr.ID(), cond, tAttr, fAttr), }, nil } // planCallIndex either extends an attribute with the argument to the index operation, or creates // a relative attribute based on the return of a function call or operation. -func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optional bool) (Interpretable, error) { +func (p *planner) planCallIndex(expr ast.Expr, args []Interpretable, optional bool) (Interpretable, error) { op := args[0] ind := args[1] opType := p.typeMap[op.ID()] @@ -489,11 +467,11 @@ func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optiona var qual Qualifier switch ind := ind.(type) { case InterpretableConst: - qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind.Value(), optional) + qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind.Value(), optional) case InterpretableAttribute: - qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind, optional) + qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind, optional) default: - qual, err = p.relativeAttr(expr.GetId(), ind, optional) + qual, err = p.relativeAttr(expr.ID(), ind, optional) } if err != nil { return nil, err @@ -505,10 +483,10 @@ func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optiona } // planCreateList generates a list construction Interpretable. -func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) { - list := expr.GetListExpr() - optionalIndices := list.GetOptionalIndices() - elements := list.GetElements() +func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) { + list := expr.AsList() + optionalIndices := list.OptionalIndices() + elements := list.Elements() optionals := make([]bool, len(elements)) for _, index := range optionalIndices { if index < 0 || index >= int32(len(elements)) { @@ -525,7 +503,7 @@ func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) { elems[i] = elemVal } return &evalList{ - id: expr.GetId(), + id: expr.ID(), elems: elems, optionals: optionals, hasOptionals: len(optionals) != 0, @@ -534,31 +512,29 @@ func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) { } // planCreateStruct generates a map or object construction Interpretable. 
-func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) { - str := expr.GetStructExpr() - if len(str.MessageName) != 0 { - return p.planCreateObj(expr) - } - entries := str.GetEntries() +func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) { + m := expr.AsMap() + entries := m.Entries() optionals := make([]bool, len(entries)) keys := make([]Interpretable, len(entries)) vals := make([]Interpretable, len(entries)) - for i, entry := range entries { - keyVal, err := p.Plan(entry.GetMapKey()) + for i, e := range entries { + entry := e.AsMapEntry() + keyVal, err := p.Plan(entry.Key()) if err != nil { return nil, err } keys[i] = keyVal - valVal, err := p.Plan(entry.GetValue()) + valVal, err := p.Plan(entry.Value()) if err != nil { return nil, err } vals[i] = valVal - optionals[i] = entry.GetOptionalEntry() + optionals[i] = entry.IsOptional() } return &evalMap{ - id: expr.GetId(), + id: expr.ID(), keys: keys, vals: vals, optionals: optionals, @@ -568,27 +544,28 @@ func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) { } // planCreateObj generates an object construction Interpretable. -func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) { - obj := expr.GetStructExpr() - typeName, defined := p.resolveTypeName(obj.GetMessageName()) +func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) { + obj := expr.AsStruct() + typeName, defined := p.resolveTypeName(obj.TypeName()) if !defined { - return nil, fmt.Errorf("unknown type: %s", obj.GetMessageName()) - } - entries := obj.GetEntries() - optionals := make([]bool, len(entries)) - fields := make([]string, len(entries)) - vals := make([]Interpretable, len(entries)) - for i, entry := range entries { - fields[i] = entry.GetFieldKey() - val, err := p.Plan(entry.GetValue()) + return nil, fmt.Errorf("unknown type: %s", obj.TypeName()) + } + objFields := obj.Fields() + optionals := make([]bool, len(objFields)) + fields := make([]string, len(objFields)) + vals := make([]Interpretable, len(objFields)) + for i, f := range objFields { + field := f.AsStructField() + fields[i] = field.Name() + val, err := p.Plan(field.Value()) if err != nil { return nil, err } vals[i] = val - optionals[i] = entry.GetOptionalEntry() + optionals[i] = field.IsOptional() } return &evalObj{ - id: expr.GetId(), + id: expr.ID(), typeName: typeName, fields: fields, vals: vals, @@ -599,33 +576,34 @@ func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) { } // planComprehension generates an Interpretable fold operation. 
-func (p *planner) planComprehension(expr *exprpb.Expr) (Interpretable, error) { - fold := expr.GetComprehensionExpr() - accu, err := p.Plan(fold.GetAccuInit()) +func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) { + fold := expr.AsComprehension() + accu, err := p.Plan(fold.AccuInit()) if err != nil { return nil, err } - iterRange, err := p.Plan(fold.GetIterRange()) + iterRange, err := p.Plan(fold.IterRange()) if err != nil { return nil, err } - cond, err := p.Plan(fold.GetLoopCondition()) + cond, err := p.Plan(fold.LoopCondition()) if err != nil { return nil, err } - step, err := p.Plan(fold.GetLoopStep()) + step, err := p.Plan(fold.LoopStep()) if err != nil { return nil, err } - result, err := p.Plan(fold.GetResult()) + result, err := p.Plan(fold.Result()) if err != nil { return nil, err } return &evalFold{ - id: expr.GetId(), - accuVar: fold.AccuVar, + id: expr.ID(), + accuVar: fold.AccuVar(), accu: accu, - iterVar: fold.IterVar, + iterVar: fold.IterVar(), + iterVar2: fold.IterVar2(), iterRange: iterRange, cond: cond, step: step, @@ -635,37 +613,8 @@ func (p *planner) planComprehension(expr *exprpb.Expr) (Interpretable, error) { } // planConst generates a constant valued Interpretable. -func (p *planner) planConst(expr *exprpb.Expr) (Interpretable, error) { - val, err := p.constValue(expr.GetConstExpr()) - if err != nil { - return nil, err - } - return NewConstValue(expr.GetId(), val), nil -} - -// constValue converts a proto Constant value to a ref.Val. -func (p *planner) constValue(c *exprpb.Constant) (ref.Val, error) { - switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - return p.adapter.NativeToValue(c.GetBoolValue()), nil - case *exprpb.Constant_BytesValue: - return p.adapter.NativeToValue(c.GetBytesValue()), nil - case *exprpb.Constant_DoubleValue: - return p.adapter.NativeToValue(c.GetDoubleValue()), nil - case *exprpb.Constant_DurationValue: - return p.adapter.NativeToValue(c.GetDurationValue().AsDuration()), nil - case *exprpb.Constant_Int64Value: - return p.adapter.NativeToValue(c.GetInt64Value()), nil - case *exprpb.Constant_NullValue: - return p.adapter.NativeToValue(c.GetNullValue()), nil - case *exprpb.Constant_StringValue: - return p.adapter.NativeToValue(c.GetStringValue()), nil - case *exprpb.Constant_TimestampValue: - return p.adapter.NativeToValue(c.GetTimestampValue().AsTime()), nil - case *exprpb.Constant_Uint64Value: - return p.adapter.NativeToValue(c.GetUint64Value()), nil - } - return nil, fmt.Errorf("unknown constant type: %v", c) +func (p *planner) planConst(expr ast.Expr) (Interpretable, error) { + return NewConstValue(expr.ID(), expr.AsLiteral()), nil } // resolveTypeName takes a qualified string constructed at parse time, applies the proto @@ -687,17 +636,20 @@ func (p *planner) resolveTypeName(typeName string) (string, bool) { // - The target expression may only consist of ident and select expressions. // - The function is declared in the environment using its fully-qualified name. // - The fully-qualified function name matches the string serialized target value. -func (p *planner) resolveFunction(expr *exprpb.Expr) (*exprpb.Expr, string, string) { +func (p *planner) resolveFunction(expr ast.Expr) (ast.Expr, string, string) { // Note: similar logic exists within the `checker/checker.go`. If making changes here // please consider the impact on checker.go and consolidate implementations or mirror code // as appropriate. 
- call := expr.GetCallExpr() - target := call.GetTarget() - fnName := call.GetFunction() + call := expr.AsCall() + var target ast.Expr = nil + if call.IsMemberFunction() { + target = call.Target() + } + fnName := call.FunctionName() // Checked expressions always have a reference map entry, and _should_ have the fully qualified // function name as the fnName value. - oRef, hasOverload := p.refMap[expr.GetId()] + oRef, hasOverload := p.refMap[expr.ID()] if hasOverload { if len(oRef.OverloadIDs) == 1 { return target, fnName, oRef.OverloadIDs[0] @@ -771,16 +723,30 @@ func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (Interpre // toQualifiedName converts an expression AST into a qualified name if possible, with a boolean // 'found' value that indicates if the conversion is successful. -func (p *planner) toQualifiedName(operand *exprpb.Expr) (string, bool) { +func (p *planner) toQualifiedName(operand ast.Expr) (string, bool) { // If the checker identified the expression as an attribute by the type-checker, then it can't // possibly be part of qualified name in a namespace. - _, isAttr := p.refMap[operand.GetId()] + _, isAttr := p.refMap[operand.ID()] if isAttr { return "", false } // Since functions cannot be both namespaced and receiver functions, if the operand is not an // qualified variable name, return the (possibly) qualified name given the expressions. - return containers.ToQualifiedName(operand) + switch operand.Kind() { + case ast.IdentKind: + id := operand.AsIdent() + return id, true + case ast.SelectKind: + sel := operand.AsSelect() + // Test only expressions are not valid as qualified names. + if sel.IsTestOnly() { + return "", false + } + if qual, found := p.toQualifiedName(sel.Operand()); found { + return qual + "." + sel.FieldName(), true + } + } + return "", false } func stripLeadingDot(name string) string { diff --git a/constraint/vendor/github.com/google/cel-go/interpreter/prune.go b/constraint/vendor/github.com/google/cel-go/interpreter/prune.go index b8834b1cb..410d80dc4 100644 --- a/constraint/vendor/github.com/google/cel-go/interpreter/prune.go +++ b/constraint/vendor/github.com/google/cel-go/interpreter/prune.go @@ -15,19 +15,18 @@ package interpreter import ( + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - structpb "google.golang.org/protobuf/types/known/structpb" ) type astPruner struct { - expr *exprpb.Expr - macroCalls map[int64]*exprpb.Expr + ast.ExprFactory + expr ast.Expr + macroCalls map[int64]ast.Expr state EvalState nextExprID int64 } @@ -67,84 +66,44 @@ type astPruner struct { // compiled and constant folded expressions, but is not willing to constant // fold(and thus cache results of) some external calls, then they can prepare // the overloads accordingly. 
-func PruneAst(expr *exprpb.Expr, macroCalls map[int64]*exprpb.Expr, state EvalState) *exprpb.ParsedExpr { +func PruneAst(expr ast.Expr, macroCalls map[int64]ast.Expr, state EvalState) *ast.AST { pruneState := NewEvalState() for _, id := range state.IDs() { v, _ := state.Value(id) pruneState.SetValue(id, v) } pruner := &astPruner{ - expr: expr, - macroCalls: macroCalls, - state: pruneState, - nextExprID: getMaxID(expr)} + ExprFactory: ast.NewExprFactory(), + expr: expr, + macroCalls: macroCalls, + state: pruneState, + nextExprID: getMaxID(expr)} newExpr, _ := pruner.maybePrune(expr) - return &exprpb.ParsedExpr{ - Expr: newExpr, - SourceInfo: &exprpb.SourceInfo{MacroCalls: pruner.macroCalls}, - } -} - -func (p *astPruner) createLiteral(id int64, val *exprpb.Constant) *exprpb.Expr { - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_ConstExpr{ - ConstExpr: val, - }, + newInfo := ast.NewSourceInfo(nil) + for id, call := range pruner.macroCalls { + newInfo.SetMacroCall(id, call) } + return ast.NewAST(newExpr, newInfo) } -func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, bool) { +func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (ast.Expr, bool) { switch v := val.(type) { - case types.Bool: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: bool(v)}}), true - case types.Bytes: + case types.Bool, types.Bytes, types.Double, types.Int, types.Null, types.String, types.Uint: p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: []byte(v)}}), true - case types.Double: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: float64(v)}}), true + return p.NewLiteral(id, val), true case types.Duration: p.state.SetValue(id, val) - durationString := string(v.ConvertToType(types.StringType).(types.String)) - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{ - Function: overloads.TypeConvertDuration, - Args: []*exprpb.Expr{ - p.createLiteral(p.nextID(), - &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: durationString}}), - }, - }, - }, - }, true - case types.Int: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: int64(v)}}), true - case types.Uint: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: uint64(v)}}), true - case types.String: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: string(v)}}), true - case types.Null: - p.state.SetValue(id, val) - return p.createLiteral(id, - &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: v.Value().(structpb.NullValue)}}), true + durationString := v.ConvertToType(types.StringType).(types.String) + return p.NewCall(id, overloads.TypeConvertDuration, p.NewLiteral(p.nextID(), durationString)), true + case types.Timestamp: + timestampString := v.ConvertToType(types.StringType).(types.String) + return p.NewCall(id, overloads.TypeConvertTimestamp, p.NewLiteral(p.nextID(), timestampString)), true } // Attempt to build a list literal. 
if list, isList := val.(traits.Lister); isList { sz := list.Size().(types.Int) - elemExprs := make([]*exprpb.Expr, sz) + elemExprs := make([]ast.Expr, sz) for i := types.Int(0); i < sz; i++ { elem := list.Get(i) if types.IsUnknownOrError(elem) { @@ -157,20 +116,13 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo elemExprs[i] = elemExpr } p.state.SetValue(id, val) - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{ - Elements: elemExprs, - }, - }, - }, true + return p.NewList(id, elemExprs, []int32{}), true } // Create a map literal if possible. if mp, isMap := val.(traits.Mapper); isMap { it := mp.Iterator() - entries := make([]*exprpb.Expr_CreateStruct_Entry, mp.Size().(types.Int)) + entries := make([]ast.EntryExpr, mp.Size().(types.Int)) i := 0 for it.HasNext() != types.False { key := it.Next() @@ -186,25 +138,12 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo if !ok { return nil, false } - entry := &exprpb.Expr_CreateStruct_Entry{ - Id: p.nextID(), - KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{ - MapKey: keyExpr, - }, - Value: valExpr, - } + entry := p.NewMapEntry(p.nextID(), keyExpr, valExpr, false) entries[i] = entry i++ } p.state.SetValue(id, val) - return &exprpb.Expr{ - Id: id, - ExprKind: &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{ - Entries: entries, - }, - }, - }, true + return p.NewMap(id, entries), true } // TODO(issues/377) To construct message literals, the type provider will need to support @@ -212,215 +151,206 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo return nil, false } -func (p *astPruner) maybePruneOptional(elem *exprpb.Expr) (*exprpb.Expr, bool) { - elemVal, found := p.value(elem.GetId()) +func (p *astPruner) maybePruneOptional(elem ast.Expr) (ast.Expr, bool) { + elemVal, found := p.value(elem.ID()) if found && elemVal.Type() == types.OptionalType { opt := elemVal.(*types.Optional) if !opt.HasValue() { return nil, true } - if newElem, pruned := p.maybeCreateLiteral(elem.GetId(), opt.GetValue()); pruned { + if newElem, pruned := p.maybeCreateLiteral(elem.ID(), opt.GetValue()); pruned { return newElem, true } } return elem, false } -func (p *astPruner) maybePruneIn(node *exprpb.Expr) (*exprpb.Expr, bool) { +func (p *astPruner) maybePruneIn(node ast.Expr) (ast.Expr, bool) { // elem in list - call := node.GetCallExpr() - val, exists := p.maybeValue(call.GetArgs()[1].GetId()) + call := node.AsCall() + val, exists := p.maybeValue(call.Args()[1].ID()) if !exists { return nil, false } if sz, ok := val.(traits.Sizer); ok && sz.Size() == types.IntZero { - return p.maybeCreateLiteral(node.GetId(), types.False) + return p.maybeCreateLiteral(node.ID(), types.False) } return nil, false } -func (p *astPruner) maybePruneLogicalNot(node *exprpb.Expr) (*exprpb.Expr, bool) { - call := node.GetCallExpr() - arg := call.GetArgs()[0] - val, exists := p.maybeValue(arg.GetId()) +func (p *astPruner) maybePruneLogicalNot(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + arg := call.Args()[0] + val, exists := p.maybeValue(arg.ID()) if !exists { return nil, false } if b, ok := val.(types.Bool); ok { - return p.maybeCreateLiteral(node.GetId(), !b) + return p.maybeCreateLiteral(node.ID(), !b) } return nil, false } -func (p *astPruner) maybePruneOr(node *exprpb.Expr) (*exprpb.Expr, bool) { - call := node.GetCallExpr() +func (p *astPruner) maybePruneOr(node ast.Expr) (ast.Expr, bool) { + call := 
node.AsCall() // We know result is unknown, so we have at least one unknown arg // and if one side is a known value, we know we can ignore it. - if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists { + if v, exists := p.maybeValue(call.Args()[0].ID()); exists { if v == types.True { - return p.maybeCreateLiteral(node.GetId(), types.True) + return p.maybeCreateLiteral(node.ID(), types.True) } - return call.GetArgs()[1], true + return call.Args()[1], true } - if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists { + if v, exists := p.maybeValue(call.Args()[1].ID()); exists { if v == types.True { - return p.maybeCreateLiteral(node.GetId(), types.True) + return p.maybeCreateLiteral(node.ID(), types.True) } - return call.GetArgs()[0], true + return call.Args()[0], true } return nil, false } -func (p *astPruner) maybePruneAnd(node *exprpb.Expr) (*exprpb.Expr, bool) { - call := node.GetCallExpr() +func (p *astPruner) maybePruneAnd(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() // We know result is unknown, so we have at least one unknown arg // and if one side is a known value, we know we can ignore it. - if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists { + if v, exists := p.maybeValue(call.Args()[0].ID()); exists { if v == types.False { - return p.maybeCreateLiteral(node.GetId(), types.False) + return p.maybeCreateLiteral(node.ID(), types.False) } - return call.GetArgs()[1], true + return call.Args()[1], true } - if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists { + if v, exists := p.maybeValue(call.Args()[1].ID()); exists { if v == types.False { - return p.maybeCreateLiteral(node.GetId(), types.False) + return p.maybeCreateLiteral(node.ID(), types.False) } - return call.GetArgs()[0], true + return call.Args()[0], true } return nil, false } -func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) { - call := node.GetCallExpr() - cond, exists := p.maybeValue(call.GetArgs()[0].GetId()) +func (p *astPruner) maybePruneConditional(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + cond, exists := p.maybeValue(call.Args()[0].ID()) if !exists { return nil, false } if cond.Value().(bool) { - return call.GetArgs()[1], true + return call.Args()[1], true } - return call.GetArgs()[2], true + return call.Args()[2], true } -func (p *astPruner) maybePruneFunction(node *exprpb.Expr) (*exprpb.Expr, bool) { - if _, exists := p.value(node.GetId()); !exists { +func (p *astPruner) maybePruneFunction(node ast.Expr) (ast.Expr, bool) { + if _, exists := p.value(node.ID()); !exists { return nil, false } - call := node.GetCallExpr() - if call.Function == operators.LogicalOr { + call := node.AsCall() + if call.FunctionName() == operators.LogicalOr { return p.maybePruneOr(node) } - if call.Function == operators.LogicalAnd { + if call.FunctionName() == operators.LogicalAnd { return p.maybePruneAnd(node) } - if call.Function == operators.Conditional { + if call.FunctionName() == operators.Conditional { return p.maybePruneConditional(node) } - if call.Function == operators.In { + if call.FunctionName() == operators.In { return p.maybePruneIn(node) } - if call.Function == operators.LogicalNot { + if call.FunctionName() == operators.LogicalNot { return p.maybePruneLogicalNot(node) } return nil, false } -func (p *astPruner) maybePrune(node *exprpb.Expr) (*exprpb.Expr, bool) { +func (p *astPruner) maybePrune(node ast.Expr) (ast.Expr, bool) { return p.prune(node) } -func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { 
+func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) { if node == nil { return node, false } - val, valueExists := p.maybeValue(node.GetId()) + val, valueExists := p.maybeValue(node.ID()) if valueExists { - if newNode, ok := p.maybeCreateLiteral(node.GetId(), val); ok { - delete(p.macroCalls, node.GetId()) + if newNode, ok := p.maybeCreateLiteral(node.ID(), val); ok { + delete(p.macroCalls, node.ID()) return newNode, true } } - if macro, found := p.macroCalls[node.GetId()]; found { + if macro, found := p.macroCalls[node.ID()]; found { // Ensure that intermediate values for the comprehension are cleared during pruning - compre := node.GetComprehensionExpr() - if compre != nil { - visit(macro, clearIterVarVisitor(compre.IterVar, p.state)) + if node.Kind() == ast.ComprehensionKind { + compre := node.AsComprehension() + visit(macro, clearIterVarVisitor(compre.IterVar(), p.state)) } // prune the expression in terms of the macro call instead of the expanded form. if newMacro, pruned := p.prune(macro); pruned { - p.macroCalls[node.GetId()] = newMacro + p.macroCalls[node.ID()] = newMacro } } // We have either an unknown/error value, or something we don't want to // transform, or expression was not evaluated. If possible, drill down // more. - switch node.GetExprKind().(type) { - case *exprpb.Expr_SelectExpr: - if operand, pruned := p.maybePrune(node.GetSelectExpr().GetOperand()); pruned { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_SelectExpr{ - SelectExpr: &exprpb.Expr_Select{ - Operand: operand, - Field: node.GetSelectExpr().GetField(), - TestOnly: node.GetSelectExpr().GetTestOnly(), - }, - }, - }, true - } - case *exprpb.Expr_CallExpr: - var prunedCall bool - call := node.GetCallExpr() - args := call.GetArgs() - newArgs := make([]*exprpb.Expr, len(args)) - newCall := &exprpb.Expr_Call{ - Function: call.GetFunction(), - Target: call.GetTarget(), - Args: newArgs, - } - for i, arg := range args { - newArgs[i] = arg - if newArg, prunedArg := p.maybePrune(arg); prunedArg { - prunedCall = true - newArgs[i] = newArg + switch node.Kind() { + case ast.SelectKind: + sel := node.AsSelect() + if operand, isPruned := p.maybePrune(sel.Operand()); isPruned { + if sel.IsTestOnly() { + return p.NewPresenceTest(node.ID(), operand, sel.FieldName()), true } + return p.NewSelect(node.ID(), operand, sel.FieldName()), true } - if newTarget, prunedTarget := p.maybePrune(call.GetTarget()); prunedTarget { - prunedCall = true - newCall.Target = newTarget + case ast.CallKind: + argsPruned := false + call := node.AsCall() + args := call.Args() + newArgs := make([]ast.Expr, len(args)) + for i, a := range args { + newArgs[i] = a + if arg, isPruned := p.maybePrune(a); isPruned { + argsPruned = true + newArgs[i] = arg + } } - newNode := &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_CallExpr{ - CallExpr: newCall, - }, + if !call.IsMemberFunction() { + newCall := p.NewCall(node.ID(), call.FunctionName(), newArgs...) + if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned { + return prunedCall, true + } + return newCall, argsPruned } - if newExpr, pruned := p.maybePruneFunction(newNode); pruned { - newExpr, _ = p.maybePrune(newExpr) - return newExpr, true + newTarget := call.Target() + targetPruned := false + if prunedTarget, isPruned := p.maybePrune(call.Target()); isPruned { + targetPruned = true + newTarget = prunedTarget } - if prunedCall { - return newNode, true + newCall := p.NewMemberCall(node.ID(), call.FunctionName(), newTarget, newArgs...) 
+ if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned { + return prunedCall, true } - case *exprpb.Expr_ListExpr: - elems := node.GetListExpr().GetElements() - optIndices := node.GetListExpr().GetOptionalIndices() + return newCall, targetPruned || argsPruned + case ast.ListKind: + l := node.AsList() + elems := l.Elements() + optIndices := l.OptionalIndices() optIndexMap := map[int32]bool{} for _, i := range optIndices { optIndexMap[i] = true } newOptIndexMap := make(map[int32]bool, len(optIndexMap)) - newElems := make([]*exprpb.Expr, 0, len(elems)) - var prunedList bool - + newElems := make([]ast.Expr, 0, len(elems)) + var listPruned bool prunedIdx := 0 for i, elem := range elems { _, isOpt := optIndexMap[int32(i)] if isOpt { newElem, pruned := p.maybePruneOptional(elem) if pruned { - prunedList = true + listPruned = true if newElem != nil { newElems = append(newElems, newElem) prunedIdx++ @@ -431,7 +361,7 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { } if newElem, prunedElem := p.maybePrune(elem); prunedElem { newElems = append(newElems, newElem) - prunedList = true + listPruned = true } else { newElems = append(newElems, elem) } @@ -443,76 +373,64 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { optIndices[idx] = i idx++ } - if prunedList { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{ - Elements: newElems, - OptionalIndices: optIndices, - }, - }, - }, true + if listPruned { + return p.NewList(node.ID(), newElems, optIndices), true } - case *exprpb.Expr_StructExpr: - var prunedStruct bool - entries := node.GetStructExpr().GetEntries() - messageType := node.GetStructExpr().GetMessageName() - newEntries := make([]*exprpb.Expr_CreateStruct_Entry, len(entries)) + case ast.MapKind: + var mapPruned bool + m := node.AsMap() + entries := m.Entries() + newEntries := make([]ast.EntryExpr, len(entries)) for i, entry := range entries { newEntries[i] = entry - newKey, prunedKey := p.maybePrune(entry.GetMapKey()) - newValue, prunedValue := p.maybePrune(entry.GetValue()) - if !prunedKey && !prunedValue { + e := entry.AsMapEntry() + newKey, keyPruned := p.maybePrune(e.Key()) + newValue, valuePruned := p.maybePrune(e.Value()) + if !keyPruned && !valuePruned { continue } - prunedStruct = true - newEntry := &exprpb.Expr_CreateStruct_Entry{ - Value: newValue, - } - if messageType != "" { - newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_FieldKey{ - FieldKey: entry.GetFieldKey(), - } - } else { - newEntry.KeyKind = &exprpb.Expr_CreateStruct_Entry_MapKey{ - MapKey: newKey, - } - } - newEntry.OptionalEntry = entry.GetOptionalEntry() + mapPruned = true + newEntry := p.NewMapEntry(entry.ID(), newKey, newValue, e.IsOptional()) newEntries[i] = newEntry } - if prunedStruct { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{ - MessageName: messageType, - Entries: newEntries, - }, - }, - }, true + if mapPruned { + return p.NewMap(node.ID(), newEntries), true } - case *exprpb.Expr_ComprehensionExpr: - compre := node.GetComprehensionExpr() + case ast.StructKind: + var structPruned bool + obj := node.AsStruct() + fields := obj.Fields() + newFields := make([]ast.EntryExpr, len(fields)) + for i, field := range fields { + newFields[i] = field + f := field.AsStructField() + newValue, prunedValue := p.maybePrune(f.Value()) + if !prunedValue { + continue + } + structPruned = true + newEntry := p.NewStructField(field.ID(), 
f.Name(), newValue, f.IsOptional()) + newFields[i] = newEntry + } + if structPruned { + return p.NewStruct(node.ID(), obj.TypeName(), newFields), true + } + case ast.ComprehensionKind: + compre := node.AsComprehension() // Only the range of the comprehension is pruned since the state tracking only records // the last iteration of the comprehension and not each step in the evaluation which // means that the any residuals computed in between might be inaccurate. - if newRange, pruned := p.maybePrune(compre.GetIterRange()); pruned { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_ComprehensionExpr{ - ComprehensionExpr: &exprpb.Expr_Comprehension{ - IterVar: compre.GetIterVar(), - IterRange: newRange, - AccuVar: compre.GetAccuVar(), - AccuInit: compre.GetAccuInit(), - LoopCondition: compre.GetLoopCondition(), - LoopStep: compre.GetLoopStep(), - Result: compre.GetResult(), - }, - }, - }, true + if newRange, pruned := p.maybePrune(compre.IterRange()); pruned { + return p.NewComprehension( + node.ID(), + newRange, + compre.IterVar(), + compre.AccuVar(), + compre.AccuInit(), + compre.LoopCondition(), + compre.LoopStep(), + compre.Result(), + ), true } } return node, false @@ -539,12 +457,12 @@ func (p *astPruner) nextID() int64 { type astVisitor struct { // visitEntry is called on every expr node, including those within a map/struct entry. - visitExpr func(expr *exprpb.Expr) + visitExpr func(expr ast.Expr) // visitEntry is called before entering the key, value of a map/struct entry. - visitEntry func(entry *exprpb.Expr_CreateStruct_Entry) + visitEntry func(entry ast.EntryExpr) } -func getMaxID(expr *exprpb.Expr) int64 { +func getMaxID(expr ast.Expr) int64 { maxID := int64(1) visit(expr, maxIDVisitor(&maxID)) return maxID @@ -552,10 +470,9 @@ func getMaxID(expr *exprpb.Expr) int64 { func clearIterVarVisitor(varName string, state EvalState) astVisitor { return astVisitor{ - visitExpr: func(e *exprpb.Expr) { - ident := e.GetIdentExpr() - if ident != nil && ident.GetName() == varName { - state.SetValue(e.GetId(), nil) + visitExpr: func(e ast.Expr) { + if e.Kind() == ast.IdentKind && e.AsIdent() == varName { + state.SetValue(e.ID(), nil) } }, } @@ -563,56 +480,63 @@ func clearIterVarVisitor(varName string, state EvalState) astVisitor { func maxIDVisitor(maxID *int64) astVisitor { return astVisitor{ - visitExpr: func(e *exprpb.Expr) { - if e.GetId() >= *maxID { - *maxID = e.GetId() + 1 + visitExpr: func(e ast.Expr) { + if e.ID() >= *maxID { + *maxID = e.ID() + 1 } }, - visitEntry: func(e *exprpb.Expr_CreateStruct_Entry) { - if e.GetId() >= *maxID { - *maxID = e.GetId() + 1 + visitEntry: func(e ast.EntryExpr) { + if e.ID() >= *maxID { + *maxID = e.ID() + 1 } }, } } -func visit(expr *exprpb.Expr, visitor astVisitor) { - exprs := []*exprpb.Expr{expr} +func visit(expr ast.Expr, visitor astVisitor) { + exprs := []ast.Expr{expr} for len(exprs) != 0 { e := exprs[0] if visitor.visitExpr != nil { visitor.visitExpr(e) } exprs = exprs[1:] - switch e.GetExprKind().(type) { - case *exprpb.Expr_SelectExpr: - exprs = append(exprs, e.GetSelectExpr().GetOperand()) - case *exprpb.Expr_CallExpr: - call := e.GetCallExpr() - if call.GetTarget() != nil { - exprs = append(exprs, call.GetTarget()) + switch e.Kind() { + case ast.SelectKind: + exprs = append(exprs, e.AsSelect().Operand()) + case ast.CallKind: + call := e.AsCall() + if call.Target() != nil { + exprs = append(exprs, call.Target()) } - exprs = append(exprs, call.GetArgs()...) 
- case *exprpb.Expr_ComprehensionExpr: - compre := e.GetComprehensionExpr() + exprs = append(exprs, call.Args()...) + case ast.ComprehensionKind: + compre := e.AsComprehension() exprs = append(exprs, - compre.GetIterRange(), - compre.GetAccuInit(), - compre.GetLoopCondition(), - compre.GetLoopStep(), - compre.GetResult()) - case *exprpb.Expr_ListExpr: - list := e.GetListExpr() - exprs = append(exprs, list.GetElements()...) - case *exprpb.Expr_StructExpr: - for _, entry := range e.GetStructExpr().GetEntries() { + compre.IterRange(), + compre.AccuInit(), + compre.LoopCondition(), + compre.LoopStep(), + compre.Result()) + case ast.ListKind: + list := e.AsList() + exprs = append(exprs, list.Elements()...) + case ast.MapKind: + for _, entry := range e.AsMap().Entries() { + e := entry.AsMapEntry() if visitor.visitEntry != nil { visitor.visitEntry(entry) } - if entry.GetMapKey() != nil { - exprs = append(exprs, entry.GetMapKey()) + exprs = append(exprs, e.Key()) + exprs = append(exprs, e.Value()) + } + case ast.StructKind: + for _, entry := range e.AsStruct().Fields() { + f := entry.AsStructField() + if visitor.visitEntry != nil { + visitor.visitEntry(entry) } - exprs = append(exprs, entry.GetValue()) + exprs = append(exprs, f.Value()) } } } diff --git a/constraint/vendor/github.com/google/cel-go/parser/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/parser/BUILD.bazel index 67ecc9554..97bc9bd43 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/parser/BUILD.bazel @@ -20,10 +20,13 @@ go_library( visibility = ["//visibility:public"], deps = [ "//common:go_default_library", + "//common/ast:go_default_library", "//common/operators:go_default_library", "//common/runes:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", "//parser/gen:go_default_library", - "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", + "@com_github_antlr4_go_antlr_v4//:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", @@ -43,10 +46,12 @@ go_test( ":go_default_library", ], deps = [ + "//common/ast:go_default_library", "//common/debug:go_default_library", + "//common/types:go_default_library", "//parser/gen:go_default_library", "//test:go_default_library", - "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", + "@com_github_antlr4_go_antlr_v4//:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//testing/protocmp:go_default_library", ], diff --git a/constraint/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/constraint/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel index 654d1de7a..3efed87b7 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel +++ b/constraint/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel @@ -1,7 +1,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") package( - default_visibility = ["//parser:__subpackages__"], + default_visibility = ["//:__subpackages__"], licenses = ["notice"], # Apache 2.0 ) @@ -21,6 +21,6 @@ go_library( ], importpath = "github.com/google/cel-go/parser/gen", deps = [ - "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library", + "@com_github_antlr4_go_antlr_v4//:go_default_library", ], ) diff --git 
a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go index 0247f470a..c49d03867 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go +++ b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go @@ -1,7 +1,7 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr/v4" +import "github.com/antlr4-go/antlr/v4" // BaseCELListener is a complete listener for a parse tree produced by CELParser. type BaseCELListener struct{} diff --git a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go index 52a7f4dc5..b2c0783d3 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go +++ b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go @@ -1,7 +1,8 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr/v4" +import "github.com/antlr4-go/antlr/v4" + type BaseCELVisitor struct { *antlr.BaseParseTreeVisitor diff --git a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go index 98ddc06d0..e026cc46f 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go +++ b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go @@ -1,280 +1,278 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. 
package gen - import ( "fmt" - "sync" + "sync" "unicode" - - "github.com/antlr/antlr4/runtime/Go/antlr/v4" + "github.com/antlr4-go/antlr/v4" ) - // Suppress unused import error var _ = fmt.Printf var _ = sync.Once{} var _ = unicode.IsLetter + type CELLexer struct { *antlr.BaseLexer channelNames []string - modeNames []string + modeNames []string // TODO: EOF string } -var cellexerLexerStaticData struct { - once sync.Once - serializedATN []int32 - channelNames []string - modeNames []string - literalNames []string - symbolicNames []string - ruleNames []string - predictionContextCache *antlr.PredictionContextCache - atn *antlr.ATN - decisionToDFA []*antlr.DFA +var CELLexerLexerStaticData struct { + once sync.Once + serializedATN []int32 + ChannelNames []string + ModeNames []string + LiteralNames []string + SymbolicNames []string + RuleNames []string + PredictionContextCache *antlr.PredictionContextCache + atn *antlr.ATN + decisionToDFA []*antlr.DFA } func cellexerLexerInit() { - staticData := &cellexerLexerStaticData - staticData.channelNames = []string{ - "DEFAULT_TOKEN_CHANNEL", "HIDDEN", - } - staticData.modeNames = []string{ - "DEFAULT_MODE", - } - staticData.literalNames = []string{ - "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", - "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", - "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", - } - staticData.symbolicNames = []string{ - "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", - "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", - "STRING", "BYTES", "IDENTIFIER", - } - staticData.ruleNames = []string{ - "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", - "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW", - "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ", - "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", - "BYTES", "IDENTIFIER", - } - staticData.predictionContextCache = antlr.NewPredictionContextCache() - staticData.serializedATN = []int32{ - 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, - 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, - 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, - 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, - 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, - 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, - 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, - 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, - 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, - 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, - 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, - 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, - 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 
18, 1, - 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, - 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, - 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, - 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, - 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, - 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, - 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, - 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, - 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, - 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, - 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, - 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, - 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, - 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, - 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, - 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, - 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, - 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, - 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, - 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, - 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, - 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, - 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, - 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, - 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, - 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, - 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, - 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, - 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, - 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, - 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, - 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, - 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, - 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, - 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, - 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, - 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, - 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, - 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, - 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, - 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, - 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, - 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, - 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, - 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, - 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, - 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, - 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 
33, 1, 0, 0, 0, 0, 35, 1, 0, 0, - 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, - 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, - 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, - 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, - 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, - 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, - 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, - 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, - 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, - 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, - 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, - 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, - 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, - 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, - 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, - 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, - 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, - 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, - 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, - 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, - 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, - 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, - 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, - 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, - 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, - 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, - 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, - 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, - 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, - 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, - 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, - 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, - 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, - 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, - 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, - 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, - 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, - 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, - 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, - 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, - 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, - 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, - 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, - 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, - 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, - 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, - 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, - 192, 
3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, - 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, - 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, - 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, - 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, - 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, - 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, - 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, - 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, - 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, - 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, - 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, - 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, - 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, - 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, - 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, - 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, - 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, - 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, - 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, - 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, - 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, - 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, - 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, - 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, - 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, - 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, - 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, - 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, - 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, - 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, - 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, - 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, - 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, - 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, - 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, - 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, - 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, - 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, - 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, - 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, - 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, - 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, - 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, - 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, - 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, - 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, - 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, - 0, 
0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, - 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, - 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, - 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, - 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, - 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, - 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, - 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, - 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, - 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, - 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, - 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, - 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, - 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, - 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, - 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, - 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, - 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, - 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, - 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, - 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, - 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, - 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, - 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, - 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, - 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, - 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, - 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, - 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, - 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, - 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, - 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, - 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, - 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, - 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, - 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, - 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, - 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, - 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, - 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, - 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, - 413, 418, 420, 1, 0, 1, 0, - } - deserializer := antlr.NewATNDeserializer(nil) - staticData.atn = deserializer.Deserialize(staticData.serializedATN) - atn := staticData.atn - staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) - decisionToDFA := staticData.decisionToDFA - for index, state := range atn.DecisionToState { - decisionToDFA[index] = antlr.NewDFA(state, index) - } + staticData := &CELLexerLexerStaticData + staticData.ChannelNames = []string{ + 
"DEFAULT_TOKEN_CHANNEL", "HIDDEN", + } + staticData.ModeNames = []string{ + "DEFAULT_MODE", + } + staticData.LiteralNames = []string{ + "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", + "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", + "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", + } + staticData.SymbolicNames = []string{ + "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", + "STRING", "BYTES", "IDENTIFIER", + } + staticData.RuleNames = []string{ + "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW", + "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ", + "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", + "BYTES", "IDENTIFIER", + } + staticData.PredictionContextCache = antlr.NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, + 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, + 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, + 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, + 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, + 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, + 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, + 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, + 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, + 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, + 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, + 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, + 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, + 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, + 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, + 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, + 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, + 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, + 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, + 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, + 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, + 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, + 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, + 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, + 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, + 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, + 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, + 41, 3, 41, 
273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, + 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, + 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, + 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, + 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, + 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, + 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, + 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, + 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, + 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, + 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, + 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, + 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, + 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, + 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, + 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, + 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, + 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, + 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, + 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, + 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, + 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, + 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, + 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, + 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, + 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, + 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, + 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, + 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, + 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, + 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, + 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, + 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, + 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, + 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, + 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, + 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, + 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, + 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, + 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, + 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, + 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, + 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, + 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, + 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, + 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, + 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, + 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 
65, 183, + 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, + 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, + 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, + 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, + 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, + 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, + 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, + 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, + 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, + 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, + 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, + 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, + 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, + 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, + 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, + 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, + 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, + 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, + 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, + 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, + 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, + 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, + 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, + 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, + 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, + 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, + 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, + 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, + 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, + 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, + 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, + 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, + 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, + 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, + 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, + 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, + 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, + 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, + 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, + 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, + 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, + 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, + 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, + 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, + 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, + 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, + 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, + 227, 226, 1, 0, 0, 0, 228, 229, 1, 
0, 0, 0, 229, 227, 1, 0, 0, 0, 229, + 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, + 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, + 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, + 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, + 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, + 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, + 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, + 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, + 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, + 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, + 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, + 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, + 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, + 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, + 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, + 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, + 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, + 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, + 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, + 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, + 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, + 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, + 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, + 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, + 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, + 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, + 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, + 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, + 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, + 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, + 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, + 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, + 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, + 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, + 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, + 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, + 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, + 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, + 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, + 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, + 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, + 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, + 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, + 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, + 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, + 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, + 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, + 34, 0, 0, 358, 360, 8, 13, 0, 0, 
359, 358, 1, 0, 0, 0, 360, 363, 1, 0, + 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, + 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, + 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, + 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, + 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, + 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, + 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, + 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, + 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, + 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, + 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, + 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, + 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, + 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, + 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, + 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, + 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, + 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, + 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, + 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, + 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, + 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, + 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, + 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, + 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, + 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, + 413, 418, 420, 1, 0, 1, 0, +} + deserializer := antlr.NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(state, index) + } } // CELLexerInit initializes any static state used to implement CELLexer. By default the @@ -282,22 +280,22 @@ func cellexerLexerInit() { // NewCELLexer(). You can call this function if you wish to initialize the static state ahead // of time. func CELLexerInit() { - staticData := &cellexerLexerStaticData - staticData.once.Do(cellexerLexerInit) + staticData := &CELLexerLexerStaticData + staticData.once.Do(cellexerLexerInit) } // NewCELLexer produces a new lexer instance for the optional input antlr.CharStream. 
func NewCELLexer(input antlr.CharStream) *CELLexer { - CELLexerInit() + CELLexerInit() l := new(CELLexer) l.BaseLexer = antlr.NewBaseLexer(input) - staticData := &cellexerLexerStaticData - l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) - l.channelNames = staticData.channelNames - l.modeNames = staticData.modeNames - l.RuleNames = staticData.ruleNames - l.LiteralNames = staticData.literalNames - l.SymbolicNames = staticData.symbolicNames + staticData := &CELLexerLexerStaticData + l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache) + l.channelNames = staticData.ChannelNames + l.modeNames = staticData.ModeNames + l.RuleNames = staticData.RuleNames + l.LiteralNames = staticData.LiteralNames + l.SymbolicNames = staticData.SymbolicNames l.GrammarFileName = "CEL.g4" // TODO: l.EOF = antlr.TokenEOF @@ -306,40 +304,41 @@ func NewCELLexer(input antlr.CharStream) *CELLexer { // CELLexer tokens. const ( - CELLexerEQUALS = 1 - CELLexerNOT_EQUALS = 2 - CELLexerIN = 3 - CELLexerLESS = 4 - CELLexerLESS_EQUALS = 5 + CELLexerEQUALS = 1 + CELLexerNOT_EQUALS = 2 + CELLexerIN = 3 + CELLexerLESS = 4 + CELLexerLESS_EQUALS = 5 CELLexerGREATER_EQUALS = 6 - CELLexerGREATER = 7 - CELLexerLOGICAL_AND = 8 - CELLexerLOGICAL_OR = 9 - CELLexerLBRACKET = 10 - CELLexerRPRACKET = 11 - CELLexerLBRACE = 12 - CELLexerRBRACE = 13 - CELLexerLPAREN = 14 - CELLexerRPAREN = 15 - CELLexerDOT = 16 - CELLexerCOMMA = 17 - CELLexerMINUS = 18 - CELLexerEXCLAM = 19 - CELLexerQUESTIONMARK = 20 - CELLexerCOLON = 21 - CELLexerPLUS = 22 - CELLexerSTAR = 23 - CELLexerSLASH = 24 - CELLexerPERCENT = 25 - CELLexerCEL_TRUE = 26 - CELLexerCEL_FALSE = 27 - CELLexerNUL = 28 - CELLexerWHITESPACE = 29 - CELLexerCOMMENT = 30 - CELLexerNUM_FLOAT = 31 - CELLexerNUM_INT = 32 - CELLexerNUM_UINT = 33 - CELLexerSTRING = 34 - CELLexerBYTES = 35 - CELLexerIDENTIFIER = 36 + CELLexerGREATER = 7 + CELLexerLOGICAL_AND = 8 + CELLexerLOGICAL_OR = 9 + CELLexerLBRACKET = 10 + CELLexerRPRACKET = 11 + CELLexerLBRACE = 12 + CELLexerRBRACE = 13 + CELLexerLPAREN = 14 + CELLexerRPAREN = 15 + CELLexerDOT = 16 + CELLexerCOMMA = 17 + CELLexerMINUS = 18 + CELLexerEXCLAM = 19 + CELLexerQUESTIONMARK = 20 + CELLexerCOLON = 21 + CELLexerPLUS = 22 + CELLexerSTAR = 23 + CELLexerSLASH = 24 + CELLexerPERCENT = 25 + CELLexerCEL_TRUE = 26 + CELLexerCEL_FALSE = 27 + CELLexerNUL = 28 + CELLexerWHITESPACE = 29 + CELLexerCOMMENT = 30 + CELLexerNUM_FLOAT = 31 + CELLexerNUM_INT = 32 + CELLexerNUM_UINT = 33 + CELLexerSTRING = 34 + CELLexerBYTES = 35 + CELLexerIDENTIFIER = 36 ) + diff --git a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_listener.go index 73b7f1d39..22dc99789 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_listener.go +++ b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_listener.go @@ -1,7 +1,8 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr/v4" +import "github.com/antlr4-go/antlr/v4" + // CELListener is a complete listener for a parse tree produced by CELParser. 
type CELListener interface { diff --git a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_parser.go index 0cb6c8eae..35334af61 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_parser.go +++ b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_parser.go @@ -1,12 +1,12 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. package gen // CEL import ( "fmt" "strconv" - "sync" + "sync" - "github.com/antlr/antlr4/runtime/Go/antlr/v4" + "github.com/antlr4-go/antlr/v4" ) // Suppress unused import errors @@ -14,166 +14,167 @@ var _ = fmt.Printf var _ = strconv.Itoa var _ = sync.Once{} + type CELParser struct { *antlr.BaseParser } -var celParserStaticData struct { - once sync.Once - serializedATN []int32 - literalNames []string - symbolicNames []string - ruleNames []string - predictionContextCache *antlr.PredictionContextCache - atn *antlr.ATN - decisionToDFA []*antlr.DFA +var CELParserStaticData struct { + once sync.Once + serializedATN []int32 + LiteralNames []string + SymbolicNames []string + RuleNames []string + PredictionContextCache *antlr.PredictionContextCache + atn *antlr.ATN + decisionToDFA []*antlr.DFA } func celParserInit() { - staticData := &celParserStaticData - staticData.literalNames = []string{ - "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", - "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", - "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", - } - staticData.symbolicNames = []string{ - "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", - "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", - "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", - "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", - "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", - "STRING", "BYTES", "IDENTIFIER", - } - staticData.ruleNames = []string{ - "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc", - "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList", - "optField", "mapInitializerList", "optExpr", "literal", - } - staticData.predictionContextCache = antlr.NewPredictionContextCache() - staticData.serializedATN = []int32{ - 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, - 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, - 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, - 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, - 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, - 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, - 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, - 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, - 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, - 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, - 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, - 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, - 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, - 
8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, - 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, - 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, - 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, - 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, - 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, - 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, - 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, - 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, - 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, - 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, - 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, - 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, - 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, - 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, - 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, - 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, - 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, - 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, - 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, - 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, - 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, - 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, - 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, - 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, - 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, - 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, - 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, - 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, - 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, - 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, - 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, - 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, - 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, - 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, - 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, - 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, - 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, - 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, - 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, - 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, - 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, - 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, - 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, - 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, - 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, - 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, - 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 
121, 122, 3, 2, 1, 0, - 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, - 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, - 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, - 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, - 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, - 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, - 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, - 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, - 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, - 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, - 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, - 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, - 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, - 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, - 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, - 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, - 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, - 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, - 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, - 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, - 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, - 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, - 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, - 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, - 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, - 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, - 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, - 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, - 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, - 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, - 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, - 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, - 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, - 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, - 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, - 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, - 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219, - 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, - 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, - 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, - 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, - 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, - 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, - 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, - 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, - 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, - 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, - 28, 0, 0, 248, 235, 1, 0, 
0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, - 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, - 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, - 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, - 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, - 240, 248, - } - deserializer := antlr.NewATNDeserializer(nil) - staticData.atn = deserializer.Deserialize(staticData.serializedATN) - atn := staticData.atn - staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) - decisionToDFA := staticData.decisionToDFA - for index, state := range atn.DecisionToState { - decisionToDFA[index] = antlr.NewDFA(state, index) - } + staticData := &CELParserStaticData + staticData.LiteralNames = []string{ + "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", + "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", + "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", + } + staticData.SymbolicNames = []string{ + "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", + "STRING", "BYTES", "IDENTIFIER", + } + staticData.RuleNames = []string{ + "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc", + "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList", + "optField", "mapInitializerList", "optExpr", "literal", + } + staticData.PredictionContextCache = antlr.NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, + 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, + 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, + 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, + 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, + 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, + 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, + 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, + 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, + 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, + 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, + 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, + 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, + 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, + 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, + 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, + 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, + 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, + 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, + 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, + 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, + 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, + 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 
14, 3, 14, + 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, + 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, + 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, + 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, + 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, + 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, + 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, + 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, + 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, + 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, + 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, + 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, + 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, + 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, + 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, + 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, + 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, + 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, + 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, + 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, + 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, + 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, + 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, + 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, + 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, + 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, + 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, + 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, + 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, + 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, + 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, + 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, + 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, + 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, + 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, + 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, + 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, + 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, + 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, + 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, + 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, + 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, + 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, + 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, + 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, + 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, + 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, + 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 
0, 146, 147, 1, 0, 0, 0, 147, 149, + 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, + 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, + 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, + 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, + 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, + 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, + 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, + 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, + 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, + 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, + 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, + 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, + 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, + 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, + 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, + 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, + 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, + 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, + 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, + 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, + 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, + 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, + 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, + 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, + 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, + 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, + 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219, + 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, + 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, + 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, + 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, + 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, + 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, + 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, + 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, + 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, + 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, + 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, + 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, + 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, + 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, + 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, + 240, 248, +} + deserializer := antlr.NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(state, index) + } } 
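For orientation while reviewing this regenerated, vendored file: the hunk above moves the parser onto the github.com/antlr4-go/antlr/v4 runtime and exports the static table as CELParserStaticData, and the hunks that follow expose Start_() as the entry rule in place of Start(). The sketch below is not part of the vendored diff; it is a minimal, illustrative driver for the regenerated parser, assuming the companion generated lexer gen.NewCELLexer from the same package (not shown in this hunk) and an arbitrary sample expression.

// Minimal sketch only (assumptions noted above); not vendored code from this change.
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4" // new runtime module path used by the regenerated parser

	"github.com/google/cel-go/parser/gen"
)

func main() {
	// Standard ANTLR pipeline: input stream -> lexer -> token stream -> parser.
	input := antlr.NewInputStream(`resource.labels.env == "prod" && !resource.deleted`) // sample expression (assumption)
	lexer := gen.NewCELLexer(input)                                                     // assumed companion generated lexer from the same package
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	parser := gen.NewCELParser(tokens) // constructor shown in this diff

	// ANTLR 4.13 output names the entry rule Start_() (trailing underscore) rather than Start().
	tree := parser.Start_()

	// ToStringTree and the exported RuleNames field both appear in the regenerated code.
	fmt.Println(tree.ToStringTree(parser.RuleNames, parser))
}

The only intent of the sketch is to show that callers of the vendored package interact with the regenerated parser through its constructor and entry rule; the internal changes in the hunks below (exported static data, HasError()/errorExit handling instead of panic/recover) do not alter that calling pattern.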
// CELParserInit initializes any static state used to implement CELParser. By default the @@ -181,8 +182,8 @@ func celParserInit() { // NewCELParser(). You can call this function if you wish to initialize the static state ahead // of time. func CELParserInit() { - staticData := &celParserStaticData - staticData.once.Do(celParserInit) + staticData := &CELParserStaticData + staticData.once.Do(celParserInit) } // NewCELParser produces a new parser instance for the optional input antlr.TokenStream. @@ -190,75 +191,76 @@ func NewCELParser(input antlr.TokenStream) *CELParser { CELParserInit() this := new(CELParser) this.BaseParser = antlr.NewBaseParser(input) - staticData := &celParserStaticData - this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) - this.RuleNames = staticData.ruleNames - this.LiteralNames = staticData.literalNames - this.SymbolicNames = staticData.symbolicNames + staticData := &CELParserStaticData + this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache) + this.RuleNames = staticData.RuleNames + this.LiteralNames = staticData.LiteralNames + this.SymbolicNames = staticData.SymbolicNames this.GrammarFileName = "CEL.g4" return this } + // CELParser tokens. const ( - CELParserEOF = antlr.TokenEOF - CELParserEQUALS = 1 - CELParserNOT_EQUALS = 2 - CELParserIN = 3 - CELParserLESS = 4 - CELParserLESS_EQUALS = 5 + CELParserEOF = antlr.TokenEOF + CELParserEQUALS = 1 + CELParserNOT_EQUALS = 2 + CELParserIN = 3 + CELParserLESS = 4 + CELParserLESS_EQUALS = 5 CELParserGREATER_EQUALS = 6 - CELParserGREATER = 7 - CELParserLOGICAL_AND = 8 - CELParserLOGICAL_OR = 9 - CELParserLBRACKET = 10 - CELParserRPRACKET = 11 - CELParserLBRACE = 12 - CELParserRBRACE = 13 - CELParserLPAREN = 14 - CELParserRPAREN = 15 - CELParserDOT = 16 - CELParserCOMMA = 17 - CELParserMINUS = 18 - CELParserEXCLAM = 19 - CELParserQUESTIONMARK = 20 - CELParserCOLON = 21 - CELParserPLUS = 22 - CELParserSTAR = 23 - CELParserSLASH = 24 - CELParserPERCENT = 25 - CELParserCEL_TRUE = 26 - CELParserCEL_FALSE = 27 - CELParserNUL = 28 - CELParserWHITESPACE = 29 - CELParserCOMMENT = 30 - CELParserNUM_FLOAT = 31 - CELParserNUM_INT = 32 - CELParserNUM_UINT = 33 - CELParserSTRING = 34 - CELParserBYTES = 35 - CELParserIDENTIFIER = 36 + CELParserGREATER = 7 + CELParserLOGICAL_AND = 8 + CELParserLOGICAL_OR = 9 + CELParserLBRACKET = 10 + CELParserRPRACKET = 11 + CELParserLBRACE = 12 + CELParserRBRACE = 13 + CELParserLPAREN = 14 + CELParserRPAREN = 15 + CELParserDOT = 16 + CELParserCOMMA = 17 + CELParserMINUS = 18 + CELParserEXCLAM = 19 + CELParserQUESTIONMARK = 20 + CELParserCOLON = 21 + CELParserPLUS = 22 + CELParserSTAR = 23 + CELParserSLASH = 24 + CELParserPERCENT = 25 + CELParserCEL_TRUE = 26 + CELParserCEL_FALSE = 27 + CELParserNUL = 28 + CELParserWHITESPACE = 29 + CELParserCOMMENT = 30 + CELParserNUM_FLOAT = 31 + CELParserNUM_INT = 32 + CELParserNUM_UINT = 33 + CELParserSTRING = 34 + CELParserBYTES = 35 + CELParserIDENTIFIER = 36 ) // CELParser rules. 
const ( - CELParserRULE_start = 0 - CELParserRULE_expr = 1 - CELParserRULE_conditionalOr = 2 - CELParserRULE_conditionalAnd = 3 - CELParserRULE_relation = 4 - CELParserRULE_calc = 5 - CELParserRULE_unary = 6 - CELParserRULE_member = 7 - CELParserRULE_primary = 8 - CELParserRULE_exprList = 9 - CELParserRULE_listInit = 10 + CELParserRULE_start = 0 + CELParserRULE_expr = 1 + CELParserRULE_conditionalOr = 2 + CELParserRULE_conditionalAnd = 3 + CELParserRULE_relation = 4 + CELParserRULE_calc = 5 + CELParserRULE_unary = 6 + CELParserRULE_member = 7 + CELParserRULE_primary = 8 + CELParserRULE_exprList = 9 + CELParserRULE_listInit = 10 CELParserRULE_fieldInitializerList = 11 - CELParserRULE_optField = 12 - CELParserRULE_mapInitializerList = 13 - CELParserRULE_optExpr = 14 - CELParserRULE_literal = 15 + CELParserRULE_optField = 12 + CELParserRULE_mapInitializerList = 13 + CELParserRULE_optExpr = 14 + CELParserRULE_literal = 15 ) // IStartContext is an interface to support dynamic dispatch. @@ -271,9 +273,11 @@ type IStartContext interface { // GetE returns the e rule contexts. GetE() IExprContext + // SetE sets the e rule contexts. SetE(IExprContext) + // Getter signatures EOF() antlr.TerminalNode Expr() IExprContext @@ -283,24 +287,29 @@ type IStartContext interface { } type StartContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - e IExprContext + e IExprContext } func NewEmptyStartContext() *StartContext { var p = new(StartContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_start return p } +func InitEmptyStartContext(p *StartContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_start +} + func (*StartContext) IsStartContext() {} func NewStartContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *StartContext { var p = new(StartContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_start @@ -312,17 +321,19 @@ func (s *StartContext) GetParser() antlr.Parser { return s.parser } func (s *StartContext) GetE() IExprContext { return s.e } + func (s *StartContext) SetE(v IExprContext) { s.e = v } + func (s *StartContext) EOF() antlr.TerminalNode { return s.GetToken(CELParserEOF, 0) } func (s *StartContext) Expr() IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -342,6 +353,7 @@ func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterStart(s) @@ -364,45 +376,46 @@ func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) Start() (localctx IStartContext) { - this := p - _ = this - localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 0, CELParserRULE_start) - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - 
p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() +func (p *CELParser) Start_() (localctx IStartContext) { + localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 0, CELParserRULE_start) p.EnterOuterAlt(localctx, 1) { p.SetState(32) var _x = p.Expr() + localctx.(*StartContext).e = _x } { p.SetState(33) p.Match(CELParserEOF) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IExprContext is an interface to support dynamic dispatch. type IExprContext interface { antlr.ParserRuleContext @@ -411,10 +424,12 @@ type IExprContext interface { GetParser() antlr.Parser // GetOp returns the op token. - GetOp() antlr.Token + GetOp() antlr.Token + // SetOp sets the op token. - SetOp(antlr.Token) + SetOp(antlr.Token) + // GetE returns the e rule contexts. GetE() IConditionalOrContext @@ -425,6 +440,7 @@ type IExprContext interface { // GetE2 returns the e2 rule contexts. GetE2() IExprContext + // SetE sets the e rule contexts. SetE(IConditionalOrContext) @@ -434,6 +450,7 @@ type IExprContext interface { // SetE2 sets the e2 rule contexts. SetE2(IExprContext) + // Getter signatures AllConditionalOr() []IConditionalOrContext ConditionalOr(i int) IConditionalOrContext @@ -446,27 +463,32 @@ type IExprContext interface { } type ExprContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - e IConditionalOrContext - op antlr.Token - e1 IConditionalOrContext - e2 IExprContext + e IConditionalOrContext + op antlr.Token + e1 IConditionalOrContext + e2 IExprContext } func NewEmptyExprContext() *ExprContext { var p = new(ExprContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_expr return p } +func InitEmptyExprContext(p *ExprContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_expr +} + func (*ExprContext) IsExprContext() {} func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext { var p = new(ExprContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_expr @@ -478,20 +500,24 @@ func (s *ExprContext) GetParser() antlr.Parser { return s.parser } func (s *ExprContext) GetOp() antlr.Token { return s.op } + func (s *ExprContext) SetOp(v antlr.Token) { s.op = v } + func (s *ExprContext) GetE() IConditionalOrContext { return s.e } func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 } func (s *ExprContext) GetE2() IExprContext { return s.e2 } + func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v } func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v } func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v } + func (s *ExprContext) AllConditionalOr() []IConditionalOrContext { children := s.GetChildren() len := 0 @@ -514,12 +540,12 @@ func (s *ExprContext) AllConditionalOr() []IConditionalOrContext { } func (s *ExprContext) ConditionalOr(i int) 
IConditionalOrContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IConditionalOrContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -542,10 +568,10 @@ func (s *ExprContext) QUESTIONMARK() antlr.TerminalNode { } func (s *ExprContext) Expr() IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -565,6 +591,7 @@ func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) s return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterExpr(s) @@ -587,42 +614,31 @@ func (s *ExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) Expr() (localctx IExprContext) { - this := p - _ = this + + +func (p *CELParser) Expr() (localctx IExprContext) { localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 2, CELParserRULE_expr) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) { p.SetState(35) var _x = p.ConditionalOr() + localctx.(*ExprContext).e = _x } p.SetState(41) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK { { p.SetState(36) @@ -630,31 +646,54 @@ func (p *CELParser) Expr() (localctx IExprContext) { var _m = p.Match(CELParserQUESTIONMARK) localctx.(*ExprContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(37) var _x = p.ConditionalOr() + localctx.(*ExprContext).e1 = _x } { p.SetState(38) p.Match(CELParserCOLON) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(39) var _x = p.Expr() + localctx.(*ExprContext).e2 = _x } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IConditionalOrContext is an interface to support dynamic dispatch. type IConditionalOrContext interface { antlr.ParserRuleContext @@ -663,34 +702,42 @@ type IConditionalOrContext interface { GetParser() antlr.Parser // GetS9 returns the s9 token. - GetS9() antlr.Token + GetS9() antlr.Token + // SetS9 sets the s9 token. - SetS9(antlr.Token) + SetS9(antlr.Token) + // GetOps returns the ops token list. GetOps() []antlr.Token + // SetOps sets the ops token list. SetOps([]antlr.Token) + // GetE returns the e rule contexts. GetE() IConditionalAndContext // Get_conditionalAnd returns the _conditionalAnd rule contexts. Get_conditionalAnd() IConditionalAndContext + // SetE sets the e rule contexts. SetE(IConditionalAndContext) // Set_conditionalAnd sets the _conditionalAnd rule contexts. Set_conditionalAnd(IConditionalAndContext) + // GetE1 returns the e1 rule context list. GetE1() []IConditionalAndContext + // SetE1 sets the e1 rule context list. 
- SetE1([]IConditionalAndContext) + SetE1([]IConditionalAndContext) + // Getter signatures AllConditionalAnd() []IConditionalAndContext @@ -703,28 +750,33 @@ type IConditionalOrContext interface { } type ConditionalOrContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - e IConditionalAndContext - s9 antlr.Token - ops []antlr.Token - _conditionalAnd IConditionalAndContext - e1 []IConditionalAndContext + antlr.BaseParserRuleContext + parser antlr.Parser + e IConditionalAndContext + s9 antlr.Token + ops []antlr.Token + _conditionalAnd IConditionalAndContext + e1 []IConditionalAndContext } func NewEmptyConditionalOrContext() *ConditionalOrContext { var p = new(ConditionalOrContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_conditionalOr return p } +func InitEmptyConditionalOrContext(p *ConditionalOrContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalOr +} + func (*ConditionalOrContext) IsConditionalOrContext() {} func NewConditionalOrContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalOrContext { var p = new(ConditionalOrContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_conditionalOr @@ -736,24 +788,32 @@ func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser } func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 } + func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v } + func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops } + func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v } + func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e } func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd } + func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v } func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v } + func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 } + func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v } + func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext { children := s.GetChildren() len := 0 @@ -776,12 +836,12 @@ func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext { } func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IConditionalAndContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -811,6 +871,7 @@ func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Reco return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterConditionalOr(s) @@ -833,42 +894,31 @@ func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{ } } -func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { - this := p - _ = this + + +func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { localctx = 
NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 4, CELParserRULE_conditionalOr) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) { p.SetState(43) var _x = p.ConditionalAnd() + localctx.(*ConditionalOrContext).e = _x } p.SetState(48) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + for _la == CELParserLOGICAL_OR { { p.SetState(44) @@ -876,6 +926,10 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { var _m = p.Match(CELParserLOGICAL_OR) localctx.(*ConditionalOrContext).s9 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9) { @@ -883,18 +937,36 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { var _x = p.ConditionalAnd() + localctx.(*ConditionalOrContext)._conditionalAnd = _x } localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd) + p.SetState(50) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IConditionalAndContext is an interface to support dynamic dispatch. type IConditionalAndContext interface { antlr.ParserRuleContext @@ -903,34 +975,42 @@ type IConditionalAndContext interface { GetParser() antlr.Parser // GetS8 returns the s8 token. - GetS8() antlr.Token + GetS8() antlr.Token + // SetS8 sets the s8 token. - SetS8(antlr.Token) + SetS8(antlr.Token) + // GetOps returns the ops token list. GetOps() []antlr.Token + // SetOps sets the ops token list. SetOps([]antlr.Token) + // GetE returns the e rule contexts. GetE() IRelationContext // Get_relation returns the _relation rule contexts. Get_relation() IRelationContext + // SetE sets the e rule contexts. SetE(IRelationContext) // Set_relation sets the _relation rule contexts. Set_relation(IRelationContext) + // GetE1 returns the e1 rule context list. GetE1() []IRelationContext + // SetE1 sets the e1 rule context list. 
- SetE1([]IRelationContext) + SetE1([]IRelationContext) + // Getter signatures AllRelation() []IRelationContext @@ -943,28 +1023,33 @@ type IConditionalAndContext interface { } type ConditionalAndContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - e IRelationContext - s8 antlr.Token - ops []antlr.Token - _relation IRelationContext - e1 []IRelationContext + antlr.BaseParserRuleContext + parser antlr.Parser + e IRelationContext + s8 antlr.Token + ops []antlr.Token + _relation IRelationContext + e1 []IRelationContext } func NewEmptyConditionalAndContext() *ConditionalAndContext { var p = new(ConditionalAndContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_conditionalAnd return p } +func InitEmptyConditionalAndContext(p *ConditionalAndContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalAnd +} + func (*ConditionalAndContext) IsConditionalAndContext() {} func NewConditionalAndContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalAndContext { var p = new(ConditionalAndContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_conditionalAnd @@ -976,24 +1061,32 @@ func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser } func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 } + func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v } + func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops } + func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v } + func (s *ConditionalAndContext) GetE() IRelationContext { return s.e } func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation } + func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v } func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v } + func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 } + func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v } + func (s *ConditionalAndContext) AllRelation() []IRelationContext { children := s.GetChildren() len := 0 @@ -1016,12 +1109,12 @@ func (s *ConditionalAndContext) AllRelation() []IRelationContext { } func (s *ConditionalAndContext) Relation(i int) IRelationContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IRelationContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -1051,6 +1144,7 @@ func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Rec return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterConditionalAnd(s) @@ -1073,30 +1167,14 @@ func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface } } -func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { - this := p - _ = this + + +func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd) var _la int - 
defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) { p.SetState(51) @@ -1107,8 +1185,12 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { } p.SetState(56) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + for _la == CELParserLOGICAL_AND { { p.SetState(52) @@ -1116,6 +1198,10 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { var _m = p.Match(CELParserLOGICAL_AND) localctx.(*ConditionalAndContext).s8 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8) { @@ -1127,14 +1213,31 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { } localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation) + p.SetState(58) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IRelationContext is an interface to support dynamic dispatch. type IRelationContext interface { antlr.ParserRuleContext @@ -1143,10 +1246,12 @@ type IRelationContext interface { GetParser() antlr.Parser // GetOp returns the op token. - GetOp() antlr.Token + GetOp() antlr.Token + // SetOp sets the op token. 
- SetOp(antlr.Token) + SetOp(antlr.Token) + // Getter signatures Calc() ICalcContext @@ -1165,24 +1270,29 @@ type IRelationContext interface { } type RelationContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - op antlr.Token + op antlr.Token } func NewEmptyRelationContext() *RelationContext { var p = new(RelationContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_relation return p } +func InitEmptyRelationContext(p *RelationContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_relation +} + func (*RelationContext) IsRelationContext() {} func NewRelationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *RelationContext { var p = new(RelationContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_relation @@ -1194,13 +1304,15 @@ func (s *RelationContext) GetParser() antlr.Parser { return s.parser } func (s *RelationContext) GetOp() antlr.Token { return s.op } + func (s *RelationContext) SetOp(v antlr.Token) { s.op = v } + func (s *RelationContext) Calc() ICalcContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(ICalcContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -1234,12 +1346,12 @@ func (s *RelationContext) AllRelation() []IRelationContext { } func (s *RelationContext) Relation(i int) IRelationContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IRelationContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -1289,6 +1401,7 @@ func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterRelation(s) @@ -1311,15 +1424,17 @@ func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + + + + func (p *CELParser) Relation() (localctx IRelationContext) { return p.relation(0) } func (p *CELParser) relation(_p int) (localctx IRelationContext) { - this := p - _ = this - var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + _parentState := p.GetState() localctx = NewRelationContext(p, p.GetParserRuleContext(), _parentState) var _prevctx IRelationContext = localctx @@ -1328,22 +1443,6 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) { p.EnterRecursionRule(localctx, 8, CELParserRULE_relation, _p) var _la int - defer func() { - p.UnrollRecursionContexts(_parentctx) - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - var _alt int p.EnterOuterAlt(localctx, 1) @@ -1355,8 +1454,13 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) { p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) p.SetState(67) p.GetErrorHandler().Sync(p) - _alt = 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { if p.GetParseListeners() != nil { @@ -1368,7 +1472,8 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) { p.SetState(62) if !(p.Precpred(p.GetParserRuleContext(), 1)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit } { p.SetState(63) @@ -1379,7 +1484,7 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) { _la = p.GetTokenStream().LA(1) - if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&254) != 0) { + if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 254) != 0)) { var _ri = p.GetErrorHandler().RecoverInline(p) localctx.(*RelationContext).op = _ri @@ -1393,15 +1498,35 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) { p.relation(2) } + } p.SetState(69) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // ICalcContext is an interface to support dynamic dispatch. type ICalcContext interface { antlr.ParserRuleContext @@ -1410,10 +1535,12 @@ type ICalcContext interface { GetParser() antlr.Parser // GetOp returns the op token. - GetOp() antlr.Token + GetOp() antlr.Token + // SetOp sets the op token. 
- SetOp(antlr.Token) + SetOp(antlr.Token) + // Getter signatures Unary() IUnaryContext @@ -1430,24 +1557,29 @@ type ICalcContext interface { } type CalcContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - op antlr.Token + op antlr.Token } func NewEmptyCalcContext() *CalcContext { var p = new(CalcContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_calc return p } +func InitEmptyCalcContext(p *CalcContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_calc +} + func (*CalcContext) IsCalcContext() {} func NewCalcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *CalcContext { var p = new(CalcContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_calc @@ -1459,13 +1591,15 @@ func (s *CalcContext) GetParser() antlr.Parser { return s.parser } func (s *CalcContext) GetOp() antlr.Token { return s.op } + func (s *CalcContext) SetOp(v antlr.Token) { s.op = v } + func (s *CalcContext) Unary() IUnaryContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IUnaryContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -1499,12 +1633,12 @@ func (s *CalcContext) AllCalc() []ICalcContext { } func (s *CalcContext) Calc(i int) ICalcContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(ICalcContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -1546,6 +1680,7 @@ func (s *CalcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) s return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *CalcContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterCalc(s) @@ -1568,15 +1703,17 @@ func (s *CalcContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + + + + func (p *CELParser) Calc() (localctx ICalcContext) { return p.calc(0) } func (p *CELParser) calc(_p int) (localctx ICalcContext) { - this := p - _ = this - var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + _parentState := p.GetState() localctx = NewCalcContext(p, p.GetParserRuleContext(), _parentState) var _prevctx ICalcContext = localctx @@ -1585,22 +1722,6 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { p.EnterRecursionRule(localctx, 10, CELParserRULE_calc, _p) var _la int - defer func() { - p.UnrollRecursionContexts(_parentctx) - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - var _alt int p.EnterOuterAlt(localctx, 1) @@ -1612,8 +1733,13 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) p.SetState(81) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 5, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, 
p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { if p.GetParseListeners() != nil { @@ -1622,14 +1748,19 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { _prevctx = localctx p.SetState(79) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext()) { + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 4, p.GetParserRuleContext()) { case 1: localctx = NewCalcContext(p, _parentctx, _parentState) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) p.SetState(73) if !(p.Precpred(p.GetParserRuleContext(), 2)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + goto errorExit } { p.SetState(74) @@ -1640,7 +1771,7 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { _la = p.GetTokenStream().LA(1) - if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&58720256) != 0) { + if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 58720256) != 0)) { var _ri = p.GetErrorHandler().RecoverInline(p) localctx.(*CalcContext).op = _ri @@ -1654,13 +1785,15 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { p.calc(3) } + case 2: localctx = NewCalcContext(p, _parentctx, _parentState) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) p.SetState(76) if !(p.Precpred(p.GetParserRuleContext(), 1)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit } { p.SetState(77) @@ -1685,17 +1818,38 @@ func (p *CELParser) calc(_p int) (localctx ICalcContext) { p.calc(2) } + case antlr.ATNInvalidAltNumber: + goto errorExit } } p.SetState(83) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 5, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IUnaryContext is an interface to support dynamic dispatch. 
type IUnaryContext interface { antlr.ParserRuleContext @@ -1707,23 +1861,28 @@ type IUnaryContext interface { } type UnaryContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser } func NewEmptyUnaryContext() *UnaryContext { var p = new(UnaryContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_unary return p } +func InitEmptyUnaryContext(p *UnaryContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_unary +} + func (*UnaryContext) IsUnaryContext() {} func NewUnaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *UnaryContext { var p = new(UnaryContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_unary @@ -1733,8 +1892,8 @@ func NewUnaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invoki func (s *UnaryContext) GetParser() antlr.Parser { return s.parser } -func (s *UnaryContext) CopyFrom(ctx *UnaryContext) { - s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext) +func (s *UnaryContext) CopyAll(ctx *UnaryContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) } func (s *UnaryContext) GetRuleContext() antlr.RuleContext { @@ -1745,8 +1904,11 @@ func (s *UnaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) return antlr.TreesStringTree(s, ruleNames, recog) } + + + type LogicalNotContext struct { - *UnaryContext + UnaryContext s19 antlr.Token ops []antlr.Token } @@ -1754,19 +1916,23 @@ type LogicalNotContext struct { func NewLogicalNotContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *LogicalNotContext { var p = new(LogicalNotContext) - p.UnaryContext = NewEmptyUnaryContext() + InitEmptyUnaryContext(&p.UnaryContext) p.parser = parser - p.CopyFrom(ctx.(*UnaryContext)) + p.CopyAll(ctx.(*UnaryContext)) return p } + func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 } + func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v } + func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops } + func (s *LogicalNotContext) SetOps(v []antlr.Token) { s.ops = v } func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext { @@ -1774,10 +1940,10 @@ func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext { } func (s *LogicalNotContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -1797,6 +1963,7 @@ func (s *LogicalNotContext) EXCLAM(i int) antlr.TerminalNode { return s.GetToken(CELParserEXCLAM, i) } + func (s *LogicalNotContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterLogicalNot(s) @@ -1819,16 +1986,17 @@ func (s *LogicalNotContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type MemberExprContext struct { - *UnaryContext + UnaryContext } func NewMemberExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberExprContext { var p = new(MemberExprContext) - p.UnaryContext = NewEmptyUnaryContext() + InitEmptyUnaryContext(&p.UnaryContext) p.parser = parser - p.CopyFrom(ctx.(*UnaryContext)) + p.CopyAll(ctx.(*UnaryContext)) return p } @@ -1838,10 
+2006,10 @@ func (s *MemberExprContext) GetRuleContext() antlr.RuleContext { } func (s *MemberExprContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -1853,6 +2021,7 @@ func (s *MemberExprContext) Member() IMemberContext { return t.(IMemberContext) } + func (s *MemberExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterMemberExpr(s) @@ -1875,8 +2044,9 @@ func (s *MemberExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type NegateContext struct { - *UnaryContext + UnaryContext s18 antlr.Token ops []antlr.Token } @@ -1884,19 +2054,23 @@ type NegateContext struct { func NewNegateContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NegateContext { var p = new(NegateContext) - p.UnaryContext = NewEmptyUnaryContext() + InitEmptyUnaryContext(&p.UnaryContext) p.parser = parser - p.CopyFrom(ctx.(*UnaryContext)) + p.CopyAll(ctx.(*UnaryContext)) return p } + func (s *NegateContext) GetS18() antlr.Token { return s.s18 } + func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v } + func (s *NegateContext) GetOps() []antlr.Token { return s.ops } + func (s *NegateContext) SetOps(v []antlr.Token) { s.ops = v } func (s *NegateContext) GetRuleContext() antlr.RuleContext { @@ -1904,10 +2078,10 @@ func (s *NegateContext) GetRuleContext() antlr.RuleContext { } func (s *NegateContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -1927,6 +2101,7 @@ func (s *NegateContext) MINUS(i int) antlr.TerminalNode { return s.GetToken(CELParserMINUS, i) } + func (s *NegateContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterNegate(s) @@ -1949,35 +2124,22 @@ func (s *NegateContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) Unary() (localctx IUnaryContext) { - this := p - _ = this + +func (p *CELParser) Unary() (localctx IUnaryContext) { localctx = NewUnaryContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 12, CELParserRULE_unary) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - var _alt int p.SetState(97) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 8, p.GetParserRuleContext()) { + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 8, p.GetParserRuleContext()) { case 1: localctx = NewMemberExprContext(p, localctx) p.EnterOuterAlt(localctx, 1) @@ -1986,13 +2148,18 @@ func (p *CELParser) Unary() (localctx IUnaryContext) { p.member(0) } + case 2: localctx = NewLogicalNotContext(p, localctx) p.EnterOuterAlt(localctx, 2) p.SetState(86) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + for ok := true; ok; ok = _la == CELParserEXCLAM { { p.SetState(85) @@ -2000,11 +2167,19 @@ func (p *CELParser) Unary() (localctx IUnaryContext) { var _m 
= p.Match(CELParserEXCLAM) localctx.(*LogicalNotContext).s19 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*LogicalNotContext).ops = append(localctx.(*LogicalNotContext).ops, localctx.(*LogicalNotContext).s19) + p.SetState(88) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) } { @@ -2012,42 +2187,71 @@ func (p *CELParser) Unary() (localctx IUnaryContext) { p.member(0) } + case 3: localctx = NewNegateContext(p, localctx) p.EnterOuterAlt(localctx, 3) p.SetState(92) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _alt = 1 for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber { switch _alt { case 1: - { - p.SetState(91) + { + p.SetState(91) + + var _m = p.Match(CELParserMINUS) + + localctx.(*NegateContext).s18 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18) + - var _m = p.Match(CELParserMINUS) - localctx.(*NegateContext).s18 = _m - } - localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18) default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + p.SetError(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + goto errorExit } p.SetState(94) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 7, p.GetParserRuleContext()) + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 7, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } { p.SetState(96) p.member(0) } + case antlr.ATNInvalidAltNumber: + goto errorExit } + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IMemberContext is an interface to support dynamic dispatch. 
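The second mechanical change, repeated for every rule context below, is in how contexts are constructed: antlr.BaseParserRuleContext is now embedded by value rather than behind a pointer, each context gains an InitEmptyXxxContext helper that initialises it in place, and labeled alternatives copy the parent's state with CopyAll instead of CopyFrom. A minimal sketch of the pattern, assembled from lines already in this diff (it assumes the generated package's CELParserRULE_* constants and is not an addition to the vendored file):

    type MemberContext struct {
        antlr.BaseParserRuleContext // embedded by value; previously *antlr.BaseParserRuleContext
        parser antlr.Parser
    }

    func NewEmptyMemberContext() *MemberContext {
        var p = new(MemberContext)
        antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1)
        p.RuleIndex = CELParserRULE_member
        return p
    }

    // Labeled alternatives (Select, MemberCall, Index, ...) initialise the embedded
    // rule context in place and then copy the parent's state.
    func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectContext {
        var p = new(SelectContext)
        InitEmptyMemberContext(&p.MemberContext)
        p.parser = parser
        p.CopyAll(ctx.(*MemberContext))
        return p
    }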
type IMemberContext interface { antlr.ParserRuleContext @@ -2059,23 +2263,28 @@ type IMemberContext interface { } type MemberContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser } func NewEmptyMemberContext() *MemberContext { var p = new(MemberContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_member return p } +func InitEmptyMemberContext(p *MemberContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_member +} + func (*MemberContext) IsMemberContext() {} func NewMemberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MemberContext { var p = new(MemberContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_member @@ -2085,8 +2294,8 @@ func NewMemberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invok func (s *MemberContext) GetParser() antlr.Parser { return s.parser } -func (s *MemberContext) CopyFrom(ctx *MemberContext) { - s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext) +func (s *MemberContext) CopyAll(ctx *MemberContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) } func (s *MemberContext) GetRuleContext() antlr.RuleContext { @@ -2097,38 +2306,46 @@ func (s *MemberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) return antlr.TreesStringTree(s, ruleNames, recog) } + + + + type MemberCallContext struct { - *MemberContext - op antlr.Token - id antlr.Token + MemberContext + op antlr.Token + id antlr.Token open antlr.Token - args IExprListContext + args IExprListContext } func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberCallContext { var p = new(MemberCallContext) - p.MemberContext = NewEmptyMemberContext() + InitEmptyMemberContext(&p.MemberContext) p.parser = parser - p.CopyFrom(ctx.(*MemberContext)) + p.CopyAll(ctx.(*MemberContext)) return p } + func (s *MemberCallContext) GetOp() antlr.Token { return s.op } func (s *MemberCallContext) GetId() antlr.Token { return s.id } func (s *MemberCallContext) GetOpen() antlr.Token { return s.open } + func (s *MemberCallContext) SetOp(v antlr.Token) { s.op = v } func (s *MemberCallContext) SetId(v antlr.Token) { s.id = v } func (s *MemberCallContext) SetOpen(v antlr.Token) { s.open = v } + func (s *MemberCallContext) GetArgs() IExprListContext { return s.args } + func (s *MemberCallContext) SetArgs(v IExprListContext) { s.args = v } func (s *MemberCallContext) GetRuleContext() antlr.RuleContext { @@ -2136,10 +2353,10 @@ func (s *MemberCallContext) GetRuleContext() antlr.RuleContext { } func (s *MemberCallContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2168,10 +2385,10 @@ func (s *MemberCallContext) LPAREN() antlr.TerminalNode { } func (s *MemberCallContext) ExprList() IExprListContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprListContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2183,6 +2400,7 @@ func (s *MemberCallContext) ExprList() IExprListContext { return 
t.(IExprListContext) } + func (s *MemberCallContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterMemberCall(s) @@ -2205,29 +2423,32 @@ func (s *MemberCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type SelectContext struct { - *MemberContext - op antlr.Token + MemberContext + op antlr.Token opt antlr.Token - id antlr.Token + id antlr.Token } func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectContext { var p = new(SelectContext) - p.MemberContext = NewEmptyMemberContext() + InitEmptyMemberContext(&p.MemberContext) p.parser = parser - p.CopyFrom(ctx.(*MemberContext)) + p.CopyAll(ctx.(*MemberContext)) return p } + func (s *SelectContext) GetOp() antlr.Token { return s.op } func (s *SelectContext) GetOpt() antlr.Token { return s.opt } func (s *SelectContext) GetId() antlr.Token { return s.id } + func (s *SelectContext) SetOp(v antlr.Token) { s.op = v } func (s *SelectContext) SetOpt(v antlr.Token) { s.opt = v } @@ -2239,10 +2460,10 @@ func (s *SelectContext) GetRuleContext() antlr.RuleContext { } func (s *SelectContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2266,6 +2487,7 @@ func (s *SelectContext) QUESTIONMARK() antlr.TerminalNode { return s.GetToken(CELParserQUESTIONMARK, 0) } + func (s *SelectContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterSelect(s) @@ -2288,16 +2510,17 @@ func (s *SelectContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type PrimaryExprContext struct { - *MemberContext + MemberContext } func NewPrimaryExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *PrimaryExprContext { var p = new(PrimaryExprContext) - p.MemberContext = NewEmptyMemberContext() + InitEmptyMemberContext(&p.MemberContext) p.parser = parser - p.CopyFrom(ctx.(*MemberContext)) + p.CopyAll(ctx.(*MemberContext)) return p } @@ -2307,10 +2530,10 @@ func (s *PrimaryExprContext) GetRuleContext() antlr.RuleContext { } func (s *PrimaryExprContext) Primary() IPrimaryContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IPrimaryContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2322,6 +2545,7 @@ func (s *PrimaryExprContext) Primary() IPrimaryContext { return t.(IPrimaryContext) } + func (s *PrimaryExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterPrimaryExpr(s) @@ -2344,33 +2568,38 @@ func (s *PrimaryExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} } } + type IndexContext struct { - *MemberContext - op antlr.Token - opt antlr.Token - index IExprContext + MemberContext + op antlr.Token + opt antlr.Token + index IExprContext } func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexContext { var p = new(IndexContext) - p.MemberContext = NewEmptyMemberContext() + InitEmptyMemberContext(&p.MemberContext) p.parser = parser - p.CopyFrom(ctx.(*MemberContext)) + p.CopyAll(ctx.(*MemberContext)) return p } + func (s *IndexContext) GetOp() antlr.Token { return s.op } func (s *IndexContext) GetOpt() antlr.Token { return s.opt } + func (s *IndexContext) SetOp(v antlr.Token) { s.op = v } func (s *IndexContext) SetOpt(v 
antlr.Token) { s.opt = v } + func (s *IndexContext) GetIndex() IExprContext { return s.index } + func (s *IndexContext) SetIndex(v IExprContext) { s.index = v } func (s *IndexContext) GetRuleContext() antlr.RuleContext { @@ -2378,10 +2607,10 @@ func (s *IndexContext) GetRuleContext() antlr.RuleContext { } func (s *IndexContext) Member() IMemberContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMemberContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2402,10 +2631,10 @@ func (s *IndexContext) LBRACKET() antlr.TerminalNode { } func (s *IndexContext) Expr() IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2421,6 +2650,7 @@ func (s *IndexContext) QUESTIONMARK() antlr.TerminalNode { return s.GetToken(CELParserQUESTIONMARK, 0) } + func (s *IndexContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterIndex(s) @@ -2443,15 +2673,15 @@ func (s *IndexContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + + func (p *CELParser) Member() (localctx IMemberContext) { return p.member(0) } func (p *CELParser) member(_p int) (localctx IMemberContext) { - this := p - _ = this - var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + _parentState := p.GetState() localctx = NewMemberContext(p, p.GetParserRuleContext(), _parentState) var _prevctx IMemberContext = localctx @@ -2460,22 +2690,6 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { p.EnterRecursionRule(localctx, 14, CELParserRULE_member, _p) var _la int - defer func() { - p.UnrollRecursionContexts(_parentctx) - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - var _alt int p.EnterOuterAlt(localctx, 1) @@ -2491,8 +2705,13 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) p.SetState(126) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 13, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { if p.GetParseListeners() != nil { @@ -2501,14 +2720,19 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { _prevctx = localctx p.SetState(124) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 12, p.GetParserRuleContext()) { + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 12, p.GetParserRuleContext()) { case 1: localctx = NewSelectContext(p, NewMemberContext(p, _parentctx, _parentState)) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) p.SetState(102) if !(p.Precpred(p.GetParserRuleContext(), 3)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", "")) + goto 
errorExit } { p.SetState(103) @@ -2516,11 +2740,19 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserDOT) localctx.(*SelectContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(105) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK { { p.SetState(104) @@ -2528,6 +2760,10 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserQUESTIONMARK) localctx.(*SelectContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -2537,15 +2773,21 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserIDENTIFIER) localctx.(*SelectContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 2: localctx = NewMemberCallContext(p, NewMemberContext(p, _parentctx, _parentState)) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) p.SetState(108) if !(p.Precpred(p.GetParserRuleContext(), 2)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + goto errorExit } { p.SetState(109) @@ -2553,6 +2795,10 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserDOT) localctx.(*MemberCallContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(110) @@ -2560,6 +2806,10 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserIDENTIFIER) localctx.(*MemberCallContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(111) @@ -2567,17 +2817,26 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserLPAREN) localctx.(*MemberCallContext).open = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(113) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) - if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135762105344) != 0 { + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) { { p.SetState(112) var _x = p.ExprList() + localctx.(*MemberCallContext).args = _x } @@ -2585,15 +2844,21 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { { p.SetState(115) p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 3: localctx = NewIndexContext(p, NewMemberContext(p, _parentctx, _parentState)) p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) p.SetState(116) if !(p.Precpred(p.GetParserRuleContext(), 1)) { - panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit } { p.SetState(117) @@ -2601,11 +2866,19 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserLBRACKET) localctx.(*IndexContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(119) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK { { p.SetState(118) @@ -2613,6 +2886,10 
@@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _m = p.Match(CELParserQUESTIONMARK) localctx.(*IndexContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -2621,24 +2898,50 @@ func (p *CELParser) member(_p int) (localctx IMemberContext) { var _x = p.Expr() + localctx.(*IndexContext).index = _x } { p.SetState(122) p.Match(CELParserRPRACKET) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case antlr.ATNInvalidAltNumber: + goto errorExit } } p.SetState(128) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 13, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IPrimaryContext is an interface to support dynamic dispatch. type IPrimaryContext interface { antlr.ParserRuleContext @@ -2650,23 +2953,28 @@ type IPrimaryContext interface { } type PrimaryContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser } func NewEmptyPrimaryContext() *PrimaryContext { var p = new(PrimaryContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_primary return p } +func InitEmptyPrimaryContext(p *PrimaryContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_primary +} + func (*PrimaryContext) IsPrimaryContext() {} func NewPrimaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *PrimaryContext { var p = new(PrimaryContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_primary @@ -2676,8 +2984,8 @@ func NewPrimaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invo func (s *PrimaryContext) GetParser() antlr.Parser { return s.parser } -func (s *PrimaryContext) CopyFrom(ctx *PrimaryContext) { - s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext) +func (s *PrimaryContext) CopyAll(ctx *PrimaryContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) } func (s *PrimaryContext) GetRuleContext() antlr.RuleContext { @@ -2688,28 +2996,35 @@ func (s *PrimaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer return antlr.TreesStringTree(s, ruleNames, recog) } + + + type CreateListContext struct { - *PrimaryContext - op antlr.Token - elems IListInitContext + PrimaryContext + op antlr.Token + elems IListInitContext } func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateListContext { var p = new(CreateListContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } + func (s *CreateListContext) GetOp() antlr.Token { return s.op } + func (s *CreateListContext) SetOp(v antlr.Token) { s.op = v } + func (s 
*CreateListContext) GetElems() IListInitContext { return s.elems } + func (s *CreateListContext) SetElems(v IListInitContext) { s.elems = v } func (s *CreateListContext) GetRuleContext() antlr.RuleContext { @@ -2729,10 +3044,10 @@ func (s *CreateListContext) COMMA() antlr.TerminalNode { } func (s *CreateListContext) ListInit() IListInitContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IListInitContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2744,6 +3059,7 @@ func (s *CreateListContext) ListInit() IListInitContext { return t.(IListInitContext) } + func (s *CreateListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterCreateList(s) @@ -2766,28 +3082,33 @@ func (s *CreateListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type CreateStructContext struct { - *PrimaryContext - op antlr.Token - entries IMapInitializerListContext + PrimaryContext + op antlr.Token + entries IMapInitializerListContext } func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateStructContext { var p = new(CreateStructContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } + func (s *CreateStructContext) GetOp() antlr.Token { return s.op } + func (s *CreateStructContext) SetOp(v antlr.Token) { s.op = v } + func (s *CreateStructContext) GetEntries() IMapInitializerListContext { return s.entries } + func (s *CreateStructContext) SetEntries(v IMapInitializerListContext) { s.entries = v } func (s *CreateStructContext) GetRuleContext() antlr.RuleContext { @@ -2807,10 +3128,10 @@ func (s *CreateStructContext) COMMA() antlr.TerminalNode { } func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IMapInitializerListContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2822,6 +3143,7 @@ func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext { return t.(IMapInitializerListContext) } + func (s *CreateStructContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterCreateStruct(s) @@ -2844,16 +3166,17 @@ func (s *CreateStructContext) Accept(visitor antlr.ParseTreeVisitor) interface{} } } + type ConstantLiteralContext struct { - *PrimaryContext + PrimaryContext } func NewConstantLiteralContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ConstantLiteralContext { var p = new(ConstantLiteralContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } @@ -2863,10 +3186,10 @@ func (s *ConstantLiteralContext) GetRuleContext() antlr.RuleContext { } func (s *ConstantLiteralContext) Literal() ILiteralContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(ILiteralContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2878,6 +3201,7 @@ func (s *ConstantLiteralContext) Literal() ILiteralContext { return t.(ILiteralContext) } + func (s *ConstantLiteralContext) EnterRule(listener 
antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterConstantLiteral(s) @@ -2900,23 +3224,26 @@ func (s *ConstantLiteralContext) Accept(visitor antlr.ParseTreeVisitor) interfac } } + type NestedContext struct { - *PrimaryContext - e IExprContext + PrimaryContext + e IExprContext } func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedContext { var p = new(NestedContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } + func (s *NestedContext) GetE() IExprContext { return s.e } + func (s *NestedContext) SetE(v IExprContext) { s.e = v } func (s *NestedContext) GetRuleContext() antlr.RuleContext { @@ -2932,10 +3259,10 @@ func (s *NestedContext) RPAREN() antlr.TerminalNode { } func (s *NestedContext) Expr() IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -2947,6 +3274,7 @@ func (s *NestedContext) Expr() IExprContext { return t.(IExprContext) } + func (s *NestedContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterNested(s) @@ -2969,27 +3297,29 @@ func (s *NestedContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type CreateMessageContext struct { - *PrimaryContext - leadingDot antlr.Token + PrimaryContext + leadingDot antlr.Token _IDENTIFIER antlr.Token - ids []antlr.Token - s16 antlr.Token - ops []antlr.Token - op antlr.Token - entries IFieldInitializerListContext + ids []antlr.Token + s16 antlr.Token + ops []antlr.Token + op antlr.Token + entries IFieldInitializerListContext } func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateMessageContext { var p = new(CreateMessageContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } + func (s *CreateMessageContext) GetLeadingDot() antlr.Token { return s.leadingDot } func (s *CreateMessageContext) Get_IDENTIFIER() antlr.Token { return s._IDENTIFIER } @@ -2998,6 +3328,7 @@ func (s *CreateMessageContext) GetS16() antlr.Token { return s.s16 } func (s *CreateMessageContext) GetOp() antlr.Token { return s.op } + func (s *CreateMessageContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v } func (s *CreateMessageContext) Set_IDENTIFIER(v antlr.Token) { s._IDENTIFIER = v } @@ -3006,16 +3337,20 @@ func (s *CreateMessageContext) SetS16(v antlr.Token) { s.s16 = v } func (s *CreateMessageContext) SetOp(v antlr.Token) { s.op = v } + func (s *CreateMessageContext) GetIds() []antlr.Token { return s.ids } func (s *CreateMessageContext) GetOps() []antlr.Token { return s.ops } + func (s *CreateMessageContext) SetIds(v []antlr.Token) { s.ids = v } func (s *CreateMessageContext) SetOps(v []antlr.Token) { s.ops = v } + func (s *CreateMessageContext) GetEntries() IFieldInitializerListContext { return s.entries } + func (s *CreateMessageContext) SetEntries(v IFieldInitializerListContext) { s.entries = v } func (s *CreateMessageContext) GetRuleContext() antlr.RuleContext { @@ -3051,10 +3386,10 @@ func (s *CreateMessageContext) DOT(i int) antlr.TerminalNode { } func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListContext { - var 
t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IFieldInitializerListContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -3066,6 +3401,7 @@ func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListConte return t.(IFieldInitializerListContext) } + func (s *CreateMessageContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterCreateMessage(s) @@ -3088,38 +3424,43 @@ func (s *CreateMessageContext) Accept(visitor antlr.ParseTreeVisitor) interface{ } } + type IdentOrGlobalCallContext struct { - *PrimaryContext + PrimaryContext leadingDot antlr.Token - id antlr.Token - op antlr.Token - args IExprListContext + id antlr.Token + op antlr.Token + args IExprListContext } func NewIdentOrGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IdentOrGlobalCallContext { var p = new(IdentOrGlobalCallContext) - p.PrimaryContext = NewEmptyPrimaryContext() + InitEmptyPrimaryContext(&p.PrimaryContext) p.parser = parser - p.CopyFrom(ctx.(*PrimaryContext)) + p.CopyAll(ctx.(*PrimaryContext)) return p } + func (s *IdentOrGlobalCallContext) GetLeadingDot() antlr.Token { return s.leadingDot } func (s *IdentOrGlobalCallContext) GetId() antlr.Token { return s.id } func (s *IdentOrGlobalCallContext) GetOp() antlr.Token { return s.op } + func (s *IdentOrGlobalCallContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v } func (s *IdentOrGlobalCallContext) SetId(v antlr.Token) { s.id = v } func (s *IdentOrGlobalCallContext) SetOp(v antlr.Token) { s.op = v } + func (s *IdentOrGlobalCallContext) GetArgs() IExprListContext { return s.args } + func (s *IdentOrGlobalCallContext) SetArgs(v IExprListContext) { s.args = v } func (s *IdentOrGlobalCallContext) GetRuleContext() antlr.RuleContext { @@ -3143,10 +3484,10 @@ func (s *IdentOrGlobalCallContext) LPAREN() antlr.TerminalNode { } func (s *IdentOrGlobalCallContext) ExprList() IExprListContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprListContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -3158,6 +3499,7 @@ func (s *IdentOrGlobalCallContext) ExprList() IExprListContext { return t.(IExprListContext) } + func (s *IdentOrGlobalCallContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterIdentOrGlobalCall(s) @@ -3180,40 +3522,31 @@ func (s *IdentOrGlobalCallContext) Accept(visitor antlr.ParseTreeVisitor) interf } } -func (p *CELParser) Primary() (localctx IPrimaryContext) { - this := p - _ = this + +func (p *CELParser) Primary() (localctx IPrimaryContext) { localctx = NewPrimaryContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 16, CELParserRULE_primary) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.SetState(180) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 25, p.GetParserRuleContext()) { + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 25, p.GetParserRuleContext()) { case 1: localctx = NewIdentOrGlobalCallContext(p, localctx) 
p.EnterOuterAlt(localctx, 1) p.SetState(130) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserDOT { { p.SetState(129) @@ -3221,6 +3554,10 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserDOT) localctx.(*IdentOrGlobalCallContext).leadingDot = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -3230,28 +3567,42 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserIDENTIFIER) localctx.(*IdentOrGlobalCallContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(138) p.GetErrorHandler().Sync(p) - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 { + + if p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 { { p.SetState(133) var _m = p.Match(CELParserLPAREN) localctx.(*IdentOrGlobalCallContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(135) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) - if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135762105344) != 0 { + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) { { p.SetState(134) var _x = p.ExprList() + localctx.(*IdentOrGlobalCallContext).args = _x } @@ -3259,29 +3610,46 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { { p.SetState(137) p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + } else if p.HasError() { // JIM + goto errorExit } + case 2: localctx = NewNestedContext(p, localctx) p.EnterOuterAlt(localctx, 2) { p.SetState(140) p.Match(CELParserLPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(141) var _x = p.Expr() + localctx.(*NestedContext).e = _x } { p.SetState(142) p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 3: localctx = NewCreateListContext(p, localctx) p.EnterOuterAlt(localctx, 3) @@ -3291,37 +3659,59 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserLBRACKET) localctx.(*CreateListContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(146) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) - if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 { + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) { { p.SetState(145) var _x = p.ListInit() + localctx.(*CreateListContext).elems = _x } } p.SetState(149) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserCOMMA { { p.SetState(148) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } { p.SetState(151) p.Match(CELParserRPRACKET) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 4: localctx = NewCreateStructContext(p, localctx) p.EnterOuterAlt(localctx, 4) @@ -3331,44 +3721,70 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserLBRACE) localctx.(*CreateStructContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(154) 
p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) - if (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&135763153920) != 0 { + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) { { p.SetState(153) var _x = p.MapInitializerList() + localctx.(*CreateStructContext).entries = _x } } p.SetState(157) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserCOMMA { { p.SetState(156) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } { p.SetState(159) p.Match(CELParserRBRACE) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 5: localctx = NewCreateMessageContext(p, localctx) p.EnterOuterAlt(localctx, 5) p.SetState(161) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserDOT { { p.SetState(160) @@ -3376,6 +3792,10 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserDOT) localctx.(*CreateMessageContext).leadingDot = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -3385,12 +3805,20 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserIDENTIFIER) localctx.(*CreateMessageContext)._IDENTIFIER = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) p.SetState(168) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + for _la == CELParserDOT { { p.SetState(164) @@ -3398,6 +3826,10 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserDOT) localctx.(*CreateMessageContext).s16 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*CreateMessageContext).ops = append(localctx.(*CreateMessageContext).ops, localctx.(*CreateMessageContext).s16) { @@ -3406,11 +3838,19 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserIDENTIFIER) localctx.(*CreateMessageContext)._IDENTIFIER = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) + p.SetState(170) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) } { @@ -3419,37 +3859,59 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { var _m = p.Match(CELParserLBRACE) localctx.(*CreateMessageContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } p.SetState(173) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK || _la == CELParserIDENTIFIER { { p.SetState(172) var _x = p.FieldInitializerList() + localctx.(*CreateMessageContext).entries = _x } } p.SetState(176) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserCOMMA { { p.SetState(175) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } { p.SetState(178) p.Match(CELParserRBRACE) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + 
case 6: localctx = NewConstantLiteralContext(p, localctx) p.EnterOuterAlt(localctx, 6) @@ -3458,11 +3920,25 @@ func (p *CELParser) Primary() (localctx IPrimaryContext) { p.Literal() } + case antlr.ATNInvalidAltNumber: + goto errorExit } + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IExprListContext is an interface to support dynamic dispatch. type IExprListContext interface { antlr.ParserRuleContext @@ -3473,14 +3949,18 @@ type IExprListContext interface { // Get_expr returns the _expr rule contexts. Get_expr() IExprContext + // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) + // GetE returns the e rule context list. GetE() []IExprContext + // SetE sets the e rule context list. - SetE([]IExprContext) + SetE([]IExprContext) + // Getter signatures AllExpr() []IExprContext @@ -3493,25 +3973,30 @@ type IExprListContext interface { } type ExprListContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - _expr IExprContext - e []IExprContext + _expr IExprContext + e []IExprContext } func NewEmptyExprListContext() *ExprListContext { var p = new(ExprListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_exprList return p } +func InitEmptyExprListContext(p *ExprListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_exprList +} + func (*ExprListContext) IsExprListContext() {} func NewExprListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprListContext { var p = new(ExprListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_exprList @@ -3523,12 +4008,16 @@ func (s *ExprListContext) GetParser() antlr.Parser { return s.parser } func (s *ExprListContext) Get_expr() IExprContext { return s._expr } + func (s *ExprListContext) Set_expr(v IExprContext) { s._expr = v } + func (s *ExprListContext) GetE() []IExprContext { return s.e } + func (s *ExprListContext) SetE(v []IExprContext) { s.e = v } + func (s *ExprListContext) AllExpr() []IExprContext { children := s.GetChildren() len := 0 @@ -3551,12 +4040,12 @@ func (s *ExprListContext) AllExpr() []IExprContext { } func (s *ExprListContext) Expr(i int) IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -3586,6 +4075,7 @@ func (s *ExprListContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *ExprListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterExprList(s) @@ -3608,65 +4098,76 @@ func (s *ExprListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) ExprList() (localctx IExprListContext) { - this := p - _ = this + + +func (p *CELParser) ExprList() (localctx IExprListContext) { localctx = NewExprListContext(p, 
p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 18, CELParserRULE_exprList) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) { p.SetState(182) var _x = p.Expr() + localctx.(*ExprListContext)._expr = _x } localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) p.SetState(187) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + for _la == CELParserCOMMA { { p.SetState(183) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(184) var _x = p.Expr() + localctx.(*ExprListContext)._expr = _x } localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) + p.SetState(189) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IListInitContext is an interface to support dynamic dispatch. type IListInitContext interface { antlr.ParserRuleContext @@ -3677,14 +4178,18 @@ type IListInitContext interface { // Get_optExpr returns the _optExpr rule contexts. Get_optExpr() IOptExprContext + // Set_optExpr sets the _optExpr rule contexts. Set_optExpr(IOptExprContext) + // GetElems returns the elems rule context list. GetElems() []IOptExprContext + // SetElems sets the elems rule context list. 
- SetElems([]IOptExprContext) + SetElems([]IOptExprContext) + // Getter signatures AllOptExpr() []IOptExprContext @@ -3697,25 +4202,30 @@ type IListInitContext interface { } type ListInitContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - _optExpr IOptExprContext - elems []IOptExprContext + antlr.BaseParserRuleContext + parser antlr.Parser + _optExpr IOptExprContext + elems []IOptExprContext } func NewEmptyListInitContext() *ListInitContext { var p = new(ListInitContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_listInit return p } +func InitEmptyListInitContext(p *ListInitContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_listInit +} + func (*ListInitContext) IsListInitContext() {} func NewListInitContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ListInitContext { var p = new(ListInitContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_listInit @@ -3727,12 +4237,16 @@ func (s *ListInitContext) GetParser() antlr.Parser { return s.parser } func (s *ListInitContext) Get_optExpr() IOptExprContext { return s._optExpr } + func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } + func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems } + func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v } + func (s *ListInitContext) AllOptExpr() []IOptExprContext { children := s.GetChildren() len := 0 @@ -3755,12 +4269,12 @@ func (s *ListInitContext) AllOptExpr() []IOptExprContext { } func (s *ListInitContext) OptExpr(i int) IOptExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptExprContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -3790,6 +4304,7 @@ func (s *ListInitContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *ListInitContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterListInit(s) @@ -3812,29 +4327,12 @@ func (s *ListInitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) ListInit() (localctx IListInitContext) { - this := p - _ = this - localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 20, CELParserRULE_listInit) - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() +func (p *CELParser) ListInit() (localctx IListInitContext) { + localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 20, CELParserRULE_listInit) var _alt int p.EnterOuterAlt(localctx, 1) @@ -3843,37 +4341,68 @@ func (p *CELParser) ListInit() (localctx IListInitContext) { var _x = p.OptExpr() + localctx.(*ListInitContext)._optExpr = _x } localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) p.SetState(195) 
p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 27, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { p.SetState(191) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(192) var _x = p.OptExpr() + localctx.(*ListInitContext)._optExpr = _x } localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) + } p.SetState(197) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 27, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IFieldInitializerListContext is an interface to support dynamic dispatch. type IFieldInitializerListContext interface { antlr.ParserRuleContext @@ -3882,40 +4411,48 @@ type IFieldInitializerListContext interface { GetParser() antlr.Parser // GetS21 returns the s21 token. - GetS21() antlr.Token + GetS21() antlr.Token + // SetS21 sets the s21 token. - SetS21(antlr.Token) + SetS21(antlr.Token) + // GetCols returns the cols token list. GetCols() []antlr.Token + // SetCols sets the cols token list. SetCols([]antlr.Token) + // Get_optField returns the _optField rule contexts. Get_optField() IOptFieldContext // Get_expr returns the _expr rule contexts. Get_expr() IExprContext + // Set_optField sets the _optField rule contexts. Set_optField(IOptFieldContext) // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) + // GetFields returns the fields rule context list. GetFields() []IOptFieldContext // GetValues returns the values rule context list. GetValues() []IExprContext + // SetFields sets the fields rule context list. - SetFields([]IOptFieldContext) + SetFields([]IOptFieldContext) // SetValues sets the values rule context list. 
- SetValues([]IExprContext) + SetValues([]IExprContext) + // Getter signatures AllOptField() []IOptFieldContext @@ -3932,29 +4469,34 @@ type IFieldInitializerListContext interface { } type FieldInitializerListContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - _optField IOptFieldContext - fields []IOptFieldContext - s21 antlr.Token - cols []antlr.Token - _expr IExprContext - values []IExprContext + antlr.BaseParserRuleContext + parser antlr.Parser + _optField IOptFieldContext + fields []IOptFieldContext + s21 antlr.Token + cols []antlr.Token + _expr IExprContext + values []IExprContext } func NewEmptyFieldInitializerListContext() *FieldInitializerListContext { var p = new(FieldInitializerListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_fieldInitializerList return p } +func InitEmptyFieldInitializerListContext(p *FieldInitializerListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_fieldInitializerList +} + func (*FieldInitializerListContext) IsFieldInitializerListContext() {} func NewFieldInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *FieldInitializerListContext { var p = new(FieldInitializerListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_fieldInitializerList @@ -3966,28 +4508,36 @@ func (s *FieldInitializerListContext) GetParser() antlr.Parser { return s.parser func (s *FieldInitializerListContext) GetS21() antlr.Token { return s.s21 } + func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols } + func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + func (s *FieldInitializerListContext) Get_optField() IOptFieldContext { return s._optField } func (s *FieldInitializerListContext) Get_expr() IExprContext { return s._expr } + func (s *FieldInitializerListContext) Set_optField(v IOptFieldContext) { s._optField = v } func (s *FieldInitializerListContext) Set_expr(v IExprContext) { s._expr = v } + func (s *FieldInitializerListContext) GetFields() []IOptFieldContext { return s.fields } func (s *FieldInitializerListContext) GetValues() []IExprContext { return s.values } + func (s *FieldInitializerListContext) SetFields(v []IOptFieldContext) { s.fields = v } func (s *FieldInitializerListContext) SetValues(v []IExprContext) { s.values = v } + func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext { children := s.GetChildren() len := 0 @@ -4010,12 +4560,12 @@ func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext { } func (s *FieldInitializerListContext) OptField(i int) IOptFieldContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptFieldContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -4059,12 +4609,12 @@ func (s *FieldInitializerListContext) AllExpr() []IExprContext { } func (s *FieldInitializerListContext) Expr(i int) IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { - t = 
ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -4094,6 +4644,7 @@ func (s *FieldInitializerListContext) ToStringTree(ruleNames []string, recog ant return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *FieldInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterFieldInitializerList(s) @@ -4116,29 +4667,12 @@ func (s *FieldInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) int } } -func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) { - this := p - _ = this - localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList) - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() +func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) { + localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList) var _alt int p.EnterOuterAlt(localctx, 1) @@ -4147,6 +4681,7 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _x = p.OptField() + localctx.(*FieldInitializerListContext)._optField = _x } localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) @@ -4156,6 +4691,10 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _m = p.Match(CELParserCOLON) localctx.(*FieldInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21) { @@ -4163,24 +4702,35 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _x = p.Expr() + localctx.(*FieldInitializerListContext)._expr = _x } localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) p.SetState(208) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 28, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { p.SetState(201) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(202) var _x = p.OptField() + localctx.(*FieldInitializerListContext)._optField = _x } localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) @@ -4190,6 +4740,10 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _m = p.Match(CELParserCOLON) localctx.(*FieldInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, 
localctx.(*FieldInitializerListContext).s21) { @@ -4197,19 +4751,40 @@ func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContex var _x = p.Expr() + localctx.(*FieldInitializerListContext)._expr = _x } localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) + } p.SetState(210) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 28, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IOptFieldContext is an interface to support dynamic dispatch. type IOptFieldContext interface { antlr.ParserRuleContext @@ -4218,10 +4793,12 @@ type IOptFieldContext interface { GetParser() antlr.Parser // GetOpt returns the opt token. - GetOpt() antlr.Token + GetOpt() antlr.Token + // SetOpt sets the opt token. - SetOpt(antlr.Token) + SetOpt(antlr.Token) + // Getter signatures IDENTIFIER() antlr.TerminalNode @@ -4232,24 +4809,29 @@ type IOptFieldContext interface { } type OptFieldContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - opt antlr.Token + opt antlr.Token } func NewEmptyOptFieldContext() *OptFieldContext { var p = new(OptFieldContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_optField return p } +func InitEmptyOptFieldContext(p *OptFieldContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optField +} + func (*OptFieldContext) IsOptFieldContext() {} func NewOptFieldContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptFieldContext { var p = new(OptFieldContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_optField @@ -4261,8 +4843,10 @@ func (s *OptFieldContext) GetParser() antlr.Parser { return s.parser } func (s *OptFieldContext) GetOpt() antlr.Token { return s.opt } + func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v } + func (s *OptFieldContext) IDENTIFIER() antlr.TerminalNode { return s.GetToken(CELParserIDENTIFIER, 0) } @@ -4279,6 +4863,7 @@ func (s *OptFieldContext) ToStringTree(ruleNames []string, recog antlr.Recognize return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *OptFieldContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterOptField(s) @@ -4301,35 +4886,23 @@ func (s *OptFieldContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) OptField() (localctx IOptFieldContext) { - this := p - _ = this + + +func (p *CELParser) OptField() (localctx IOptFieldContext) { localctx = NewOptFieldContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 24, CELParserRULE_optField) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - 
if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) p.SetState(212) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK { { p.SetState(211) @@ -4337,17 +4910,38 @@ func (p *CELParser) OptField() (localctx IOptFieldContext) { var _m = p.Match(CELParserQUESTIONMARK) localctx.(*OptFieldContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } { p.SetState(214) p.Match(CELParserIDENTIFIER) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IMapInitializerListContext is an interface to support dynamic dispatch. type IMapInitializerListContext interface { antlr.ParserRuleContext @@ -4356,40 +4950,48 @@ type IMapInitializerListContext interface { GetParser() antlr.Parser // GetS21 returns the s21 token. - GetS21() antlr.Token + GetS21() antlr.Token + // SetS21 sets the s21 token. - SetS21(antlr.Token) + SetS21(antlr.Token) + // GetCols returns the cols token list. GetCols() []antlr.Token + // SetCols sets the cols token list. SetCols([]antlr.Token) + // Get_optExpr returns the _optExpr rule contexts. Get_optExpr() IOptExprContext // Get_expr returns the _expr rule contexts. Get_expr() IExprContext + // Set_optExpr sets the _optExpr rule contexts. Set_optExpr(IOptExprContext) // Set_expr sets the _expr rule contexts. Set_expr(IExprContext) + // GetKeys returns the keys rule context list. GetKeys() []IOptExprContext // GetValues returns the values rule context list. GetValues() []IExprContext + // SetKeys sets the keys rule context list. - SetKeys([]IOptExprContext) + SetKeys([]IOptExprContext) // SetValues sets the values rule context list. 
- SetValues([]IExprContext) + SetValues([]IExprContext) + // Getter signatures AllOptExpr() []IOptExprContext @@ -4406,29 +5008,34 @@ type IMapInitializerListContext interface { } type MapInitializerListContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - _optExpr IOptExprContext - keys []IOptExprContext - s21 antlr.Token - cols []antlr.Token - _expr IExprContext - values []IExprContext + antlr.BaseParserRuleContext + parser antlr.Parser + _optExpr IOptExprContext + keys []IOptExprContext + s21 antlr.Token + cols []antlr.Token + _expr IExprContext + values []IExprContext } func NewEmptyMapInitializerListContext() *MapInitializerListContext { var p = new(MapInitializerListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_mapInitializerList return p } +func InitEmptyMapInitializerListContext(p *MapInitializerListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_mapInitializerList +} + func (*MapInitializerListContext) IsMapInitializerListContext() {} func NewMapInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MapInitializerListContext { var p = new(MapInitializerListContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_mapInitializerList @@ -4440,28 +5047,36 @@ func (s *MapInitializerListContext) GetParser() antlr.Parser { return s.parser } func (s *MapInitializerListContext) GetS21() antlr.Token { return s.s21 } + func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols } + func (s *MapInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + func (s *MapInitializerListContext) Get_optExpr() IOptExprContext { return s._optExpr } func (s *MapInitializerListContext) Get_expr() IExprContext { return s._expr } + func (s *MapInitializerListContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } func (s *MapInitializerListContext) Set_expr(v IExprContext) { s._expr = v } + func (s *MapInitializerListContext) GetKeys() []IOptExprContext { return s.keys } func (s *MapInitializerListContext) GetValues() []IExprContext { return s.values } + func (s *MapInitializerListContext) SetKeys(v []IOptExprContext) { s.keys = v } func (s *MapInitializerListContext) SetValues(v []IExprContext) { s.values = v } + func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext { children := s.GetChildren() len := 0 @@ -4484,12 +5099,12 @@ func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext { } func (s *MapInitializerListContext) OptExpr(i int) IOptExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IOptExprContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -4533,12 +5148,12 @@ func (s *MapInitializerListContext) AllExpr() []IExprContext { } func (s *MapInitializerListContext) Expr(i int) IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; j := 0 for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { if j == i { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } j++ @@ -4568,6 +5183,7 @@ func (s 
*MapInitializerListContext) ToStringTree(ruleNames []string, recog antlr return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *MapInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterMapInitializerList(s) @@ -4590,29 +5206,12 @@ func (s *MapInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) inter } } -func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { - this := p - _ = this - localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList) - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() +func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { + localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList) var _alt int p.EnterOuterAlt(localctx, 1) @@ -4621,6 +5220,7 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _x = p.OptExpr() + localctx.(*MapInitializerListContext)._optExpr = _x } localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr) @@ -4630,6 +5230,10 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _m = p.Match(CELParserCOLON) localctx.(*MapInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) { @@ -4637,24 +5241,35 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _x = p.Expr() + localctx.(*MapInitializerListContext)._expr = _x } localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) p.SetState(226) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 30, p.GetParserRuleContext()) - + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { if _alt == 1 { { p.SetState(219) p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } { p.SetState(220) var _x = p.OptExpr() + localctx.(*MapInitializerListContext)._optExpr = _x } localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr) @@ -4664,6 +5279,10 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _m = p.Match(CELParserCOLON) localctx.(*MapInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) { @@ -4671,19 +5290,40 @@ func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { var _x = p.Expr() + 
localctx.(*MapInitializerListContext)._expr = _x } localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) + } p.SetState(228) p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 30, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // IOptExprContext is an interface to support dynamic dispatch. type IOptExprContext interface { antlr.ParserRuleContext @@ -4692,17 +5332,21 @@ type IOptExprContext interface { GetParser() antlr.Parser // GetOpt returns the opt token. - GetOpt() antlr.Token + GetOpt() antlr.Token + // SetOpt sets the opt token. - SetOpt(antlr.Token) + SetOpt(antlr.Token) + // GetE returns the e rule contexts. GetE() IExprContext + // SetE sets the e rule contexts. SetE(IExprContext) + // Getter signatures Expr() IExprContext QUESTIONMARK() antlr.TerminalNode @@ -4712,25 +5356,30 @@ type IOptExprContext interface { } type OptExprContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser - opt antlr.Token - e IExprContext + opt antlr.Token + e IExprContext } func NewEmptyOptExprContext() *OptExprContext { var p = new(OptExprContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_optExpr return p } +func InitEmptyOptExprContext(p *OptExprContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optExpr +} + func (*OptExprContext) IsOptExprContext() {} func NewOptExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptExprContext { var p = new(OptExprContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_optExpr @@ -4742,17 +5391,21 @@ func (s *OptExprContext) GetParser() antlr.Parser { return s.parser } func (s *OptExprContext) GetOpt() antlr.Token { return s.opt } + func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v } + func (s *OptExprContext) GetE() IExprContext { return s.e } + func (s *OptExprContext) SetE(v IExprContext) { s.e = v } + func (s *OptExprContext) Expr() IExprContext { - var t antlr.RuleContext + var t antlr.RuleContext; for _, ctx := range s.GetChildren() { if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) + t = ctx.(antlr.RuleContext); break } } @@ -4776,6 +5429,7 @@ func (s *OptExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer return antlr.TreesStringTree(s, ruleNames, recog) } + func (s *OptExprContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterOptExpr(s) @@ -4798,35 +5452,23 @@ func (s *OptExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) OptExpr() (localctx IOptExprContext) { - this := p - _ = this + + +func (p *CELParser) 
OptExpr() (localctx IOptExprContext) { localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 28, CELParserRULE_optExpr) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.EnterOuterAlt(localctx, 1) p.SetState(230) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserQUESTIONMARK { { p.SetState(229) @@ -4834,6 +5476,10 @@ func (p *CELParser) OptExpr() (localctx IOptExprContext) { var _m = p.Match(CELParserQUESTIONMARK) localctx.(*OptExprContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -4842,12 +5488,26 @@ func (p *CELParser) OptExpr() (localctx IOptExprContext) { var _x = p.Expr() + localctx.(*OptExprContext).e = _x } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + // ILiteralContext is an interface to support dynamic dispatch. type ILiteralContext interface { antlr.ParserRuleContext @@ -4859,23 +5519,28 @@ type ILiteralContext interface { } type LiteralContext struct { - *antlr.BaseParserRuleContext + antlr.BaseParserRuleContext parser antlr.Parser } func NewEmptyLiteralContext() *LiteralContext { var p = new(LiteralContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) p.RuleIndex = CELParserRULE_literal return p } +func InitEmptyLiteralContext(p *LiteralContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_literal +} + func (*LiteralContext) IsLiteralContext() {} func NewLiteralContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *LiteralContext { var p = new(LiteralContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) p.parser = parser p.RuleIndex = CELParserRULE_literal @@ -4885,8 +5550,8 @@ func NewLiteralContext(parser antlr.Parser, parent antlr.ParserRuleContext, invo func (s *LiteralContext) GetParser() antlr.Parser { return s.parser } -func (s *LiteralContext) CopyFrom(ctx *LiteralContext) { - s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext) +func (s *LiteralContext) CopyAll(ctx *LiteralContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) } func (s *LiteralContext) GetRuleContext() antlr.RuleContext { @@ -4897,23 +5562,28 @@ func (s *LiteralContext) ToStringTree(ruleNames []string, recog antlr.Recognizer return antlr.TreesStringTree(s, ruleNames, recog) } + + + type BytesContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewBytesContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BytesContext { var p = new(BytesContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *BytesContext) GetTok() antlr.Token { return s.tok } + func (s *BytesContext) SetTok(v 
antlr.Token) { s.tok = v } func (s *BytesContext) GetRuleContext() antlr.RuleContext { @@ -4924,6 +5594,7 @@ func (s *BytesContext) BYTES() antlr.TerminalNode { return s.GetToken(CELParserBYTES, 0) } + func (s *BytesContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBytes(s) @@ -4946,23 +5617,26 @@ func (s *BytesContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type UintContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewUintContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UintContext { var p = new(UintContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *UintContext) GetTok() antlr.Token { return s.tok } + func (s *UintContext) SetTok(v antlr.Token) { s.tok = v } func (s *UintContext) GetRuleContext() antlr.RuleContext { @@ -4973,6 +5647,7 @@ func (s *UintContext) NUM_UINT() antlr.TerminalNode { return s.GetToken(CELParserNUM_UINT, 0) } + func (s *UintContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterUint(s) @@ -4995,23 +5670,26 @@ func (s *UintContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type NullContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewNullContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NullContext { var p = new(NullContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *NullContext) GetTok() antlr.Token { return s.tok } + func (s *NullContext) SetTok(v antlr.Token) { s.tok = v } func (s *NullContext) GetRuleContext() antlr.RuleContext { @@ -5022,6 +5700,7 @@ func (s *NullContext) NUL() antlr.TerminalNode { return s.GetToken(CELParserNUL, 0) } + func (s *NullContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterNull(s) @@ -5044,23 +5723,26 @@ func (s *NullContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type BoolFalseContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewBoolFalseContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolFalseContext { var p = new(BoolFalseContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok } + func (s *BoolFalseContext) SetTok(v antlr.Token) { s.tok = v } func (s *BoolFalseContext) GetRuleContext() antlr.RuleContext { @@ -5071,6 +5753,7 @@ func (s *BoolFalseContext) CEL_FALSE() antlr.TerminalNode { return s.GetToken(CELParserCEL_FALSE, 0) } + func (s *BoolFalseContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBoolFalse(s) @@ -5093,23 +5776,26 @@ func (s *BoolFalseContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type StringContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewStringContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *StringContext { var p = new(StringContext) - p.LiteralContext = NewEmptyLiteralContext() + 
InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *StringContext) GetTok() antlr.Token { return s.tok } + func (s *StringContext) SetTok(v antlr.Token) { s.tok = v } func (s *StringContext) GetRuleContext() antlr.RuleContext { @@ -5120,6 +5806,7 @@ func (s *StringContext) STRING() antlr.TerminalNode { return s.GetToken(CELParserSTRING, 0) } + func (s *StringContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterString(s) @@ -5142,26 +5829,29 @@ func (s *StringContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type DoubleContext struct { - *LiteralContext + LiteralContext sign antlr.Token - tok antlr.Token + tok antlr.Token } func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleContext { var p = new(DoubleContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *DoubleContext) GetSign() antlr.Token { return s.sign } func (s *DoubleContext) GetTok() antlr.Token { return s.tok } + func (s *DoubleContext) SetSign(v antlr.Token) { s.sign = v } func (s *DoubleContext) SetTok(v antlr.Token) { s.tok = v } @@ -5178,6 +5868,7 @@ func (s *DoubleContext) MINUS() antlr.TerminalNode { return s.GetToken(CELParserMINUS, 0) } + func (s *DoubleContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterDouble(s) @@ -5200,23 +5891,26 @@ func (s *DoubleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type BoolTrueContext struct { - *LiteralContext + LiteralContext tok antlr.Token } func NewBoolTrueContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolTrueContext { var p = new(BoolTrueContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok } + func (s *BoolTrueContext) SetTok(v antlr.Token) { s.tok = v } func (s *BoolTrueContext) GetRuleContext() antlr.RuleContext { @@ -5227,6 +5921,7 @@ func (s *BoolTrueContext) CEL_TRUE() antlr.TerminalNode { return s.GetToken(CELParserCEL_TRUE, 0) } + func (s *BoolTrueContext) EnterRule(listener antlr.ParseTreeListener) { if listenerT, ok := listener.(CELListener); ok { listenerT.EnterBoolTrue(s) @@ -5249,26 +5944,29 @@ func (s *BoolTrueContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } + type IntContext struct { - *LiteralContext + LiteralContext sign antlr.Token - tok antlr.Token + tok antlr.Token } func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext { var p = new(IntContext) - p.LiteralContext = NewEmptyLiteralContext() + InitEmptyLiteralContext(&p.LiteralContext) p.parser = parser - p.CopyFrom(ctx.(*LiteralContext)) + p.CopyAll(ctx.(*LiteralContext)) return p } + func (s *IntContext) GetSign() antlr.Token { return s.sign } func (s *IntContext) GetTok() antlr.Token { return s.tok } + func (s *IntContext) SetSign(v antlr.Token) { s.sign = v } func (s *IntContext) SetTok(v antlr.Token) { s.tok = v } @@ -5285,6 +5983,7 @@ func (s *IntContext) MINUS() antlr.TerminalNode { return s.GetToken(CELParserMINUS, 0) } + func (s *IntContext) EnterRule(listener antlr.ParseTreeListener) { if 
listenerT, ok := listener.(CELListener); ok { listenerT.EnterInt(s) @@ -5307,40 +6006,31 @@ func (s *IntContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { } } -func (p *CELParser) Literal() (localctx ILiteralContext) { - this := p - _ = this + +func (p *CELParser) Literal() (localctx ILiteralContext) { localctx = NewLiteralContext(p, p.GetParserRuleContext(), p.GetState()) p.EnterRule(localctx, 30, CELParserRULE_literal) var _la int - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - p.SetState(248) p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 34, p.GetParserRuleContext()) { + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 34, p.GetParserRuleContext()) { case 1: localctx = NewIntContext(p, localctx) p.EnterOuterAlt(localctx, 1) p.SetState(235) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserMINUS { { p.SetState(234) @@ -5348,6 +6038,10 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserMINUS) localctx.(*IntContext).sign = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -5357,8 +6051,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserNUM_INT) localctx.(*IntContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 2: localctx = NewUintContext(p, localctx) p.EnterOuterAlt(localctx, 2) @@ -5368,15 +6067,24 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserNUM_UINT) localctx.(*UintContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 3: localctx = NewDoubleContext(p, localctx) p.EnterOuterAlt(localctx, 3) p.SetState(240) p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } _la = p.GetTokenStream().LA(1) + if _la == CELParserMINUS { { p.SetState(239) @@ -5384,6 +6092,10 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserMINUS) localctx.(*DoubleContext).sign = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } } @@ -5393,8 +6105,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserNUM_FLOAT) localctx.(*DoubleContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 4: localctx = NewStringContext(p, localctx) p.EnterOuterAlt(localctx, 4) @@ -5404,8 +6121,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserSTRING) localctx.(*StringContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 5: localctx = NewBytesContext(p, localctx) p.EnterOuterAlt(localctx, 5) @@ -5415,8 +6137,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserBYTES) localctx.(*BytesContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 6: localctx = NewBoolTrueContext(p, localctx) p.EnterOuterAlt(localctx, 6) @@ -5426,8 +6153,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = 
p.Match(CELParserCEL_TRUE) localctx.(*BoolTrueContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 7: localctx = NewBoolFalseContext(p, localctx) p.EnterOuterAlt(localctx, 7) @@ -5437,8 +6169,13 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserCEL_FALSE) localctx.(*BoolFalseContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case 8: localctx = NewNullContext(p, localctx) p.EnterOuterAlt(localctx, 8) @@ -5448,35 +6185,48 @@ func (p *CELParser) Literal() (localctx ILiteralContext) { var _m = p.Match(CELParserNUL) localctx.(*NullContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } } + case antlr.ATNInvalidAltNumber: + goto errorExit } + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() return localctx + goto errorExit // Trick to prevent compiler error if the label is not used } + func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool { switch ruleIndex { case 4: - var t *RelationContext = nil - if localctx != nil { - t = localctx.(*RelationContext) - } - return p.Relation_Sempred(t, predIndex) + var t *RelationContext = nil + if localctx != nil { t = localctx.(*RelationContext) } + return p.Relation_Sempred(t, predIndex) case 5: - var t *CalcContext = nil - if localctx != nil { - t = localctx.(*CalcContext) - } - return p.Calc_Sempred(t, predIndex) + var t *CalcContext = nil + if localctx != nil { t = localctx.(*CalcContext) } + return p.Calc_Sempred(t, predIndex) case 7: - var t *MemberContext = nil - if localctx != nil { - t = localctx.(*MemberContext) - } - return p.Member_Sempred(t, predIndex) + var t *MemberContext = nil + if localctx != nil { t = localctx.(*MemberContext) } + return p.Member_Sempred(t, predIndex) + default: panic("No predicate with index: " + fmt.Sprint(ruleIndex)) @@ -5484,12 +6234,9 @@ func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int } func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) bool { - this := p - _ = this - switch predIndex { case 0: - return p.Precpred(p.GetParserRuleContext(), 1) + return p.Precpred(p.GetParserRuleContext(), 1) default: panic("No predicate with index: " + fmt.Sprint(predIndex)) @@ -5497,15 +6244,12 @@ func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) } func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool { - this := p - _ = this - switch predIndex { case 1: - return p.Precpred(p.GetParserRuleContext(), 2) + return p.Precpred(p.GetParserRuleContext(), 2) case 2: - return p.Precpred(p.GetParserRuleContext(), 1) + return p.Precpred(p.GetParserRuleContext(), 1) default: panic("No predicate with index: " + fmt.Sprint(predIndex)) @@ -5513,20 +6257,18 @@ func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool } func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bool { - this := p - _ = this - switch predIndex { case 3: - return p.Precpred(p.GetParserRuleContext(), 3) + return p.Precpred(p.GetParserRuleContext(), 3) case 4: - return p.Precpred(p.GetParserRuleContext(), 2) + return p.Precpred(p.GetParserRuleContext(), 2) case 5: - return p.Precpred(p.GetParserRuleContext(), 1) + return p.Precpred(p.GetParserRuleContext(), 1) default: 
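	// As in the 4.12 output, an unknown predicate index panics below; the
	// substantive changes in this regeneration are the HasError()/errorExit
	// error flow in the rule methods and the by-value embedding of
	// antlr.BaseParserRuleContext initialized via InitBaseParserRuleContext.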
panic("No predicate with index: " + fmt.Sprint(predIndex)) } } + diff --git a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go index 2c54e2cb0..d2fbd563a 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go +++ b/constraint/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go @@ -1,7 +1,8 @@ -// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT. +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. package gen // CEL -import "github.com/antlr/antlr4/runtime/Go/antlr/v4" +import "github.com/antlr4-go/antlr/v4" + // A complete Visitor for a parse tree produced by CELParser. type CELVisitor interface { @@ -105,4 +106,5 @@ type CELVisitor interface { // Visit a parse tree produced by CELParser#Null. VisitNull(ctx *NullContext) interface{} -} + +} \ No newline at end of file diff --git a/constraint/vendor/github.com/google/cel-go/parser/gen/generate.sh b/constraint/vendor/github.com/google/cel-go/parser/gen/generate.sh index 389107c6a..27a9559f7 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/gen/generate.sh +++ b/constraint/vendor/github.com/google/cel-go/parser/gen/generate.sh @@ -27,7 +27,7 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # Generate AntLR artifacts. -java -Xmx500M -cp ${DIR}/antlr-4.12.0-complete.jar org.antlr.v4.Tool \ +java -Xmx500M -cp ${DIR}/antlr-4.13.1-complete.jar org.antlr.v4.Tool \ -Dlanguage=Go \ -package gen \ -o ${DIR} \ diff --git a/constraint/vendor/github.com/google/cel-go/parser/helper.go b/constraint/vendor/github.com/google/cel-go/parser/helper.go index a5f29e3d7..9f09ead0e 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/helper.go +++ b/constraint/vendor/github.com/google/cel-go/parser/helper.go @@ -17,284 +17,229 @@ package parser import ( "sync" - antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4" + antlr "github.com/antlr4-go/antlr/v4" "github.com/google/cel-go/common" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" ) type parserHelper struct { - source common.Source - nextID int64 - positions map[int64]int32 - macroCalls map[int64]*exprpb.Expr + exprFactory ast.ExprFactory + source common.Source + sourceInfo *ast.SourceInfo + nextID int64 } -func newParserHelper(source common.Source) *parserHelper { +func newParserHelper(source common.Source, fac ast.ExprFactory) *parserHelper { return &parserHelper{ - source: source, - nextID: 1, - positions: make(map[int64]int32), - macroCalls: make(map[int64]*exprpb.Expr), + exprFactory: fac, + source: source, + sourceInfo: ast.NewSourceInfo(source), + nextID: 1, } } -func (p *parserHelper) getSourceInfo() *exprpb.SourceInfo { - return &exprpb.SourceInfo{ - Location: p.source.Description(), - Positions: p.positions, - LineOffsets: p.source.LineOffsets(), - MacroCalls: p.macroCalls} +func (p *parserHelper) getSourceInfo() *ast.SourceInfo { + return p.sourceInfo } -func (p *parserHelper) newLiteral(ctx any, value *exprpb.Constant) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: value} - return exprNode +func (p *parserHelper) newLiteral(ctx any, value ref.Val) ast.Expr { + return 
p.exprFactory.NewLiteral(p.newID(ctx), value) } -func (p *parserHelper) newLiteralBool(ctx any, value bool) *exprpb.Expr { - return p.newLiteral(ctx, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: value}}) +func (p *parserHelper) newLiteralBool(ctx any, value bool) ast.Expr { + return p.newLiteral(ctx, types.Bool(value)) } -func (p *parserHelper) newLiteralString(ctx any, value string) *exprpb.Expr { - return p.newLiteral(ctx, - &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: value}}) +func (p *parserHelper) newLiteralString(ctx any, value string) ast.Expr { + return p.newLiteral(ctx, types.String(value)) } -func (p *parserHelper) newLiteralBytes(ctx any, value []byte) *exprpb.Expr { - return p.newLiteral(ctx, - &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: value}}) +func (p *parserHelper) newLiteralBytes(ctx any, value []byte) ast.Expr { + return p.newLiteral(ctx, types.Bytes(value)) } -func (p *parserHelper) newLiteralInt(ctx any, value int64) *exprpb.Expr { - return p.newLiteral(ctx, - &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: value}}) +func (p *parserHelper) newLiteralInt(ctx any, value int64) ast.Expr { + return p.newLiteral(ctx, types.Int(value)) } -func (p *parserHelper) newLiteralUint(ctx any, value uint64) *exprpb.Expr { - return p.newLiteral(ctx, &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: value}}) +func (p *parserHelper) newLiteralUint(ctx any, value uint64) ast.Expr { + return p.newLiteral(ctx, types.Uint(value)) } -func (p *parserHelper) newLiteralDouble(ctx any, value float64) *exprpb.Expr { - return p.newLiteral(ctx, - &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: value}}) +func (p *parserHelper) newLiteralDouble(ctx any, value float64) ast.Expr { + return p.newLiteral(ctx, types.Double(value)) } -func (p *parserHelper) newIdent(ctx any, name string) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_IdentExpr{IdentExpr: &exprpb.Expr_Ident{Name: name}} - return exprNode +func (p *parserHelper) newIdent(ctx any, name string) ast.Expr { + return p.exprFactory.NewIdent(p.newID(ctx), name) } -func (p *parserHelper) newSelect(ctx any, operand *exprpb.Expr, field string) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_SelectExpr{ - SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field}} - return exprNode +func (p *parserHelper) newSelect(ctx any, operand ast.Expr, field string) ast.Expr { + return p.exprFactory.NewSelect(p.newID(ctx), operand, field) } -func (p *parserHelper) newPresenceTest(ctx any, operand *exprpb.Expr, field string) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_SelectExpr{ - SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field, TestOnly: true}} - return exprNode +func (p *parserHelper) newPresenceTest(ctx any, operand ast.Expr, field string) ast.Expr { + return p.exprFactory.NewPresenceTest(p.newID(ctx), operand, field) } -func (p *parserHelper) newGlobalCall(ctx any, function string, args ...*exprpb.Expr) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{Function: function, Args: args}} - return exprNode +func (p *parserHelper) newGlobalCall(ctx any, function string, args ...ast.Expr) ast.Expr { + return p.exprFactory.NewCall(p.newID(ctx), function, args...) 
} -func (p *parserHelper) newReceiverCall(ctx any, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{Function: function, Target: target, Args: args}} - return exprNode +func (p *parserHelper) newReceiverCall(ctx any, function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return p.exprFactory.NewMemberCall(p.newID(ctx), function, target, args...) } -func (p *parserHelper) newList(ctx any, elements []*exprpb.Expr, optionals ...int32) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{ - Elements: elements, - OptionalIndices: optionals, - }} - return exprNode +func (p *parserHelper) newList(ctx any, elements []ast.Expr, optionals ...int32) ast.Expr { + return p.exprFactory.NewList(p.newID(ctx), elements, optionals) } -func (p *parserHelper) newMap(ctx any, entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{Entries: entries}} - return exprNode +func (p *parserHelper) newMap(ctx any, entries ...ast.EntryExpr) ast.Expr { + return p.exprFactory.NewMap(p.newID(ctx), entries) } -func (p *parserHelper) newMapEntry(entryID int64, key *exprpb.Expr, value *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { - return &exprpb.Expr_CreateStruct_Entry{ - Id: entryID, - KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{MapKey: key}, - Value: value, - OptionalEntry: optional, - } +func (p *parserHelper) newMapEntry(entryID int64, key ast.Expr, value ast.Expr, optional bool) ast.EntryExpr { + return p.exprFactory.NewMapEntry(entryID, key, value, optional) } -func (p *parserHelper) newObject(ctx any, typeName string, entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{ - MessageName: typeName, - Entries: entries, - }, - } - return exprNode +func (p *parserHelper) newObject(ctx any, typeName string, fields ...ast.EntryExpr) ast.Expr { + return p.exprFactory.NewStruct(p.newID(ctx), typeName, fields) } -func (p *parserHelper) newObjectField(fieldID int64, field string, value *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { - return &exprpb.Expr_CreateStruct_Entry{ - Id: fieldID, - KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{FieldKey: field}, - Value: value, - OptionalEntry: optional, - } +func (p *parserHelper) newObjectField(fieldID int64, field string, value ast.Expr, optional bool) ast.EntryExpr { + return p.exprFactory.NewStructField(fieldID, field, value, optional) } -func (p *parserHelper) newComprehension(ctx any, iterVar string, - iterRange *exprpb.Expr, +func (p *parserHelper) newComprehension(ctx any, + iterRange ast.Expr, + iterVar, accuVar string, - accuInit *exprpb.Expr, - condition *exprpb.Expr, - step *exprpb.Expr, - result *exprpb.Expr) *exprpb.Expr { - exprNode := p.newExpr(ctx) - exprNode.ExprKind = &exprpb.Expr_ComprehensionExpr{ - ComprehensionExpr: &exprpb.Expr_Comprehension{ - AccuVar: accuVar, - AccuInit: accuInit, - IterVar: iterVar, - IterRange: iterRange, - LoopCondition: condition, - LoopStep: step, - Result: result}} - return exprNode -} - -func (p *parserHelper) newExpr(ctx any) *exprpb.Expr { - id, isID := ctx.(int64) - if isID { - return &exprpb.Expr{Id: id} + accuInit ast.Expr, + condition ast.Expr, + 
step ast.Expr, + result ast.Expr) ast.Expr { + return p.exprFactory.NewComprehension( + p.newID(ctx), iterRange, iterVar, accuVar, accuInit, condition, step, result) +} + +func (p *parserHelper) newComprehensionTwoVar(ctx any, + iterRange ast.Expr, + iterVar, iterVar2, + accuVar string, + accuInit ast.Expr, + condition ast.Expr, + step ast.Expr, + result ast.Expr) ast.Expr { + return p.exprFactory.NewComprehensionTwoVar( + p.newID(ctx), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result) +} + +func (p *parserHelper) newID(ctx any) int64 { + if id, isID := ctx.(int64); isID { + return id } - return &exprpb.Expr{Id: p.id(ctx)} + return p.id(ctx) +} + +func (p *parserHelper) newExpr(ctx any) ast.Expr { + return p.exprFactory.NewUnspecifiedExpr(p.newID(ctx)) } func (p *parserHelper) id(ctx any) int64 { - var location common.Location + var offset ast.OffsetRange switch c := ctx.(type) { case antlr.ParserRuleContext: - token := c.GetStart() - location = p.source.NewLocation(token.GetLine(), token.GetColumn()) + start := c.GetStart() + offset.Start = p.sourceInfo.ComputeOffset(int32(start.GetLine()), int32(start.GetColumn())) + offset.Stop = offset.Start + int32(len(c.GetText())) case antlr.Token: - token := c - location = p.source.NewLocation(token.GetLine(), token.GetColumn()) + offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn())) + offset.Stop = offset.Start + int32(len(c.GetText())) case common.Location: - location = c + offset.Start = p.sourceInfo.ComputeOffset(int32(c.Line()), int32(c.Column())) + offset.Stop = offset.Start + case ast.OffsetRange: + offset = c default: // This should only happen if the ctx is nil return -1 } id := p.nextID - p.positions[id], _ = p.source.LocationOffset(location) + p.sourceInfo.SetOffsetRange(id, offset) p.nextID++ return id } +func (p *parserHelper) deleteID(id int64) { + p.sourceInfo.ClearOffsetRange(id) + if id == p.nextID-1 { + p.nextID-- + } +} + func (p *parserHelper) getLocation(id int64) common.Location { - characterOffset := p.positions[id] - location, _ := p.source.OffsetLocation(characterOffset) - return location + return p.sourceInfo.GetStartLocation(id) +} + +func (p *parserHelper) getLocationByOffset(offset int32) common.Location { + return p.getSourceInfo().GetLocationByOffset(offset) } // buildMacroCallArg iterates the expression and returns a new expression // where all macros have been replaced by their IDs in MacroCalls -func (p *parserHelper) buildMacroCallArg(expr *exprpb.Expr) *exprpb.Expr { - if _, found := p.macroCalls[expr.GetId()]; found { - return &exprpb.Expr{Id: expr.GetId()} +func (p *parserHelper) buildMacroCallArg(expr ast.Expr) ast.Expr { + if _, found := p.sourceInfo.GetMacroCall(expr.ID()); found { + return p.exprFactory.NewUnspecifiedExpr(expr.ID()) } - switch expr.GetExprKind().(type) { - case *exprpb.Expr_CallExpr: + switch expr.Kind() { + case ast.CallKind: // Iterate the AST from `expr` recursively looking for macros. Because we are at most // starting from the top level macro, this recursion is bounded by the size of the AST. This // means that the depth check on the AST during parsing will catch recursion overflows // before we get to here. 
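	// Sub-expressions that are themselves recorded macro calls are swapped
	// for an unspecified expression carrying only the original ID, so the
	// stored macro call points at them indirectly instead of duplicating
	// their subtrees; call and list nodes are rebuilt through the
	// ast.ExprFactory with recursively processed children, and any other
	// kind is returned unchanged.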
- macroTarget := expr.GetCallExpr().GetTarget() - if macroTarget != nil { - macroTarget = p.buildMacroCallArg(macroTarget) - } - macroArgs := make([]*exprpb.Expr, len(expr.GetCallExpr().GetArgs())) - for index, arg := range expr.GetCallExpr().GetArgs() { + call := expr.AsCall() + macroArgs := make([]ast.Expr, len(call.Args())) + for index, arg := range call.Args() { macroArgs[index] = p.buildMacroCallArg(arg) } - return &exprpb.Expr{ - Id: expr.GetId(), - ExprKind: &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{ - Target: macroTarget, - Function: expr.GetCallExpr().GetFunction(), - Args: macroArgs, - }, - }, + if !call.IsMemberFunction() { + return p.exprFactory.NewCall(expr.ID(), call.FunctionName(), macroArgs...) } - case *exprpb.Expr_ListExpr: - listExpr := expr.GetListExpr() - macroListArgs := make([]*exprpb.Expr, len(listExpr.GetElements())) - for i, elem := range listExpr.GetElements() { + macroTarget := p.buildMacroCallArg(call.Target()) + return p.exprFactory.NewMemberCall(expr.ID(), call.FunctionName(), macroTarget, macroArgs...) + case ast.ListKind: + list := expr.AsList() + macroListArgs := make([]ast.Expr, list.Size()) + for i, elem := range list.Elements() { macroListArgs[i] = p.buildMacroCallArg(elem) } - return &exprpb.Expr{ - Id: expr.GetId(), - ExprKind: &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{ - Elements: macroListArgs, - OptionalIndices: listExpr.GetOptionalIndices(), - }, - }, - } + return p.exprFactory.NewList(expr.ID(), macroListArgs, list.OptionalIndices()) } - return expr } // addMacroCall adds the macro the the MacroCalls map in source info. If a macro has args/subargs/target // that are macros, their ID will be stored instead for later self-lookups. -func (p *parserHelper) addMacroCall(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) { - macroTarget := target - if target != nil { - if _, found := p.macroCalls[target.GetId()]; found { - macroTarget = &exprpb.Expr{Id: target.GetId()} - } else { - macroTarget = p.buildMacroCallArg(target) - } - } - - macroArgs := make([]*exprpb.Expr, len(args)) +func (p *parserHelper) addMacroCall(exprID int64, function string, target ast.Expr, args ...ast.Expr) { + macroArgs := make([]ast.Expr, len(args)) for index, arg := range args { macroArgs[index] = p.buildMacroCallArg(arg) } - - p.macroCalls[exprID] = &exprpb.Expr{ - ExprKind: &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{ - Target: macroTarget, - Function: function, - Args: macroArgs, - }, - }, + if target == nil { + p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewCall(0, function, macroArgs...)) + return } + macroTarget := target + if _, found := p.sourceInfo.GetMacroCall(target.ID()); found { + macroTarget = p.exprFactory.NewUnspecifiedExpr(target.ID()) + } else { + macroTarget = p.buildMacroCallArg(target) + } + p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewMemberCall(0, function, macroTarget, macroArgs...)) } // logicManager compacts logical trees into a more efficient structure which is semantically @@ -309,71 +254,71 @@ func (p *parserHelper) addMacroCall(exprID int64, function string, target *exprp // controversial choice as it alters the traditional order of execution assumptions present in most // expressions. type logicManager struct { - helper *parserHelper + exprFactory ast.ExprFactory function string - terms []*exprpb.Expr + terms []ast.Expr ops []int64 variadicASTs bool } // newVariadicLogicManager creates a logic manager instance bound to a specific function and its first term. 
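// For the AND operator (internally "_&&_"), a && b && c && d becomes the
// single variadic call _&&_(a, b, c, d) under this manager, whereas
// newBalancingLogicManager emits the balanced tree
// _&&_(_&&_(a, b), _&&_(c, d)); both are now built through the injected
// ast.ExprFactory rather than through parserHelper.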
-func newVariadicLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager { +func newVariadicLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager { return &logicManager{ - helper: h, + exprFactory: fac, function: function, - terms: []*exprpb.Expr{term}, + terms: []ast.Expr{term}, ops: []int64{}, variadicASTs: true, } } // newBalancingLogicManager creates a logic manager instance bound to a specific function and its first term. -func newBalancingLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager { +func newBalancingLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager { return &logicManager{ - helper: h, + exprFactory: fac, function: function, - terms: []*exprpb.Expr{term}, + terms: []ast.Expr{term}, ops: []int64{}, variadicASTs: false, } } // addTerm adds an operation identifier and term to the set of terms to be balanced. -func (l *logicManager) addTerm(op int64, term *exprpb.Expr) { +func (l *logicManager) addTerm(op int64, term ast.Expr) { l.terms = append(l.terms, term) l.ops = append(l.ops, op) } // toExpr renders the logic graph into an Expr value, either balancing a tree of logical // operations or creating a variadic representation of the logical operator. -func (l *logicManager) toExpr() *exprpb.Expr { +func (l *logicManager) toExpr() ast.Expr { if len(l.terms) == 1 { return l.terms[0] } if l.variadicASTs { - return l.helper.newGlobalCall(l.ops[0], l.function, l.terms...) + return l.exprFactory.NewCall(l.ops[0], l.function, l.terms...) } return l.balancedTree(0, len(l.ops)-1) } // balancedTree recursively balances the terms provided to a commutative operator. -func (l *logicManager) balancedTree(lo, hi int) *exprpb.Expr { +func (l *logicManager) balancedTree(lo, hi int) ast.Expr { mid := (lo + hi + 1) / 2 - var left *exprpb.Expr + var left ast.Expr if mid == lo { left = l.terms[mid] } else { left = l.balancedTree(lo, mid-1) } - var right *exprpb.Expr + var right ast.Expr if mid == hi { right = l.terms[mid+1] } else { right = l.balancedTree(mid+1, hi) } - return l.helper.newGlobalCall(l.ops[mid], l.function, left, right) + return l.exprFactory.NewCall(l.ops[mid], l.function, left, right) } type exprHelper struct { @@ -387,202 +332,167 @@ func (e *exprHelper) nextMacroID() int64 { // Copy implements the ExprHelper interface method by producing a copy of the input Expr value // with a fresh set of numeric identifiers the Expr and all its descendants. 
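// Each fresh ID is registered against the source offset range of the node it
// replaces (via sourceInfo.GetOffsetRange and parserHelper.newID), so copied
// subtrees still resolve to the original source text; map entries and struct
// fields receive their IDs from nextMacroID.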
-func (e *exprHelper) Copy(expr *exprpb.Expr) *exprpb.Expr { - copy := e.parserHelper.newExpr(e.parserHelper.getLocation(expr.GetId())) - switch expr.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - copy.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: expr.GetConstExpr()} - case *exprpb.Expr_IdentExpr: - copy.ExprKind = &exprpb.Expr_IdentExpr{IdentExpr: expr.GetIdentExpr()} - case *exprpb.Expr_SelectExpr: - op := expr.GetSelectExpr().GetOperand() - copy.ExprKind = &exprpb.Expr_SelectExpr{SelectExpr: &exprpb.Expr_Select{ - Operand: e.Copy(op), - Field: expr.GetSelectExpr().GetField(), - TestOnly: expr.GetSelectExpr().GetTestOnly(), - }} - case *exprpb.Expr_CallExpr: - call := expr.GetCallExpr() - target := call.GetTarget() - if target != nil { - target = e.Copy(target) +func (e *exprHelper) Copy(expr ast.Expr) ast.Expr { + offsetRange, _ := e.parserHelper.sourceInfo.GetOffsetRange(expr.ID()) + copyID := e.parserHelper.newID(offsetRange) + switch expr.Kind() { + case ast.LiteralKind: + return e.exprFactory.NewLiteral(copyID, expr.AsLiteral()) + case ast.IdentKind: + return e.exprFactory.NewIdent(copyID, expr.AsIdent()) + case ast.SelectKind: + sel := expr.AsSelect() + op := e.Copy(sel.Operand()) + if sel.IsTestOnly() { + return e.exprFactory.NewPresenceTest(copyID, op, sel.FieldName()) } - args := call.GetArgs() - argsCopy := make([]*exprpb.Expr, len(args)) + return e.exprFactory.NewSelect(copyID, op, sel.FieldName()) + case ast.CallKind: + call := expr.AsCall() + args := call.Args() + argsCopy := make([]ast.Expr, len(args)) for i, arg := range args { argsCopy[i] = e.Copy(arg) } - copy.ExprKind = &exprpb.Expr_CallExpr{ - CallExpr: &exprpb.Expr_Call{ - Function: call.GetFunction(), - Target: target, - Args: argsCopy, - }, + if !call.IsMemberFunction() { + return e.exprFactory.NewCall(copyID, call.FunctionName(), argsCopy...) } - case *exprpb.Expr_ListExpr: - elems := expr.GetListExpr().GetElements() - elemsCopy := make([]*exprpb.Expr, len(elems)) + return e.exprFactory.NewMemberCall(copyID, call.FunctionName(), e.Copy(call.Target()), argsCopy...) 
+ case ast.ListKind: + list := expr.AsList() + elems := list.Elements() + elemsCopy := make([]ast.Expr, len(elems)) for i, elem := range elems { elemsCopy[i] = e.Copy(elem) } - copy.ExprKind = &exprpb.Expr_ListExpr{ - ListExpr: &exprpb.Expr_CreateList{Elements: elemsCopy}, - } - case *exprpb.Expr_StructExpr: - entries := expr.GetStructExpr().GetEntries() - entriesCopy := make([]*exprpb.Expr_CreateStruct_Entry, len(entries)) - for i, entry := range entries { - entryCopy := &exprpb.Expr_CreateStruct_Entry{} - entryCopy.Id = e.nextMacroID() - switch entry.GetKeyKind().(type) { - case *exprpb.Expr_CreateStruct_Entry_FieldKey: - entryCopy.KeyKind = &exprpb.Expr_CreateStruct_Entry_FieldKey{ - FieldKey: entry.GetFieldKey(), - } - case *exprpb.Expr_CreateStruct_Entry_MapKey: - entryCopy.KeyKind = &exprpb.Expr_CreateStruct_Entry_MapKey{ - MapKey: e.Copy(entry.GetMapKey()), - } - } - entryCopy.Value = e.Copy(entry.GetValue()) - entriesCopy[i] = entryCopy - } - copy.ExprKind = &exprpb.Expr_StructExpr{ - StructExpr: &exprpb.Expr_CreateStruct{ - MessageName: expr.GetStructExpr().GetMessageName(), - Entries: entriesCopy, - }, + return e.exprFactory.NewList(copyID, elemsCopy, list.OptionalIndices()) + case ast.MapKind: + m := expr.AsMap() + entries := m.Entries() + entriesCopy := make([]ast.EntryExpr, len(entries)) + for i, en := range entries { + entry := en.AsMapEntry() + entryID := e.nextMacroID() + entriesCopy[i] = e.exprFactory.NewMapEntry(entryID, + e.Copy(entry.Key()), e.Copy(entry.Value()), entry.IsOptional()) } - case *exprpb.Expr_ComprehensionExpr: - iterRange := e.Copy(expr.GetComprehensionExpr().GetIterRange()) - accuInit := e.Copy(expr.GetComprehensionExpr().GetAccuInit()) - cond := e.Copy(expr.GetComprehensionExpr().GetLoopCondition()) - step := e.Copy(expr.GetComprehensionExpr().GetLoopStep()) - result := e.Copy(expr.GetComprehensionExpr().GetResult()) - copy.ExprKind = &exprpb.Expr_ComprehensionExpr{ - ComprehensionExpr: &exprpb.Expr_Comprehension{ - IterRange: iterRange, - IterVar: expr.GetComprehensionExpr().GetIterVar(), - AccuInit: accuInit, - AccuVar: expr.GetComprehensionExpr().GetAccuVar(), - LoopCondition: cond, - LoopStep: step, - Result: result, - }, + return e.exprFactory.NewMap(copyID, entriesCopy) + case ast.StructKind: + s := expr.AsStruct() + fields := s.Fields() + fieldsCopy := make([]ast.EntryExpr, len(fields)) + for i, f := range fields { + field := f.AsStructField() + fieldID := e.nextMacroID() + fieldsCopy[i] = e.exprFactory.NewStructField(fieldID, + field.Name(), e.Copy(field.Value()), field.IsOptional()) } + return e.exprFactory.NewStruct(copyID, s.TypeName(), fieldsCopy) + case ast.ComprehensionKind: + compre := expr.AsComprehension() + iterRange := e.Copy(compre.IterRange()) + accuInit := e.Copy(compre.AccuInit()) + cond := e.Copy(compre.LoopCondition()) + step := e.Copy(compre.LoopStep()) + result := e.Copy(compre.Result()) + // All comprehensions can be represented by the two-variable comprehension since the + // differentiation between one and two-variable is whether the iterVar2 value is non-empty. + return e.exprFactory.NewComprehensionTwoVar(copyID, + iterRange, compre.IterVar(), compre.IterVar2(), compre.AccuVar(), accuInit, cond, step, result) } - return copy -} - -// LiteralBool implements the ExprHelper interface method. -func (e *exprHelper) LiteralBool(value bool) *exprpb.Expr { - return e.parserHelper.newLiteralBool(e.nextMacroID(), value) + return e.exprFactory.NewUnspecifiedExpr(copyID) } -// LiteralBytes implements the ExprHelper interface method. 
-func (e *exprHelper) LiteralBytes(value []byte) *exprpb.Expr { - return e.parserHelper.newLiteralBytes(e.nextMacroID(), value) -} - -// LiteralDouble implements the ExprHelper interface method. -func (e *exprHelper) LiteralDouble(value float64) *exprpb.Expr { - return e.parserHelper.newLiteralDouble(e.nextMacroID(), value) -} - -// LiteralInt implements the ExprHelper interface method. -func (e *exprHelper) LiteralInt(value int64) *exprpb.Expr { - return e.parserHelper.newLiteralInt(e.nextMacroID(), value) -} - -// LiteralString implements the ExprHelper interface method. -func (e *exprHelper) LiteralString(value string) *exprpb.Expr { - return e.parserHelper.newLiteralString(e.nextMacroID(), value) -} - -// LiteralUint implements the ExprHelper interface method. -func (e *exprHelper) LiteralUint(value uint64) *exprpb.Expr { - return e.parserHelper.newLiteralUint(e.nextMacroID(), value) +// NewLiteral implements the ExprHelper interface method. +func (e *exprHelper) NewLiteral(value ref.Val) ast.Expr { + return e.exprFactory.NewLiteral(e.nextMacroID(), value) } // NewList implements the ExprHelper interface method. -func (e *exprHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr { - return e.parserHelper.newList(e.nextMacroID(), elems) +func (e *exprHelper) NewList(elems ...ast.Expr) ast.Expr { + return e.exprFactory.NewList(e.nextMacroID(), elems, []int32{}) } // NewMap implements the ExprHelper interface method. -func (e *exprHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { - return e.parserHelper.newMap(e.nextMacroID(), entries...) +func (e *exprHelper) NewMap(entries ...ast.EntryExpr) ast.Expr { + return e.exprFactory.NewMap(e.nextMacroID(), entries) } // NewMapEntry implements the ExprHelper interface method. -func (e *exprHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { - return e.parserHelper.newMapEntry(e.nextMacroID(), key, val, optional) +func (e *exprHelper) NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr { + return e.exprFactory.NewMapEntry(e.nextMacroID(), key, val, optional) } -// NewObject implements the ExprHelper interface method. -func (e *exprHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr { - return e.parserHelper.newObject(e.nextMacroID(), typeName, fieldInits...) +// NewStruct implements the ExprHelper interface method. +func (e *exprHelper) NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr { + return e.exprFactory.NewStruct(e.nextMacroID(), typeName, fieldInits) } -// NewObjectFieldInit implements the ExprHelper interface method. -func (e *exprHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry { - return e.parserHelper.newObjectField(e.nextMacroID(), field, init, optional) +// NewStructField implements the ExprHelper interface method. +func (e *exprHelper) NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr { + return e.exprFactory.NewStructField(e.nextMacroID(), field, init, optional) } -// Fold implements the ExprHelper interface method. -func (e *exprHelper) Fold(iterVar string, - iterRange *exprpb.Expr, +// NewComprehension implements the ExprHelper interface method. 
+func (e *exprHelper) NewComprehension( + iterRange ast.Expr, + iterVar string, + accuVar string, + accuInit ast.Expr, + condition ast.Expr, + step ast.Expr, + result ast.Expr) ast.Expr { + return e.exprFactory.NewComprehension( + e.nextMacroID(), iterRange, iterVar, accuVar, accuInit, condition, step, result) +} + +// NewComprehensionTwoVar implements the ExprHelper interface method. +func (e *exprHelper) NewComprehensionTwoVar( + iterRange ast.Expr, + iterVar, + iterVar2, accuVar string, - accuInit *exprpb.Expr, - condition *exprpb.Expr, - step *exprpb.Expr, - result *exprpb.Expr) *exprpb.Expr { - return e.parserHelper.newComprehension( - e.nextMacroID(), iterVar, iterRange, accuVar, accuInit, condition, step, result) + accuInit, + condition, + step, + result ast.Expr) ast.Expr { + return e.exprFactory.NewComprehensionTwoVar( + e.nextMacroID(), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result) } -// Ident implements the ExprHelper interface method. -func (e *exprHelper) Ident(name string) *exprpb.Expr { - return e.parserHelper.newIdent(e.nextMacroID(), name) +// NewIdent implements the ExprHelper interface method. +func (e *exprHelper) NewIdent(name string) ast.Expr { + return e.exprFactory.NewIdent(e.nextMacroID(), name) } -// AccuIdent implements the ExprHelper interface method. -func (e *exprHelper) AccuIdent() *exprpb.Expr { - return e.parserHelper.newIdent(e.nextMacroID(), AccumulatorName) +// NewAccuIdent implements the ExprHelper interface method. +func (e *exprHelper) NewAccuIdent() ast.Expr { + return e.exprFactory.NewAccuIdent(e.nextMacroID()) } -// GlobalCall implements the ExprHelper interface method. -func (e *exprHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr { - return e.parserHelper.newGlobalCall(e.nextMacroID(), function, args...) +// NewGlobalCall implements the ExprHelper interface method. +func (e *exprHelper) NewCall(function string, args ...ast.Expr) ast.Expr { + return e.exprFactory.NewCall(e.nextMacroID(), function, args...) } -// ReceiverCall implements the ExprHelper interface method. -func (e *exprHelper) ReceiverCall(function string, - target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr { - return e.parserHelper.newReceiverCall(e.nextMacroID(), function, target, args...) +// NewMemberCall implements the ExprHelper interface method. +func (e *exprHelper) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return e.exprFactory.NewMemberCall(e.nextMacroID(), function, target, args...) } -// PresenceTest implements the ExprHelper interface method. -func (e *exprHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr { - return e.parserHelper.newPresenceTest(e.nextMacroID(), operand, field) +// NewPresenceTest implements the ExprHelper interface method. +func (e *exprHelper) NewPresenceTest(operand ast.Expr, field string) ast.Expr { + return e.exprFactory.NewPresenceTest(e.nextMacroID(), operand, field) } -// Select implements the ExprHelper interface method. -func (e *exprHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr { - return e.parserHelper.newSelect(e.nextMacroID(), operand, field) +// NewSelect implements the ExprHelper interface method. +func (e *exprHelper) NewSelect(operand ast.Expr, field string) ast.Expr { + return e.exprFactory.NewSelect(e.nextMacroID(), operand, field) } // OffsetLocation implements the ExprHelper interface method. 
func (e *exprHelper) OffsetLocation(exprID int64) common.Location { - offset, found := e.parserHelper.positions[exprID] - if !found { - return common.NoLocation - } - location, found := e.parserHelper.source.OffsetLocation(offset) - if !found { - return common.NoLocation - } - return location + return e.parserHelper.sourceInfo.GetStartLocation(exprID) } // NewError associates an error message with a given expression id, populating the source offset location of the error if possible. diff --git a/constraint/vendor/github.com/google/cel-go/parser/input.go b/constraint/vendor/github.com/google/cel-go/parser/input.go index 810eaff21..44792455d 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/input.go +++ b/constraint/vendor/github.com/google/cel-go/parser/input.go @@ -15,7 +15,7 @@ package parser import ( - antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4" + antlr "github.com/antlr4-go/antlr/v4" "github.com/google/cel-go/common/runes" ) @@ -110,7 +110,7 @@ func (c *charStream) GetTextFromTokens(start, stop antlr.Token) string { } // GetTextFromInterval implements (antlr.CharStream).GetTextFromInterval. -func (c *charStream) GetTextFromInterval(i *antlr.Interval) string { +func (c *charStream) GetTextFromInterval(i antlr.Interval) string { return c.GetText(i.Start, i.Stop) } diff --git a/constraint/vendor/github.com/google/cel-go/parser/macro.go b/constraint/vendor/github.com/google/cel-go/parser/macro.go index 6066e8ef4..dc47b4203 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/macro.go +++ b/constraint/vendor/github.com/google/cel-go/parser/macro.go @@ -18,9 +18,10 @@ import ( "fmt" "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" ) // NewGlobalMacro creates a Macro for a global function with the specified arg count. @@ -142,58 +143,63 @@ func makeVarArgMacroKey(name string, receiverStyle bool) string { // and produces as output an Expr ast node. // // Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil. -type MacroExpander func(eh ExprHelper, - target *exprpb.Expr, - args []*exprpb.Expr) (*exprpb.Expr, *common.Error) +type MacroExpander func(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) -// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is -// consistent with the source position and expression id generation code leveraged by both -// the parser and type-checker. +// ExprHelper assists with the creation of Expr values in a manner which is consistent +// the internal semantics and id generation behaviors of the parser and checker libraries. type ExprHelper interface { // Copy the input expression with a brand new set of identifiers. - Copy(*exprpb.Expr) *exprpb.Expr - - // LiteralBool creates an Expr value for a bool literal. - LiteralBool(value bool) *exprpb.Expr - - // LiteralBytes creates an Expr value for a byte literal. - LiteralBytes(value []byte) *exprpb.Expr - - // LiteralDouble creates an Expr value for double literal. - LiteralDouble(value float64) *exprpb.Expr - - // LiteralInt creates an Expr value for an int literal. - LiteralInt(value int64) *exprpb.Expr - - // LiteralString creates am Expr value for a string literal. 
- LiteralString(value string) *exprpb.Expr + Copy(ast.Expr) ast.Expr - // LiteralUint creates an Expr value for a uint literal. - LiteralUint(value uint64) *exprpb.Expr + // Literal creates an Expr value for a scalar literal value. + NewLiteral(value ref.Val) ast.Expr - // NewList creates a CreateList instruction where the list is comprised of the optional set - // of elements provided as arguments. - NewList(elems ...*exprpb.Expr) *exprpb.Expr + // NewList creates a list literal instruction with an optional set of elements. + NewList(elems ...ast.Expr) ast.Expr // NewMap creates a CreateStruct instruction for a map where the map is comprised of the // optional set of key, value entries. - NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr + NewMap(entries ...ast.EntryExpr) ast.Expr // NewMapEntry creates a Map Entry for the key, value pair. - NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry + NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr - // NewObject creates a CreateStruct instruction for an object with a given type name and - // optional set of field initializers. - NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr + // NewStruct creates a struct literal expression with an optional set of field initializers. + NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr - // NewObjectFieldInit creates a new Object field initializer from the field name and value. - NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry + // NewStructField creates a new struct field initializer from the field name and value. + NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr + + // NewComprehension creates a new one-variable comprehension instruction. + // + // - iterRange represents the expression that resolves to a list or map where the elements or + // keys (respectively) will be iterated over. + // - iterVar is the variable name for the list element value, or the map key, depending on the + // range type. + // - accuVar is the accumulation variable name, typically parser.AccumulatorName. + // - accuInit is the initial expression whose value will be set for the accuVar prior to + // folding. + // - condition is the expression to test to determine whether to continue folding. + // - step is the expression to evaluation at the conclusion of a single fold iteration. + // - result is the computation to evaluate at the conclusion of the fold. + // + // The accuVar should not shadow variable names that you would like to reference within the + // environment in the step and condition expressions. Presently, the name __result__ is commonly + // used by built-in macros but this may change in the future. + NewComprehension(iterRange ast.Expr, + iterVar, + accuVar string, + accuInit, + condition, + step, + result ast.Expr) ast.Expr - // Fold creates a fold comprehension instruction. + // NewComprehensionTwoVar creates a new two-variable comprehension instruction. // - // - iterVar is the iteration variable name. // - iterRange represents the expression that resolves to a list or map where the elements or // keys (respectively) will be iterated over. + // - iterVar is the iteration variable assigned to the list index or the map key. + // - iterVar2 is the iteration variable assigned to the list element value or the map key value. // - accuVar is the accumulation variable name, typically parser.AccumulatorName. 
// - accuInit is the initial expression whose value will be set for the accuVar prior to // folding. @@ -204,31 +210,32 @@ type ExprHelper interface { // The accuVar should not shadow variable names that you would like to reference within the // environment in the step and condition expressions. Presently, the name __result__ is commonly // used by built-in macros but this may change in the future. - Fold(iterVar string, - iterRange *exprpb.Expr, + NewComprehensionTwoVar(iterRange ast.Expr, + iterVar, + iterVar2, accuVar string, - accuInit *exprpb.Expr, - condition *exprpb.Expr, - step *exprpb.Expr, - result *exprpb.Expr) *exprpb.Expr + accuInit, + condition, + step, + result ast.Expr) ast.Expr - // Ident creates an identifier Expr value. - Ident(name string) *exprpb.Expr + // NewIdent creates an identifier Expr value. + NewIdent(name string) ast.Expr - // AccuIdent returns an accumulator identifier for use with comprehension results. - AccuIdent() *exprpb.Expr + // NewAccuIdent returns an accumulator identifier for use with comprehension results. + NewAccuIdent() ast.Expr - // GlobalCall creates a function call Expr value for a global (free) function. - GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr + // NewCall creates a function call Expr value for a global (free) function. + NewCall(function string, args ...ast.Expr) ast.Expr - // ReceiverCall creates a function call Expr value for a receiver-style function. - ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr + // NewMemberCall creates a function call Expr value for a receiver-style function. + NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr - // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics. - PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr + // NewPresenceTest creates a Select TestOnly Expr value for modelling has() semantics. + NewPresenceTest(operand ast.Expr, field string) ast.Expr - // Select create a field traversal Expr value. - Select(operand *exprpb.Expr, field string) *exprpb.Expr + // NewSelect create a field traversal Expr value. + NewSelect(operand ast.Expr, field string) ast.Expr // OffsetLocation returns the Location of the expression identifier. 
OffsetLocation(exprID int64) common.Location @@ -296,21 +303,21 @@ const ( // MakeAll expands the input call arguments into a comprehension that returns true if all of the // elements in the range match the predicate expressions: // .all(, ) -func MakeAll(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MakeAll(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { return makeQuantifier(quantifierAll, eh, target, args) } // MakeExists expands the input call arguments into a comprehension that returns true if any of the // elements in the range match the predicate expressions: // .exists(, ) -func MakeExists(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MakeExists(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { return makeQuantifier(quantifierExists, eh, target, args) } // MakeExistsOne expands the input call arguments into a comprehension that returns true if exactly // one of the elements in the range match the predicate expressions: // .exists_one(, ) -func MakeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MakeExistsOne(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { return makeQuantifier(quantifierExistsOne, eh, target, args) } @@ -324,14 +331,14 @@ func MakeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*ex // // In the second form only iterVar values which return true when provided to the predicate expression // are transformed. -func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MakeMap(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, eh.NewError(args[0].GetId(), "argument is not an identifier") + return nil, eh.NewError(args[0].ID(), "argument is not an identifier") } - var fn *exprpb.Expr - var filter *exprpb.Expr + var fn ast.Expr + var filter ast.Expr if len(args) == 3 { filter = args[1] @@ -341,84 +348,81 @@ func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.E fn = args[1] } - accuExpr := eh.Ident(AccumulatorName) init := eh.NewList() - condition := eh.LiteralBool(true) - step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(fn)) + condition := eh.NewLiteral(types.True) + step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(fn)) if filter != nil { - step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr) + step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent()) } - return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil + return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil } // MakeFilter expands the input call arguments into a comprehension which produces a list which contains // only elements which match the provided predicate expression: // .filter(, ) -func MakeFilter(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func MakeFilter(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, eh.NewError(args[0].GetId(), "argument is not an identifier") + return nil, eh.NewError(args[0].ID(), "argument is not an identifier") } filter := args[1] - accuExpr := eh.Ident(AccumulatorName) init := eh.NewList() - condition := 
eh.LiteralBool(true) - step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(args[0])) - step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr) - return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil + condition := eh.NewLiteral(types.True) + step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(args[0])) + step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent()) + return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil } // MakeHas expands the input call arguments into a presence test, e.g. has(.field) -func MakeHas(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { - if s, ok := args[0].ExprKind.(*exprpb.Expr_SelectExpr); ok { - return eh.PresenceTest(s.SelectExpr.GetOperand(), s.SelectExpr.GetField()), nil +func MakeHas(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { + if args[0].Kind() == ast.SelectKind { + s := args[0].AsSelect() + return eh.NewPresenceTest(s.Operand(), s.FieldName()), nil } - return nil, eh.NewError(args[0].GetId(), "invalid argument to has() macro") + return nil, eh.NewError(args[0].ID(), "invalid argument to has() macro") } -func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { +func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, eh.NewError(args[0].GetId(), "argument must be a simple name") + return nil, eh.NewError(args[0].ID(), "argument must be a simple name") } - var init *exprpb.Expr - var condition *exprpb.Expr - var step *exprpb.Expr - var result *exprpb.Expr + var init ast.Expr + var condition ast.Expr + var step ast.Expr + var result ast.Expr switch kind { case quantifierAll: - init = eh.LiteralBool(true) - condition = eh.GlobalCall(operators.NotStrictlyFalse, eh.AccuIdent()) - step = eh.GlobalCall(operators.LogicalAnd, eh.AccuIdent(), args[1]) - result = eh.AccuIdent() + init = eh.NewLiteral(types.True) + condition = eh.NewCall(operators.NotStrictlyFalse, eh.NewAccuIdent()) + step = eh.NewCall(operators.LogicalAnd, eh.NewAccuIdent(), args[1]) + result = eh.NewAccuIdent() case quantifierExists: - init = eh.LiteralBool(false) - condition = eh.GlobalCall( + init = eh.NewLiteral(types.False) + condition = eh.NewCall( operators.NotStrictlyFalse, - eh.GlobalCall(operators.LogicalNot, eh.AccuIdent())) - step = eh.GlobalCall(operators.LogicalOr, eh.AccuIdent(), args[1]) - result = eh.AccuIdent() + eh.NewCall(operators.LogicalNot, eh.NewAccuIdent())) + step = eh.NewCall(operators.LogicalOr, eh.NewAccuIdent(), args[1]) + result = eh.NewAccuIdent() case quantifierExistsOne: - zeroExpr := eh.LiteralInt(0) - oneExpr := eh.LiteralInt(1) - init = zeroExpr - condition = eh.LiteralBool(true) - step = eh.GlobalCall(operators.Conditional, args[1], - eh.GlobalCall(operators.Add, eh.AccuIdent(), oneExpr), eh.AccuIdent()) - result = eh.GlobalCall(operators.Equals, eh.AccuIdent(), oneExpr) + init = eh.NewLiteral(types.Int(0)) + condition = eh.NewLiteral(types.True) + step = eh.NewCall(operators.Conditional, args[1], + eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1))), eh.NewAccuIdent()) + result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1))) default: - return nil, eh.NewError(args[0].GetId(), fmt.Sprintf("unrecognized quantifier '%v'", kind)) 
+ return nil, eh.NewError(args[0].ID(), fmt.Sprintf("unrecognized quantifier '%v'", kind)) } - return eh.Fold(v, target, AccumulatorName, init, condition, step, result), nil + return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, result), nil } -func extractIdent(e *exprpb.Expr) (string, bool) { - switch e.ExprKind.(type) { - case *exprpb.Expr_IdentExpr: - return e.GetIdentExpr().GetName(), true +func extractIdent(e ast.Expr) (string, bool) { + switch e.Kind() { + case ast.IdentKind: + return e.AsIdent(), true } return "", false } diff --git a/constraint/vendor/github.com/google/cel-go/parser/parser.go b/constraint/vendor/github.com/google/cel-go/parser/parser.go index 109326a93..5cbb17672 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/parser.go +++ b/constraint/vendor/github.com/google/cel-go/parser/parser.go @@ -21,17 +21,15 @@ import ( "regexp" "strconv" "strings" - "sync" - antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4" + antlr "github.com/antlr4-go/antlr/v4" "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/runes" + "github.com/google/cel-go/common/types" "github.com/google/cel-go/parser/gen" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - structpb "google.golang.org/protobuf/types/known/structpb" ) // Parser encapsulates the context necessary to perform parsing for different expressions. @@ -88,11 +86,13 @@ func mustNewParser(opts ...Option) *Parser { } // Parse parses the expression represented by source and returns the result. -func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) { +func (p *Parser) Parse(source common.Source) (*ast.AST, *common.Errors) { errs := common.NewErrors(source) + fac := ast.NewExprFactory() impl := parser{ errors: &parseErrors{errs}, - helper: newParserHelper(source), + exprFactory: fac, + helper: newParserHelper(source, fac), macros: p.macros, maxRecursionDepth: p.maxRecursionDepth, errorReportingLimit: p.errorReportingLimit, @@ -106,18 +106,15 @@ func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors if !ok { buf = runes.NewBuffer(source.Content()) } - var e *exprpb.Expr + var out ast.Expr if buf.Len() > p.expressionSizeCodePointLimit { - e = impl.reportError(common.NoLocation, + out = impl.reportError(common.NoLocation, "expression code point size exceeds limit: size: %d, limit %d", buf.Len(), p.expressionSizeCodePointLimit) } else { - e = impl.parse(buf, source.Description()) + out = impl.parse(buf, source.Description()) } - return &exprpb.ParsedExpr{ - Expr: e, - SourceInfo: impl.helper.getSourceInfo(), - }, errs + return ast.NewAST(out, impl.helper.getSourceInfo()), errs } // reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid @@ -150,7 +147,7 @@ var reservedIds = map[string]struct{}{ // This function calls ParseWithMacros with AllMacros. // // Deprecated: Use NewParser().Parse() instead. 
-func Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) { +func Parse(source common.Source) (*ast.AST, *common.Errors) { return mustNewParser(Macros(AllMacros...)).Parse(source) } @@ -287,6 +284,7 @@ var _ antlr.ErrorStrategy = &recoveryLimitErrorStrategy{} type parser struct { gen.BaseCELVisitor errors *parseErrors + exprFactory ast.ExprFactory helper *parserHelper macros map[string]Macro recursionDepth int @@ -300,53 +298,21 @@ type parser struct { enableVariadicOperatorASTs bool } -var ( - _ gen.CELVisitor = (*parser)(nil) - - lexerPool *sync.Pool = &sync.Pool{ - New: func() any { - l := gen.NewCELLexer(nil) - l.RemoveErrorListeners() - return l - }, - } +var _ gen.CELVisitor = (*parser)(nil) - parserPool *sync.Pool = &sync.Pool{ - New: func() any { - p := gen.NewCELParser(nil) - p.RemoveErrorListeners() - return p - }, - } -) +func (p *parser) parse(expr runes.Buffer, desc string) ast.Expr { + lexer := gen.NewCELLexer(newCharStream(expr, desc)) + lexer.RemoveErrorListeners() + lexer.AddErrorListener(p) -func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr { - // TODO: get rid of these pools once https://github.com/antlr/antlr4/pull/3571 is in a release - lexer := lexerPool.Get().(*gen.CELLexer) - prsr := parserPool.Get().(*gen.CELParser) + prsr := gen.NewCELParser(antlr.NewCommonTokenStream(lexer, 0)) + prsr.RemoveErrorListeners() prsrListener := &recursionListener{ maxDepth: p.maxRecursionDepth, ruleTypeDepth: map[int]*int{}, } - defer func() { - // Unfortunately ANTLR Go runtime is missing (*antlr.BaseParser).RemoveParseListeners, - // so this is good enough until that is exported. - // Reset the lexer and parser before putting them back in the pool. - lexer.RemoveErrorListeners() - prsr.RemoveParseListener(prsrListener) - prsr.RemoveErrorListeners() - lexer.SetInputStream(nil) - prsr.SetInputStream(nil) - lexerPool.Put(lexer) - parserPool.Put(prsr) - }() - - lexer.SetInputStream(newCharStream(expr, desc)) - prsr.SetInputStream(antlr.NewCommonTokenStream(lexer, 0)) - - lexer.AddErrorListener(p) prsr.AddErrorListener(p) prsr.AddParseListener(prsrListener) @@ -373,7 +339,7 @@ func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr { } }() - return p.Visit(prsr.Start()).(*exprpb.Expr) + return p.Visit(prsr.Start_()).(ast.Expr) } // Visitor implementations. @@ -470,26 +436,26 @@ func (p *parser) VisitStart(ctx *gen.StartContext) any { // Visit a parse tree produced by CELParser#expr. func (p *parser) VisitExpr(ctx *gen.ExprContext) any { - result := p.Visit(ctx.GetE()).(*exprpb.Expr) + result := p.Visit(ctx.GetE()).(ast.Expr) if ctx.GetOp() == nil { return result } opID := p.helper.id(ctx.GetOp()) - ifTrue := p.Visit(ctx.GetE1()).(*exprpb.Expr) - ifFalse := p.Visit(ctx.GetE2()).(*exprpb.Expr) + ifTrue := p.Visit(ctx.GetE1()).(ast.Expr) + ifFalse := p.Visit(ctx.GetE2()).(ast.Expr) return p.globalCallOrMacro(opID, operators.Conditional, result, ifTrue, ifFalse) } // Visit a parse tree produced by CELParser#conditionalOr. 
func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any { - result := p.Visit(ctx.GetE()).(*exprpb.Expr) + result := p.Visit(ctx.GetE()).(ast.Expr) l := p.newLogicManager(operators.LogicalOr, result) rest := ctx.GetE1() for i, op := range ctx.GetOps() { if i >= len(rest) { return p.reportError(ctx, "unexpected character, wanted '||'") } - next := p.Visit(rest[i]).(*exprpb.Expr) + next := p.Visit(rest[i]).(ast.Expr) opID := p.helper.id(op) l.addTerm(opID, next) } @@ -498,14 +464,14 @@ func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any { // Visit a parse tree produced by CELParser#conditionalAnd. func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any { - result := p.Visit(ctx.GetE()).(*exprpb.Expr) + result := p.Visit(ctx.GetE()).(ast.Expr) l := p.newLogicManager(operators.LogicalAnd, result) rest := ctx.GetE1() for i, op := range ctx.GetOps() { if i >= len(rest) { return p.reportError(ctx, "unexpected character, wanted '&&'") } - next := p.Visit(rest[i]).(*exprpb.Expr) + next := p.Visit(rest[i]).(ast.Expr) opID := p.helper.id(op) l.addTerm(opID, next) } @@ -519,9 +485,9 @@ func (p *parser) VisitRelation(ctx *gen.RelationContext) any { opText = ctx.GetOp().GetText() } if op, found := operators.Find(opText); found { - lhs := p.Visit(ctx.Relation(0)).(*exprpb.Expr) + lhs := p.Visit(ctx.Relation(0)).(ast.Expr) opID := p.helper.id(ctx.GetOp()) - rhs := p.Visit(ctx.Relation(1)).(*exprpb.Expr) + rhs := p.Visit(ctx.Relation(1)).(ast.Expr) return p.globalCallOrMacro(opID, op, lhs, rhs) } return p.reportError(ctx, "operator not found") @@ -534,9 +500,9 @@ func (p *parser) VisitCalc(ctx *gen.CalcContext) any { opText = ctx.GetOp().GetText() } if op, found := operators.Find(opText); found { - lhs := p.Visit(ctx.Calc(0)).(*exprpb.Expr) + lhs := p.Visit(ctx.Calc(0)).(ast.Expr) opID := p.helper.id(ctx.GetOp()) - rhs := p.Visit(ctx.Calc(1)).(*exprpb.Expr) + rhs := p.Visit(ctx.Calc(1)).(ast.Expr) return p.globalCallOrMacro(opID, op, lhs, rhs) } return p.reportError(ctx, "operator not found") @@ -552,7 +518,7 @@ func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) any { return p.Visit(ctx.Member()) } opID := p.helper.id(ctx.GetOps()[0]) - target := p.Visit(ctx.Member()).(*exprpb.Expr) + target := p.Visit(ctx.Member()).(ast.Expr) return p.globalCallOrMacro(opID, operators.LogicalNot, target) } @@ -561,13 +527,13 @@ func (p *parser) VisitNegate(ctx *gen.NegateContext) any { return p.Visit(ctx.Member()) } opID := p.helper.id(ctx.GetOps()[0]) - target := p.Visit(ctx.Member()).(*exprpb.Expr) + target := p.Visit(ctx.Member()).(ast.Expr) return p.globalCallOrMacro(opID, operators.Negate, target) } // VisitSelect visits a parse tree produced by CELParser#Select. func (p *parser) VisitSelect(ctx *gen.SelectContext) any { - operand := p.Visit(ctx.Member()).(*exprpb.Expr) + operand := p.Visit(ctx.Member()).(ast.Expr) // Handle the error case where no valid identifier is specified. if ctx.GetId() == nil || ctx.GetOp() == nil { return p.helper.newExpr(ctx) @@ -588,7 +554,7 @@ func (p *parser) VisitSelect(ctx *gen.SelectContext) any { // VisitMemberCall visits a parse tree produced by CELParser#MemberCall. func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any { - operand := p.Visit(ctx.Member()).(*exprpb.Expr) + operand := p.Visit(ctx.Member()).(ast.Expr) // Handle the error case where no valid identifier is specified. 
if ctx.GetId() == nil { return p.helper.newExpr(ctx) @@ -600,13 +566,13 @@ func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any { // Visit a parse tree produced by CELParser#Index. func (p *parser) VisitIndex(ctx *gen.IndexContext) any { - target := p.Visit(ctx.Member()).(*exprpb.Expr) + target := p.Visit(ctx.Member()).(ast.Expr) // Handle the error case where no valid identifier is specified. if ctx.GetOp() == nil { return p.helper.newExpr(ctx) } opID := p.helper.id(ctx.GetOp()) - index := p.Visit(ctx.GetIndex()).(*exprpb.Expr) + index := p.Visit(ctx.GetIndex()).(ast.Expr) operator := operators.Index if ctx.GetOpt() != nil { if !p.enableOptionalSyntax { @@ -630,7 +596,7 @@ func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any { messageName = "." + messageName } objID := p.helper.id(ctx.GetOp()) - entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry) + entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]ast.EntryExpr) return p.helper.newObject(objID, messageName, entries...) } @@ -638,16 +604,16 @@ func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any { func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) any { if ctx == nil || ctx.GetFields() == nil { // This is the result of a syntax error handled elswhere, return empty. - return []*exprpb.Expr_CreateStruct_Entry{} + return []ast.EntryExpr{} } - result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetFields())) + result := make([]ast.EntryExpr, len(ctx.GetFields())) cols := ctx.GetCols() vals := ctx.GetValues() for i, f := range ctx.GetFields() { if i >= len(cols) || i >= len(vals) { // This is the result of a syntax error detected elsewhere. - return []*exprpb.Expr_CreateStruct_Entry{} + return []ast.EntryExpr{} } initID := p.helper.id(cols[i]) optField := f.(*gen.OptFieldContext) @@ -659,10 +625,10 @@ func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext // The field may be empty due to a prior error. id := optField.IDENTIFIER() if id == nil { - return []*exprpb.Expr_CreateStruct_Entry{} + return []ast.EntryExpr{} } fieldName := id.GetText() - value := p.Visit(vals[i]).(*exprpb.Expr) + value := p.Visit(vals[i]).(ast.Expr) field := p.helper.newObjectField(initID, fieldName, value, optional) result[i] = field } @@ -702,9 +668,9 @@ func (p *parser) VisitCreateList(ctx *gen.CreateListContext) any { // Visit a parse tree produced by CELParser#CreateStruct. func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any { structID := p.helper.id(ctx.GetOp()) - entries := []*exprpb.Expr_CreateStruct_Entry{} + entries := []ast.EntryExpr{} if ctx.GetEntries() != nil { - entries = p.Visit(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry) + entries = p.Visit(ctx.GetEntries()).([]ast.EntryExpr) } return p.helper.newMap(structID, entries...) } @@ -713,17 +679,17 @@ func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any { func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any { if ctx == nil || ctx.GetKeys() == nil { // This is the result of a syntax error handled elswhere, return empty. 
- return []*exprpb.Expr_CreateStruct_Entry{} + return []ast.EntryExpr{} } - result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetCols())) + result := make([]ast.EntryExpr, len(ctx.GetCols())) keys := ctx.GetKeys() vals := ctx.GetValues() for i, col := range ctx.GetCols() { colID := p.helper.id(col) if i >= len(keys) || i >= len(vals) { // This is the result of a syntax error detected elsewhere. - return []*exprpb.Expr_CreateStruct_Entry{} + return []ast.EntryExpr{} } optKey := keys[i] optional := optKey.GetOpt() != nil @@ -731,8 +697,8 @@ func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any p.reportError(optKey, "unsupported syntax '?'") continue } - key := p.Visit(optKey.GetE()).(*exprpb.Expr) - value := p.Visit(vals[i]).(*exprpb.Expr) + key := p.Visit(optKey.GetE()).(ast.Expr) + value := p.Visit(vals[i]).(ast.Expr) entry := p.helper.newMapEntry(colID, key, value, optional) result[i] = entry } @@ -812,30 +778,27 @@ func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) any { // Visit a parse tree produced by CELParser#Null. func (p *parser) VisitNull(ctx *gen.NullContext) any { - return p.helper.newLiteral(ctx, - &exprpb.Constant{ - ConstantKind: &exprpb.Constant_NullValue{ - NullValue: structpb.NullValue_NULL_VALUE}}) + return p.helper.exprFactory.NewLiteral(p.helper.newID(ctx), types.NullValue) } -func (p *parser) visitExprList(ctx gen.IExprListContext) []*exprpb.Expr { +func (p *parser) visitExprList(ctx gen.IExprListContext) []ast.Expr { if ctx == nil { - return []*exprpb.Expr{} + return []ast.Expr{} } return p.visitSlice(ctx.GetE()) } -func (p *parser) visitListInit(ctx gen.IListInitContext) ([]*exprpb.Expr, []int32) { +func (p *parser) visitListInit(ctx gen.IListInitContext) ([]ast.Expr, []int32) { if ctx == nil { - return []*exprpb.Expr{}, []int32{} + return []ast.Expr{}, []int32{} } elements := ctx.GetElems() - result := make([]*exprpb.Expr, len(elements)) + result := make([]ast.Expr, len(elements)) optionals := []int32{} for i, e := range elements { - ex := p.Visit(e.GetE()).(*exprpb.Expr) + ex := p.Visit(e.GetE()).(ast.Expr) if ex == nil { - return []*exprpb.Expr{}, []int32{} + return []ast.Expr{}, []int32{} } result[i] = ex if e.GetOpt() != nil { @@ -849,13 +812,13 @@ func (p *parser) visitListInit(ctx gen.IListInitContext) ([]*exprpb.Expr, []int3 return result, optionals } -func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr { +func (p *parser) visitSlice(expressions []gen.IExprContext) []ast.Expr { if expressions == nil { - return []*exprpb.Expr{} + return []ast.Expr{} } - result := make([]*exprpb.Expr, len(expressions)) + result := make([]ast.Expr, len(expressions)) for i, e := range expressions { - ex := p.Visit(e).(*exprpb.Expr) + ex := p.Visit(e).(ast.Expr) result[i] = ex } return result @@ -870,30 +833,31 @@ func (p *parser) unquote(ctx any, value string, isBytes bool) string { return text } -func (p *parser) newLogicManager(function string, term *exprpb.Expr) *logicManager { +func (p *parser) newLogicManager(function string, term ast.Expr) *logicManager { if p.enableVariadicOperatorASTs { - return newVariadicLogicManager(p.helper, function, term) + return newVariadicLogicManager(p.exprFactory, function, term) } - return newBalancingLogicManager(p.helper, function, term) + return newBalancingLogicManager(p.exprFactory, function, term) } -func (p *parser) reportError(ctx any, format string, args ...any) *exprpb.Expr { +func (p *parser) reportError(ctx any, format string, args ...any) ast.Expr { var location 
common.Location err := p.helper.newExpr(ctx) switch c := ctx.(type) { case common.Location: location = c case antlr.Token, antlr.ParserRuleContext: - location = p.helper.getLocation(err.GetId()) + location = p.helper.getLocation(err.ID()) } // Provide arguments to the report error. - p.errors.reportErrorAtID(err.GetId(), location, format, args...) + p.errors.reportErrorAtID(err.ID(), location, format, args...) return err } // ANTLR Parse listener implementations func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) { - l := p.helper.source.NewLocation(line, column) + offset := p.helper.sourceInfo.ComputeOffset(int32(line), int32(column)) + l := p.helper.getLocationByOffset(offset) // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error // messages out of ANTLR to prevent future breaking changes related to error message content. @@ -912,33 +876,33 @@ func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, l } } -func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs antlr.ATNConfigSet) { +func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs *antlr.ATNConfigSet) { // Intentional } -func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs antlr.ATNConfigSet) { +func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs *antlr.ATNConfigSet) { // Intentional } -func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs antlr.ATNConfigSet) { +func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs *antlr.ATNConfigSet) { // Intentional } -func (p *parser) globalCallOrMacro(exprID int64, function string, args ...*exprpb.Expr) *exprpb.Expr { +func (p *parser) globalCallOrMacro(exprID int64, function string, args ...ast.Expr) ast.Expr { if expr, found := p.expandMacro(exprID, function, nil, args...); found { return expr } return p.helper.newGlobalCall(exprID, function, args...) } -func (p *parser) receiverCallOrMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr { +func (p *parser) receiverCallOrMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) ast.Expr { if expr, found := p.expandMacro(exprID, function, target, args...); found { return expr } return p.helper.newReceiverCall(exprID, function, target, args...) 
} -func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) (*exprpb.Expr, bool) { +func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) (ast.Expr, bool) { macro, found := p.macros[makeMacroKey(function, len(args), target != nil)] if !found { macro, found = p.macros[makeVarArgMacroKey(function, target != nil)] @@ -953,10 +917,12 @@ func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, expr, err := macro.Expander()(eh, target, args) // An error indicates that the macro was matched, but the arguments were not well-formed. if err != nil { - if err.Location != nil { - return p.reportError(err.Location, err.Message), true + loc := err.Location + if loc == nil { + loc = p.helper.getLocation(exprID) } - return p.reportError(p.helper.getLocation(exprID), err.Message), true + p.helper.deleteID(exprID) + return p.reportError(loc, err.Message), true } // A nil value from the macro indicates that the macro implementation decided that // an expansion should not be performed. @@ -964,8 +930,9 @@ func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, return nil, false } if p.populateMacroCalls { - p.helper.addMacroCall(expr.GetId(), function, target, args...) + p.helper.addMacroCall(expr.ID(), function, target, args...) } + p.helper.deleteID(exprID) return expr, true } diff --git a/constraint/vendor/github.com/google/cel-go/parser/unparser.go b/constraint/vendor/github.com/google/cel-go/parser/unparser.go index c3c40a0dd..91cf72944 100644 --- a/constraint/vendor/github.com/google/cel-go/parser/unparser.go +++ b/constraint/vendor/github.com/google/cel-go/parser/unparser.go @@ -20,9 +20,9 @@ import ( "strconv" "strings" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/operators" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "github.com/google/cel-go/common/types" ) // Unparse takes an input expression and source position information and generates a human-readable @@ -39,7 +39,7 @@ import ( // // This function optionally takes in one or more UnparserOption to alter the unparsing behavior, such as // performing word wrapping on expressions. -func Unparse(expr *exprpb.Expr, info *exprpb.SourceInfo, opts ...UnparserOption) (string, error) { +func Unparse(expr ast.Expr, info *ast.SourceInfo, opts ...UnparserOption) (string, error) { unparserOpts := &unparserOption{ wrapOnColumn: defaultWrapOnColumn, wrapAfterColumnLimit: defaultWrapAfterColumnLimit, @@ -68,12 +68,12 @@ func Unparse(expr *exprpb.Expr, info *exprpb.SourceInfo, opts ...UnparserOption) // unparser visits an expression to reconstruct a human-readable string from an AST. 
type unparser struct { str strings.Builder - info *exprpb.SourceInfo + info *ast.SourceInfo options *unparserOption lastWrappedIndex int } -func (un *unparser) visit(expr *exprpb.Expr) error { +func (un *unparser) visit(expr ast.Expr) error { if expr == nil { return errors.New("unsupported expression") } @@ -81,27 +81,29 @@ func (un *unparser) visit(expr *exprpb.Expr) error { if visited || err != nil { return err } - switch expr.GetExprKind().(type) { - case *exprpb.Expr_CallExpr: + switch expr.Kind() { + case ast.CallKind: return un.visitCall(expr) - case *exprpb.Expr_ConstExpr: + case ast.LiteralKind: return un.visitConst(expr) - case *exprpb.Expr_IdentExpr: + case ast.IdentKind: return un.visitIdent(expr) - case *exprpb.Expr_ListExpr: + case ast.ListKind: return un.visitList(expr) - case *exprpb.Expr_SelectExpr: + case ast.MapKind: + return un.visitStructMap(expr) + case ast.SelectKind: return un.visitSelect(expr) - case *exprpb.Expr_StructExpr: - return un.visitStruct(expr) + case ast.StructKind: + return un.visitStructMsg(expr) default: return fmt.Errorf("unsupported expression: %v", expr) } } -func (un *unparser) visitCall(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() +func (un *unparser) visitCall(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() switch fun { // ternary operator case operators.Conditional: @@ -141,10 +143,10 @@ func (un *unparser) visitCall(expr *exprpb.Expr) error { } } -func (un *unparser) visitCallBinary(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() - args := c.GetArgs() +func (un *unparser) visitCallBinary(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() lhs := args[0] // add parens if the current operator is lower precedence than the lhs expr operator. lhsParen := isComplexOperatorWithRespectTo(fun, lhs) @@ -168,9 +170,9 @@ func (un *unparser) visitCallBinary(expr *exprpb.Expr) error { return un.visitMaybeNested(rhs, rhsParen) } -func (un *unparser) visitCallConditional(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - args := c.GetArgs() +func (un *unparser) visitCallConditional(expr ast.Expr) error { + c := expr.AsCall() + args := c.Args() // add parens if operand is a conditional itself. 
nested := isSamePrecedence(operators.Conditional, args[0]) || isComplexOperator(args[0]) @@ -196,13 +198,13 @@ func (un *unparser) visitCallConditional(expr *exprpb.Expr) error { return un.visitMaybeNested(args[2], nested) } -func (un *unparser) visitCallFunc(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() - args := c.GetArgs() - if c.GetTarget() != nil { - nested := isBinaryOrTernaryOperator(c.GetTarget()) - err := un.visitMaybeNested(c.GetTarget(), nested) +func (un *unparser) visitCallFunc(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() + if c.IsMemberFunction() { + nested := isBinaryOrTernaryOperator(c.Target()) + err := un.visitMaybeNested(c.Target(), nested) if err != nil { return err } @@ -223,17 +225,17 @@ func (un *unparser) visitCallFunc(expr *exprpb.Expr) error { return nil } -func (un *unparser) visitCallIndex(expr *exprpb.Expr) error { +func (un *unparser) visitCallIndex(expr ast.Expr) error { return un.visitCallIndexInternal(expr, "[") } -func (un *unparser) visitCallOptIndex(expr *exprpb.Expr) error { +func (un *unparser) visitCallOptIndex(expr ast.Expr) error { return un.visitCallIndexInternal(expr, "[?") } -func (un *unparser) visitCallIndexInternal(expr *exprpb.Expr, op string) error { - c := expr.GetCallExpr() - args := c.GetArgs() +func (un *unparser) visitCallIndexInternal(expr ast.Expr, op string) error { + c := expr.AsCall() + args := c.Args() nested := isBinaryOrTernaryOperator(args[0]) err := un.visitMaybeNested(args[0], nested) if err != nil { @@ -248,10 +250,10 @@ func (un *unparser) visitCallIndexInternal(expr *exprpb.Expr, op string) error { return nil } -func (un *unparser) visitCallUnary(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - fun := c.GetFunction() - args := c.GetArgs() +func (un *unparser) visitCallUnary(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() unmangled, found := operators.FindReverse(fun) if !found { return fmt.Errorf("cannot unmangle operator: %s", fun) @@ -261,35 +263,34 @@ func (un *unparser) visitCallUnary(expr *exprpb.Expr) error { return un.visitMaybeNested(args[0], nested) } -func (un *unparser) visitConst(expr *exprpb.Expr) error { - c := expr.GetConstExpr() - switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - un.str.WriteString(strconv.FormatBool(c.GetBoolValue())) - case *exprpb.Constant_BytesValue: +func (un *unparser) visitConst(expr ast.Expr) error { + val := expr.AsLiteral() + switch val := val.(type) { + case types.Bool: + un.str.WriteString(strconv.FormatBool(bool(val))) + case types.Bytes: // bytes constants are surrounded with b"" - b := c.GetBytesValue() un.str.WriteString(`b"`) - un.str.WriteString(bytesToOctets(b)) + un.str.WriteString(bytesToOctets([]byte(val))) un.str.WriteString(`"`) - case *exprpb.Constant_DoubleValue: + case types.Double: // represent the float using the minimum required digits - d := strconv.FormatFloat(c.GetDoubleValue(), 'g', -1, 64) + d := strconv.FormatFloat(float64(val), 'g', -1, 64) un.str.WriteString(d) if !strings.Contains(d, ".") { un.str.WriteString(".0") } - case *exprpb.Constant_Int64Value: - i := strconv.FormatInt(c.GetInt64Value(), 10) + case types.Int: + i := strconv.FormatInt(int64(val), 10) un.str.WriteString(i) - case *exprpb.Constant_NullValue: + case types.Null: un.str.WriteString("null") - case *exprpb.Constant_StringValue: + case types.String: // strings will be double quoted with quotes escaped. 
- un.str.WriteString(strconv.Quote(c.GetStringValue())) - case *exprpb.Constant_Uint64Value: + un.str.WriteString(strconv.Quote(string(val))) + case types.Uint: // uint literals have a 'u' suffix. - ui := strconv.FormatUint(c.GetUint64Value(), 10) + ui := strconv.FormatUint(uint64(val), 10) un.str.WriteString(ui) un.str.WriteString("u") default: @@ -298,16 +299,16 @@ func (un *unparser) visitConst(expr *exprpb.Expr) error { return nil } -func (un *unparser) visitIdent(expr *exprpb.Expr) error { - un.str.WriteString(expr.GetIdentExpr().GetName()) +func (un *unparser) visitIdent(expr ast.Expr) error { + un.str.WriteString(expr.AsIdent()) return nil } -func (un *unparser) visitList(expr *exprpb.Expr) error { - l := expr.GetListExpr() - elems := l.GetElements() +func (un *unparser) visitList(expr ast.Expr) error { + l := expr.AsList() + elems := l.Elements() optIndices := make(map[int]bool, len(elems)) - for _, idx := range l.GetOptionalIndices() { + for _, idx := range l.OptionalIndices() { optIndices[int(idx)] = true } un.str.WriteString("[") @@ -327,20 +328,20 @@ func (un *unparser) visitList(expr *exprpb.Expr) error { return nil } -func (un *unparser) visitOptSelect(expr *exprpb.Expr) error { - c := expr.GetCallExpr() - args := c.GetArgs() +func (un *unparser) visitOptSelect(expr ast.Expr) error { + c := expr.AsCall() + args := c.Args() operand := args[0] - field := args[1].GetConstExpr().GetStringValue() - return un.visitSelectInternal(operand, false, ".?", field) + field := args[1].AsLiteral().(types.String) + return un.visitSelectInternal(operand, false, ".?", string(field)) } -func (un *unparser) visitSelect(expr *exprpb.Expr) error { - sel := expr.GetSelectExpr() - return un.visitSelectInternal(sel.GetOperand(), sel.GetTestOnly(), ".", sel.GetField()) +func (un *unparser) visitSelect(expr ast.Expr) error { + sel := expr.AsSelect() + return un.visitSelectInternal(sel.Operand(), sel.IsTestOnly(), ".", sel.FieldName()) } -func (un *unparser) visitSelectInternal(operand *exprpb.Expr, testOnly bool, op string, field string) error { +func (un *unparser) visitSelectInternal(operand ast.Expr, testOnly bool, op string, field string) error { // handle the case when the select expression was generated by the has() macro. if testOnly { un.str.WriteString("has(") @@ -358,34 +359,25 @@ func (un *unparser) visitSelectInternal(operand *exprpb.Expr, testOnly bool, op return nil } -func (un *unparser) visitStruct(expr *exprpb.Expr) error { - s := expr.GetStructExpr() - // If the message name is non-empty, then this should be treated as message construction. - if s.GetMessageName() != "" { - return un.visitStructMsg(expr) - } - // Otherwise, build a map. 
- return un.visitStructMap(expr) -} - -func (un *unparser) visitStructMsg(expr *exprpb.Expr) error { - m := expr.GetStructExpr() - entries := m.GetEntries() - un.str.WriteString(m.GetMessageName()) +func (un *unparser) visitStructMsg(expr ast.Expr) error { + m := expr.AsStruct() + fields := m.Fields() + un.str.WriteString(m.TypeName()) un.str.WriteString("{") - for i, entry := range entries { - f := entry.GetFieldKey() - if entry.GetOptionalEntry() { + for i, f := range fields { + field := f.AsStructField() + f := field.Name() + if field.IsOptional() { un.str.WriteString("?") } un.str.WriteString(f) un.str.WriteString(": ") - v := entry.GetValue() + v := field.Value() err := un.visit(v) if err != nil { return err } - if i < len(entries)-1 { + if i < len(fields)-1 { un.str.WriteString(", ") } } @@ -393,13 +385,14 @@ func (un *unparser) visitStructMsg(expr *exprpb.Expr) error { return nil } -func (un *unparser) visitStructMap(expr *exprpb.Expr) error { - m := expr.GetStructExpr() - entries := m.GetEntries() +func (un *unparser) visitStructMap(expr ast.Expr) error { + m := expr.AsMap() + entries := m.Entries() un.str.WriteString("{") - for i, entry := range entries { - k := entry.GetMapKey() - if entry.GetOptionalEntry() { + for i, e := range entries { + entry := e.AsMapEntry() + k := entry.Key() + if entry.IsOptional() { un.str.WriteString("?") } err := un.visit(k) @@ -407,7 +400,7 @@ func (un *unparser) visitStructMap(expr *exprpb.Expr) error { return err } un.str.WriteString(": ") - v := entry.GetValue() + v := entry.Value() err = un.visit(v) if err != nil { return err @@ -420,16 +413,15 @@ func (un *unparser) visitStructMap(expr *exprpb.Expr) error { return nil } -func (un *unparser) visitMaybeMacroCall(expr *exprpb.Expr) (bool, error) { - macroCalls := un.info.GetMacroCalls() - call, found := macroCalls[expr.GetId()] +func (un *unparser) visitMaybeMacroCall(expr ast.Expr) (bool, error) { + call, found := un.info.GetMacroCall(expr.ID()) if !found { return false, nil } return true, un.visit(call) } -func (un *unparser) visitMaybeNested(expr *exprpb.Expr, nested bool) error { +func (un *unparser) visitMaybeNested(expr ast.Expr, nested bool) error { if nested { un.str.WriteString("(") } @@ -453,12 +445,12 @@ func isLeftRecursive(op string) bool { // precedence of the (possible) operation represented in the input Expr. // // If the expr is not a Call, the result is false. -func isSamePrecedence(op string, expr *exprpb.Expr) bool { - if expr.GetCallExpr() == nil { +func isSamePrecedence(op string, expr ast.Expr) bool { + if expr.Kind() != ast.CallKind { return false } - c := expr.GetCallExpr() - other := c.GetFunction() + c := expr.AsCall() + other := c.FunctionName() return operators.Precedence(op) == operators.Precedence(other) } @@ -466,16 +458,16 @@ func isSamePrecedence(op string, expr *exprpb.Expr) bool { // than the (possible) operation represented in the input Expr. // // If the expr is not a Call, the result is false. -func isLowerPrecedence(op string, expr *exprpb.Expr) bool { - c := expr.GetCallExpr() - other := c.GetFunction() +func isLowerPrecedence(op string, expr ast.Expr) bool { + c := expr.AsCall() + other := c.FunctionName() return operators.Precedence(op) < operators.Precedence(other) } // Indicates whether the expr is a complex operator, i.e., a call expression // with 2 or more arguments. 
-func isComplexOperator(expr *exprpb.Expr) bool { - if expr.GetCallExpr() != nil && len(expr.GetCallExpr().GetArgs()) >= 2 { +func isComplexOperator(expr ast.Expr) bool { + if expr.Kind() == ast.CallKind && len(expr.AsCall().Args()) >= 2 { return true } return false @@ -484,19 +476,19 @@ func isComplexOperator(expr *exprpb.Expr) bool { // Indicates whether it is a complex operation compared to another. // expr is *not* considered complex if it is not a call expression or has // less than two arguments, or if it has a higher precedence than op. -func isComplexOperatorWithRespectTo(op string, expr *exprpb.Expr) bool { - if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 { +func isComplexOperatorWithRespectTo(op string, expr ast.Expr) bool { + if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 { return false } return isLowerPrecedence(op, expr) } // Indicate whether this is a binary or ternary operator. -func isBinaryOrTernaryOperator(expr *exprpb.Expr) bool { - if expr.GetCallExpr() == nil || len(expr.GetCallExpr().GetArgs()) < 2 { +func isBinaryOrTernaryOperator(expr ast.Expr) bool { + if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 { return false } - _, isBinaryOp := operators.FindReverseBinaryOperator(expr.GetCallExpr().GetFunction()) + _, isBinaryOp := operators.FindReverseBinaryOperator(expr.AsCall().FunctionName()) return isBinaryOp || isSamePrecedence(operators.Conditional, expr) } diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index 78d7c9f5c..a65d88eb8 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -73,7 +73,7 @@ go_test( "@org_golang_google_genproto_googleapis_api//httpbody", "@org_golang_google_genproto_googleapis_rpc//errdetails", "@org_golang_google_genproto_googleapis_rpc//status", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//health/grpc_health_v1", "@org_golang_google_grpc//metadata", diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 5dd4e4478..2f2b34243 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -49,6 +49,7 @@ var malformedHTTPHeaders = map[string]struct{}{ type ( rpcMethodKey struct{} httpPathPatternKey struct{} + httpPatternKey struct{} AnnotateContextOption func(ctx context.Context) context.Context ) @@ -404,3 +405,13 @@ func HTTPPathPattern(ctx context.Context) (string, bool) { func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) } + +// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists. 
+func HTTPPattern(ctx context.Context) (Pattern, bool) { + v, ok := ctx.Value(httpPatternKey{}).(Pattern) + return v, ok +} + +func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context { + return context.WithValue(ctx, httpPatternKey{}, httpPattern) +} diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index d7b15fcfb..2e50082ad 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,7 +94,7 @@ func Int64(val string) (int64, error) { } // Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. +// 'sep' into an int64 slice. func Int64Slice(val, sep string) ([]int64, error) { s := strings.Split(val, sep) values := make([]int64, len(s)) @@ -118,7 +118,7 @@ func Int32(val string) (int32, error) { } // Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. +// 'sep' into an int32 slice. func Int32Slice(val, sep string) ([]int32, error) { s := strings.Split(val, sep) values := make([]int32, len(s)) @@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) { } // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +// base64 without padding, are separated by 'sep' into a slice of byte slices. func BytesSlice(val, sep string) ([][]byte, error) { s := strings.Split(val, sep) values := make([][]byte, len(s)) diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 568299869..41cd4f503 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R mux.errorHandler(ctx, mux, marshaler, w, r, err) } +// HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection. +func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } +} + // DefaultHTTPErrorHandler is the default error handler. // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. 
This is @@ -93,6 +108,7 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { // return Internal when Marshal failed const fallback = `{"code": 13, "message": "failed to marshal error message"}` + const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}` var customStatus *HTTPStatusError if errors.As(err, &customStatus) { @@ -100,19 +116,28 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh } s := status.Convert(err) - pb := s.Proto() w.Header().Del("Trailer") w.Header().Del("Transfer-Encoding") - contentType := marshaler.ContentType(pb) + respRw, err := mux.forwardResponseRewriter(ctx, s.Proto()) + if err != nil { + grpclog.Errorf("Failed to rewrite error message %q: %v", s, err) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallbackRewriter); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + contentType := marshaler.ContentType(respRw) w.Header().Set("Content-Type", contentType) if s.Code() == codes.Unauthenticated { w.Header().Set("WWW-Authenticate", s.Message()) } - buf, merr := marshaler.Marshal(pb) + buf, merr := marshaler.Marshal(respRw) if merr != nil { grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) w.WriteHeader(http.StatusInternalServerError) diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 9005d6a0b..2fcd7af3c 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string { return paths } -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask type fieldMaskPathItem struct { // the list of prior fields leading up to node connected by dots path string diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index de1eef1f4..0fa907656 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -3,6 +3,7 @@ package runtime import ( "context" "errors" + "fmt" "io" "net/http" "net/textproto" @@ -55,20 +56,33 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(resp)) + var contentType string + if sct, ok := marshaler.(StreamContentType); ok { + contentType = sct.StreamContentType(respRw) + } else { + contentType = marshaler.ContentType(respRw) + } + w.Header().Set("Content-Type", contentType) } var buf []byte - httpBody, isHTTPBody := resp.(*httpbody.HttpBody) + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) switch { - case resp == nil: + case respRw == nil: buf, 
err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) case isHTTPBody: buf = httpBody.GetData() default: - result := map[string]interface{}{"result": resp} - if rb, ok := resp.(responseBody); ok { + result := map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { result["result"] = rb.XXX_ResponseBody() } @@ -164,12 +178,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha HTTPError(ctx, mux, marshaler, w, req, err) return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { + if rb, ok := respRw.(responseBody); ok { buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) } else { - buf, err = marshaler.Marshal(resp) + buf, err = marshaler.Marshal(respRw) } if err != nil { grpclog.Errorf("Marshal error: %v", err) @@ -181,7 +200,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha w.Header().Set("Content-Length", strconv.Itoa(len(buf))) } - if _, err = w.Write(buf); err != nil { + if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) { grpclog.Errorf("Failed to write response: %v", err) } @@ -201,8 +220,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) - return err + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) } } return nil diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index 2c0d25ff4..b1dfc37af 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -48,3 +48,11 @@ type Delimited interface { // Delimiter returns the record separator for the stream. Delimiter() []byte } + +// StreamContentType defines the streaming content type. +type StreamContentType interface { + // StreamContentType returns the content type for a stream. This shares the + // same behaviour as for `Marshaler.ContentType`, but is called, if present, + // in the case of a streamed response. + StreamContentType(v interface{}) string +} diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 0b051e6e8..07c28112c 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. // // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. +// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with an "application/json" Content-Type. 
// "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index ed9a7e438..60c2065dd 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -48,12 +48,19 @@ var encodedPathSplitter = regexp.MustCompile("(/|%2F)") // A HandlerFunc handles a specific pair of path pattern and HTTP method. type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) +// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation +// registration methods. It is generally recommended to use gRPC client or server interceptors instead +// where possible. +type Middleware func(HandlerFunc) HandlerFunc + // ServeMux is a request multiplexer for grpc-gateway. // It matches http requests to patterns and invokes the corresponding handler. type ServeMux struct { // handlers maps HTTP method to a list of handlers. handlers map[string][]handler + middlewares []Middleware forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + forwardResponseRewriter ForwardResponseRewriter marshalers marshalerRegistry incomingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc @@ -69,6 +76,24 @@ type ServeMux struct { // ServeMuxOption is an option that can be given to a ServeMux on construction. type ServeMuxOption func(*ServeMux) +// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages +// before they are forwarded in a unary, stream, or error response. +type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error) + +// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic +// that can rewrite the final response before it is forwarded. +// +// The response rewriter function is called during unary message forwarding, stream message +// forwarding and when errors are being forwarded. +// +// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect. +// Since this option involves making runtime changes to the response shape or type. +func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption { + return func(sm *ServeMux) { + sm.forwardResponseRewriter = fwdResponseRewriter + } +} + // WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. // // forwardResponseOption is an option that will be called on the relevant context.Context, @@ -89,6 +114,15 @@ func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { } } +// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC +// interceptors when using the direct-to-implementation registration methods and cannot rely +// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible. +func WithMiddlewares(middlewares ...Middleware) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.middlewares = append(serveMux.middlewares, middlewares...) 
+ } +} + // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. // Configuring this will mean the generated OpenAPI output is no longer correct, and it should be // done with careful consideration. @@ -277,13 +311,14 @@ func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMux // NewServeMux returns a new ServeMux whose internal mapping is empty. func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - errorHandler: DefaultHTTPErrorHandler, - streamErrorHandler: DefaultStreamErrorHandler, - routingErrorHandler: DefaultRoutingErrorHandler, - unescapingMode: UnescapingModeDefault, + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil }, + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, } for _, opt := range opts { @@ -305,6 +340,9 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { // Handle associates "h" to the pair of HTTP method and path pattern. func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if len(s.middlewares) > 0 { + h = chainMiddlewares(s.middlewares)(h) + } s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) } @@ -405,7 +443,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } continue } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } @@ -458,7 +496,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } _, outboundMarshaler := MarshalerForRequest(s, r) @@ -484,3 +522,16 @@ type handler struct { pat Pattern h HandlerFunc } + +func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) { + h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams) +} + +func chainMiddlewares(mws []Middleware) Middleware { + return func(next HandlerFunc) HandlerFunc { + for i := len(mws); i > 0; i-- { + next = mws[i-1](next) + } + return next + } +} diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index d549407f2..f710036b3 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) { } // Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. +// and returns a pointer to an int64 whose value is same as the parsed integer. 
func Int64P(val string) (*int64, error) { i, err := Int64(val) if err != nil { @@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) { } // Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. +// and returns a pointer to an int32 whose value is same as the parsed integer. func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index fe634174b..0a1ca7e06 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -141,7 +141,7 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin } // Check if oneof already set - if of := fieldDescriptor.ContainingOneof(); of != nil { + if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { if f := msgValue.WhichOneof(of); f != nil { return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) } @@ -291,7 +291,11 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } - msg = timestamppb.New(t) + timestamp := timestamppb.New(t) + if ok := timestamp.IsValid(); !ok { + return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) + } + msg = timestamp case "google.protobuf.Duration": d, err := time.ParseDuration(value) if err != nil { diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index dfe7de486..38ca39cc5 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,6 +1,6 @@ package utilities -// An OpCode is a opcode of compiled path patterns. +// OpCode is an opcode of compiled path patterns. type OpCode int // These constants are the valid values of OpCode. 
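The runtime/mux.go and runtime/context.go hunks above introduce three APIs that reviewers of this vendor bump are most likely to touch downstream: `Middleware`/`WithMiddlewares`, `ForwardResponseRewriter`/`WithForwardResponseRewriter`, and `HTTPPattern`. The following is a rough, non-authoritative sketch of how a consumer might wire them together; the logging middleware, the identity rewriter, and the listen address are assumptions added purely for illustration, while the option and type signatures come from the vendored hunks above.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Illustrative middleware (an assumption, not part of the vendored code):
	// wraps every registered HandlerFunc and logs the matched HTTP pattern,
	// which the new HTTPPattern helper exposes from the request context.
	var logging runtime.Middleware = func(next runtime.HandlerFunc) runtime.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
			if pat, ok := runtime.HTTPPattern(r.Context()); ok {
				log.Printf("%s %s matched %v", r.Method, r.URL.Path, pat)
			}
			next(w, r, pathParams)
		}
	}

	mux := runtime.NewServeMux(
		runtime.WithMiddlewares(logging),
		// Identity rewriter, shown only for the signature; real rewriters may
		// return a reshaped value, at the cost of diverging from the OpenAPI spec.
		runtime.WithForwardResponseRewriter(func(ctx context.Context, resp proto.Message) (any, error) {
			return resp, nil
		}),
	)

	// Port chosen arbitrarily for the sketch.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```

Per the doc comments in the mux.go hunk, middlewares registered this way are an alternative to interceptors for the direct-to-implementation registration path, and gRPC client or server interceptors remain the recommended mechanism where they are available.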
diff --git a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index d224ab776..66aa5f2dc 100644 --- a/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/constraint/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,7 +5,7 @@ import ( "strings" ) -// flagInterface is an cut down interface to `flag` +// flagInterface is a cut down interface to `flag` type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/constraint/vendor/github.com/imdario/mergo/.deepsource.toml b/constraint/vendor/github.com/imdario/mergo/.deepsource.toml deleted file mode 100644 index 8a0681af8..000000000 --- a/constraint/vendor/github.com/imdario/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/constraint/vendor/github.com/imdario/mergo/.gitignore b/constraint/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c3412b..000000000 --- a/constraint/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/constraint/vendor/github.com/imdario/mergo/.travis.yml b/constraint/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index d324c43ba..000000000 --- a/constraint/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -race -v ./... -after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/constraint/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/constraint/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907..000000000 --- a/constraint/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/constraint/vendor/github.com/imdario/mergo/LICENSE b/constraint/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298..000000000 --- a/constraint/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/constraint/vendor/github.com/imdario/mergo/README.md b/constraint/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index 7e6f7aeee..000000000 --- a/constraint/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,235 +0,0 @@ -# Mergo - - -[![GoDoc][3]][4] -[![GitHub release][5]][6] -[![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] -[![Become my sponsor][15]][16] - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://img.shields.io/github/sponsors/imdario -[16]: https://github.com/sponsors/imdario - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -### Important note - -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). 
I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: - -Buy Me a Coffee at ko-fi.com -Donate using Liberapay -Become my sponsor - -### Mergo in the wild - -- [cli/cli](https://github.com/cli/cli) -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- 
[containerssh/containerssh](https://github.com/containerssh/containerssh) -- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) -- [tjpnz/structbot](https://github.com/tjpnz/structbot) - -## Install - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. - -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -Here is a nice example: - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v3 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransformer struct { -} - -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
- - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/constraint/vendor/github.com/imdario/mergo/doc.go b/constraint/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index fcd985f99..000000000 --- a/constraint/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -Important note - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. - -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -Install - -Do your usual installation procedure: - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ... - } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. 
- -Here is a nice example: - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - ) - - type Foo struct { - A string - B int64 - } - - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} - } - -Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { - } - - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } - - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -About - -Written by Dario Castañé: https://da.rio.hn - -License - -BSD 3-Clause license, as Go language. - -*/ -package mergo diff --git a/constraint/vendor/github.com/imdario/mergo/map.go b/constraint/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index a13a7ee46..000000000 --- a/constraint/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) 
-} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/constraint/vendor/github.com/imdario/mergo/merge.go b/constraint/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 8b4e2f47a..000000000 --- a/constraint/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Overwrite bool - AppendSlice bool - TypeCheck bool - Transformers Transformers - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - - if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - - if src.Kind() != reflect.Map { - if overwrite { - dst.Set(src) - } - return - } - - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - - } - dst.SetMapIndex(key, dstSlice) - } - } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue - } - - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, 
srcElement) - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break - } - default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. 
-func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/constraint/vendor/github.com/imdario/mergo/mergo.go b/constraint/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index 9fe362d47..000000000 --- a/constraint/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. -var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs, maps, and slices are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - ptr uintptr - typ reflect.Type - next *visit -} - -// From src/pkg/encoding/json/encode.go. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - return isEmptyValue(v.Elem()) - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} diff --git a/constraint/vendor/github.com/klauspost/compress/.goreleaser.yml b/constraint/vendor/github.com/klauspost/compress/.goreleaser.yml index a22953805..4528059ca 100644 --- a/constraint/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/constraint/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -1,5 +1,5 @@ -# This is an example goreleaser.yaml file with some sane defaults. -# Make sure to check the documentation at http://goreleaser.com +version: 2 + before: hooks: - ./gen.sh @@ -99,7 +99,7 @@ archives: checksum: name_template: 'checksums.txt' snapshot: - name_template: "{{ .Tag }}-next" + version_template: "{{ .Tag }}-next" changelog: sort: asc filters: diff --git a/constraint/vendor/github.com/klauspost/compress/README.md b/constraint/vendor/github.com/klauspost/compress/README.md index 05c7359e4..de264c85a 100644 --- a/constraint/vendor/github.com/klauspost/compress/README.md +++ b/constraint/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,27 @@ This package provides various compression algorithms. # changelog +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. 
https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 @@ -81,7 +102,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -136,7 +157,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -339,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -518,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. diff --git a/constraint/vendor/github.com/klauspost/compress/fse/decompress.go b/constraint/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7e..0c7dd4ffe 100644 --- a/constraint/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/constraint/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. -// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/constraint/vendor/github.com/klauspost/compress/huff0/decompress.go b/constraint/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b25..0f56b02d7 100644 --- a/constraint/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/constraint/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. 
@@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/constraint/vendor/github.com/klauspost/compress/zstd/blockdec.go b/constraint/vendor/github.com/klauspost/compress/zstd/blockdec.go index 03744fbc7..9c28840c3 100644 --- a/constraint/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/constraint/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/constraint/vendor/github.com/klauspost/compress/zstd/enc_better.go b/constraint/vendor/github.com/klauspost/compress/zstd/enc_better.go index a4f5bf91f..84a79fde7 100644 --- a/constraint/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/constraint/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -179,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -210,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -241,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -270,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -708,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -738,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -772,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. 
var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -801,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/constraint/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/constraint/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f7..d36be7bd8 100644 --- a/constraint/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/constraint/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/constraint/vendor/github.com/klauspost/compress/zstd/encoder.go b/constraint/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0f..8f8223cd3 100644 --- a/constraint/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/constraint/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -6,6 +6,7 @@ package zstd import ( "crypto/rand" + "errors" "fmt" "io" "math" @@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) { // and write CRC if requested. 
func (e *Encoder) Write(p []byte) (n int, err error) { s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } for len(p) > 0 { if len(p)+len(s.filling) < e.o.blockSize { if e.o.crc { @@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error { s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) s.wg.Add(1) + if final { + s.eofWritten = true + } go func(src []byte) { if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) @@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error { blk := enc.Block() enc.Encode(blk, src) blk.last = final - if final { - s.eofWritten = true - } // Wait for pending writes. s.wWg.Wait() if s.writeErr != nil { @@ -401,12 +405,20 @@ func (e *Encoder) Flush() error { if len(s.filling) > 0 { err := e.nextBlock(false) if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } } s.wg.Wait() s.wWg.Wait() if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return s.err } return s.writeErr @@ -422,6 +434,9 @@ func (e *Encoder) Close() error { } err := e.nextBlock(true) if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } if s.frameContentSize > 0 { @@ -459,6 +474,11 @@ func (e *Encoder) Close() error { } _, s.err = s.w.Write(frame) } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + return s.err } @@ -469,6 +489,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. 
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/constraint/vendor/github.com/klauspost/compress/zstd/framedec.go b/constraint/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7e..e47af66e7 100644 --- a/constraint/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/constraint/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/constraint/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/constraint/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd828..c59f17e07 100644 --- a/constraint/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/constraint/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) } if ctx.litRemain < 0 { diff --git a/constraint/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/constraint/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 5b06174b8..f5591fa1e 100644 --- a/constraint/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/constraint/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition diff --git a/constraint/vendor/github.com/klauspost/compress/zstd/zstd.go b/constraint/vendor/github.com/klauspost/compress/zstd/zstd.go index 4be7cc736..066bef2a4 100644 --- a/constraint/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/constraint/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -88,6 +88,10 @@ var ( // Close has been 
called. ErrDecoderClosed = errors.New("decoder used after Close") + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. + ErrEncoderClosed = errors.New("encoder used after Close") + // ErrDecoderNilInput is returned when a nil Reader was provided // and an operation other than Reset/DecodeAll/Close was attempted. ErrDecoderNilInput = errors.New("nil input provided as reader") diff --git a/constraint/vendor/github.com/onsi/gomega/CHANGELOG.md b/constraint/vendor/github.com/onsi/gomega/CHANGELOG.md index 62af14ad2..a20d997cd 100644 --- a/constraint/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/constraint/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,76 @@ +## 1.36.2 + +### Maintenance +- Bump google.golang.org/protobuf from 1.35.1 to 1.36.1 (#810) [9a7609d] +- Bump golang.org/x/net from 0.30.0 to 0.33.0 (#807) [b6cb028] +- Bump github.com/onsi/ginkgo/v2 from 2.20.1 to 2.22.1 (#808) [5756529] +- Bump nokogiri from 1.16.3 to 1.16.5 in /docs (#757) [dabc12e] + +## 1.36.1 + +### Fixes +- Fix https://github.com/onsi/gomega/issues/803 [1c6c112] +- resolves onsi/gomega#696: make HaveField great on pointer receivers given only a non-addressable value [4feb9d7] + +## 1.36.0 + +### Features +- new: make collection-related matchers Go 1.23 iterator aware [4c964c6] + +### Maintenance +- Replace min/max helpers with built-in min/max [ece6872] +- Fix some typos in docs [8e924d7] + +## 1.35.1 + +### Fixes +- Export EnforceDefaultTimeoutsWhenUsingContexts and DisableDefaultTimeoutsWhenUsingContext [ca36da1] + +## 1.35.0 + +### Features + +- You can now call `EnforceDefaultTimeoutsWhenUsingContexts()` to have `Eventually` honor the default timeout when passed a context. (prior to this you had to expclility add a timeout) [e4c4265] +- You can call `StopTrying(message).Successfully()` to abort a `Consistently` early without failure [eeca931] + +### Fixes + +- Stop memoizing the result of `HaveField` to avoid unexpected errors when used with async assertions. [3bdbc4e] + +### Maintenance + +- Bump all dependencies [a05a416] + +## 1.34.2 + +Require Go 1.22+ + +### Maintenance +- bump ginkgo as well [c59c6dc] +- bump to go 1.22 - remove x/exp dependency [8158b99] + +## 1.34.1 + +### Maintenance +- Use slices from exp/slices to keep golang 1.20 compat [5e71dcd] + +## 1.34.0 + +### Features +- Add RoundTripper method to ghttp.Server [c549e0d] + +### Fixes +- fix incorrect handling of nil slices in HaveExactElements (fixes #771) [878940c] +- issue_765 - fixed bug in Hopcroft-Karp algorithm [ebadb67] + +### Maintenance +- bump ginkgo [8af2ece] +- Fix typo in docs [123a071] +- Bump github.com/onsi/ginkgo/v2 from 2.17.2 to 2.17.3 (#756) [0e69083] +- Bump google.golang.org/protobuf from 1.33.0 to 1.34.1 (#755) [2675796] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#754) [4160c0f] +- Bump github-pages from 230 to 231 in /docs (#748) [892c303] + ## 1.33.1 ### Fixes diff --git a/constraint/vendor/github.com/onsi/gomega/gomega_dsl.go b/constraint/vendor/github.com/onsi/gomega/gomega_dsl.go index 9697d5134..9a028f3f3 100644 --- a/constraint/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/constraint/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.33.1" +const GOMEGA_VERSION = "1.36.2" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). 
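Editorial note on the vendored Gomega changelog above: release 1.35.0 adds StopTrying(message).Successfully(), which lets a polled function end a Consistently early without that counting as a failure (the matching logic appears further down in the async_assertion.go hunk). The following is a minimal illustrative test sketch, not part of this diff; it assumes only the public Gomega API, and the poll counter and threshold are invented for the example.

package example_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// TestConsistentlyStopsEarly sketches how StopTrying(...).Successfully()
// (Gomega 1.35+) aborts a Consistently poll early while still passing.
func TestConsistentlyStopsEarly(t *testing.T) {
	g := NewWithT(t)

	polls := 0
	g.Consistently(func() error {
		polls++
		if polls >= 3 {
			// End the Consistently early and treat it as a success.
			return StopTrying("saw enough samples").Successfully()
		}
		return nil // the invariant held on this poll
	}).WithTimeout(time.Second).WithPolling(10 * time.Millisecond).Should(Succeed())
}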
@@ -319,7 +319,19 @@ you can also use Eventually().WithContext(ctx) to pass in the context. Passed-in
 	Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17))
 }, SpecTimeout(time.Second))
 
-Either way the context passd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
+Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
+
+By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. For example:
+
+	Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17))
+
+now either the context cancellation or the timeout will cause Eventually to stop polling.
+
+If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call:
+
+	EnforceDefaultTimeoutsWhenUsingContexts()
+
+in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if either the context is cancelled or the default timeout elapses.
 
 **Category 3: Making assertions _in_ the function passed into Eventually**
 
@@ -491,6 +503,16 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) {
 	Default.SetDefaultConsistentlyPollingInterval(t)
 }
 
+// EnforceDefaultTimeoutsWhenUsingContexts forces `Eventually` to apply a default timeout even when a context is provided.
+func EnforceDefaultTimeoutsWhenUsingContexts() {
+	Default.EnforceDefaultTimeoutsWhenUsingContexts()
+}
+
+// DisableDefaultTimeoutsWhenUsingContext disables the default timeout when a context is provided to `Eventually`.
+func DisableDefaultTimeoutsWhenUsingContext() {
+	Default.DisableDefaultTimeoutsWhenUsingContext()
+}
+
 // AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
 // the matcher passed to the Should and ShouldNot methods.
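Editorial note on the doc text added above: it describes how Eventually interacts with a passed-in context, an explicit .WithTimeout(), and the new EnforceDefaultTimeoutsWhenUsingContexts() switch. Below is a small illustrative test sketch, not part of the vendored diff; fetchCount and the thresholds are invented for the example.

package example_test

import (
	"context"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// TestEventuallyWithContextTimeout shows a context-aware Eventually that also
// carries an explicit timeout, per the vendored gomega_dsl.go docs above.
func TestEventuallyWithContextTimeout(t *testing.T) {
	g := NewWithT(t)

	count := 0
	fetchCount := func(ctx context.Context) (int, error) {
		count++ // pretend each poll observes one more item
		return count, nil
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Polling stops on whichever comes first: context cancellation or the
	// explicit 2s timeout. Without WithTimeout, only the context would bound
	// the poll (unless EnforceDefaultTimeoutsWhenUsingContexts() had been called).
	g.Eventually(fetchCount).WithContext(ctx).WithTimeout(2 * time.Second).Should(BeNumerically(">=", 3))
}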
// diff --git a/constraint/vendor/github.com/onsi/gomega/internal/async_assertion.go b/constraint/vendor/github.com/onsi/gomega/internal/async_assertion.go index cde9e2ec8..8b4cd1f5b 100644 --- a/constraint/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/constraint/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -335,7 +335,7 @@ func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time { if assertion.asyncType == AsyncAssertionTypeConsistently { return time.After(assertion.g.DurationBundle.ConsistentlyDuration) } else { - if assertion.ctx == nil { + if assertion.ctx == nil || assertion.g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts { return time.After(assertion.g.DurationBundle.EventuallyTimeout) } else { return nil @@ -496,7 +496,15 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch for _, err := range []error{actualErr, matcherErr} { if pollingSignalErr, ok := AsPollingSignalError(err); ok { if pollingSignalErr.IsStopTrying() { - fail("Told to stop trying") + if pollingSignalErr.IsSuccessful() { + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("Told to stop trying (and ignoring call to Successfully(), as it is only relevant with Consistently)") + } else { + return true // early escape hatch for Consistently + } + } else { + fail("Told to stop trying") + } return false } if pollingSignalErr.IsTryAgainAfter() { diff --git a/constraint/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/constraint/vendor/github.com/onsi/gomega/internal/duration_bundle.go index 6e0d90d3a..2e026c336 100644 --- a/constraint/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ b/constraint/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -8,10 +8,11 @@ import ( ) type DurationBundle struct { - EventuallyTimeout time.Duration - EventuallyPollingInterval time.Duration - ConsistentlyDuration time.Duration - ConsistentlyPollingInterval time.Duration + EventuallyTimeout time.Duration + EventuallyPollingInterval time.Duration + ConsistentlyDuration time.Duration + ConsistentlyPollingInterval time.Duration + EnforceDefaultTimeoutsWhenUsingContexts bool } const ( @@ -20,15 +21,19 @@ const ( ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION" ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL" + + EnforceDefaultTimeoutsWhenUsingContextsEnvVarName = "GOMEGA_ENFORCE_DEFAULT_TIMEOUTS_WHEN_USING_CONTEXTS" ) func FetchDefaultDurationBundle() DurationBundle { + _, EnforceDefaultTimeoutsWhenUsingContexts := os.LookupEnv(EnforceDefaultTimeoutsWhenUsingContextsEnvVarName) return DurationBundle{ EventuallyTimeout: durationFromEnv(EventuallyTimeoutEnvVarName, time.Second), EventuallyPollingInterval: durationFromEnv(EventuallyPollingIntervalEnvVarName, 10*time.Millisecond), - ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), - ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), + ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + EnforceDefaultTimeoutsWhenUsingContexts: EnforceDefaultTimeoutsWhenUsingContexts, } } diff --git a/constraint/vendor/github.com/onsi/gomega/internal/gomega.go b/constraint/vendor/github.com/onsi/gomega/internal/gomega.go index de1f4f336..c6e2fcc0e 100644 --- 
a/constraint/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/constraint/vendor/github.com/onsi/gomega/internal/gomega.go @@ -127,3 +127,11 @@ func (g *Gomega) SetDefaultConsistentlyDuration(t time.Duration) { func (g *Gomega) SetDefaultConsistentlyPollingInterval(t time.Duration) { g.DurationBundle.ConsistentlyPollingInterval = t } + +func (g *Gomega) EnforceDefaultTimeoutsWhenUsingContexts() { + g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = true +} + +func (g *Gomega) DisableDefaultTimeoutsWhenUsingContext() { + g.DurationBundle.EnforceDefaultTimeoutsWhenUsingContexts = false +} diff --git a/constraint/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/constraint/vendor/github.com/onsi/gomega/internal/polling_signal_error.go index 83b04b1a4..3a4f7ddd9 100644 --- a/constraint/vendor/github.com/onsi/gomega/internal/polling_signal_error.go +++ b/constraint/vendor/github.com/onsi/gomega/internal/polling_signal_error.go @@ -17,6 +17,7 @@ type PollingSignalError interface { error Wrap(err error) PollingSignalError Attach(description string, obj any) PollingSignalError + Successfully() PollingSignalError Now() } @@ -45,6 +46,7 @@ type PollingSignalErrorImpl struct { wrappedErr error pollingSignalErrorType PollingSignalErrorType duration time.Duration + successful bool Attachments []PollingSignalErrorAttachment } @@ -73,6 +75,11 @@ func (s *PollingSignalErrorImpl) Unwrap() error { return s.wrappedErr } +func (s *PollingSignalErrorImpl) Successfully() PollingSignalError { + s.successful = true + return s +} + func (s *PollingSignalErrorImpl) Now() { panic(s) } @@ -81,6 +88,10 @@ func (s *PollingSignalErrorImpl) IsStopTrying() bool { return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying } +func (s *PollingSignalErrorImpl) IsSuccessful() bool { + return s.successful +} + func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool { return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter } diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/constraint/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go index 527c1a1c1..bd7f0b96e 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -4,17 +4,31 @@ package matchers import ( "fmt" + "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type BeEmptyMatcher struct { } func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { + // short-circuit the iterator case, as we only need to see the first + // element, if any. + if miter.IsIter(actual) { + var length int + if miter.IsSeq2(actual) { + miter.IterateKV(actual, func(k, v reflect.Value) bool { length++; return false }) + } else { + miter.IterateV(actual, func(v reflect.Value) bool { length++; return false }) + } + return length == 0, nil + } + length, ok := lengthOf(actual) if !ok { - return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice/iterator. 
Got:\n%s", format.Object(actual, 1)) } return length == 0, nil diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/consist_of.go b/constraint/vendor/github.com/onsi/gomega/matchers/consist_of.go index f69037a4f..a11188182 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -7,6 +7,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" ) @@ -17,8 +18,8 @@ type ConsistOfMatcher struct { } func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) } matchers := matchers(matcher.Elements) @@ -60,10 +61,21 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { } func flatten(elems []interface{}) []interface{} { - if len(elems) != 1 || !isArrayOrSlice(elems[0]) { + if len(elems) != 1 || + !(isArrayOrSlice(elems[0]) || + (miter.IsIter(elems[0]) && !miter.IsSeq2(elems[0]))) { return elems } + if miter.IsIter(elems[0]) { + flattened := []any{} + miter.IterateV(elems[0], func(v reflect.Value) bool { + flattened = append(flattened, v.Interface()) + return true + }) + return flattened + } + value := reflect.ValueOf(elems[0]) flattened := make([]interface{}, value.Len()) for i := 0; i < value.Len(); i++ { @@ -116,7 +128,19 @@ func presentable(elems []interface{}) interface{} { func valuesOf(actual interface{}) []interface{} { value := reflect.ValueOf(actual) values := []interface{}{} - if isMap(actual) { + if miter.IsIter(actual) { + if miter.IsSeq2(actual) { + miter.IterateKV(actual, func(k, v reflect.Value) bool { + values = append(values, v.Interface()) + return true + }) + } else { + miter.IterateV(actual, func(v reflect.Value) bool { + values = append(values, v.Interface()) + return true + }) + } + } else if isMap(actual) { keys := value.MapKeys() for i := 0; i < value.Len(); i++ { values = append(values, value.MapIndex(keys[i]).Interface()) diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/constraint/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go index 3d45c9ebc..830239c7b 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -8,6 +8,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type ContainElementMatcher struct { @@ -16,16 +17,18 @@ type ContainElementMatcher struct { } func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("ContainElement matcher expects an array/slice/map/iterator. 
Got:\n%s", format.Object(actual, 1)) } var actualT reflect.Type var result reflect.Value - switch l := len(matcher.Result); { - case l > 1: + switch numResultArgs := len(matcher.Result); { + case numResultArgs > 1: return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at") - case l == 1: + case numResultArgs == 1: + // Check the optional result arg to point to a single value/array/slice/map + // of a type compatible with the actual value. if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr { return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s", format.Object(matcher.Result[0], 1)) @@ -34,93 +37,209 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e resultReference := matcher.Result[0] result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings switch result.Kind() { - case reflect.Array: + case reflect.Array: // result arrays are not supported, as they cannot be dynamically sized. + if miter.IsIter(actual) { + _, actualvT := miter.IterKVTypes(actual) + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + reflect.SliceOf(actualvT), result.Type().String()) + } return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", reflect.SliceOf(actualT.Elem()).String(), result.Type().String()) - case reflect.Slice: - if !isArrayOrSlice(actual) { + + case reflect.Slice: // result slice + // can we assign elements in actual to elements in what the result + // arg points to? + // - ✔ actual is an array or slice + // - ✔ actual is an iter.Seq producing "v" elements + // - ✔ actual is an iter.Seq2 producing "v" elements, ignoring + // the "k" elements. + switch { + case isArrayOrSlice(actual): + if !actualT.Elem().AssignableTo(result.Type().Elem()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + + case miter.IsIter(actual): + _, actualvT := miter.IterKVTypes(actual) + if !actualvT.AssignableTo(result.Type().Elem()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualvT.String(), result.Type().String()) + } + + default: // incompatible result reference return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String()) } - if !actualT.Elem().AssignableTo(result.Type().Elem()) { - return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", - actualT.String(), result.Type().String()) - } - case reflect.Map: - if !isMap(actual) { - return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", - actualT.String(), result.Type().String()) - } - if !actualT.AssignableTo(result.Type()) { + + case reflect.Map: // result map + // can we assign elements in actual to elements in what the result + // arg points to? + // - ✔ actual is a map + // - ✔ actual is an iter.Seq2 (iter.Seq doesn't fit though) + switch { + case isMap(actual): + if !actualT.AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + + case miter.IsIter(actual): + actualkT, actualvT := miter.IterKVTypes(actual) + if actualkT == nil { + return false, fmt.Errorf("ContainElement cannot return findings. 
Need *%s, got *%s", + reflect.SliceOf(actualvT).String(), result.Type().String()) + } + if !reflect.MapOf(actualkT, actualvT).AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + reflect.MapOf(actualkT, actualvT), result.Type().String()) + } + + default: // incompatible result reference return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", actualT.String(), result.Type().String()) } + default: - if !actualT.Elem().AssignableTo(result.Type()) { - return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", - actualT.Elem().String(), result.Type().String()) + // can we assign a (single) element in actual to what the result arg + // points to? + switch { + case miter.IsIter(actual): + _, actualvT := miter.IterKVTypes(actual) + if !actualvT.AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualvT.String(), result.Type().String()) + } + default: + if !actualT.Elem().AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.Elem().String(), result.Type().String()) + } } } } + // If the supplied matcher isn't an Omega matcher, default to the Equal + // matcher. elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) if !elementIsMatcher { elemMatcher = &EqualMatcher{Expected: matcher.Element} } value := reflect.ValueOf(actual) - var valueAt func(int) interface{} - var getFindings func() reflect.Value - var foundAt func(int) + var getFindings func() reflect.Value // abstracts how the findings are collected and stored + var lastError error - if isMap(actual) { - keys := value.MapKeys() - valueAt = func(i int) interface{} { - return value.MapIndex(keys[i]).Interface() + if !miter.IsIter(actual) { + var valueAt func(int) interface{} + var foundAt func(int) + // We're dealing with an array/slice/map, so in all cases we can iterate + // over the elements in actual using indices (that can be considered + // keys in case of maps). 
+ if isMap(actual) { + keys := value.MapKeys() + valueAt = func(i int) interface{} { + return value.MapIndex(keys[i]).Interface() + } + if result.Kind() != reflect.Invalid { + fm := reflect.MakeMap(actualT) + getFindings = func() reflect.Value { return fm } + foundAt = func(i int) { + fm.SetMapIndex(keys[i], value.MapIndex(keys[i])) + } + } + } else { + valueAt = func(i int) interface{} { + return value.Index(i).Interface() + } + if result.Kind() != reflect.Invalid { + var fsl reflect.Value + if result.Kind() == reflect.Slice { + fsl = reflect.MakeSlice(result.Type(), 0, 0) + } else { + fsl = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) + } + getFindings = func() reflect.Value { return fsl } + foundAt = func(i int) { + fsl = reflect.Append(fsl, value.Index(i)) + } + } } - if result.Kind() != reflect.Invalid { - fm := reflect.MakeMap(actualT) - getFindings = func() reflect.Value { - return fm + + for i := 0; i < value.Len(); i++ { + elem := valueAt(i) + success, err := elemMatcher.Match(elem) + if err != nil { + lastError = err + continue } - foundAt = func(i int) { - fm.SetMapIndex(keys[i], value.MapIndex(keys[i])) + if success { + if result.Kind() == reflect.Invalid { + return true, nil + } + foundAt(i) } } } else { - valueAt = func(i int) interface{} { - return value.Index(i).Interface() - } + // We're dealing with an iterator as a first-class construct, so things + // are slightly different: there is no index defined as in case of + // arrays/slices/maps, just "ooooorder" + var found func(k, v reflect.Value) if result.Kind() != reflect.Invalid { - var f reflect.Value - if result.Kind() == reflect.Slice { - f = reflect.MakeSlice(result.Type(), 0, 0) + if result.Kind() == reflect.Map { + fm := reflect.MakeMap(result.Type()) + getFindings = func() reflect.Value { return fm } + found = func(k, v reflect.Value) { fm.SetMapIndex(k, v) } } else { - f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) - } - getFindings = func() reflect.Value { - return f - } - foundAt = func(i int) { - f = reflect.Append(f, value.Index(i)) + var fsl reflect.Value + if result.Kind() == reflect.Slice { + fsl = reflect.MakeSlice(result.Type(), 0, 0) + } else { + fsl = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) + } + getFindings = func() reflect.Value { return fsl } + found = func(_, v reflect.Value) { fsl = reflect.Append(fsl, v) } } } - } - var lastError error - for i := 0; i < value.Len(); i++ { - elem := valueAt(i) - success, err := elemMatcher.Match(elem) - if err != nil { - lastError = err - continue + success := false + actualkT, _ := miter.IterKVTypes(actual) + if actualkT == nil { + miter.IterateV(actual, func(v reflect.Value) bool { + var err error + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + lastError = err + return true // iterate on... + } + if success { + if result.Kind() == reflect.Invalid { + return false // a match and no result needed, so we're done + } + found(reflect.Value{}, v) + } + return true // iterate on... + }) + } else { + miter.IterateKV(actual, func(k, v reflect.Value) bool { + var err error + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + lastError = err + return true // iterate on... + } + if success { + if result.Kind() == reflect.Invalid { + return false // a match and no result needed, so we're done + } + found(k, v) + } + return true // iterate on... 
+ }) } - if success { - if result.Kind() == reflect.Invalid { - return true, nil - } - foundAt(i) + if success && result.Kind() == reflect.Invalid { + return true, nil } } diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/constraint/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go index 946cd8bea..d9fcb8b80 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph" ) @@ -13,8 +14,8 @@ type ContainElementsMatcher struct { } func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("ContainElements matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("ContainElements matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) } matchers := matchers(matcher.Elements) diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/constraint/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go index 025b6e1ac..4111f2b86 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go @@ -5,6 +5,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type HaveEachMatcher struct { @@ -12,8 +13,8 @@ type HaveEachMatcher struct { } func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) { - if !isArrayOrSlice(actual) && !isMap(actual) { - return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s", + if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { + return false, fmt.Errorf("HaveEach matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) } @@ -22,6 +23,38 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err elemMatcher = &EqualMatcher{Expected: matcher.Element} } + if miter.IsIter(actual) { + // rejecting the non-elements case works different for iterators as we + // don't want to fetch all elements into a slice first. + count := 0 + var success bool + var err error + if miter.IsSeq2(actual) { + miter.IterateKV(actual, func(k, v reflect.Value) bool { + count++ + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + return false + } + return success + }) + } else { + miter.IterateV(actual, func(v reflect.Value) bool { + count++ + success, err = elemMatcher.Match(v.Interface()) + if err != nil { + return false + } + return success + }) + } + if count == 0 { + return false, fmt.Errorf("HaveEach matcher expects a non-empty iter.Seq/iter.Seq2. Got:\n%s", + format.Object(actual, 1)) + } + return success, err + } + value := reflect.ValueOf(actual) if value.Len() == 0 { return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s", @@ -40,7 +73,8 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err } } - // if there are no elements, then HaveEach will match. 
+ // if we never failed then we succeed; the empty/nil cases have already been + // rejected above. for i := 0; i < value.Len(); i++ { success, err := elemMatcher.Match(valueAt(i)) if err != nil { diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/constraint/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go index dca5b9446..23799f1c6 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -2,8 +2,10 @@ package matchers import ( "fmt" + "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type mismatchFailure struct { @@ -21,24 +23,68 @@ type HaveExactElementsMatcher struct { func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) { matcher.resetState() - if isMap(actual) { - return false, fmt.Errorf("error") + if isMap(actual) || miter.IsSeq2(actual) { + return false, fmt.Errorf("HaveExactElements matcher doesn't work on map or iter.Seq2. Got:\n%s", format.Object(actual, 1)) } matchers := matchers(matcher.Elements) - values := valuesOf(actual) - lenMatchers := len(matchers) + + success = true + + if miter.IsIter(actual) { + // In the worst case, we need to see everything before we can give our + // verdict. The only exception is fast fail. + i := 0 + miter.IterateV(actual, func(v reflect.Value) bool { + if i >= lenMatchers { + // the iterator produces more values than we got matchers: this + // is not good. + matcher.extraIndex = i + success = false + return false + } + + elemMatcher := matchers[i].(omegaMatcher) + match, err := elemMatcher.Match(v.Interface()) + if err != nil { + matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ + index: i, + failure: err.Error(), + }) + success = false + } else if !match { + matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ + index: i, + failure: elemMatcher.FailureMessage(v.Interface()), + }) + success = false + } + i++ + return true + }) + if i < len(matchers) { + // the iterator produced less values than we got matchers: this is + // no good, no no no. 
+ matcher.missingIndex = i + success = false + } + return success, nil + } + + values := valuesOf(actual) lenValues := len(values) for i := 0; i < lenMatchers || i < lenValues; i++ { if i >= lenMatchers { matcher.extraIndex = i + success = false continue } if i >= lenValues { matcher.missingIndex = i + success = false return } @@ -49,15 +95,17 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool index: i, failure: err.Error(), }) + success = false } else if !match { matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ index: i, failure: elemMatcher.FailureMessage(values[i]), }) + success = false } } - return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil + return success, nil } func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) { diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/have_field.go b/constraint/vendor/github.com/onsi/gomega/matchers/have_field.go index 6989f78c4..293457e85 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -17,7 +17,7 @@ func (e missingFieldError) Error() string { return string(e) } -func extractField(actual interface{}, field string, matchername string) (interface{}, error) { +func extractField(actual interface{}, field string, matchername string) (any, error) { fields := strings.SplitN(field, ".", 2) actualValue := reflect.ValueOf(actual) @@ -40,7 +40,12 @@ func extractField(actual interface{}, field string, matchername string) (interfa extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()")) } if extractedValue == (reflect.Value{}) { - return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual)) + ptr := reflect.New(actualValue.Type()) + ptr.Elem().Set(actualValue) + extractedValue = ptr.MethodByName(strings.TrimSuffix(fields[0], "()")) + if extractedValue == (reflect.Value{}) { + return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual)) + } } t := extractedValue.Type() if t.NumIn() != 0 || t.NumOut() != 1 { @@ -64,36 +69,46 @@ func extractField(actual interface{}, field string, matchername string) (interfa type HaveFieldMatcher struct { Field string Expected interface{} +} - extractedField interface{} - expectedMatcher omegaMatcher +func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher { + var isMatcher bool + expectedMatcher, isMatcher := matcher.Expected.(omegaMatcher) + if !isMatcher { + expectedMatcher = &EqualMatcher{Expected: matcher.Expected} + } + return expectedMatcher } func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) { - matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField") + extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { return false, err } - var isMatcher bool - matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher) - if !isMatcher { - matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected} - } - - return matcher.expectedMatcher.Match(matcher.extractedField) + return matcher.expectedMatcher().Match(extractedField) } func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) { + extractedField, err := extractField(actual, matcher.Field, "HaveField") + if err != nil { + 
// this really shouldn't happen + return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err) + } message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field) - message += matcher.expectedMatcher.FailureMessage(matcher.extractedField) + message += matcher.expectedMatcher().FailureMessage(extractedField) return message } func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { + extractedField, err := extractField(actual, matcher.Field, "HaveField") + if err != nil { + // this really shouldn't happen + return fmt.Sprintf("Failed to extract field '%s': %s", matcher.Field, err) + } message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field) - message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField) + message += matcher.expectedMatcher().NegatedFailureMessage(extractedField) return message } diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/constraint/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go index 00cffec70..b62ee93cb 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -7,6 +7,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type HaveKeyMatcher struct { @@ -14,8 +15,8 @@ type HaveKeyMatcher struct { } func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { - if !isMap(actual) { - return false, fmt.Errorf("HaveKey matcher expects a map. Got:%s", format.Object(actual, 1)) + if !isMap(actual) && !miter.IsSeq2(actual) { + return false, fmt.Errorf("HaveKey matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1)) } keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) @@ -23,6 +24,20 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro keyMatcher = &EqualMatcher{Expected: matcher.Key} } + if miter.IsSeq2(actual) { + var success bool + var err error + miter.IterateKV(actual, func(k, v reflect.Value) bool { + success, err = keyMatcher.Match(k.Interface()) + if err != nil { + err = fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + return false + } + return !success + }) + return success, err + } + keys := reflect.ValueOf(actual).MapKeys() for i := 0; i < len(keys); i++ { success, err := keyMatcher.Match(keys[i].Interface()) diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/constraint/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go index 4c5916804..3d608f63e 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -7,6 +7,7 @@ import ( "reflect" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/matchers/internal/miter" ) type HaveKeyWithValueMatcher struct { @@ -15,8 +16,8 @@ type HaveKeyWithValueMatcher struct { } func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { - if !isMap(actual) { - return false, fmt.Errorf("HaveKeyWithValue matcher expects a map. Got:%s", format.Object(actual, 1)) + if !isMap(actual) && !miter.IsSeq2(actual) { + return false, fmt.Errorf("HaveKeyWithValue matcher expects a map/iter.Seq2. 
Got:%s", format.Object(actual, 1)) } keyMatcher, keyIsMatcher := matcher.Key.(omegaMatcher) @@ -29,6 +30,27 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, valueMatcher = &EqualMatcher{Expected: matcher.Value} } + if miter.IsSeq2(actual) { + var success bool + var err error + miter.IterateKV(actual, func(k, v reflect.Value) bool { + success, err = keyMatcher.Match(k.Interface()) + if err != nil { + err = fmt.Errorf("HaveKey's key matcher failed with:\n%s%s", format.Indent, err.Error()) + return false + } + if success { + success, err = valueMatcher.Match(v.Interface()) + if err != nil { + err = fmt.Errorf("HaveKeyWithValue's value matcher failed with:\n%s%s", format.Indent, err.Error()) + return false + } + } + return !success + }) + return success, err + } + keys := reflect.ValueOf(actual).MapKeys() for i := 0; i < len(keys); i++ { success, err := keyMatcher.Match(keys[i].Interface()) diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/constraint/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go index ee4276189..ca25713fe 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -13,7 +13,7 @@ type HaveLenMatcher struct { func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { length, ok := lengthOf(actual) if !ok { - return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1)) + return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1)) } return length == matcher.Count, nil diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go b/constraint/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go new file mode 100644 index 000000000..d8837a4d0 --- /dev/null +++ b/constraint/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_iter.go @@ -0,0 +1,128 @@ +//go:build go1.23 + +package miter + +import ( + "reflect" +) + +// HasIterators always returns false for Go versions before 1.23. +func HasIterators() bool { return true } + +// IsIter returns true if the specified value is a function type that can be +// range-d over, otherwise false. +// +// We don't use reflect's CanSeq and CanSeq2 directly, as these would return +// true also for other value types that are range-able, such as integers, +// slices, et cetera. Here, we aim only at range-able (iterator) functions. +func IsIter(it any) bool { + if it == nil { // on purpose we only test for untyped nil. + return false + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func { + return false + } + return t.CanSeq() || t.CanSeq2() +} + +// IterKVTypes returns the reflection types of an iterator's yield function's K +// and optional V arguments, otherwise nil K and V reflection types. +func IterKVTypes(it any) (k, v reflect.Type) { + if it == nil { + return + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func { + return + } + // get the reflection types for V, and where applicable, K. + switch { + case t.CanSeq(): + v = t. /*iterator fn*/ In(0). /*yield fn*/ In(0) + case t.CanSeq2(): + yieldfn := t. 
/*iterator fn*/ In(0) + k = yieldfn.In(0) + v = yieldfn.In(1) + } + return +} + +// IsSeq2 returns true if the passed iterator function is compatible with +// iter.Seq2, otherwise false. +// +// IsSeq2 hides the Go 1.23+ specific reflect.Type.CanSeq2 behind a facade which +// is empty for Go versions before 1.23. +func IsSeq2(it any) bool { + if it == nil { + return false + } + t := reflect.TypeOf(it) + return t.Kind() == reflect.Func && t.CanSeq2() +} + +// isNilly returns true if v is either an untyped nil, or is a nil function (not +// necessarily an iterator function). +func isNilly(v any) bool { + if v == nil { + return true + } + rv := reflect.ValueOf(v) + return rv.Kind() == reflect.Func && rv.IsNil() +} + +// IterateV loops over the elements produced by an iterator function, passing +// the elements to the specified yield function individually and stopping only +// when either the iterator function runs out of elements or the yield function +// tell us to stop it. +// +// IterateV works very much like reflect.Value.Seq but hides the Go 1.23+ +// specific parts behind a facade which is empty for Go versions before 1.23, in +// order to simplify code maintenance for matchers when using older Go versions. +func IterateV(it any, yield func(v reflect.Value) bool) { + if isNilly(it) { + return + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func || !t.CanSeq() { + return + } + // Call the specified iterator function, handing it our adaptor to call the + // specified generic reflection yield function. + reflectedYield := reflect.MakeFunc( + t. /*iterator fn*/ In(0), + func(args []reflect.Value) []reflect.Value { + return []reflect.Value{reflect.ValueOf(yield(args[0]))} + }) + reflect.ValueOf(it).Call([]reflect.Value{reflectedYield}) +} + +// IterateKV loops over the key-value elements produced by an iterator function, +// passing the elements to the specified yield function individually and +// stopping only when either the iterator function runs out of elements or the +// yield function tell us to stop it. +// +// IterateKV works very much like reflect.Value.Seq2 but hides the Go 1.23+ +// specific parts behind a facade which is empty for Go versions before 1.23, in +// order to simplify code maintenance for matchers when using older Go versions. +func IterateKV(it any, yield func(k, v reflect.Value) bool) { + if isNilly(it) { + return + } + // reject all non-iterator-func values, even if they're range-able. + t := reflect.TypeOf(it) + if t.Kind() != reflect.Func || !t.CanSeq2() { + return + } + // Call the specified iterator function, handing it our adaptor to call the + // specified generic reflection yield function. + reflectedYield := reflect.MakeFunc( + t. /*iterator fn*/ In(0), + func(args []reflect.Value) []reflect.Value { + return []reflect.Value{reflect.ValueOf(yield(args[0], args[1]))} + }) + reflect.ValueOf(it).Call([]reflect.Value{reflectedYield}) +} diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go b/constraint/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go new file mode 100644 index 000000000..4b8fcc55b --- /dev/null +++ b/constraint/vendor/github.com/onsi/gomega/matchers/internal/miter/type_support_noiter.go @@ -0,0 +1,44 @@ +//go:build !go1.23 + +/* +Gomega matchers + +This package implements the Gomega matchers and does not typically need to be imported. 
+See the docs for Gomega for documentation on the matchers + +http://onsi.github.io/gomega/ +*/ + +package miter + +import "reflect" + +// HasIterators always returns false for Go versions before 1.23. +func HasIterators() bool { return false } + +// IsIter always returns false for Go versions before 1.23 as there is no +// iterator (function) pattern defined yet; see also: +// https://tip.golang.org/blog/range-functions. +func IsIter(i any) bool { return false } + +// IsSeq2 always returns false for Go versions before 1.23 as there is no +// iterator (function) pattern defined yet; see also: +// https://tip.golang.org/blog/range-functions. +func IsSeq2(it any) bool { return false } + +// IterKVTypes always returns nil reflection types for Go versions before 1.23 +// as there is no iterator (function) pattern defined yet; see also: +// https://tip.golang.org/blog/range-functions. +func IterKVTypes(i any) (k, v reflect.Type) { + return +} + +// IterateV never loops over what has been passed to it as an iterator for Go +// versions before 1.23 as there is no iterator (function) pattern defined yet; +// see also: https://tip.golang.org/blog/range-functions. +func IterateV(it any, yield func(v reflect.Value) bool) {} + +// IterateKV never loops over what has been passed to it as an iterator for Go +// versions before 1.23 as there is no iterator (function) pattern defined yet; +// see also: https://tip.golang.org/blog/range-functions. +func IterateKV(it any, yield func(k, v reflect.Value) bool) {} diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/constraint/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go index 1c54edd8f..44aa61d4b 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -1,6 +1,8 @@ package bipartitegraph import ( + "slices" + . "github.com/onsi/gomega/matchers/support/goraph/edge" . 
"github.com/onsi/gomega/matchers/support/goraph/node" "github.com/onsi/gomega/matchers/support/goraph/util" @@ -157,6 +159,11 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ if len(currentLayer) == 0 { return []NodeOrderedSet{} } + if done { // if last layer - into last layer must be only 'free' nodes + currentLayer = slices.DeleteFunc(currentLayer, func(in Node) bool { + return !matching.Free(in) + }) + } guideLayers = append(guideLayers, currentLayer) } diff --git a/constraint/vendor/github.com/onsi/gomega/matchers/type_support.go b/constraint/vendor/github.com/onsi/gomega/matchers/type_support.go index dced2419e..b9440ac7a 100644 --- a/constraint/vendor/github.com/onsi/gomega/matchers/type_support.go +++ b/constraint/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -15,6 +15,8 @@ import ( "encoding/json" "fmt" "reflect" + + "github.com/onsi/gomega/matchers/internal/miter" ) type omegaMatcher interface { @@ -152,6 +154,17 @@ func lengthOf(a interface{}) (int, bool) { switch reflect.TypeOf(a).Kind() { case reflect.Map, reflect.Array, reflect.String, reflect.Chan, reflect.Slice: return reflect.ValueOf(a).Len(), true + case reflect.Func: + if !miter.IsIter(a) { + return 0, false + } + var l int + if miter.IsSeq2(a) { + miter.IterateKV(a, func(k, v reflect.Value) bool { l++; return true }) + } else { + miter.IterateV(a, func(v reflect.Value) bool { l++; return true }) + } + return l, true default: return 0, false } diff --git a/constraint/vendor/github.com/onsi/gomega/types/types.go b/constraint/vendor/github.com/onsi/gomega/types/types.go index 7c7adb941..30f2beed3 100644 --- a/constraint/vendor/github.com/onsi/gomega/types/types.go +++ b/constraint/vendor/github.com/onsi/gomega/types/types.go @@ -29,6 +29,8 @@ type Gomega interface { SetDefaultEventuallyPollingInterval(time.Duration) SetDefaultConsistentlyDuration(time.Duration) SetDefaultConsistentlyPollingInterval(time.Duration) + EnforceDefaultTimeoutsWhenUsingContexts() + DisableDefaultTimeoutsWhenUsingContext() } // All Gomega matchers must implement the GomegaMatcher interface diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/annotations.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/annotations.go index 7d09379fd..533290d32 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/annotations.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/annotations.go @@ -5,973 +5,29 @@ package ast import ( - "encoding/json" - "fmt" - "net/url" - "sort" - "strings" - - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/internal/deepcopy" - "github.com/open-policy-agent/opa/util" -) - -const ( - annotationScopePackage = "package" - annotationScopeImport = "import" - annotationScopeRule = "rule" - annotationScopeDocument = "document" - annotationScopeSubpackages = "subpackages" + v1 "github.com/open-policy-agent/opa/v1/ast" ) type ( // Annotations represents metadata attached to other AST nodes such as rules. 
- Annotations struct { - Scope string `json:"scope"` - Title string `json:"title,omitempty"` - Entrypoint bool `json:"entrypoint,omitempty"` - Description string `json:"description,omitempty"` - Organizations []string `json:"organizations,omitempty"` - RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"` - Authors []*AuthorAnnotation `json:"authors,omitempty"` - Schemas []*SchemaAnnotation `json:"schemas,omitempty"` - Custom map[string]interface{} `json:"custom,omitempty"` - Location *Location `json:"location,omitempty"` - - comments []*Comment - node Node - jsonOptions astJSON.Options - } + Annotations = v1.Annotations // SchemaAnnotation contains a schema declaration for the document identified by the path. - SchemaAnnotation struct { - Path Ref `json:"path"` - Schema Ref `json:"schema,omitempty"` - Definition *interface{} `json:"definition,omitempty"` - } - - AuthorAnnotation struct { - Name string `json:"name"` - Email string `json:"email,omitempty"` - } - - RelatedResourceAnnotation struct { - Ref url.URL `json:"ref"` - Description string `json:"description,omitempty"` - } - - AnnotationSet struct { - byRule map[*Rule][]*Annotations - byPackage map[int]*Annotations - byPath *annotationTreeNode - modules []*Module // Modules this set was constructed from - } + SchemaAnnotation = v1.SchemaAnnotation - annotationTreeNode struct { - Value *Annotations - Children map[Value]*annotationTreeNode // we assume key elements are hashable (vars and strings only!) - } + AuthorAnnotation = v1.AuthorAnnotation - AnnotationsRef struct { - Path Ref `json:"path"` // The path of the node the annotations are applied to - Annotations *Annotations `json:"annotations,omitempty"` - Location *Location `json:"location,omitempty"` // The location of the node the annotations are applied to + RelatedResourceAnnotation = v1.RelatedResourceAnnotation - jsonOptions astJSON.Options + AnnotationSet = v1.AnnotationSet - node Node // The node the annotations are applied to - } + AnnotationsRef = v1.AnnotationsRef - AnnotationsRefSet []*AnnotationsRef + AnnotationsRefSet = v1.AnnotationsRefSet - FlatAnnotationsRefSet AnnotationsRefSet + FlatAnnotationsRefSet = v1.FlatAnnotationsRefSet ) -func (a *Annotations) String() string { - bs, _ := a.MarshalJSON() - return string(bs) -} - -// Loc returns the location of this annotation. -func (a *Annotations) Loc() *Location { - return a.Location -} - -// SetLoc updates the location of this annotation. -func (a *Annotations) SetLoc(l *Location) { - a.Location = l -} - -// EndLoc returns the location of this annotation's last comment line. -func (a *Annotations) EndLoc() *Location { - count := len(a.comments) - if count == 0 { - return a.Location - } - return a.comments[count-1].Location -} - -// Compare returns an integer indicating if a is less than, equal to, or greater -// than other. 
-func (a *Annotations) Compare(other *Annotations) int { - - if a == nil && other == nil { - return 0 - } - - if a == nil { - return -1 - } - - if other == nil { - return 1 - } - - if cmp := scopeCompare(a.Scope, other.Scope); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(a.Title, other.Title); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(a.Description, other.Description); cmp != 0 { - return cmp - } - - if cmp := compareStringLists(a.Organizations, other.Organizations); cmp != 0 { - return cmp - } - - if cmp := compareRelatedResources(a.RelatedResources, other.RelatedResources); cmp != 0 { - return cmp - } - - if cmp := compareAuthors(a.Authors, other.Authors); cmp != 0 { - return cmp - } - - if cmp := compareSchemas(a.Schemas, other.Schemas); cmp != 0 { - return cmp - } - - if a.Entrypoint != other.Entrypoint { - if a.Entrypoint { - return 1 - } - return -1 - } - - if cmp := util.Compare(a.Custom, other.Custom); cmp != 0 { - return cmp - } - - return 0 -} - -// GetTargetPath returns the path of the node these Annotations are applied to (the target) -func (a *Annotations) GetTargetPath() Ref { - switch n := a.node.(type) { - case *Package: - return n.Path - case *Rule: - return n.Ref().GroundPrefix() - default: - return nil - } -} - -func (a *Annotations) setJSONOptions(opts astJSON.Options) { - a.jsonOptions = opts - if a.Location != nil { - a.Location.JSONOptions = opts - } -} - -func (a *Annotations) MarshalJSON() ([]byte, error) { - if a == nil { - return []byte(`{"scope":""}`), nil - } - - data := map[string]interface{}{ - "scope": a.Scope, - } - - if a.Title != "" { - data["title"] = a.Title - } - - if a.Description != "" { - data["description"] = a.Description - } - - if a.Entrypoint { - data["entrypoint"] = a.Entrypoint - } - - if len(a.Organizations) > 0 { - data["organizations"] = a.Organizations - } - - if len(a.RelatedResources) > 0 { - data["related_resources"] = a.RelatedResources - } - - if len(a.Authors) > 0 { - data["authors"] = a.Authors - } - - if len(a.Schemas) > 0 { - data["schemas"] = a.Schemas - } - - if len(a.Custom) > 0 { - data["custom"] = a.Custom - } - - if a.jsonOptions.MarshalOptions.IncludeLocation.Annotations { - if a.Location != nil { - data["location"] = a.Location - } - } - - return json.Marshal(data) -} - func NewAnnotationsRef(a *Annotations) *AnnotationsRef { - var loc *Location - if a.node != nil { - loc = a.node.Loc() - } - - return &AnnotationsRef{ - Location: loc, - Path: a.GetTargetPath(), - Annotations: a, - node: a.node, - jsonOptions: a.jsonOptions, - } -} - -func (ar *AnnotationsRef) GetPackage() *Package { - switch n := ar.node.(type) { - case *Package: - return n - case *Rule: - return n.Module.Package - default: - return nil - } -} - -func (ar *AnnotationsRef) GetRule() *Rule { - switch n := ar.node.(type) { - case *Rule: - return n - default: - return nil - } -} - -func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "path": ar.Path, - } - - if ar.Annotations != nil { - data["annotations"] = ar.Annotations - } - - if ar.jsonOptions.MarshalOptions.IncludeLocation.AnnotationsRef { - if ar.Location != nil { - data["location"] = ar.Location - } - } - - return json.Marshal(data) -} - -func scopeCompare(s1, s2 string) int { - - o1 := scopeOrder(s1) - o2 := scopeOrder(s2) - - if o2 < o1 { - return 1 - } else if o2 > o1 { - return -1 - } - - if s1 < s2 { - return -1 - } else if s2 < s1 { - return 1 - } - - return 0 -} - -func scopeOrder(s string) int { - switch s { - case 
annotationScopeRule: - return 1 - } - return 0 -} - -func compareAuthors(a, b []*AuthorAnnotation) int { - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - for i := 0; i < len(a); i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - - return 0 -} - -func compareRelatedResources(a, b []*RelatedResourceAnnotation) int { - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - for i := 0; i < len(a); i++ { - if cmp := strings.Compare(a[i].String(), b[i].String()); cmp != 0 { - return cmp - } - } - - return 0 -} - -func compareSchemas(a, b []*SchemaAnnotation) int { - maxLen := len(a) - if len(b) < maxLen { - maxLen = len(b) - } - - for i := 0; i < maxLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - return 0 -} - -func compareStringLists(a, b []string) int { - if len(a) > len(b) { - return 1 - } else if len(a) < len(b) { - return -1 - } - - for i := 0; i < len(a); i++ { - if cmp := strings.Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - - return 0 -} - -// Copy returns a deep copy of s. -func (a *Annotations) Copy(node Node) *Annotations { - cpy := *a - - cpy.Organizations = make([]string, len(a.Organizations)) - copy(cpy.Organizations, a.Organizations) - - cpy.RelatedResources = make([]*RelatedResourceAnnotation, len(a.RelatedResources)) - for i := range a.RelatedResources { - cpy.RelatedResources[i] = a.RelatedResources[i].Copy() - } - - cpy.Authors = make([]*AuthorAnnotation, len(a.Authors)) - for i := range a.Authors { - cpy.Authors[i] = a.Authors[i].Copy() - } - - cpy.Schemas = make([]*SchemaAnnotation, len(a.Schemas)) - for i := range a.Schemas { - cpy.Schemas[i] = a.Schemas[i].Copy() - } - - cpy.Custom = deepcopy.Map(a.Custom) - - cpy.node = node - - return &cpy -} - -// toObject constructs an AST Object from the annotation. 
-func (a *Annotations) toObject() (*Object, *Error) { - obj := NewObject() - - if a == nil { - return &obj, nil - } - - if len(a.Scope) > 0 { - obj.Insert(StringTerm("scope"), StringTerm(a.Scope)) - } - - if len(a.Title) > 0 { - obj.Insert(StringTerm("title"), StringTerm(a.Title)) - } - - if a.Entrypoint { - obj.Insert(StringTerm("entrypoint"), BooleanTerm(true)) - } - - if len(a.Description) > 0 { - obj.Insert(StringTerm("description"), StringTerm(a.Description)) - } - - if len(a.Organizations) > 0 { - orgs := make([]*Term, 0, len(a.Organizations)) - for _, org := range a.Organizations { - orgs = append(orgs, StringTerm(org)) - } - obj.Insert(StringTerm("organizations"), ArrayTerm(orgs...)) - } - - if len(a.RelatedResources) > 0 { - rrs := make([]*Term, 0, len(a.RelatedResources)) - for _, rr := range a.RelatedResources { - rrObj := NewObject(Item(StringTerm("ref"), StringTerm(rr.Ref.String()))) - if len(rr.Description) > 0 { - rrObj.Insert(StringTerm("description"), StringTerm(rr.Description)) - } - rrs = append(rrs, NewTerm(rrObj)) - } - obj.Insert(StringTerm("related_resources"), ArrayTerm(rrs...)) - } - - if len(a.Authors) > 0 { - as := make([]*Term, 0, len(a.Authors)) - for _, author := range a.Authors { - aObj := NewObject() - if len(author.Name) > 0 { - aObj.Insert(StringTerm("name"), StringTerm(author.Name)) - } - if len(author.Email) > 0 { - aObj.Insert(StringTerm("email"), StringTerm(author.Email)) - } - as = append(as, NewTerm(aObj)) - } - obj.Insert(StringTerm("authors"), ArrayTerm(as...)) - } - - if len(a.Schemas) > 0 { - ss := make([]*Term, 0, len(a.Schemas)) - for _, s := range a.Schemas { - sObj := NewObject() - if len(s.Path) > 0 { - sObj.Insert(StringTerm("path"), NewTerm(s.Path.toArray())) - } - if len(s.Schema) > 0 { - sObj.Insert(StringTerm("schema"), NewTerm(s.Schema.toArray())) - } - if s.Definition != nil { - def, err := InterfaceToValue(s.Definition) - if err != nil { - return nil, NewError(CompileErr, a.Location, "invalid definition in schema annotation: %s", err.Error()) - } - sObj.Insert(StringTerm("definition"), NewTerm(def)) - } - ss = append(ss, NewTerm(sObj)) - } - obj.Insert(StringTerm("schemas"), ArrayTerm(ss...)) - } - - if len(a.Custom) > 0 { - c, err := InterfaceToValue(a.Custom) - if err != nil { - return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error()) - } - obj.Insert(StringTerm("custom"), NewTerm(c)) - } - - return &obj, nil -} - -func attachRuleAnnotations(mod *Module) { - // make a copy of the annotations - cpy := make([]*Annotations, len(mod.Annotations)) - for i, a := range mod.Annotations { - cpy[i] = a.Copy(a.node) - } - - for _, rule := range mod.Rules { - var j int - var found bool - for i, a := range cpy { - if rule.Ref().Equal(a.GetTargetPath()) { - if a.Scope == annotationScopeDocument { - rule.Annotations = append(rule.Annotations, a) - } else if a.Scope == annotationScopeRule && rule.Loc().Row > a.Location.Row { - j = i - found = true - rule.Annotations = append(rule.Annotations, a) - } - } - } - - if found && j < len(cpy) { - cpy = append(cpy[:j], cpy[j+1:]...) - } - } -} - -func attachAnnotationsNodes(mod *Module) Errors { - var errs Errors - - // Find first non-annotation statement following each annotation and attach - // the annotation to that statement. 
- for _, a := range mod.Annotations { - for _, stmt := range mod.stmts { - _, ok := stmt.(*Annotations) - if !ok { - if stmt.Loc().Row > a.Location.Row { - a.node = stmt - break - } - } - } - - if a.Scope == "" { - switch a.node.(type) { - case *Rule: - if a.Entrypoint { - a.Scope = annotationScopeDocument - } else { - a.Scope = annotationScopeRule - } - case *Package: - a.Scope = annotationScopePackage - case *Import: - a.Scope = annotationScopeImport - } - } - - if err := validateAnnotationScopeAttachment(a); err != nil { - errs = append(errs, err) - } - - if err := validateAnnotationEntrypointAttachment(a); err != nil { - errs = append(errs, err) - } - } - - return errs -} - -func validateAnnotationScopeAttachment(a *Annotations) *Error { - - switch a.Scope { - case annotationScopeRule, annotationScopeDocument: - if _, ok := a.node.(*Rule); ok { - return nil - } - return newScopeAttachmentErr(a, "rule") - case annotationScopePackage, annotationScopeSubpackages: - if _, ok := a.node.(*Package); ok { - return nil - } - return newScopeAttachmentErr(a, "package") - } - - return NewError(ParseErr, a.Loc(), "invalid annotation scope '%v'. Use one of '%s', '%s', '%s', or '%s'", - a.Scope, annotationScopeRule, annotationScopeDocument, annotationScopePackage, annotationScopeSubpackages) -} - -func validateAnnotationEntrypointAttachment(a *Annotations) *Error { - if a.Entrypoint && !(a.Scope == annotationScopeDocument || a.Scope == annotationScopePackage) { - return NewError( - ParseErr, a.Loc(), "annotation entrypoint applied to non-document or package scope '%v'", a.Scope) - } - return nil -} - -// Copy returns a deep copy of a. -func (a *AuthorAnnotation) Copy() *AuthorAnnotation { - cpy := *a - return &cpy -} - -// Compare returns an integer indicating if s is less than, equal to, or greater -// than other. -func (a *AuthorAnnotation) Compare(other *AuthorAnnotation) int { - if cmp := strings.Compare(a.Name, other.Name); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(a.Email, other.Email); cmp != 0 { - return cmp - } - - return 0 -} - -func (a *AuthorAnnotation) String() string { - if len(a.Email) == 0 { - return a.Name - } else if len(a.Name) == 0 { - return fmt.Sprintf("<%s>", a.Email) - } - return fmt.Sprintf("%s <%s>", a.Name, a.Email) -} - -// Copy returns a deep copy of rr. -func (rr *RelatedResourceAnnotation) Copy() *RelatedResourceAnnotation { - cpy := *rr - return &cpy -} - -// Compare returns an integer indicating if s is less than, equal to, or greater -// than other. -func (rr *RelatedResourceAnnotation) Compare(other *RelatedResourceAnnotation) int { - if cmp := strings.Compare(rr.Description, other.Description); cmp != 0 { - return cmp - } - - if cmp := strings.Compare(rr.Ref.String(), other.Ref.String()); cmp != 0 { - return cmp - } - - return 0 -} - -func (rr *RelatedResourceAnnotation) String() string { - bs, _ := json.Marshal(rr) - return string(bs) -} - -func (rr *RelatedResourceAnnotation) MarshalJSON() ([]byte, error) { - d := map[string]interface{}{ - "ref": rr.Ref.String(), - } - - if len(rr.Description) > 0 { - d["description"] = rr.Description - } - - return json.Marshal(d) -} - -// Copy returns a deep copy of s. -func (s *SchemaAnnotation) Copy() *SchemaAnnotation { - cpy := *s - return &cpy -} - -// Compare returns an integer indicating if s is less than, equal to, or greater -// than other. 
-func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int { - - if cmp := s.Path.Compare(other.Path); cmp != 0 { - return cmp - } - - if cmp := s.Schema.Compare(other.Schema); cmp != 0 { - return cmp - } - - if s.Definition != nil && other.Definition == nil { - return -1 - } else if s.Definition == nil && other.Definition != nil { - return 1 - } else if s.Definition != nil && other.Definition != nil { - return util.Compare(*s.Definition, *other.Definition) - } - - return 0 -} - -func (s *SchemaAnnotation) String() string { - bs, _ := json.Marshal(s) - return string(bs) -} - -func newAnnotationSet() *AnnotationSet { - return &AnnotationSet{ - byRule: map[*Rule][]*Annotations{}, - byPackage: map[int]*Annotations{}, - byPath: newAnnotationTree(), - } -} - -func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) { - as := newAnnotationSet() - var errs Errors - for _, m := range modules { - for _, a := range m.Annotations { - if err := as.add(a); err != nil { - errs = append(errs, err) - } - } - } - if len(errs) > 0 { - return nil, errs - } - as.modules = modules - return as, nil -} - -// NOTE(philipc): During copy propagation, the underlying Nodes can be -// stripped away from the annotations, leading to nil deref panics. We -// silently ignore these cases for now, as a workaround. -func (as *AnnotationSet) add(a *Annotations) *Error { - switch a.Scope { - case annotationScopeRule: - if rule, ok := a.node.(*Rule); ok { - as.byRule[rule] = append(as.byRule[rule], a) - } - case annotationScopePackage: - if pkg, ok := a.node.(*Package); ok { - hash := pkg.Path.Hash() - if exist, ok := as.byPackage[hash]; ok { - return errAnnotationRedeclared(a, exist.Location) - } - as.byPackage[hash] = a - } - case annotationScopeDocument: - if rule, ok := a.node.(*Rule); ok { - path := rule.Ref().GroundPrefix() - x := as.byPath.get(path) - if x != nil { - return errAnnotationRedeclared(a, x.Value.Location) - } - as.byPath.insert(path, a) - } - case annotationScopeSubpackages: - if pkg, ok := a.node.(*Package); ok { - x := as.byPath.get(pkg.Path) - if x != nil && x.Value != nil { - return errAnnotationRedeclared(a, x.Value.Location) - } - as.byPath.insert(pkg.Path, a) - } - } - return nil -} - -func (as *AnnotationSet) GetRuleScope(r *Rule) []*Annotations { - if as == nil { - return nil - } - return as.byRule[r] -} - -func (as *AnnotationSet) GetSubpackagesScope(path Ref) []*Annotations { - if as == nil { - return nil - } - return as.byPath.ancestors(path) -} - -func (as *AnnotationSet) GetDocumentScope(path Ref) *Annotations { - if as == nil { - return nil - } - if node := as.byPath.get(path); node != nil { - return node.Value - } - return nil -} - -func (as *AnnotationSet) GetPackageScope(pkg *Package) *Annotations { - if as == nil { - return nil - } - return as.byPackage[pkg.Path.Hash()] -} - -// Flatten returns a flattened list view of this AnnotationSet. -// The returned slice is sorted, first by the annotations' target path, then by their target location -func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet { - // This preallocation often won't be optimal, but it's superior to starting with a nil slice. 
- refs := make([]*AnnotationsRef, 0, len(as.byPath.Children)+len(as.byRule)+len(as.byPackage)) - - refs = as.byPath.flatten(refs) - - for _, a := range as.byPackage { - refs = append(refs, NewAnnotationsRef(a)) - } - - for _, as := range as.byRule { - for _, a := range as { - refs = append(refs, NewAnnotationsRef(a)) - } - } - - // Sort by path, then annotation location, for stable output - sort.SliceStable(refs, func(i, j int) bool { - return refs[i].Compare(refs[j]) < 0 - }) - - return refs -} - -// Chain returns the chain of annotations leading up to the given rule. -// The returned slice is ordered as follows -// 0. Entries for the given rule, ordered from the METADATA block declared immediately above the rule, to the block declared farthest away (always at least one entry) -// 1. The 'document' scope entry, if any -// 2. The 'package' scope entry, if any -// 3. Entries for the 'subpackages' scope, if any; ordered from the closest package path to the fartest. E.g.: 'do.re.mi', 'do.re', 'do' -// The returned slice is guaranteed to always contain at least one entry, corresponding to the given rule. -func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet { - var refs []*AnnotationsRef - - ruleAnnots := as.GetRuleScope(rule) - - if len(ruleAnnots) >= 1 { - for _, a := range ruleAnnots { - refs = append(refs, NewAnnotationsRef(a)) - } - } else { - // Make sure there is always a leading entry representing the passed rule, even if it has no annotations - refs = append(refs, &AnnotationsRef{ - Location: rule.Location, - Path: rule.Ref().GroundPrefix(), - node: rule, - }) - } - - if len(refs) > 1 { - // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward - sort.SliceStable(refs, func(i, j int) bool { - return refs[i].Annotations.Location.Compare(refs[j].Annotations.Location) > 0 - }) - } - - docAnnots := as.GetDocumentScope(rule.Ref().GroundPrefix()) - if docAnnots != nil { - refs = append(refs, NewAnnotationsRef(docAnnots)) - } - - pkg := rule.Module.Package - pkgAnnots := as.GetPackageScope(pkg) - if pkgAnnots != nil { - refs = append(refs, NewAnnotationsRef(pkgAnnots)) - } - - subPkgAnnots := as.GetSubpackagesScope(pkg.Path) - // We need to reverse the order, as subPkgAnnots ordering will start at the root, - // whereas we want to end at the root. - for i := len(subPkgAnnots) - 1; i >= 0; i-- { - refs = append(refs, NewAnnotationsRef(subPkgAnnots[i])) - } - - return refs -} - -func (ars FlatAnnotationsRefSet) Insert(ar *AnnotationsRef) FlatAnnotationsRefSet { - result := make(FlatAnnotationsRefSet, 0, len(ars)+1) - - // insertion sort, first by path, then location - for i, current := range ars { - if ar.Compare(current) < 0 { - result = append(result, ar) - result = append(result, ars[i:]...) 
- break - } - result = append(result, current) - } - - if len(result) < len(ars)+1 { - result = append(result, ar) - } - - return result -} - -func newAnnotationTree() *annotationTreeNode { - return &annotationTreeNode{ - Value: nil, - Children: map[Value]*annotationTreeNode{}, - } -} - -func (t *annotationTreeNode) insert(path Ref, value *Annotations) { - node := t - for _, k := range path { - child, ok := node.Children[k.Value] - if !ok { - child = newAnnotationTree() - node.Children[k.Value] = child - } - node = child - } - node.Value = value -} - -func (t *annotationTreeNode) get(path Ref) *annotationTreeNode { - node := t - for _, k := range path { - if node == nil { - return nil - } - child, ok := node.Children[k.Value] - if !ok { - return nil - } - node = child - } - return node -} - -// ancestors returns a slice of annotations in ascending order, starting with the root of ref; e.g.: 'root', 'root.foo', 'root.foo.bar'. -func (t *annotationTreeNode) ancestors(path Ref) (result []*Annotations) { - node := t - for _, k := range path { - if node == nil { - return result - } - child, ok := node.Children[k.Value] - if !ok { - return result - } - if child.Value != nil { - result = append(result, child.Value) - } - node = child - } - return result -} - -func (t *annotationTreeNode) flatten(refs []*AnnotationsRef) []*AnnotationsRef { - if a := t.Value; a != nil { - refs = append(refs, NewAnnotationsRef(a)) - } - for _, c := range t.Children { - refs = c.flatten(refs) - } - return refs -} - -func (ar *AnnotationsRef) Compare(other *AnnotationsRef) int { - if c := ar.Path.Compare(other.Path); c != 0 { - return c - } - - if c := ar.Annotations.Location.Compare(other.Annotations.Location); c != 0 { - return c - } - - return ar.Annotations.Compare(other.Annotations) + return v1.NewAnnotationsRef(a) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/builtins.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/builtins.go index f54d91d31..d0ab69a16 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/builtins.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/builtins.go @@ -5,1348 +5,230 @@ package ast import ( - "strings" - - "github.com/open-policy-agent/opa/types" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Builtins is the registry of built-in functions supported by OPA. // Call RegisterBuiltin to add a new built-in. -var Builtins []*Builtin +var Builtins = v1.Builtins // RegisterBuiltin adds a new built-in function to the registry. func RegisterBuiltin(b *Builtin) { - Builtins = append(Builtins, b) - BuiltinMap[b.Name] = b - if len(b.Infix) > 0 { - BuiltinMap[b.Infix] = b - } + v1.RegisterBuiltin(b) } // DefaultBuiltins is the registry of built-in functions supported in OPA // by default. When adding a new built-in function to OPA, update this // list. 
-var DefaultBuiltins = [...]*Builtin{ - // Unification/equality ("=") - Equality, - - // Assignment (":=") - Assign, - - // Membership, infix "in": `x in xs` - Member, - MemberWithKey, - - // Comparisons - GreaterThan, - GreaterThanEq, - LessThan, - LessThanEq, - NotEqual, - Equal, - - // Arithmetic - Plus, - Minus, - Multiply, - Divide, - Ceil, - Floor, - Round, - Abs, - Rem, - - // Bitwise Arithmetic - BitsOr, - BitsAnd, - BitsNegate, - BitsXOr, - BitsShiftLeft, - BitsShiftRight, - - // Binary - And, - Or, - - // Aggregates - Count, - Sum, - Product, - Max, - Min, - Any, - All, - - // Arrays - ArrayConcat, - ArraySlice, - ArrayReverse, - - // Conversions - ToNumber, - - // Casts (DEPRECATED) - CastObject, - CastNull, - CastBoolean, - CastString, - CastSet, - CastArray, - - // Regular Expressions - RegexIsValid, - RegexMatch, - RegexMatchDeprecated, - RegexSplit, - GlobsMatch, - RegexTemplateMatch, - RegexFind, - RegexFindAllStringSubmatch, - RegexReplace, - - // Sets - SetDiff, - Intersection, - Union, - - // Strings - AnyPrefixMatch, - AnySuffixMatch, - Concat, - FormatInt, - IndexOf, - IndexOfN, - Substring, - Lower, - Upper, - Contains, - StringCount, - StartsWith, - EndsWith, - Split, - Replace, - ReplaceN, - Trim, - TrimLeft, - TrimPrefix, - TrimRight, - TrimSuffix, - TrimSpace, - Sprintf, - StringReverse, - RenderTemplate, - - // Numbers - NumbersRange, - NumbersRangeStep, - RandIntn, - - // Encoding - JSONMarshal, - JSONMarshalWithOptions, - JSONUnmarshal, - JSONIsValid, - Base64Encode, - Base64Decode, - Base64IsValid, - Base64UrlEncode, - Base64UrlEncodeNoPad, - Base64UrlDecode, - URLQueryDecode, - URLQueryEncode, - URLQueryEncodeObject, - URLQueryDecodeObject, - YAMLMarshal, - YAMLUnmarshal, - YAMLIsValid, - HexEncode, - HexDecode, - - // Object Manipulation - ObjectUnion, - ObjectUnionN, - ObjectRemove, - ObjectFilter, - ObjectGet, - ObjectKeys, - ObjectSubset, - - // JSON Object Manipulation - JSONFilter, - JSONRemove, - JSONPatch, - - // Tokens - JWTDecode, - JWTVerifyRS256, - JWTVerifyRS384, - JWTVerifyRS512, - JWTVerifyPS256, - JWTVerifyPS384, - JWTVerifyPS512, - JWTVerifyES256, - JWTVerifyES384, - JWTVerifyES512, - JWTVerifyHS256, - JWTVerifyHS384, - JWTVerifyHS512, - JWTDecodeVerify, - JWTEncodeSignRaw, - JWTEncodeSign, - - // Time - NowNanos, - ParseNanos, - ParseRFC3339Nanos, - ParseDurationNanos, - Format, - Date, - Clock, - Weekday, - AddDate, - Diff, - - // Crypto - CryptoX509ParseCertificates, - CryptoX509ParseAndVerifyCertificates, - CryptoX509ParseAndVerifyCertificatesWithOptions, - CryptoMd5, - CryptoSha1, - CryptoSha256, - CryptoX509ParseCertificateRequest, - CryptoX509ParseRSAPrivateKey, - CryptoX509ParseKeyPair, - CryptoParsePrivateKeys, - CryptoHmacMd5, - CryptoHmacSha1, - CryptoHmacSha256, - CryptoHmacSha512, - CryptoHmacEqual, - - // Graphs - WalkBuiltin, - ReachableBuiltin, - ReachablePathsBuiltin, - - // Sort - Sort, - - // Types - IsNumber, - IsString, - IsBoolean, - IsArray, - IsSet, - IsObject, - IsNull, - TypeNameBuiltin, - - // HTTP - HTTPSend, - - // GraphQL - GraphQLParse, - GraphQLParseAndVerify, - GraphQLParseQuery, - GraphQLParseSchema, - GraphQLIsValid, - GraphQLSchemaIsValid, - - // JSON Schema - JSONSchemaVerify, - JSONMatchSchema, - - // Cloud Provider Helpers - ProvidersAWSSignReqObj, - - // Rego - RegoParseModule, - RegoMetadataChain, - RegoMetadataRule, - - // OPA - OPARuntime, - - // Tracing - Trace, - - // Networking - NetCIDROverlap, - NetCIDRIntersects, - NetCIDRContains, - NetCIDRContainsMatches, - NetCIDRExpand, - NetCIDRMerge, - 
NetLookupIPAddr, - NetCIDRIsValid, - - // Glob - GlobMatch, - GlobQuoteMeta, - - // Units - UnitsParse, - UnitsParseBytes, - - // UUIDs - UUIDRFC4122, - UUIDParse, - - // SemVers - SemVerIsValid, - SemVerCompare, - - // Printing - Print, - InternalPrint, -} +var DefaultBuiltins = v1.DefaultBuiltins // BuiltinMap provides a convenient mapping of built-in names to // built-in definitions. -var BuiltinMap map[string]*Builtin +var BuiltinMap = v1.BuiltinMap // Deprecated: Builtins can now be directly annotated with the // Nondeterministic property, and when set to true, will be ignored // for partial evaluation. -var IgnoreDuringPartialEval = []*Builtin{ - RandIntn, - UUIDRFC4122, - JWTDecodeVerify, - JWTEncodeSignRaw, - JWTEncodeSign, - NowNanos, - HTTPSend, - OPARuntime, - NetLookupIPAddr, -} +var IgnoreDuringPartialEval = v1.IgnoreDuringPartialEval /** * Unification */ // Equality represents the "=" operator. -var Equality = &Builtin{ - Name: "eq", - Infix: "=", - Decl: types.NewFunction( - types.Args(types.A, types.A), - types.B, - ), -} +var Equality = v1.Equality /** * Assignment */ // Assign represents the assignment (":=") operator. -var Assign = &Builtin{ - Name: "assign", - Infix: ":=", - Decl: types.NewFunction( - types.Args(types.A, types.A), - types.B, - ), -} +var Assign = v1.Assign // Member represents the `in` (infix) operator. -var Member = &Builtin{ - Name: "internal.member_2", - Infix: "in", - Decl: types.NewFunction( - types.Args( - types.A, - types.A, - ), - types.B, - ), -} +var Member = v1.Member // MemberWithKey represents the `in` (infix) operator when used // with two terms on the lhs, i.e., `k, v in obj`. -var MemberWithKey = &Builtin{ - Name: "internal.member_3", - Infix: "in", - Decl: types.NewFunction( - types.Args( - types.A, - types.A, - types.A, - ), - types.B, - ), -} +var MemberWithKey = v1.MemberWithKey -/** - * Comparisons - */ -var comparison = category("comparison") - -var GreaterThan = &Builtin{ - Name: "gt", - Infix: ">", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is greater than `y`; false otherwise"), - ), -} +var GreaterThan = v1.GreaterThan -var GreaterThanEq = &Builtin{ - Name: "gte", - Infix: ">=", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is greater or equal to `y`; false otherwise"), - ), -} +var GreaterThanEq = v1.GreaterThanEq // LessThan represents the "<" comparison operator. 
-var LessThan = &Builtin{ - Name: "lt", - Infix: "<", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is less than `y`; false otherwise"), - ), -} +var LessThan = v1.LessThan -var LessThanEq = &Builtin{ - Name: "lte", - Infix: "<=", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is less than or equal to `y`; false otherwise"), - ), -} +var LessThanEq = v1.LessThanEq -var NotEqual = &Builtin{ - Name: "neq", - Infix: "!=", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is not equal to `y`; false otherwise"), - ), -} +var NotEqual = v1.NotEqual // Equal represents the "==" comparison operator. -var Equal = &Builtin{ - Name: "equal", - Infix: "==", - Categories: comparison, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - types.Named("y", types.A), - ), - types.Named("result", types.B).Description("true if `x` is equal to `y`; false otherwise"), - ), -} +var Equal = v1.Equal -/** - * Arithmetic - */ -var number = category("numbers") - -var Plus = &Builtin{ - Name: "plus", - Infix: "+", - Description: "Plus adds two numbers together.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N).Description("the sum of `x` and `y`"), - ), - Categories: number, -} +var Plus = v1.Plus -var Minus = &Builtin{ - Name: "minus", - Infix: "-", - Description: "Minus subtracts the second number from the first number or computes the difference between two sets.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny(types.N, types.NewSet(types.A))), - types.Named("y", types.NewAny(types.N, types.NewSet(types.A))), - ), - types.Named("z", types.NewAny(types.N, types.NewSet(types.A))).Description("the difference of `x` and `y`"), - ), - Categories: category("sets", "numbers"), -} +var Minus = v1.Minus -var Multiply = &Builtin{ - Name: "mul", - Infix: "*", - Description: "Multiplies two numbers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N).Description("the product of `x` and `y`"), - ), - Categories: number, -} +var Multiply = v1.Multiply -var Divide = &Builtin{ - Name: "div", - Infix: "/", - Description: "Divides the first number by the second number.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the dividend"), - types.Named("y", types.N).Description("the divisor"), - ), - types.Named("z", types.N).Description("the result of `x` divided by `y`"), - ), - Categories: number, -} +var Divide = v1.Divide -var Round = &Builtin{ - Name: "round", - Description: "Rounds the number to the nearest integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the number to round"), - ), - types.Named("y", types.N).Description("the result of rounding `x`"), - ), - Categories: number, -} +var Round = v1.Round -var Ceil = &Builtin{ - Name: "ceil", - Description: "Rounds the number _up_ to the nearest integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the number to round"), - ), - types.Named("y", 
types.N).Description("the result of rounding `x` _up_"), - ), - Categories: number, -} +var Ceil = v1.Ceil -var Floor = &Builtin{ - Name: "floor", - Description: "Rounds the number _down_ to the nearest integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N).Description("the number to round"), - ), - types.Named("y", types.N).Description("the result of rounding `x` _down_"), - ), - Categories: number, -} +var Floor = v1.Floor -var Abs = &Builtin{ - Name: "abs", - Description: "Returns the number without its sign.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - ), - types.Named("y", types.N).Description("the absolute value of `x`"), - ), - Categories: number, -} +var Abs = v1.Abs -var Rem = &Builtin{ - Name: "rem", - Infix: "%", - Description: "Returns the remainder for of `x` divided by `y`, for `y != 0`.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N).Description("the remainder"), - ), - Categories: number, -} +var Rem = v1.Rem /** * Bitwise */ -var BitsOr = &Builtin{ - Name: "bits.or", - Description: "Returns the bitwise \"OR\" of two integers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsOr = v1.BitsOr -var BitsAnd = &Builtin{ - Name: "bits.and", - Description: "Returns the bitwise \"AND\" of two integers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsAnd = v1.BitsAnd -var BitsNegate = &Builtin{ - Name: "bits.negate", - Description: "Returns the bitwise negation (flip) of an integer.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsNegate = v1.BitsNegate -var BitsXOr = &Builtin{ - Name: "bits.xor", - Description: "Returns the bitwise \"XOR\" (exclusive-or) of two integers.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("y", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsXOr = v1.BitsXOr -var BitsShiftLeft = &Builtin{ - Name: "bits.lsh", - Description: "Returns a new integer with its bits shifted `s` bits to the left.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("s", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsShiftLeft = v1.BitsShiftLeft -var BitsShiftRight = &Builtin{ - Name: "bits.rsh", - Description: "Returns a new integer with its bits shifted `s` bits to the right.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.N), - types.Named("s", types.N), - ), - types.Named("z", types.N), - ), -} +var BitsShiftRight = v1.BitsShiftRight /** * Sets */ -var sets = category("sets") - -var And = &Builtin{ - Name: "and", - Infix: "&", - Description: "Returns the intersection of two sets.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewSet(types.A)), - types.Named("y", types.NewSet(types.A)), - ), - types.Named("z", types.NewSet(types.A)).Description("the intersection of `x` and `y`"), - ), - Categories: sets, -} +var And = v1.And // Or performs a union operation on sets. 
-var Or = &Builtin{ - Name: "or", - Infix: "|", - Description: "Returns the union of two sets.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewSet(types.A)), - types.Named("y", types.NewSet(types.A)), - ), - types.Named("z", types.NewSet(types.A)).Description("the union of `x` and `y`"), - ), - Categories: sets, -} +var Or = v1.Or -var Intersection = &Builtin{ - Name: "intersection", - Description: "Returns the intersection of the given input sets.", - Decl: types.NewFunction( - types.Args( - types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to intersect"), - ), - types.Named("y", types.NewSet(types.A)).Description("the intersection of all `xs` sets"), - ), - Categories: sets, -} +var Intersection = v1.Intersection -var Union = &Builtin{ - Name: "union", - Description: "Returns the union of the given input sets.", - Decl: types.NewFunction( - types.Args( - types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to merge"), - ), - types.Named("y", types.NewSet(types.A)).Description("the union of all `xs` sets"), - ), - Categories: sets, -} +var Union = v1.Union /** * Aggregates */ -var aggregates = category("aggregates") - -var Count = &Builtin{ - Name: "count", - Description: " Count takes a collection or string and returns the number of elements (or characters) in it.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.S, - )).Description("the set/array/object/string to be counted"), - ), - types.Named("n", types.N).Description("the count of elements, key/val pairs, or characters, respectively."), - ), - Categories: aggregates, -} +var Count = v1.Count -var Sum = &Builtin{ - Name: "sum", - Description: "Sums elements of an array or set of numbers.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.N), - types.NewArray(nil, types.N), - )), - ), - types.Named("n", types.N).Description("the sum of all elements"), - ), - Categories: aggregates, -} +var Sum = v1.Sum -var Product = &Builtin{ - Name: "product", - Description: "Muliplies elements of an array or set of numbers", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.N), - types.NewArray(nil, types.N), - )), - ), - types.Named("n", types.N).Description("the product of all elements"), - ), - Categories: aggregates, -} +var Product = v1.Product -var Max = &Builtin{ - Name: "max", - Description: "Returns the maximum value in a collection.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - )), - ), - types.Named("n", types.A).Description("the maximum of all elements"), - ), - Categories: aggregates, -} +var Max = v1.Max -var Min = &Builtin{ - Name: "min", - Description: "Returns the minimum value in a collection.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - )), - ), - types.Named("n", types.A).Description("the minimum of all elements"), - ), - Categories: aggregates, -} +var Min = v1.Min /** * Sorting */ -var Sort = &Builtin{ - Name: "sort", - Description: "Returns a sorted array.", - Decl: types.NewFunction( - types.Args( - types.Named("collection", types.NewAny( - types.NewArray(nil, types.A), - 
types.NewSet(types.A), - )).Description("the array or set to be sorted"), - ), - types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"), - ), - Categories: aggregates, -} +var Sort = v1.Sort /** * Arrays */ -var ArrayConcat = &Builtin{ - Name: "array.concat", - Description: "Concatenates two arrays.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewArray(nil, types.A)), - types.Named("y", types.NewArray(nil, types.A)), - ), - types.Named("z", types.NewArray(nil, types.A)).Description("the concatenation of `x` and `y`"), - ), -} +var ArrayConcat = v1.ArrayConcat -var ArraySlice = &Builtin{ - Name: "array.slice", - Description: "Returns a slice of a given array. If `start` is greater or equal than `stop`, `slice` is `[]`.", - Decl: types.NewFunction( - types.Args( - types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be sliced"), - types.Named("start", types.NewNumber()).Description("the start index of the returned slice; if less than zero, it's clamped to 0"), - types.Named("stop", types.NewNumber()).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"), - ), - types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"), - ), -} // NOTE(sr): this function really needs examples - -var ArrayReverse = &Builtin{ - Name: "array.reverse", - Description: "Returns the reverse of a given array.", - Decl: types.NewFunction( - types.Args( - types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be reversed"), - ), - types.Named("rev", types.NewArray(nil, types.A)).Description("an array containing the elements of `arr` in reverse order"), - ), -} +var ArraySlice = v1.ArraySlice + +var ArrayReverse = v1.ArrayReverse /** * Conversions */ -var conversions = category("conversions") - -var ToNumber = &Builtin{ - Name: "to_number", - Description: "Converts a string, bool, or number value to a number: Strings are converted to numbers using `strconv.Atoi`, Boolean `false` is converted to 0 and `true` is converted to 1.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.S, - types.B, - types.NewNull(), - )), - ), - types.Named("num", types.N), - ), - Categories: conversions, -} + +var ToNumber = v1.ToNumber /** * Regular Expressions */ -var RegexMatch = &Builtin{ - Name: "regex.match", - Description: "Matches a string against a regular expression.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("value to match against `pattern`"), - ), - types.Named("result", types.B), - ), -} +var RegexMatch = v1.RegexMatch -var RegexIsValid = &Builtin{ - Name: "regex.is_valid", - Description: "Checks if a string is a valid regular expression: the detailed syntax for patterns is defined by https://github.com/google/re2/wiki/Syntax.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - ), - types.Named("result", types.B), - ), -} +var RegexIsValid = v1.RegexIsValid -var RegexFindAllStringSubmatch = &Builtin{ - Name: "regex.find_all_string_submatch_n", - Description: "Returns all successive matches of the expression.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("string 
to match"), - types.Named("number", types.N).Description("number of matches to return; `-1` means all matches"), - ), - types.Named("output", types.NewArray(nil, types.NewArray(nil, types.S))), - ), -} +var RegexFindAllStringSubmatch = v1.RegexFindAllStringSubmatch -var RegexTemplateMatch = &Builtin{ - Name: "regex.template_match", - Description: "Matches a string against a pattern, where there pattern may be glob-like", - Decl: types.NewFunction( - types.Args( - types.Named("template", types.S).Description("template expression containing `0..n` regular expressions"), - types.Named("value", types.S).Description("string to match"), - types.Named("delimiter_start", types.S).Description("start delimiter of the regular expression in `template`"), - types.Named("delimiter_end", types.S).Description("end delimiter of the regular expression in `template`"), - ), - types.Named("result", types.B), - ), -} // TODO(sr): example:`regex.template_match("urn:foo:{.*}", "urn:foo:bar:baz", "{", "}")`` returns ``true``. - -var RegexSplit = &Builtin{ - Name: "regex.split", - Description: "Splits the input string by the occurrences of the given pattern.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("string to match"), - ), - types.Named("output", types.NewArray(nil, types.S)).Description("the parts obtained by splitting `value`"), - ), -} +var RegexTemplateMatch = v1.RegexTemplateMatch + +var RegexSplit = v1.RegexSplit // RegexFind takes two strings and a number, the pattern, the value and number of match values to // return, -1 means all match values. -var RegexFind = &Builtin{ - Name: "regex.find_n", - Description: "Returns the specified number of matches when matching the input against the pattern.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S).Description("regular expression"), - types.Named("value", types.S).Description("string to match"), - types.Named("number", types.N).Description("number of matches to return, if `-1`, returns all matches"), - ), - types.Named("output", types.NewArray(nil, types.S)).Description("collected matches"), - ), -} +var RegexFind = v1.RegexFind // GlobsMatch takes two strings regexp-style strings and evaluates to true if their // intersection matches a non-empty set of non-empty strings. // Examples: // - "a.a." and ".b.b" -> true. // - "[a-z]*" and [0-9]+" -> not true. -var GlobsMatch = &Builtin{ - Name: "regex.globs_match", - Description: `Checks if the intersection of two glob-style regular expressions matches a non-empty set of non-empty strings. 
-The set of regex symbols is limited for this builtin: only ` + "`.`, `*`, `+`, `[`, `-`, `]` and `\\` are treated as special symbols.", - Decl: types.NewFunction( - types.Args( - types.Named("glob1", types.S), - types.Named("glob2", types.S), - ), - types.Named("result", types.B), - ), -} +var GlobsMatch = v1.GlobsMatch /** * Strings */ -var stringsCat = category("strings") - -var AnyPrefixMatch = &Builtin{ - Name: "strings.any_prefix_match", - Description: "Returns true if any of the search strings begins with any of the base strings.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("search string(s)"), - types.Named("base", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("base string(s)"), - ), - types.Named("result", types.B).Description("result of the prefix check"), - ), - Categories: stringsCat, -} -var AnySuffixMatch = &Builtin{ - Name: "strings.any_suffix_match", - Description: "Returns true if any of the search strings ends with any of the base strings.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("search string(s)"), - types.Named("base", types.NewAny( - types.S, - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("base string(s)"), - ), - types.Named("result", types.B).Description("result of the suffix check"), - ), - Categories: stringsCat, -} +var AnyPrefixMatch = v1.AnyPrefixMatch -var Concat = &Builtin{ - Name: "concat", - Description: "Joins a set or array of strings with a delimiter.", - Decl: types.NewFunction( - types.Args( - types.Named("delimiter", types.S), - types.Named("collection", types.NewAny( - types.NewSet(types.S), - types.NewArray(nil, types.S), - )).Description("strings to join"), - ), - types.Named("output", types.S), - ), - Categories: stringsCat, -} +var AnySuffixMatch = v1.AnySuffixMatch -var FormatInt = &Builtin{ - Name: "format_int", - Description: "Returns the string representation of the number in the given base after rounding it down to an integer value.", - Decl: types.NewFunction( - types.Args( - types.Named("number", types.N).Description("number to format"), - types.Named("base", types.N).Description("base of number representation to use"), - ), - types.Named("output", types.S).Description("formatted number"), - ), - Categories: stringsCat, -} +var Concat = v1.Concat -var IndexOf = &Builtin{ - Name: "indexof", - Description: "Returns the index of a substring contained inside a string.", - Decl: types.NewFunction( - types.Args( - types.Named("haystack", types.S).Description("string to search in"), - types.Named("needle", types.S).Description("substring to look for"), - ), - types.Named("output", types.N).Description("index of first occurrence, `-1` if not found"), - ), - Categories: stringsCat, -} +var FormatInt = v1.FormatInt -var IndexOfN = &Builtin{ - Name: "indexof_n", - Description: "Returns a list of all the indexes of a substring contained inside a string.", - Decl: types.NewFunction( - types.Args( - types.Named("haystack", types.S).Description("string to search in"), - types.Named("needle", types.S).Description("substring to look for"), - ), - types.Named("output", types.NewArray(nil, types.N)).Description("all indices at which `needle` occurs in `haystack`, may be empty"), - ), - Categories: stringsCat, -} +var IndexOf = v1.IndexOf 
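// NOTE (editor, illustrative sketch only — not part of the vendored diff): every
// hunk in this file follows the same pattern, replacing a full &Builtin{...}
// definition with an alias into OPA's v1 ast package, e.g. `var IndexOf = v1.IndexOf`.
// Assuming the compatibility import paths below, existing callers keep compiling
// and see the same builtin metadata:
//
//	import (
//		"fmt"
//
//		"github.com/open-policy-agent/opa/ast"       // legacy wrapper package (this file)
//		v1 "github.com/open-policy-agent/opa/v1/ast" // canonical v1 definitions
//	)
//
//	func builtinAliasExample() {
//		fmt.Println(ast.IndexOf == v1.IndexOf) // true: both names refer to the same *Builtin
//		fmt.Println(ast.IndexOf.Name)          // "indexof"
//	}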
-var Substring = &Builtin{ - Name: "substring", - Description: "Returns the portion of a string for a given `offset` and a `length`. If `length < 0`, `output` is the remainder of the string.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S), - types.Named("offset", types.N).Description("offset, must be positive"), - types.Named("length", types.N).Description("length of the substring starting from `offset`"), - ), - types.Named("output", types.S).Description("substring of `value` from `offset`, of length `length`"), - ), - Categories: stringsCat, -} +var IndexOfN = v1.IndexOfN -var Contains = &Builtin{ - Name: "contains", - Description: "Returns `true` if the search string is included in the base string", - Decl: types.NewFunction( - types.Args( - types.Named("haystack", types.S).Description("string to search in"), - types.Named("needle", types.S).Description("substring to look for"), - ), - types.Named("result", types.B).Description("result of the containment check"), - ), - Categories: stringsCat, -} +var Substring = v1.Substring -var StringCount = &Builtin{ - Name: "strings.count", - Description: "Returns the number of non-overlapping instances of a substring in a string.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.S).Description("string to search in"), - types.Named("substring", types.S).Description("substring to look for"), - ), - types.Named("output", types.N).Description("count of occurrences, `0` if not found"), - ), - Categories: stringsCat, -} +var Contains = v1.Contains -var StartsWith = &Builtin{ - Name: "startswith", - Description: "Returns true if the search string begins with the base string.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.S).Description("search string"), - types.Named("base", types.S).Description("base string"), - ), - types.Named("result", types.B).Description("result of the prefix check"), - ), - Categories: stringsCat, -} +var StringCount = v1.StringCount -var EndsWith = &Builtin{ - Name: "endswith", - Description: "Returns true if the search string ends with the base string.", - Decl: types.NewFunction( - types.Args( - types.Named("search", types.S).Description("search string"), - types.Named("base", types.S).Description("base string"), - ), - types.Named("result", types.B).Description("result of the suffix check"), - ), - Categories: stringsCat, -} +var StartsWith = v1.StartsWith -var Lower = &Builtin{ - Name: "lower", - Description: "Returns the input string but with all characters in lower-case.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string that is converted to lower-case"), - ), - types.Named("y", types.S).Description("lower-case of x"), - ), - Categories: stringsCat, -} +var EndsWith = v1.EndsWith -var Upper = &Builtin{ - Name: "upper", - Description: "Returns the input string but with all characters in upper-case.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string that is converted to upper-case"), - ), - types.Named("y", types.S).Description("upper-case of x"), - ), - Categories: stringsCat, -} +var Lower = v1.Lower -var Split = &Builtin{ - Name: "split", - Description: "Split returns an array containing elements of the input string split on a delimiter.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string that is split"), - types.Named("delimiter", types.S).Description("delimiter used for splitting"), - ), - types.Named("ys", types.NewArray(nil, 
types.S)).Description("split parts"), - ), - Categories: stringsCat, -} +var Upper = v1.Upper -var Replace = &Builtin{ - Name: "replace", - Description: "Replace replaces all instances of a sub-string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("string being processed"), - types.Named("old", types.S).Description("substring to replace"), - types.Named("new", types.S).Description("string to replace `old` with"), - ), - types.Named("y", types.S).Description("string with replaced substrings"), - ), - Categories: stringsCat, -} +var Split = v1.Split -var ReplaceN = &Builtin{ - Name: "strings.replace_n", - Description: `Replaces a string from a list of old, new string pairs. -Replacements are performed in the order they appear in the target string, without overlapping matches. -The old string comparisons are done in argument order.`, - Decl: types.NewFunction( - types.Args( - types.Named("patterns", types.NewObject( - nil, - types.NewDynamicProperty( - types.S, - types.S)), - ).Description("replacement pairs"), - types.Named("value", types.S).Description("string to replace substring matches in"), - ), - types.Named("output", types.S), - ), -} +var Replace = v1.Replace -var RegexReplace = &Builtin{ - Name: "regex.replace", - Description: `Find and replaces the text using the regular expression pattern.`, - Decl: types.NewFunction( - types.Args( - types.Named("s", types.S).Description("string being processed"), - types.Named("pattern", types.S).Description("regex pattern to be applied"), - types.Named("value", types.S).Description("regex value"), - ), - types.Named("output", types.S), - ), -} +var ReplaceN = v1.ReplaceN -var Trim = &Builtin{ - Name: "trim", - Description: "Returns `value` with all leading or trailing instances of the `cutset` characters removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("cutset", types.S).Description("string of characters that are cut off"), - ), - types.Named("output", types.S).Description("string trimmed of `cutset` characters"), - ), - Categories: stringsCat, -} +var RegexReplace = v1.RegexReplace -var TrimLeft = &Builtin{ - Name: "trim_left", - Description: "Returns `value` with all leading instances of the `cutset` characters removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("cutset", types.S).Description("string of characters that are cut off on the left"), - ), - types.Named("output", types.S).Description("string left-trimmed of `cutset` characters"), - ), - Categories: stringsCat, -} +var Trim = v1.Trim -var TrimPrefix = &Builtin{ - Name: "trim_prefix", - Description: "Returns `value` without the prefix. 
If `value` doesn't start with `prefix`, it is returned unchanged.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("prefix", types.S).Description("prefix to cut off"), - ), - types.Named("output", types.S).Description("string with `prefix` cut off"), - ), - Categories: stringsCat, -} +var TrimLeft = v1.TrimLeft -var TrimRight = &Builtin{ - Name: "trim_right", - Description: "Returns `value` with all trailing instances of the `cutset` characters removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("cutset", types.S).Description("string of characters that are cut off on the right"), - ), - types.Named("output", types.S).Description("string right-trimmed of `cutset` characters"), - ), - Categories: stringsCat, -} +var TrimPrefix = v1.TrimPrefix -var TrimSuffix = &Builtin{ - Name: "trim_suffix", - Description: "Returns `value` without the suffix. If `value` doesn't end with `suffix`, it is returned unchanged.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - types.Named("suffix", types.S).Description("suffix to cut off"), - ), - types.Named("output", types.S).Description("string with `suffix` cut off"), - ), - Categories: stringsCat, -} +var TrimRight = v1.TrimRight -var TrimSpace = &Builtin{ - Name: "trim_space", - Description: "Return the given string with all leading and trailing white space removed.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("string to trim"), - ), - types.Named("output", types.S).Description("string leading and trailing white space cut off"), - ), - Categories: stringsCat, -} +var TrimSuffix = v1.TrimSuffix -var Sprintf = &Builtin{ - Name: "sprintf", - Description: "Returns the given string, formatted.", - Decl: types.NewFunction( - types.Args( - types.Named("format", types.S).Description("string with formatting verbs"), - types.Named("values", types.NewArray(nil, types.A)).Description("arguments to format into formatting verbs"), - ), - types.Named("output", types.S).Description("`format` formatted by the values in `values`"), - ), - Categories: stringsCat, -} +var TrimSpace = v1.TrimSpace -var StringReverse = &Builtin{ - Name: "strings.reverse", - Description: "Reverses a given string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S), - ), - Categories: stringsCat, -} +var Sprintf = v1.Sprintf -var RenderTemplate = &Builtin{ - Name: "strings.render_template", - Description: `Renders a templated string with given template variables injected. For a given templated string and key/value mapping, values will be injected into the template where they are referenced by key. 
- For examples of templating syntax, see https://pkg.go.dev/text/template`, - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S).Description("a templated string"), - types.Named("vars", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("a mapping of template variable keys to values"), - ), - types.Named("result", types.S).Description("rendered template with template variables injected"), - ), - Categories: stringsCat, -} +var StringReverse = v1.StringReverse + +var RenderTemplate = v1.RenderTemplate /** * Numbers @@ -1354,82 +236,19 @@ var RenderTemplate = &Builtin{ // RandIntn returns a random number 0 - n // Marked non-deterministic because it relies on RNG internally. -var RandIntn = &Builtin{ - Name: "rand.intn", - Description: "Returns a random integer between `0` and `n` (`n` exclusive). If `n` is `0`, then `y` is always `0`. For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.", - Decl: types.NewFunction( - types.Args( - types.Named("str", types.S), - types.Named("n", types.N), - ), - types.Named("y", types.N).Description("random integer in the range `[0, abs(n))`"), - ), - Categories: number, - Nondeterministic: true, -} +var RandIntn = v1.RandIntn -var NumbersRange = &Builtin{ - Name: "numbers.range", - Description: "Returns an array of numbers in the given (inclusive) range. If `a==b`, then `range == [a]`; if `a > b`, then `range` is in descending order.", - Decl: types.NewFunction( - types.Args( - types.Named("a", types.N), - types.Named("b", types.N), - ), - types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b`"), - ), -} +var NumbersRange = v1.NumbersRange -var NumbersRangeStep = &Builtin{ - Name: "numbers.range_step", - Description: `Returns an array of numbers in the given (inclusive) range incremented by a positive step. - If "a==b", then "range == [a]"; if "a > b", then "range" is in descending order. - If the provided "step" is less then 1, an error will be thrown. - If "b" is not in the range of the provided "step", "b" won't be included in the result. - `, - Decl: types.NewFunction( - types.Args( - types.Named("a", types.N), - types.Named("b", types.N), - types.Named("step", types.N), - ), - types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b` in `step` increments"), - ), -} +var NumbersRangeStep = v1.NumbersRangeStep /** * Units */ -var UnitsParse = &Builtin{ - Name: "units.parse", - Description: `Converts strings like "10G", "5K", "4M", "1500m" and the like into a number. -This number can be a non-integer, such as 1.5, 0.22, etc. Supports standard metric decimal and -binary SI units (e.g., K, Ki, M, Mi, G, Gi etc.) m, K, M, G, T, P, and E are treated as decimal -units and Ki, Mi, Gi, Ti, Pi, and Ei are treated as binary units. - -Note that 'm' and 'M' are case-sensitive, to allow distinguishing between "milli" and "mega" units respectively. Other units are case-insensitive.`, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("the unit to parse"), - ), - types.Named("y", types.N).Description("the parsed number"), - ), -} +var UnitsParse = v1.UnitsParse -var UnitsParseBytes = &Builtin{ - Name: "units.parse_bytes", - Description: `Converts strings like "10GB", "5K", "4mb" into an integer number of bytes. -Supports standard byte units (e.g., KB, KiB, etc.) KB, MB, GB, and TB are treated as decimal -units and KiB, MiB, GiB, and TiB are treated as binary units. 
The bytes symbol (b/B) in the -unit is optional and omitting it wil give the same result (e.g. Mi and MiB).`, - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("the byte unit to parse"), - ), - types.Named("y", types.N).Description("the parsed number"), - ), -} +var UnitsParseBytes = v1.UnitsParseBytes // /** @@ -1438,1372 +257,241 @@ unit is optional and omitting it wil give the same result (e.g. Mi and MiB).`, // UUIDRFC4122 returns a version 4 UUID string. // Marked non-deterministic because it relies on RNG internally. -var UUIDRFC4122 = &Builtin{ - Name: "uuid.rfc4122", - Description: "Returns a new UUIDv4.", - Decl: types.NewFunction( - types.Args( - types.Named("k", types.S), - ), - types.Named("output", types.S).Description("a version 4 UUID; for any given `k`, the output will be consistent throughout a query evaluation"), - ), - Nondeterministic: true, -} +var UUIDRFC4122 = v1.UUIDRFC4122 -var UUIDParse = &Builtin{ - Name: "uuid.parse", - Description: "Parses the string value as an UUID and returns an object with the well-defined fields of the UUID if valid.", - Categories: nil, - Decl: types.NewFunction( - types.Args( - types.Named("uuid", types.S), - ), - types.Named("result", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("Properties of UUID if valid (version, variant, etc). Undefined otherwise."), - ), - Relation: false, -} +var UUIDParse = v1.UUIDParse /** * JSON */ -var objectCat = category("object") - -var JSONFilter = &Builtin{ - Name: "json.filter", - Description: "Filters the object. " + - "For example: `json.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"b\": \"x\"}}`). " + - "Paths are not filtered in-order and are deduplicated before being evaluated.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - types.Named("paths", types.NewAny( - types.NewArray( - nil, - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - types.NewSet( - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - )).Description("JSON string paths"), - ), - types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `paths`"), - ), - Categories: objectCat, -} +var JSONFilter = v1.JSONFilter -var JSONRemove = &Builtin{ - Name: "json.remove", - Description: "Removes paths from an object. " + - "For example: `json.remove({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"c\": \"y\"}}`. " + - "Paths are not removed in-order and are deduplicated before being evaluated.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - types.Named("paths", types.NewAny( - types.NewArray( - nil, - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - types.NewSet( - types.NewAny( - types.S, - types.NewArray( - nil, - types.A, - ), - ), - ), - )).Description("JSON string paths"), - ), - types.Named("output", types.A).Description("result of removing all keys specified in `paths`"), - ), - Categories: objectCat, -} +var JSONRemove = v1.JSONRemove -var JSONPatch = &Builtin{ - Name: "json.patch", - Description: "Patches an object according to RFC6902. 
" + - "For example: `json.patch({\"a\": {\"foo\": 1}}, [{\"op\": \"add\", \"path\": \"/a/bar\", \"value\": 2}])` results in `{\"a\": {\"foo\": 1, \"bar\": 2}`. " + - "The patches are applied atomically: if any of them fails, the result will be undefined. " + - "Additionally works on sets, where a value contained in the set is considered to be its path.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.A), // TODO(sr): types.A? - types.Named("patches", types.NewArray( - nil, - types.NewObject( - []*types.StaticProperty{ - {Key: "op", Value: types.S}, - {Key: "path", Value: types.A}, - }, - types.NewDynamicProperty(types.A, types.A), - ), - )), - ), - types.Named("output", types.A).Description("result obtained after consecutively applying all patch operations in `patches`"), - ), - Categories: objectCat, -} +var JSONPatch = v1.JSONPatch -var ObjectSubset = &Builtin{ - Name: "object.subset", - Description: "Determines if an object `sub` is a subset of another object `super`." + - "Object `sub` is a subset of object `super` if and only if every key in `sub` is also in `super`, " + - "**and** for all keys which `sub` and `super` share, they have the same value. " + - "This function works with objects, sets, arrays and a set of array and set." + - "If both arguments are objects, then the operation is recursive, e.g. " + - "`{\"c\": {\"x\": {10, 15, 20}}` is a subset of `{\"a\": \"b\", \"c\": {\"x\": {10, 15, 20, 25}, \"y\": \"z\"}`. " + - "If both arguments are sets, then this function checks if every element of `sub` is a member of `super`, " + - "but does not attempt to recurse. If both arguments are arrays, " + - "then this function checks if `sub` appears contiguously in order within `super`, " + - "and also does not attempt to recurse. If `super` is array and `sub` is set, " + - "then this function checks if `super` contains every element of `sub` with no consideration of ordering, " + - "and also does not attempt to recurse.", - Decl: types.NewFunction( - types.Args( - types.Named("super", types.NewAny(types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - ), - types.NewSet(types.A), - types.NewArray(nil, types.A), - )).Description("object to test if sub is a subset of"), - types.Named("sub", types.NewAny(types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - ), - types.NewSet(types.A), - types.NewArray(nil, types.A), - )).Description("object to test if super is a superset of"), - ), - types.Named("result", types.A).Description("`true` if `sub` is a subset of `super`"), - ), -} +var ObjectSubset = v1.ObjectSubset -var ObjectUnion = &Builtin{ - Name: "object.union", - Description: "Creates a new object of the asymmetric union of two objects. " + - "For example: `object.union({\"a\": 1, \"b\": 2, \"c\": {\"d\": 3}}, {\"a\": 7, \"c\": {\"d\": 4, \"e\": 5}})` will result in `{\"a\": 7, \"b\": 2, \"c\": {\"d\": 4, \"e\": 5}}`.", - Decl: types.NewFunction( - types.Args( - types.Named("a", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - types.Named("b", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )), - ), - types.Named("output", types.A).Description("a new object which is the result of an asymmetric recursive union of two objects where conflicts are resolved by choosing the key from the right-hand object `b`"), - ), // TODO(sr): types.A? 
^^^^^^^ (also below) -} +var ObjectUnion = v1.ObjectUnion -var ObjectUnionN = &Builtin{ - Name: "object.union_n", - Description: "Creates a new object that is the asymmetric union of all objects merged from left to right. " + - "For example: `object.union_n([{\"a\": 1}, {\"b\": 2}, {\"a\": 3}])` will result in `{\"b\": 2, \"a\": 3}`.", - Decl: types.NewFunction( - types.Args( - types.Named("objects", types.NewArray( - nil, - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - )), - ), - types.Named("output", types.A).Description("asymmetric recursive union of all objects in `objects`, merged from left to right, where conflicts are resolved by choosing the key from the right-hand object"), - ), -} +var ObjectUnionN = v1.ObjectUnionN -var ObjectRemove = &Builtin{ - Name: "object.remove", - Description: "Removes specified keys from an object.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )).Description("object to remove keys from"), - types.Named("keys", types.NewAny( - types.NewArray(nil, types.A), - types.NewSet(types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - )).Description("keys to remove from x"), - ), - types.Named("output", types.A).Description("result of removing the specified `keys` from `object`"), - ), -} +var ObjectRemove = v1.ObjectRemove -var ObjectFilter = &Builtin{ - Name: "object.filter", - Description: "Filters the object by keeping only specified keys. " + - "For example: `object.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}, \"d\": \"z\"}, [\"a\"])` will result in `{\"a\": {\"b\": \"x\", \"c\": \"y\"}}`).", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty(types.A, types.A), - )).Description("object to filter keys"), - types.Named("keys", types.NewAny( - types.NewArray(nil, types.A), - types.NewSet(types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - )), - ), - types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `keys`"), - ), -} +var ObjectFilter = v1.ObjectFilter -var ObjectGet = &Builtin{ - Name: "object.get", - Description: "Returns value of an object's key if present, otherwise a default. " + - "If the supplied `key` is an `array`, then `object.get` will search through a nested object or array using each key in turn. " + - "For example: `object.get({\"a\": [{ \"b\": true }]}, [\"a\", 0, \"b\"], false)` results in `true`.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get `key` from"), - types.Named("key", types.A).Description("key to lookup in `object`"), - types.Named("default", types.A).Description("default to use when lookup fails"), - ), - types.Named("value", types.A).Description("`object[key]` if present, otherwise `default`"), - ), -} +var ObjectGet = v1.ObjectGet -var ObjectKeys = &Builtin{ - Name: "object.keys", - Description: "Returns a set of an object's keys. 
" + - "For example: `object.keys({\"a\": 1, \"b\": true, \"c\": \"d\")` results in `{\"a\", \"b\", \"c\"}`.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get keys from"), - ), - types.Named("value", types.NewSet(types.A)).Description("set of `object`'s keys"), - ), -} +var ObjectKeys = v1.ObjectKeys /* * Encoding */ -var encoding = category("encoding") - -var JSONMarshal = &Builtin{ - Name: "json.marshal", - Description: "Serializes the input term to JSON.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A).Description("the term to serialize"), - ), - types.Named("y", types.S).Description("the JSON string representation of `x`"), - ), - Categories: encoding, -} -var JSONMarshalWithOptions = &Builtin{ - Name: "json.marshal_with_options", - Description: "Serializes the input term JSON, with additional formatting options via the `opts` parameter. " + - "`opts` accepts keys `pretty` (enable multi-line/formatted JSON), `prefix` (string to prefix lines with, default empty string) and `indent` (string to indent with, default `\\t`).", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A).Description("the term to serialize"), - types.Named("opts", types.NewObject( - []*types.StaticProperty{ - types.NewStaticProperty("pretty", types.B), - types.NewStaticProperty("indent", types.S), - types.NewStaticProperty("prefix", types.S), - }, - types.NewDynamicProperty(types.S, types.A), - )).Description("encoding options"), - ), - types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"), - ), - Categories: encoding, -} +var JSONMarshal = v1.JSONMarshal -var JSONUnmarshal = &Builtin{ - Name: "json.unmarshal", - Description: "Deserializes the input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a JSON string"), - ), - types.Named("y", types.A).Description("the term deserialized from `x`"), - ), - Categories: encoding, -} +var JSONMarshalWithOptions = v1.JSONMarshalWithOptions -var JSONIsValid = &Builtin{ - Name: "json.is_valid", - Description: "Verifies the input string is a valid JSON document.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a JSON string"), - ), - types.Named("result", types.B).Description("`true` if `x` is valid JSON, `false` otherwise"), - ), - Categories: encoding, -} +var JSONUnmarshal = v1.JSONUnmarshal -var Base64Encode = &Builtin{ - Name: "base64.encode", - Description: "Serializes the input string into base64 encoding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64 serialization of `x`"), - ), - Categories: encoding, -} +var JSONIsValid = v1.JSONIsValid -var Base64Decode = &Builtin{ - Name: "base64.decode", - Description: "Deserializes the base64 encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64 deserialization of `x`"), - ), - Categories: encoding, -} +var Base64Encode = v1.Base64Encode -var Base64IsValid = &Builtin{ - Name: "base64.is_valid", - Description: "Verifies the input string is base64 encoded.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("result", types.B).Description("`true` if `x` is valid base64 encoded value, `false` otherwise"), - ), - Categories: 
encoding, -} +var Base64Decode = v1.Base64Decode -var Base64UrlEncode = &Builtin{ - Name: "base64url.encode", - Description: "Serializes the input string into base64url encoding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64url serialization of `x`"), - ), - Categories: encoding, -} +var Base64IsValid = v1.Base64IsValid -var Base64UrlEncodeNoPad = &Builtin{ - Name: "base64url.encode_no_pad", - Description: "Serializes the input string into base64url encoding without padding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64url serialization of `x`"), - ), - Categories: encoding, -} +var Base64UrlEncode = v1.Base64UrlEncode -var Base64UrlDecode = &Builtin{ - Name: "base64url.decode", - Description: "Deserializes the base64url encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("base64url deserialization of `x`"), - ), - Categories: encoding, -} +var Base64UrlEncodeNoPad = v1.Base64UrlEncodeNoPad -var URLQueryDecode = &Builtin{ - Name: "urlquery.decode", - Description: "Decodes a URL-encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("URL-encoding deserialization of `x`"), - ), - Categories: encoding, -} +var Base64UrlDecode = v1.Base64UrlDecode -var URLQueryEncode = &Builtin{ - Name: "urlquery.encode", - Description: "Encodes the input string into a URL-encoded string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("URL-encoding serialization of `x`"), - ), - Categories: encoding, -} +var URLQueryDecode = v1.URLQueryDecode -var URLQueryEncodeObject = &Builtin{ - Name: "urlquery.encode_object", - Description: "Encodes the given object into a URL encoded query string.", - Decl: types.NewFunction( - types.Args( - types.Named("object", types.NewObject( - nil, - types.NewDynamicProperty( - types.S, - types.NewAny( - types.S, - types.NewArray(nil, types.S), - types.NewSet(types.S)))))), - types.Named("y", types.S).Description("the URL-encoded serialization of `object`"), - ), - Categories: encoding, -} +var URLQueryEncode = v1.URLQueryEncode -var URLQueryDecodeObject = &Builtin{ - Name: "urlquery.decode_object", - Description: "Decodes the given URL query string into an object.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("the query string"), - ), - types.Named("object", types.NewObject(nil, types.NewDynamicProperty( - types.S, - types.NewArray(nil, types.S)))).Description("the resulting object"), - ), - Categories: encoding, -} +var URLQueryEncodeObject = v1.URLQueryEncodeObject -var YAMLMarshal = &Builtin{ - Name: "yaml.marshal", - Description: "Serializes the input term to YAML.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A).Description("the term to serialize"), - ), - types.Named("y", types.S).Description("the YAML string representation of `x`"), - ), - Categories: encoding, -} +var URLQueryDecodeObject = v1.URLQueryDecodeObject -var YAMLUnmarshal = &Builtin{ - Name: "yaml.unmarshal", - Description: "Deserializes the input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a YAML string"), - ), - types.Named("y", types.A).Description("the term deserialized from `x`"), - ), - Categories: encoding, -} 
+var YAMLMarshal = v1.YAMLMarshal + +var YAMLUnmarshal = v1.YAMLUnmarshal // YAMLIsValid verifies the input string is a valid YAML document. -var YAMLIsValid = &Builtin{ - Name: "yaml.is_valid", - Description: "Verifies the input string is a valid YAML document.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a YAML string"), - ), - types.Named("result", types.B).Description("`true` if `x` is valid YAML, `false` otherwise"), - ), - Categories: encoding, -} +var YAMLIsValid = v1.YAMLIsValid -var HexEncode = &Builtin{ - Name: "hex.encode", - Description: "Serializes the input string using hex-encoding.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("serialization of `x` using hex-encoding"), - ), - Categories: encoding, -} +var HexEncode = v1.HexEncode -var HexDecode = &Builtin{ - Name: "hex.decode", - Description: "Deserializes the hex-encoded input string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("a hex-encoded string"), - ), - types.Named("y", types.S).Description("deserialized from `x`"), - ), - Categories: encoding, -} +var HexDecode = v1.HexDecode /** * Tokens */ -var tokensCat = category("tokens") - -var JWTDecode = &Builtin{ - Name: "io.jwt.decode", - Description: "Decodes a JSON Web Token and outputs it as an object.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token to decode"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.S, - }, nil)).Description("`[header, payload, sig]`, where `header` and `payload` are objects; `sig` is the hexadecimal representation of the signature on the token."), - ), - Categories: tokensCat, -} -var JWTVerifyRS256 = &Builtin{ - Name: "io.jwt.verify_rs256", - Description: "Verifies if a RS256 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTDecode = v1.JWTDecode -var JWTVerifyRS384 = &Builtin{ - Name: "io.jwt.verify_rs384", - Description: "Verifies if a RS384 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyRS256 = v1.JWTVerifyRS256 -var JWTVerifyRS512 = &Builtin{ - Name: "io.jwt.verify_rs512", - Description: "Verifies if a RS512 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature 
is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyRS384 = v1.JWTVerifyRS384 -var JWTVerifyPS256 = &Builtin{ - Name: "io.jwt.verify_ps256", - Description: "Verifies if a PS256 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyRS512 = v1.JWTVerifyRS512 -var JWTVerifyPS384 = &Builtin{ - Name: "io.jwt.verify_ps384", - Description: "Verifies if a PS384 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyPS256 = v1.JWTVerifyPS256 -var JWTVerifyPS512 = &Builtin{ - Name: "io.jwt.verify_ps512", - Description: "Verifies if a PS512 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyPS384 = v1.JWTVerifyPS384 -var JWTVerifyES256 = &Builtin{ - Name: "io.jwt.verify_es256", - Description: "Verifies if a ES256 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyPS512 = v1.JWTVerifyPS512 -var JWTVerifyES384 = &Builtin{ - Name: "io.jwt.verify_es384", - Description: "Verifies if a ES384 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyES256 = v1.JWTVerifyES256 -var JWTVerifyES512 = &Builtin{ - Name: "io.jwt.verify_es512", - Description: "Verifies if a ES512 JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` 
otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyES384 = v1.JWTVerifyES384 -var JWTVerifyHS256 = &Builtin{ - Name: "io.jwt.verify_hs256", - Description: "Verifies if a HS256 (secret) JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("secret", types.S).Description("plain text secret used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyES512 = v1.JWTVerifyES512 -var JWTVerifyHS384 = &Builtin{ - Name: "io.jwt.verify_hs384", - Description: "Verifies if a HS384 (secret) JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("secret", types.S).Description("plain text secret used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyHS256 = v1.JWTVerifyHS256 -var JWTVerifyHS512 = &Builtin{ - Name: "io.jwt.verify_hs512", - Description: "Verifies if a HS512 (secret) JWT signature is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), - types.Named("secret", types.S).Description("plain text secret used to verify the signature"), - ), - types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), - ), - Categories: tokensCat, -} +var JWTVerifyHS384 = v1.JWTVerifyHS384 -// Marked non-deterministic because it relies on time internally. -var JWTDecodeVerify = &Builtin{ - Name: "io.jwt.decode_verify", - Description: `Verifies a JWT signature under parameterized constraints and decodes the claims if it is valid. -Supports the following algorithms: HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, ES512, PS256, PS384 and PS512.`, - Decl: types.NewFunction( - types.Args( - types.Named("jwt", types.S).Description("JWT token whose signature is to be verified and whose claims are to be checked"), - types.Named("constraints", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("claim verification constraints"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - }, nil)).Description("`[valid, header, payload]`: if the input token is verified and meets the requirements of `constraints` then `valid` is `true`; `header` and `payload` are objects containing the JOSE header and the JWT claim set; otherwise, `valid` is `false`, `header` and `payload` are `{}`"), - ), - Categories: tokensCat, - Nondeterministic: true, -} +var JWTVerifyHS512 = v1.JWTVerifyHS512 -var tokenSign = category("tokensign") +// Marked non-deterministic because it relies on time internally. +var JWTDecodeVerify = v1.JWTDecodeVerify // Marked non-deterministic because it relies on RNG internally. 
-var JWTEncodeSignRaw = &Builtin{ - Name: "io.jwt.encode_sign_raw", - Description: "Encodes and optionally signs a JSON Web Token.", - Decl: types.NewFunction( - types.Args( - types.Named("headers", types.S).Description("JWS Protected Header"), - types.Named("payload", types.S).Description("JWS Payload"), - types.Named("key", types.S).Description("JSON Web Key (RFC7517)"), - ), - types.Named("output", types.S).Description("signed JWT"), - ), - Categories: tokenSign, - Nondeterministic: true, -} +var JWTEncodeSignRaw = v1.JWTEncodeSignRaw // Marked non-deterministic because it relies on RNG internally. -var JWTEncodeSign = &Builtin{ - Name: "io.jwt.encode_sign", - Description: "Encodes and optionally signs a JSON Web Token. Inputs are taken as objects, not encoded strings (see `io.jwt.encode_sign_raw`).", - Decl: types.NewFunction( - types.Args( - types.Named("headers", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Protected Header"), - types.Named("payload", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Payload"), - types.Named("key", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JSON Web Key (RFC7517)"), - ), - types.Named("output", types.S).Description("signed JWT"), - ), - Categories: tokenSign, - Nondeterministic: true, -} +var JWTEncodeSign = v1.JWTEncodeSign /** * Time */ // Marked non-deterministic because it relies on time directly. -var NowNanos = &Builtin{ - Name: "time.now_ns", - Description: "Returns the current time since epoch in nanoseconds.", - Decl: types.NewFunction( - nil, - types.Named("now", types.N).Description("nanoseconds since epoch"), - ), - Nondeterministic: true, -} +var NowNanos = v1.NowNanos -var ParseNanos = &Builtin{ - Name: "time.parse_ns", - Description: "Returns the time in nanoseconds parsed from the string in the given format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", - Decl: types.NewFunction( - types.Args( - types.Named("layout", types.S).Description("format used for parsing, see the [Go `time` package documentation](https://golang.org/pkg/time/#Parse) for more details"), - types.Named("value", types.S).Description("input to parse according to `layout`"), - ), - types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), - ), -} +var ParseNanos = v1.ParseNanos -var ParseRFC3339Nanos = &Builtin{ - Name: "time.parse_rfc3339_ns", - Description: "Returns the time in nanoseconds parsed from the string in RFC3339 format. 
`undefined` if the result would be outside the valid time range that can fit within an `int64`.", - Decl: types.NewFunction( - types.Args( - types.Named("value", types.S), - ), - types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), - ), -} +var ParseRFC3339Nanos = v1.ParseRFC3339Nanos -var ParseDurationNanos = &Builtin{ - Name: "time.parse_duration_ns", - Description: "Returns the duration in nanoseconds represented by a string.", - Decl: types.NewFunction( - types.Args( - types.Named("duration", types.S).Description("a duration like \"3m\"; see the [Go `time` package documentation](https://golang.org/pkg/time/#ParseDuration) for more details"), - ), - types.Named("ns", types.N).Description("the `duration` in nanoseconds"), - ), -} +var ParseDurationNanos = v1.ParseDurationNanos -var Format = &Builtin{ - Name: "time.format", - Description: "Returns the formatted timestamp for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - types.NewArray([]types.Type{types.N, types.S, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string; or a three-element array of ns, timezone string and a layout string or golang defined formatting constant (see golang supported time formats)"), - ), - types.Named("formatted timestamp", types.S).Description("the formatted timestamp represented for the nanoseconds since the epoch in the supplied timezone (or UTC)"), - ), -} +var Format = v1.Format -var Date = &Builtin{ - Name: "time.date", - Description: "Returns the `[year, month, day]` for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"), - ), - types.Named("date", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).Description("an array of `year`, `month` (1-12), and `day` (1-31)"), - ), -} +var Date = v1.Date -var Clock = &Builtin{ - Name: "time.clock", - Description: "Returns the `[hour, minute, second]` of the day for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"), - ), - types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)). - Description("the `hour`, `minute` (0-59), and `second` (0-59) representing the time of day for the nanoseconds since epoch in the supplied timezone (or UTC)"), - ), -} +var Clock = v1.Clock -var Weekday = &Builtin{ - Name: "time.weekday", - Description: "Returns the day of the week (Monday, Tuesday, ...) 
for the nanoseconds since epoch.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"), - ), - types.Named("day", types.S).Description("the weekday represented by `ns` nanoseconds since the epoch in the supplied timezone (or UTC)"), - ), -} +var Weekday = v1.Weekday -var AddDate = &Builtin{ - Name: "time.add_date", - Description: "Returns the nanoseconds since epoch after adding years, months and days to nanoseconds. Month & day values outside their usual ranges after the operation and will be normalized - for example, October 32 would become November 1. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", - Decl: types.NewFunction( - types.Args( - types.Named("ns", types.N).Description("nanoseconds since the epoch"), - types.Named("years", types.N), - types.Named("months", types.N), - types.Named("days", types.N), - ), - types.Named("output", types.N).Description("nanoseconds since the epoch representing the input time, with years, months and days added"), - ), -} +var AddDate = v1.AddDate -var Diff = &Builtin{ - Name: "time.diff", - Description: "Returns the difference between two unix timestamps in nanoseconds (with optional timezone strings).", - Decl: types.NewFunction( - types.Args( - types.Named("ns1", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )), - types.Named("ns2", types.NewAny( - types.N, - types.NewArray([]types.Type{types.N, types.S}, nil), - )), - ), - types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N, types.N, types.N, types.N}, nil)).Description("difference between `ns1` and `ns2` (in their supplied timezones, if supplied, or UTC) as array of numbers: `[years, months, days, hours, minutes, seconds]`"), - ), -} +var Diff = v1.Diff /** * Crypto. */ -var CryptoX509ParseCertificates = &Builtin{ - Name: "crypto.x509.parse_certificates", - Description: `Returns zero or more certificates from the given encoded string containing -DER certificate data. - -If the input is empty, the function will return null. The input string should be a list of one or more -concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, - Decl: types.NewFunction( - types.Args( - types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing one or more certificates or a PEM string of one or more certificates"), - ), - types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed X.509 certificates represented as objects"), - ), -} +var CryptoX509ParseCertificates = v1.CryptoX509ParseCertificates -var CryptoX509ParseAndVerifyCertificates = &Builtin{ - Name: "crypto.x509.parse_and_verify_certificates", - Description: `Returns one or more certificates from the given string containing PEM -or base64 encoded DER certificates after verifying the supplied certificates form a complete -certificate chain back to a trusted root. 
- -The first certificate is treated as the root and the last is treated as the leaf, -with all others being treated as intermediates.`, - Decl: types.NewFunction( - types.Args( - types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), - ), -} +var CryptoX509ParseAndVerifyCertificates = v1.CryptoX509ParseAndVerifyCertificates -var CryptoX509ParseAndVerifyCertificatesWithOptions = &Builtin{ - Name: "crypto.x509.parse_and_verify_certificates_with_options", - Description: `Returns one or more certificates from the given string containing PEM -or base64 encoded DER certificates after verifying the supplied certificates form a complete -certificate chain back to a trusted root. A config option passed as the second argument can -be used to configure the validation options used. - -The first certificate is treated as the root and the last is treated as the leaf, -with all others being treated as intermediates.`, - - Decl: types.NewFunction( - types.Args( - types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), - types.Named("options", types.NewObject( - nil, - types.NewDynamicProperty(types.S, types.A), - )).Description("object containing extra configs to verify the validity of certificates. `options` object supports four fields which maps to same fields in [x509.VerifyOptions struct](https://pkg.go.dev/crypto/x509#VerifyOptions). `DNSName`, `CurrentTime`: Nanoseconds since the Unix Epoch as a number, `MaxConstraintComparisons` and `KeyUsages`. 
`KeyUsages` is list and can have possible values as in: `\"KeyUsageAny\"`, `\"KeyUsageServerAuth\"`, `\"KeyUsageClientAuth\"`, `\"KeyUsageCodeSigning\"`, `\"KeyUsageEmailProtection\"`, `\"KeyUsageIPSECEndSystem\"`, `\"KeyUsageIPSECTunnel\"`, `\"KeyUsageIPSECUser\"`, `\"KeyUsageTimeStamping\"`, `\"KeyUsageOCSPSigning\"`, `\"KeyUsageMicrosoftServerGatedCrypto\"`, `\"KeyUsageNetscapeServerGatedCrypto\"`, `\"KeyUsageMicrosoftCommercialCodeSigning\"`, `\"KeyUsageMicrosoftKernelCodeSigning\"` "), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), - ), -} +var CryptoX509ParseAndVerifyCertificatesWithOptions = v1.CryptoX509ParseAndVerifyCertificatesWithOptions -var CryptoX509ParseCertificateRequest = &Builtin{ - Name: "crypto.x509.parse_certificate_request", - Description: "Returns a PKCS #10 certificate signing request from the given PEM-encoded PKCS#10 certificate signing request.", - Decl: types.NewFunction( - types.Args( - types.Named("csr", types.S).Description("base64 string containing either a PEM encoded or DER CSR or a string containing a PEM CSR"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("X.509 CSR represented as an object"), - ), -} +var CryptoX509ParseCertificateRequest = v1.CryptoX509ParseCertificateRequest -var CryptoX509ParseKeyPair = &Builtin{ - Name: "crypto.x509.parse_keypair", - Description: "Returns a valid key pair", - Decl: types.NewFunction( - types.Args( - types.Named("cert", types.S).Description("string containing PEM or base64 encoded DER certificates"), - types.Named("pem", types.S).Description("string containing PEM or base64 encoded DER keys"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("if key pair is valid, returns the tls.certificate(https://pkg.go.dev/crypto/tls#Certificate) as an object. If the key pair is invalid, nil and an error are returned."), - ), -} -var CryptoX509ParseRSAPrivateKey = &Builtin{ - Name: "crypto.x509.parse_rsa_private_key", - Description: "Returns a JWK for signing a JWT from the given PEM-encoded RSA private key.", - Decl: types.NewFunction( - types.Args( - types.Named("pem", types.S).Description("base64 string containing a PEM encoded RSA private key"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWK as an object"), - ), -} +var CryptoX509ParseKeyPair = v1.CryptoX509ParseKeyPair +var CryptoX509ParseRSAPrivateKey = v1.CryptoX509ParseRSAPrivateKey -var CryptoParsePrivateKeys = &Builtin{ - Name: "crypto.parse_private_keys", - Description: `Returns zero or more private keys from the given encoded string containing DER certificate data. - -If the input is empty, the function will return null. The input string should be a list of one or more concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, - Decl: types.NewFunction( - types.Args( - types.Named("keys", types.S).Description("PEM encoded data containing one or more private keys as concatenated blocks. 
Optionally Base64 encoded."), - ), - types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed private keys represented as objects"), - ), -} +var CryptoParsePrivateKeys = v1.CryptoParsePrivateKeys -var CryptoMd5 = &Builtin{ - Name: "crypto.md5", - Description: "Returns a string representing the input string hashed with the MD5 function", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("MD5-hash of `x`"), - ), -} +var CryptoMd5 = v1.CryptoMd5 -var CryptoSha1 = &Builtin{ - Name: "crypto.sha1", - Description: "Returns a string representing the input string hashed with the SHA1 function", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("SHA1-hash of `x`"), - ), -} +var CryptoSha1 = v1.CryptoSha1 -var CryptoSha256 = &Builtin{ - Name: "crypto.sha256", - Description: "Returns a string representing the input string hashed with the SHA256 function", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S), - ), - types.Named("y", types.S).Description("SHA256-hash of `x`"), - ), -} +var CryptoSha256 = v1.CryptoSha256 -var CryptoHmacMd5 = &Builtin{ - Name: "crypto.hmac.md5", - Description: "Returns a string representing the MD5 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("MD5-HMAC of `x`"), - ), -} +var CryptoHmacMd5 = v1.CryptoHmacMd5 -var CryptoHmacSha1 = &Builtin{ - Name: "crypto.hmac.sha1", - Description: "Returns a string representing the SHA1 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("SHA1-HMAC of `x`"), - ), -} +var CryptoHmacSha1 = v1.CryptoHmacSha1 -var CryptoHmacSha256 = &Builtin{ - Name: "crypto.hmac.sha256", - Description: "Returns a string representing the SHA256 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("SHA256-HMAC of `x`"), - ), -} +var CryptoHmacSha256 = v1.CryptoHmacSha256 -var CryptoHmacSha512 = &Builtin{ - Name: "crypto.hmac.sha512", - Description: "Returns a string representing the SHA512 HMAC of the input message using the input key.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.S).Description("input string"), - types.Named("key", types.S).Description("key to use"), - ), - types.Named("y", types.S).Description("SHA512-HMAC of `x`"), - ), -} +var CryptoHmacSha512 = v1.CryptoHmacSha512 -var CryptoHmacEqual = &Builtin{ - Name: "crypto.hmac.equal", - Description: "Returns a boolean representing the result of comparing two MACs for equality without leaking timing information.", - Decl: types.NewFunction( - types.Args( - types.Named("mac1", types.S).Description("mac1 to compare"), - types.Named("mac2", types.S).Description("mac2 to compare"), - ), - types.Named("result", types.B).Description("`true` if the MACs are equals, `false` otherwise"), - ), -} +var CryptoHmacEqual = v1.CryptoHmacEqual /** * Graphs. 
*/ -var graphs = category("graph") - -var WalkBuiltin = &Builtin{ - Name: "walk", - Relation: true, - Description: "Generates `[path, value]` tuples for all nested documents of `x` (recursively). Queries can use `walk` to traverse documents nested under `x`.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("output", types.NewArray( - []types.Type{ - types.NewArray(nil, types.A), - types.A, - }, - nil, - )).Description("pairs of `path` and `value`: `path` is an array representing the pointer to `value` in `x`. If `path` is assigned a wildcard (`_`), the `walk` function will skip path creation entirely for faster evaluation."), - ), - Categories: graphs, -} -var ReachableBuiltin = &Builtin{ - Name: "graph.reachable", - Description: "Computes the set of reachable nodes in the graph from a set of starting nodes.", - Decl: types.NewFunction( - types.Args( - types.Named("graph", types.NewObject( - nil, - types.NewDynamicProperty( - types.A, - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A)), - )), - ).Description("object containing a set or array of neighboring vertices"), - types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("set or array of root vertices"), - ), - types.Named("output", types.NewSet(types.A)).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"), - ), -} +var WalkBuiltin = v1.WalkBuiltin -var ReachablePathsBuiltin = &Builtin{ - Name: "graph.reachable_paths", - Description: "Computes the set of reachable paths in the graph from a set of starting nodes.", - Decl: types.NewFunction( - types.Args( - types.Named("graph", types.NewObject( - nil, - types.NewDynamicProperty( - types.A, - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A)), - )), - ).Description("object containing a set or array of root vertices"), - types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("initial paths"), // TODO(sr): copied. is that correct? 
- ), - types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"), - ), -} +var ReachableBuiltin = v1.ReachableBuiltin + +var ReachablePathsBuiltin = v1.ReachablePathsBuiltin /** * Type */ -var typesCat = category("types") - -var IsNumber = &Builtin{ - Name: "is_number", - Description: "Returns `true` if the input value is a number.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is a number, `false` otherwise."), - ), - Categories: typesCat, -} -var IsString = &Builtin{ - Name: "is_string", - Description: "Returns `true` if the input value is a string.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is a string, `false` otherwise."), - ), - Categories: typesCat, -} +var IsNumber = v1.IsNumber -var IsBoolean = &Builtin{ - Name: "is_boolean", - Description: "Returns `true` if the input value is a boolean.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is an boolean, `false` otherwise."), - ), - Categories: typesCat, -} +var IsString = v1.IsString -var IsArray = &Builtin{ - Name: "is_array", - Description: "Returns `true` if the input value is an array.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is an array, `false` otherwise."), - ), - Categories: typesCat, -} +var IsBoolean = v1.IsBoolean -var IsSet = &Builtin{ - Name: "is_set", - Description: "Returns `true` if the input value is a set.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is a set, `false` otherwise."), - ), - Categories: typesCat, -} +var IsArray = v1.IsArray -var IsObject = &Builtin{ - Name: "is_object", - Description: "Returns true if the input value is an object", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is an object, `false` otherwise."), - ), - Categories: typesCat, -} +var IsSet = v1.IsSet -var IsNull = &Builtin{ - Name: "is_null", - Description: "Returns `true` if the input value is null.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("result", types.B).Description("`true` if `x` is null, `false` otherwise."), - ), - Categories: typesCat, -} +var IsObject = v1.IsObject + +var IsNull = v1.IsNull /** * Type Name */ // TypeNameBuiltin returns the type of the input. -var TypeNameBuiltin = &Builtin{ - Name: "type_name", - Description: "Returns the type of its input value.", - Decl: types.NewFunction( - types.Args( - types.Named("x", types.A), - ), - types.Named("type", types.S).Description(`one of "null", "boolean", "number", "string", "array", "object", "set"`), - ), - Categories: typesCat, -} +var TypeNameBuiltin = v1.TypeNameBuiltin /** * HTTP Request */ // Marked non-deterministic because HTTP request results can be non-deterministic. 
-var HTTPSend = &Builtin{ - Name: "http.send", - Description: "Returns a HTTP response to the given HTTP request.", - Decl: types.NewFunction( - types.Args( - types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - ), - types.Named("response", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))), - ), - Nondeterministic: true, -} +var HTTPSend = v1.HTTPSend /** * GraphQL */ // GraphQLParse returns a pair of AST objects from parsing/validation. -var GraphQLParse = &Builtin{ - Name: "graphql.parse", - Description: "Returns AST objects for a given GraphQL query and schema after validating the query against the schema. Returns undefined if errors were encountered during parsing or validation. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.NewArray([]types.Type{ - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - }, nil)).Description("`output` is of the form `[query_ast, schema_ast]`. If the GraphQL query is valid given the provided schema, then `query_ast` and `schema_ast` are objects describing the ASTs for the query and schema."), - ), -} +var GraphQLParse = v1.GraphQLParse // GraphQLParseAndVerify returns a boolean and a pair of AST object from parsing/validation. -var GraphQLParseAndVerify = &Builtin{ - Name: "graphql.parse_and_verify", - Description: "Returns a boolean indicating success or failure alongside the parsed ASTs for a given GraphQL query and schema after validating the query against the schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - }, nil)).Description(" `output` is of the form `[valid, query_ast, schema_ast]`. If the query is valid given the provided schema, then `valid` is `true`, and `query_ast` and `schema_ast` are objects describing the ASTs for the GraphQL query and schema. Otherwise, `valid` is `false` and `query_ast` and `schema_ast` are `{}`."), - ), -} +var GraphQLParseAndVerify = v1.GraphQLParseAndVerify // GraphQLParseQuery parses the input GraphQL query and returns a JSON // representation of its AST. -var GraphQLParseQuery = &Builtin{ - Name: "graphql.parse_query", - Description: "Returns an AST object for a GraphQL query.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.S), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL query."), - ), -} +var GraphQLParseQuery = v1.GraphQLParseQuery // GraphQLParseSchema parses the input GraphQL schema and returns a JSON // representation of its AST. 
-var GraphQLParseSchema = &Builtin{ - Name: "graphql.parse_schema", - Description: "Returns an AST object for a GraphQL schema.", - Decl: types.NewFunction( - types.Args( - types.Named("schema", types.S), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL schema."), - ), -} +var GraphQLParseSchema = v1.GraphQLParseSchema // GraphQLIsValid returns true if a GraphQL query is valid with a given // schema, and returns false for all other inputs. -var GraphQLIsValid = &Builtin{ - Name: "graphql.is_valid", - Description: "Checks that a GraphQL query is valid against a given schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.B).Description("`true` if the query is valid under the given schema. `false` otherwise."), - ), -} +var GraphQLIsValid = v1.GraphQLIsValid // GraphQLSchemaIsValid returns true if the input is valid GraphQL schema, // and returns false for all other inputs. -var GraphQLSchemaIsValid = &Builtin{ - Name: "graphql.schema_is_valid", - Description: "Checks that the input is a valid GraphQL schema. The schema can be either a GraphQL string or an AST object from the other GraphQL builtin functions.", - Decl: types.NewFunction( - types.Args( - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))), - ), - types.Named("output", types.B).Description("`true` if the schema is a valid GraphQL schema. `false` otherwise."), - ), -} +var GraphQLSchemaIsValid = v1.GraphQLSchemaIsValid /** * JSON Schema @@ -2811,313 +499,76 @@ var GraphQLSchemaIsValid = &Builtin{ // JSONSchemaVerify returns empty string if the input is valid JSON schema // and returns error string for all other inputs. -var JSONSchemaVerify = &Builtin{ - Name: "json.verify_schema", - Description: "Checks that the input is a valid JSON schema object. The schema can be either a JSON string or an JSON object.", - Decl: types.NewFunction( - types.Args( - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). - Description("the schema to verify"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewAny(types.S, types.Null{}), - }, nil)). - Description("`output` is of the form `[valid, error]`. If the schema is valid, then `valid` is `true`, and `error` is `null`. Otherwise, `valid` is `false` and `error` is a string describing the error."), - ), - Categories: objectCat, -} +var JSONSchemaVerify = v1.JSONSchemaVerify // JSONMatchSchema returns empty array if the document matches the JSON schema, // and returns non-empty array with error objects otherwise. -var JSONMatchSchema = &Builtin{ - Name: "json.match_schema", - Description: "Checks that the document matches the JSON schema.", - Decl: types.NewFunction( - types.Args( - types.Named("document", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). - Description("document to verify by schema"), - types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). 
- Description("schema to verify document by"), - ), - types.Named("output", types.NewArray([]types.Type{ - types.B, - types.NewArray( - nil, types.NewObject( - []*types.StaticProperty{ - {Key: "error", Value: types.S}, - {Key: "type", Value: types.S}, - {Key: "field", Value: types.S}, - {Key: "desc", Value: types.S}, - }, - nil, - ), - ), - }, nil)). - Description("`output` is of the form `[match, errors]`. If the document is valid given the schema, then `match` is `true`, and `errors` is an empty array. Otherwise, `match` is `false` and `errors` is an array of objects describing the error(s)."), - ), - Categories: objectCat, -} +var JSONMatchSchema = v1.JSONMatchSchema /** * Cloud Provider Helper Functions */ -var providersAWSCat = category("providers.aws") - -var ProvidersAWSSignReqObj = &Builtin{ - Name: "providers.aws.sign_req", - Description: "Signs an HTTP request object for Amazon Web Services. Currently implements [AWS Signature Version 4 request signing](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) by the `Authorization` header method.", - Decl: types.NewFunction( - types.Args( - types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - types.Named("aws_config", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), - types.Named("time_ns", types.N), - ), - types.Named("signed_request", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))), - ), - Categories: providersAWSCat, -} + +var ProvidersAWSSignReqObj = v1.ProvidersAWSSignReqObj /** * Rego */ -var RegoParseModule = &Builtin{ - Name: "rego.parse_module", - Description: "Parses the input Rego string and returns an object representation of the AST.", - Decl: types.NewFunction( - types.Args( - types.Named("filename", types.S).Description("file name to attach to AST nodes' locations"), - types.Named("rego", types.S).Description("Rego module"), - ), - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), // TODO(tsandall): import AST schema - ), -} +var RegoParseModule = v1.RegoParseModule -var RegoMetadataChain = &Builtin{ - Name: "rego.metadata.chain", - Description: `Returns the chain of metadata for the active rule. -Ordered starting at the active rule, going outward to the most distant node in its package ancestry. -A chain entry is a JSON document with two members: "path", an array representing the path of the node; and "annotations", a JSON document containing the annotations declared for the node. -The first entry in the chain always points to the active rule, even if it has no declared annotations (in which case the "annotations" member is not present).`, - Decl: types.NewFunction( - types.Args(), - types.Named("chain", types.NewArray(nil, types.A)).Description("each array entry represents a node in the path ancestry (chain) of the active rule that also has declared annotations"), - ), -} +var RegoMetadataChain = v1.RegoMetadataChain // RegoMetadataRule returns the metadata for the active rule -var RegoMetadataRule = &Builtin{ - Name: "rego.metadata.rule", - Description: "Returns annotations declared for the active rule and using the _rule_ scope.", - Decl: types.NewFunction( - types.Args(), - types.Named("output", types.A).Description("\"rule\" scope annotations for this rule; empty object if no annotations exist"), - ), -} +var RegoMetadataRule = v1.RegoMetadataRule /** * OPA */ // Marked non-deterministic because of unpredictable config/environment-dependent results. 
-var OPARuntime = &Builtin{ - Name: "opa.runtime", - Description: "Returns an object that describes the runtime environment where OPA is deployed.", - Decl: types.NewFunction( - nil, - types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). - Description("includes a `config` key if OPA was started with a configuration file; an `env` key containing the environment variables that the OPA process was started with; includes `version` and `commit` keys containing the version and build commit of OPA."), - ), - Nondeterministic: true, -} +var OPARuntime = v1.OPARuntime /** * Trace */ -var tracing = category("tracing") - -var Trace = &Builtin{ - Name: "trace", - Description: "Emits `note` as a `Note` event in the query explanation. Query explanations show the exact expressions evaluated by OPA during policy execution. For example, `trace(\"Hello There!\")` includes `Note \"Hello There!\"` in the query explanation. To include variables in the message, use `sprintf`. For example, `person := \"Bob\"; trace(sprintf(\"Hello There! %v\", [person]))` will emit `Note \"Hello There! Bob\"` inside of the explanation.", - Decl: types.NewFunction( - types.Args( - types.Named("note", types.S).Description("the note to include"), - ), - types.Named("result", types.B).Description("always `true`"), - ), - Categories: tracing, -} + +var Trace = v1.Trace /** * Glob */ -var GlobMatch = &Builtin{ - Name: "glob.match", - Description: "Parses and matches strings against the glob notation. Not to be confused with `regex.globs_match`.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S), - types.Named("delimiters", types.NewAny( - types.NewArray(nil, types.S), - types.NewNull(), - )).Description("glob pattern delimiters, e.g. `[\".\", \":\"]`, defaults to `[\".\"]` if unset. If `delimiters` is `null`, glob match without delimiter."), - types.Named("match", types.S), - ), - types.Named("result", types.B).Description("true if `match` can be found in `pattern` which is separated by `delimiters`"), - ), -} +var GlobMatch = v1.GlobMatch -var GlobQuoteMeta = &Builtin{ - Name: "glob.quote_meta", - Description: "Returns a string which represents a version of the pattern where all asterisks have been escaped.", - Decl: types.NewFunction( - types.Args( - types.Named("pattern", types.S), - ), - types.Named("output", types.S).Description("the escaped string of `pattern`"), - ), - // TODO(sr): example for this was: Calling ``glob.quote_meta("*.github.com", output)`` returns ``\\*.github.com`` as ``output``. -} +var GlobQuoteMeta = v1.GlobQuoteMeta /** * Networking */ -var NetCIDRIntersects = &Builtin{ - Name: "net.cidr_intersects", - Description: "Checks if a CIDR intersects with another CIDR (e.g. `192.168.0.0/16` overlaps with `192.168.1.0/24`). 
Supports both IPv4 and IPv6 notations.", - Decl: types.NewFunction( - types.Args( - types.Named("cidr1", types.S), - types.Named("cidr2", types.S), - ), - types.Named("result", types.B), - ), -} +var NetCIDRIntersects = v1.NetCIDRIntersects -var NetCIDRExpand = &Builtin{ - Name: "net.cidr_expand", - Description: "Expands CIDR to set of hosts (e.g., `net.cidr_expand(\"192.168.0.0/30\")` generates 4 hosts: `{\"192.168.0.0\", \"192.168.0.1\", \"192.168.0.2\", \"192.168.0.3\"}`).", - Decl: types.NewFunction( - types.Args( - types.Named("cidr", types.S), - ), - types.Named("hosts", types.NewSet(types.S)).Description("set of IP addresses the CIDR `cidr` expands to"), - ), -} +var NetCIDRExpand = v1.NetCIDRExpand -var NetCIDRContains = &Builtin{ - Name: "net.cidr_contains", - Description: "Checks if a CIDR or IP is contained within another CIDR. `output` is `true` if `cidr_or_ip` (e.g. `127.0.0.64/26` or `127.0.0.1`) is contained within `cidr` (e.g. `127.0.0.1/24`) and `false` otherwise. Supports both IPv4 and IPv6 notations.", - Decl: types.NewFunction( - types.Args( - types.Named("cidr", types.S), - types.Named("cidr_or_ip", types.S), - ), - types.Named("result", types.B), - ), -} +var NetCIDRContains = v1.NetCIDRContains -var NetCIDRContainsMatches = &Builtin{ - Name: "net.cidr_contains_matches", - Description: "Checks if collections of cidrs or ips are contained within another collection of cidrs and returns matches. " + - "This function is similar to `net.cidr_contains` except it allows callers to pass collections of CIDRs or IPs as arguments and returns the matches (as opposed to a boolean result indicating a match between two CIDRs/IPs).", - Decl: types.NewFunction( - types.Args( - types.Named("cidrs", netCidrContainsMatchesOperandType), - types.Named("cidrs_or_ips", netCidrContainsMatchesOperandType), - ), - types.Named("output", types.NewSet(types.NewArray([]types.Type{types.A, types.A}, nil))).Description("tuples identifying matches where `cidrs_or_ips` are contained within `cidrs`"), - ), -} +var NetCIDRContainsMatches = v1.NetCIDRContainsMatches -var NetCIDRMerge = &Builtin{ - Name: "net.cidr_merge", - Description: "Merges IP addresses and subnets into the smallest possible list of CIDRs (e.g., `net.cidr_merge([\"192.0.128.0/24\", \"192.0.129.0/24\"])` generates `{\"192.0.128.0/23\"}`." + - `This function merges adjacent subnets where possible, those contained within others and also removes any duplicates. -Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g. 
"/128").`, - Decl: types.NewFunction( - types.Args( - types.Named("addrs", types.NewAny( - types.NewArray(nil, types.NewAny(types.S)), - types.NewSet(types.S), - )).Description("CIDRs or IP addresses"), - ), - types.Named("output", types.NewSet(types.S)).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"), - ), -} +var NetCIDRMerge = v1.NetCIDRMerge -var NetCIDRIsValid = &Builtin{ - Name: "net.cidr_is_valid", - Description: "Parses an IPv4/IPv6 CIDR and returns a boolean indicating if the provided CIDR is valid.", - Decl: types.NewFunction( - types.Args( - types.Named("cidr", types.S), - ), - types.Named("result", types.B), - ), -} - -var netCidrContainsMatchesOperandType = types.NewAny( - types.S, - types.NewArray(nil, types.NewAny( - types.S, - types.NewArray(nil, types.A), - )), - types.NewSet(types.NewAny( - types.S, - types.NewArray(nil, types.A), - )), - types.NewObject(nil, types.NewDynamicProperty( - types.S, - types.NewAny( - types.S, - types.NewArray(nil, types.A), - ), - )), -) +var NetCIDRIsValid = v1.NetCIDRIsValid // Marked non-deterministic because DNS resolution results can be non-deterministic. -var NetLookupIPAddr = &Builtin{ - Name: "net.lookup_ip_addr", - Description: "Returns the set of IP addresses (both v4 and v6) that the passed-in `name` resolves to using the standard name resolution mechanisms available.", - Decl: types.NewFunction( - types.Args( - types.Named("name", types.S).Description("domain name to resolve"), - ), - types.Named("addrs", types.NewSet(types.S)).Description("IP addresses (v4 and v6) that `name` resolves to"), - ), - Nondeterministic: true, -} +var NetLookupIPAddr = v1.NetLookupIPAddr /** * Semantic Versions */ -var SemVerIsValid = &Builtin{ - Name: "semver.is_valid", - Description: "Validates that the input is a valid SemVer string.", - Decl: types.NewFunction( - types.Args( - types.Named("vsn", types.A), - ), - types.Named("result", types.B).Description("`true` if `vsn` is a valid SemVer; `false` otherwise"), - ), -} +var SemVerIsValid = v1.SemVerIsValid -var SemVerCompare = &Builtin{ - Name: "semver.compare", - Description: "Compares valid SemVer formatted version strings.", - Decl: types.NewFunction( - types.Args( - types.Named("a", types.S), - types.Named("b", types.S), - ), - types.Named("result", types.N).Description("`-1` if `a < b`; `1` if `a > b`; `0` if `a == b`"), - ), -} +var SemVerCompare = v1.SemVerCompare /** * Printing @@ -3128,248 +579,56 @@ var SemVerCompare = &Builtin{ // operands may be of any type. Furthermore, unlike other built-in functions, // undefined operands DO NOT cause the print() function to fail during // evaluation. -var Print = &Builtin{ - Name: "print", - Decl: types.NewVariadicFunction(nil, types.A, nil), -} +var Print = v1.Print // InternalPrint represents the internal implementation of the print() function. // The compiler rewrites print() calls to refer to the internal implementation. -var InternalPrint = &Builtin{ - Name: "internal.print", - Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.NewSet(types.A))}, nil), -} +var InternalPrint = v1.InternalPrint /** * Deprecated built-ins. */ // SetDiff has been replaced by the minus built-in. 
-var SetDiff = &Builtin{ - Name: "set_diff", - Decl: types.NewFunction( - types.Args( - types.NewSet(types.A), - types.NewSet(types.A), - ), - types.NewSet(types.A), - ), - deprecated: true, -} +var SetDiff = v1.SetDiff // NetCIDROverlap has been replaced by the `net.cidr_contains` built-in. -var NetCIDROverlap = &Builtin{ - Name: "net.cidr_overlap", - Decl: types.NewFunction( - types.Args( - types.S, - types.S, - ), - types.B, - ), - deprecated: true, -} +var NetCIDROverlap = v1.NetCIDROverlap // CastArray checks the underlying type of the input. If it is array or set, an array // containing the values is returned. If it is not an array, an error is thrown. -var CastArray = &Builtin{ - Name: "cast_array", - Decl: types.NewFunction( - types.Args(types.A), - types.NewArray(nil, types.A), - ), - deprecated: true, -} +var CastArray = v1.CastArray // CastSet checks the underlying type of the input. // If it is a set, the set is returned. // If it is an array, the array is returned in set form (all duplicates removed) // If neither, an error is thrown -var CastSet = &Builtin{ - Name: "cast_set", - Decl: types.NewFunction( - types.Args(types.A), - types.NewSet(types.A), - ), - deprecated: true, -} +var CastSet = v1.CastSet // CastString returns input if it is a string; if not returns error. // For formatting variables, see sprintf -var CastString = &Builtin{ - Name: "cast_string", - Decl: types.NewFunction( - types.Args(types.A), - types.S, - ), - deprecated: true, -} +var CastString = v1.CastString // CastBoolean returns input if it is a boolean; if not returns error. -var CastBoolean = &Builtin{ - Name: "cast_boolean", - Decl: types.NewFunction( - types.Args(types.A), - types.B, - ), - deprecated: true, -} +var CastBoolean = v1.CastBoolean // CastNull returns null if input is null; if not returns error. -var CastNull = &Builtin{ - Name: "cast_null", - Decl: types.NewFunction( - types.Args(types.A), - types.NewNull(), - ), - deprecated: true, -} +var CastNull = v1.CastNull // CastObject returns the given object if it is null; throws an error otherwise -var CastObject = &Builtin{ - Name: "cast_object", - Decl: types.NewFunction( - types.Args(types.A), - types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), - ), - deprecated: true, -} +var CastObject = v1.CastObject // RegexMatchDeprecated declares `re_match` which has been deprecated. Use `regex.match` instead. -var RegexMatchDeprecated = &Builtin{ - Name: "re_match", - Decl: types.NewFunction( - types.Args( - types.S, - types.S, - ), - types.B, - ), - deprecated: true, -} +var RegexMatchDeprecated = v1.RegexMatchDeprecated // All takes a list and returns true if all of the items // are true. A collection of length 0 returns true. -var All = &Builtin{ - Name: "all", - Decl: types.NewFunction( - types.Args( - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - ), - ), - types.B, - ), - deprecated: true, -} +var All = v1.All // Any takes a collection and returns true if any of the items // is true. A collection of length 0 returns false. -var Any = &Builtin{ - Name: "any", - Decl: types.NewFunction( - types.Args( - types.NewAny( - types.NewSet(types.A), - types.NewArray(nil, types.A), - ), - ), - types.B, - ), - deprecated: true, -} +var Any = v1.Any // Builtin represents a built-in function supported by OPA. Every built-in // function is uniquely identified by a name. 
-type Builtin struct { - Name string `json:"name"` // Unique name of built-in function, e.g., (arg1,arg2,...,argN) - Description string `json:"description,omitempty"` // Description of what the built-in function does. - - // Categories of the built-in function. Omitted for namespaced - // built-ins, i.e. "array.concat" is taken to be of the "array" category. - // "minus" for example, is part of two categories: numbers and sets. (NOTE(sr): aspirational) - Categories []string `json:"categories,omitempty"` - - Decl *types.Function `json:"decl"` // Built-in function type declaration. - Infix string `json:"infix,omitempty"` // Unique name of infix operator. Default should be unset. - Relation bool `json:"relation,omitempty"` // Indicates if the built-in acts as a relation. - deprecated bool // Indicates if the built-in has been deprecated. - Nondeterministic bool `json:"nondeterministic,omitempty"` // Indicates if the built-in returns non-deterministic results. -} - -// category is a helper for specifying a Builtin's Categories -func category(cs ...string) []string { - return cs -} - -// Minimal returns a shallow copy of b with the descriptions and categories and -// named arguments stripped out. -func (b *Builtin) Minimal() *Builtin { - cpy := *b - fargs := b.Decl.FuncArgs() - if fargs.Variadic != nil { - cpy.Decl = types.NewVariadicFunction(fargs.Args, fargs.Variadic, b.Decl.Result()) - } else { - cpy.Decl = types.NewFunction(fargs.Args, b.Decl.Result()) - } - cpy.Categories = nil - cpy.Description = "" - return &cpy -} - -// IsDeprecated returns true if the Builtin function is deprecated and will be removed in a future release. -func (b *Builtin) IsDeprecated() bool { - return b.deprecated -} - -// IsDeterministic returns true if the Builtin function returns non-deterministic results. -func (b *Builtin) IsNondeterministic() bool { - return b.Nondeterministic -} - -// Expr creates a new expression for the built-in with the given operands. -func (b *Builtin) Expr(operands ...*Term) *Expr { - ts := make([]*Term, len(operands)+1) - ts[0] = NewTerm(b.Ref()) - for i := range operands { - ts[i+1] = operands[i] - } - return &Expr{ - Terms: ts, - } -} - -// Call creates a new term for the built-in with the given operands. -func (b *Builtin) Call(operands ...*Term) *Term { - call := make(Call, len(operands)+1) - call[0] = NewTerm(b.Ref()) - for i := range operands { - call[i+1] = operands[i] - } - return NewTerm(call) -} - -// Ref returns a Ref that refers to the built-in function. -func (b *Builtin) Ref() Ref { - parts := strings.Split(b.Name, ".") - ref := make(Ref, len(parts)) - ref[0] = VarTerm(parts[0]) - for i := 1; i < len(parts); i++ { - ref[i] = StringTerm(parts[i]) - } - return ref -} - -// IsTargetPos returns true if a variable in the i-th position will be bound by -// evaluating the call expression. 
-func (b *Builtin) IsTargetPos(i int) bool { - return len(b.Decl.FuncArgs().Args) == i -} - -func init() { - BuiltinMap = map[string]*Builtin{} - for _, b := range DefaultBuiltins { - RegisterBuiltin(b) - } -} +type Builtin = v1.Builtin diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/capabilities.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/capabilities.go index 3b95d79e5..bc7278a88 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/capabilities.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/capabilities.go @@ -5,228 +5,54 @@ package ast import ( - "bytes" - _ "embed" - "encoding/json" - "fmt" "io" - "os" - "sort" - "strings" - caps "github.com/open-policy-agent/opa/capabilities" - "github.com/open-policy-agent/opa/internal/semver" - "github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // VersonIndex contains an index from built-in function name, language feature, // and future rego keyword to version number. During the build, this is used to // create an index of the minimum version required for the built-in/feature/kw. -type VersionIndex struct { - Builtins map[string]semver.Version `json:"builtins"` - Features map[string]semver.Version `json:"features"` - Keywords map[string]semver.Version `json:"keywords"` -} - -// NOTE(tsandall): this file is generated by internal/cmd/genversionindex/main.go -// and run as part of go:generate. We generate the version index as part of the -// build process because it's relatively expensive to build (it takes ~500ms on -// my machine) and never changes. -// -//go:embed version_index.json -var versionIndexBs []byte - -var minVersionIndex = func() VersionIndex { - var vi VersionIndex - err := json.Unmarshal(versionIndexBs, &vi) - if err != nil { - panic(err) - } - return vi -}() +type VersionIndex = v1.VersionIndex // In the compiler, we used this to check that we're OK working with ref heads. // If this isn't present, we'll fail. This is to ensure that older versions of // OPA can work with policies that we're compiling -- if they don't know ref // heads, they wouldn't be able to parse them. -const FeatureRefHeadStringPrefixes = "rule_head_ref_string_prefixes" -const FeatureRefHeads = "rule_head_refs" -const FeatureRegoV1Import = "rego_v1_import" +const FeatureRefHeadStringPrefixes = v1.FeatureRefHeadStringPrefixes +const FeatureRefHeads = v1.FeatureRefHeads +const FeatureRegoV1 = v1.FeatureRegoV1 +const FeatureRegoV1Import = v1.FeatureRegoV1Import // Capabilities defines a structure containing data that describes the capabilities // or features supported by a particular version of OPA. -type Capabilities struct { - Builtins []*Builtin `json:"builtins,omitempty"` - FutureKeywords []string `json:"future_keywords,omitempty"` - WasmABIVersions []WasmABIVersion `json:"wasm_abi_versions,omitempty"` - - // Features is a bit of a mixed bag for checking that an older version of OPA - // is able to do what needs to be done. - // TODO(sr): find better words ^^ - Features []string `json:"features,omitempty"` - - // allow_net is an array of hostnames or IP addresses, that an OPA instance is - // allowed to connect to. - // If omitted, ANY host can be connected to. If empty, NO host can be connected to. - // As of now, this only controls fetching remote refs for using JSON Schemas in - // the type checker. 
- // TODO(sr): support ports to further restrict connection peers - // TODO(sr): support restricting `http.send` using the same mechanism (see https://github.com/open-policy-agent/opa/issues/3665) - AllowNet []string `json:"allow_net,omitempty"` -} +type Capabilities = v1.Capabilities // WasmABIVersion captures the Wasm ABI version. Its `Minor` version is indicating // backwards-compatible changes. -type WasmABIVersion struct { - Version int `json:"version"` - Minor int `json:"minor_version"` -} +type WasmABIVersion = v1.WasmABIVersion // CapabilitiesForThisVersion returns the capabilities of this version of OPA. func CapabilitiesForThisVersion() *Capabilities { - f := &Capabilities{} - - for _, vers := range capabilities.ABIVersions() { - f.WasmABIVersions = append(f.WasmABIVersions, WasmABIVersion{Version: vers[0], Minor: vers[1]}) - } - - f.Builtins = make([]*Builtin, len(Builtins)) - copy(f.Builtins, Builtins) - sort.Slice(f.Builtins, func(i, j int) bool { - return f.Builtins[i].Name < f.Builtins[j].Name - }) - - for kw := range futureKeywords { - f.FutureKeywords = append(f.FutureKeywords, kw) - } - sort.Strings(f.FutureKeywords) - - f.Features = []string{ - FeatureRefHeadStringPrefixes, - FeatureRefHeads, - FeatureRegoV1Import, - } - - return f + return v1.CapabilitiesForThisVersion(v1.CapabilitiesRegoVersion(DefaultRegoVersion)) } // LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r. func LoadCapabilitiesJSON(r io.Reader) (*Capabilities, error) { - d := util.NewJSONDecoder(r) - var c Capabilities - return &c, d.Decode(&c) + return v1.LoadCapabilitiesJSON(r) } // LoadCapabilitiesVersion loads a JSON serialized capabilities structure from the specific version. func LoadCapabilitiesVersion(version string) (*Capabilities, error) { - cvs, err := LoadCapabilitiesVersions() - if err != nil { - return nil, err - } - - for _, cv := range cvs { - if cv == version { - cont, err := caps.FS.ReadFile(cv + ".json") - if err != nil { - return nil, err - } - - return LoadCapabilitiesJSON(bytes.NewReader(cont)) - } - - } - return nil, fmt.Errorf("no capabilities version found %v", version) + return v1.LoadCapabilitiesVersion(version) } // LoadCapabilitiesFile loads a JSON serialized capabilities structure from a file. func LoadCapabilitiesFile(file string) (*Capabilities, error) { - fd, err := os.Open(file) - if err != nil { - return nil, err - } - defer fd.Close() - return LoadCapabilitiesJSON(fd) + return v1.LoadCapabilitiesFile(file) } // LoadCapabilitiesVersions loads all capabilities versions func LoadCapabilitiesVersions() ([]string, error) { - ents, err := caps.FS.ReadDir(".") - if err != nil { - return nil, err - } - - capabilitiesVersions := make([]string, 0, len(ents)) - for _, ent := range ents { - capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1)) - } - return capabilitiesVersions, nil -} - -// MinimumCompatibleVersion returns the minimum compatible OPA version based on -// the built-ins, features, and keywords in c. 
-func (c *Capabilities) MinimumCompatibleVersion() (string, bool) { - - var maxVersion semver.Version - - // this is the oldest OPA release that includes capabilities - if err := maxVersion.Set("0.17.0"); err != nil { - panic("unreachable") - } - - for _, bi := range c.Builtins { - v, ok := minVersionIndex.Builtins[bi.Name] - if !ok { - return "", false - } - if v.Compare(maxVersion) > 0 { - maxVersion = v - } - } - - for _, kw := range c.FutureKeywords { - v, ok := minVersionIndex.Keywords[kw] - if !ok { - return "", false - } - if v.Compare(maxVersion) > 0 { - maxVersion = v - } - } - - for _, feat := range c.Features { - v, ok := minVersionIndex.Features[feat] - if !ok { - return "", false - } - if v.Compare(maxVersion) > 0 { - maxVersion = v - } - } - - return maxVersion.String(), true -} - -func (c *Capabilities) ContainsFeature(feature string) bool { - for _, f := range c.Features { - if f == feature { - return true - } - } - return false -} - -// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name -// will be overwritten. -func (c *Capabilities) addBuiltinSorted(bi *Builtin) { - i := sort.Search(len(c.Builtins), func(x int) bool { - return c.Builtins[x].Name >= bi.Name - }) - if i < len(c.Builtins) && bi.Name == c.Builtins[i].Name { - c.Builtins[i] = bi - return - } - c.Builtins = append(c.Builtins, nil) - copy(c.Builtins[i+1:], c.Builtins[i:]) - c.Builtins[i] = bi + return v1.LoadCapabilitiesVersions() } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/check.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/check.go index 23d1ed8fa..4cf00436d 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/check.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/check.go @@ -5,1291 +5,18 @@ package ast import ( - "fmt" - "sort" - "strings" - - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) -type varRewriter func(Ref) Ref - -// exprChecker defines the interface for executing type checking on a single -// expression. The exprChecker must update the provided TypeEnv with inferred -// types of vars. -type exprChecker func(*TypeEnv, *Expr) *Error - -// typeChecker implements type checking on queries and rules. Errors are -// accumulated on the typeChecker so that a single run can report multiple -// issues. -type typeChecker struct { - builtins map[string]*Builtin - required *Capabilities - errs Errors - exprCheckers map[string]exprChecker - varRewriter varRewriter - ss *SchemaSet - allowNet []string - input types.Type - allowUndefinedFuncs bool -} - -// newTypeChecker returns a new typeChecker object that has no errors. -func newTypeChecker() *typeChecker { - tc := &typeChecker{} - tc.exprCheckers = map[string]exprChecker{ - "eq": tc.checkExprEq, - } - return tc -} - -func (tc *typeChecker) newEnv(exist *TypeEnv) *TypeEnv { - if exist != nil { - return exist.wrap() - } - env := newTypeEnv(tc.copy) - if tc.input != nil { - env.tree.Put(InputRootRef, tc.input) - } - return env -} - -func (tc *typeChecker) copy() *typeChecker { - return newTypeChecker(). - WithVarRewriter(tc.varRewriter). - WithSchemaSet(tc.ss). - WithAllowNet(tc.allowNet). - WithInputType(tc.input). - WithAllowUndefinedFunctionCalls(tc.allowUndefinedFuncs). - WithBuiltins(tc.builtins). 
- WithRequiredCapabilities(tc.required) -} - -func (tc *typeChecker) WithRequiredCapabilities(c *Capabilities) *typeChecker { - tc.required = c - return tc -} - -func (tc *typeChecker) WithBuiltins(builtins map[string]*Builtin) *typeChecker { - tc.builtins = builtins - return tc -} - -func (tc *typeChecker) WithSchemaSet(ss *SchemaSet) *typeChecker { - tc.ss = ss - return tc -} - -func (tc *typeChecker) WithAllowNet(hosts []string) *typeChecker { - tc.allowNet = hosts - return tc -} - -func (tc *typeChecker) WithVarRewriter(f varRewriter) *typeChecker { - tc.varRewriter = f - return tc -} - -func (tc *typeChecker) WithInputType(tpe types.Type) *typeChecker { - tc.input = tpe - return tc -} - -// WithAllowUndefinedFunctionCalls sets the type checker to allow references to undefined functions. -// Additionally, the 'CheckUndefinedFuncs' and 'CheckSafetyRuleBodies' compiler stages are skipped. -func (tc *typeChecker) WithAllowUndefinedFunctionCalls(allow bool) *typeChecker { - tc.allowUndefinedFuncs = allow - return tc -} - -// Env returns a type environment for the specified built-ins with any other -// global types configured on the checker. In practice, this is the default -// environment that other statements will be checked against. -func (tc *typeChecker) Env(builtins map[string]*Builtin) *TypeEnv { - env := tc.newEnv(nil) - for _, bi := range builtins { - env.tree.Put(bi.Ref(), bi.Decl) - } - return env -} - -// CheckBody runs type checking on the body and returns a TypeEnv if no errors -// are found. The resulting TypeEnv wraps the provided one. The resulting -// TypeEnv will be able to resolve types of vars contained in the body. -func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) { - - errors := []*Error{} - env = tc.newEnv(env) - - WalkExprs(body, func(expr *Expr) bool { - - closureErrs := tc.checkClosures(env, expr) - for _, err := range closureErrs { - errors = append(errors, err) - } - - hasClosureErrors := len(closureErrs) > 0 - - vis := newRefChecker(env, tc.varRewriter) - NewGenericVisitor(vis.Visit).Walk(expr) - for _, err := range vis.errs { - errors = append(errors, err) - } - - hasRefErrors := len(vis.errs) > 0 - - if err := tc.checkExpr(env, expr); err != nil { - // Suppress this error if a more actionable one has occurred. In - // this case, if an error occurred in a ref or closure contained in - // this expression, and the error is due to a nil type, then it's - // likely to be the result of the more specific error. - skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err) - if !skip { - errors = append(errors, err) - } - } - return true - }) - - tc.err(errors) - return env, errors -} - -// CheckTypes runs type checking on the rules returns a TypeEnv if no errors -// are found. The resulting TypeEnv wraps the provided one. The resulting -// TypeEnv will be able to resolve types of refs that refer to rules. 
-func (tc *typeChecker) CheckTypes(env *TypeEnv, sorted []util.T, as *AnnotationSet) (*TypeEnv, Errors) { - env = tc.newEnv(env) - for _, s := range sorted { - tc.checkRule(env, as, s.(*Rule)) - } - tc.errs.Sort() - return env, tc.errs -} - -func (tc *typeChecker) checkClosures(env *TypeEnv, expr *Expr) Errors { - var result Errors - WalkClosures(expr, func(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension: - _, errs := tc.copy().CheckBody(env, x.Body) - if len(errs) > 0 { - result = errs - return true - } - case *SetComprehension: - _, errs := tc.copy().CheckBody(env, x.Body) - if len(errs) > 0 { - result = errs - return true - } - case *ObjectComprehension: - _, errs := tc.copy().CheckBody(env, x.Body) - if len(errs) > 0 { - result = errs - return true - } - } - return false - }) - return result -} - -func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { - - env = env.wrap() - - schemaAnnots := getRuleAnnotation(as, rule) - for _, schemaAnnot := range schemaAnnots { - ref, refType, err := processAnnotation(tc.ss, schemaAnnot, rule, tc.allowNet) - if err != nil { - tc.err([]*Error{err}) - continue - } - if ref == nil && refType == nil { - continue - } - prefixRef, t := getPrefix(env, ref) - if t == nil || len(prefixRef) == len(ref) { - env.tree.Put(ref, refType) - } else { - newType, err := override(ref[len(prefixRef):], t, refType, rule) - if err != nil { - tc.err([]*Error{err}) - continue - } - env.tree.Put(prefixRef, newType) - } - } - - cpy, err := tc.CheckBody(env, rule.Body) - env = env.next - path := rule.Ref() - - if len(err) > 0 { - // if the rule/function contains an error, add it to the type env so - // that expressions that refer to this rule/function do not encounter - // type errors. - env.tree.Put(path, types.A) - return - } - - var tpe types.Type - - if len(rule.Head.Args) > 0 { - // If args are not referred to in body, infer as any. - WalkVars(rule.Head.Args, func(v Var) bool { - if cpy.Get(v) == nil { - cpy.tree.PutOne(v, types.A) - } - return false - }) - - // Construct function type. - args := make([]types.Type, len(rule.Head.Args)) - for i := 0; i < len(rule.Head.Args); i++ { - args[i] = cpy.Get(rule.Head.Args[i]) - } - - f := types.NewFunction(args, cpy.Get(rule.Head.Value)) - - tpe = f - } else { - switch rule.Head.RuleKind() { - case SingleValue: - typeV := cpy.Get(rule.Head.Value) - if !path.IsGround() { - // e.g. store object[string: whatever] at data.p.q.r, not data.p.q.r[x] or data.p.q.r[x].y[z] - objPath := path.DynamicSuffix() - path = path.GroundPrefix() - - var err error - tpe, err = nestedObject(cpy, objPath, typeV) - if err != nil { - tc.err([]*Error{NewError(TypeErr, rule.Head.Location, err.Error())}) - tpe = nil - } - } else { - if typeV != nil { - tpe = typeV - } - } - case MultiValue: - typeK := cpy.Get(rule.Head.Key) - if typeK != nil { - tpe = types.NewSet(typeK) - } - } - } - - if tpe != nil { - env.tree.Insert(path, tpe, env) - } -} - -// nestedObject creates a nested structure of object types, where each term on path corresponds to a level in the -// nesting. Each term in the path only contributes to the dynamic portion of its corresponding object. 
-func nestedObject(env *TypeEnv, path Ref, tpe types.Type) (types.Type, error) { - if len(path) == 0 { - return tpe, nil - } - - k := path[0] - typeV, err := nestedObject(env, path[1:], tpe) - if err != nil { - return nil, err - } - if typeV == nil { - return nil, nil - } - - var dynamicProperty *types.DynamicProperty - typeK := env.Get(k) - if typeK == nil { - return nil, nil - } - dynamicProperty = types.NewDynamicProperty(typeK, typeV) - - return types.NewObject(nil, dynamicProperty), nil -} - -func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error { - if err := tc.checkExprWith(env, expr, 0); err != nil { - return err - } - if !expr.IsCall() { - return nil - } - - operator := expr.Operator().String() - - // If the type checker wasn't provided with a required capabilities - // structure then just skip. In some cases, type checking might be run - // without the need to record what builtins are required. - if tc.required != nil { - if bi, ok := tc.builtins[operator]; ok { - tc.required.addBuiltinSorted(bi) - } - } - - checker := tc.exprCheckers[operator] - if checker != nil { - return checker(env, expr) - } - - return tc.checkExprBuiltin(env, expr) -} - -func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error { - - args := expr.Operands() - pre := getArgTypes(env, args) - - // NOTE(tsandall): undefined functions will have been caught earlier in the - // compiler. We check for undefined functions before the safety check so - // that references to non-existent functions result in undefined function - // errors as opposed to unsafe var errors. - // - // We cannot run type checking before the safety check because part of the - // type checker relies on reordering (in particular for references to local - // vars). - name := expr.Operator() - tpe := env.Get(name) - - if tpe == nil { - if tc.allowUndefinedFuncs { - return nil - } - return NewError(TypeErr, expr.Location, "undefined function %v", name) - } - - // check if the expression refers to a function that contains an error - _, ok := tpe.(types.Any) - if ok { - return nil - } - - ftpe, ok := tpe.(*types.Function) - if !ok { - return NewError(TypeErr, expr.Location, "undefined function %v", name) - } - - fargs := ftpe.FuncArgs() - namedFargs := ftpe.NamedFuncArgs() - - if ftpe.Result() != nil { - fargs.Args = append(fargs.Args, ftpe.Result()) - namedFargs.Args = append(namedFargs.Args, ftpe.NamedResult()) - } - - if len(args) > len(fargs.Args) && fargs.Variadic == nil { - return newArgError(expr.Location, name, "too many arguments", pre, namedFargs) - } - - if len(args) < len(ftpe.FuncArgs().Args) { - return newArgError(expr.Location, name, "too few arguments", pre, namedFargs) - } - - for i := range args { - if !unify1(env, args[i], fargs.Arg(i), false) { - post := make([]types.Type, len(args)) - for i := range args { - post[i] = env.Get(args[i]) - } - return newArgError(expr.Location, name, "invalid argument(s)", post, namedFargs) - } - } - - return nil -} - -func (tc *typeChecker) checkExprEq(env *TypeEnv, expr *Expr) *Error { - - pre := getArgTypes(env, expr.Operands()) - exp := Equality.Decl.FuncArgs() - - if len(pre) < len(exp.Args) { - return newArgError(expr.Location, expr.Operator(), "too few arguments", pre, exp) - } - - if len(exp.Args) < len(pre) { - return newArgError(expr.Location, expr.Operator(), "too many arguments", pre, exp) - } - - a, b := expr.Operand(0), expr.Operand(1) - typeA, typeB := env.Get(a), env.Get(b) - - if !unify2(env, a, typeA, b, typeB) { - err := NewError(TypeErr, 
expr.Location, "match error") - err.Details = &UnificationErrDetail{ - Left: typeA, - Right: typeB, - } - return err - } - - return nil -} - -func (tc *typeChecker) checkExprWith(env *TypeEnv, expr *Expr, i int) *Error { - if i == len(expr.With) { - return nil - } - - target, value := expr.With[i].Target, expr.With[i].Value - targetType, valueType := env.Get(target), env.Get(value) - - if t, ok := targetType.(*types.Function); ok { // built-in function replacement - switch v := valueType.(type) { - case *types.Function: // ...by function - if !unifies(targetType, valueType) { - return newArgError(expr.With[i].Loc(), target.Value.(Ref), "arity mismatch", v.FuncArgs().Args, t.NamedFuncArgs()) - } - default: // ... by value, nothing to check - } - } - - return tc.checkExprWith(env, expr, i+1) -} - -func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool { - - nilA := types.Nil(typeA) - nilB := types.Nil(typeB) - - if nilA && !nilB { - return unify1(env, a, typeB, false) - } else if nilB && !nilA { - return unify1(env, b, typeA, false) - } else if !nilA && !nilB { - return unifies(typeA, typeB) - } - - switch a.Value.(type) { - case *Array: - return unify2Array(env, a, b) - case *object: - return unify2Object(env, a, b) - case Var: - switch b.Value.(type) { - case Var: - return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) - case *Array: - return unify2Array(env, b, a) - case *object: - return unify2Object(env, b, a) - } - } - - return false -} - -func unify2Array(env *TypeEnv, a *Term, b *Term) bool { - arr := a.Value.(*Array) - switch bv := b.Value.(type) { - case *Array: - if arr.Len() == bv.Len() { - for i := 0; i < arr.Len(); i++ { - if !unify2(env, arr.Elem(i), env.Get(arr.Elem(i)), bv.Elem(i), env.Get(bv.Elem(i))) { - return false - } - } - return true - } - case Var: - return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) - } - return false -} - -func unify2Object(env *TypeEnv, a *Term, b *Term) bool { - obj := a.Value.(Object) - switch bv := b.Value.(type) { - case *object: - cv := obj.Intersect(bv) - if obj.Len() == bv.Len() && bv.Len() == len(cv) { - for i := range cv { - if !unify2(env, cv[i][1], env.Get(cv[i][1]), cv[i][2], env.Get(cv[i][2])) { - return false - } - } - return true - } - case Var: - return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) - } - return false -} - -func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool { - switch v := term.Value.(type) { - case *Array: - switch tpe := tpe.(type) { - case *types.Array: - return unify1Array(env, v, tpe, union) - case types.Any: - if types.Compare(tpe, types.A) == 0 { - for i := 0; i < v.Len(); i++ { - unify1(env, v.Elem(i), types.A, true) - } - return true - } - unifies := false - for i := range tpe { - unifies = unify1(env, term, tpe[i], true) || unifies - } - return unifies - } - return false - case *object: - switch tpe := tpe.(type) { - case *types.Object: - return unify1Object(env, v, tpe, union) - case types.Any: - if types.Compare(tpe, types.A) == 0 { - v.Foreach(func(key, value *Term) { - unify1(env, key, types.A, true) - unify1(env, value, types.A, true) - }) - return true - } - unifies := false - for i := range tpe { - unifies = unify1(env, term, tpe[i], true) || unifies - } - return unifies - } - return false - case Set: - switch tpe := tpe.(type) { - case *types.Set: - return unify1Set(env, v, tpe, union) - case types.Any: - if types.Compare(tpe, types.A) == 0 { - v.Foreach(func(elem *Term) { - 
unify1(env, elem, types.A, true) - }) - return true - } - unifies := false - for i := range tpe { - unifies = unify1(env, term, tpe[i], true) || unifies - } - return unifies - } - return false - case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return unifies(env.Get(v), tpe) - case Var: - if !union { - if exist := env.Get(v); exist != nil { - return unifies(exist, tpe) - } - env.tree.PutOne(term.Value, tpe) - } else { - env.tree.PutOne(term.Value, types.Or(env.Get(v), tpe)) - } - return true - default: - if !IsConstant(v) { - panic("unreachable") - } - return unifies(env.Get(term), tpe) - } -} - -func unify1Array(env *TypeEnv, val *Array, tpe *types.Array, union bool) bool { - if val.Len() != tpe.Len() && tpe.Dynamic() == nil { - return false - } - for i := 0; i < val.Len(); i++ { - if !unify1(env, val.Elem(i), tpe.Select(i), union) { - return false - } - } - return true -} - -func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool { - if val.Len() != len(tpe.Keys()) && tpe.DynamicValue() == nil { - return false - } - stop := val.Until(func(k, v *Term) bool { - if IsConstant(k.Value) { - if child := selectConstant(tpe, k); child != nil { - if !unify1(env, v, child, union) { - return true - } - } else { - return true - } - } else { - // Inferring type of value under dynamic key would involve unioning - // with all property values of tpe whose keys unify. For now, type - // these values as Any. We can investigate stricter inference in - // the future. - unify1(env, v, types.A, union) - } - return false - }) - return !stop -} - -func unify1Set(env *TypeEnv, val Set, tpe *types.Set, union bool) bool { - of := types.Values(tpe) - return !val.Until(func(elem *Term) bool { - return !unify1(env, elem, of, union) - }) -} - -func (tc *typeChecker) err(errors []*Error) { - tc.errs = append(tc.errs, errors...) -} - -type refChecker struct { - env *TypeEnv - errs Errors - varRewriter varRewriter -} - -func rewriteVarsNop(node Ref) Ref { - return node -} - -func newRefChecker(env *TypeEnv, f varRewriter) *refChecker { - - if f == nil { - f = rewriteVarsNop - } - - return &refChecker{ - env: env, - errs: nil, - varRewriter: f, - } -} - -func (rc *refChecker) Visit(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return true - case *Expr: - switch terms := x.Terms.(type) { - case []*Term: - for i := 1; i < len(terms); i++ { - NewGenericVisitor(rc.Visit).Walk(terms[i]) - } - return true - case *Term: - NewGenericVisitor(rc.Visit).Walk(terms) - return true - } - case Ref: - if err := rc.checkApply(rc.env, x); err != nil { - rc.errs = append(rc.errs, err) - return true - } - if err := rc.checkRef(rc.env, rc.env.tree, x, 0); err != nil { - rc.errs = append(rc.errs, err) - } - } - return false -} - -func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error { - switch tpe := curr.Get(ref).(type) { - case *types.Function: // NOTE(sr): We don't support first-class functions, except for `with`. - return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe) - } - - return nil -} - -func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx int) *Error { - - if idx == len(ref) { - return nil - } - - head := ref[idx] - - // NOTE(sr): as long as package statements are required, this isn't possible: - // the shortest possible rule ref is data.a.b (b is idx 2), idx 1 and 2 need to - // be strings or vars. 
- if idx == 1 || idx == 2 { - switch head.Value.(type) { - case Var, String: // OK - default: - have := rc.env.Get(head.Value) - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, have, types.S, getOneOfForNode(node)) - } - } - - if v, ok := head.Value.(Var); ok && idx != 0 { - tpe := types.Keys(rc.env.getRefRecExtent(node)) - if exist := rc.env.Get(v); exist != nil { - if !unifies(tpe, exist) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, tpe, getOneOfForNode(node)) - } - } else { - rc.env.tree.PutOne(v, tpe) - } - } - - child := node.Child(head.Value) - if child == nil { - // NOTE(sr): idx is reset on purpose: we start over - switch { - case curr.next != nil: - next := curr.next - return rc.checkRef(next, next.tree, ref, 0) - - case RootDocumentNames.Contains(ref[0]): - if idx != 0 { - node.Children().Iter(func(_, child util.T) bool { - _ = rc.checkRef(curr, child.(*typeTreeNode), ref, idx+1) // ignore error - return false - }) - return nil - } - return rc.checkRefLeaf(types.A, ref, 1) - - default: - return rc.checkRefLeaf(types.A, ref, 0) - } - } - - if child.Leaf() { - return rc.checkRefLeaf(child.Value(), ref, idx+1) - } - - return rc.checkRef(curr, child, ref, idx+1) -} - -func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error { - - if idx == len(ref) { - return nil - } - - head := ref[idx] - - keys := types.Keys(tpe) - if keys == nil { - return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), idx-1, tpe) - } - - switch value := head.Value.(type) { - - case Var: - if exist := rc.env.Get(value); exist != nil { - if !unifies(exist, keys) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) - } - } else { - rc.env.tree.PutOne(value, types.Keys(tpe)) - } - - case Ref: - if exist := rc.env.Get(value); exist != nil { - if !unifies(exist, keys) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) - } - } - - case *Array, Object, Set: - if !unify1(rc.env, head, keys, false) { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, rc.env.Get(head), keys, nil) - } - - default: - child := selectConstant(tpe, head) - if child == nil { - return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.Keys(tpe), getOneOfForType(tpe)) - } - return rc.checkRefLeaf(child, ref, idx+1) - } - - return rc.checkRefLeaf(types.Values(tpe), ref, idx+1) -} - -func unifies(a, b types.Type) bool { - - if a == nil || b == nil { - return false - } - - anyA, ok1 := a.(types.Any) - if ok1 { - if unifiesAny(anyA, b) { - return true - } - } - - anyB, ok2 := b.(types.Any) - if ok2 { - if unifiesAny(anyB, a) { - return true - } - } - - if ok1 || ok2 { - return false - } - - switch a := a.(type) { - case types.Null: - _, ok := b.(types.Null) - return ok - case types.Boolean: - _, ok := b.(types.Boolean) - return ok - case types.Number: - _, ok := b.(types.Number) - return ok - case types.String: - _, ok := b.(types.String) - return ok - case *types.Array: - b, ok := b.(*types.Array) - if !ok { - return false - } - return unifiesArrays(a, b) - case *types.Object: - b, ok := b.(*types.Object) - if !ok { - return false - } - return unifiesObjects(a, b) - case *types.Set: - b, ok := b.(*types.Set) - if !ok { - return false - } - return unifies(types.Values(a), types.Values(b)) - case *types.Function: - // NOTE(sr): variadic functions can only be internal ones, and we've forbidden - // their replacement via `with`; 
so we disregard variadic here - if types.Arity(a) == types.Arity(b) { - b := b.(*types.Function) - for i := range a.FuncArgs().Args { - if !unifies(a.FuncArgs().Arg(i), b.FuncArgs().Arg(i)) { - return false - } - } - return true - } - return false - default: - panic("unreachable") - } -} - -func unifiesAny(a types.Any, b types.Type) bool { - if _, ok := b.(*types.Function); ok { - return false - } - for i := range a { - if unifies(a[i], b) { - return true - } - } - return len(a) == 0 -} - -func unifiesArrays(a, b *types.Array) bool { - - if !unifiesArraysStatic(a, b) { - return false - } - - if !unifiesArraysStatic(b, a) { - return false - } - - return a.Dynamic() == nil || b.Dynamic() == nil || unifies(a.Dynamic(), b.Dynamic()) -} - -func unifiesArraysStatic(a, b *types.Array) bool { - if a.Len() != 0 { - for i := 0; i < a.Len(); i++ { - if !unifies(a.Select(i), b.Select(i)) { - return false - } - } - } - return true -} - -func unifiesObjects(a, b *types.Object) bool { - if !unifiesObjectsStatic(a, b) { - return false - } - - if !unifiesObjectsStatic(b, a) { - return false - } - - return a.DynamicValue() == nil || b.DynamicValue() == nil || unifies(a.DynamicValue(), b.DynamicValue()) -} - -func unifiesObjectsStatic(a, b *types.Object) bool { - for _, k := range a.Keys() { - if !unifies(a.Select(k), b.Select(k)) { - return false - } - } - return true -} - -// typeErrorCause defines an interface to determine the reason for a type -// error. The type error details implement this interface so that type checking -// can report more actionable errors. -type typeErrorCause interface { - nilType() bool -} - -func causedByNilType(err *Error) bool { - cause, ok := err.Details.(typeErrorCause) - if !ok { - return false - } - return cause.nilType() -} - -// ArgErrDetail represents a generic argument error. -type ArgErrDetail struct { - Have []types.Type `json:"have"` - Want types.FuncArgs `json:"want"` -} - -// Lines returns the string representation of the detail. -func (d *ArgErrDetail) Lines() []string { - lines := make([]string, 2) - lines[0] = "have: " + formatArgs(d.Have) - lines[1] = "want: " + fmt.Sprint(d.Want) - return lines -} - -func (d *ArgErrDetail) nilType() bool { - for i := range d.Have { - if types.Nil(d.Have[i]) { - return true - } - } - return false -} - // UnificationErrDetail describes a type mismatch error when two values are // unified (e.g., x = [1,2,y]). -type UnificationErrDetail struct { - Left types.Type `json:"a"` - Right types.Type `json:"b"` -} - -func (a *UnificationErrDetail) nilType() bool { - return types.Nil(a.Left) || types.Nil(a.Right) -} - -// Lines returns the string representation of the detail. -func (a *UnificationErrDetail) Lines() []string { - lines := make([]string, 2) - lines[0] = fmt.Sprint("left : ", types.Sprint(a.Left)) - lines[1] = fmt.Sprint("right : ", types.Sprint(a.Right)) - return lines -} +type UnificationErrDetail = v1.UnificationErrDetail // RefErrUnsupportedDetail describes an undefined reference error where the // referenced value does not support dereferencing (e.g., scalars). -type RefErrUnsupportedDetail struct { - Ref Ref `json:"ref"` // invalid ref - Pos int `json:"pos"` // invalid element - Have types.Type `json:"have"` // referenced type -} - -// Lines returns the string representation of the detail. 
-func (r *RefErrUnsupportedDetail) Lines() []string { - lines := []string{ - r.Ref.String(), - strings.Repeat("^", len(r.Ref[:r.Pos+1].String())), - fmt.Sprintf("have: %v", r.Have), - } - return lines -} +type RefErrUnsupportedDetail = v1.RefErrUnsupportedDetail // RefErrInvalidDetail describes an undefined reference error where the referenced // value does not support the reference operand (e.g., missing object key, // invalid key type, etc.) -type RefErrInvalidDetail struct { - Ref Ref `json:"ref"` // invalid ref - Pos int `json:"pos"` // invalid element - Have types.Type `json:"have,omitempty"` // type of invalid element (for var/ref elements) - Want types.Type `json:"want"` // allowed type (for non-object values) - OneOf []Value `json:"oneOf"` // allowed values (e.g., for object keys) -} - -// Lines returns the string representation of the detail. -func (r *RefErrInvalidDetail) Lines() []string { - lines := []string{r.Ref.String()} - offset := len(r.Ref[:r.Pos].String()) + 1 - pad := strings.Repeat(" ", offset) - lines = append(lines, fmt.Sprintf("%s^", pad)) - if r.Have != nil { - lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have)) - } else { - lines = append(lines, fmt.Sprintf("%shave: %v", pad, r.Ref[r.Pos])) - } - if len(r.OneOf) > 0 { - lines = append(lines, fmt.Sprintf("%swant (one of): %v", pad, r.OneOf)) - } else { - lines = append(lines, fmt.Sprintf("%swant (type): %v", pad, r.Want)) - } - return lines -} - -func formatArgs(args []types.Type) string { - buf := make([]string, len(args)) - for i := range args { - buf[i] = types.Sprint(args[i]) - } - return "(" + strings.Join(buf, ", ") + ")" -} - -func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, oneOf []Value) *Error { - err := newRefError(loc, ref) - err.Details = &RefErrInvalidDetail{ - Ref: ref, - Pos: idx, - Have: have, - Want: want, - OneOf: oneOf, - } - return err -} - -func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error { - err := newRefError(loc, ref) - err.Details = &RefErrUnsupportedDetail{ - Ref: ref, - Pos: idx, - Have: have, - } - return err -} - -func newRefError(loc *Location, ref Ref) *Error { - return NewError(TypeErr, loc, "undefined ref: %v", ref) -} - -func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type, want types.FuncArgs) *Error { - err := NewError(TypeErr, loc, "%v: %v", builtinName, msg) - err.Details = &ArgErrDetail{ - Have: have, - Want: want, - } - return err -} - -func getOneOfForNode(node *typeTreeNode) (result []Value) { - node.Children().Iter(func(k, _ util.T) bool { - result = append(result, k.(Value)) - return false - }) - - sortValueSlice(result) - return result -} - -func getOneOfForType(tpe types.Type) (result []Value) { - switch tpe := tpe.(type) { - case *types.Object: - for _, k := range tpe.Keys() { - v, err := InterfaceToValue(k) - if err != nil { - panic(err) - } - result = append(result, v) - } - - case types.Any: - for _, object := range tpe { - objRes := getOneOfForType(object) - result = append(result, objRes...) 
- } - } - - result = removeDuplicate(result) - sortValueSlice(result) - return result -} - -func sortValueSlice(sl []Value) { - sort.Slice(sl, func(i, j int) bool { - return sl[i].Compare(sl[j]) < 0 - }) -} - -func removeDuplicate(list []Value) []Value { - seen := make(map[Value]bool) - var newResult []Value - for _, item := range list { - if !seen[item] { - newResult = append(newResult, item) - seen[item] = true - } - } - return newResult -} - -func getArgTypes(env *TypeEnv, args []*Term) []types.Type { - pre := make([]types.Type, len(args)) - for i := range args { - pre[i] = env.Get(args[i]) - } - return pre -} - -// getPrefix returns the shortest prefix of ref that exists in env -func getPrefix(env *TypeEnv, ref Ref) (Ref, types.Type) { - if len(ref) == 1 { - t := env.Get(ref) - if t != nil { - return ref, t - } - } - for i := 1; i < len(ref); i++ { - t := env.Get(ref[:i]) - if t != nil { - return ref[:i], t - } - } - return nil, nil -} - -// override takes a type t and returns a type obtained from t where the path represented by ref within it has type o (overriding the original type of that path) -func override(ref Ref, t types.Type, o types.Type, rule *Rule) (types.Type, *Error) { - var newStaticProps []*types.StaticProperty - obj, ok := t.(*types.Object) - if !ok { - newType, err := getObjectType(ref, o, rule, types.NewDynamicProperty(types.A, types.A)) - if err != nil { - return nil, err - } - return newType, nil - } - found := false - if ok { - staticProps := obj.StaticProperties() - for _, prop := range staticProps { - valueCopy := prop.Value - key, err := InterfaceToValue(prop.Key) - if err != nil { - return nil, NewError(TypeErr, rule.Location, "unexpected error in override: %s", err.Error()) - } - if len(ref) > 0 && ref[0].Value.Compare(key) == 0 { - found = true - if len(ref) == 1 { - valueCopy = o - } else { - newVal, err := override(ref[1:], valueCopy, o, rule) - if err != nil { - return nil, err - } - valueCopy = newVal - } - } - newStaticProps = append(newStaticProps, types.NewStaticProperty(prop.Key, valueCopy)) - } - } - - // ref[0] is not a top-level key in staticProps, so it must be added - if !found { - newType, err := getObjectType(ref, o, rule, obj.DynamicProperties()) - if err != nil { - return nil, err - } - newStaticProps = append(newStaticProps, newType.StaticProperties()...) 
- } - return types.NewObject(newStaticProps, obj.DynamicProperties()), nil -} - -func getKeys(ref Ref, rule *Rule) ([]interface{}, *Error) { - keys := []interface{}{} - for _, refElem := range ref { - key, err := JSON(refElem.Value) - if err != nil { - return nil, NewError(TypeErr, rule.Location, "error getting key from value: %s", err.Error()) - } - keys = append(keys, key) - } - return keys, nil -} - -func getObjectTypeRec(keys []interface{}, o types.Type, d *types.DynamicProperty) *types.Object { - if len(keys) == 1 { - staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], o)} - return types.NewObject(staticProps, d) - } - - staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], getObjectTypeRec(keys[1:], o, d))} - return types.NewObject(staticProps, d) -} - -func getObjectType(ref Ref, o types.Type, rule *Rule, d *types.DynamicProperty) (*types.Object, *Error) { - keys, err := getKeys(ref, rule) - if err != nil { - return nil, err - } - return getObjectTypeRec(keys, o, d), nil -} - -func getRuleAnnotation(as *AnnotationSet, rule *Rule) (result []*SchemaAnnotation) { - - for _, x := range as.GetSubpackagesScope(rule.Module.Package.Path) { - result = append(result, x.Schemas...) - } - - if x := as.GetPackageScope(rule.Module.Package); x != nil { - result = append(result, x.Schemas...) - } - - if x := as.GetDocumentScope(rule.Ref().GroundPrefix()); x != nil { - result = append(result, x.Schemas...) - } - - for _, x := range as.GetRuleScope(rule) { - result = append(result, x.Schemas...) - } - - return result -} - -func processAnnotation(ss *SchemaSet, annot *SchemaAnnotation, rule *Rule, allowNet []string) (Ref, types.Type, *Error) { - - var schema interface{} - - if annot.Schema != nil { - if ss == nil { - return nil, nil, nil - } - schema = ss.Get(annot.Schema) - if schema == nil { - return nil, nil, NewError(TypeErr, rule.Location, "undefined schema: %v", annot.Schema) - } - } else if annot.Definition != nil { - schema = *annot.Definition - } - - tpe, err := loadSchema(schema, allowNet) - if err != nil { - return nil, nil, NewError(TypeErr, rule.Location, err.Error()) - } - - return annot.Path, tpe, nil -} - -func errAnnotationRedeclared(a *Annotations, other *Location) *Error { - return NewError(TypeErr, a.Location, "%v annotation redeclared: %v", a.Scope, other) -} +type RefErrInvalidDetail = v1.RefErrInvalidDetail diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/compare.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/compare.go index 3bb6f2a75..d36078e33 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/compare.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/compare.go @@ -5,9 +5,7 @@ package ast import ( - "encoding/json" - "fmt" - "math/big" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Compare returns an integer indicating whether two AST values are less than, @@ -37,360 +35,5 @@ import ( // is empty. // Other comparisons are consistent but not defined. 
func Compare(a, b interface{}) int { - - if t, ok := a.(*Term); ok { - if t == nil { - a = nil - } else { - a = t.Value - } - } - - if t, ok := b.(*Term); ok { - if t == nil { - b = nil - } else { - b = t.Value - } - } - - if a == nil { - if b == nil { - return 0 - } - return -1 - } - if b == nil { - return 1 - } - - sortA := sortOrder(a) - sortB := sortOrder(b) - - if sortA < sortB { - return -1 - } else if sortB < sortA { - return 1 - } - - switch a := a.(type) { - case Null: - return 0 - case Boolean: - b := b.(Boolean) - if a.Equal(b) { - return 0 - } - if !a { - return -1 - } - return 1 - case Number: - if ai, err := json.Number(a).Int64(); err == nil { - if bi, err := json.Number(b.(Number)).Int64(); err == nil { - if ai == bi { - return 0 - } - if ai < bi { - return -1 - } - return 1 - } - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var bigA, bigB *big.Rat - fa, ok := new(big.Float).SetString(string(a)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - bigA = new(big.Rat).SetInt64(0) - } - } - if bigA == nil { - bigA, ok = new(big.Rat).SetString(string(a)) - if !ok { - panic("illegal value") - } - } - - fb, ok := new(big.Float).SetString(string(b.(Number))) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - bigB = new(big.Rat).SetInt64(0) - } - } - if bigB == nil { - bigB, ok = new(big.Rat).SetString(string(b.(Number))) - if !ok { - panic("illegal value") - } - } - - return bigA.Cmp(bigB) - case String: - b := b.(String) - if a.Equal(b) { - return 0 - } - if a < b { - return -1 - } - return 1 - case Var: - b := b.(Var) - if a.Equal(b) { - return 0 - } - if a < b { - return -1 - } - return 1 - case Ref: - b := b.(Ref) - return termSliceCompare(a, b) - case *Array: - b := b.(*Array) - return termSliceCompare(a.elems, b.elems) - case *lazyObj: - return Compare(a.force(), b) - case *object: - if x, ok := b.(*lazyObj); ok { - b = x.force() - } - b := b.(*object) - return a.Compare(b) - case Set: - b := b.(Set) - return a.Compare(b) - case *ArrayComprehension: - b := b.(*ArrayComprehension) - if cmp := Compare(a.Term, b.Term); cmp != 0 { - return cmp - } - return Compare(a.Body, b.Body) - case *ObjectComprehension: - b := b.(*ObjectComprehension) - if cmp := Compare(a.Key, b.Key); cmp != 0 { - return cmp - } - if cmp := Compare(a.Value, b.Value); cmp != 0 { - return cmp - } - return Compare(a.Body, b.Body) - case *SetComprehension: - b := b.(*SetComprehension) - if cmp := Compare(a.Term, b.Term); cmp != 0 { - return cmp - } - return Compare(a.Body, b.Body) - case Call: - b := b.(Call) - return termSliceCompare(a, b) - case *Expr: - b := b.(*Expr) - return a.Compare(b) - case *SomeDecl: - b := b.(*SomeDecl) - return a.Compare(b) - case *Every: - b := b.(*Every) - return a.Compare(b) - case *With: - b := b.(*With) - return a.Compare(b) - case Body: - b := b.(Body) - return a.Compare(b) - case *Head: - b := b.(*Head) - return a.Compare(b) - case *Rule: - b := b.(*Rule) - return a.Compare(b) - case Args: - b := b.(Args) - return termSliceCompare(a, b) - case *Import: - b := b.(*Import) 
- return a.Compare(b) - case *Package: - b := b.(*Package) - return a.Compare(b) - case *Annotations: - b := b.(*Annotations) - return a.Compare(b) - case *Module: - b := b.(*Module) - return a.Compare(b) - } - panic(fmt.Sprintf("illegal value: %T", a)) -} - -type termSlice []*Term - -func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 } -func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } -func (s termSlice) Len() int { return len(s) } - -func sortOrder(x interface{}) int { - switch x.(type) { - case Null: - return 0 - case Boolean: - return 1 - case Number: - return 2 - case String: - return 3 - case Var: - return 4 - case Ref: - return 5 - case *Array: - return 6 - case Object: - return 7 - case Set: - return 8 - case *ArrayComprehension: - return 9 - case *ObjectComprehension: - return 10 - case *SetComprehension: - return 11 - case Call: - return 12 - case Args: - return 13 - case *Expr: - return 100 - case *SomeDecl: - return 101 - case *Every: - return 102 - case *With: - return 110 - case *Head: - return 120 - case Body: - return 200 - case *Rule: - return 1000 - case *Import: - return 1001 - case *Package: - return 1002 - case *Annotations: - return 1003 - case *Module: - return 10000 - } - panic(fmt.Sprintf("illegal value: %T", x)) -} - -func importsCompare(a, b []*Import) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } - if len(b) < len(a) { - return 1 - } - return 0 -} - -func annotationsCompare(a, b []*Annotations) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } - if len(b) < len(a) { - return 1 - } - return 0 -} - -func rulesCompare(a, b []*Rule) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := a[i].Compare(b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } - if len(b) < len(a) { - return 1 - } - return 0 -} - -func termSliceCompare(a, b []*Term) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } else if len(b) < len(a) { - return 1 - } - return 0 -} - -func withSliceCompare(a, b []*With) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } else if len(b) < len(a) { - return 1 - } - return 0 + return v1.Compare(a, b) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/compile.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/compile.go index c59cfede6..5a3daa910 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/compile.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/compile.go @@ -5,5865 +5,123 @@ package ast import ( - "errors" - "fmt" - "io" - "sort" - "strconv" - "strings" - - "github.com/open-policy-agent/opa/ast/location" - "github.com/open-policy-agent/opa/internal/debug" - "github.com/open-policy-agent/opa/internal/gojsonschema" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + 
v1 "github.com/open-policy-agent/opa/v1/ast" ) // CompileErrorLimitDefault is the default number errors a compiler will allow before // exiting. const CompileErrorLimitDefault = 10 -var errLimitReached = NewError(CompileErr, nil, "error limit reached") - // Compiler contains the state of a compilation process. -type Compiler struct { - - // Errors contains errors that occurred during the compilation process. - // If there are one or more errors, the compilation process is considered - // "failed". - Errors Errors - - // Modules contains the compiled modules. The compiled modules are the - // output of the compilation process. If the compilation process failed, - // there is no guarantee about the state of the modules. - Modules map[string]*Module - - // ModuleTree organizes the modules into a tree where each node is keyed by - // an element in the module's package path. E.g., given modules containing - // the following package directives: "a", "a.b", "a.c", and "a.b", the - // resulting module tree would be: - // - // root - // | - // +--- data (no modules) - // | - // +--- a (1 module) - // | - // +--- b (2 modules) - // | - // +--- c (1 module) - // - ModuleTree *ModuleTreeNode - - // RuleTree organizes rules into a tree where each node is keyed by an - // element in the rule's path. The rule path is the concatenation of the - // containing package and the stringified rule name. E.g., given the - // following module: - // - // package ex - // p[1] { true } - // p[2] { true } - // q = true - // a.b.c = 3 - // - // root - // | - // +--- data (no rules) - // | - // +--- ex (no rules) - // | - // +--- p (2 rules) - // | - // +--- q (1 rule) - // | - // +--- a - // | - // +--- b - // | - // +--- c (1 rule) - // - // Another example with general refs containing vars at arbitrary locations: - // - // package ex - // a.b[x].d { x := "c" } # R1 - // a.b.c[x] { x := "d" } # R2 - // a.b[x][y] { x := "c"; y := "d" } # R3 - // p := true # R4 - // - // root - // | - // +--- data (no rules) - // | - // +--- ex (no rules) - // | - // +--- a - // | | - // | +--- b (R1, R3) - // | | - // | +--- c (R2) - // | - // +--- p (R4) - RuleTree *TreeNode - - // Graph contains dependencies between rules. An edge (u,v) is added to the - // graph if rule 'u' refers to the virtual document defined by 'v'. - Graph *Graph - - // TypeEnv holds type information for values inferred by the compiler. - TypeEnv *TypeEnv - - // RewrittenVars is a mapping of variables that have been rewritten - // with the key being the generated name and value being the original. - RewrittenVars map[Var]Var - - // Capabliities required by the modules that were compiled. 
- Required *Capabilities - - localvargen *localVarGenerator - moduleLoader ModuleLoader - ruleIndices *util.HashMap - stages []stage - maxErrs int - sorted []string // list of sorted module names - pathExists func([]string) (bool, error) - after map[string][]CompilerStageDefinition - metrics metrics.Metrics - capabilities *Capabilities // user-supplied capabilities - imports map[string][]*Import // saved imports from stripping - builtins map[string]*Builtin // universe of built-in functions - customBuiltins map[string]*Builtin // user-supplied custom built-in functions (deprecated: use capabilities) - unsafeBuiltinsMap map[string]struct{} // user-supplied set of unsafe built-ins functions to block (deprecated: use capabilities) - deprecatedBuiltinsMap map[string]struct{} // set of deprecated, but not removed, built-in functions - enablePrintStatements bool // indicates if print statements should be elided (default) - comprehensionIndices map[*Term]*ComprehensionIndex // comprehension key index - initialized bool // indicates if init() has been called - debug debug.Debug // emits debug information produced during compilation - schemaSet *SchemaSet // user-supplied schemas for input and data documents - inputType types.Type // global input type retrieved from schema set - annotationSet *AnnotationSet // hierarchical set of annotations - strict bool // enforce strict compilation checks - keepModules bool // whether to keep the unprocessed, parse modules (below) - parsedModules map[string]*Module // parsed, but otherwise unprocessed modules, kept track of when keepModules is true - useTypeCheckAnnotations bool // whether to provide annotated information (schemas) to the type checker - allowUndefinedFuncCalls bool // don't error on calls to unknown functions. - evalMode CompilerEvalMode // - rewriteTestRulesForTracing bool // rewrite test rules to capture dynamic values for tracing. -} +type Compiler = v1.Compiler // CompilerStage defines the interface for stages in the compiler. -type CompilerStage func(*Compiler) *Error +type CompilerStage = v1.CompilerStage // CompilerEvalMode allows toggling certain stages that are only // needed for certain modes, Concretely, only "topdown" mode will // have the compiler build comprehension and rule indices. -type CompilerEvalMode int +type CompilerEvalMode = v1.CompilerEvalMode const ( // EvalModeTopdown (default) instructs the compiler to build rule // and comprehension indices used by topdown evaluation. - EvalModeTopdown CompilerEvalMode = iota + EvalModeTopdown = v1.EvalModeTopdown // EvalModeIR makes the compiler skip the stages for comprehension // and rule indices. - EvalModeIR + EvalModeIR = v1.EvalModeIR ) // CompilerStageDefinition defines a compiler stage -type CompilerStageDefinition struct { - Name string - MetricName string - Stage CompilerStage -} +type CompilerStageDefinition = v1.CompilerStageDefinition // RulesOptions defines the options for retrieving rules by Ref from the // compiler. -type RulesOptions struct { - // IncludeHiddenModules determines if the result contains hidden modules, - // currently only the "system" namespace, i.e. "data.system.*". - IncludeHiddenModules bool -} +type RulesOptions = v1.RulesOptions // QueryContext contains contextual information for running an ad-hoc query. // // Ad-hoc queries can be run in the context of a package and imports may be // included to provide concise access to data. 
-type QueryContext struct { - Package *Package - Imports []*Import -} +type QueryContext = v1.QueryContext // NewQueryContext returns a new QueryContext object. func NewQueryContext() *QueryContext { - return &QueryContext{} -} - -// WithPackage sets the pkg on qc. -func (qc *QueryContext) WithPackage(pkg *Package) *QueryContext { - if qc == nil { - qc = NewQueryContext() - } - qc.Package = pkg - return qc -} - -// WithImports sets the imports on qc. -func (qc *QueryContext) WithImports(imports []*Import) *QueryContext { - if qc == nil { - qc = NewQueryContext() - } - qc.Imports = imports - return qc -} - -// Copy returns a deep copy of qc. -func (qc *QueryContext) Copy() *QueryContext { - if qc == nil { - return nil - } - cpy := *qc - if cpy.Package != nil { - cpy.Package = qc.Package.Copy() - } - cpy.Imports = make([]*Import, len(qc.Imports)) - for i := range qc.Imports { - cpy.Imports[i] = qc.Imports[i].Copy() - } - return &cpy + return v1.NewQueryContext() } // QueryCompiler defines the interface for compiling ad-hoc queries. -type QueryCompiler interface { - - // Compile should be called to compile ad-hoc queries. The return value is - // the compiled version of the query. - Compile(q Body) (Body, error) - - // TypeEnv returns the type environment built after running type checking - // on the query. - TypeEnv() *TypeEnv - - // WithContext sets the QueryContext on the QueryCompiler. Subsequent calls - // to Compile will take the QueryContext into account. - WithContext(qctx *QueryContext) QueryCompiler - - // WithEnablePrintStatements enables print statements in queries compiled - // with the QueryCompiler. - WithEnablePrintStatements(yes bool) QueryCompiler - - // WithUnsafeBuiltins sets the built-in functions to treat as unsafe and not - // allow inside of queries. By default the query compiler inherits the - // compiler's unsafe built-in functions. This function allows callers to - // override that set. If an empty (non-nil) map is provided, all built-ins - // are allowed. - WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler - - // WithStageAfter registers a stage to run during query compilation after - // the named stage. - WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler - - // RewrittenVars maps generated vars in the compiled query to vars from the - // parsed query. For example, given the query "input := 1" the rewritten - // query would be "__local0__ = 1". The mapping would then be {__local0__: input}. - RewrittenVars() map[Var]Var - - // ComprehensionIndex returns an index data structure for the given comprehension - // term. If no index is found, returns nil. - ComprehensionIndex(term *Term) *ComprehensionIndex - - // WithStrict enables strict mode for the query compiler. - WithStrict(strict bool) QueryCompiler -} +type QueryCompiler = v1.QueryCompiler // QueryCompilerStage defines the interface for stages in the query compiler. -type QueryCompilerStage func(QueryCompiler, Body) (Body, error) +type QueryCompilerStage = v1.QueryCompilerStage // QueryCompilerStageDefinition defines a QueryCompiler stage -type QueryCompilerStageDefinition struct { - Name string - MetricName string - Stage QueryCompilerStage -} - -type stage struct { - name string - metricName string - f func() -} +type QueryCompilerStageDefinition = v1.QueryCompilerStageDefinition // NewCompiler returns a new empty compiler. 
func NewCompiler() *Compiler { - - c := &Compiler{ - Modules: map[string]*Module{}, - RewrittenVars: map[Var]Var{}, - Required: &Capabilities{}, - ruleIndices: util.NewHashMap(func(a, b util.T) bool { - r1, r2 := a.(Ref), b.(Ref) - return r1.Equal(r2) - }, func(x util.T) int { - return x.(Ref).Hash() - }), - maxErrs: CompileErrorLimitDefault, - after: map[string][]CompilerStageDefinition{}, - unsafeBuiltinsMap: map[string]struct{}{}, - deprecatedBuiltinsMap: map[string]struct{}{}, - comprehensionIndices: map[*Term]*ComprehensionIndex{}, - debug: debug.Discard(), - } - - c.ModuleTree = NewModuleTree(nil) - c.RuleTree = NewRuleTree(c.ModuleTree) - - c.stages = []stage{ - // Reference resolution should run first as it may be used to lazily - // load additional modules. If any stages run before resolution, they - // need to be re-run after resolution. - {"ResolveRefs", "compile_stage_resolve_refs", c.resolveAllRefs}, - // The local variable generator must be initialized after references are - // resolved and the dynamic module loader has run but before subsequent - // stages that need to generate variables. - {"InitLocalVarGen", "compile_stage_init_local_var_gen", c.initLocalVarGen}, - {"RewriteRuleHeadRefs", "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs}, - {"CheckKeywordOverrides", "compile_stage_check_keyword_overrides", c.checkKeywordOverrides}, - {"CheckDuplicateImports", "compile_stage_check_duplicate_imports", c.checkDuplicateImports}, - {"RemoveImports", "compile_stage_remove_imports", c.removeImports}, - {"SetModuleTree", "compile_stage_set_module_tree", c.setModuleTree}, - {"SetRuleTree", "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs - {"RewriteLocalVars", "compile_stage_rewrite_local_vars", c.rewriteLocalVars}, - {"CheckVoidCalls", "compile_stage_check_void_calls", c.checkVoidCalls}, - {"RewritePrintCalls", "compile_stage_rewrite_print_calls", c.rewritePrintCalls}, - {"RewriteExprTerms", "compile_stage_rewrite_expr_terms", c.rewriteExprTerms}, - {"ParseMetadataBlocks", "compile_stage_parse_metadata_blocks", c.parseMetadataBlocks}, - {"SetAnnotationSet", "compile_stage_set_annotationset", c.setAnnotationSet}, - {"RewriteRegoMetadataCalls", "compile_stage_rewrite_rego_metadata_calls", c.rewriteRegoMetadataCalls}, - {"SetGraph", "compile_stage_set_graph", c.setGraph}, - {"RewriteComprehensionTerms", "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms}, - {"RewriteRefsInHead", "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead}, - {"RewriteWithValues", "compile_stage_rewrite_with_values", c.rewriteWithModifiers}, - {"CheckRuleConflicts", "compile_stage_check_rule_conflicts", c.checkRuleConflicts}, - {"CheckUndefinedFuncs", "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs}, - {"CheckSafetyRuleHeads", "compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads}, - {"CheckSafetyRuleBodies", "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies}, - {"RewriteEquals", "compile_stage_rewrite_equals", c.rewriteEquals}, - {"RewriteDynamicTerms", "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms}, - {"RewriteTestRulesForTracing", "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms - {"CheckRecursion", "compile_stage_check_recursion", c.checkRecursion}, - {"CheckTypes", "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion - {"CheckUnsafeBuiltins", "compile_state_check_unsafe_builtins", 
c.checkUnsafeBuiltins}, - {"CheckDeprecatedBuiltins", "compile_state_check_deprecated_builtins", c.checkDeprecatedBuiltins}, - {"BuildRuleIndices", "compile_stage_rebuild_indices", c.buildRuleIndices}, - {"BuildComprehensionIndices", "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices}, - {"BuildRequiredCapabilities", "compile_stage_build_required_capabilities", c.buildRequiredCapabilities}, - } - - return c -} - -// SetErrorLimit sets the number of errors the compiler can encounter before it -// quits. Zero or a negative number indicates no limit. -func (c *Compiler) SetErrorLimit(limit int) *Compiler { - c.maxErrs = limit - return c -} - -// WithEnablePrintStatements enables print statements inside of modules compiled -// by the compiler. If print statements are not enabled, calls to print() are -// erased at compile-time. -func (c *Compiler) WithEnablePrintStatements(yes bool) *Compiler { - c.enablePrintStatements = yes - return c -} - -// WithPathConflictsCheck enables base-virtual document conflict -// detection. The compiler will check that rules don't overlap with -// paths that exist as determined by the provided callable. -func (c *Compiler) WithPathConflictsCheck(fn func([]string) (bool, error)) *Compiler { - c.pathExists = fn - return c -} - -// WithStageAfter registers a stage to run during compilation after -// the named stage. -func (c *Compiler) WithStageAfter(after string, stage CompilerStageDefinition) *Compiler { - c.after[after] = append(c.after[after], stage) - return c -} - -// WithMetrics will set a metrics.Metrics and be used for profiling -// the Compiler instance. -func (c *Compiler) WithMetrics(metrics metrics.Metrics) *Compiler { - c.metrics = metrics - return c -} - -// WithCapabilities sets capabilities to enable during compilation. Capabilities allow the caller -// to specify the set of built-in functions available to the policy. In the future, capabilities -// may be able to restrict access to other language features. Capabilities allow callers to check -// if policies are compatible with a particular version of OPA. If policies are a compiled for a -// specific version of OPA, there is no guarantee that _this_ version of OPA can evaluate them -// successfully. -func (c *Compiler) WithCapabilities(capabilities *Capabilities) *Compiler { - c.capabilities = capabilities - return c -} - -// Capabilities returns the capabilities enabled during compilation. -func (c *Compiler) Capabilities() *Capabilities { - return c.capabilities -} - -// WithDebug sets where debug messages are written to. Passing `nil` has no -// effect. -func (c *Compiler) WithDebug(sink io.Writer) *Compiler { - if sink != nil { - c.debug = debug.New(sink) - } - return c -} - -// WithBuiltins is deprecated. Use WithCapabilities instead. -func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler { - c.customBuiltins = make(map[string]*Builtin) - for k, v := range builtins { - c.customBuiltins[k] = v - } - return c -} - -// WithUnsafeBuiltins is deprecated. Use WithCapabilities instead. -func (c *Compiler) WithUnsafeBuiltins(unsafeBuiltins map[string]struct{}) *Compiler { - for name := range unsafeBuiltins { - c.unsafeBuiltinsMap[name] = struct{}{} - } - return c -} - -// WithStrict enables strict mode in the compiler. -func (c *Compiler) WithStrict(strict bool) *Compiler { - c.strict = strict - return c -} - -// WithKeepModules enables retaining unprocessed modules in the compiler. 
-// Note that the modules aren't copied on the way in or out -- so when -// accessing them via ParsedModules(), mutations will occur in the module -// map that was passed into Compile().` -func (c *Compiler) WithKeepModules(y bool) *Compiler { - c.keepModules = y - return c -} - -// WithUseTypeCheckAnnotations use schema annotations during type checking -func (c *Compiler) WithUseTypeCheckAnnotations(enabled bool) *Compiler { - c.useTypeCheckAnnotations = enabled - return c -} - -func (c *Compiler) WithAllowUndefinedFunctionCalls(allow bool) *Compiler { - c.allowUndefinedFuncCalls = allow - return c -} - -// WithEvalMode allows setting the CompilerEvalMode of the compiler -func (c *Compiler) WithEvalMode(e CompilerEvalMode) *Compiler { - c.evalMode = e - return c -} - -// WithRewriteTestRules enables rewriting test rules to capture dynamic values in local variables, -// so they can be accessed by tracing. -func (c *Compiler) WithRewriteTestRules(rewrite bool) *Compiler { - c.rewriteTestRulesForTracing = rewrite - return c -} - -// ParsedModules returns the parsed, unprocessed modules from the compiler. -// It is `nil` if keeping modules wasn't enabled via `WithKeepModules(true)`. -// The map includes all modules loaded via the ModuleLoader, if one was used. -func (c *Compiler) ParsedModules() map[string]*Module { - return c.parsedModules -} - -func (c *Compiler) QueryCompiler() QueryCompiler { - c.init() - c0 := *c - return newQueryCompiler(&c0) -} - -// Compile runs the compilation process on the input modules. The compiled -// version of the modules and associated data structures are stored on the -// compiler. If the compilation process fails for any reason, the compiler will -// contain a slice of errors. -func (c *Compiler) Compile(modules map[string]*Module) { - - c.init() - - c.Modules = make(map[string]*Module, len(modules)) - c.sorted = make([]string, 0, len(modules)) - - if c.keepModules { - c.parsedModules = make(map[string]*Module, len(modules)) - } else { - c.parsedModules = nil - } - - for k, v := range modules { - c.Modules[k] = v.Copy() - c.sorted = append(c.sorted, k) - if c.parsedModules != nil { - c.parsedModules[k] = v - } - } - - sort.Strings(c.sorted) - - c.compile() -} - -// WithSchemas sets a schemaSet to the compiler -func (c *Compiler) WithSchemas(schemas *SchemaSet) *Compiler { - c.schemaSet = schemas - return c -} - -// Failed returns true if a compilation error has been encountered. -func (c *Compiler) Failed() bool { - return len(c.Errors) > 0 -} - -// ComprehensionIndex returns a data structure specifying how to index comprehension -// results so that callers do not have to recompute the comprehension more than once. -// If no index is found, returns nil. -func (c *Compiler) ComprehensionIndex(term *Term) *ComprehensionIndex { - return c.comprehensionIndices[term] -} - -// GetArity returns the number of args a function referred to by ref takes. If -// ref refers to built-in function, the built-in declaration is consulted, -// otherwise, the ref is used to perform a ruleset lookup. -func (c *Compiler) GetArity(ref Ref) int { - if bi := c.builtins[ref.String()]; bi != nil { - return len(bi.Decl.FuncArgs().Args) - } - rules := c.GetRulesExact(ref) - if len(rules) == 0 { - return -1 - } - return len(rules[0].Head.Args) -} - -// GetRulesExact returns a slice of rules referred to by the reference. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[k] = v { ... } # rule1 -// p[k1] = v1 { ... 
} # rule2 -// -// The following calls yield the rules on the right. -// -// GetRulesExact("data.a.b.c.p") => [rule1, rule2] -// GetRulesExact("data.a.b.c.p.x") => nil -// GetRulesExact("data.a.b.c") => nil -func (c *Compiler) GetRulesExact(ref Ref) (rules []*Rule) { - node := c.RuleTree - - for _, x := range ref { - if node = node.Child(x.Value); node == nil { - return nil - } - } - - return extractRules(node.Values) -} - -// GetRulesForVirtualDocument returns a slice of rules that produce the virtual -// document referred to by the reference. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[k] = v { ... } # rule1 -// p[k1] = v1 { ... } # rule2 -// -// The following calls yield the rules on the right. -// -// GetRulesForVirtualDocument("data.a.b.c.p") => [rule1, rule2] -// GetRulesForVirtualDocument("data.a.b.c.p.x") => [rule1, rule2] -// GetRulesForVirtualDocument("data.a.b.c") => nil -func (c *Compiler) GetRulesForVirtualDocument(ref Ref) (rules []*Rule) { - - node := c.RuleTree - - for _, x := range ref { - if node = node.Child(x.Value); node == nil { - return nil - } - if len(node.Values) > 0 { - return extractRules(node.Values) - } - } - - return extractRules(node.Values) -} - -// GetRulesWithPrefix returns a slice of rules that share the prefix ref. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[x] = y { ... } # rule1 -// p[k] = v { ... } # rule2 -// q { ... } # rule3 -// -// The following calls yield the rules on the right. -// -// GetRulesWithPrefix("data.a.b.c.p") => [rule1, rule2] -// GetRulesWithPrefix("data.a.b.c.p.a") => nil -// GetRulesWithPrefix("data.a.b.c") => [rule1, rule2, rule3] -func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) { - - node := c.RuleTree - - for _, x := range ref { - if node = node.Child(x.Value); node == nil { - return nil - } - } - - var acc func(node *TreeNode) - - acc = func(node *TreeNode) { - rules = append(rules, extractRules(node.Values)...) - for _, child := range node.Children { - if child.Hide { - continue - } - acc(child) - } - } - - acc(node) - - return rules -} - -func extractRules(s []util.T) []*Rule { - rules := make([]*Rule, len(s)) - for i := range s { - rules[i] = s[i].(*Rule) - } - return rules -} - -// GetRules returns a slice of rules that are referred to by ref. -// -// E.g., given the following module: -// -// package a.b.c -// -// p[x] = y { q[x] = y; ... } # rule1 -// q[x] = y { ... } # rule2 -// -// The following calls yield the rules on the right. -// -// GetRules("data.a.b.c.p") => [rule1] -// GetRules("data.a.b.c.p.x") => [rule1] -// GetRules("data.a.b.c.q") => [rule2] -// GetRules("data.a.b.c") => [rule1, rule2] -// GetRules("data.a.b.d") => nil -func (c *Compiler) GetRules(ref Ref) (rules []*Rule) { - - set := map[*Rule]struct{}{} - - for _, rule := range c.GetRulesForVirtualDocument(ref) { - set[rule] = struct{}{} - } - - for _, rule := range c.GetRulesWithPrefix(ref) { - set[rule] = struct{}{} - } - - for rule := range set { - rules = append(rules, rule) - } - - return rules -} - -// GetRulesDynamic returns a slice of rules that could be referred to by a ref. -// -// Deprecated: use GetRulesDynamicWithOpts -func (c *Compiler) GetRulesDynamic(ref Ref) []*Rule { - return c.GetRulesDynamicWithOpts(ref, RulesOptions{}) -} - -// GetRulesDynamicWithOpts returns a slice of rules that could be referred to by -// a ref. 
-// When parts of the ref are statically known, we use that information to narrow -// down which rules the ref could refer to, but in the most general case this -// will be an over-approximation. -// -// E.g., given the following modules: -// -// package a.b.c -// -// r1 = 1 # rule1 -// -// and: -// -// package a.d.c -// -// r2 = 2 # rule2 -// -// The following calls yield the rules on the right. -// -// GetRulesDynamicWithOpts("data.a[x].c[y]", opts) => [rule1, rule2] -// GetRulesDynamicWithOpts("data.a[x].c.r2", opts) => [rule2] -// GetRulesDynamicWithOpts("data.a.b[x][y]", opts) => [rule1] -// -// Using the RulesOptions parameter, the inclusion of hidden modules can be -// controlled: -// -// With -// -// package system.main -// -// r3 = 3 # rule3 -// -// We'd get this result: -// -// GetRulesDynamicWithOpts("data[x]", RulesOptions{IncludeHiddenModules: true}) => [rule1, rule2, rule3] -// -// Without the options, it would be excluded. -func (c *Compiler) GetRulesDynamicWithOpts(ref Ref, opts RulesOptions) []*Rule { - node := c.RuleTree - - set := map[*Rule]struct{}{} - var walk func(node *TreeNode, i int) - walk = func(node *TreeNode, i int) { - switch { - case i >= len(ref): - // We've reached the end of the reference and want to collect everything - // under this "prefix". - node.DepthFirst(func(descendant *TreeNode) bool { - insertRules(set, descendant.Values) - if opts.IncludeHiddenModules { - return false - } - return descendant.Hide - }) - - case i == 0 || IsConstant(ref[i].Value): - // The head of the ref is always grounded. In case another part of the - // ref is also grounded, we can lookup the exact child. If it's not found - // we can immediately return... - if child := node.Child(ref[i].Value); child != nil { - if len(child.Values) > 0 { - // Add any rules at this position - insertRules(set, child.Values) - } - // There might still be "sub-rules" contributing key-value "overrides" for e.g. partial object rules, continue walking - walk(child, i+1) - } else { - return - } - - default: - // This part of the ref is a dynamic term. We can't know what it refers - // to and will just need to try all of the children. - for _, child := range node.Children { - if child.Hide && !opts.IncludeHiddenModules { - continue - } - insertRules(set, child.Values) - walk(child, i+1) - } - } - } - - walk(node, 0) - rules := make([]*Rule, 0, len(set)) - for rule := range set { - rules = append(rules, rule) - } - return rules -} - -// Utility: add all rule values to the set. -func insertRules(set map[*Rule]struct{}, rules []util.T) { - for _, rule := range rules { - set[rule.(*Rule)] = struct{}{} - } -} - -// RuleIndex returns a RuleIndex built for the rule set referred to by path. -// The path must refer to the rule set exactly, i.e., given a rule set at path -// data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a -// RuleIndex built for the rule. 
-func (c *Compiler) RuleIndex(path Ref) RuleIndex { - r, ok := c.ruleIndices.Get(path) - if !ok { - return nil - } - return r.(RuleIndex) -} - -// PassesTypeCheck determines whether the given body passes type checking -func (c *Compiler) PassesTypeCheck(body Body) bool { - checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType) - env := c.TypeEnv - _, errs := checker.CheckBody(env, body) - return len(errs) == 0 -} - -// PassesTypeCheckRules determines whether the given rules passes type checking -func (c *Compiler) PassesTypeCheckRules(rules []*Rule) Errors { - elems := []util.T{} - - for _, rule := range rules { - elems = append(elems, rule) - } - - // Load the global input schema if one was provided. - if c.schemaSet != nil { - if schema := c.schemaSet.Get(SchemaRootRef); schema != nil { - - var allowNet []string - if c.capabilities != nil { - allowNet = c.capabilities.AllowNet - } - - tpe, err := loadSchema(schema, allowNet) - if err != nil { - return Errors{NewError(TypeErr, nil, err.Error())} - } - c.inputType = tpe - } - } - - var as *AnnotationSet - if c.useTypeCheckAnnotations { - as = c.annotationSet - } - - checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType) - - if c.TypeEnv == nil { - if c.capabilities == nil { - c.capabilities = CapabilitiesForThisVersion() - } - - c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins)) - - for _, bi := range c.capabilities.Builtins { - c.builtins[bi.Name] = bi - } - - for name, bi := range c.customBuiltins { - c.builtins[name] = bi - } - - c.TypeEnv = checker.Env(c.builtins) - } - - _, errs := checker.CheckTypes(c.TypeEnv, elems, as) - return errs + return v1.NewCompiler().WithDefaultRegoVersion(DefaultRegoVersion) } // ModuleLoader defines the interface that callers can implement to enable lazy // loading of modules during compilation. -type ModuleLoader func(resolved map[string]*Module) (parsed map[string]*Module, err error) - -// WithModuleLoader sets f as the ModuleLoader on the compiler. -// -// The compiler will invoke the ModuleLoader after resolving all references in -// the current set of input modules. The ModuleLoader can return a new -// collection of parsed modules that are to be included in the compilation -// process. This process will repeat until the ModuleLoader returns an empty -// collection or an error. If an error is returned, compilation will stop -// immediately. -func (c *Compiler) WithModuleLoader(f ModuleLoader) *Compiler { - c.moduleLoader = f - return c -} - -func (c *Compiler) counterAdd(name string, n uint64) { - if c.metrics == nil { - return - } - c.metrics.Counter(name).Add(n) -} - -func (c *Compiler) buildRuleIndices() { - - c.RuleTree.DepthFirst(func(node *TreeNode) bool { - if len(node.Values) == 0 { - return false - } - rules := extractRules(node.Values) - hasNonGroundRef := false - for _, r := range rules { - hasNonGroundRef = !r.Head.Ref().IsGround() - } - if hasNonGroundRef { - // Collect children to ensure that all rules within the extent of a rule with a general ref - // are found on the same index. E.g. the following rules should be indexed under data.a.b.c: - // - // package a - // b.c[x].e := 1 { x := input.x } - // b.c.d := 2 - // b.c.d2.e[x] := 3 { x := input.x } - for _, child := range node.Children { - child.DepthFirst(func(c *TreeNode) bool { - rules = append(rules, extractRules(c.Values)...) 
- return false - }) - } - } - - index := newBaseDocEqIndex(func(ref Ref) bool { - return isVirtual(c.RuleTree, ref.GroundPrefix()) - }) - if index.Build(rules) { - c.ruleIndices.Put(rules[0].Ref().GroundPrefix(), index) - } - return hasNonGroundRef // currently, we don't allow those branches to go deeper - }) - -} - -func (c *Compiler) buildComprehensionIndices() { - for _, name := range c.sorted { - WalkRules(c.Modules[name], func(r *Rule) bool { - candidates := r.Head.Args.Vars() - candidates.Update(ReservedVars) - n := buildComprehensionIndices(c.debug, c.GetArity, candidates, c.RewrittenVars, r.Body, c.comprehensionIndices) - c.counterAdd(compileStageComprehensionIndexBuild, n) - return false - }) - } -} +type ModuleLoader = v1.ModuleLoader -// buildRequiredCapabilities updates the required capabilities on the compiler -// to include any keyword and feature dependencies present in the modules. The -// built-in function dependencies will have already been added by the type -// checker. -func (c *Compiler) buildRequiredCapabilities() { - - features := map[string]struct{}{} - - // extract required keywords from modules - keywords := map[string]struct{}{} - futureKeywordsPrefix := Ref{FutureRootDocument, StringTerm("keywords")} - for _, name := range c.sorted { - for _, imp := range c.imports[name] { - path := imp.Path.Value.(Ref) - switch { - case path.Equal(RegoV1CompatibleRef): - features[FeatureRegoV1Import] = struct{}{} - case path.HasPrefix(futureKeywordsPrefix): - if len(path) == 2 { - for kw := range futureKeywords { - keywords[kw] = struct{}{} - } - } else { - keywords[string(path[2].Value.(String))] = struct{}{} - } - } - } - } - - c.Required.FutureKeywords = stringMapToSortedSlice(keywords) - - // extract required features from modules - - for _, name := range c.sorted { - for _, rule := range c.Modules[name].Rules { - refLen := len(rule.Head.Reference) - if refLen >= 3 { - if refLen > len(rule.Head.Reference.ConstantPrefix()) { - features[FeatureRefHeads] = struct{}{} - } else { - features[FeatureRefHeadStringPrefixes] = struct{}{} - } - } - } - } - - c.Required.Features = stringMapToSortedSlice(features) - - for i, bi := range c.Required.Builtins { - c.Required.Builtins[i] = bi.Minimal() - } -} - -func stringMapToSortedSlice(xs map[string]struct{}) []string { - if len(xs) == 0 { - return nil - } - s := make([]string, 0, len(xs)) - for k := range xs { - s = append(s, k) - } - sort.Strings(s) - return s -} - -// checkRecursion ensures that there are no recursive definitions, i.e., there are -// no cycles in the Graph. -func (c *Compiler) checkRecursion() { - eq := func(a, b util.T) bool { - return a.(*Rule) == b.(*Rule) - } - - c.RuleTree.DepthFirst(func(node *TreeNode) bool { - for _, rule := range node.Values { - for node := rule.(*Rule); node != nil; node = node.Else { - c.checkSelfPath(node.Loc(), eq, node, node) - } - } - return false - }) -} - -func (c *Compiler) checkSelfPath(loc *Location, eq func(a, b util.T) bool, a, b util.T) { - tr := NewGraphTraversal(c.Graph) - if p := util.DFSPath(tr, eq, a, b); len(p) > 0 { - n := make([]string, 0, len(p)) - for _, x := range p { - n = append(n, astNodeToString(x)) - } - c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> "))) - } -} - -func astNodeToString(x interface{}) string { - return x.(*Rule).Ref().String() -} - -// checkRuleConflicts ensures that rules definitions are not in conflict. 
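// Editor's sketch (not part of the vendored diff): a ModuleLoader that lazily
// supplies one extra module, illustrating the WithModuleLoader contract
// documented above (the loader is invoked repeatedly until it returns an
// empty collection or an error). Module names and contents are made up; the
// deprecated github.com/open-policy-agent/opa/ast import path is assumed.
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	loaded := false
	loader := func(map[string]*ast.Module) (map[string]*ast.Module, error) {
		if loaded {
			return nil, nil // nothing left to load; compilation proceeds
		}
		loaded = true
		return map[string]*ast.Module{
			"extra.rego": ast.MustParseModule("package extra\n\nq := 2"),
		}, nil
	}

	compiler := ast.NewCompiler().WithModuleLoader(loader)
	compiler.Compile(map[string]*ast.Module{
		"app.rego": ast.MustParseModule("package app\n\np := 1"),
	})
	if compiler.Failed() {
		panic(compiler.Errors)
	}
	fmt.Println("compiled modules:", len(compiler.Modules)) // app.rego + extra.rego
}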
-func (c *Compiler) checkRuleConflicts() { - rw := rewriteVarsInRef(c.RewrittenVars) - - c.RuleTree.DepthFirst(func(node *TreeNode) bool { - if len(node.Values) == 0 { - return false // go deeper - } - - kinds := make(map[RuleKind]struct{}, len(node.Values)) - defaultRules := 0 - completeRules := 0 - partialRules := 0 - arities := make(map[int]struct{}, len(node.Values)) - name := "" - var conflicts []Ref - - for _, rule := range node.Values { - r := rule.(*Rule) - ref := r.Ref() - name = rw(ref.Copy()).String() // varRewriter operates in-place - kinds[r.Head.RuleKind()] = struct{}{} - arities[len(r.Head.Args)] = struct{}{} - if r.Default { - defaultRules++ - } - - // Single-value rules may not have any other rules in their extent. - // Rules with vars in their ref are allowed to have rules inside their extent. - // Only the ground portion (terms before the first var term) of a rule's ref is considered when determining - // whether it's inside the extent of another (c.RuleTree is organized this way already). - // These pairs are invalid: - // - // data.p.q.r { true } # data.p.q is { "r": true } - // data.p.q.r.s { true } - // - // data.p.q.r { true } - // data.p.q.r[s].t { s = input.key } - // - // But this is allowed: - // - // data.p.q.r { true } - // data.p.q[r].s.t { r = input.key } - // - // data.p[r] := x { r = input.key; x = input.bar } - // data.p.q[r] := x { r = input.key; x = input.bar } - // - // data.p.q[r] { r := input.r } - // data.p.q.r.s { true } - // - // data.p.q[r] = 1 { r := "r" } - // data.p.q.s = 2 - // - // data.p[q][r] { q := input.q; r := input.r } - // data.p.q.r { true } - // - // data.p.q[r] { r := input.r } - // data.p[q].r { q := input.q } - // - // data.p.q[r][s] { r := input.r; s := input.s } - // data.p[q].r.s { q := input.q } - - if r.Ref().IsGround() && len(node.Children) > 0 { - conflicts = node.flattenChildren() - } - - if r.Head.RuleKind() == SingleValue && r.Head.Ref().IsGround() { - completeRules++ - } else { - partialRules++ - } - } - - switch { - case conflicts != nil: - c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "rule %v conflicts with %v", name, conflicts)) - - case len(kinds) > 1 || len(arities) > 1 || (completeRules >= 1 && partialRules >= 1): - c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "conflicting rules %v found", name)) - - case defaultRules > 1: - c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "multiple default rules %s found", name)) - } - - return false - }) +// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting +// variables during the safety check. This has to be exported because it's relied on +// by the copy propagation implementation in topdown. +var SafetyCheckVisitorParams = v1.SafetyCheckVisitorParams - if c.pathExists != nil { - for _, err := range CheckPathConflicts(c, c.pathExists) { - c.err(err) - } - } +// ComprehensionIndex specifies how the comprehension term can be indexed. The keys +// tell the evaluator what variables to use for indexing. In the future, the index +// could be expanded with more information that would allow the evaluator to index +// a larger fragment of comprehensions (e.g., by closing over variables in the outer +// query.) +type ComprehensionIndex = v1.ComprehensionIndex - // NOTE(sr): depthfirst might better use sorted for stable errs? 
- c.ModuleTree.DepthFirst(func(node *ModuleTreeNode) bool { - for _, mod := range node.Modules { - for _, rule := range mod.Rules { - ref := rule.Head.Ref().GroundPrefix() - // Rules with a dynamic portion in their ref are exempted, as a conflict within the dynamic portion - // can only be detected at eval-time. - if len(ref) < len(rule.Head.Ref()) { - continue - } +// ModuleTreeNode represents a node in the module tree. The module +// tree is keyed by the package path. +type ModuleTreeNode = v1.ModuleTreeNode - childNode, tail := node.find(ref) - if childNode != nil && len(tail) == 0 { - for _, childMod := range childNode.Modules { - // Avoid recursively checking a module for equality unless we know it's a possible self-match. - if childMod.Equal(mod) { - continue // don't self-conflict - } - msg := fmt.Sprintf("%v conflicts with rule %v defined at %v", childMod.Package, rule.Head.Ref(), rule.Loc()) - c.err(NewError(TypeErr, mod.Package.Loc(), msg)) - } - } - } - } - return false - }) -} +// TreeNode represents a node in the rule tree. The rule tree is keyed by +// rule path. +type TreeNode = v1.TreeNode -func (c *Compiler) checkUndefinedFuncs() { - for _, name := range c.sorted { - m := c.Modules[name] - for _, err := range checkUndefinedFuncs(c.TypeEnv, m, c.GetArity, c.RewrittenVars) { - c.err(err) - } - } +// NewRuleTree returns a new TreeNode that represents the root +// of the rule tree populated with the given rules. +func NewRuleTree(mtree *ModuleTreeNode) *TreeNode { + return v1.NewRuleTree(mtree) } -func checkUndefinedFuncs(env *TypeEnv, x interface{}, arity func(Ref) int, rwVars map[Var]Var) Errors { - - var errs Errors - - WalkExprs(x, func(expr *Expr) bool { - if !expr.IsCall() { - return false - } - ref := expr.Operator() - if arity := arity(ref); arity >= 0 { - operands := len(expr.Operands()) - if expr.Generated { // an output var was added - if !expr.IsEquality() && operands != arity+1 { - ref = rewriteVarsInRef(rwVars)(ref) - errs = append(errs, arityMismatchError(env, ref, expr, arity, operands-1)) - return true - } - } else { // either output var or not - if operands != arity && operands != arity+1 { - ref = rewriteVarsInRef(rwVars)(ref) - errs = append(errs, arityMismatchError(env, ref, expr, arity, operands)) - return true - } - } - return false - } - ref = rewriteVarsInRef(rwVars)(ref) - errs = append(errs, NewError(TypeErr, expr.Loc(), "undefined function %v", ref)) - return true - }) - - return errs -} +// Graph represents the graph of dependencies between rules. +type Graph = v1.Graph -func arityMismatchError(env *TypeEnv, f Ref, expr *Expr, exp, act int) *Error { - if want, ok := env.Get(f).(*types.Function); ok { // generate richer error for built-in functions - have := make([]types.Type, len(expr.Operands())) - for i, op := range expr.Operands() { - have[i] = env.Get(op) - } - return newArgError(expr.Loc(), f, "arity mismatch", have, want.NamedFuncArgs()) - } - if act != 1 { - return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d arguments", f, exp, act) - } - return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d argument", f, exp, act) +// NewGraph returns a new Graph based on modules. The list function must return +// the rules referred to directly by the ref. 
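// Editor's sketch (not part of the vendored diff): the tree types above are
// plain aliases of their v1 counterparts, so the exported helpers referenced
// in this file (NewModuleTree, NewRuleTree, DepthFirst) can still be driven
// directly from parsed modules via the deprecated import path. A minimal
// walk, with made-up module contents:
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	modules := map[string]*ast.Module{
		"a.rego": ast.MustParseModule("package a.b\n\np := 1"),
		"b.rego": ast.MustParseModule("package a.c\n\nq := 2"),
	}

	// Build the module tree (keyed by package path) and the rule tree on top.
	ruleTree := ast.NewRuleTree(ast.NewModuleTree(modules))

	// Nodes that carry rules have non-empty Values; returning false keeps
	// descending into children.
	ruleTree.DepthFirst(func(node *ast.TreeNode) bool {
		if len(node.Values) > 0 {
			fmt.Println(node.Key, "->", len(node.Values), "rule(s)")
		}
		return false
	})
}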
+func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph { + return v1.NewGraph(modules, list) } -// checkSafetyRuleBodies ensures that variables appearing in negated expressions or non-target -// positions of built-in expressions will be bound when evaluating the rule from left -// to right, re-ordering as necessary. -func (c *Compiler) checkSafetyRuleBodies() { - for _, name := range c.sorted { - m := c.Modules[name] - WalkRules(m, func(r *Rule) bool { - safe := ReservedVars.Copy() - safe.Update(r.Head.Args.Vars()) - r.Body = c.checkBodySafety(safe, r.Body) - return false - }) - } -} +// GraphTraversal is a Traversal that understands the dependency graph +type GraphTraversal = v1.GraphTraversal -func (c *Compiler) checkBodySafety(safe VarSet, b Body) Body { - reordered, unsafe := reorderBodyForSafety(c.builtins, c.GetArity, safe, b) - if errs := safetyErrorSlice(unsafe, c.RewrittenVars); len(errs) > 0 { - for _, err := range errs { - c.err(err) - } - return b - } - return reordered +// NewGraphTraversal returns a Traversal for the dependency graph +func NewGraphTraversal(graph *Graph) *GraphTraversal { + return v1.NewGraphTraversal(graph) } -// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting -// variables during the safety check. This has to be exported because it's relied on -// by the copy propagation implementation in topdown. -var SafetyCheckVisitorParams = VarVisitorParams{ - SkipRefCallHead: true, - SkipClosures: true, +// OutputVarsFromBody returns all variables which are the "output" for +// the given body. For safety checks this means that they would be +// made safe by the body. +func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet { + return v1.OutputVarsFromBody(c, body, safe) } -// checkSafetyRuleHeads ensures that variables appearing in the head of a -// rule also appear in the body. 
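// Editor's sketch (not part of the vendored diff): the head-safety check above
// is what rejects a rule whose head mentions a variable that is never bound.
// A value-only rule keeps the module valid under both Rego syntax versions;
// the error code shown in the comment is an expectation, not asserted.
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	compiler := ast.NewCompiler()
	compiler.Compile(map[string]*ast.Module{
		"unsafe.rego": ast.MustParseModule("package example\n\np := x"),
	})

	// Expected to fail with something like:
	//   rego_unsafe_var_error: var x is unsafe
	fmt.Println("failed:", compiler.Failed())
	for _, err := range compiler.Errors {
		fmt.Println(err)
	}
}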
-func (c *Compiler) checkSafetyRuleHeads() { - - for _, name := range c.sorted { - m := c.Modules[name] - WalkRules(m, func(r *Rule) bool { - safe := r.Body.Vars(SafetyCheckVisitorParams) - safe.Update(r.Head.Args.Vars()) - unsafe := r.Head.Vars().Diff(safe) - for v := range unsafe { - if w, ok := c.RewrittenVars[v]; ok { - v = w - } - if !v.IsGenerated() { - c.err(NewError(UnsafeVarErr, r.Loc(), "var %v is unsafe", v)) - } - } - return false - }) - } -} - -func compileSchema(goSchema interface{}, allowNet []string) (*gojsonschema.Schema, error) { - gojsonschema.SetAllowNet(allowNet) - - var refLoader gojsonschema.JSONLoader - sl := gojsonschema.NewSchemaLoader() - - if goSchema != nil { - refLoader = gojsonschema.NewGoLoader(goSchema) - } else { - return nil, fmt.Errorf("no schema as input to compile") - } - schemasCompiled, err := sl.Compile(refLoader) - if err != nil { - return nil, fmt.Errorf("unable to compile the schema: %w", err) - } - return schemasCompiled, nil -} - -func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema, error) { - if len(schemas) == 0 { - return nil, nil - } - var result = schemas[0] - - for i := range schemas { - if len(schemas[i].PropertiesChildren) > 0 { - if !schemas[i].Types.Contains("object") { - if err := schemas[i].Types.Add("object"); err != nil { - return nil, fmt.Errorf("unable to set the type in schemas") - } - } - } else if len(schemas[i].ItemsChildren) > 0 { - if !schemas[i].Types.Contains("array") { - if err := schemas[i].Types.Add("array"); err != nil { - return nil, fmt.Errorf("unable to set the type in schemas") - } - } - } - } - - for i := 1; i < len(schemas); i++ { - if result.Types.String() != schemas[i].Types.String() { - return nil, fmt.Errorf("unable to merge these schemas: type mismatch: %v and %v", result.Types.String(), schemas[i].Types.String()) - } else if result.Types.Contains("object") && len(result.PropertiesChildren) > 0 && schemas[i].Types.Contains("object") && len(schemas[i].PropertiesChildren) > 0 { - result.PropertiesChildren = append(result.PropertiesChildren, schemas[i].PropertiesChildren...) 
- } else if result.Types.Contains("array") && len(result.ItemsChildren) > 0 && schemas[i].Types.Contains("array") && len(schemas[i].ItemsChildren) > 0 { - for j := 0; j < len(schemas[i].ItemsChildren); j++ { - if len(result.ItemsChildren)-1 < j && !(len(schemas[i].ItemsChildren)-1 < j) { - result.ItemsChildren = append(result.ItemsChildren, schemas[i].ItemsChildren[j]) - } - if result.ItemsChildren[j].Types.String() != schemas[i].ItemsChildren[j].Types.String() { - return nil, fmt.Errorf("unable to merge these schemas") - } - } - } - } - return result, nil -} - -type schemaParser struct { - definitionCache map[string]*cachedDef -} - -type cachedDef struct { - properties []*types.StaticProperty -} - -func newSchemaParser() *schemaParser { - return &schemaParser{ - definitionCache: map[string]*cachedDef{}, - } -} - -func (parser *schemaParser) parseSchema(schema interface{}) (types.Type, error) { - return parser.parseSchemaWithPropertyKey(schema, "") -} - -func (parser *schemaParser) parseSchemaWithPropertyKey(schema interface{}, propertyKey string) (types.Type, error) { - subSchema, ok := schema.(*gojsonschema.SubSchema) - if !ok { - return nil, fmt.Errorf("unexpected schema type %v", subSchema) - } - - // Handle referenced schemas, returns directly when a $ref is found - if subSchema.RefSchema != nil { - if existing, ok := parser.definitionCache[subSchema.Ref.String()]; ok { - return types.NewObject(existing.properties, nil), nil - } - return parser.parseSchemaWithPropertyKey(subSchema.RefSchema, subSchema.Ref.String()) - } - - // Handle anyOf - if subSchema.AnyOf != nil { - var orType types.Type - - // If there is a core schema, find its type first - if subSchema.Types.IsTyped() { - copySchema := *subSchema - copySchemaRef := &copySchema - copySchemaRef.AnyOf = nil - coreType, err := parser.parseSchema(copySchemaRef) - if err != nil { - return nil, fmt.Errorf("unexpected schema type %v: %w", subSchema, err) - } - - // Only add Object type with static props to orType - if objType, ok := coreType.(*types.Object); ok { - if objType.StaticProperties() != nil && objType.DynamicProperties() == nil { - orType = types.Or(orType, coreType) - } - } - } - - // Iterate through every property of AnyOf and add it to orType - for _, pSchema := range subSchema.AnyOf { - newtype, err := parser.parseSchema(pSchema) - if err != nil { - return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err) - } - orType = types.Or(newtype, orType) - } - - return orType, nil - } - - if subSchema.AllOf != nil { - subSchemaArray := subSchema.AllOf - allOfResult, err := mergeSchemas(subSchemaArray...)
- if err != nil { - return nil, err - } - - if subSchema.Types.IsTyped() { - if (subSchema.Types.Contains("object") && allOfResult.Types.Contains("object")) || (subSchema.Types.Contains("array") && allOfResult.Types.Contains("array")) { - objectOrArrayResult, err := mergeSchemas(allOfResult, subSchema) - if err != nil { - return nil, err - } - return parser.parseSchema(objectOrArrayResult) - } else if subSchema.Types.String() != allOfResult.Types.String() { - return nil, fmt.Errorf("unable to merge these schemas") - } - } - return parser.parseSchema(allOfResult) - } - - if subSchema.Types.IsTyped() { - if subSchema.Types.Contains("boolean") { - return types.B, nil - - } else if subSchema.Types.Contains("string") { - return types.S, nil - - } else if subSchema.Types.Contains("integer") || subSchema.Types.Contains("number") { - return types.N, nil - - } else if subSchema.Types.Contains("object") { - if len(subSchema.PropertiesChildren) > 0 { - def := &cachedDef{ - properties: make([]*types.StaticProperty, 0, len(subSchema.PropertiesChildren)), - } - for _, pSchema := range subSchema.PropertiesChildren { - def.properties = append(def.properties, types.NewStaticProperty(pSchema.Property, nil)) - } - if propertyKey != "" { - parser.definitionCache[propertyKey] = def - } - for _, pSchema := range subSchema.PropertiesChildren { - newtype, err := parser.parseSchema(pSchema) - if err != nil { - return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err) - } - for i, prop := range def.properties { - if prop.Key == pSchema.Property { - def.properties[i].Value = newtype - break - } - } - } - return types.NewObject(def.properties, nil), nil - } - return types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), nil - - } else if subSchema.Types.Contains("array") { - if len(subSchema.ItemsChildren) > 0 { - if subSchema.ItemsChildrenIsSingleSchema { - iSchema := subSchema.ItemsChildren[0] - newtype, err := parser.parseSchema(iSchema) - if err != nil { - return nil, fmt.Errorf("unexpected schema type %v", iSchema) - } - return types.NewArray(nil, newtype), nil - } - newTypes := make([]types.Type, 0, len(subSchema.ItemsChildren)) - for i := 0; i != len(subSchema.ItemsChildren); i++ { - iSchema := subSchema.ItemsChildren[i] - newtype, err := parser.parseSchema(iSchema) - if err != nil { - return nil, fmt.Errorf("unexpected schema type %v", iSchema) - } - newTypes = append(newTypes, newtype) - } - return types.NewArray(newTypes, nil), nil - } - return types.NewArray(nil, types.A), nil - } - } - - // Assume types if not specified in schema - if len(subSchema.PropertiesChildren) > 0 { - if err := subSchema.Types.Add("object"); err == nil { - return parser.parseSchema(subSchema) - } - } else if len(subSchema.ItemsChildren) > 0 { - if err := subSchema.Types.Add("array"); err == nil { - return parser.parseSchema(subSchema) - } - } - - return types.A, nil -} - -func (c *Compiler) setAnnotationSet() { - // Sorting modules by name for stable error reporting - sorted := make([]*Module, 0, len(c.Modules)) - for _, mName := range c.sorted { - sorted = append(sorted, c.Modules[mName]) - } - - as, errs := BuildAnnotationSet(sorted) - for _, err := range errs { - c.err(err) - } - c.annotationSet = as -} - -// checkTypes runs the type checker on all rules. The type checker builds a -// TypeEnv that is stored on the compiler. -func (c *Compiler) checkTypes() { - // Recursion is caught in earlier step, so this cannot fail. - sorted, _ := c.Graph.Sort() - checker := newTypeChecker(). 
- WithAllowNet(c.capabilities.AllowNet). - WithSchemaSet(c.schemaSet). - WithInputType(c.inputType). - WithBuiltins(c.builtins). - WithRequiredCapabilities(c.Required). - WithVarRewriter(rewriteVarsInRef(c.RewrittenVars)). - WithAllowUndefinedFunctionCalls(c.allowUndefinedFuncCalls) - var as *AnnotationSet - if c.useTypeCheckAnnotations { - as = c.annotationSet - } - env, errs := checker.CheckTypes(c.TypeEnv, sorted, as) - for _, err := range errs { - c.err(err) - } - c.TypeEnv = env -} - -func (c *Compiler) checkUnsafeBuiltins() { - for _, name := range c.sorted { - errs := checkUnsafeBuiltins(c.unsafeBuiltinsMap, c.Modules[name]) - for _, err := range errs { - c.err(err) - } - } -} - -func (c *Compiler) checkDeprecatedBuiltins() { - for _, name := range c.sorted { - mod := c.Modules[name] - if c.strict || mod.regoV1Compatible() { - errs := checkDeprecatedBuiltins(c.deprecatedBuiltinsMap, mod) - for _, err := range errs { - c.err(err) - } - } - } -} - -func (c *Compiler) runStage(metricName string, f func()) { - if c.metrics != nil { - c.metrics.Timer(metricName).Start() - defer c.metrics.Timer(metricName).Stop() - } - f() -} - -func (c *Compiler) runStageAfter(metricName string, s CompilerStage) *Error { - if c.metrics != nil { - c.metrics.Timer(metricName).Start() - defer c.metrics.Timer(metricName).Stop() - } - return s(c) -} - -func (c *Compiler) compile() { - - defer func() { - if r := recover(); r != nil && r != errLimitReached { - panic(r) - } - }() - - for _, s := range c.stages { - if c.evalMode == EvalModeIR { - switch s.name { - case "BuildRuleIndices", "BuildComprehensionIndices": - continue // skip these stages - } - } - - if c.allowUndefinedFuncCalls && (s.name == "CheckUndefinedFuncs" || s.name == "CheckSafetyRuleBodies") { - continue - } - - c.runStage(s.metricName, s.f) - if c.Failed() { - return - } - for _, a := range c.after[s.name] { - if err := c.runStageAfter(a.MetricName, a.Stage); err != nil { - c.err(err) - return - } - } - } -} - -func (c *Compiler) init() { - - if c.initialized { - return - } - - if c.capabilities == nil { - c.capabilities = CapabilitiesForThisVersion() - } - - c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins)) - - for _, bi := range c.capabilities.Builtins { - c.builtins[bi.Name] = bi - if bi.IsDeprecated() { - c.deprecatedBuiltinsMap[bi.Name] = struct{}{} - } - } - - for name, bi := range c.customBuiltins { - c.builtins[name] = bi - } - - // Load the global input schema if one was provided. - if c.schemaSet != nil { - if schema := c.schemaSet.Get(SchemaRootRef); schema != nil { - tpe, err := loadSchema(schema, c.capabilities.AllowNet) - if err != nil { - c.err(NewError(TypeErr, nil, err.Error())) - } else { - c.inputType = tpe - } - } - } - - c.TypeEnv = newTypeChecker(). - WithSchemaSet(c.schemaSet). - WithInputType(c.inputType). 
- Env(c.builtins) - - c.initialized = true -} - -func (c *Compiler) err(err *Error) { - if c.maxErrs > 0 && len(c.Errors) >= c.maxErrs { - c.Errors = append(c.Errors, errLimitReached) - panic(errLimitReached) - } - c.Errors = append(c.Errors, err) -} - -func (c *Compiler) getExports() *util.HashMap { - - rules := util.NewHashMap(func(a, b util.T) bool { - return a.(Ref).Equal(b.(Ref)) - }, func(v util.T) int { - return v.(Ref).Hash() - }) - - for _, name := range c.sorted { - mod := c.Modules[name] - - for _, rule := range mod.Rules { - hashMapAdd(rules, mod.Package.Path, rule.Head.Ref().GroundPrefix()) - } - } - - return rules -} - -func hashMapAdd(rules *util.HashMap, pkg, rule Ref) { - prev, ok := rules.Get(pkg) - if !ok { - rules.Put(pkg, []Ref{rule}) - return - } - for _, p := range prev.([]Ref) { - if p.Equal(rule) { - return - } - } - rules.Put(pkg, append(prev.([]Ref), rule)) -} - -func (c *Compiler) GetAnnotationSet() *AnnotationSet { - return c.annotationSet -} - -func (c *Compiler) checkDuplicateImports() { - modules := make([]*Module, 0, len(c.Modules)) - - for _, name := range c.sorted { - mod := c.Modules[name] - if c.strict || mod.regoV1Compatible() { - modules = append(modules, mod) - } - } - - errs := checkDuplicateImports(modules) - for _, err := range errs { - c.err(err) - } -} - -func (c *Compiler) checkKeywordOverrides() { - for _, name := range c.sorted { - mod := c.Modules[name] - if c.strict || mod.regoV1Compatible() { - errs := checkRootDocumentOverrides(mod) - for _, err := range errs { - c.err(err) - } - } - } -} - -// resolveAllRefs resolves references in expressions to their fully qualified values. -// -// For instance, given the following module: -// -// package a.b -// import data.foo.bar -// p[x] { bar[_] = x } -// -// The reference "bar[_]" would be resolved to "data.foo.bar[_]". -// -// Ref rules are resolved, too: -// -// package a.b -// q { c.d.e == 1 } -// c.d[e] := 1 if e := "e" -// -// The reference "c.d.e" would be resolved to "data.a.b.c.d.e". 
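// Editor's sketch (not part of the vendored diff): observing the ref
// resolution documented above from the outside. After compilation the import
// alias "bar" no longer appears in the rule; it has been replaced by the
// fully qualified data.foo.bar ref (later stages add their own local-variable
// rewriting on top). The deprecated top-level ast import path is assumed.
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	compiler := ast.NewCompiler()
	compiler.Compile(map[string]*ast.Module{
		"m.rego": ast.MustParseModule("package a.b\n\nimport data.foo.bar\n\np := bar"),
	})
	if compiler.Failed() {
		panic(compiler.Errors)
	}

	// The printed rule contains data.foo.bar where "bar" was written.
	fmt.Println(compiler.Modules["m.rego"].Rules[0])
}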
-func (c *Compiler) resolveAllRefs() { - - rules := c.getExports() - - for _, name := range c.sorted { - mod := c.Modules[name] - - var ruleExports []Ref - if x, ok := rules.Get(mod.Package.Path); ok { - ruleExports = x.([]Ref) - } - - globals := getGlobals(mod.Package, ruleExports, mod.Imports) - - WalkRules(mod, func(rule *Rule) bool { - err := resolveRefsInRule(globals, rule) - if err != nil { - c.err(NewError(CompileErr, rule.Location, err.Error())) - } - return false - }) - - if c.strict { // check for unused imports - for _, imp := range mod.Imports { - path := imp.Path.Value.(Ref) - if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) { - continue // ignore future and rego imports - } - - for v, u := range globals { - if v.Equal(imp.Name()) && !u.used { - c.err(NewError(CompileErr, imp.Location, "%s unused", imp.String())) - } - } - } - } - } - - if c.moduleLoader != nil { - - parsed, err := c.moduleLoader(c.Modules) - if err != nil { - c.err(NewError(CompileErr, nil, err.Error())) - return - } - - if len(parsed) == 0 { - return - } - - for id, module := range parsed { - c.Modules[id] = module.Copy() - c.sorted = append(c.sorted, id) - if c.parsedModules != nil { - c.parsedModules[id] = module - } - } - - sort.Strings(c.sorted) - c.resolveAllRefs() - } -} - -func (c *Compiler) removeImports() { - c.imports = make(map[string][]*Import, len(c.Modules)) - for name := range c.Modules { - c.imports[name] = c.Modules[name].Imports - c.Modules[name].Imports = nil - } -} - -func (c *Compiler) initLocalVarGen() { - c.localvargen = newLocalVarGeneratorForModuleSet(c.sorted, c.Modules) -} - -func (c *Compiler) rewriteComprehensionTerms() { - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - mod := c.Modules[name] - _, _ = rewriteComprehensionTerms(f, mod) // ignore error - } -} - -func (c *Compiler) rewriteExprTerms() { - for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { - rewriteExprTermsInHead(c.localvargen, rule) - rule.Body = rewriteExprTermsInBody(c.localvargen, rule.Body) - return false - }) - } -} - -func (c *Compiler) rewriteRuleHeadRefs() { - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - WalkRules(c.Modules[name], func(rule *Rule) bool { - - ref := rule.Head.Ref() - // NOTE(sr): We're backfilling Refs here -- all parser code paths would have them, but - // it's possible to construct Module{} instances from Golang code, so we need - // to accommodate for that, too. - if len(rule.Head.Reference) == 0 { - rule.Head.Reference = ref - } - - cannotSpeakStringPrefixRefs := true - cannotSpeakGeneralRefs := true - for _, f := range c.capabilities.Features { - switch f { - case FeatureRefHeadStringPrefixes: - cannotSpeakStringPrefixRefs = false - case FeatureRefHeads: - cannotSpeakGeneralRefs = false - } - } - - if cannotSpeakStringPrefixRefs && cannotSpeakGeneralRefs && rule.Head.Name == "" { - c.err(NewError(CompileErr, rule.Loc(), "rule heads with refs are not supported: %v", rule.Head.Reference)) - return true - } - - for i := 1; i < len(ref); i++ { - if cannotSpeakGeneralRefs && (rule.Head.RuleKind() == MultiValue || i != len(ref)-1) { // last - if _, ok := ref[i].Value.(String); !ok { - c.err(NewError(TypeErr, rule.Loc(), "rule heads with general refs (containing variables) are not supported: %v", rule.Head.Reference)) - continue - } - } - - // Rewrite so that any non-scalar elements in the rule's ref are vars: - // p.q.r[y.z] { ... 
} => p.q.r[__local0__] { __local0__ = y.z } - // p.q[a.b][c.d] { ... } => p.q[__local0__] { __local0__ = a.b; __local1__ = c.d } - // because that's what the RuleTree knows how to deal with. - if _, ok := ref[i].Value.(Var); !ok && !IsScalar(ref[i].Value) { - expr := f.Generate(ref[i]) - if i == len(ref)-1 && rule.Head.Key.Equal(ref[i]) { - rule.Head.Key = expr.Operand(0) - } - rule.Head.Reference[i] = expr.Operand(0) - rule.Body.Append(expr) - } - } - - return true - }) - } -} - -func (c *Compiler) checkVoidCalls() { - for _, name := range c.sorted { - mod := c.Modules[name] - for _, err := range checkVoidCalls(c.TypeEnv, mod) { - c.err(err) - } - } -} - -func (c *Compiler) rewritePrintCalls() { - var modified bool - if !c.enablePrintStatements { - for _, name := range c.sorted { - if erasePrintCalls(c.Modules[name]) { - modified = true - } - } - } else { - for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(r *Rule) bool { - safe := r.Head.Args.Vars() - safe.Update(ReservedVars) - vis := func(b Body) bool { - modrec, errs := rewritePrintCalls(c.localvargen, c.GetArity, safe, b) - if modrec { - modified = true - } - for _, err := range errs { - c.err(err) - } - return false - } - WalkBodies(r.Head, vis) - WalkBodies(r.Body, vis) - return false - }) - } - } - if modified { - c.Required.addBuiltinSorted(Print) - } -} - -// checkVoidCalls returns errors for any expressions that treat void function -// calls as values. The only void functions in Rego are specific built-ins like -// print(). -func checkVoidCalls(env *TypeEnv, x interface{}) Errors { - var errs Errors - WalkTerms(x, func(x *Term) bool { - if call, ok := x.Value.(Call); ok { - if tpe, ok := env.Get(call[0]).(*types.Function); ok && tpe.Result() == nil { - errs = append(errs, NewError(TypeErr, x.Loc(), "%v used as value", call)) - } - } - return false - }) - return errs -} - -// rewritePrintCalls will rewrite the body so that print operands are captured -// in local variables and their evaluation occurs within a comprehension. -// Wrapping the terms inside of a comprehension ensures that undefined values do -// not short-circuit evaluation. -// -// For example, given the following print statement: -// -// print("the value of x is:", input.x) -// -// The expression would be rewritten to: -// -// print({__local0__ | __local0__ = "the value of x is:"}, {__local1__ | __local1__ = input.x}) -func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals VarSet, body Body) (bool, Errors) { - - var errs Errors - var modified bool - - // Visit comprehension bodies recursively to ensure print statements inside - // those bodies only close over variables that are safe. - for i := range body { - if ContainsClosures(body[i]) { - safe := outputVarsForBody(body[:i], getArity, globals) - safe.Update(globals) - WalkClosures(body[i], func(x interface{}) bool { - var modrec bool - var errsrec Errors - switch x := x.(type) { - case *SetComprehension: - modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) - case *ArrayComprehension: - modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) - case *ObjectComprehension: - modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) - case *Every: - safe.Update(x.KeyValueVars()) - modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) - } - if modrec { - modified = true - } - errs = append(errs, errsrec...) 
- return true - }) - if len(errs) > 0 { - return false, errs - } - } - } - - for i := range body { - - if !isPrintCall(body[i]) { - continue - } - - modified = true - - var errs Errors - safe := outputVarsForBody(body[:i], getArity, globals) - safe.Update(globals) - args := body[i].Operands() - - for j := range args { - vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) - vis.Walk(args[j]) - unsafe := vis.Vars().Diff(safe) - for _, v := range unsafe.Sorted() { - errs = append(errs, NewError(CompileErr, args[j].Loc(), "var %v is undeclared", v)) - } - } - - if len(errs) > 0 { - return false, errs - } - - arr := NewArray() - - for j := range args { - x := NewTerm(gen.Generate()).SetLocation(args[j].Loc()) - capture := Equality.Expr(x, args[j]).SetLocation(args[j].Loc()) - arr = arr.Append(SetComprehensionTerm(x, NewBody(capture)).SetLocation(args[j].Loc())) - } - - body.Set(NewExpr([]*Term{ - NewTerm(InternalPrint.Ref()).SetLocation(body[i].Loc()), - NewTerm(arr).SetLocation(body[i].Loc()), - }).SetLocation(body[i].Loc()), i) - } - - return modified, nil -} - -func erasePrintCalls(node interface{}) bool { - var modified bool - NewGenericVisitor(func(x interface{}) bool { - var modrec bool - switch x := x.(type) { - case *Rule: - modrec, x.Body = erasePrintCallsInBody(x.Body) - case *ArrayComprehension: - modrec, x.Body = erasePrintCallsInBody(x.Body) - case *SetComprehension: - modrec, x.Body = erasePrintCallsInBody(x.Body) - case *ObjectComprehension: - modrec, x.Body = erasePrintCallsInBody(x.Body) - case *Every: - modrec, x.Body = erasePrintCallsInBody(x.Body) - } - if modrec { - modified = true - } - return false - }).Walk(node) - return modified -} - -func erasePrintCallsInBody(x Body) (bool, Body) { - - if !containsPrintCall(x) { - return false, x - } - - var cpy Body - - for i := range x { - - // Recursively visit any comprehensions contained in this expression. - erasePrintCalls(x[i]) - - if !isPrintCall(x[i]) { - cpy.Append(x[i]) - } - } - - if len(cpy) == 0 { - term := BooleanTerm(true).SetLocation(x.Loc()) - expr := NewExpr(term).SetLocation(x.Loc()) - cpy.Append(expr) - } - - return true, cpy -} - -func containsPrintCall(x interface{}) bool { - var found bool - WalkExprs(x, func(expr *Expr) bool { - if !found { - if isPrintCall(expr) { - found = true - } - } - return found - }) - return found -} - -func isPrintCall(x *Expr) bool { - return x.IsCall() && x.Operator().Equal(Print.Ref()) -} - -// rewriteRefsInHead will rewrite rules so that the head does not contain any -// terms that require evaluation (e.g., refs or comprehensions). If the key or -// value contains one or more of these terms, the key or value will be moved -// into the body and assigned to a new variable. The new variable will replace -// the key or value in the head. 
-// -// For instance, given the following rule: -// -// p[{"foo": data.foo[i]}] { i < 100 } -// -// The rule would be re-written as: -// -// p[__local0__] { i < 100; __local0__ = {"foo": data.foo[i]} } -func (c *Compiler) rewriteRefsInHead() { - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { - if requiresEval(rule.Head.Key) { - expr := f.Generate(rule.Head.Key) - rule.Head.Key = expr.Operand(0) - rule.Body.Append(expr) - } - if requiresEval(rule.Head.Value) { - expr := f.Generate(rule.Head.Value) - rule.Head.Value = expr.Operand(0) - rule.Body.Append(expr) - } - for i := 0; i < len(rule.Head.Args); i++ { - if requiresEval(rule.Head.Args[i]) { - expr := f.Generate(rule.Head.Args[i]) - rule.Head.Args[i] = expr.Operand(0) - rule.Body.Append(expr) - } - } - return false - }) - } -} - -func (c *Compiler) rewriteEquals() { - modified := false - for _, name := range c.sorted { - mod := c.Modules[name] - modified = rewriteEquals(mod) || modified - } - if modified { - c.Required.addBuiltinSorted(Equal) - } -} - -func (c *Compiler) rewriteDynamicTerms() { - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { - rule.Body = rewriteDynamics(f, rule.Body) - return false - }) - } -} - -// rewriteTestRuleEqualities rewrites equality expressions in test rule bodies to create local vars for statements that would otherwise -// not have their values captured through tracing, such as refs and comprehensions not unified/assigned to a local var. -// For example, given the following module: -// -// package test -// -// p.q contains v if { -// some v in numbers.range(1, 3) -// } -// -// p.r := "foo" -// -// test_rule { -// p == { -// "q": {4, 5, 6} -// } -// } -// -// `p` in `test_rule` resolves to `data.test.p`, which won't be an entry in the virtual-cache and must therefore be calculated after-the-fact. -// If `p` isn't captured in a local var, there is no trivial way to retrieve its value for test reporting. -func (c *Compiler) rewriteTestRuleEqualities() { - if !c.rewriteTestRulesForTracing { - return - } - - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - mod := c.Modules[name] - WalkRules(mod, func(rule *Rule) bool { - if strings.HasPrefix(string(rule.Head.Name), "test_") { - rule.Body = rewriteTestEqualities(f, rule.Body) - } - return false - }) - } -} - -func (c *Compiler) parseMetadataBlocks() { - // Only parse annotations if rego.metadata built-ins are called - regoMetadataCalled := false - for _, name := range c.sorted { - mod := c.Modules[name] - WalkExprs(mod, func(expr *Expr) bool { - if isRegoMetadataChainCall(expr) || isRegoMetadataRuleCall(expr) { - regoMetadataCalled = true - } - return regoMetadataCalled - }) - - if regoMetadataCalled { - break - } - } - - if regoMetadataCalled { - // NOTE: Possible optimization: only parse annotations for modules on the path of rego.metadata-calling module - for _, name := range c.sorted { - mod := c.Modules[name] - - if len(mod.Annotations) == 0 { - var errs Errors - mod.Annotations, errs = parseAnnotations(mod.Comments) - errs = append(errs, attachAnnotationsNodes(mod)...) 
- for _, err := range errs { - c.err(err) - } - - attachRuleAnnotations(mod) - } - } - } -} - -func (c *Compiler) rewriteRegoMetadataCalls() { - eqFactory := newEqualityFactory(c.localvargen) - - _, chainFuncAllowed := c.builtins[RegoMetadataChain.Name] - _, ruleFuncAllowed := c.builtins[RegoMetadataRule.Name] - - for _, name := range c.sorted { - mod := c.Modules[name] - - WalkRules(mod, func(rule *Rule) bool { - var firstChainCall *Expr - var firstRuleCall *Expr - - WalkExprs(rule, func(expr *Expr) bool { - if chainFuncAllowed && firstChainCall == nil && isRegoMetadataChainCall(expr) { - firstChainCall = expr - } else if ruleFuncAllowed && firstRuleCall == nil && isRegoMetadataRuleCall(expr) { - firstRuleCall = expr - } - return firstChainCall != nil && firstRuleCall != nil - }) - - chainCalled := firstChainCall != nil - ruleCalled := firstRuleCall != nil - - if chainCalled || ruleCalled { - body := make(Body, 0, len(rule.Body)+2) - - var metadataChainVar Var - if chainCalled { - // Create and inject metadata chain for rule - - chain, err := createMetadataChain(c.annotationSet.Chain(rule)) - if err != nil { - c.err(err) - return false - } - - chain.Location = firstChainCall.Location - eq := eqFactory.Generate(chain) - metadataChainVar = eq.Operands()[0].Value.(Var) - body.Append(eq) - } - - var metadataRuleVar Var - if ruleCalled { - // Create and inject metadata for rule - - var metadataRuleTerm *Term - - a := getPrimaryRuleAnnotations(c.annotationSet, rule) - if a != nil { - annotObj, err := a.toObject() - if err != nil { - c.err(err) - return false - } - metadataRuleTerm = NewTerm(*annotObj) - } else { - // If rule has no annotations, assign an empty object - metadataRuleTerm = ObjectTerm() - } - - metadataRuleTerm.Location = firstRuleCall.Location - eq := eqFactory.Generate(metadataRuleTerm) - metadataRuleVar = eq.Operands()[0].Value.(Var) - body.Append(eq) - } - - for _, expr := range rule.Body { - body.Append(expr) - } - rule.Body = body - - vis := func(b Body) bool { - for _, err := range rewriteRegoMetadataCalls(&metadataChainVar, &metadataRuleVar, b, &c.RewrittenVars) { - c.err(err) - } - return false - } - WalkBodies(rule.Head, vis) - WalkBodies(rule.Body, vis) - } - - return false - }) - } -} - -func getPrimaryRuleAnnotations(as *AnnotationSet, rule *Rule) *Annotations { - annots := as.GetRuleScope(rule) - - if len(annots) == 0 { - return nil - } - - // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward - sort.SliceStable(annots, func(i, j int) bool { - return annots[i].Location.Compare(annots[j].Location) > 0 - }) - - return annots[0] -} - -func rewriteRegoMetadataCalls(metadataChainVar *Var, metadataRuleVar *Var, body Body, rewrittenVars *map[Var]Var) Errors { - var errs Errors - - WalkClosures(body, func(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension: - errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) - case *SetComprehension: - errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) - case *ObjectComprehension: - errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) - case *Every: - errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) - } - return true - }) - - for i := range body { - expr := body[i] - var metadataVar Var - - if metadataChainVar != nil && isRegoMetadataChainCall(expr) { - metadataVar = *metadataChainVar - } else if 
metadataRuleVar != nil && isRegoMetadataRuleCall(expr) { - metadataVar = *metadataRuleVar - } else { - continue - } - - // NOTE(johanfylling): An alternative strategy would be to walk the body and replace all operands[0] - // usages with *metadataChainVar - operands := expr.Operands() - var newExpr *Expr - if len(operands) > 0 { // There is an output var to rewrite - rewrittenVar := operands[0] - newExpr = Equality.Expr(rewrittenVar, NewTerm(metadataVar)) - } else { // No output var, just rewrite expr to metadataVar - newExpr = NewExpr(NewTerm(metadataVar)) - } - - newExpr.Generated = true - newExpr.Location = expr.Location - body.Set(newExpr, i) - } - - return errs -} - -func isRegoMetadataChainCall(x *Expr) bool { - return x.IsCall() && x.Operator().Equal(RegoMetadataChain.Ref()) -} - -func isRegoMetadataRuleCall(x *Expr) bool { - return x.IsCall() && x.Operator().Equal(RegoMetadataRule.Ref()) -} - -func createMetadataChain(chain []*AnnotationsRef) (*Term, *Error) { - - metaArray := NewArray() - for _, link := range chain { - p := link.Path.toArray(). - Slice(1, -1) // Dropping leading 'data' element of path - obj := NewObject( - Item(StringTerm("path"), NewTerm(p)), - ) - if link.Annotations != nil { - annotObj, err := link.Annotations.toObject() - if err != nil { - return nil, err - } - obj.Insert(StringTerm("annotations"), NewTerm(*annotObj)) - } - metaArray = metaArray.Append(NewTerm(obj)) - } - - return NewTerm(metaArray), nil -} - -func (c *Compiler) rewriteLocalVars() { - - var assignment bool - - for _, name := range c.sorted { - mod := c.Modules[name] - gen := c.localvargen - - WalkRules(mod, func(rule *Rule) bool { - argsStack := newLocalDeclaredVars() - - args := NewVarVisitor() - if c.strict { - args.Walk(rule.Head.Args) - } - unusedArgs := args.Vars() - - c.rewriteLocalArgVars(gen, argsStack, rule) - - // Rewrite local vars in each else-branch of the rule. - // Note: this is done instead of a walk so that we can capture any unused function arguments - // across else-branches. - for rule := rule; rule != nil; rule = rule.Else { - stack, errs := c.rewriteLocalVarsInRule(rule, unusedArgs, argsStack, gen) - if stack.assignment { - assignment = true - } - - for arg := range unusedArgs { - if stack.Count(arg) > 1 { - delete(unusedArgs, arg) - } - } - - for _, err := range errs { - c.err(err) - } - } - - if c.strict { - // Report an error for each unused function argument - for arg := range unusedArgs { - if !arg.IsWildcard() { - c.err(NewError(CompileErr, rule.Head.Location, "unused argument %v. (hint: use _ (wildcard variable) instead)", arg)) - } - } - } - - return true - }) - } - - if assignment { - c.Required.addBuiltinSorted(Assign) - } -} - -func (c *Compiler) rewriteLocalVarsInRule(rule *Rule, unusedArgs VarSet, argsStack *localDeclaredVars, gen *localVarGenerator) (*localDeclaredVars, Errors) { - // Rewrite assignments contained in head of rule. Assignments can - // occur in rule head if they're inside a comprehension. Note, - // assigned vars in comprehensions in the head will be rewritten - // first to preserve scoping rules. For example: - // - // p = [x | x := 1] { x := 2 } becomes p = [__local0__ | __local0__ = 1] { __local1__ = 2 } - // - // This behaviour is consistent scoping inside the body. 
For example: - // - // p = xs { x := 2; xs = [x | x := 1] } becomes p = xs { __local0__ = 2; xs = [__local1__ | __local1__ = 1] } - nestedXform := &rewriteNestedHeadVarLocalTransform{ - gen: gen, - RewrittenVars: c.RewrittenVars, - strict: c.strict, - } - - NewGenericVisitor(nestedXform.Visit).Walk(rule.Head) - - for _, err := range nestedXform.errs { - c.err(err) - } - - // Rewrite assignments in body. - used := NewVarSet() - - for _, t := range rule.Head.Ref()[1:] { - used.Update(t.Vars()) - } - - if rule.Head.Key != nil { - used.Update(rule.Head.Key.Vars()) - } - - if rule.Head.Value != nil { - valueVars := rule.Head.Value.Vars() - used.Update(valueVars) - for arg := range unusedArgs { - if valueVars.Contains(arg) { - delete(unusedArgs, arg) - } - } - } - - stack := argsStack.Copy() - - body, declared, errs := rewriteLocalVars(gen, stack, used, rule.Body, c.strict) - - // For rewritten vars use the collection of all variables that - // were in the stack at some point in time. - for k, v := range stack.rewritten { - c.RewrittenVars[k] = v - } - - rule.Body = body - - // Rewrite vars in head that refer to locally declared vars in the body. - localXform := rewriteHeadVarLocalTransform{declared: declared} - - for i := range rule.Head.Args { - rule.Head.Args[i], _ = transformTerm(localXform, rule.Head.Args[i]) - } - - for i := 1; i < len(rule.Head.Ref()); i++ { - rule.Head.Reference[i], _ = transformTerm(localXform, rule.Head.Ref()[i]) - } - if rule.Head.Key != nil { - rule.Head.Key, _ = transformTerm(localXform, rule.Head.Key) - } - - if rule.Head.Value != nil { - rule.Head.Value, _ = transformTerm(localXform, rule.Head.Value) - } - return stack, errs -} - -type rewriteNestedHeadVarLocalTransform struct { - gen *localVarGenerator - errs Errors - RewrittenVars map[Var]Var - strict bool -} - -func (xform *rewriteNestedHeadVarLocalTransform) Visit(x interface{}) bool { - - if term, ok := x.(*Term); ok { - - stop := false - stack := newLocalDeclaredVars() - - switch x := term.Value.(type) { - case *object: - cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { - kcpy := k.Copy() - NewGenericVisitor(xform.Visit).Walk(kcpy) - vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) - return kcpy, vcpy, nil - }) - term.Value = cpy - stop = true - case *set: - cpy, _ := x.Map(func(v *Term) (*Term, error) { - vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) - return vcpy, nil - }) - term.Value = cpy - stop = true - case *ArrayComprehension: - xform.errs = rewriteDeclaredVarsInArrayComprehension(xform.gen, stack, x, xform.errs, xform.strict) - stop = true - case *SetComprehension: - xform.errs = rewriteDeclaredVarsInSetComprehension(xform.gen, stack, x, xform.errs, xform.strict) - stop = true - case *ObjectComprehension: - xform.errs = rewriteDeclaredVarsInObjectComprehension(xform.gen, stack, x, xform.errs, xform.strict) - stop = true - } - - for k, v := range stack.rewritten { - xform.RewrittenVars[k] = v - } - - return stop - } - - return false -} - -type rewriteHeadVarLocalTransform struct { - declared map[Var]Var -} - -func (xform rewriteHeadVarLocalTransform) Transform(x interface{}) (interface{}, error) { - if v, ok := x.(Var); ok { - if gv, ok := xform.declared[v]; ok { - return gv, nil - } - } - return x, nil -} - -func (c *Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDeclaredVars, rule *Rule) { - - vis := &ruleArgLocalRewriter{ - stack: stack, - gen: gen, - } - - for i := range rule.Head.Args { - Walk(vis, rule.Head.Args[i]) - } - - for i := range 
vis.errs { - c.err(vis.errs[i]) - } -} - -type ruleArgLocalRewriter struct { - stack *localDeclaredVars - gen *localVarGenerator - errs []*Error -} - -func (vis *ruleArgLocalRewriter) Visit(x interface{}) Visitor { - - t, ok := x.(*Term) - if !ok { - return vis - } - - switch v := t.Value.(type) { - case Var: - gv, ok := vis.stack.Declared(v) - if ok { - vis.stack.Seen(v) - } else { - gv = vis.gen.Generate() - vis.stack.Insert(v, gv, argVar) - } - t.Value = gv - return nil - case *object: - if cpy, err := v.Map(func(k, v *Term) (*Term, *Term, error) { - vcpy := v.Copy() - Walk(vis, vcpy) - return k, vcpy, nil - }); err != nil { - vis.errs = append(vis.errs, NewError(CompileErr, t.Location, err.Error())) - } else { - t.Value = cpy - } - return nil - case Null, Boolean, Number, String, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Set: - // Scalars are no-ops. Comprehensions are handled above. Sets must not - // contain variables. - return nil - case Call: - vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "rule arguments cannot contain calls")) - return nil - default: - // Recurse on refs and arrays. Any embedded - // variables can be rewritten. - return vis - } -} - -func (c *Compiler) rewriteWithModifiers() { - f := newEqualityFactory(c.localvargen) - for _, name := range c.sorted { - mod := c.Modules[name] - t := NewGenericTransformer(func(x interface{}) (interface{}, error) { - body, ok := x.(Body) - if !ok { - return x, nil - } - body, err := rewriteWithModifiersInBody(c, c.unsafeBuiltinsMap, f, body) - if err != nil { - c.err(err) - } - - return body, nil - }) - _, _ = Transform(t, mod) // ignore error - } -} - -func (c *Compiler) setModuleTree() { - c.ModuleTree = NewModuleTree(c.Modules) -} - -func (c *Compiler) setRuleTree() { - c.RuleTree = NewRuleTree(c.ModuleTree) -} - -func (c *Compiler) setGraph() { - list := func(r Ref) []*Rule { - return c.GetRulesDynamicWithOpts(r, RulesOptions{IncludeHiddenModules: true}) - } - c.Graph = NewGraph(c.Modules, list) -} - -type queryCompiler struct { - compiler *Compiler - qctx *QueryContext - typeEnv *TypeEnv - rewritten map[Var]Var - after map[string][]QueryCompilerStageDefinition - unsafeBuiltins map[string]struct{} - comprehensionIndices map[*Term]*ComprehensionIndex - enablePrintStatements bool -} - -func newQueryCompiler(compiler *Compiler) QueryCompiler { - qc := &queryCompiler{ - compiler: compiler, - qctx: nil, - after: map[string][]QueryCompilerStageDefinition{}, - comprehensionIndices: map[*Term]*ComprehensionIndex{}, - } - return qc -} - -func (qc *queryCompiler) WithStrict(strict bool) QueryCompiler { - qc.compiler.WithStrict(strict) - return qc -} - -func (qc *queryCompiler) WithEnablePrintStatements(yes bool) QueryCompiler { - qc.enablePrintStatements = yes - return qc -} - -func (qc *queryCompiler) WithContext(qctx *QueryContext) QueryCompiler { - qc.qctx = qctx - return qc -} - -func (qc *queryCompiler) WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler { - qc.after[after] = append(qc.after[after], stage) - return qc -} - -func (qc *queryCompiler) WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler { - qc.unsafeBuiltins = unsafe - return qc -} - -func (qc *queryCompiler) RewrittenVars() map[Var]Var { - return qc.rewritten -} - -func (qc *queryCompiler) ComprehensionIndex(term *Term) *ComprehensionIndex { - if result, ok := qc.comprehensionIndices[term]; ok { - return result - } else if result, ok := qc.compiler.comprehensionIndices[term]; ok { - return result - } 
- return nil -} - -func (qc *queryCompiler) runStage(metricName string, qctx *QueryContext, query Body, s func(*QueryContext, Body) (Body, error)) (Body, error) { - if qc.compiler.metrics != nil { - qc.compiler.metrics.Timer(metricName).Start() - defer qc.compiler.metrics.Timer(metricName).Stop() - } - return s(qctx, query) -} - -func (qc *queryCompiler) runStageAfter(metricName string, query Body, s QueryCompilerStage) (Body, error) { - if qc.compiler.metrics != nil { - qc.compiler.metrics.Timer(metricName).Start() - defer qc.compiler.metrics.Timer(metricName).Stop() - } - return s(qc, query) -} - -type queryStage = struct { - name string - metricName string - f func(*QueryContext, Body) (Body, error) -} - -func (qc *queryCompiler) Compile(query Body) (Body, error) { - if len(query) == 0 { - return nil, Errors{NewError(CompileErr, nil, "empty query cannot be compiled")} - } - - query = query.Copy() - - stages := []queryStage{ - {"CheckKeywordOverrides", "query_compile_stage_check_keyword_overrides", qc.checkKeywordOverrides}, - {"ResolveRefs", "query_compile_stage_resolve_refs", qc.resolveRefs}, - {"RewriteLocalVars", "query_compile_stage_rewrite_local_vars", qc.rewriteLocalVars}, - {"CheckVoidCalls", "query_compile_stage_check_void_calls", qc.checkVoidCalls}, - {"RewritePrintCalls", "query_compile_stage_rewrite_print_calls", qc.rewritePrintCalls}, - {"RewriteExprTerms", "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms}, - {"RewriteComprehensionTerms", "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms}, - {"RewriteWithValues", "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers}, - {"CheckUndefinedFuncs", "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs}, - {"CheckSafety", "query_compile_stage_check_safety", qc.checkSafety}, - {"RewriteDynamicTerms", "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms}, - {"CheckTypes", "query_compile_stage_check_types", qc.checkTypes}, - {"CheckUnsafeBuiltins", "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins}, - {"CheckDeprecatedBuiltins", "query_compile_stage_check_deprecated_builtins", qc.checkDeprecatedBuiltins}, - } - if qc.compiler.evalMode == EvalModeTopdown { - stages = append(stages, queryStage{"BuildComprehensionIndex", "query_compile_stage_build_comprehension_index", qc.buildComprehensionIndices}) - } - - qctx := qc.qctx.Copy() - - for _, s := range stages { - var err error - query, err = qc.runStage(s.metricName, qctx, query, s.f) - if err != nil { - return nil, qc.applyErrorLimit(err) - } - for _, s := range qc.after[s.name] { - query, err = qc.runStageAfter(s.MetricName, query, s.Stage) - if err != nil { - return nil, qc.applyErrorLimit(err) - } - } - } - - return query, nil -} - -func (qc *queryCompiler) TypeEnv() *TypeEnv { - return qc.typeEnv -} - -func (qc *queryCompiler) applyErrorLimit(err error) error { - var errs Errors - if errors.As(err, &errs) { - if qc.compiler.maxErrs > 0 && len(errs) > qc.compiler.maxErrs { - err = append(errs[:qc.compiler.maxErrs], errLimitReached) - } - } - return err -} - -func (qc *queryCompiler) checkKeywordOverrides(_ *QueryContext, body Body) (Body, error) { - if qc.compiler.strict { - if errs := checkRootDocumentOverrides(body); len(errs) > 0 { - return nil, errs - } - } - return body, nil -} - -func (qc *queryCompiler) resolveRefs(qctx *QueryContext, body Body) (Body, error) { - - var globals map[Var]*usedRef - - if qctx != nil { - pkg := qctx.Package - // Query compiler ought to generate a 
package if one was not provided and one or more imports were provided. - // The generated package name could even be an empty string to avoid conflicts (it doesn't have to be valid syntactically) - if pkg == nil && len(qctx.Imports) > 0 { - pkg = &Package{Path: RefTerm(VarTerm("")).Value.(Ref)} - } - if pkg != nil { - var ruleExports []Ref - rules := qc.compiler.getExports() - if exist, ok := rules.Get(pkg.Path); ok { - ruleExports = exist.([]Ref) - } - - globals = getGlobals(qctx.Package, ruleExports, qctx.Imports) - qctx.Imports = nil - } - } - - ignore := &declaredVarStack{declaredVars(body)} - - return resolveRefsInBody(globals, ignore, body), nil -} - -func (qc *queryCompiler) rewriteComprehensionTerms(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - f := newEqualityFactory(gen) - node, err := rewriteComprehensionTerms(f, body) - if err != nil { - return nil, err - } - return node.(Body), nil -} - -func (qc *queryCompiler) rewriteDynamicTerms(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - f := newEqualityFactory(gen) - return rewriteDynamics(f, body), nil -} - -func (qc *queryCompiler) rewriteExprTerms(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - return rewriteExprTermsInBody(gen, body), nil -} - -func (qc *queryCompiler) rewriteLocalVars(_ *QueryContext, body Body) (Body, error) { - gen := newLocalVarGenerator("q", body) - stack := newLocalDeclaredVars() - body, _, err := rewriteLocalVars(gen, stack, nil, body, qc.compiler.strict) - if len(err) != 0 { - return nil, err - } - qc.rewritten = make(map[Var]Var, len(stack.rewritten)) - for k, v := range stack.rewritten { - // The vars returned during the rewrite will include all seen vars, - // even if they're not declared with an assignment operation. We don't - // want to include these inside the rewritten set though. - qc.rewritten[k] = v - } - return body, nil -} - -func (qc *queryCompiler) rewritePrintCalls(_ *QueryContext, body Body) (Body, error) { - if !qc.enablePrintStatements { - _, cpy := erasePrintCallsInBody(body) - return cpy, nil - } - gen := newLocalVarGenerator("q", body) - if _, errs := rewritePrintCalls(gen, qc.compiler.GetArity, ReservedVars, body); len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) checkVoidCalls(_ *QueryContext, body Body) (Body, error) { - if errs := checkVoidCalls(qc.compiler.TypeEnv, body); len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) checkUndefinedFuncs(_ *QueryContext, body Body) (Body, error) { - if errs := checkUndefinedFuncs(qc.compiler.TypeEnv, body, qc.compiler.GetArity, qc.rewritten); len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) checkSafety(_ *QueryContext, body Body) (Body, error) { - safe := ReservedVars.Copy() - reordered, unsafe := reorderBodyForSafety(qc.compiler.builtins, qc.compiler.GetArity, safe, body) - if errs := safetyErrorSlice(unsafe, qc.RewrittenVars()); len(errs) > 0 { - return nil, errs - } - return reordered, nil -} - -func (qc *queryCompiler) checkTypes(_ *QueryContext, body Body) (Body, error) { - var errs Errors - checker := newTypeChecker(). - WithSchemaSet(qc.compiler.schemaSet). - WithInputType(qc.compiler.inputType). 
- WithVarRewriter(rewriteVarsInRef(qc.rewritten, qc.compiler.RewrittenVars)) - qc.typeEnv, errs = checker.CheckBody(qc.compiler.TypeEnv, body) - if len(errs) > 0 { - return nil, errs - } - - return body, nil -} - -func (qc *queryCompiler) checkUnsafeBuiltins(_ *QueryContext, body Body) (Body, error) { - errs := checkUnsafeBuiltins(qc.unsafeBuiltinsMap(), body) - if len(errs) > 0 { - return nil, errs - } - return body, nil -} - -func (qc *queryCompiler) unsafeBuiltinsMap() map[string]struct{} { - if qc.unsafeBuiltins != nil { - return qc.unsafeBuiltins - } - return qc.compiler.unsafeBuiltinsMap -} - -func (qc *queryCompiler) checkDeprecatedBuiltins(_ *QueryContext, body Body) (Body, error) { - if qc.compiler.strict { - errs := checkDeprecatedBuiltins(qc.compiler.deprecatedBuiltinsMap, body) - if len(errs) > 0 { - return nil, errs - } - } - return body, nil -} - -func (qc *queryCompiler) rewriteWithModifiers(_ *QueryContext, body Body) (Body, error) { - f := newEqualityFactory(newLocalVarGenerator("q", body)) - body, err := rewriteWithModifiersInBody(qc.compiler, qc.unsafeBuiltinsMap(), f, body) - if err != nil { - return nil, Errors{err} - } - return body, nil -} - -func (qc *queryCompiler) buildComprehensionIndices(_ *QueryContext, body Body) (Body, error) { - // NOTE(tsandall): The query compiler does not have a metrics object so we - // cannot record index metrics currently. - _ = buildComprehensionIndices(qc.compiler.debug, qc.compiler.GetArity, ReservedVars, qc.RewrittenVars(), body, qc.comprehensionIndices) - return body, nil -} - -// ComprehensionIndex specifies how the comprehension term can be indexed. The keys -// tell the evaluator what variables to use for indexing. In the future, the index -// could be expanded with more information that would allow the evaluator to index -// a larger fragment of comprehensions (e.g., by closing over variables in the outer -// query.) -type ComprehensionIndex struct { - Term *Term - Keys []*Term -} - -func (ci *ComprehensionIndex) String() string { - if ci == nil { - return "" - } - return fmt.Sprintf("", NewArray(ci.Keys...)) -} - -func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, node interface{}, result map[*Term]*ComprehensionIndex) uint64 { - var n uint64 - cpy := candidates.Copy() - WalkBodies(node, func(b Body) bool { - for _, expr := range b { - index := getComprehensionIndex(dbg, arity, cpy, rwVars, expr) - if index != nil { - result[index.Term] = index - n++ - } - // Any variables appearing in the expressions leading up to the comprehension - // are fair-game to be used as index keys. - cpy.Update(expr.Vars(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true})) - } - return false - }) - return n -} - -func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, expr *Expr) *ComprehensionIndex { - - // Ignore everything except = expressions. Extract - // the comprehension term from the expression. - if !expr.IsEquality() || expr.Negated || len(expr.With) > 0 { - // No debug message, these are assumed to be known hinderances - // to comprehension indexing. 
- return nil - } - - var term *Term - - lhs, rhs := expr.Operand(0), expr.Operand(1) - - if _, ok := lhs.Value.(Var); ok && IsComprehension(rhs.Value) { - term = rhs - } else if _, ok := rhs.Value.(Var); ok && IsComprehension(lhs.Value) { - term = lhs - } - - if term == nil { - // no debug for this, it's the ordinary "nothing to do here" case - return nil - } - - // Ignore comprehensions that contain expressions that close over variables - // in the outer body if those variables are not also output variables in the - // comprehension body. In other words, ignore comprehensions that we cannot - // safely evaluate without bindings from the outer body. For example: - // - // x = [1] - // [true | data.y[z] = x] # safe to evaluate w/o outer body - // [true | data.y[z] = x[0]] # NOT safe to evaluate because 'x' would be unsafe. - // - // By identifying output variables in the body we also know what to index on by - // intersecting with candidate variables from the outer query. - // - // For example: - // - // x = data.foo[_] - // _ = [y | data.bar[y] = x] # index on 'x' - // - // This query goes from O(data.foo*data.bar) to O(data.foo+data.bar). - var body Body - - switch x := term.Value.(type) { - case *ArrayComprehension: - body = x.Body - case *SetComprehension: - body = x.Body - case *ObjectComprehension: - body = x.Body - } - - outputs := outputVarsForBody(body, arity, ReservedVars) - unsafe := body.Vars(SafetyCheckVisitorParams).Diff(outputs).Diff(ReservedVars) - - if len(unsafe) > 0 { - dbg.Printf("%s: comprehension index: unsafe vars: %v", expr.Location, unsafe) - return nil - } - - // Similarly, ignore comprehensions that contain references with output variables - // that intersect with the candidates. Indexing these comprehensions could worsen - // performance. - regressionVis := newComprehensionIndexRegressionCheckVisitor(candidates) - regressionVis.Walk(body) - if regressionVis.worse { - dbg.Printf("%s: comprehension index: output vars intersect candidates", expr.Location) - return nil - } - - // Check if any nested comprehensions close over candidates. If any intersection is found - // the comprehension cannot be cached because it would require closing over the candidates - // which the evaluator does not support today. - nestedVis := newComprehensionIndexNestedCandidateVisitor(candidates) - nestedVis.Walk(body) - if nestedVis.found { - dbg.Printf("%s: comprehension index: nested comprehensions close over candidates", expr.Location) - return nil - } - - // Make a sorted set of variable names that will serve as the index key set. - // Sort to ensure deterministic indexing. In future this could be relaxed - // if we can decide that one ordering is better than another. If the set is - // empty, there is no indexing to do. 
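	// For example (an illustrative sketch, not part of the vendored source):
	// with outer-query candidates {x} and comprehension output vars {x, y},
	// the key set computed below is the intersection {x}, so the evaluator
	// can build the comprehension result once per distinct value of x and
	// reuse it instead of re-evaluating the comprehension body.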
- indexVars := candidates.Intersect(outputs) - if len(indexVars) == 0 { - dbg.Printf("%s: comprehension index: no index vars", expr.Location) - return nil - } - - result := make([]*Term, 0, len(indexVars)) - - for v := range indexVars { - result = append(result, NewTerm(v)) - } - - sort.Slice(result, func(i, j int) bool { - return result[i].Value.Compare(result[j].Value) < 0 - }) - - debugRes := make([]*Term, len(result)) - for i, r := range result { - if o, ok := rwVars[r.Value.(Var)]; ok { - debugRes[i] = NewTerm(o) - } else { - debugRes[i] = r - } - } - dbg.Printf("%s: comprehension index: built with keys: %v", expr.Location, debugRes) - return &ComprehensionIndex{Term: term, Keys: result} -} - -type comprehensionIndexRegressionCheckVisitor struct { - candidates VarSet - seen VarSet - worse bool -} - -// TODO(tsandall): Improve this so that users can either supply this list explicitly -// or the information is maintained on the built-in function declaration. What we really -// need to know is whether the built-in function allows callers to push down output -// values or not. It's unlikely that anything outside of OPA does this today so this -// solution is fine for now. -var comprehensionIndexBlacklist = map[string]int{ - WalkBuiltin.Name: len(WalkBuiltin.Decl.FuncArgs().Args), -} - -func newComprehensionIndexRegressionCheckVisitor(candidates VarSet) *comprehensionIndexRegressionCheckVisitor { - return &comprehensionIndexRegressionCheckVisitor{ - candidates: candidates, - seen: NewVarSet(), - } -} - -func (vis *comprehensionIndexRegressionCheckVisitor) Walk(x interface{}) { - NewGenericVisitor(vis.visit).Walk(x) -} - -func (vis *comprehensionIndexRegressionCheckVisitor) visit(x interface{}) bool { - if !vis.worse { - switch x := x.(type) { - case *Expr: - operands := x.Operands() - if pos := comprehensionIndexBlacklist[x.Operator().String()]; pos > 0 && pos < len(operands) { - vis.assertEmptyIntersection(operands[pos].Vars()) - } - case Ref: - vis.assertEmptyIntersection(x.OutputVars()) - case Var: - vis.seen.Add(x) - // Always skip comprehensions. We do not have to visit their bodies here. - case *ArrayComprehension, *SetComprehension, *ObjectComprehension: - return true - } - } - return vis.worse -} - -func (vis *comprehensionIndexRegressionCheckVisitor) assertEmptyIntersection(vs VarSet) { - for v := range vs { - if vis.candidates.Contains(v) && !vis.seen.Contains(v) { - vis.worse = true - return - } - } -} - -type comprehensionIndexNestedCandidateVisitor struct { - candidates VarSet - found bool -} - -func newComprehensionIndexNestedCandidateVisitor(candidates VarSet) *comprehensionIndexNestedCandidateVisitor { - return &comprehensionIndexNestedCandidateVisitor{ - candidates: candidates, - } -} - -func (vis *comprehensionIndexNestedCandidateVisitor) Walk(x interface{}) { - NewGenericVisitor(vis.visit).Walk(x) -} - -func (vis *comprehensionIndexNestedCandidateVisitor) visit(x interface{}) bool { - - if vis.found { - return true - } - - if v, ok := x.(Value); ok && IsComprehension(v) { - varVis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) - varVis.Walk(v) - vis.found = len(varVis.Vars().Intersect(vis.candidates)) > 0 - return true - } - - return false -} - -// ModuleTreeNode represents a node in the module tree. The module -// tree is keyed by the package path. 
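//
// For example (an illustrative sketch, not part of the vendored source), two
// modules in packages a.b and a.c share the intermediate node for "a" under
// the root and contribute one module each (using the package's
// MustParseModule helper):
//
//	mods := map[string]*Module{
//		"x.rego": MustParseModule("package a.b"),
//		"y.rego": MustParseModule("package a.c"),
//	}
//	tree := NewModuleTree(mods) // tree.Size() == 2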
-type ModuleTreeNode struct { - Key Value - Modules []*Module - Children map[Value]*ModuleTreeNode - Hide bool -} - -func (n *ModuleTreeNode) String() string { - var rules []string - for _, m := range n.Modules { - for _, r := range m.Rules { - rules = append(rules, r.Head.String()) - } - } - return fmt.Sprintf("", n.Key, n.Children, rules, n.Hide) -} - -// NewModuleTree returns a new ModuleTreeNode that represents the root -// of the module tree populated with the given modules. -func NewModuleTree(mods map[string]*Module) *ModuleTreeNode { - root := &ModuleTreeNode{ - Children: map[Value]*ModuleTreeNode{}, - } - names := make([]string, 0, len(mods)) - for name := range mods { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - m := mods[name] - node := root - for i, x := range m.Package.Path { - c, ok := node.Children[x.Value] - if !ok { - var hide bool - if i == 1 && x.Value.Compare(SystemDocumentKey) == 0 { - hide = true - } - c = &ModuleTreeNode{ - Key: x.Value, - Children: map[Value]*ModuleTreeNode{}, - Hide: hide, - } - node.Children[x.Value] = c - } - node = c - } - node.Modules = append(node.Modules, m) - } - return root -} - -// Size returns the number of modules in the tree. -func (n *ModuleTreeNode) Size() int { - s := len(n.Modules) - for _, c := range n.Children { - s += c.Size() - } - return s -} - -// Child returns n's child with key k. -func (n *ModuleTreeNode) child(k Value) *ModuleTreeNode { - switch k.(type) { - case String, Var: - return n.Children[k] - } - return nil -} - -// Find dereferences ref along the tree. ref[0] is converted to a String -// for convenience. -func (n *ModuleTreeNode) find(ref Ref) (*ModuleTreeNode, Ref) { - if v, ok := ref[0].Value.(Var); ok { - ref = Ref{StringTerm(string(v))}.Concat(ref[1:]) - } - node := n - for i, r := range ref { - next := node.child(r.Value) - if next == nil { - tail := make(Ref, len(ref)-i) - tail[0] = VarTerm(string(ref[i].Value.(String))) - copy(tail[1:], ref[i+1:]) - return node, tail - } - node = next - } - return node, nil -} - -// DepthFirst performs a depth-first traversal of the module tree rooted at n. -// If f returns true, traversal will not continue to the children of n. -func (n *ModuleTreeNode) DepthFirst(f func(*ModuleTreeNode) bool) { - if f(n) { - return - } - for _, node := range n.Children { - node.DepthFirst(f) - } -} - -// TreeNode represents a node in the rule tree. The rule tree is keyed by -// rule path. -type TreeNode struct { - Key Value - Values []util.T - Children map[Value]*TreeNode - Sorted []Value - Hide bool -} - -func (n *TreeNode) String() string { - return fmt.Sprintf("", n.Key, n.Values, n.Sorted, n.Hide) -} - -// NewRuleTree returns a new TreeNode that represents the root -// of the rule tree populated with the given rules. 
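//
// For example (an illustrative sketch, not part of the vendored source), a
// single module
//
//	package a
//
//	p := 1
//	q := 2
//
// produces leaf nodes for data.a.p and data.a.q, each holding the
// corresponding *Rule, so the resulting tree has Size() == 2. Packages that
// define no rules still contribute a value-less node for their package path.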
-func NewRuleTree(mtree *ModuleTreeNode) *TreeNode { - root := TreeNode{ - Key: mtree.Key, - } - - mtree.DepthFirst(func(m *ModuleTreeNode) bool { - for _, mod := range m.Modules { - if len(mod.Rules) == 0 { - root.add(mod.Package.Path, nil) - } - for _, rule := range mod.Rules { - root.add(rule.Ref().GroundPrefix(), rule) - } - } - return false - }) - - // ensure that data.system's TreeNode is hidden - node, tail := root.find(DefaultRootRef.Append(NewTerm(SystemDocumentKey))) - if len(tail) == 0 { // found - node.Hide = true - } - - root.DepthFirst(func(x *TreeNode) bool { - x.sort() - return false - }) - - return &root -} - -func (n *TreeNode) add(path Ref, rule *Rule) { - node, tail := n.find(path) - if len(tail) > 0 { - sub := treeNodeFromRef(tail, rule) - if node.Children == nil { - node.Children = make(map[Value]*TreeNode, 1) - } - node.Children[sub.Key] = sub - node.Sorted = append(node.Sorted, sub.Key) - } else { - if rule != nil { - node.Values = append(node.Values, rule) - } - } -} - -// Size returns the number of rules in the tree. -func (n *TreeNode) Size() int { - s := len(n.Values) - for _, c := range n.Children { - s += c.Size() - } - return s -} - -// Child returns n's child with key k. -func (n *TreeNode) Child(k Value) *TreeNode { - switch k.(type) { - case Ref, Call: - return nil - default: - return n.Children[k] - } -} - -// Find dereferences ref along the tree -func (n *TreeNode) Find(ref Ref) *TreeNode { - node := n - for _, r := range ref { - node = node.Child(r.Value) - if node == nil { - return nil - } - } - return node -} - -// Iteratively dereferences ref along the node's subtree. -// - If matching fails immediately, the tail will contain the full ref. -// - Partial matching will result in a tail of non-zero length. -// - A complete match will result in a 0 length tail. -func (n *TreeNode) find(ref Ref) (*TreeNode, Ref) { - node := n - for i := range ref { - next := node.Child(ref[i].Value) - if next == nil { - tail := make(Ref, len(ref)-i) - copy(tail, ref[i:]) - return node, tail - } - node = next - } - return node, nil -} - -// DepthFirst performs a depth-first traversal of the rule tree rooted at n. If -// f returns true, traversal will not continue to the children of n. -func (n *TreeNode) DepthFirst(f func(*TreeNode) bool) { - if f(n) { - return - } - for _, node := range n.Children { - node.DepthFirst(f) - } -} - -func (n *TreeNode) sort() { - sort.Slice(n.Sorted, func(i, j int) bool { - return n.Sorted[i].Compare(n.Sorted[j]) < 0 - }) -} - -func treeNodeFromRef(ref Ref, rule *Rule) *TreeNode { - depth := len(ref) - 1 - key := ref[depth].Value - node := &TreeNode{ - Key: key, - Children: nil, - } - if rule != nil { - node.Values = []util.T{rule} - } - - for i := len(ref) - 2; i >= 0; i-- { - key := ref[i].Value - node = &TreeNode{ - Key: key, - Children: map[Value]*TreeNode{ref[i+1].Value: node}, - Sorted: []Value{ref[i+1].Value}, - } - } - return node -} - -// flattenChildren flattens all children's rule refs into a sorted array. -func (n *TreeNode) flattenChildren() []Ref { - ret := newRefSet() - for _, sub := range n.Children { // we only want the children, so don't use n.DepthFirst() right away - sub.DepthFirst(func(x *TreeNode) bool { - for _, r := range x.Values { - rule := r.(*Rule) - ret.AddPrefix(rule.Ref()) - } - return false - }) - } - - sort.Slice(ret.s, func(i, j int) bool { - return ret.s[i].Compare(ret.s[j]) < 0 - }) - return ret.s -} - -// Graph represents the graph of dependencies between rules. 
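//
// For example (an illustrative sketch, not part of the vendored source): if
// rule p refers to data.q and rule q refers to data.r, the graph records the
// edges p -> q and q -> r, Dependencies(p) contains q, Dependents(r) contains
// q, and Sort returns the rules in dependency order (r before q before p).
// A caller with a compiled module set might build the graph as follows
// (assuming the compiler's GetRules lookup):
//
//	g := NewGraph(compiler.Modules, compiler.GetRules)
//	if _, ok := g.Sort(); !ok {
//		// a dependency cycle exists among the rules
//	}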
-type Graph struct { - adj map[util.T]map[util.T]struct{} - radj map[util.T]map[util.T]struct{} - nodes map[util.T]struct{} - sorted []util.T -} - -// NewGraph returns a new Graph based on modules. The list function must return -// the rules referred to directly by the ref. -func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph { - - graph := &Graph{ - adj: map[util.T]map[util.T]struct{}{}, - radj: map[util.T]map[util.T]struct{}{}, - nodes: map[util.T]struct{}{}, - sorted: nil, - } - - // Create visitor to walk a rule AST and add edges to the rule graph for - // each dependency. - vis := func(a *Rule) *GenericVisitor { - stop := false - return NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case Ref: - for _, b := range list(x) { - for node := b; node != nil; node = node.Else { - graph.addDependency(a, node) - } - } - case *Rule: - if stop { - // Do not recurse into else clauses (which will be handled - // by the outer visitor.) - return true - } - stop = true - } - return false - }) - } - - // Walk over all rules, add them to graph, and build adjacency lists. - for _, module := range modules { - WalkRules(module, func(a *Rule) bool { - graph.addNode(a) - vis(a).Walk(a) - return false - }) - } - - return graph -} - -// Dependencies returns the set of rules that x depends on. -func (g *Graph) Dependencies(x util.T) map[util.T]struct{} { - return g.adj[x] -} - -// Dependents returns the set of rules that depend on x. -func (g *Graph) Dependents(x util.T) map[util.T]struct{} { - return g.radj[x] -} - -// Sort returns a slice of rules sorted by dependencies. If a cycle is found, -// ok is set to false. -func (g *Graph) Sort() (sorted []util.T, ok bool) { - if g.sorted != nil { - return g.sorted, true - } - - sorter := &graphSort{ - sorted: make([]util.T, 0, len(g.nodes)), - deps: g.Dependencies, - marked: map[util.T]struct{}{}, - temp: map[util.T]struct{}{}, - } - - for node := range g.nodes { - if !sorter.Visit(node) { - return nil, false - } - } - - g.sorted = sorter.sorted - return g.sorted, true -} - -func (g *Graph) addDependency(u util.T, v util.T) { - - if _, ok := g.nodes[u]; !ok { - g.addNode(u) - } - - if _, ok := g.nodes[v]; !ok { - g.addNode(v) - } - - edges, ok := g.adj[u] - if !ok { - edges = map[util.T]struct{}{} - g.adj[u] = edges - } - - edges[v] = struct{}{} - - edges, ok = g.radj[v] - if !ok { - edges = map[util.T]struct{}{} - g.radj[v] = edges - } - - edges[u] = struct{}{} -} - -func (g *Graph) addNode(n util.T) { - g.nodes[n] = struct{}{} -} - -type graphSort struct { - sorted []util.T - deps func(util.T) map[util.T]struct{} - marked map[util.T]struct{} - temp map[util.T]struct{} -} - -func (sort *graphSort) Marked(node util.T) bool { - _, marked := sort.marked[node] - return marked -} - -func (sort *graphSort) Visit(node util.T) (ok bool) { - if _, ok := sort.temp[node]; ok { - return false - } - if sort.Marked(node) { - return true - } - sort.temp[node] = struct{}{} - for other := range sort.deps(node) { - if !sort.Visit(other) { - return false - } - } - sort.marked[node] = struct{}{} - delete(sort.temp, node) - sort.sorted = append(sort.sorted, node) - return true -} - -// GraphTraversal is a Traversal that understands the dependency graph -type GraphTraversal struct { - graph *Graph - visited map[util.T]struct{} -} - -// NewGraphTraversal returns a Traversal for the dependency graph -func NewGraphTraversal(graph *Graph) *GraphTraversal { - return &GraphTraversal{ - graph: graph, - visited: map[util.T]struct{}{}, - } -} - -// 
Edges lists all dependency connections for a given node -func (g *GraphTraversal) Edges(x util.T) []util.T { - r := []util.T{} - for v := range g.graph.Dependencies(x) { - r = append(r, v) - } - return r -} - -// Visited returns whether a node has been visited, setting a node to visited if not -func (g *GraphTraversal) Visited(u util.T) bool { - _, ok := g.visited[u] - g.visited[u] = struct{}{} - return ok -} - -type unsafePair struct { - Expr *Expr - Vars VarSet -} - -type unsafeVarLoc struct { - Var Var - Loc *Location -} - -type unsafeVars map[*Expr]VarSet - -func (vs unsafeVars) Add(e *Expr, v Var) { - if u, ok := vs[e]; ok { - u[v] = struct{}{} - } else { - vs[e] = VarSet{v: struct{}{}} - } -} - -func (vs unsafeVars) Set(e *Expr, s VarSet) { - vs[e] = s -} - -func (vs unsafeVars) Update(o unsafeVars) { - for k, v := range o { - if _, ok := vs[k]; !ok { - vs[k] = VarSet{} - } - vs[k].Update(v) - } -} - -func (vs unsafeVars) Vars() (result []unsafeVarLoc) { - - locs := map[Var]*Location{} - - // If var appears in multiple sets then pick first by location. - for expr, vars := range vs { - for v := range vars { - if locs[v].Compare(expr.Location) > 0 { - locs[v] = expr.Location - } - } - } - - for v, loc := range locs { - result = append(result, unsafeVarLoc{ - Var: v, - Loc: loc, - }) - } - - sort.Slice(result, func(i, j int) bool { - return result[i].Loc.Compare(result[j].Loc) < 0 - }) - - return result -} - -func (vs unsafeVars) Slice() (result []unsafePair) { - for expr, vs := range vs { - result = append(result, unsafePair{ - Expr: expr, - Vars: vs, - }) - } - return -} - -// reorderBodyForSafety returns a copy of the body ordered such that -// left to right evaluation of the body will not encounter unbound variables -// in input positions or negated expressions. -// -// Expressions are added to the re-ordered body as soon as they are considered -// safe. If multiple expressions become safe in the same pass, they are added -// in their original order. This results in minimal re-ordering of the body. -// -// If the body cannot be reordered to ensure safety, the second return value -// contains a mapping of expressions to unsafe variables in those expressions. -func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, globals VarSet, body Body) (Body, unsafeVars) { - - bodyVars := body.Vars(SafetyCheckVisitorParams) - reordered := make(Body, 0, len(body)) - safe := VarSet{} - unsafe := unsafeVars{} - - for _, e := range body { - for v := range e.Vars(SafetyCheckVisitorParams) { - if globals.Contains(v) { - safe.Add(v) - } else { - unsafe.Add(e, v) - } - } - } - - for { - n := len(reordered) - - for _, e := range body { - if reordered.Contains(e) { - continue - } - - ovs := outputVarsForExpr(e, arity, safe) - - // check closures: is this expression closing over variables that - // haven't been made safe by what's already included in `reordered`? 
- vs := unsafeVarsInClosures(e) - cv := vs.Intersect(bodyVars).Diff(globals) - uv := cv.Diff(outputVarsForBody(reordered, arity, safe)) - - if len(uv) > 0 { - if uv.Equal(ovs) { // special case "closure-self" - continue - } - unsafe.Set(e, uv) - } - - for v := range unsafe[e] { - if ovs.Contains(v) || safe.Contains(v) { - delete(unsafe[e], v) - } - } - - if len(unsafe[e]) == 0 { - delete(unsafe, e) - reordered.Append(e) - safe.Update(ovs) // this expression's outputs are safe - } - } - - if len(reordered) == n { // fixed point, could not add any expr of body - break - } - } - - // Recursively visit closures and perform the safety checks on them. - // Update the globals at each expression to include the variables that could - // be closed over. - g := globals.Copy() - for i, e := range reordered { - if i > 0 { - g.Update(reordered[i-1].Vars(SafetyCheckVisitorParams)) - } - xform := &bodySafetyTransformer{ - builtins: builtins, - arity: arity, - current: e, - globals: g, - unsafe: unsafe, - } - NewGenericVisitor(xform.Visit).Walk(e) - } - - return reordered, unsafe -} - -type bodySafetyTransformer struct { - builtins map[string]*Builtin - arity func(Ref) int - current *Expr - globals VarSet - unsafe unsafeVars -} - -func (xform *bodySafetyTransformer) Visit(x interface{}) bool { - switch term := x.(type) { - case *Term: - switch x := term.Value.(type) { - case *object: - cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { - kcpy := k.Copy() - NewGenericVisitor(xform.Visit).Walk(kcpy) - vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) - return kcpy, vcpy, nil - }) - term.Value = cpy - return true - case *set: - cpy, _ := x.Map(func(v *Term) (*Term, error) { - vcpy := v.Copy() - NewGenericVisitor(xform.Visit).Walk(vcpy) - return vcpy, nil - }) - term.Value = cpy - return true - case *ArrayComprehension: - xform.reorderArrayComprehensionSafety(x) - return true - case *ObjectComprehension: - xform.reorderObjectComprehensionSafety(x) - return true - case *SetComprehension: - xform.reorderSetComprehensionSafety(x) - return true - } - case *Expr: - if ev, ok := term.Terms.(*Every); ok { - xform.globals.Update(ev.KeyValueVars()) - ev.Body = xform.reorderComprehensionSafety(NewVarSet(), ev.Body) - return true - } - } - return false -} - -func (xform *bodySafetyTransformer) reorderComprehensionSafety(tv VarSet, body Body) Body { - bv := body.Vars(SafetyCheckVisitorParams) - bv.Update(xform.globals) - uv := tv.Diff(bv) - for v := range uv { - xform.unsafe.Add(xform.current, v) - } - - r, u := reorderBodyForSafety(xform.builtins, xform.arity, xform.globals, body) - if len(u) == 0 { - return r - } - - xform.unsafe.Update(u) - return body -} - -func (xform *bodySafetyTransformer) reorderArrayComprehensionSafety(ac *ArrayComprehension) { - ac.Body = xform.reorderComprehensionSafety(ac.Term.Vars(), ac.Body) -} - -func (xform *bodySafetyTransformer) reorderObjectComprehensionSafety(oc *ObjectComprehension) { - tv := oc.Key.Vars() - tv.Update(oc.Value.Vars()) - oc.Body = xform.reorderComprehensionSafety(tv, oc.Body) -} - -func (xform *bodySafetyTransformer) reorderSetComprehensionSafety(sc *SetComprehension) { - sc.Body = xform.reorderComprehensionSafety(sc.Term.Vars(), sc.Body) -} - -// unsafeVarsInClosures collects vars that are contained in closures within -// this expression. 
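//
// For example (an illustrative sketch, not part of the vendored source): in
// the body
//
//	count([v | v := xs[_]]) > 0; xs := [1, 2]
//
// the comprehension closes over xs, so reorderBodyForSafety cannot schedule
// the count expression until xs := [1, 2] has been appended; the vars
// collected here are what drive that decision.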
-func unsafeVarsInClosures(e *Expr) VarSet { - vs := VarSet{} - WalkClosures(e, func(x interface{}) bool { - vis := &VarVisitor{vars: vs} - if ev, ok := x.(*Every); ok { - vis.Walk(ev.Body) - return true - } - vis.Walk(x) - return true - }) - return vs -} - -// OutputVarsFromBody returns all variables which are the "output" for -// the given body. For safety checks this means that they would be -// made safe by the body. -func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet { - return outputVarsForBody(body, c.GetArity, safe) -} - -func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet) VarSet { - o := safe.Copy() - for _, e := range body { - o.Update(outputVarsForExpr(e, arity, o)) - } - return o.Diff(safe) -} - -// OutputVarsFromExpr returns all variables which are the "output" for -// the given expression. For safety checks this means that they would be -// made safe by the expr. -func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet { - return outputVarsForExpr(expr, c.GetArity, safe) -} - -func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet) VarSet { - - // Negated expressions must be safe. - if expr.Negated { - return VarSet{} - } - - // With modifier inputs must be safe. - for _, with := range expr.With { - vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) - vis.Walk(with) - vars := vis.Vars() - unsafe := vars.Diff(safe) - if len(unsafe) > 0 { - return VarSet{} - } - } - - switch terms := expr.Terms.(type) { - case *Term: - return outputVarsForTerms(expr, safe) - case []*Term: - if expr.IsEquality() { - return outputVarsForExprEq(expr, safe) - } - - operator, ok := terms[0].Value.(Ref) - if !ok { - return VarSet{} - } - - ar := arity(operator) - if ar < 0 { - return VarSet{} - } - - return outputVarsForExprCall(expr, ar, safe, terms) - case *Every: - return outputVarsForTerms(terms.Domain, safe) - default: - panic("illegal expression") - } -} - -func outputVarsForExprEq(expr *Expr, safe VarSet) VarSet { - - if !validEqAssignArgCount(expr) { - return safe - } - - output := outputVarsForTerms(expr, safe) - output.Update(safe) - output.Update(Unify(output, expr.Operand(0), expr.Operand(1))) - - return output.Diff(safe) -} - -func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term) VarSet { - - output := outputVarsForTerms(expr, safe) - - numInputTerms := arity + 1 - if numInputTerms >= len(terms) { - return output - } - - params := VarVisitorParams{ - SkipClosures: true, - SkipSets: true, - SkipObjectKeys: true, - SkipRefHead: true, - } - vis := NewVarVisitor().WithParams(params) - vis.Walk(Args(terms[:numInputTerms])) - unsafe := vis.Vars().Diff(output).Diff(safe) - - if len(unsafe) > 0 { - return VarSet{} - } - - vis = NewVarVisitor().WithParams(params) - vis.Walk(Args(terms[numInputTerms:])) - output.Update(vis.vars) - return output -} - -func outputVarsForTerms(expr interface{}, safe VarSet) VarSet { - output := VarSet{} - WalkTerms(expr, func(x *Term) bool { - switch r := x.Value.(type) { - case *SetComprehension, *ArrayComprehension, *ObjectComprehension: - return true - case Ref: - if !isRefSafe(r, safe) { - return true - } - output.Update(r.OutputVars()) - return false - } - return false - }) - return output -} - -type equalityFactory struct { - gen *localVarGenerator -} - -func newEqualityFactory(gen *localVarGenerator) *equalityFactory { - return &equalityFactory{gen} -} - -func (f *equalityFactory) Generate(other *Term) *Expr { - term := 
NewTerm(f.gen.Generate()).SetLocation(other.Location) - expr := Equality.Expr(term, other) - expr.Generated = true - expr.Location = other.Location - return expr -} - -type localVarGenerator struct { - exclude VarSet - suffix string - next int -} - -func newLocalVarGeneratorForModuleSet(sorted []string, modules map[string]*Module) *localVarGenerator { - exclude := NewVarSet() - vis := &VarVisitor{vars: exclude} - for _, key := range sorted { - vis.Walk(modules[key]) - } - return &localVarGenerator{exclude: exclude, next: 0} -} - -func newLocalVarGenerator(suffix string, node interface{}) *localVarGenerator { - exclude := NewVarSet() - vis := &VarVisitor{vars: exclude} - vis.Walk(node) - return &localVarGenerator{exclude: exclude, suffix: suffix, next: 0} -} - -func (l *localVarGenerator) Generate() Var { - for { - result := Var("__local" + l.suffix + strconv.Itoa(l.next) + "__") - l.next++ - if !l.exclude.Contains(result) { - return result - } - } -} - -func getGlobals(pkg *Package, rules []Ref, imports []*Import) map[Var]*usedRef { - - globals := make(map[Var]*usedRef, len(rules)) // NB: might grow bigger with imports - - // Populate globals with exports within the package. - for _, ref := range rules { - v := ref[0].Value.(Var) - globals[v] = &usedRef{ref: pkg.Path.Append(StringTerm(string(v)))} - } - - // Populate globals with imports. - for _, imp := range imports { - path := imp.Path.Value.(Ref) - if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) { - continue // ignore future and rego imports - } - globals[imp.Name()] = &usedRef{ref: path} - } - - return globals -} - -func requiresEval(x *Term) bool { - if x == nil { - return false - } - return ContainsRefs(x) || ContainsComprehensions(x) -} - -func resolveRef(globals map[Var]*usedRef, ignore *declaredVarStack, ref Ref) Ref { - - r := Ref{} - for i, x := range ref { - switch v := x.Value.(type) { - case Var: - if g, ok := globals[v]; ok && !ignore.Contains(v) { - cpy := g.ref.Copy() - for i := range cpy { - cpy[i].SetLocation(x.Location) - } - if i == 0 { - r = cpy - } else { - r = append(r, NewTerm(cpy).SetLocation(x.Location)) - } - g.used = true - } else { - r = append(r, x) - } - case Ref, *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: - r = append(r, resolveRefsInTerm(globals, ignore, x)) - default: - r = append(r, x) - } - } - - return r -} - -type usedRef struct { - ref Ref - used bool -} - -func resolveRefsInRule(globals map[Var]*usedRef, rule *Rule) error { - ignore := &declaredVarStack{} - - vars := NewVarSet() - var vis *GenericVisitor - var err error - - // Walk args to collect vars and transform body so that callers can shadow - // root documents. - vis = NewGenericVisitor(func(x interface{}) bool { - if err != nil { - return true - } - switch x := x.(type) { - case Var: - vars.Add(x) - - // Object keys cannot be pattern matched so only walk values. - case *object: - x.Foreach(func(_, v *Term) { - vis.Walk(v) - }) - - // Skip terms that could contain vars that cannot be pattern matched. - case Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: - return true - - case *Term: - if _, ok := x.Value.(Ref); ok { - if RootDocumentRefs.Contains(x) { - // We could support args named input, data, etc. however - // this would require rewriting terms in the head and body. - // Preventing root document shadowing is simpler, and - // arguably, will prevent confusing names from being used. 
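					// For example (an illustrative sketch, not part of the
					// vendored source), a function such as
					//
					//	f(data) := 1
					//
					// is rejected here with "args must not shadow data (use a
					// different variable name)".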
- // NOTE: this check is also performed as part of strict-mode in - // checkRootDocumentOverrides. - err = fmt.Errorf("args must not shadow %v (use a different variable name)", x) - return true - } - } - } - return false - }) - - vis.Walk(rule.Head.Args) - - if err != nil { - return err - } - - ignore.Push(vars) - ignore.Push(declaredVars(rule.Body)) - - ref := rule.Head.Ref() - for i := 1; i < len(ref); i++ { - ref[i] = resolveRefsInTerm(globals, ignore, ref[i]) - } - if rule.Head.Key != nil { - rule.Head.Key = resolveRefsInTerm(globals, ignore, rule.Head.Key) - } - - if rule.Head.Value != nil { - rule.Head.Value = resolveRefsInTerm(globals, ignore, rule.Head.Value) - } - - rule.Body = resolveRefsInBody(globals, ignore, rule.Body) - return nil -} - -func resolveRefsInBody(globals map[Var]*usedRef, ignore *declaredVarStack, body Body) Body { - r := make([]*Expr, 0, len(body)) - for _, expr := range body { - r = append(r, resolveRefsInExpr(globals, ignore, expr)) - } - return r -} - -func resolveRefsInExpr(globals map[Var]*usedRef, ignore *declaredVarStack, expr *Expr) *Expr { - cpy := *expr - switch ts := expr.Terms.(type) { - case *Term: - cpy.Terms = resolveRefsInTerm(globals, ignore, ts) - case []*Term: - buf := make([]*Term, len(ts)) - for i := 0; i < len(ts); i++ { - buf[i] = resolveRefsInTerm(globals, ignore, ts[i]) - } - cpy.Terms = buf - case *SomeDecl: - if val, ok := ts.Symbols[0].Value.(Call); ok { - cpy.Terms = &SomeDecl{Symbols: []*Term{CallTerm(resolveRefsInTermSlice(globals, ignore, val)...)}} - } - case *Every: - locals := NewVarSet() - if ts.Key != nil { - locals.Update(ts.Key.Vars()) - } - locals.Update(ts.Value.Vars()) - ignore.Push(locals) - cpy.Terms = &Every{ - Key: ts.Key.Copy(), // TODO(sr): do more? - Value: ts.Value.Copy(), // TODO(sr): do more? - Domain: resolveRefsInTerm(globals, ignore, ts.Domain), - Body: resolveRefsInBody(globals, ignore, ts.Body), - } - ignore.Pop() - } - for _, w := range cpy.With { - w.Target = resolveRefsInTerm(globals, ignore, w.Target) - w.Value = resolveRefsInTerm(globals, ignore, w.Value) - } - return &cpy -} - -func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term *Term) *Term { - switch v := term.Value.(type) { - case Var: - if g, ok := globals[v]; ok && !ignore.Contains(v) { - cpy := g.ref.Copy() - for i := range cpy { - cpy[i].SetLocation(term.Location) - } - g.used = true - return NewTerm(cpy).SetLocation(term.Location) - } - return term - case Ref: - fqn := resolveRef(globals, ignore, v) - cpy := *term - cpy.Value = fqn - return &cpy - case *object: - cpy := *term - cpy.Value, _ = v.Map(func(k, v *Term) (*Term, *Term, error) { - k = resolveRefsInTerm(globals, ignore, k) - v = resolveRefsInTerm(globals, ignore, v) - return k, v, nil - }) - return &cpy - case *Array: - cpy := *term - cpy.Value = NewArray(resolveRefsInTermArray(globals, ignore, v)...) 
- return &cpy - case Call: - cpy := *term - cpy.Value = Call(resolveRefsInTermSlice(globals, ignore, v)) - return &cpy - case Set: - s, _ := v.Map(func(e *Term) (*Term, error) { - return resolveRefsInTerm(globals, ignore, e), nil - }) - cpy := *term - cpy.Value = s - return &cpy - case *ArrayComprehension: - ac := &ArrayComprehension{} - ignore.Push(declaredVars(v.Body)) - ac.Term = resolveRefsInTerm(globals, ignore, v.Term) - ac.Body = resolveRefsInBody(globals, ignore, v.Body) - cpy := *term - cpy.Value = ac - ignore.Pop() - return &cpy - case *ObjectComprehension: - oc := &ObjectComprehension{} - ignore.Push(declaredVars(v.Body)) - oc.Key = resolveRefsInTerm(globals, ignore, v.Key) - oc.Value = resolveRefsInTerm(globals, ignore, v.Value) - oc.Body = resolveRefsInBody(globals, ignore, v.Body) - cpy := *term - cpy.Value = oc - ignore.Pop() - return &cpy - case *SetComprehension: - sc := &SetComprehension{} - ignore.Push(declaredVars(v.Body)) - sc.Term = resolveRefsInTerm(globals, ignore, v.Term) - sc.Body = resolveRefsInBody(globals, ignore, v.Body) - cpy := *term - cpy.Value = sc - ignore.Pop() - return &cpy - default: - return term - } -} - -func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack, terms *Array) []*Term { - cpy := make([]*Term, terms.Len()) - for i := 0; i < terms.Len(); i++ { - cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i)) - } - return cpy -} - -func resolveRefsInTermSlice(globals map[Var]*usedRef, ignore *declaredVarStack, terms []*Term) []*Term { - cpy := make([]*Term, len(terms)) - for i := 0; i < len(terms); i++ { - cpy[i] = resolveRefsInTerm(globals, ignore, terms[i]) - } - return cpy -} - -type declaredVarStack []VarSet - -func (s declaredVarStack) Contains(v Var) bool { - for i := len(s) - 1; i >= 0; i-- { - if _, ok := s[i][v]; ok { - return ok - } - } - return false -} - -func (s declaredVarStack) Add(v Var) { - s[len(s)-1].Add(v) -} - -func (s *declaredVarStack) Push(vs VarSet) { - *s = append(*s, vs) -} - -func (s *declaredVarStack) Pop() { - curr := *s - *s = curr[:len(curr)-1] -} - -func declaredVars(x interface{}) VarSet { - vars := NewVarSet() - vis := NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case *Expr: - if x.IsAssignment() && validEqAssignArgCount(x) { - WalkVars(x.Operand(0), func(v Var) bool { - vars.Add(v) - return false - }) - } else if decl, ok := x.Terms.(*SomeDecl); ok { - for i := range decl.Symbols { - switch val := decl.Symbols[i].Value.(type) { - case Var: - vars.Add(val) - case Call: - args := val[1:] - if len(args) == 3 { // some x, y in xs - WalkVars(args[1], func(v Var) bool { - vars.Add(v) - return false - }) - } - // some x in xs - WalkVars(args[0], func(v Var) bool { - vars.Add(v) - return false - }) - } - } - } - case *ArrayComprehension, *SetComprehension, *ObjectComprehension: - return true - } - return false - }) - vis.Walk(x) - return vars -} - -// rewriteComprehensionTerms will rewrite comprehensions so that the term part -// is bound to a variable in the body. This allows any type of term to be used -// in the term part (even if the term requires evaluation.) 
-// -// For instance, given the following comprehension: -// -// [x[0] | x = y[_]; y = [1,2,3]] -// -// The comprehension would be rewritten as: -// -// [__local0__ | x = y[_]; y = [1,2,3]; __local0__ = x[0]] -func rewriteComprehensionTerms(f *equalityFactory, node interface{}) (interface{}, error) { - return TransformComprehensions(node, func(x interface{}) (Value, error) { - switch x := x.(type) { - case *ArrayComprehension: - if requiresEval(x.Term) { - expr := f.Generate(x.Term) - x.Term = expr.Operand(0) - x.Body.Append(expr) - } - return x, nil - case *SetComprehension: - if requiresEval(x.Term) { - expr := f.Generate(x.Term) - x.Term = expr.Operand(0) - x.Body.Append(expr) - } - return x, nil - case *ObjectComprehension: - if requiresEval(x.Key) { - expr := f.Generate(x.Key) - x.Key = expr.Operand(0) - x.Body.Append(expr) - } - if requiresEval(x.Value) { - expr := f.Generate(x.Value) - x.Value = expr.Operand(0) - x.Body.Append(expr) - } - return x, nil - } - panic("illegal type") - }) -} - -// rewriteEquals will rewrite exprs under x as unification calls instead of == -// calls. For example: -// -// data.foo == data.bar is rewritten as data.foo = data.bar -// -// This stage should only run the safety check (since == is a built-in with no -// outputs, so the inputs must not be marked as safe.) -// -// This stage is not executed by the query compiler by default because when -// callers specify == instead of = they expect to receive a true/false/undefined -// result back whereas with = the result is only ever true/undefined. For -// partial evaluation cases we do want to rewrite == to = to simplify the -// result. -func rewriteEquals(x interface{}) (modified bool) { - doubleEq := Equal.Ref() - unifyOp := Equality.Ref() - t := NewGenericTransformer(func(x interface{}) (interface{}, error) { - if x, ok := x.(*Expr); ok && x.IsCall() { - operator := x.Operator() - if operator.Equal(doubleEq) && len(x.Operands()) == 2 { - modified = true - x.SetOperator(NewTerm(unifyOp)) - } - } - return x, nil - }) - _, _ = Transform(t, x) // ignore error - return modified -} - -func rewriteTestEqualities(f *equalityFactory, body Body) Body { - result := make(Body, 0, len(body)) - for _, expr := range body { - // We can't rewrite negated expressions; if the extracted term is undefined, evaluation would fail before - // reaching the negation check. - if !expr.Negated && !expr.Generated { - switch { - case expr.IsEquality(): - terms := expr.Terms.([]*Term) - result, terms[1] = rewriteDynamicsShallow(expr, f, terms[1], result) - result, terms[2] = rewriteDynamicsShallow(expr, f, terms[2], result) - case expr.IsEvery(): - // We rewrite equalities inside of every-bodies as a fail here will be the cause of the test-rule fail. - // Failures inside other expressions with closures, such as comprehensions, won't cause the test-rule to fail, so we skip those. 
- every := expr.Terms.(*Every) - every.Body = rewriteTestEqualities(f, every.Body) - } - } - result = appendExpr(result, expr) - } - return result -} - -func rewriteDynamicsShallow(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { - switch term.Value.(type) { - case Ref, *ArrayComprehension, *SetComprehension, *ObjectComprehension: - generated := f.Generate(term) - generated.With = original.With - result.Append(generated) - connectGeneratedExprs(original, generated) - return result, result[len(result)-1].Operand(0) - } - return result, term -} - -// rewriteDynamics will rewrite the body so that dynamic terms (i.e., refs and -// comprehensions) are bound to vars earlier in the query. This translation -// results in eager evaluation. -// -// For instance, given the following query: -// -// foo(data.bar) = 1 -// -// The rewritten version will be: -// -// __local0__ = data.bar; foo(__local0__) = 1 -func rewriteDynamics(f *equalityFactory, body Body) Body { - result := make(Body, 0, len(body)) - for _, expr := range body { - switch { - case expr.IsEquality(): - result = rewriteDynamicsEqExpr(f, expr, result) - case expr.IsCall(): - result = rewriteDynamicsCallExpr(f, expr, result) - case expr.IsEvery(): - result = rewriteDynamicsEveryExpr(f, expr, result) - default: - result = rewriteDynamicsTermExpr(f, expr, result) - } - } - return result -} - -func appendExpr(body Body, expr *Expr) Body { - body.Append(expr) - return body -} - -func rewriteDynamicsEqExpr(f *equalityFactory, expr *Expr, result Body) Body { - if !validEqAssignArgCount(expr) { - return appendExpr(result, expr) - } - terms := expr.Terms.([]*Term) - result, terms[1] = rewriteDynamicsInTerm(expr, f, terms[1], result) - result, terms[2] = rewriteDynamicsInTerm(expr, f, terms[2], result) - return appendExpr(result, expr) -} - -func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body { - terms := expr.Terms.([]*Term) - for i := 1; i < len(terms); i++ { - result, terms[i] = rewriteDynamicsOne(expr, f, terms[i], result) - } - return appendExpr(result, expr) -} - -func rewriteDynamicsEveryExpr(f *equalityFactory, expr *Expr, result Body) Body { - ev := expr.Terms.(*Every) - result, ev.Domain = rewriteDynamicsOne(expr, f, ev.Domain, result) - ev.Body = rewriteDynamics(f, ev.Body) - return appendExpr(result, expr) -} - -func rewriteDynamicsTermExpr(f *equalityFactory, expr *Expr, result Body) Body { - term := expr.Terms.(*Term) - result, expr.Terms = rewriteDynamicsInTerm(expr, f, term, result) - return appendExpr(result, expr) -} - -func rewriteDynamicsInTerm(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { - switch v := term.Value.(type) { - case Ref: - for i := 1; i < len(v); i++ { - result, v[i] = rewriteDynamicsOne(original, f, v[i], result) - } - case *ArrayComprehension: - v.Body = rewriteDynamics(f, v.Body) - case *SetComprehension: - v.Body = rewriteDynamics(f, v.Body) - case *ObjectComprehension: - v.Body = rewriteDynamics(f, v.Body) - default: - result, term = rewriteDynamicsOne(original, f, term, result) - } - return result, term -} - -func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { - switch v := term.Value.(type) { - case Ref: - for i := 1; i < len(v); i++ { - result, v[i] = rewriteDynamicsOne(original, f, v[i], result) - } - generated := f.Generate(term) - generated.With = original.With - result.Append(generated) - connectGeneratedExprs(original, generated) - return result, 
result[len(result)-1].Operand(0) - case *Array: - for i := 0; i < v.Len(); i++ { - var t *Term - result, t = rewriteDynamicsOne(original, f, v.Elem(i), result) - v.set(i, t) - } - return result, term - case *object: - cpy := NewObject() - v.Foreach(func(key, value *Term) { - result, key = rewriteDynamicsOne(original, f, key, result) - result, value = rewriteDynamicsOne(original, f, value, result) - cpy.Insert(key, value) - }) - return result, NewTerm(cpy).SetLocation(term.Location) - case Set: - cpy := NewSet() - for _, term := range v.Slice() { - var rw *Term - result, rw = rewriteDynamicsOne(original, f, term, result) - cpy.Add(rw) - } - return result, NewTerm(cpy).SetLocation(term.Location) - case *ArrayComprehension: - var extra *Expr - v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) - result.Append(extra) - connectGeneratedExprs(original, extra) - return result, result[len(result)-1].Operand(0) - case *SetComprehension: - var extra *Expr - v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) - result.Append(extra) - connectGeneratedExprs(original, extra) - return result, result[len(result)-1].Operand(0) - case *ObjectComprehension: - var extra *Expr - v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) - result.Append(extra) - connectGeneratedExprs(original, extra) - return result, result[len(result)-1].Operand(0) - } - return result, term -} - -func rewriteDynamicsComprehensionBody(original *Expr, f *equalityFactory, body Body, term *Term) (Body, *Expr) { - body = rewriteDynamics(f, body) - generated := f.Generate(term) - generated.With = original.With - return body, generated -} - -func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) { - for i := range rule.Head.Args { - support, output := expandExprTerm(gen, rule.Head.Args[i]) - for j := range support { - rule.Body.Append(support[j]) - } - rule.Head.Args[i] = output - } - if rule.Head.Key != nil { - support, output := expandExprTerm(gen, rule.Head.Key) - for i := range support { - rule.Body.Append(support[i]) - } - rule.Head.Key = output - } - if rule.Head.Value != nil { - support, output := expandExprTerm(gen, rule.Head.Value) - for i := range support { - rule.Body.Append(support[i]) - } - rule.Head.Value = output - } -} - -func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body { - cpy := make(Body, 0, len(body)) - for i := 0; i < len(body); i++ { - for _, expr := range expandExpr(gen, body[i]) { - cpy.Append(expr) - } - } - return cpy -} - -func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) { - for i := range expr.With { - extras, value := expandExprTerm(gen, expr.With[i].Value) - expr.With[i].Value = value - result = append(result, extras...) - } - switch terms := expr.Terms.(type) { - case *Term: - extras, term := expandExprTerm(gen, terms) - if len(expr.With) > 0 { - for i := range extras { - extras[i].With = expr.With - } - } - result = append(result, extras...) - expr.Terms = term - result = append(result, expr) - case []*Term: - for i := 1; i < len(terms); i++ { - var extras []*Expr - extras, terms[i] = expandExprTerm(gen, terms[i]) - connectGeneratedExprs(expr, extras...) - if len(expr.With) > 0 { - for i := range extras { - extras[i].With = expr.With - } - } - result = append(result, extras...) 
- } - result = append(result, expr) - case *Every: - var extras []*Expr - - term := NewTerm(gen.Generate()).SetLocation(terms.Domain.Location) - eq := Equality.Expr(term, terms.Domain).SetLocation(terms.Domain.Location) - eq.Generated = true - eq.With = expr.With - extras = expandExpr(gen, eq) - terms.Domain = term - - terms.Body = rewriteExprTermsInBody(gen, terms.Body) - result = append(result, extras...) - result = append(result, expr) - } - return -} - -func connectGeneratedExprs(parent *Expr, children ...*Expr) { - for _, child := range children { - child.generatedFrom = parent - parent.generates = append(parent.generates, child) - } -} - -func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) { - output = term - switch v := term.Value.(type) { - case Call: - for i := 1; i < len(v); i++ { - var extras []*Expr - extras, v[i] = expandExprTerm(gen, v[i]) - support = append(support, extras...) - } - output = NewTerm(gen.Generate()).SetLocation(term.Location) - expr := v.MakeExpr(output).SetLocation(term.Location) - expr.Generated = true - support = append(support, expr) - case Ref: - support = expandExprRef(gen, v) - case *Array: - support = expandExprTermArray(gen, v) - case *object: - cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { - extras1, expandedKey := expandExprTerm(gen, k) - extras2, expandedValue := expandExprTerm(gen, v) - support = append(support, extras1...) - support = append(support, extras2...) - return expandedKey, expandedValue, nil - }) - output = NewTerm(cpy).SetLocation(term.Location) - case Set: - cpy, _ := v.Map(func(x *Term) (*Term, error) { - extras, expanded := expandExprTerm(gen, x) - support = append(support, extras...) - return expanded, nil - }) - output = NewTerm(cpy).SetLocation(term.Location) - case *ArrayComprehension: - support, term := expandExprTerm(gen, v.Term) - for i := range support { - v.Body.Append(support[i]) - } - v.Term = term - v.Body = rewriteExprTermsInBody(gen, v.Body) - case *SetComprehension: - support, term := expandExprTerm(gen, v.Term) - for i := range support { - v.Body.Append(support[i]) - } - v.Term = term - v.Body = rewriteExprTermsInBody(gen, v.Body) - case *ObjectComprehension: - support, key := expandExprTerm(gen, v.Key) - for i := range support { - v.Body.Append(support[i]) - } - v.Key = key - support, value := expandExprTerm(gen, v.Value) - for i := range support { - v.Body.Append(support[i]) - } - v.Value = value - v.Body = rewriteExprTermsInBody(gen, v.Body) - } - return -} - -func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) { - // Start by calling a normal expandExprTerm on all terms. - support = expandExprTermSlice(gen, v) - - // Rewrite references in order to support indirect references. We rewrite - // e.g. - // - // [1, 2, 3][i] - // - // to - // - // __local_var = [1, 2, 3] - // __local_var[i] - // - // to support these. This only impacts the reference subject, i.e. the - // first item in the slice. - var subject = v[0] - switch subject.Value.(type) { - case *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: - f := newEqualityFactory(gen) - assignToLocal := f.Generate(subject) - support = append(support, assignToLocal) - v[0] = assignToLocal.Operand(0) - } - return -} - -func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) { - for i := 0; i < arr.Len(); i++ { - extras, v := expandExprTerm(gen, arr.Elem(i)) - arr.set(i, v) - support = append(support, extras...) 
- } - return -} - -func expandExprTermSlice(gen *localVarGenerator, v []*Term) (support []*Expr) { - for i := 0; i < len(v); i++ { - var extras []*Expr - extras, v[i] = expandExprTerm(gen, v[i]) - support = append(support, extras...) - } - return -} - -type localDeclaredVars struct { - vars []*declaredVarSet - - // rewritten contains a mapping of *all* user-defined variables - // that have been rewritten whereas vars contains the state - // from the current query (not any nested queries, and all vars - // seen). - rewritten map[Var]Var - - // indicates if an assignment (:= operator) has been seen *ever* - assignment bool -} - -type varOccurrence int - -const ( - newVar varOccurrence = iota - argVar - seenVar - assignedVar - declaredVar -) - -type declaredVarSet struct { - vs map[Var]Var - reverse map[Var]Var - occurrence map[Var]varOccurrence - count map[Var]int -} - -func newDeclaredVarSet() *declaredVarSet { - return &declaredVarSet{ - vs: map[Var]Var{}, - reverse: map[Var]Var{}, - occurrence: map[Var]varOccurrence{}, - count: map[Var]int{}, - } -} - -func newLocalDeclaredVars() *localDeclaredVars { - return &localDeclaredVars{ - vars: []*declaredVarSet{newDeclaredVarSet()}, - rewritten: map[Var]Var{}, - } -} - -func (s *localDeclaredVars) Copy() *localDeclaredVars { - stack := &localDeclaredVars{ - vars: []*declaredVarSet{}, - rewritten: map[Var]Var{}, - } - - for i := range s.vars { - stack.vars = append(stack.vars, newDeclaredVarSet()) - for k, v := range s.vars[i].vs { - stack.vars[0].vs[k] = v - } - for k, v := range s.vars[i].reverse { - stack.vars[0].reverse[k] = v - } - for k, v := range s.vars[i].count { - stack.vars[0].count[k] = v - } - for k, v := range s.vars[i].occurrence { - stack.vars[0].occurrence[k] = v - } - } - - for k, v := range s.rewritten { - stack.rewritten[k] = v - } - - return stack -} - -func (s *localDeclaredVars) Push() { - s.vars = append(s.vars, newDeclaredVarSet()) -} - -func (s *localDeclaredVars) Pop() *declaredVarSet { - sl := s.vars - curr := sl[len(sl)-1] - s.vars = sl[:len(sl)-1] - return curr -} - -func (s localDeclaredVars) Peek() *declaredVarSet { - return s.vars[len(s.vars)-1] -} - -func (s localDeclaredVars) Insert(x, y Var, occurrence varOccurrence) { - elem := s.vars[len(s.vars)-1] - elem.vs[x] = y - elem.reverse[y] = x - elem.occurrence[x] = occurrence - - elem.count[x] = 1 - - // If the variable has been rewritten (where x != y, with y being - // the generated value), store it in the map of rewritten vars. - // Assume that the generated values are unique for the compilation. - if !x.Equal(y) { - s.rewritten[y] = x - } -} - -func (s localDeclaredVars) Declared(x Var) (y Var, ok bool) { - for i := len(s.vars) - 1; i >= 0; i-- { - if y, ok = s.vars[i].vs[x]; ok { - return - } - } - return -} - -// Occurrence returns a flag that indicates whether x has occurred in the -// current scope. -func (s localDeclaredVars) Occurrence(x Var) varOccurrence { - return s.vars[len(s.vars)-1].occurrence[x] -} - -// GlobalOccurrence returns a flag that indicates whether x has occurred in the -// global scope. 
-func (s localDeclaredVars) GlobalOccurrence(x Var) (varOccurrence, bool) { - for i := len(s.vars) - 1; i >= 0; i-- { - if occ, ok := s.vars[i].occurrence[x]; ok { - return occ, true - } - } - return newVar, false -} - -// Seen marks x as seen by incrementing its counter -func (s localDeclaredVars) Seen(x Var) { - for i := len(s.vars) - 1; i >= 0; i-- { - dvs := s.vars[i] - if c, ok := dvs.count[x]; ok { - dvs.count[x] = c + 1 - return - } - } - - s.vars[len(s.vars)-1].count[x] = 1 -} - -// Count returns how many times x has been seen -func (s localDeclaredVars) Count(x Var) int { - for i := len(s.vars) - 1; i >= 0; i-- { - if c, ok := s.vars[i].count[x]; ok { - return c - } - } - - return 0 -} - -// rewriteLocalVars rewrites bodies to remove assignment/declaration -// expressions. For example: -// -// a := 1; p[a] -// -// Is rewritten to: -// -// __local0__ = 1; p[__local0__] -// -// During rewriting, assignees are validated to prevent use before declaration. -func rewriteLocalVars(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, strict bool) (Body, map[Var]Var, Errors) { - var errs Errors - body, errs = rewriteDeclaredVarsInBody(g, stack, used, body, errs, strict) - return body, stack.Peek().vs, errs -} - -func rewriteDeclaredVarsInBody(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, errs Errors, strict bool) (Body, Errors) { - - var cpy Body - - for i := range body { - var expr *Expr - switch { - case body[i].IsAssignment(): - stack.assignment = true - expr, errs = rewriteDeclaredAssignment(g, stack, body[i], errs, strict) - case body[i].IsSome(): - expr, errs = rewriteSomeDeclStatement(g, stack, body[i], errs, strict) - case body[i].IsEvery(): - expr, errs = rewriteEveryStatement(g, stack, body[i], errs, strict) - default: - expr, errs = rewriteDeclaredVarsInExpr(g, stack, body[i], errs, strict) - } - if expr != nil { - cpy.Append(expr) - } - } - - // If the body only contained a var statement it will be empty at this - // point. Append true to the body to ensure that it's non-empty (zero length - // bodies are not supported.) - if len(cpy) == 0 { - cpy.Append(NewExpr(BooleanTerm(true))) - } - - errs = checkUnusedAssignedVars(body, stack, used, errs, strict) - return cpy, checkUnusedDeclaredVars(body, stack, used, cpy, errs) -} - -func checkUnusedAssignedVars(body Body, stack *localDeclaredVars, used VarSet, errs Errors, strict bool) Errors { - - if !strict || len(errs) > 0 { - return errs - } - - dvs := stack.Peek() - unused := NewVarSet() - - for v, occ := range dvs.occurrence { - // A var that was assigned in this scope must have been seen (used) more than once (the time of assignment) in - // the same, or nested, scope to be counted as used. 
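		// For example (an illustrative sketch, not part of the vendored
		// source): in
		//
		//	p if { x := 1; y := 2; y > 0 }
		//
		// x is assigned but never read again, so strict mode reports
		// "assigned var x unused", while y is counted as used by y > 0.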
- if !v.IsWildcard() && stack.Count(v) <= 1 && occ == assignedVar { - unused.Add(dvs.vs[v]) - } - } - - rewrittenUsed := NewVarSet() - for v := range used { - if gv, ok := stack.Declared(v); ok { - rewrittenUsed.Add(gv) - } else { - rewrittenUsed.Add(v) - } - } - - unused = unused.Diff(rewrittenUsed) - - for _, gv := range unused.Sorted() { - found := false - for i := range body { - if body[i].Vars(VarVisitorParams{}).Contains(gv) { - errs = append(errs, NewError(CompileErr, body[i].Loc(), "assigned var %v unused", dvs.reverse[gv])) - found = true - break - } - } - if !found { - errs = append(errs, NewError(CompileErr, body[0].Loc(), "assigned var %v unused", dvs.reverse[gv])) - } - } - - return errs -} - -func checkUnusedDeclaredVars(body Body, stack *localDeclaredVars, used VarSet, cpy Body, errs Errors) Errors { - - // NOTE(tsandall): Do not generate more errors if there are existing - // declaration errors. - if len(errs) > 0 { - return errs - } - - dvs := stack.Peek() - declared := NewVarSet() - - for v, occ := range dvs.occurrence { - if occ == declaredVar { - declared.Add(dvs.vs[v]) - } - } - - bodyvars := cpy.Vars(VarVisitorParams{}) - - for v := range used { - if gv, ok := stack.Declared(v); ok { - bodyvars.Add(gv) - } else { - bodyvars.Add(v) - } - } - - unused := declared.Diff(bodyvars).Diff(used) - - for _, gv := range unused.Sorted() { - rv := dvs.reverse[gv] - if !rv.IsGenerated() { - // Scan through body exprs, looking for a match between the - // bad var's original name, and each expr's declared vars. - foundUnusedVarByName := false - for i := range body { - varsDeclaredInExpr := declaredVars(body[i]) - if varsDeclaredInExpr.Contains(dvs.reverse[gv]) { - // TODO(philipc): Clean up the offset logic here when the parser - // reports more accurate locations. - errs = append(errs, NewError(CompileErr, body[i].Loc(), "declared var %v unused", dvs.reverse[gv])) - foundUnusedVarByName = true - break - } - } - // Default error location returned. 
- if !foundUnusedVarByName { - errs = append(errs, NewError(CompileErr, body[0].Loc(), "declared var %v unused", dvs.reverse[gv])) - } - } - } - - return errs -} - -func rewriteEveryStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - e := expr.Copy() - every := e.Terms.(*Every) - - errs = rewriteDeclaredVarsInTermRecursive(g, stack, every.Domain, errs, strict) - - stack.Push() - defer stack.Pop() - - // if the key exists, rewrite - if every.Key != nil { - if v := every.Key.Value.(Var); !v.IsWildcard() { - gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) - if err != nil { - return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error())) - } - every.Key.Value = gv - } - } else { // if the key doesn't exist, add dummy local - every.Key = NewTerm(g.Generate()) - } - - // value is always present - if v := every.Value.Value.(Var); !v.IsWildcard() { - gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) - if err != nil { - return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error())) - } - every.Value.Value = gv - } - - used := NewVarSet() - every.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, every.Body, errs, strict) - - return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) -} - -func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - e := expr.Copy() - decl := e.Terms.(*SomeDecl) - for i := range decl.Symbols { - switch v := decl.Symbols[i].Value.(type) { - case Var: - if _, err := rewriteDeclaredVar(g, stack, v, declaredVar); err != nil { - return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error())) - } - case Call: - var key, val, container *Term - switch len(v) { - case 4: // member3 - key = v[1] - val = v[2] - container = v[3] - case 3: // member - key = NewTerm(g.Generate()) - val = v[1] - container = v[2] - } - - var rhs *Term - switch c := container.Value.(type) { - case Ref: - rhs = RefTerm(append(c, key)...) - default: - rhs = RefTerm(container, key) - } - e.Terms = []*Term{ - RefTerm(VarTerm(Equality.Name)), val, rhs, - } - - for _, v0 := range outputVarsForExprEq(e, container.Vars()).Sorted() { - if _, err := rewriteDeclaredVar(g, stack, v0, declaredVar); err != nil { - return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error())) - } - } - return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) - } - } - return nil, errs -} - -func rewriteDeclaredVarsInExpr(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - vis := NewGenericVisitor(func(x interface{}) bool { - var stop bool - switch x := x.(type) { - case *Term: - stop, errs = rewriteDeclaredVarsInTerm(g, stack, x, errs, strict) - case *With: - errs = rewriteDeclaredVarsInTermRecursive(g, stack, x.Value, errs, strict) - stop = true - } - return stop - }) - vis.Walk(expr) - return expr, errs -} - -func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { - - if expr.Negated { - errs = append(errs, NewError(CompileErr, expr.Location, "cannot assign vars inside negated expression")) - return expr, errs - } - - numErrsBefore := len(errs) - - if !validEqAssignArgCount(expr) { - return expr, errs - } - - // Rewrite terms on right hand side capture seen vars and recursively - // process comprehensions before left hand side is processed. Also - // rewrite with modifier. 
- errs = rewriteDeclaredVarsInTermRecursive(g, stack, expr.Operand(1), errs, strict) - - for _, w := range expr.With { - errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) - } - - // Rewrite vars on left hand side with unique names. Catch redeclaration - // and invalid term types here. - var vis func(t *Term) bool - - vis = func(t *Term) bool { - switch v := t.Value.(type) { - case Var: - if gv, err := rewriteDeclaredVar(g, stack, v, assignedVar); err != nil { - errs = append(errs, NewError(CompileErr, t.Location, err.Error())) - } else { - t.Value = gv - } - return true - case *Array: - return false - case *object: - v.Foreach(func(_, v *Term) { - WalkTerms(v, vis) - }) - return true - case Ref: - if RootDocumentRefs.Contains(t) { - if gv, err := rewriteDeclaredVar(g, stack, v[0].Value.(Var), assignedVar); err != nil { - errs = append(errs, NewError(CompileErr, t.Location, err.Error())) - } else { - t.Value = gv - } - return true - } - } - errs = append(errs, NewError(CompileErr, t.Location, "cannot assign to %v", TypeName(t.Value))) - return true - } - - WalkTerms(expr.Operand(0), vis) - - if len(errs) == numErrsBefore { - loc := expr.Operator()[0].Location - expr.SetOperator(RefTerm(VarTerm(Equality.Name).SetLocation(loc)).SetLocation(loc)) - } - - return expr, errs -} - -func rewriteDeclaredVarsInTerm(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) (bool, Errors) { - switch v := term.Value.(type) { - case Var: - if gv, ok := stack.Declared(v); ok { - term.Value = gv - stack.Seen(v) - } else if stack.Occurrence(v) == newVar { - stack.Insert(v, v, seenVar) - } - case Ref: - if RootDocumentRefs.Contains(term) { - x := v[0].Value.(Var) - if occ, ok := stack.GlobalOccurrence(x); ok && occ != seenVar { - gv, _ := stack.Declared(x) - term.Value = gv - } - - return true, errs - } - return false, errs - case Call: - ref := v[0] - WalkVars(ref, func(v Var) bool { - if gv, ok := stack.Declared(v); ok && !gv.Equal(v) { - // We will rewrite the ref of a function call, which is never ok since we don't have first-class functions. 
- errs = append(errs, NewError(CompileErr, term.Location, "called function %s shadowed", ref)) - return true - } - return false - }) - return false, errs - case *object: - cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { - kcpy := k.Copy() - errs = rewriteDeclaredVarsInTermRecursive(g, stack, kcpy, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v, errs, strict) - return kcpy, v, nil - }) - term.Value = cpy - case Set: - cpy, _ := v.Map(func(elem *Term) (*Term, error) { - elemcpy := elem.Copy() - errs = rewriteDeclaredVarsInTermRecursive(g, stack, elemcpy, errs, strict) - return elemcpy, nil - }) - term.Value = cpy - case *ArrayComprehension: - errs = rewriteDeclaredVarsInArrayComprehension(g, stack, v, errs, strict) - case *SetComprehension: - errs = rewriteDeclaredVarsInSetComprehension(g, stack, v, errs, strict) - case *ObjectComprehension: - errs = rewriteDeclaredVarsInObjectComprehension(g, stack, v, errs, strict) - default: - return false, errs - } - return true, errs -} - -func rewriteDeclaredVarsInTermRecursive(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) Errors { - WalkNodes(term, func(n Node) bool { - var stop bool - switch n := n.(type) { - case *With: - errs = rewriteDeclaredVarsInTermRecursive(g, stack, n.Value, errs, strict) - stop = true - case *Term: - stop, errs = rewriteDeclaredVarsInTerm(g, stack, n, errs, strict) - } - return stop - }) - return errs -} - -func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ArrayComprehension, errs Errors, strict bool) Errors { - used := NewVarSet() - used.Update(v.Term.Vars()) - - stack.Push() - v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) - stack.Pop() - return errs -} - -func rewriteDeclaredVarsInSetComprehension(g *localVarGenerator, stack *localDeclaredVars, v *SetComprehension, errs Errors, strict bool) Errors { - used := NewVarSet() - used.Update(v.Term.Vars()) - - stack.Push() - v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) - stack.Pop() - return errs -} - -func rewriteDeclaredVarsInObjectComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ObjectComprehension, errs Errors, strict bool) Errors { - used := NewVarSet() - used.Update(v.Key.Vars()) - used.Update(v.Value.Vars()) - - stack.Push() - v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Key, errs, strict) - errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Value, errs, strict) - stack.Pop() - return errs -} - -func rewriteDeclaredVar(g *localVarGenerator, stack *localDeclaredVars, v Var, occ varOccurrence) (gv Var, err error) { - switch stack.Occurrence(v) { - case seenVar: - return gv, fmt.Errorf("var %v referenced above", v) - case assignedVar: - return gv, fmt.Errorf("var %v assigned above", v) - case declaredVar: - return gv, fmt.Errorf("var %v declared above", v) - case argVar: - return gv, fmt.Errorf("arg %v redeclared", v) - } - gv = g.Generate() - stack.Insert(v, gv, occ) - return -} - -// rewriteWithModifiersInBody will rewrite the body so that with modifiers do -// not contain terms that require evaluation as values. If this function -// encounters an invalid with modifier target then it will raise an error. 
-func rewriteWithModifiersInBody(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, body Body) (Body, *Error) { - var result Body - for i := range body { - exprs, err := rewriteWithModifier(c, unsafeBuiltinsMap, f, body[i]) - if err != nil { - return nil, err - } - if len(exprs) > 0 { - for _, expr := range exprs { - result.Append(expr) - } - } else { - result.Append(body[i]) - } - } - return result, nil -} - -func rewriteWithModifier(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, expr *Expr) ([]*Expr, *Error) { - - var result []*Expr - for i := range expr.With { - eval, err := validateWith(c, unsafeBuiltinsMap, expr, i) - if err != nil { - return nil, err - } - - if eval { - eq := f.Generate(expr.With[i].Value) - result = append(result, eq) - expr.With[i].Value = eq.Operand(0) - } - } - - return append(result, expr), nil -} - -func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr, i int) (bool, *Error) { - target, value := expr.With[i].Target, expr.With[i].Value - - // Ensure that values that are built-ins are rewritten to Ref (not Var) - if v, ok := value.Value.(Var); ok { - if _, ok := c.builtins[v.String()]; ok { - value.Value = Ref([]*Term{NewTerm(v)}) - } - } - isBuiltinRefOrVar, err := isBuiltinRefOrVar(c.builtins, unsafeBuiltinsMap, target) - if err != nil { - return false, err - } - - isAllowedUnknownFuncCall := false - if c.allowUndefinedFuncCalls { - switch target.Value.(type) { - case Ref, Var: - isAllowedUnknownFuncCall = true - } - } - - switch { - case isDataRef(target): - ref := target.Value.(Ref) - targetNode := c.RuleTree - for i := 0; i < len(ref)-1; i++ { - child := targetNode.Child(ref[i].Value) - if child == nil { - break - } else if len(child.Values) > 0 { - return false, NewError(CompileErr, target.Loc(), "with keyword cannot partially replace virtual document(s)") - } - targetNode = child - } - - if targetNode != nil { - // NOTE(sr): at this point in the compiler stages, we don't have a fully-populated - // TypeEnv yet -- so we have to make do with this check to see if the replacement - // target is a function. It's probably wrong for arity-0 functions, but those are - // and edge case anyways. - if child := targetNode.Child(ref[len(ref)-1].Value); child != nil { - for _, v := range child.Values { - if len(v.(*Rule).Head.Args) > 0 { - if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { - return false, err // err may be nil - } - } - } - } - } - - // If the with-value is a ref to a function, but not a call, we can't rewrite it - if r, ok := value.Value.(Ref); ok { - // TODO: check that target ref doesn't exist? 
- if valueNode := c.RuleTree.Find(r); valueNode != nil { - for _, v := range valueNode.Values { - if len(v.(*Rule).Head.Args) > 0 { - return false, nil - } - } - } - } - case isInputRef(target): // ok, valid - case isBuiltinRefOrVar: - - // NOTE(sr): first we ensure that parsed Var builtins (`count`, `concat`, etc) - // are rewritten to their proper Ref convention - if v, ok := target.Value.(Var); ok { - target.Value = Ref([]*Term{NewTerm(v)}) - } - - targetRef := target.Value.(Ref) - bi := c.builtins[targetRef.String()] // safe because isBuiltinRefOrVar checked this - if err := validateWithBuiltinTarget(bi, targetRef, target.Loc()); err != nil { - return false, err - } - - if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { - return false, err // err may be nil - } - case isAllowedUnknownFuncCall: - // The target isn't a ref to the input doc, data doc, or a known built-in, but it might be a ref to an unknown built-in. - return false, nil - default: - return false, NewError(TypeErr, target.Location, "with keyword target must reference existing %v, %v, or a function", InputRootDocument, DefaultRootDocument) - } - return requiresEval(value), nil -} - -func validateWithBuiltinTarget(bi *Builtin, target Ref, loc *location.Location) *Error { - switch bi.Name { - case Equality.Name, - RegoMetadataChain.Name, - RegoMetadataRule.Name: - return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of %q invalid", bi.Name) - } - - switch { - case target.HasPrefix(Ref([]*Term{VarTerm("internal")})): - return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of internal function %q invalid", target) - - case bi.Relation: - return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a relation") - - case bi.Decl.Result() == nil: - return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a void function") - } - return nil -} - -func validateWithFunctionValue(bs map[string]*Builtin, unsafeMap map[string]struct{}, ruleTree *TreeNode, value *Term) (bool, *Error) { - if v, ok := value.Value.(Ref); ok { - if ruleTree.Find(v) != nil { // ref exists in rule tree - return true, nil - } - } - return isBuiltinRefOrVar(bs, unsafeMap, value) -} - -func isInputRef(term *Term) bool { - if ref, ok := term.Value.(Ref); ok { - if ref.HasPrefix(InputRootRef) { - return true - } - } - return false -} - -func isDataRef(term *Term) bool { - if ref, ok := term.Value.(Ref); ok { - if ref.HasPrefix(DefaultRootRef) { - return true - } - } - return false -} - -func isBuiltinRefOrVar(bs map[string]*Builtin, unsafeBuiltinsMap map[string]struct{}, term *Term) (bool, *Error) { - switch v := term.Value.(type) { - case Ref, Var: - if _, ok := unsafeBuiltinsMap[v.String()]; ok { - return false, NewError(CompileErr, term.Location, "with keyword replacing built-in function: target must not be unsafe: %q", v) - } - _, ok := bs[v.String()] - return ok, nil - } - return false, nil -} - -func isVirtual(node *TreeNode, ref Ref) bool { - for i := range ref { - child := node.Child(ref[i].Value) - if child == nil { - return false - } else if len(child.Values) > 0 { - return true - } - node = child - } - return true -} - -func safetyErrorSlice(unsafe unsafeVars, rewritten map[Var]Var) (result Errors) { - - if len(unsafe) == 0 { - return - } - - for _, pair := range unsafe.Vars() { - v := pair.Var - if w, ok := rewritten[v]; ok { - v = w - } - if 
!v.IsGenerated() { - if _, ok := futureKeywords[string(v)]; ok { - result = append(result, NewError(UnsafeVarErr, pair.Loc, - "var %[1]v is unsafe (hint: `import future.keywords.%[1]v` to import a future keyword)", v)) - continue - } - result = append(result, NewError(UnsafeVarErr, pair.Loc, "var %v is unsafe", v)) - } - } - - if len(result) > 0 { - return - } - - // If the expression contains unsafe generated variables, report which - // expressions are unsafe instead of the variables that are unsafe (since - // the latter are not meaningful to the user.) - pairs := unsafe.Slice() - - sort.Slice(pairs, func(i, j int) bool { - return pairs[i].Expr.Location.Compare(pairs[j].Expr.Location) < 0 - }) - - // Report at most one error per generated variable. - seen := NewVarSet() - - for _, expr := range pairs { - before := len(seen) - for v := range expr.Vars { - if v.IsGenerated() { - seen.Add(v) - } - } - if len(seen) > before { - result = append(result, NewError(UnsafeVarErr, expr.Expr.Location, "expression is unsafe")) - } - } - - return -} - -func checkUnsafeBuiltins(unsafeBuiltinsMap map[string]struct{}, node interface{}) Errors { - errs := make(Errors, 0) - WalkExprs(node, func(x *Expr) bool { - if x.IsCall() { - operator := x.Operator().String() - if _, ok := unsafeBuiltinsMap[operator]; ok { - errs = append(errs, NewError(TypeErr, x.Loc(), "unsafe built-in function calls in expression: %v", operator)) - } - } - return false - }) - return errs -} - -func rewriteVarsInRef(vars ...map[Var]Var) varRewriter { - return func(node Ref) Ref { - i, _ := TransformVars(node, func(v Var) (Value, error) { - for _, m := range vars { - if u, ok := m[v]; ok { - return u, nil - } - } - return v, nil - }) - return i.(Ref) - } -} - -// NOTE(sr): This is duplicated with compile/compile.go; but moving it into another location -// would cause a circular dependency -- the refSet definition needs ast.Ref. If we make it -// public in the ast package, the compile package could take it from there, but it would also -// increase our public interface. Let's reconsider if we need it in a third place. -type refSet struct { - s []Ref -} - -func newRefSet(x ...Ref) *refSet { - result := &refSet{} - for i := range x { - result.AddPrefix(x[i]) - } - return result -} - -// ContainsPrefix returns true if r is prefixed by any of the existing refs in the set. -func (rs *refSet) ContainsPrefix(r Ref) bool { - for i := range rs.s { - if r.HasPrefix(rs.s[i]) { - return true - } - } - return false -} - -// AddPrefix inserts r into the set if r is not prefixed by any existing -// refs in the set. If any existing refs are prefixed by r, those existing -// refs are removed. -func (rs *refSet) AddPrefix(r Ref) { - if rs.ContainsPrefix(r) { - return - } - cpy := []Ref{r} - for i := range rs.s { - if !rs.s[i].HasPrefix(r) { - cpy = append(cpy, rs.s[i]) - } - } - rs.s = cpy -} - -// Sorted returns a sorted slice of terms for refs in the set. -func (rs *refSet) Sorted() []*Term { - terms := make([]*Term, len(rs.s)) - for i := range rs.s { - terms[i] = NewTerm(rs.s[i]) - } - sort.Slice(terms, func(i, j int) bool { - return terms[i].Value.Compare(terms[j].Value) < 0 - }) - return terms +// OutputVarsFromExpr returns all variables which are the "output" for +// the given expression. For safety checks this means that they would be +// made safe by the expr. 
+func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet { + return v1.OutputVarsFromExpr(c, expr, safe) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go index dd48884f9..37ede329e 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/compilehelper.go @@ -4,41 +4,29 @@ package ast +import v1 "github.com/open-policy-agent/opa/v1/ast" + // CompileModules takes a set of Rego modules represented as strings and // compiles them for evaluation. The keys of the map are used as filenames. func CompileModules(modules map[string]string) (*Compiler, error) { - return CompileModulesWithOpt(modules, CompileOpts{}) + return CompileModulesWithOpt(modules, CompileOpts{ + ParserOptions: ParserOptions{ + RegoVersion: DefaultRegoVersion, + }, + }) } // CompileOpts defines a set of options for the compiler. -type CompileOpts struct { - EnablePrintStatements bool - ParserOptions ParserOptions -} +type CompileOpts = v1.CompileOpts // CompileModulesWithOpt takes a set of Rego modules represented as strings and // compiles them for evaluation. The keys of the map are used as filenames. func CompileModulesWithOpt(modules map[string]string, opts CompileOpts) (*Compiler, error) { - - parsed := make(map[string]*Module, len(modules)) - - for f, module := range modules { - var pm *Module - var err error - if pm, err = ParseModuleWithOpts(f, module, opts.ParserOptions); err != nil { - return nil, err - } - parsed[f] = pm - } - - compiler := NewCompiler().WithEnablePrintStatements(opts.EnablePrintStatements) - compiler.Compile(parsed) - - if compiler.Failed() { - return nil, compiler.Errors + if opts.ParserOptions.RegoVersion == RegoUndefined { + opts.ParserOptions.RegoVersion = DefaultRegoVersion } - return compiler, nil + return v1.CompileModulesWithOpt(modules, opts) } // MustCompileModules compiles a set of Rego modules represented as strings. If diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/conflicts.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/conflicts.go index c2713ad57..10edce382 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/conflicts.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/conflicts.go @@ -5,49 +5,11 @@ package ast import ( - "strings" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // CheckPathConflicts returns a set of errors indicating paths that // are in conflict with the result of the provided callable. func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors { - var errs Errors - - root := c.RuleTree.Child(DefaultRootDocument.Value) - if root == nil { - return nil - } - - for _, node := range root.Children { - errs = append(errs, checkDocumentConflicts(node, exists, nil)...) 
- } - - return errs -} - -func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors { - - switch key := node.Key.(type) { - case String: - path = append(path, string(key)) - default: // other key types cannot conflict with data - return nil - } - - if len(node.Values) > 0 { - s := strings.Join(path, "/") - if ok, err := exists(path); err != nil { - return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())} - } else if ok { - return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)} - } - } - - var errs Errors - - for _, child := range node.Children { - errs = append(errs, checkDocumentConflicts(child, exists, path)...) - } - - return errs + return v1.CheckPathConflicts(c, exists) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/doc.go index 62b04e301..ba974e5ba 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/doc.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/doc.go @@ -1,36 +1,8 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine. -// -// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc. -// -// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows: -// -// Module -// | -// +--- Package (Reference) -// | -// +--- Imports -// | | -// | +--- Import (Term) -// | -// +--- Rules -// | -// +--- Rule -// | -// +--- Head -// | | -// | +--- Name (Variable) -// | | -// | +--- Key (Term) -// | | -// | +--- Value (Term) -// | -// +--- Body -// | -// +--- Expression (Term | Terms | Variable Declaration) -// -// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports. +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
package ast diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/env.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/env.go index c767aafef..ef0ccf89c 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/env.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/env.go @@ -5,522 +5,8 @@ package ast import ( - "fmt" - "strings" - - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // TypeEnv contains type info for static analysis such as type checking. -type TypeEnv struct { - tree *typeTreeNode - next *TypeEnv - newChecker func() *typeChecker -} - -// newTypeEnv returns an empty TypeEnv. The constructor is not exported because -// type environments should only be created by the type checker. -func newTypeEnv(f func() *typeChecker) *TypeEnv { - return &TypeEnv{ - tree: newTypeTree(), - newChecker: f, - } -} - -// Get returns the type of x. -func (env *TypeEnv) Get(x interface{}) types.Type { - - if term, ok := x.(*Term); ok { - x = term.Value - } - - switch x := x.(type) { - - // Scalars. - case Null: - return types.NewNull() - case Boolean: - return types.NewBoolean() - case Number: - return types.NewNumber() - case String: - return types.NewString() - - // Composites. - case *Array: - static := make([]types.Type, x.Len()) - for i := range static { - tpe := env.Get(x.Elem(i).Value) - static[i] = tpe - } - - var dynamic types.Type - if len(static) == 0 { - dynamic = types.A - } - - return types.NewArray(static, dynamic) - - case *lazyObj: - return env.Get(x.force()) - case *object: - static := []*types.StaticProperty{} - var dynamic *types.DynamicProperty - - x.Foreach(func(k, v *Term) { - if IsConstant(k.Value) { - kjson, err := JSON(k.Value) - if err == nil { - tpe := env.Get(v) - static = append(static, types.NewStaticProperty(kjson, tpe)) - return - } - } - // Can't handle it as a static property, fallback to dynamic - typeK := env.Get(k.Value) - typeV := env.Get(v.Value) - dynamic = types.NewDynamicProperty(typeK, typeV) - }) - - if len(static) == 0 && dynamic == nil { - dynamic = types.NewDynamicProperty(types.A, types.A) - } - - return types.NewObject(static, dynamic) - - case Set: - var tpe types.Type - x.Foreach(func(elem *Term) { - other := env.Get(elem.Value) - tpe = types.Or(tpe, other) - }) - if tpe == nil { - tpe = types.A - } - return types.NewSet(tpe) - - // Comprehensions. - case *ArrayComprehension: - cpy, errs := env.newChecker().CheckBody(env, x.Body) - if len(errs) == 0 { - return types.NewArray(nil, cpy.Get(x.Term)) - } - return nil - case *ObjectComprehension: - cpy, errs := env.newChecker().CheckBody(env, x.Body) - if len(errs) == 0 { - return types.NewObject(nil, types.NewDynamicProperty(cpy.Get(x.Key), cpy.Get(x.Value))) - } - return nil - case *SetComprehension: - cpy, errs := env.newChecker().CheckBody(env, x.Body) - if len(errs) == 0 { - return types.NewSet(cpy.Get(x.Term)) - } - return nil - - // Refs. - case Ref: - return env.getRef(x) - - // Vars. - case Var: - if node := env.tree.Child(x); node != nil { - return node.Value() - } - if env.next != nil { - return env.next.Get(x) - } - return nil - - // Calls. 
- case Call: - return nil - - default: - panic("unreachable") - } -} - -func (env *TypeEnv) getRef(ref Ref) types.Type { - - node := env.tree.Child(ref[0].Value) - if node == nil { - return env.getRefFallback(ref) - } - - return env.getRefRec(node, ref, ref[1:]) -} - -func (env *TypeEnv) getRefFallback(ref Ref) types.Type { - - if env.next != nil { - return env.next.Get(ref) - } - - if RootDocumentNames.Contains(ref[0]) { - return types.A - } - - return nil -} - -func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type { - if len(tail) == 0 { - return env.getRefRecExtent(node) - } - - if node.Leaf() { - if node.children.Len() > 0 { - if child := node.Child(tail[0].Value); child != nil { - return env.getRefRec(child, ref, tail[1:]) - } - } - return selectRef(node.Value(), tail) - } - - if !IsConstant(tail[0].Value) { - return selectRef(env.getRefRecExtent(node), tail) - } - - child := node.Child(tail[0].Value) - if child == nil { - return env.getRefFallback(ref) - } - - return env.getRefRec(child, ref, tail[1:]) -} - -func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type { - - if node.Leaf() { - return node.Value() - } - - children := []*types.StaticProperty{} - - node.Children().Iter(func(k, v util.T) bool { - key := k.(Value) - child := v.(*typeTreeNode) - - tpe := env.getRefRecExtent(child) - - // NOTE(sr): Converting to Golang-native types here is an extension of what we did - // before -- only supporting strings. But since we cannot differentiate sets and arrays - // that way, we could reconsider. - switch key.(type) { - case String, Number, Boolean: // skip anything else - propKey, err := JSON(key) - if err != nil { - panic(fmt.Errorf("unreachable, ValueToInterface: %w", err)) - } - children = append(children, types.NewStaticProperty(propKey, tpe)) - } - return false - }) - - // TODO(tsandall): for now, these objects can have any dynamic properties - // because we don't have schema for base docs. Once schemas are supported - // we can improve this. - return types.NewObject(children, types.NewDynamicProperty(types.S, types.A)) -} - -func (env *TypeEnv) wrap() *TypeEnv { - cpy := *env - cpy.next = env - cpy.tree = newTypeTree() - return &cpy -} - -// typeTreeNode is used to store type information in a tree. 
-type typeTreeNode struct { - key Value - value types.Type - children *util.HashMap -} - -func newTypeTree() *typeTreeNode { - return &typeTreeNode{ - key: nil, - value: nil, - children: util.NewHashMap(valueEq, valueHash), - } -} - -func (n *typeTreeNode) Child(key Value) *typeTreeNode { - value, ok := n.children.Get(key) - if !ok { - return nil - } - return value.(*typeTreeNode) -} - -func (n *typeTreeNode) Children() *util.HashMap { - return n.children -} - -func (n *typeTreeNode) Get(path Ref) types.Type { - curr := n - for _, term := range path { - child, ok := curr.children.Get(term.Value) - if !ok { - return nil - } - curr = child.(*typeTreeNode) - } - return curr.Value() -} - -func (n *typeTreeNode) Leaf() bool { - return n.value != nil -} - -func (n *typeTreeNode) PutOne(key Value, tpe types.Type) { - c, ok := n.children.Get(key) - - var child *typeTreeNode - if !ok { - child = newTypeTree() - child.key = key - n.children.Put(key, child) - } else { - child = c.(*typeTreeNode) - } - - child.value = tpe -} - -func (n *typeTreeNode) Put(path Ref, tpe types.Type) { - curr := n - for _, term := range path { - c, ok := curr.children.Get(term.Value) - - var child *typeTreeNode - if !ok { - child = newTypeTree() - child.key = term.Value - curr.children.Put(child.key, child) - } else { - child = c.(*typeTreeNode) - } - - curr = child - } - curr.value = tpe -} - -// Insert inserts tpe at path in the tree, but also merges the value into any types.Object present along that path. -// If a types.Object is inserted, any leafs already present further down the tree are merged into the inserted object. -// path must be ground. -func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) { - curr := n - for i, term := range path { - c, ok := curr.children.Get(term.Value) - - var child *typeTreeNode - if !ok { - child = newTypeTree() - child.key = term.Value - curr.children.Put(child.key, child) - } else { - child = c.(*typeTreeNode) - - if child.value != nil && i+1 < len(path) { - // If child has an object value, merge the new value into it. - if o, ok := child.value.(*types.Object); ok { - var err error - child.value, err = insertIntoObject(o, path[i+1:], tpe, env) - if err != nil { - panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) - } - } - } - } - - curr = child - } - - curr.value = mergeTypes(curr.value, tpe) - - if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 { - // merge all leafs into the inserted object - leafs := curr.Leafs() - for p, t := range leafs { - var err error - curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t, env) - if err != nil { - panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) - } - } - } -} - -// mergeTypes merges the types of 'a' and 'b'. If both are sets, their 'of' types are joined with an types.Or. -// If both are objects, the key types of their dynamic properties are joined with types.Or:s, and their value types -// are recursively merged (using mergeTypes). -// If 'a' and 'b' are both objects, and at least one of them have static properties, they are joined -// with an types.Or, instead of being merged. -// If 'a' is an Any containing an Object, and 'b' is an Object (or vice versa); AND both objects have no -// static properties, they are merged. -// If 'a' and 'b' are different types, they are joined with an types.Or. 
-func mergeTypes(a, b types.Type) types.Type { - if a == nil { - return b - } - - if b == nil { - return a - } - - switch a := a.(type) { - case *types.Object: - if bObj, ok := b.(*types.Object); ok && len(a.StaticProperties()) == 0 && len(bObj.StaticProperties()) == 0 { - if len(a.StaticProperties()) > 0 || len(bObj.StaticProperties()) > 0 { - return types.Or(a, bObj) - } - - aDynProps := a.DynamicProperties() - bDynProps := bObj.DynamicProperties() - dynProps := types.NewDynamicProperty( - types.Or(aDynProps.Key, bDynProps.Key), - mergeTypes(aDynProps.Value, bDynProps.Value)) - return types.NewObject(nil, dynProps) - } else if bAny, ok := b.(types.Any); ok && len(a.StaticProperties()) == 0 { - // If a is an object type with no static components ... - for _, t := range bAny { - if tObj, ok := t.(*types.Object); ok && len(tObj.StaticProperties()) == 0 { - // ... and b is a types.Any containing an object with no static components, we merge them. - aDynProps := a.DynamicProperties() - tDynProps := tObj.DynamicProperties() - tDynProps.Key = types.Or(tDynProps.Key, aDynProps.Key) - tDynProps.Value = types.Or(tDynProps.Value, aDynProps.Value) - return bAny - } - } - } - case *types.Set: - if bSet, ok := b.(*types.Set); ok { - return types.NewSet(types.Or(a.Of(), bSet.Of())) - } - case types.Any: - if _, ok := b.(types.Any); !ok { - return mergeTypes(b, a) - } - } - - return types.Or(a, b) -} - -func (n *typeTreeNode) String() string { - b := strings.Builder{} - - if k := n.key; k != nil { - b.WriteString(k.String()) - } else { - b.WriteString("-") - } - - if v := n.value; v != nil { - b.WriteString(": ") - b.WriteString(v.String()) - } - - n.children.Iter(func(_, v util.T) bool { - if child, ok := v.(*typeTreeNode); ok { - b.WriteString("\n\t+ ") - s := child.String() - s = strings.ReplaceAll(s, "\n", "\n\t") - b.WriteString(s) - } - return false - }) - - return b.String() -} - -func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (*types.Object, error) { - if len(path) == 0 { - return o, nil - } - - key := env.Get(path[0].Value) - - if len(path) == 1 { - var dynamicProps *types.DynamicProperty - if dp := o.DynamicProperties(); dp != nil { - dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, tpe)) - } else { - dynamicProps = types.NewDynamicProperty(key, tpe) - } - return types.NewObject(o.StaticProperties(), dynamicProps), nil - } - - child, err := insertIntoObject(types.NewObject(nil, nil), path[1:], tpe, env) - if err != nil { - return nil, err - } - - var dynamicProps *types.DynamicProperty - if dp := o.DynamicProperties(); dp != nil { - dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, child)) - } else { - dynamicProps = types.NewDynamicProperty(key, child) - } - return types.NewObject(o.StaticProperties(), dynamicProps), nil -} - -func (n *typeTreeNode) Leafs() map[*Ref]types.Type { - leafs := map[*Ref]types.Type{} - n.children.Iter(func(_, v util.T) bool { - collectLeafs(v.(*typeTreeNode), nil, leafs) - return false - }) - return leafs -} - -func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) { - nPath := append(path, NewTerm(n.key)) - if n.Leaf() { - leafs[&nPath] = n.Value() - return - } - n.children.Iter(func(_, v util.T) bool { - collectLeafs(v.(*typeTreeNode), nPath, leafs) - return false - }) -} - -func (n *typeTreeNode) Value() types.Type { - return n.value -} - -// selectConstant returns the 
attribute of the type referred to by the term. If -// the attribute type cannot be determined, nil is returned. -func selectConstant(tpe types.Type, term *Term) types.Type { - x, err := JSON(term.Value) - if err == nil { - return types.Select(tpe, x) - } - return nil -} - -// selectRef returns the type of the nested attribute referred to by ref. If -// the attribute type cannot be determined, nil is returned. If the ref -// contains vars or refs, then the returned type will be a union of the -// possible types. -func selectRef(tpe types.Type, ref Ref) types.Type { - - if tpe == nil || len(ref) == 0 { - return tpe - } - - head, tail := ref[0], ref[1:] - - switch head.Value.(type) { - case Var, Ref, *Array, Object, Set: - return selectRef(types.Values(tpe), tail) - default: - return selectRef(selectConstant(tpe, head), tail) - } -} +type TypeEnv = v1.TypeEnv diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/errors.go index 066dfcdd6..0cb8ee28f 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/errors.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/errors.go @@ -5,119 +5,42 @@ package ast import ( - "fmt" - "sort" - "strings" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Errors represents a series of errors encountered during parsing, compiling, // etc. -type Errors []*Error - -func (e Errors) Error() string { - - if len(e) == 0 { - return "no error(s)" - } - - if len(e) == 1 { - return fmt.Sprintf("1 error occurred: %v", e[0].Error()) - } - - s := make([]string, len(e)) - for i, err := range e { - s[i] = err.Error() - } - - return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n")) -} - -// Sort sorts the error slice by location. If the locations are equal then the -// error message is compared. -func (e Errors) Sort() { - sort.Slice(e, func(i, j int) bool { - a := e[i] - b := e[j] - - if cmp := a.Location.Compare(b.Location); cmp != 0 { - return cmp < 0 - } - - return a.Error() < b.Error() - }) -} +type Errors = v1.Errors const ( // ParseErr indicates an unclassified parse error occurred. - ParseErr = "rego_parse_error" + ParseErr = v1.ParseErr // CompileErr indicates an unclassified compile error occurred. - CompileErr = "rego_compile_error" + CompileErr = v1.CompileErr // TypeErr indicates a type error was caught. - TypeErr = "rego_type_error" + TypeErr = v1.TypeErr // UnsafeVarErr indicates an unsafe variable was found during compilation. - UnsafeVarErr = "rego_unsafe_var_error" + UnsafeVarErr = v1.UnsafeVarErr // RecursionErr indicates recursion was found during compilation. - RecursionErr = "rego_recursion_error" + RecursionErr = v1.RecursionErr ) // IsError returns true if err is an AST error with code. func IsError(code string, err error) bool { - if err, ok := err.(*Error); ok { - return err.Code == code - } - return false + return v1.IsError(code, err) } // ErrorDetails defines the interface for detailed error messages. -type ErrorDetails interface { - Lines() []string -} +type ErrorDetails = v1.ErrorDetails // Error represents a single error caught during parsing, compiling, etc. 
-type Error struct { - Code string `json:"code"` - Message string `json:"message"` - Location *Location `json:"location,omitempty"` - Details ErrorDetails `json:"details,omitempty"` -} - -func (e *Error) Error() string { - - var prefix string - - if e.Location != nil { - - if len(e.Location.File) > 0 { - prefix += e.Location.File + ":" + fmt.Sprint(e.Location.Row) - } else { - prefix += fmt.Sprint(e.Location.Row) + ":" + fmt.Sprint(e.Location.Col) - } - } - - msg := fmt.Sprintf("%v: %v", e.Code, e.Message) - - if len(prefix) > 0 { - msg = prefix + ": " + msg - } - - if e.Details != nil { - for _, line := range e.Details.Lines() { - msg += "\n\t" + line - } - } - - return msg -} +type Error = v1.Error // NewError returns a new Error object. func NewError(code string, loc *Location, f string, a ...interface{}) *Error { - return &Error{ - Code: code, - Location: loc, - Message: fmt.Sprintf(f, a...), - } + return v1.NewError(code, loc, f, a...) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/index.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/index.go index cb0cbea32..7e80bb771 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/index.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/index.go @@ -5,904 +5,16 @@ package ast import ( - "fmt" - "sort" - "strings" - - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // RuleIndex defines the interface for rule indices. -type RuleIndex interface { - - // Build tries to construct an index for the given rules. If the index was - // constructed, it returns true, otherwise false. - Build(rules []*Rule) bool - - // Lookup searches the index for rules that will match the provided - // resolver. If the resolver returns an error, it is returned via err. - Lookup(resolver ValueResolver) (*IndexResult, error) - - // AllRules traverses the index and returns all rules that will match - // the provided resolver without any optimizations (effectively with - // indexing disabled). If the resolver returns an error, it is returned - // via err. - AllRules(resolver ValueResolver) (*IndexResult, error) -} +type RuleIndex v1.RuleIndex // IndexResult contains the result of an index lookup. -type IndexResult struct { - Kind RuleKind - Rules []*Rule - Else map[*Rule][]*Rule - Default *Rule - EarlyExit bool - OnlyGroundRefs bool -} +type IndexResult = v1.IndexResult // NewIndexResult returns a new IndexResult object. func NewIndexResult(kind RuleKind) *IndexResult { - return &IndexResult{ - Kind: kind, - Else: map[*Rule][]*Rule{}, - } -} - -// Empty returns true if there are no rules to evaluate. -func (ir *IndexResult) Empty() bool { - return len(ir.Rules) == 0 && ir.Default == nil -} - -type baseDocEqIndex struct { - skipIndexing Set - isVirtual func(Ref) bool - root *trieNode - defaultRule *Rule - kind RuleKind - onlyGroundRefs bool -} - -func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex { - return &baseDocEqIndex{ - skipIndexing: NewSet(NewTerm(InternalPrint.Ref())), - isVirtual: isVirtual, - root: newTrieNodeImpl(), - onlyGroundRefs: true, - } -} - -func (i *baseDocEqIndex) Build(rules []*Rule) bool { - if len(rules) == 0 { - return false - } - - i.kind = rules[0].Head.RuleKind() - indices := newrefindices(i.isVirtual) - - // build indices for each rule. 
- for idx := range rules { - WalkRules(rules[idx], func(rule *Rule) bool { - if rule.Default { - i.defaultRule = rule - return false - } - if i.onlyGroundRefs { - i.onlyGroundRefs = rule.Head.Reference.IsGround() - } - var skip bool - for _, expr := range rule.Body { - if op := expr.OperatorTerm(); op != nil && i.skipIndexing.Contains(op) { - skip = true - break - } - } - if !skip { - for _, expr := range rule.Body { - indices.Update(rule, expr) - } - } - return false - }) - } - - // build trie out of indices. - for idx := range rules { - var prio int - WalkRules(rules[idx], func(rule *Rule) bool { - if rule.Default { - return false - } - node := i.root - if indices.Indexed(rule) { - for _, ref := range indices.Sorted() { - node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref)) - } - } - // Insert rule into trie with (insertion order, priority order) - // tuple. Retaining the insertion order allows us to return rules - // in the order they were passed to this function. - node.append([...]int{idx, prio}, rule) - prio++ - return false - }) - } - return true -} - -func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) { - - tr := newTrieTraversalResult() - - err := i.root.Traverse(resolver, tr) - if err != nil { - return nil, err - } - - result := NewIndexResult(i.kind) - result.Default = i.defaultRule - result.OnlyGroundRefs = i.onlyGroundRefs - result.Rules = make([]*Rule, 0, len(tr.ordering)) - - for _, pos := range tr.ordering { - sort.Slice(tr.unordered[pos], func(i, j int) bool { - return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1] - }) - nodes := tr.unordered[pos] - root := nodes[0].rule - - result.Rules = append(result.Rules, root) - if len(nodes) > 1 { - result.Else[root] = make([]*Rule, len(nodes)-1) - for i := 1; i < len(nodes); i++ { - result.Else[root][i-1] = nodes[i].rule - } - } - } - - result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround() - - return result, nil -} - -func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) { - tr := newTrieTraversalResult() - - // Walk over the rule trie and accumulate _all_ rules - rw := &ruleWalker{result: tr} - i.root.Do(rw) - - result := NewIndexResult(i.kind) - result.Default = i.defaultRule - result.OnlyGroundRefs = i.onlyGroundRefs - result.Rules = make([]*Rule, 0, len(tr.ordering)) - - for _, pos := range tr.ordering { - sort.Slice(tr.unordered[pos], func(i, j int) bool { - return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1] - }) - nodes := tr.unordered[pos] - root := nodes[0].rule - result.Rules = append(result.Rules, root) - if len(nodes) > 1 { - result.Else[root] = make([]*Rule, len(nodes)-1) - for i := 1; i < len(nodes); i++ { - result.Else[root][i-1] = nodes[i].rule - } - } - } - - result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround() - - return result, nil -} - -type ruleWalker struct { - result *trieTraversalResult -} - -func (r *ruleWalker) Do(x interface{}) trieWalker { - tn := x.(*trieNode) - r.result.Add(tn) - return r -} - -type valueMapper struct { - Key string - MapValue func(Value) Value -} - -type refindex struct { - Ref Ref - Value Value - Mapper *valueMapper -} - -type refindices struct { - isVirtual func(Ref) bool - rules map[*Rule][]*refindex - frequency *util.HashMap - sorted []Ref -} - -func newrefindices(isVirtual func(Ref) bool) *refindices { - return &refindices{ - isVirtual: isVirtual, - rules: map[*Rule][]*refindex{}, - frequency: util.NewHashMap(func(a, b util.T) bool { - r1, 
r2 := a.(Ref), b.(Ref) - return r1.Equal(r2) - }, func(x util.T) int { - return x.(Ref).Hash() - }), - } -} - -// Update attempts to update the refindices for the given expression in the -// given rule. If the expression cannot be indexed the update does not affect -// the indices. -func (i *refindices) Update(rule *Rule, expr *Expr) { - - if expr.Negated { - return - } - - if len(expr.With) > 0 { - // NOTE(tsandall): In the future, we may need to consider expressions - // that have with statements applied to them. - return - } - - op := expr.Operator() - - switch { - case op.Equal(Equality.Ref()): - i.updateEq(rule, expr) - - case op.Equal(Equal.Ref()) && len(expr.Operands()) == 2: - // NOTE(tsandall): if equal() is called with more than two arguments the - // output value is being captured in which case the indexer cannot - // exclude the rule if the equal() call would return false (because the - // false value must still be produced.) - i.updateEq(rule, expr) - - case op.Equal(GlobMatch.Ref()) && len(expr.Operands()) == 3: - // NOTE(sr): Same as with equal() above -- 4 operands means the output - // of `glob.match` is captured and the rule can thus not be excluded. - i.updateGlobMatch(rule, expr) - } -} - -// Sorted returns a sorted list of references that the indices were built from. -// References that appear more frequently in the indexed rules are ordered -// before less frequently appearing references. -func (i *refindices) Sorted() []Ref { - - if i.sorted == nil { - counts := make([]int, 0, i.frequency.Len()) - i.sorted = make([]Ref, 0, i.frequency.Len()) - - i.frequency.Iter(func(k, v util.T) bool { - counts = append(counts, v.(int)) - i.sorted = append(i.sorted, k.(Ref)) - return false - }) - - sort.Slice(i.sorted, func(a, b int) bool { - if counts[a] > counts[b] { - return true - } else if counts[b] > counts[a] { - return false - } - return i.sorted[a][0].Loc().Compare(i.sorted[b][0].Loc()) < 0 - }) - } - - return i.sorted -} - -func (i *refindices) Indexed(rule *Rule) bool { - return len(i.rules[rule]) > 0 -} - -func (i *refindices) Value(rule *Rule, ref Ref) Value { - if index := i.index(rule, ref); index != nil { - return index.Value - } - return nil -} - -func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper { - if index := i.index(rule, ref); index != nil { - return index.Mapper - } - return nil -} - -func (i *refindices) updateEq(rule *Rule, expr *Expr) { - a, b := expr.Operand(0), expr.Operand(1) - args := rule.Head.Args - if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, a, b); ok { - i.insert(rule, idx) - return - } - if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, b, a); ok { - i.insert(rule, idx) - return - } -} - -func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) { - args := rule.Head.Args - - delim, ok := globDelimiterToString(expr.Operand(1)) - if !ok { - return - } - - if arr := globPatternToArray(expr.Operand(0), delim); arr != nil { - // The 3rd operand of glob.match is the value to match. We assume the - // 3rd operand was a reference that has been rewritten and bound to a - // variable earlier in the query OR a function argument variable. 
- match := expr.Operand(2) - if _, ok := match.Value.(Var); ok { - var ref Ref - for _, other := range i.rules[rule] { - if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 { - ref = other.Ref - } - } - if ref == nil { - for j, arg := range args { - if arg.Equal(match) { - ref = Ref{FunctionArgRootDocument, IntNumberTerm(j)} - } - } - } - if ref != nil { - i.insert(rule, &refindex{ - Ref: ref, - Value: arr.Value, - Mapper: &valueMapper{ - Key: delim, - MapValue: func(v Value) Value { - if s, ok := v.(String); ok { - return stringSliceToArray(splitStringEscaped(string(s), delim)) - } - return v - }, - }, - }) - } - } - } -} - -func (i *refindices) insert(rule *Rule, index *refindex) { - - count, ok := i.frequency.Get(index.Ref) - if !ok { - count = 0 - } - - i.frequency.Put(index.Ref, count.(int)+1) - - for pos, other := range i.rules[rule] { - if other.Ref.Equal(index.Ref) { - i.rules[rule][pos] = index - return - } - } - - i.rules[rule] = append(i.rules[rule], index) -} - -func (i *refindices) index(rule *Rule, ref Ref) *refindex { - for _, index := range i.rules[rule] { - if index.Ref.Equal(ref) { - return index - } - } - return nil -} - -type trieWalker interface { - Do(x interface{}) trieWalker -} - -type trieTraversalResult struct { - unordered map[int][]*ruleNode - ordering []int - values Set -} - -func newTrieTraversalResult() *trieTraversalResult { - return &trieTraversalResult{ - unordered: map[int][]*ruleNode{}, - values: NewSet(), - } -} - -func (tr *trieTraversalResult) Add(t *trieNode) { - for _, node := range t.rules { - root := node.prio[0] - nodes, ok := tr.unordered[root] - if !ok { - tr.ordering = append(tr.ordering, root) - } - tr.unordered[root] = append(nodes, node) - } - if t.values != nil { - t.values.Foreach(func(v *Term) { tr.values.Add(v) }) - } -} - -type trieNode struct { - ref Ref - values Set - mappers []*valueMapper - next *trieNode - any *trieNode - undefined *trieNode - scalars *util.HashMap - array *trieNode - rules []*ruleNode -} - -func (node *trieNode) String() string { - var flags []string - flags = append(flags, fmt.Sprintf("self:%p", node)) - if len(node.ref) > 0 { - flags = append(flags, node.ref.String()) - } - if node.next != nil { - flags = append(flags, fmt.Sprintf("next:%p", node.next)) - } - if node.any != nil { - flags = append(flags, fmt.Sprintf("any:%p", node.any)) - } - if node.undefined != nil { - flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined)) - } - if node.array != nil { - flags = append(flags, fmt.Sprintf("array:%p", node.array)) - } - if node.scalars.Len() > 0 { - buf := make([]string, 0, node.scalars.Len()) - node.scalars.Iter(func(k, v util.T) bool { - key := k.(Value) - val := v.(*trieNode) - buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val)) - return false - }) - sort.Strings(buf) - flags = append(flags, strings.Join(buf, " ")) - } - if len(node.rules) > 0 { - flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules))) - } - if len(node.mappers) > 0 { - flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers))) - } - if node.values != nil { - if l := node.values.Len(); l > 0 { - flags = append(flags, fmt.Sprintf("%d value(s)", l)) - } - } - return strings.Join(flags, " ") -} - -func (node *trieNode) append(prio [2]int, rule *Rule) { - node.rules = append(node.rules, &ruleNode{prio, rule}) - - if node.values != nil && rule.Head.Value != nil { - node.values.Add(rule.Head.Value) - return - } - - if node.values == nil && rule.Head.DocKind() == CompleteDoc { - 
node.values = NewSet(rule.Head.Value) - } -} - -type ruleNode struct { - prio [2]int - rule *Rule -} - -func newTrieNodeImpl() *trieNode { - return &trieNode{ - scalars: util.NewHashMap(valueEq, valueHash), - } -} - -func (node *trieNode) Do(walker trieWalker) { - next := walker.Do(node) - if next == nil { - return - } - if node.any != nil { - node.any.Do(next) - } - if node.undefined != nil { - node.undefined.Do(next) - } - - node.scalars.Iter(func(_, v util.T) bool { - child := v.(*trieNode) - child.Do(next) - return false - }) - - if node.array != nil { - node.array.Do(next) - } - if node.next != nil { - node.next.Do(next) - } -} - -func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode { - - if node.next == nil { - node.next = newTrieNodeImpl() - node.next.ref = ref - } - - if mapper != nil { - node.next.addMapper(mapper) - } - - return node.next.insertValue(value) -} - -func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error { - - if node == nil { - return nil - } - - tr.Add(node) - - return node.next.traverse(resolver, tr) -} - -func (node *trieNode) addMapper(mapper *valueMapper) { - for i := range node.mappers { - if node.mappers[i].Key == mapper.Key { - return - } - } - node.mappers = append(node.mappers, mapper) -} - -func (node *trieNode) insertValue(value Value) *trieNode { - - switch value := value.(type) { - case nil: - if node.undefined == nil { - node.undefined = newTrieNodeImpl() - } - return node.undefined - case Var: - if node.any == nil { - node.any = newTrieNodeImpl() - } - return node.any - case Null, Boolean, Number, String: - child, ok := node.scalars.Get(value) - if !ok { - child = newTrieNodeImpl() - node.scalars.Put(value, child) - } - return child.(*trieNode) - case *Array: - if node.array == nil { - node.array = newTrieNodeImpl() - } - return node.array.insertArray(value) - } - - panic("illegal value") -} - -func (node *trieNode) insertArray(arr *Array) *trieNode { - - if arr.Len() == 0 { - return node - } - - switch head := arr.Elem(0).Value.(type) { - case Var: - if node.any == nil { - node.any = newTrieNodeImpl() - } - return node.any.insertArray(arr.Slice(1, -1)) - case Null, Boolean, Number, String: - child, ok := node.scalars.Get(head) - if !ok { - child = newTrieNodeImpl() - node.scalars.Put(head, child) - } - return child.(*trieNode).insertArray(arr.Slice(1, -1)) - } - - panic("illegal value") -} - -func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error { - - if node == nil { - return nil - } - - v, err := resolver.Resolve(node.ref) - if err != nil { - if IsUnknownValueErr(err) { - return node.traverseUnknown(resolver, tr) - } - return err - } - - if node.undefined != nil { - err = node.undefined.Traverse(resolver, tr) - if err != nil { - return err - } - } - - if v == nil { - return nil - } - - if node.any != nil { - err = node.any.Traverse(resolver, tr) - if err != nil { - return err - } - } - - if err := node.traverseValue(resolver, tr, v); err != nil { - return err - } - - for i := range node.mappers { - if err := node.traverseValue(resolver, tr, node.mappers[i].MapValue(v)); err != nil { - return err - } - } - - return nil -} - -func (node *trieNode) traverseValue(resolver ValueResolver, tr *trieTraversalResult, value Value) error { - - switch value := value.(type) { - case *Array: - if node.array == nil { - return nil - } - return node.array.traverseArray(resolver, tr, value) - - case Null, Boolean, Number, String: - child, ok := node.scalars.Get(value) - if 
!ok { - return nil - } - return child.(*trieNode).Traverse(resolver, tr) - } - - return nil -} - -func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) error { - - if arr.Len() == 0 { - return node.Traverse(resolver, tr) - } - - if node.any != nil { - err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1)) - if err != nil { - return err - } - } - - head := arr.Elem(0).Value - - if !IsScalar(head) { - return nil - } - - child, ok := node.scalars.Get(head) - if !ok { - return nil - } - return child.(*trieNode).traverseArray(resolver, tr, arr.Slice(1, -1)) -} - -func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error { - - if node == nil { - return nil - } - - if err := node.Traverse(resolver, tr); err != nil { - return err - } - - if err := node.undefined.traverseUnknown(resolver, tr); err != nil { - return err - } - - if err := node.any.traverseUnknown(resolver, tr); err != nil { - return err - } - - if err := node.array.traverseUnknown(resolver, tr); err != nil { - return err - } - - var iterErr error - node.scalars.Iter(func(_, v util.T) bool { - child := v.(*trieNode) - if iterErr = child.traverseUnknown(resolver, tr); iterErr != nil { - return true - } - return false - }) - - return iterErr -} - -// If term `a` is one of the function's operands, we store a Ref: `args[0]` -// for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll -// bind `args[0]` and `args[1]` to this rule when called for (x=10) and -// (y=12) respectively. -func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term) (*refindex, bool) { - switch v := a.Value.(type) { - case Var: - for i, arg := range args { - if arg.Value.Compare(v) == 0 { - if bval, ok := indexValue(b); ok { - return &refindex{Ref: Ref{FunctionArgRootDocument, IntNumberTerm(i)}, Value: bval}, true - } - } - } - case Ref: - if !RootDocumentNames.Contains(v[0]) { - return nil, false - } - if isVirtual(v) { - return nil, false - } - if v.IsNested() || !v.IsGround() { - return nil, false - } - if bval, ok := indexValue(b); ok { - return &refindex{Ref: v, Value: bval}, true - } - } - return nil, false -} - -func indexValue(b *Term) (Value, bool) { - switch b := b.Value.(type) { - case Null, Boolean, Number, String, Var: - return b, true - case *Array: - stop := false - first := true - vis := NewGenericVisitor(func(x interface{}) bool { - if first { - first = false - return false - } - switch x.(type) { - // No nested structures or values that require evaluation (other than var). - case *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref: - stop = true - } - return stop - }) - vis.Walk(b) - if !stop { - return b, true - } - } - - return nil, false -} - -func globDelimiterToString(delim *Term) (string, bool) { - - arr, ok := delim.Value.(*Array) - if !ok { - return "", false - } - - var result string - - if arr.Len() == 0 { - result = "." 
- } else { - for i := 0; i < arr.Len(); i++ { - term := arr.Elem(i) - s, ok := term.Value.(String) - if !ok { - return "", false - } - result += string(s) - } - } - - return result, true -} - -func globPatternToArray(pattern *Term, delim string) *Term { - - s, ok := pattern.Value.(String) - if !ok { - return nil - } - - parts := splitStringEscaped(string(s), delim) - arr := make([]*Term, len(parts)) - - for i := range parts { - if parts[i] == "*" { - arr[i] = VarTerm("$globwildcard") - } else { - var escaped bool - for _, c := range parts[i] { - if c == '\\' { - escaped = !escaped - continue - } - if !escaped { - switch c { - case '[', '?', '{', '*': - // TODO(tsandall): super glob and character pattern - // matching not supported yet. - return nil - } - } - escaped = false - } - arr[i] = StringTerm(parts[i]) - } - } - - return NewTerm(NewArray(arr...)) -} - -// splits s on characters in delim except if delim characters have been escaped -// with reverse solidus. -func splitStringEscaped(s string, delim string) []string { - - var last, curr int - var escaped bool - var result []string - - for ; curr < len(s); curr++ { - if s[curr] == '\\' || escaped { - escaped = !escaped - continue - } - if strings.ContainsRune(delim, rune(s[curr])) { - result = append(result, s[last:curr]) - last = curr + 1 - } - } - - result = append(result, s[last:]) - - return result -} - -func stringSliceToArray(s []string) *Array { - arr := make([]*Term, len(s)) - for i, v := range s { - arr[i] = StringTerm(v) - } - return NewArray(arr...) + return v1.NewIndexResult(kind) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/interning.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/interning.go new file mode 100644 index 000000000..239293664 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/interning.go @@ -0,0 +1,24 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + v1 "github.com/open-policy-agent/opa/v1/ast" +) + +func InternedBooleanTerm(b bool) *Term { + return v1.InternedBooleanTerm(b) +} + +// InternedIntNumberTerm returns a term with the given integer value. The term is +// cached between -1 to 512, and for values outside of that range, this function +// is equivalent to ast.IntNumberTerm. +func InternedIntNumberTerm(i int) *Term { + return v1.InternedIntNumberTerm(i) +} + +func HasInternedIntNumberTerm(i int) bool { + return v1.HasInternedIntNumberTerm(i) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/json/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/json/doc.go new file mode 100644 index 000000000..26aee9b99 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/json/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
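The interning.go file added above only re-exports the v1 interning helpers, so its behaviour is what the doc comments in that hunk describe. A minimal usage sketch follows; the pointer-equality and range expectations in the comments are assumptions based on the documented -1..512 cache, not guarantees from this diff:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// Terms for small integers are served from a shared cache, so repeated
	// calls for the same value are expected to return the same *Term pointer.
	a := ast.InternedIntNumberTerm(42)
	b := ast.InternedIntNumberTerm(42)
	fmt.Println(a == b) // expected: true (42 is inside the cached range)

	fmt.Println(ast.HasInternedIntNumberTerm(42))   // expected: true
	fmt.Println(ast.HasInternedIntNumberTerm(9999)) // expected: false; behaves like ast.IntNumberTerm

	// Boolean terms are interned as well.
	fmt.Println(ast.InternedBooleanTerm(true))
}
```

Reusing interned terms this way avoids allocating a fresh *Term for common small values on hot evaluation paths, which is the point of exposing these helpers from the compatibility package.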
+package json diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/json/json.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/json/json.go index 565017d58..8a3a36bb9 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/json/json.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/json/json.go @@ -1,36 +1,15 @@ package json +import v1 "github.com/open-policy-agent/opa/v1/ast/json" + // Options defines the options for JSON operations, // currently only marshaling can be configured -type Options struct { - MarshalOptions MarshalOptions -} +type Options = v1.Options // MarshalOptions defines the options for JSON marshaling, // currently only toggling the marshaling of location information is supported -type MarshalOptions struct { - // IncludeLocation toggles the marshaling of location information - IncludeLocation NodeToggle - // IncludeLocationText additionally/optionally includes the text of the location - IncludeLocationText bool - // ExcludeLocationFile additionally/optionally excludes the file of the location - // Note that this is inverted (i.e. not "include" as the default needs to remain false) - ExcludeLocationFile bool -} +type MarshalOptions = v1.MarshalOptions // NodeToggle is a generic struct to allow the toggling of // settings for different ast node types -type NodeToggle struct { - Term bool - Package bool - Comment bool - Import bool - Rule bool - Head bool - Expr bool - SomeDecl bool - Every bool - With bool - Annotations bool - AnnotationsRef bool -} +type NodeToggle = v1.NodeToggle diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/map.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/map.go index b0cc9eb60..070ad3e5d 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/map.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/map.go @@ -5,129 +5,14 @@ package ast import ( - "encoding/json" - - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // ValueMap represents a key/value map between AST term values. Any type of term // can be used as a key in the map. -type ValueMap struct { - hashMap *util.HashMap -} +type ValueMap = v1.ValueMap // NewValueMap returns a new ValueMap. func NewValueMap() *ValueMap { - vs := &ValueMap{ - hashMap: util.NewHashMap(valueEq, valueHash), - } - return vs -} - -// MarshalJSON provides a custom marshaller for the ValueMap which -// will include the key, value, and value type. -func (vs *ValueMap) MarshalJSON() ([]byte, error) { - var tmp []map[string]interface{} - vs.Iter(func(k Value, v Value) bool { - tmp = append(tmp, map[string]interface{}{ - "name": k.String(), - "type": TypeName(v), - "value": v, - }) - return false - }) - return json.Marshal(tmp) -} - -// Copy returns a shallow copy of the ValueMap. -func (vs *ValueMap) Copy() *ValueMap { - if vs == nil { - return nil - } - cpy := NewValueMap() - cpy.hashMap = vs.hashMap.Copy() - return cpy -} - -// Equal returns true if this ValueMap equals the other. -func (vs *ValueMap) Equal(other *ValueMap) bool { - if vs == nil { - return other == nil || other.Len() == 0 - } - if other == nil { - return vs == nil || vs.Len() == 0 - } - return vs.hashMap.Equal(other.hashMap) -} - -// Len returns the number of elements in the map. -func (vs *ValueMap) Len() int { - if vs == nil { - return 0 - } - return vs.hashMap.Len() -} - -// Get returns the value in the map for k. 
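Because Options, MarshalOptions and NodeToggle above become type aliases of the v1 definitions, existing callers keep compiling unchanged. A sketch of constructing them, assuming the aliased v1 types keep the field set of the removed v0 structs shown in the hunk:

```go
package main

import (
	"fmt"

	astJSON "github.com/open-policy-agent/opa/ast/json"
)

func main() {
	// Ask for location information on terms and rules, plus the location text.
	// Field names are taken from the v0 definitions removed above; the aliased
	// v1 types are assumed to expose the same fields.
	opts := astJSON.Options{
		MarshalOptions: astJSON.MarshalOptions{
			IncludeLocation: astJSON.NodeToggle{
				Term: true,
				Rule: true,
			},
			IncludeLocationText: true,
		},
	}
	fmt.Printf("%+v\n", opts)
}
```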
-func (vs *ValueMap) Get(k Value) Value { - if vs != nil { - if v, ok := vs.hashMap.Get(k); ok { - return v.(Value) - } - } - return nil -} - -// Hash returns a hash code for this ValueMap. -func (vs *ValueMap) Hash() int { - if vs == nil { - return 0 - } - return vs.hashMap.Hash() -} - -// Iter calls the iter function for each key/value pair in the map. If the iter -// function returns true, iteration stops. -func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool { - if vs == nil { - return false - } - return vs.hashMap.Iter(func(kt, vt util.T) bool { - k := kt.(Value) - v := vt.(Value) - return iter(k, v) - }) -} - -// Put inserts a key k into the map with value v. -func (vs *ValueMap) Put(k, v Value) { - if vs == nil { - panic("put on nil value map") - } - vs.hashMap.Put(k, v) -} - -// Delete removes a key k from the map. -func (vs *ValueMap) Delete(k Value) { - if vs == nil { - return - } - vs.hashMap.Delete(k) -} - -func (vs *ValueMap) String() string { - if vs == nil { - return "{}" - } - return vs.hashMap.String() -} - -func valueHash(v util.T) int { - return v.(Value).Hash() -} - -func valueEq(a, b util.T) bool { - av := a.(Value) - bv := b.(Value) - return av.Compare(bv) == 0 + return v1.NewValueMap() } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/marshal.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/marshal.go deleted file mode 100644 index 53fb11204..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/marshal.go +++ /dev/null @@ -1,11 +0,0 @@ -package ast - -import ( - astJSON "github.com/open-policy-agent/opa/ast/json" -) - -// customJSON is an interface that can be implemented by AST nodes that -// allows the parser to set options for JSON operations on that node. -type customJSON interface { - setJSONOptions(astJSON.Options) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/parser.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/parser.go index 0ad15f631..45cd4da06 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/parser.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/parser.go @@ -1,2701 +1,49 @@ -// Copyright 2020 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. package ast import ( - "bytes" - "encoding/json" - "fmt" - "io" - "math/big" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "unicode/utf8" - - "gopkg.in/yaml.v2" - - "github.com/open-policy-agent/opa/ast/internal/scanner" - "github.com/open-policy-agent/opa/ast/internal/tokens" - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/ast/location" + v1 "github.com/open-policy-agent/opa/v1/ast" ) -var RegoV1CompatibleRef = Ref{VarTerm("rego"), StringTerm("v1")} +var RegoV1CompatibleRef = v1.RegoV1CompatibleRef // RegoVersion defines the Rego syntax requirements for a module. -type RegoVersion int +type RegoVersion = v1.RegoVersion + +const DefaultRegoVersion = RegoV0 const ( + RegoUndefined = v1.RegoUndefined // RegoV0 is the default, original Rego syntax. - RegoV0 RegoVersion = iota + RegoV0 = v1.RegoV0 // RegoV0CompatV1 requires modules to comply with both the RegoV0 and RegoV1 syntax (as when 'rego.v1' is imported in a module). // Shortly, RegoV1 compatibility is required, but 'rego.v1' or 'future.keywords' must also be imported. 
- RegoV0CompatV1 + RegoV0CompatV1 = v1.RegoV0CompatV1 // RegoV1 is the Rego syntax enforced by OPA 1.0; e.g.: // future.keywords part of default keyword set, and don't require imports; // 'if' and 'contains' required in rule heads; // (some) strict checks on by default. - RegoV1 + RegoV1 = v1.RegoV1 ) -func (v RegoVersion) Int() int { - if v == RegoV1 { - return 1 - } - return 0 -} - -func (v RegoVersion) String() string { - switch v { - case RegoV0: - return "v0" - case RegoV1: - return "v1" - case RegoV0CompatV1: - return "v0v1" - default: - return "unknown" - } -} - func RegoVersionFromInt(i int) RegoVersion { - if i == 1 { - return RegoV1 - } - return RegoV0 -} - -// Note: This state is kept isolated from the parser so that we -// can do efficient shallow copies of these values when doing a -// save() and restore(). -type state struct { - s *scanner.Scanner - lastEnd int - skippedNL bool - tok tokens.Token - tokEnd int - lit string - loc Location - errors Errors - hints []string - comments []*Comment - wildcard int -} - -func (s *state) String() string { - return fmt.Sprintf("", s.s, s.tok, s.lit, s.loc, len(s.errors), len(s.comments)) -} - -func (s *state) Loc() *location.Location { - cpy := s.loc - return &cpy -} - -func (s *state) Text(offset, end int) []byte { - bs := s.s.Bytes() - if offset >= 0 && offset < len(bs) { - if end >= offset && end <= len(bs) { - return bs[offset:end] - } - } - return nil + return v1.RegoVersionFromInt(i) } // Parser is used to parse Rego statements. -type Parser struct { - r io.Reader - s *state - po ParserOptions - cache parsedTermCache -} - -type parsedTermCacheItem struct { - t *Term - post *state // post is the post-state that's restored on a cache-hit - offset int - next *parsedTermCacheItem -} - -type parsedTermCache struct { - m *parsedTermCacheItem -} - -func (c parsedTermCache) String() string { - s := strings.Builder{} - s.WriteRune('{') - var e *parsedTermCacheItem - for e = c.m; e != nil; e = e.next { - s.WriteString(fmt.Sprintf("%v", e)) - } - s.WriteRune('}') - return s.String() -} - -func (e *parsedTermCacheItem) String() string { - return fmt.Sprintf("<%d:%v>", e.offset, e.t) -} +type Parser = v1.Parser // ParserOptions defines the options for parsing Rego statements. -type ParserOptions struct { - Capabilities *Capabilities - ProcessAnnotation bool - AllFutureKeywords bool - FutureKeywords []string - SkipRules bool - JSONOptions *astJSON.Options - // RegoVersion is the version of Rego to parse for. - RegoVersion RegoVersion - unreleasedKeywords bool // TODO(sr): cleanup -} - -// EffectiveRegoVersion returns the effective RegoVersion to use for parsing. -// Deprecated: Use RegoVersion instead. -func (po *ParserOptions) EffectiveRegoVersion() RegoVersion { - return po.RegoVersion -} +type ParserOptions = v1.ParserOptions // NewParser creates and initializes a Parser. func NewParser() *Parser { - p := &Parser{ - s: &state{}, - po: ParserOptions{}, - } - return p -} - -// WithFilename provides the filename for Location details -// on parsed statements. -func (p *Parser) WithFilename(filename string) *Parser { - p.s.loc.File = filename - return p -} - -// WithReader provides the io.Reader that the parser will -// use as its source. 
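Parser and ParserOptions are likewise aliases, so the builder methods documented in the removed v0 code remain reachable through this package via the v1 implementation. A sketch of parsing a small module with them; the module text and filename are illustrative only:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	module := `package example

allow if input.user == "admin"
`

	// The builder methods shown in the deleted v0 code (WithReader,
	// WithFilename, WithRegoVersion, ...) are assumed to carry over via the
	// Parser = v1.Parser alias introduced above.
	stmts, comments, errs := ast.NewParser().
		WithReader(bytes.NewBufferString(module)).
		WithFilename("example.rego").
		WithRegoVersion(ast.RegoV1).
		Parse()

	fmt.Println(len(stmts), len(comments), len(errs))
}
```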
-func (p *Parser) WithReader(r io.Reader) *Parser { - p.r = r - return p -} - -// WithProcessAnnotation enables or disables the processing of -// annotations by the Parser -func (p *Parser) WithProcessAnnotation(processAnnotation bool) *Parser { - p.po.ProcessAnnotation = processAnnotation - return p -} - -// WithFutureKeywords enables "future" keywords, i.e., keywords that can -// be imported via -// -// import future.keywords.kw -// import future.keywords.other -// -// but in a more direct way. The equivalent of this import would be -// -// WithFutureKeywords("kw", "other") -func (p *Parser) WithFutureKeywords(kws ...string) *Parser { - p.po.FutureKeywords = kws - return p -} - -// WithAllFutureKeywords enables all "future" keywords, i.e., the -// ParserOption equivalent of -// -// import future.keywords -func (p *Parser) WithAllFutureKeywords(yes bool) *Parser { - p.po.AllFutureKeywords = yes - return p -} - -// withUnreleasedKeywords allows using keywords that haven't surfaced -// as future keywords (see above) yet, but have tests that require -// them to be parsed -func (p *Parser) withUnreleasedKeywords(yes bool) *Parser { - p.po.unreleasedKeywords = yes - return p -} - -// WithCapabilities sets the capabilities structure on the parser. -func (p *Parser) WithCapabilities(c *Capabilities) *Parser { - p.po.Capabilities = c - return p -} - -// WithSkipRules instructs the parser not to attempt to parse Rule statements. -func (p *Parser) WithSkipRules(skip bool) *Parser { - p.po.SkipRules = skip - return p -} - -// WithJSONOptions sets the Options which will be set on nodes to configure -// their JSON marshaling behavior. -func (p *Parser) WithJSONOptions(jsonOptions *astJSON.Options) *Parser { - p.po.JSONOptions = jsonOptions - return p -} - -func (p *Parser) WithRegoVersion(version RegoVersion) *Parser { - p.po.RegoVersion = version - return p -} - -func (p *Parser) parsedTermCacheLookup() (*Term, *state) { - l := p.s.loc.Offset - // stop comparing once the cached offsets are lower than l - for h := p.cache.m; h != nil && h.offset >= l; h = h.next { - if h.offset == l { - return h.t, h.post - } - } - return nil, nil -} - -func (p *Parser) parsedTermCachePush(t *Term, s0 *state) { - s1 := p.save() - o0 := s0.loc.Offset - entry := parsedTermCacheItem{t: t, post: s1, offset: o0} - - // find the first one whose offset is smaller than ours - var e *parsedTermCacheItem - for e = p.cache.m; e != nil; e = e.next { - if e.offset < o0 { - break - } - } - entry.next = e - p.cache.m = &entry -} - -// futureParser returns a shallow copy of `p` with an empty -// cache, and a scanner that knows all future keywords. -// It's used to present hints in errors, when statements would -// only parse successfully if some future keyword is enabled. -func (p *Parser) futureParser() *Parser { - q := *p - q.s = p.save() - q.s.s = p.s.s.WithKeywords(futureKeywords) - q.cache = parsedTermCache{} - return &q -} - -// presentParser returns a shallow copy of `p` with an empty -// cache, and a scanner that knows none of the future keywords. -// It is used to successfully parse keyword imports, like -// -// import future.keywords.in -// -// even when the parser has already been informed about the -// future keyword "in". This parser won't error out because -// "in" is an identifier. 
-func (p *Parser) presentParser() (*Parser, map[string]tokens.Token) { - var cpy map[string]tokens.Token - q := *p - q.s = p.save() - q.s.s, cpy = p.s.s.WithoutKeywords(futureKeywords) - q.cache = parsedTermCache{} - return &q, cpy -} - -// Parse will read the Rego source and parse statements and -// comments as they are found. Any errors encountered while -// parsing will be accumulated and returned as a list of Errors. -func (p *Parser) Parse() ([]Statement, []*Comment, Errors) { - - if p.po.Capabilities == nil { - p.po.Capabilities = CapabilitiesForThisVersion() - } - - allowedFutureKeywords := map[string]tokens.Token{} - - if p.po.RegoVersion == RegoV1 { - // RegoV1 includes all future keywords in the default language definition - for k, v := range futureKeywords { - allowedFutureKeywords[k] = v - } - } else { - for _, kw := range p.po.Capabilities.FutureKeywords { - var ok bool - allowedFutureKeywords[kw], ok = futureKeywords[kw] - if !ok { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), - Location: nil, - }, - } - } - } - } - - var err error - p.s.s, err = scanner.New(p.r) - if err != nil { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: err.Error(), - Location: nil, - }, - } - } - - selected := map[string]tokens.Token{} - if p.po.AllFutureKeywords || p.po.RegoVersion == RegoV1 { - for kw, tok := range allowedFutureKeywords { - selected[kw] = tok - } - } else { - for _, kw := range p.po.FutureKeywords { - tok, ok := allowedFutureKeywords[kw] - if !ok { - return nil, nil, Errors{ - &Error{ - Code: ParseErr, - Message: fmt.Sprintf("unknown future keyword: %v", kw), - Location: nil, - }, - } - } - selected[kw] = tok - } - } - p.s.s = p.s.s.WithKeywords(selected) - - if p.po.RegoVersion == RegoV1 { - for kw, tok := range allowedFutureKeywords { - p.s.s.AddKeyword(kw, tok) - } - } - - // read the first token to initialize the parser - p.scan() - - var stmts []Statement - - // Read from the scanner until the last token is reached or no statements - // can be parsed. Attempt to parse package statements, import statements, - // rule statements, and then body/query statements (in that order). If a - // statement cannot be parsed, restore the parser state before trying the - // next type of statement. If a statement can be parsed, continue from that - // point trying to parse packages, imports, etc. in the same order. 
- for p.s.tok != tokens.EOF { - - s := p.save() - - if pkg := p.parsePackage(); pkg != nil { - stmts = append(stmts, pkg) - continue - } else if len(p.s.errors) > 0 { - break - } - - p.restore(s) - s = p.save() - - if imp := p.parseImport(); imp != nil { - if RegoRootDocument.Equal(imp.Path.Value.(Ref)[0]) { - p.regoV1Import(imp) - } - - if FutureRootDocument.Equal(imp.Path.Value.(Ref)[0]) { - p.futureImport(imp, allowedFutureKeywords) - } - - stmts = append(stmts, imp) - continue - } else if len(p.s.errors) > 0 { - break - } - - p.restore(s) - - if !p.po.SkipRules { - s = p.save() - - if rules := p.parseRules(); rules != nil { - for i := range rules { - stmts = append(stmts, rules[i]) - } - continue - } else if len(p.s.errors) > 0 { - break - } - - p.restore(s) - } - - if body := p.parseQuery(true, tokens.EOF); body != nil { - stmts = append(stmts, body) - continue - } - - break - } - - if p.po.ProcessAnnotation { - stmts = p.parseAnnotations(stmts) - } - - if p.po.JSONOptions != nil { - for i := range stmts { - vis := NewGenericVisitor(func(x interface{}) bool { - if x, ok := x.(customJSON); ok { - x.setJSONOptions(*p.po.JSONOptions) - } - return false - }) - - vis.Walk(stmts[i]) - } - } - - return stmts, p.s.comments, p.s.errors -} - -func (p *Parser) parseAnnotations(stmts []Statement) []Statement { - - annotStmts, errs := parseAnnotations(p.s.comments) - for _, err := range errs { - p.error(err.Location, err.Message) - } - - for _, annotStmt := range annotStmts { - stmts = append(stmts, annotStmt) - } - - return stmts -} - -func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) { - - var hint = []byte("METADATA") - var curr *metadataParser - var blocks []*metadataParser - - for i := 0; i < len(comments); i++ { - if curr != nil { - if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 { - curr.Append(comments[i]) - continue - } - curr = nil - } - if bytes.HasPrefix(bytes.TrimSpace(comments[i].Text), hint) { - curr = newMetadataParser(comments[i].Location) - blocks = append(blocks, curr) - } - } - - var stmts []*Annotations - var errs Errors - for _, b := range blocks { - a, err := b.Parse() - if err != nil { - errs = append(errs, &Error{ - Code: ParseErr, - Message: err.Error(), - Location: b.loc, - }) - } else { - stmts = append(stmts, a) - } - } - - return stmts, errs -} - -func (p *Parser) parsePackage() *Package { - - var pkg Package - pkg.SetLoc(p.s.Loc()) - - if p.s.tok != tokens.Package { - return nil - } - - p.scan() - if p.s.tok != tokens.Ident { - p.illegalToken() - return nil - } - - term := p.parseTerm() - - if term != nil { - switch v := term.Value.(type) { - case Var: - pkg.Path = Ref{ - DefaultRootDocument.Copy().SetLocation(term.Location), - StringTerm(string(v)).SetLocation(term.Location), - } - case Ref: - pkg.Path = make(Ref, len(v)+1) - pkg.Path[0] = DefaultRootDocument.Copy().SetLocation(v[0].Location) - first, ok := v[0].Value.(Var) - if !ok { - p.errorf(v[0].Location, "unexpected %v token: expecting var", TypeName(v[0].Value)) - return nil - } - pkg.Path[1] = StringTerm(string(first)).SetLocation(v[0].Location) - for i := 2; i < len(pkg.Path); i++ { - switch v[i-1].Value.(type) { - case String: - pkg.Path[i] = v[i-1] - default: - p.errorf(v[i-1].Location, "unexpected %v token: expecting string", TypeName(v[i-1].Value)) - return nil - } - } - default: - p.illegalToken() - return nil - } - } - - if pkg.Path == nil { - if len(p.s.errors) == 0 { - p.error(p.s.Loc(), "expected path") - } - return nil - } - - return 
&pkg -} - -func (p *Parser) parseImport() *Import { - - var imp Import - imp.SetLoc(p.s.Loc()) - - if p.s.tok != tokens.Import { - return nil - } - - p.scan() - if p.s.tok != tokens.Ident { - p.error(p.s.Loc(), "expected ident") - return nil - } - q, prev := p.presentParser() - term := q.parseTerm() - if term != nil { - switch v := term.Value.(type) { - case Var: - imp.Path = RefTerm(term).SetLocation(term.Location) - case Ref: - for i := 1; i < len(v); i++ { - if _, ok := v[i].Value.(String); !ok { - p.errorf(v[i].Location, "unexpected %v token: expecting string", TypeName(v[i].Value)) - return nil - } - } - imp.Path = term - } - } - // keep advanced parser state, reset known keywords - p.s = q.s - p.s.s = q.s.s.WithKeywords(prev) - - if imp.Path == nil { - p.error(p.s.Loc(), "expected path") - return nil - } - - path := imp.Path.Value.(Ref) - - switch { - case RootDocumentNames.Contains(path[0]): - case FutureRootDocument.Equal(path[0]): - case RegoRootDocument.Equal(path[0]): - default: - p.hint("if this is unexpected, try updating OPA") - p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v", - RootDocumentNames.Union(NewSet(FutureRootDocument, RegoRootDocument)), - path[0]) - return nil - } - - if p.s.tok == tokens.As { - p.scan() - - if p.s.tok != tokens.Ident { - p.illegal("expected var") - return nil - } - - if alias := p.parseTerm(); alias != nil { - v, ok := alias.Value.(Var) - if ok { - imp.Alias = v - return &imp - } - } - p.illegal("expected var") - return nil - } - - return &imp -} - -func (p *Parser) parseRules() []*Rule { - - var rule Rule - rule.SetLoc(p.s.Loc()) - - if p.s.tok == tokens.Default { - p.scan() - rule.Default = true - } - - if p.s.tok != tokens.Ident { - return nil - } - - usesContains := false - if rule.Head, usesContains = p.parseHead(rule.Default); rule.Head == nil { - return nil - } - - if usesContains { - rule.Head.keywords = append(rule.Head.keywords, tokens.Contains) - } - - if rule.Default { - if !p.validateDefaultRuleValue(&rule) { - return nil - } - - if len(rule.Head.Args) > 0 { - if !p.validateDefaultRuleArgs(&rule) { - return nil - } - } - - rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) - return []*Rule{&rule} - } - - // back-compat with `p[x] { ... }`` - hasIf := p.s.tok == tokens.If - - // p[x] if ... becomes a single-value rule p[x] - if hasIf && !usesContains && len(rule.Head.Ref()) == 2 { - if rule.Head.Value == nil { - rule.Head.generatedValue = true - rule.Head.Value = BooleanTerm(true).SetLocation(rule.Head.Location) - } else { - // p[x] = y if becomes a single-value rule p[x] with value y, but needs name for compat - v, ok := rule.Head.Ref()[0].Value.(Var) - if !ok { - return nil - } - rule.Head.Name = v - } - } - - // p[x] becomes a multi-value rule p - if !hasIf && !usesContains && - len(rule.Head.Args) == 0 && // not a function - len(rule.Head.Ref()) == 2 { // ref like 'p[x]' - v, ok := rule.Head.Ref()[0].Value.(Var) - if !ok { - return nil - } - rule.Head.Name = v - rule.Head.Key = rule.Head.Ref()[1] - if rule.Head.Value == nil { - rule.Head.SetRef(rule.Head.Ref()[:len(rule.Head.Ref())-1]) - } - } - - switch { - case hasIf: - rule.Head.keywords = append(rule.Head.keywords, tokens.If) - p.scan() - s := p.save() - if expr := p.parseLiteral(); expr != nil { - // NOTE(sr): set literals are never false or undefined, so parsing this as - // p if { true } - // ^^^^^^^^ set of one element, `true` - // isn't valid. 
- isSetLiteral := false - if t, ok := expr.Terms.(*Term); ok { - _, isSetLiteral = t.Value.(Set) - } - // expr.Term is []*Term or Every - if !isSetLiteral { - rule.Body.Append(expr) - break - } - } - - // parsing as literal didn't work out, expect '{ BODY }' - p.restore(s) - fallthrough - - case p.s.tok == tokens.LBrace: - p.scan() - if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { - return nil - } - p.scan() - - case usesContains: - rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) - rule.generatedBody = true - return []*Rule{&rule} - - default: - return nil - } - - if p.s.tok == tokens.Else { - if r := rule.Head.Ref(); len(r) > 1 && !r.IsGround() { - p.error(p.s.Loc(), "else keyword cannot be used on rules with variables in head") - return nil - } - if rule.Head.Key != nil { - p.error(p.s.Loc(), "else keyword cannot be used on multi-value rules") - return nil - } - - if rule.Else = p.parseElse(rule.Head); rule.Else == nil { - return nil - } - } - - rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) - - rules := []*Rule{&rule} - - for p.s.tok == tokens.LBrace { - - if rule.Else != nil { - p.error(p.s.Loc(), "expected else keyword") - return nil - } - - loc := p.s.Loc() - - p.scan() - var next Rule - - if next.Body = p.parseBody(tokens.RBrace); next.Body == nil { - return nil - } - p.scan() - - loc.Text = p.s.Text(loc.Offset, p.s.lastEnd) - next.SetLoc(loc) - - // Chained rule head's keep the original - // rule's head AST but have their location - // set to the rule body. - next.Head = rule.Head.Copy() - next.Head.keywords = rule.Head.keywords - for i := range next.Head.Args { - if v, ok := next.Head.Args[i].Value.(Var); ok && v.IsWildcard() { - next.Head.Args[i].Value = Var(p.genwildcard()) - } - } - setLocRecursive(next.Head, loc) - - rules = append(rules, &next) - } - - return rules -} - -func (p *Parser) parseElse(head *Head) *Rule { - - var rule Rule - rule.SetLoc(p.s.Loc()) - - rule.Head = head.Copy() - rule.Head.generatedValue = false - for i := range rule.Head.Args { - if v, ok := rule.Head.Args[i].Value.(Var); ok && v.IsWildcard() { - rule.Head.Args[i].Value = Var(p.genwildcard()) - } - } - rule.Head.SetLoc(p.s.Loc()) - - defer func() { - rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) - }() - - p.scan() - - switch p.s.tok { - case tokens.LBrace, tokens.If: // no value, but a body follows directly - rule.Head.generatedValue = true - rule.Head.Value = BooleanTerm(true) - case tokens.Assign, tokens.Unify: - rule.Head.Assign = tokens.Assign == p.s.tok - p.scan() - rule.Head.Value = p.parseTermInfixCall() - if rule.Head.Value == nil { - return nil - } - rule.Head.Location.Text = p.s.Text(rule.Head.Location.Offset, p.s.lastEnd) - default: - p.illegal("expected else value term or rule body") - return nil - } - - hasIf := p.s.tok == tokens.If - hasLBrace := p.s.tok == tokens.LBrace - - if !hasIf && !hasLBrace { - rule.Body = NewBody(NewExpr(BooleanTerm(true))) - rule.generatedBody = true - setLocRecursive(rule.Body, rule.Location) - return &rule - } - - if hasIf { - rule.Head.keywords = append(rule.Head.keywords, tokens.If) - p.scan() - } - - if p.s.tok == tokens.LBrace { - p.scan() - if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { - return nil - } - p.scan() - } else if p.s.tok != tokens.EOF { - expr := p.parseLiteral() - if expr == nil { - return nil - } - rule.Body.Append(expr) - setLocRecursive(rule.Body, rule.Location) - } else { - p.illegal("rule body expected") - return 
nil - } - - if p.s.tok == tokens.Else { - if rule.Else = p.parseElse(head); rule.Else == nil { - return nil - } - } - return &rule -} - -func (p *Parser) parseHead(defaultRule bool) (*Head, bool) { - head := &Head{} - loc := p.s.Loc() - defer func() { - if head != nil { - head.SetLoc(loc) - head.Location.Text = p.s.Text(head.Location.Offset, p.s.lastEnd) - } - }() - - term := p.parseVar() - if term == nil { - return nil, false - } - - ref := p.parseTermFinish(term, true) - if ref == nil { - p.illegal("expected rule head name") - return nil, false - } - - switch x := ref.Value.(type) { - case Var: - // Modify the code to add the location to the head ref - // and set the head ref's jsonOptions. - head = VarHead(x, ref.Location, p.po.JSONOptions) - case Ref: - head = RefHead(x) - case Call: - op, args := x[0], x[1:] - var ref Ref - switch y := op.Value.(type) { - case Var: - ref = Ref{op} - case Ref: - if _, ok := y[0].Value.(Var); !ok { - p.illegal("rule head ref %v invalid", y) - return nil, false - } - ref = y - } - head = RefHead(ref) - head.Args = append([]*Term{}, args...) - - default: - return nil, false - } - - name := head.Ref().String() - - switch p.s.tok { - case tokens.Contains: // NOTE: no Value for `contains` heads, we return here - // Catch error case of using 'contains' with a function definition rule head. - if head.Args != nil { - p.illegal("the contains keyword can only be used with multi-value rule definitions (e.g., %s contains { ... })", name) - } - p.scan() - head.Key = p.parseTermInfixCall() - if head.Key == nil { - p.illegal("expected rule key term (e.g., %s contains { ... })", name) - } - return head, true - - case tokens.Unify: - p.scan() - head.Value = p.parseTermInfixCall() - if head.Value == nil { - // FIX HEAD.String() - p.illegal("expected rule value term (e.g., %s[%s] = { ... })", name, head.Key) - } - case tokens.Assign: - p.scan() - head.Assign = true - head.Value = p.parseTermInfixCall() - if head.Value == nil { - switch { - case len(head.Args) > 0: - p.illegal("expected function value term (e.g., %s(...) := { ... })", name) - case head.Key != nil: - p.illegal("expected partial rule value term (e.g., %s[...] := { ... })", name) - case defaultRule: - p.illegal("expected default rule value term (e.g., default %s := )", name) - default: - p.illegal("expected rule value term (e.g., %s := { ... 
})", name) - } - } - } - - if head.Value == nil && head.Key == nil { - if len(head.Ref()) != 2 || len(head.Args) > 0 { - head.generatedValue = true - head.Value = BooleanTerm(true).SetLocation(head.Location) - } - } - return head, false -} - -func (p *Parser) parseBody(end tokens.Token) Body { - return p.parseQuery(false, end) -} - -func (p *Parser) parseQuery(requireSemi bool, end tokens.Token) Body { - body := Body{} - - if p.s.tok == end { - p.error(p.s.Loc(), "found empty body") - return nil - } - - for { - expr := p.parseLiteral() - if expr == nil { - return nil - } - - body.Append(expr) - - if p.s.tok == tokens.Semicolon { - p.scan() - continue - } - - if p.s.tok == end || requireSemi { - return body - } - - if !p.s.skippedNL { - // If there was already an error then don't pile this one on - if len(p.s.errors) == 0 { - p.illegal(`expected \n or %s or %s`, tokens.Semicolon, end) - } - return nil - } - } -} - -func (p *Parser) parseLiteral() (expr *Expr) { - - offset := p.s.loc.Offset - loc := p.s.Loc() - - defer func() { - if expr != nil { - loc.Text = p.s.Text(offset, p.s.lastEnd) - expr.SetLoc(loc) - } - }() - - var negated bool - if p.s.tok == tokens.Not { - p.scan() - negated = true - } - - switch p.s.tok { - case tokens.Some: - if negated { - p.illegal("illegal negation of 'some'") - return nil - } - return p.parseSome() - case tokens.Every: - if negated { - p.illegal("illegal negation of 'every'") - return nil - } - return p.parseEvery() - default: - s := p.save() - expr := p.parseExpr() - if expr != nil { - expr.Negated = negated - if p.s.tok == tokens.With { - if expr.With = p.parseWith(); expr.With == nil { - return nil - } - } - // If we find a plain `every` identifier, attempt to parse an every expression, - // add hint if it succeeds. - if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) { - var hint bool - t := p.save() - p.restore(s) - if expr := p.futureParser().parseEvery(); expr != nil { - _, hint = expr.Terms.(*Every) - } - p.restore(t) - if hint { - p.hint("`import future.keywords.every` for `every x in xs { ... 
}` expressions") - } - } - return expr - } - return nil - } -} - -func (p *Parser) parseWith() []*With { - - withs := []*With{} - - for { - - with := With{ - Location: p.s.Loc(), - } - p.scan() - - if p.s.tok != tokens.Ident { - p.illegal("expected ident") - return nil - } - - with.Target = p.parseTerm() - if with.Target == nil { - return nil - } - - switch with.Target.Value.(type) { - case Ref, Var: - break - default: - p.illegal("expected with target path") - } - - if p.s.tok != tokens.As { - p.illegal("expected as keyword") - return nil - } - - p.scan() - - if with.Value = p.parseTermInfixCall(); with.Value == nil { - return nil - } - - with.Location.Text = p.s.Text(with.Location.Offset, p.s.lastEnd) - - withs = append(withs, &with) - - if p.s.tok != tokens.With { - break - } - } - - return withs -} - -func (p *Parser) parseSome() *Expr { - - decl := &SomeDecl{} - decl.SetLoc(p.s.Loc()) - - // Attempt to parse "some x in xs", which will end up in - // SomeDecl{Symbols: ["member(x, xs)"]} - s := p.save() - p.scan() - if term := p.parseTermInfixCall(); term != nil { - if call, ok := term.Value.(Call); ok { - switch call[0].String() { - case Member.Name: - if len(call) != 3 { - p.illegal("illegal domain") - return nil - } - case MemberWithKey.Name: - if len(call) != 4 { - p.illegal("illegal domain") - return nil - } - default: - p.illegal("expected `x in xs` or `x, y in xs` expression") - return nil - } - - decl.Symbols = []*Term{term} - expr := NewExpr(decl).SetLocation(decl.Location) - if p.s.tok == tokens.With { - if expr.With = p.parseWith(); expr.With == nil { - return nil - } - } - return expr - } - } - - p.restore(s) - s = p.save() // new copy for later - var hint bool - p.scan() - if term := p.futureParser().parseTermInfixCall(); term != nil { - if call, ok := term.Value.(Call); ok { - switch call[0].String() { - case Member.Name, MemberWithKey.Name: - hint = true - } - } - } - - // go on as before, it's `some x[...]` or illegal - p.restore(s) - if hint { - p.hint("`import future.keywords.in` for `some x in xs` expressions") - } - - for { // collecting var args - - p.scan() - - if p.s.tok != tokens.Ident { - p.illegal("expected var") - return nil - } - - decl.Symbols = append(decl.Symbols, p.parseVar()) - - p.scan() - - if p.s.tok != tokens.Comma { - break - } - } - - return NewExpr(decl).SetLocation(decl.Location) -} - -func (p *Parser) parseEvery() *Expr { - qb := &Every{} - qb.SetLoc(p.s.Loc()) - - // TODO(sr): We'd get more accurate error messages if we didn't rely on - // parseTermInfixCall here, but parsed "var [, var] in term" manually. - p.scan() - term := p.parseTermInfixCall() - if term == nil { - return nil - } - call, ok := term.Value.(Call) - if !ok { - p.illegal("expected `x[, y] in xs { ... }` expression") - return nil - } - switch call[0].String() { - case Member.Name: // x in xs - if len(call) != 3 { - p.illegal("illegal domain") - return nil - } - qb.Value = call[1] - qb.Domain = call[2] - case MemberWithKey.Name: // k, v in xs - if len(call) != 4 { - p.illegal("illegal domain") - return nil - } - qb.Key = call[1] - qb.Value = call[2] - qb.Domain = call[3] - if _, ok := qb.Key.Value.(Var); !ok { - p.illegal("expected key to be a variable") - return nil - } - default: - p.illegal("expected `x[, y] in xs { ... }` expression") - return nil - } - if _, ok := qb.Value.Value.(Var); !ok { - p.illegal("expected value to be a variable") - return nil - } - if p.s.tok == tokens.LBrace { // every x in xs { ... 
} - p.scan() - body := p.parseBody(tokens.RBrace) - if body == nil { - return nil - } - p.scan() - qb.Body = body - expr := NewExpr(qb).SetLocation(qb.Location) - - if p.s.tok == tokens.With { - if expr.With = p.parseWith(); expr.With == nil { - return nil - } - } - return expr - } - - p.illegal("missing body") - return nil -} - -func (p *Parser) parseExpr() *Expr { - - lhs := p.parseTermInfixCall() - if lhs == nil { - return nil - } - - if op := p.parseTermOp(tokens.Assign, tokens.Unify); op != nil { - if rhs := p.parseTermInfixCall(); rhs != nil { - return NewExpr([]*Term{op, lhs, rhs}) - } - return nil - } - - // NOTE(tsandall): the top-level call term is converted to an expr because - // the evaluator does not support the call term type (nested calls are - // rewritten by the compiler.) - if call, ok := lhs.Value.(Call); ok { - return NewExpr([]*Term(call)) - } - - return NewExpr(lhs) -} - -// parseTermInfixCall consumes the next term from the input and returns it. If a -// term cannot be parsed the return value is nil and error will be recorded. The -// scanner will be advanced to the next token before returning. -// By starting out with infix relations (==, !=, <, etc) and further calling the -// other binary operators (|, &, arithmetics), it constitutes the binding -// precedence. -func (p *Parser) parseTermInfixCall() *Term { - return p.parseTermIn(nil, true, p.s.loc.Offset) -} - -func (p *Parser) parseTermInfixCallInList() *Term { - return p.parseTermIn(nil, false, p.s.loc.Offset) -} - -func (p *Parser) parseTermIn(lhs *Term, keyVal bool, offset int) *Term { - // NOTE(sr): `in` is a bit special: besides `lhs in rhs`, it also - // supports `key, val in rhs`, so it can have an optional second lhs. - // `keyVal` triggers if we attempt to parse a second lhs argument (`mhs`). 
- if lhs == nil { - lhs = p.parseTermRelation(nil, offset) - } - if lhs != nil { - if keyVal && p.s.tok == tokens.Comma { // second "lhs", or "middle hand side" - s := p.save() - p.scan() - if mhs := p.parseTermRelation(nil, offset); mhs != nil { - if op := p.parseTermOpName(MemberWithKey.Ref(), tokens.In); op != nil { - if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, mhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.In: - return p.parseTermIn(call, keyVal, offset) - default: - return call - } - } - } - } - p.restore(s) - } - if op := p.parseTermOpName(Member.Ref(), tokens.In); op != nil { - if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.In: - return p.parseTermIn(call, keyVal, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTermRelation(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermOr(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte); op != nil { - if rhs := p.parseTermOr(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte: - return p.parseTermRelation(call, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTermOr(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermAnd(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.Or); op != nil { - if rhs := p.parseTermAnd(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Or: - return p.parseTermOr(call, offset) - default: - return call - } - } - } - return lhs - } - return nil -} - -func (p *Parser) parseTermAnd(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermArith(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.And); op != nil { - if rhs := p.parseTermArith(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.And: - return p.parseTermAnd(call, offset) - default: - return call - } - } - } - return lhs - } - return nil -} - -func (p *Parser) parseTermArith(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTermFactor(nil, offset) - } - if lhs != nil { - if op := p.parseTermOp(tokens.Add, tokens.Sub); op != nil { - if rhs := p.parseTermFactor(nil, p.s.loc.Offset); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Add, tokens.Sub: - return p.parseTermArith(call, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTermFactor(lhs *Term, offset int) *Term { - if lhs == nil { - lhs = p.parseTerm() - } - if lhs != nil { - if op := p.parseTermOp(tokens.Mul, tokens.Quo, tokens.Rem); op != nil { - if rhs := p.parseTerm(); rhs != nil { - call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) - switch p.s.tok { - case tokens.Mul, tokens.Quo, tokens.Rem: - return p.parseTermFactor(call, offset) - default: - return call - } - } - } - } - return lhs -} - -func (p *Parser) parseTerm() *Term { - if term, s := 
p.parsedTermCacheLookup(); s != nil { - p.restore(s) - return term - } - s0 := p.save() - - var term *Term - switch p.s.tok { - case tokens.Null: - term = NullTerm().SetLocation(p.s.Loc()) - case tokens.True: - term = BooleanTerm(true).SetLocation(p.s.Loc()) - case tokens.False: - term = BooleanTerm(false).SetLocation(p.s.Loc()) - case tokens.Sub, tokens.Dot, tokens.Number: - term = p.parseNumber() - case tokens.String: - term = p.parseString() - case tokens.Ident, tokens.Contains: // NOTE(sr): contains anywhere BUT in rule heads gets no special treatment - term = p.parseVar() - case tokens.LBrack: - term = p.parseArray() - case tokens.LBrace: - term = p.parseSetOrObject() - case tokens.LParen: - offset := p.s.loc.Offset - p.scan() - if r := p.parseTermInfixCall(); r != nil { - if p.s.tok == tokens.RParen { - r.Location.Text = p.s.Text(offset, p.s.tokEnd) - term = r - } else { - p.error(p.s.Loc(), "non-terminated expression") - } - } - default: - p.illegalToken() - } - - term = p.parseTermFinish(term, false) - p.parsedTermCachePush(term, s0) - return term -} - -func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term { - if head == nil { - return nil - } - offset := p.s.loc.Offset - p.doScan(skipws) - - switch p.s.tok { - case tokens.LParen, tokens.Dot, tokens.LBrack: - return p.parseRef(head, offset) - case tokens.Whitespace: - p.scan() - fallthrough - default: - if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) { - return RefTerm(head).SetLocation(head.Location) - } - return head - } -} - -func (p *Parser) parseNumber() *Term { - var prefix string - loc := p.s.Loc() - if p.s.tok == tokens.Sub { - prefix = "-" - p.scan() - switch p.s.tok { - case tokens.Number, tokens.Dot: - break - default: - p.illegal("expected number") - return nil - } - } - if p.s.tok == tokens.Dot { - prefix += "." - p.scan() - if p.s.tok != tokens.Number { - p.illegal("expected number") - return nil - } - } - - // Check for multiple leading 0's, parsed by math/big.Float.Parse as decimal 0: - // https://golang.org/pkg/math/big/#Float.Parse - if ((len(prefix) != 0 && prefix[0] == '-') || len(prefix) == 0) && - len(p.s.lit) > 1 && p.s.lit[0] == '0' && p.s.lit[1] == '0' { - p.illegal("expected number") - return nil - } - - // Ensure that the number is valid - s := prefix + p.s.lit - f, ok := new(big.Float).SetString(s) - if !ok { - p.illegal("invalid float") - return nil - } - - // Put limit on size of exponent to prevent non-linear cost of String() - // function on big.Float from causing denial of service: https://github.com/golang/go/issues/11068 - // - // n == sign * mantissa * 2^exp - // 0.5 <= mantissa < 1.0 - // - // The limit is arbitrary. - exp := f.MantExp(nil) - if exp > 1e5 || exp < -1e5 || f.IsInf() { // +/- inf, exp is 0 - p.error(p.s.Loc(), "number too big") - return nil - } - - // Note: Use the original string, do *not* round trip from - // the big.Float as it can cause precision loss. 
- r := NumberTerm(json.Number(s)).SetLocation(loc) - return r -} - -func (p *Parser) parseString() *Term { - if p.s.lit[0] == '"' { - var s string - err := json.Unmarshal([]byte(p.s.lit), &s) - if err != nil { - p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit) - return nil - } - term := StringTerm(s).SetLocation(p.s.Loc()) - return term - } - return p.parseRawString() -} - -func (p *Parser) parseRawString() *Term { - if len(p.s.lit) < 2 { - return nil - } - term := StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc()) - return term -} - -// this is the name to use for instantiating an empty set, e.g., `set()`. -var setConstructor = RefTerm(VarTerm("set")) - -func (p *Parser) parseCall(operator *Term, offset int) (term *Term) { - - loc := operator.Location - var end int - - defer func() { - p.setLoc(term, loc, offset, end) - }() - - p.scan() // steps over '(' - - if p.s.tok == tokens.RParen { // no args, i.e. set() or any.func() - end = p.s.tokEnd - p.scanWS() - if operator.Equal(setConstructor) { - return SetTerm() - } - return CallTerm(operator) - } - - if r := p.parseTermList(tokens.RParen, []*Term{operator}); r != nil { - end = p.s.tokEnd - p.scanWS() - return CallTerm(r...) - } - - return nil -} - -func (p *Parser) parseRef(head *Term, offset int) (term *Term) { - - loc := head.Location - var end int - - defer func() { - p.setLoc(term, loc, offset, end) - }() - - switch h := head.Value.(type) { - case Var, *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: - // ok - default: - p.errorf(loc, "illegal ref (head cannot be %v)", TypeName(h)) - } - - ref := []*Term{head} - - for { - switch p.s.tok { - case tokens.Dot: - p.scanWS() - if p.s.tok != tokens.Ident { - p.illegal("expected %v", tokens.Ident) - return nil - } - ref = append(ref, StringTerm(p.s.lit).SetLocation(p.s.Loc())) - p.scanWS() - case tokens.LParen: - term = p.parseCall(p.setLoc(RefTerm(ref...), loc, offset, p.s.loc.Offset), offset) - if term != nil { - switch p.s.tok { - case tokens.Whitespace: - p.scan() - end = p.s.lastEnd - return term - case tokens.Dot, tokens.LBrack: - term = p.parseRef(term, offset) - } - } - end = p.s.tokEnd - return term - case tokens.LBrack: - p.scan() - if term := p.parseTermInfixCall(); term != nil { - if p.s.tok != tokens.RBrack { - p.illegal("expected %v", tokens.LBrack) - return nil - } - ref = append(ref, term) - p.scanWS() - } else { - return nil - } - case tokens.Whitespace: - end = p.s.lastEnd - p.scan() - return RefTerm(ref...) - default: - end = p.s.lastEnd - return RefTerm(ref...) - } - } -} - -func (p *Parser) parseArray() (term *Term) { - - loc := p.s.Loc() - offset := p.s.loc.Offset - - defer func() { - p.setLoc(term, loc, offset, p.s.tokEnd) - }() - - p.scan() - - if p.s.tok == tokens.RBrack { - return ArrayTerm() - } - - potentialComprehension := true - - // Skip leading commas, eg [, x, y] - // Supported for backwards compatibility. In the future - // we should make this a parse error. - if p.s.tok == tokens.Comma { - potentialComprehension = false - p.scan() - } - - s := p.save() - - // NOTE(tsandall): The parser cannot attempt a relational term here because - // of ambiguity around comprehensions. For example, given: - // - // {1 | 1} - // - // Does this represent a set comprehension or a set containing binary OR - // call? We resolve the ambiguity by prioritizing comprehensions. 
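A sketch of the ambiguity rule described in the comment above, checked through the public parsing API; the expected output assumes the comprehension reading wins, as that comment states:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// `{1 | 1}` could be a set comprehension or a set containing a binary OR
	// call; per the parser comment above, comprehensions take priority.
	term := ast.MustParseTerm("{1 | 1}")
	_, isComprehension := term.Value.(*ast.SetComprehension)
	fmt.Println(isComprehension) // expected: true
}
```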
- head := p.parseTerm() - - if head == nil { - return nil - } - - switch p.s.tok { - case tokens.RBrack: - return ArrayTerm(head) - case tokens.Comma: - p.scan() - if terms := p.parseTermList(tokens.RBrack, []*Term{head}); terms != nil { - return NewTerm(NewArray(terms...)) - } - return nil - case tokens.Or: - if potentialComprehension { - // Try to parse as if it is an array comprehension - p.scan() - if body := p.parseBody(tokens.RBrack); body != nil { - return ArrayComprehensionTerm(head, body) - } - if p.s.tok != tokens.Comma { - return nil - } - } - // fall back to parsing as a normal array definition - } - - p.restore(s) - - if terms := p.parseTermList(tokens.RBrack, nil); terms != nil { - return NewTerm(NewArray(terms...)) - } - return nil -} - -func (p *Parser) parseSetOrObject() (term *Term) { - loc := p.s.Loc() - offset := p.s.loc.Offset - - defer func() { - p.setLoc(term, loc, offset, p.s.tokEnd) - }() - - p.scan() - - if p.s.tok == tokens.RBrace { - return ObjectTerm() - } - - potentialComprehension := true - - // Skip leading commas, eg {, x, y} - // Supported for backwards compatibility. In the future - // we should make this a parse error. - if p.s.tok == tokens.Comma { - potentialComprehension = false - p.scan() - } - - s := p.save() - - // Try parsing just a single term first to give comprehensions higher - // priority to "or" calls in ambiguous situations. Eg: { a | b } - // will be a set comprehension. - // - // Note: We don't know yet if it is a set or object being defined. - head := p.parseTerm() - if head == nil { - return nil - } - - switch p.s.tok { - case tokens.Or: - if potentialComprehension { - return p.parseSet(s, head, potentialComprehension) - } - case tokens.RBrace, tokens.Comma: - return p.parseSet(s, head, potentialComprehension) - case tokens.Colon: - return p.parseObject(head, potentialComprehension) - } - - p.restore(s) - - head = p.parseTermInfixCallInList() - if head == nil { - return nil - } - - switch p.s.tok { - case tokens.RBrace, tokens.Comma: - return p.parseSet(s, head, false) - case tokens.Colon: - // It still might be an object comprehension, eg { a+1: b | ... } - return p.parseObject(head, potentialComprehension) - } - - p.illegal("non-terminated set") - return nil -} - -func (p *Parser) parseSet(s *state, head *Term, potentialComprehension bool) *Term { - switch p.s.tok { - case tokens.RBrace: - return SetTerm(head) - case tokens.Comma: - p.scan() - if terms := p.parseTermList(tokens.RBrace, []*Term{head}); terms != nil { - return SetTerm(terms...) - } - case tokens.Or: - if potentialComprehension { - // Try to parse as if it is a set comprehension - p.scan() - if body := p.parseBody(tokens.RBrace); body != nil { - return SetComprehensionTerm(head, body) - } - if p.s.tok != tokens.Comma { - return nil - } - } - // Fall back to parsing as normal set definition - p.restore(s) - if terms := p.parseTermList(tokens.RBrace, nil); terms != nil { - return SetTerm(terms...) - } - } - return nil -} - -func (p *Parser) parseObject(k *Term, potentialComprehension bool) *Term { - // NOTE(tsandall): Assumption: this function is called after parsing the key - // of the head element and then receiving a colon token from the scanner. - // Advance beyond the colon and attempt to parse an object. - if p.s.tok != tokens.Colon { - panic("expected colon") - } - p.scan() - - s := p.save() - - // NOTE(sr): We first try to parse the value as a term (`v`), and see - // if we can parse `{ x: v | ...}` as a comprehension. 
- // However, if we encounter either a Comma or an RBace, it cannot be - // parsed as a comprehension -- so we save double work further down - // where `parseObjectFinish(k, v, false)` would only exercise the - // same code paths once more. - v := p.parseTerm() - if v == nil { - return nil - } - - potentialRelation := true - if potentialComprehension { - switch p.s.tok { - case tokens.RBrace, tokens.Comma: - potentialRelation = false - fallthrough - case tokens.Or: - if term := p.parseObjectFinish(k, v, true); term != nil { - return term - } - } - } - - p.restore(s) - - if potentialRelation { - v := p.parseTermInfixCallInList() - if v == nil { - return nil - } - - switch p.s.tok { - case tokens.RBrace, tokens.Comma: - return p.parseObjectFinish(k, v, false) - } - } - - p.illegal("non-terminated object") - return nil -} - -func (p *Parser) parseObjectFinish(key, val *Term, potentialComprehension bool) *Term { - switch p.s.tok { - case tokens.RBrace: - return ObjectTerm([2]*Term{key, val}) - case tokens.Or: - if potentialComprehension { - p.scan() - if body := p.parseBody(tokens.RBrace); body != nil { - return ObjectComprehensionTerm(key, val, body) - } - } else { - p.illegal("non-terminated object") - } - case tokens.Comma: - p.scan() - if r := p.parseTermPairList(tokens.RBrace, [][2]*Term{{key, val}}); r != nil { - return ObjectTerm(r...) - } - } - return nil -} - -func (p *Parser) parseTermList(end tokens.Token, r []*Term) []*Term { - if p.s.tok == end { - return r - } - for { - term := p.parseTermInfixCallInList() - if term != nil { - r = append(r, term) - switch p.s.tok { - case end: - return r - case tokens.Comma: - p.scan() - if p.s.tok == end { - return r - } - continue - default: - p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end)) - return nil - } - } - return nil - } -} - -func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term { - if p.s.tok == end { - return r - } - for { - key := p.parseTermInfixCallInList() - if key != nil { - switch p.s.tok { - case tokens.Colon: - p.scan() - if val := p.parseTermInfixCallInList(); val != nil { - r = append(r, [2]*Term{key, val}) - switch p.s.tok { - case end: - return r - case tokens.Comma: - p.scan() - if p.s.tok == end { - return r - } - continue - default: - p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end)) - return nil - } - } - default: - p.illegal(fmt.Sprintf("expected %q", tokens.Colon)) - return nil - } - } - return nil - } -} - -func (p *Parser) parseTermOp(values ...tokens.Token) *Term { - for i := range values { - if p.s.tok == values[i] { - r := RefTerm(VarTerm(fmt.Sprint(p.s.tok)).SetLocation(p.s.Loc())).SetLocation(p.s.Loc()) - p.scan() - return r - } - } - return nil -} - -func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term { - for i := range values { - if p.s.tok == values[i] { - for _, r := range ref { - r.SetLocation(p.s.Loc()) - } - t := RefTerm(ref...) 
- t.SetLocation(p.s.Loc()) - p.scan() - return t - } - } - return nil -} - -func (p *Parser) parseVar() *Term { - - s := p.s.lit - - term := VarTerm(s).SetLocation(p.s.Loc()) - - // Update wildcard values with unique identifiers - if term.Equal(Wildcard) { - term.Value = Var(p.genwildcard()) - } - - return term -} - -func (p *Parser) genwildcard() string { - c := p.s.wildcard - p.s.wildcard++ - return fmt.Sprintf("%v%d", WildcardPrefix, c) -} - -func (p *Parser) error(loc *location.Location, reason string) { - p.errorf(loc, reason) -} - -func (p *Parser) errorf(loc *location.Location, f string, a ...interface{}) { - msg := strings.Builder{} - msg.WriteString(fmt.Sprintf(f, a...)) - - switch len(p.s.hints) { - case 0: // nothing to do - case 1: - msg.WriteString(" (hint: ") - msg.WriteString(p.s.hints[0]) - msg.WriteRune(')') - default: - msg.WriteString(" (hints: ") - for i, h := range p.s.hints { - if i > 0 { - msg.WriteString(", ") - } - msg.WriteString(h) - } - msg.WriteRune(')') - } - - p.s.errors = append(p.s.errors, &Error{ - Code: ParseErr, - Message: msg.String(), - Location: loc, - Details: newParserErrorDetail(p.s.s.Bytes(), loc.Offset), - }) - p.s.hints = nil -} - -func (p *Parser) hint(f string, a ...interface{}) { - p.s.hints = append(p.s.hints, fmt.Sprintf(f, a...)) -} - -func (p *Parser) illegal(note string, a ...interface{}) { - tok := p.s.tok.String() - - if p.s.tok == tokens.Illegal { - p.errorf(p.s.Loc(), "illegal token") - return - } - - tokType := "token" - if tokens.IsKeyword(p.s.tok) { - tokType = "keyword" - } - if _, ok := futureKeywords[p.s.tok.String()]; ok { - tokType = "keyword" - } - - note = fmt.Sprintf(note, a...) - if len(note) > 0 { - p.errorf(p.s.Loc(), "unexpected %s %s: %s", tok, tokType, note) - } else { - p.errorf(p.s.Loc(), "unexpected %s %s", tok, tokType) - } -} - -func (p *Parser) illegalToken() { - p.illegal("") -} - -func (p *Parser) scan() { - p.doScan(true) -} - -func (p *Parser) scanWS() { - p.doScan(false) -} - -func (p *Parser) doScan(skipws bool) { - - // NOTE(tsandall): the last position is used to compute the "text" field for - // complex AST nodes. Whitespace never affects the last position of an AST - // node so do not update it when scanning. - if p.s.tok != tokens.Whitespace { - p.s.lastEnd = p.s.tokEnd - p.s.skippedNL = false - } - - var errs []scanner.Error - for { - var pos scanner.Position - p.s.tok, pos, p.s.lit, errs = p.s.s.Scan() - - p.s.tokEnd = pos.End - p.s.loc.Row = pos.Row - p.s.loc.Col = pos.Col - p.s.loc.Offset = pos.Offset - p.s.loc.Text = p.s.Text(pos.Offset, pos.End) - p.s.loc.Tabs = pos.Tabs - - for _, err := range errs { - p.error(p.s.Loc(), err.Message) - } - - if len(errs) > 0 { - p.s.tok = tokens.Illegal - } - - if p.s.tok == tokens.Whitespace { - if p.s.lit == "\n" { - p.s.skippedNL = true - } - if skipws { - continue - } - } - - if p.s.tok != tokens.Comment { - break - } - - // For backwards compatibility leave a nil - // Text value if there is no text rather than - // an empty string. 
- var commentText []byte - if len(p.s.lit) > 1 { - commentText = []byte(p.s.lit[1:]) - } - comment := NewComment(commentText) - comment.SetLoc(p.s.Loc()) - p.s.comments = append(p.s.comments, comment) - } -} - -func (p *Parser) save() *state { - cpy := *p.s - s := *cpy.s - cpy.s = &s - return &cpy -} - -func (p *Parser) restore(s *state) { - p.s = s -} - -func setLocRecursive(x interface{}, loc *location.Location) { - NewGenericVisitor(func(x interface{}) bool { - if node, ok := x.(Node); ok { - node.SetLoc(loc) - } - return false - }).Walk(x) -} - -func (p *Parser) setLoc(term *Term, loc *location.Location, offset, end int) *Term { - if term != nil { - cpy := *loc - term.Location = &cpy - term.Location.Text = p.s.Text(offset, end) - } - return term -} - -func (p *Parser) validateDefaultRuleValue(rule *Rule) bool { - if rule.Head.Value == nil { - p.error(rule.Loc(), "illegal default rule (must have a value)") - return false - } - - valid := true - vis := NewGenericVisitor(func(x interface{}) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: // skip closures - return true - case Ref, Var, Call: - p.error(rule.Loc(), fmt.Sprintf("illegal default rule (value cannot contain %v)", TypeName(x))) - valid = false - return true - } - return false - }) - - vis.Walk(rule.Head.Value.Value) - return valid -} - -func (p *Parser) validateDefaultRuleArgs(rule *Rule) bool { - - valid := true - vars := NewVarSet() - - vis := NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case Var: - if vars.Contains(x) { - p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot be repeated %v)", x)) - valid = false - return true - } - vars.Add(x) - - case *Term: - switch v := x.Value.(type) { - case Var: // do nothing - default: - p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot contain %v)", TypeName(v))) - valid = false - return true - } - } - - return false - }) - - vis.Walk(rule.Head.Args) - return valid -} - -// We explicitly use yaml unmarshalling, to accommodate for the '_' in 'related_resources', -// which isn't handled properly by json for some reason. -type rawAnnotation struct { - Scope string `yaml:"scope"` - Title string `yaml:"title"` - Entrypoint bool `yaml:"entrypoint"` - Description string `yaml:"description"` - Organizations []string `yaml:"organizations"` - RelatedResources []interface{} `yaml:"related_resources"` - Authors []interface{} `yaml:"authors"` - Schemas []rawSchemaAnnotation `yaml:"schemas"` - Custom map[string]interface{} `yaml:"custom"` -} - -type rawSchemaAnnotation map[string]interface{} - -type metadataParser struct { - buf *bytes.Buffer - comments []*Comment - loc *location.Location -} - -func newMetadataParser(loc *Location) *metadataParser { - return &metadataParser{loc: loc, buf: bytes.NewBuffer(nil)} -} - -func (b *metadataParser) Append(c *Comment) { - b.buf.Write(bytes.TrimPrefix(c.Text, []byte(" "))) - b.buf.WriteByte('\n') - b.comments = append(b.comments, c) -} - -var yamlLineErrRegex = regexp.MustCompile(`^yaml:(?: unmarshal errors:[\n\s]*)? 
line ([[:digit:]]+):`) - -func (b *metadataParser) Parse() (*Annotations, error) { - - var raw rawAnnotation - - if len(bytes.TrimSpace(b.buf.Bytes())) == 0 { - return nil, fmt.Errorf("expected METADATA block, found whitespace") - } - - if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil { - var comment *Comment - match := yamlLineErrRegex.FindStringSubmatch(err.Error()) - if len(match) == 2 { - n, err2 := strconv.Atoi(match[1]) - if err2 == nil { - index := n - 1 // line numbering is 1-based so subtract one from row - if index >= len(b.comments) { - comment = b.comments[len(b.comments)-1] - } else { - comment = b.comments[index] - } - b.loc = comment.Location - } - } - - if match == nil && len(b.comments) > 0 { - b.loc = b.comments[0].Location - } - - return nil, augmentYamlError(err, b.comments) - } - - var result Annotations - result.comments = b.comments - result.Scope = raw.Scope - result.Entrypoint = raw.Entrypoint - result.Title = raw.Title - result.Description = raw.Description - result.Organizations = raw.Organizations - - for _, v := range raw.RelatedResources { - rr, err := parseRelatedResource(v) - if err != nil { - return nil, fmt.Errorf("invalid related-resource definition %s: %w", v, err) - } - result.RelatedResources = append(result.RelatedResources, rr) - } - - for _, pair := range raw.Schemas { - k, v := unwrapPair(pair) - - var a SchemaAnnotation - var err error - - a.Path, err = ParseRef(k) - if err != nil { - return nil, fmt.Errorf("invalid document reference") - } - - switch v := v.(type) { - case string: - a.Schema, err = parseSchemaRef(v) - if err != nil { - return nil, err - } - case map[interface{}]interface{}: - w, err := convertYAMLMapKeyTypes(v, nil) - if err != nil { - return nil, fmt.Errorf("invalid schema definition: %w", err) - } - a.Definition = &w - default: - return nil, fmt.Errorf("invalid schema declaration for path %q", k) - } - - result.Schemas = append(result.Schemas, &a) - } - - for _, v := range raw.Authors { - author, err := parseAuthor(v) - if err != nil { - return nil, fmt.Errorf("invalid author definition %s: %w", v, err) - } - result.Authors = append(result.Authors, author) - } - - result.Custom = make(map[string]interface{}) - for k, v := range raw.Custom { - val, err := convertYAMLMapKeyTypes(v, nil) - if err != nil { - return nil, err - } - result.Custom[k] = val - } - - result.Location = b.loc - - // recreate original text of entire metadata block for location text attribute - sb := strings.Builder{} - sb.WriteString("# METADATA\n") - - lines := bytes.Split(b.buf.Bytes(), []byte{'\n'}) - - for _, line := range lines[:len(lines)-1] { - sb.WriteString("# ") - sb.Write(line) - sb.WriteByte('\n') - } - - result.Location.Text = []byte(strings.TrimSuffix(sb.String(), "\n")) - - return &result, nil -} - -// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise cryptic error. -// These are hints, instead of proper errors, because they are educated guesses, and aren't guaranteed to be correct. 
-func augmentYamlError(err error, comments []*Comment) error { - // Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol - for _, comment := range comments { - txt := string(comment.Text) - parts := strings.Split(txt, ":") - if len(parts) > 1 { - parts = parts[1:] - var invalidSpaces []string - for partIndex, part := range parts { - if len(part) == 0 && partIndex == len(parts)-1 { - invalidSpaces = []string{} - break - } - - r, _ := utf8.DecodeRuneInString(part) - if r == ' ' || r == '\t' { - invalidSpaces = []string{} - break - } - - invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r)) - } - if len(invalidSpaces) > 0 { - err = fmt.Errorf( - "%s\n Hint: on line %d, symbol(s) %v immediately following a key/value separator ':' is not a legal yaml space character", - err.Error(), comment.Location.Row, invalidSpaces) - } - } - } - return err -} - -func unwrapPair(pair map[string]interface{}) (string, interface{}) { - for k, v := range pair { - return k, v - } - return "", nil -} - -var errInvalidSchemaRef = fmt.Errorf("invalid schema reference") - -// NOTE(tsandall): 'schema' is not registered as a root because it's not -// supported by the compiler or evaluator today. Once we fix that, we can remove -// this function. -func parseSchemaRef(s string) (Ref, error) { - - term, err := ParseTerm(s) - if err == nil { - switch v := term.Value.(type) { - case Var: - if term.Equal(SchemaRootDocument) { - return SchemaRootRef.Copy(), nil - } - case Ref: - if v.HasPrefix(SchemaRootRef) { - return v, nil - } - } - } - - return nil, errInvalidSchemaRef -} - -func parseRelatedResource(rr interface{}) (*RelatedResourceAnnotation, error) { - rr, err := convertYAMLMapKeyTypes(rr, nil) - if err != nil { - return nil, err - } - - switch rr := rr.(type) { - case string: - if len(rr) > 0 { - u, err := url.Parse(rr) - if err != nil { - return nil, err - } - return &RelatedResourceAnnotation{Ref: *u}, nil - } - return nil, fmt.Errorf("ref URL may not be empty string") - case map[string]interface{}: - description := strings.TrimSpace(getSafeString(rr, "description")) - ref := strings.TrimSpace(getSafeString(rr, "ref")) - if len(ref) > 0 { - u, err := url.Parse(ref) - if err != nil { - return nil, err - } - return &RelatedResourceAnnotation{Description: description, Ref: *u}, nil - } - return nil, fmt.Errorf("'ref' value required in object") - } - - return nil, fmt.Errorf("invalid value type, must be string or map") -} - -func parseAuthor(a interface{}) (*AuthorAnnotation, error) { - a, err := convertYAMLMapKeyTypes(a, nil) - if err != nil { - return nil, err - } - - switch a := a.(type) { - case string: - return parseAuthorString(a) - case map[string]interface{}: - name := strings.TrimSpace(getSafeString(a, "name")) - email := strings.TrimSpace(getSafeString(a, "email")) - if len(name) > 0 || len(email) > 0 { - return &AuthorAnnotation{name, email}, nil - } - return nil, fmt.Errorf("'name' and/or 'email' values required in object") - } - - return nil, fmt.Errorf("invalid value type, must be string or map") -} - -func getSafeString(m map[string]interface{}, k string) string { - if v, found := m[k]; found { - if s, ok := v.(string); ok { - return s - } - } - return "" -} - -const emailPrefix = "<" -const emailSuffix = ">" - -// parseAuthor parses a string into an AuthorAnnotation. If the last word of the input string is enclosed within <>, -// it is extracted as the author's email. The email may not contain whitelines, as it then will be interpreted as -// multiple words. 
-func parseAuthorString(s string) (*AuthorAnnotation, error) { - parts := strings.Fields(s) - - if len(parts) == 0 { - return nil, fmt.Errorf("author is an empty string") - } - - namePartCount := len(parts) - trailing := parts[namePartCount-1] - var email string - if len(trailing) >= len(emailPrefix)+len(emailSuffix) && strings.HasPrefix(trailing, emailPrefix) && - strings.HasSuffix(trailing, emailSuffix) { - email = trailing[len(emailPrefix):] - email = email[0 : len(email)-len(emailSuffix)] - namePartCount = namePartCount - 1 - } - - name := strings.Join(parts[0:namePartCount], " ") - - return &AuthorAnnotation{Name: name, Email: email}, nil -} - -func convertYAMLMapKeyTypes(x interface{}, path []string) (interface{}, error) { - var err error - switch x := x.(type) { - case map[interface{}]interface{}: - result := make(map[string]interface{}, len(x)) - for k, v := range x { - str, ok := k.(string) - if !ok { - return nil, fmt.Errorf("invalid map key type(s): %v", strings.Join(path, "/")) - } - result[str], err = convertYAMLMapKeyTypes(v, append(path, str)) - if err != nil { - return nil, err - } - } - return result, nil - case []interface{}: - for i := range x { - x[i], err = convertYAMLMapKeyTypes(x[i], append(path, fmt.Sprintf("%d", i))) - if err != nil { - return nil, err - } - } - return x, nil - default: - return x, nil - } -} - -// futureKeywords is the source of truth for future keywords that will -// eventually become standard keywords inside of Rego. -var futureKeywords = map[string]tokens.Token{ - "in": tokens.In, - "every": tokens.Every, - "contains": tokens.Contains, - "if": tokens.If, + return v1.NewParser().WithRegoVersion(DefaultRegoVersion) } func IsFutureKeyword(s string) bool { - _, ok := futureKeywords[s] - return ok -} - -func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) { - path := imp.Path.Value.(Ref) - - if len(path) == 1 || !path[1].Equal(StringTerm("keywords")) { - p.errorf(imp.Path.Location, "invalid import, must be `future.keywords`") - return - } - - if imp.Alias != "" { - p.errorf(imp.Path.Location, "`future` imports cannot be aliased") - return - } - - if p.s.s.RegoV1Compatible() { - p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef) - return - } - - kwds := make([]string, 0, len(allowedFutureKeywords)) - for k := range allowedFutureKeywords { - kwds = append(kwds, k) - } - - switch len(path) { - case 2: // all keywords imported, nothing to do - case 3: // one keyword imported - kw, ok := path[2].Value.(String) - if !ok { - p.errorf(imp.Path.Location, "invalid import, must be `future.keywords.x`, e.g. `import future.keywords.in`") - return - } - keyword := string(kw) - _, ok = allowedFutureKeywords[keyword] - if !ok { - sort.Strings(kwds) // so the error message is stable - p.errorf(imp.Path.Location, "unexpected keyword, must be one of %v", kwds) - return - } - - kwds = []string{keyword} // overwrite - } - for _, kw := range kwds { - p.s.s.AddKeyword(kw, allowedFutureKeywords[kw]) - } -} - -func (p *Parser) regoV1Import(imp *Import) { - if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) { - p.errorf(imp.Path.Location, "invalid import, `%s` is not supported by current capabilities", RegoV1CompatibleRef) - return - } - - if p.po.RegoVersion == RegoV1 { - // We're parsing for Rego v1, where the 'rego.v1' import is a no-op. 
- return - } - - path := imp.Path.Value.(Ref) - - if len(path) == 1 || !path[1].Equal(RegoV1CompatibleRef[1]) || len(path) > 2 { - p.errorf(imp.Path.Location, "invalid import `%s`, must be `%s`", path, RegoV1CompatibleRef) - return - } - - if imp.Alias != "" { - p.errorf(imp.Path.Location, "`rego` imports cannot be aliased") - return - } - - // import all future keywords with the rego.v1 import - kwds := make([]string, 0, len(futureKeywords)) - for k := range futureKeywords { - kwds = append(kwds, k) - } - - if p.s.s.HasKeyword(futureKeywords) && !p.s.s.RegoV1Compatible() { - // We have imported future keywords, but they didn't come from another `rego.v1` import. - p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef) - return - } - - p.s.s.SetRegoV1Compatible() - for _, kw := range kwds { - p.s.s.AddKeyword(kw, futureKeywords[kw]) - } + return v1.IsFutureKeywordForRegoVersion(s, RegoV0) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go index afaa1d890..3b8b40682 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/parser_ext.go @@ -1,24 +1,13 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -// This file contains extra functions for parsing Rego. -// Most of the parsing is handled by the code in parser.go, -// however, there are additional utilities that are -// helpful for dealing with Rego source inputs (e.g., REPL -// statements, source files, etc.) - package ast import ( - "bytes" - "errors" "fmt" - "strings" - "unicode" - "github.com/open-policy-agent/opa/ast/internal/tokens" - astJSON "github.com/open-policy-agent/opa/ast/json" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // MustParseBody returns a parsed body. @@ -30,11 +19,7 @@ func MustParseBody(input string) Body { // MustParseBodyWithOpts returns a parsed body. // If an error occurs during parsing, panic. func MustParseBodyWithOpts(input string, opts ParserOptions) Body { - parsed, err := ParseBodyWithOpts(input, opts) - if err != nil { - panic(err) - } - return parsed + return v1.MustParseBodyWithOpts(input, setDefaultRegoVersion(opts)) } // MustParseExpr returns a parsed expression. @@ -66,11 +51,7 @@ func MustParseModule(input string) *Module { // MustParseModuleWithOpts returns a parsed module. // If an error occurs during parsing, panic. func MustParseModuleWithOpts(input string, opts ParserOptions) *Module { - parsed, err := ParseModuleWithOpts("", input, opts) - if err != nil { - panic(err) - } - return parsed + return v1.MustParseModuleWithOpts(input, setDefaultRegoVersion(opts)) } // MustParsePackage returns a Package. @@ -103,6 +84,10 @@ func MustParseStatement(input string) Statement { return parsed } +func MustParseStatementWithOpts(input string, popts ParserOptions) Statement { + return v1.MustParseStatementWithOpts(input, setDefaultRegoVersion(popts)) +} + // MustParseRef returns a parsed reference. // If an error occurs during parsing, panic. func MustParseRef(input string) Ref { @@ -123,6 +108,12 @@ func MustParseRule(input string) *Rule { return parsed } +// MustParseRuleWithOpts returns a parsed rule. +// If an error occurs during parsing, panic. 
+func MustParseRuleWithOpts(input string, opts ParserOptions) *Rule { + return v1.MustParseRuleWithOpts(input, setDefaultRegoVersion(opts)) +} + // MustParseTerm returns a parsed term. // If an error occurs during parsing, panic. func MustParseTerm(input string) *Term { @@ -136,330 +127,59 @@ func MustParseTerm(input string) *Term { // ParseRuleFromBody returns a rule if the body can be interpreted as a rule // definition. Otherwise, an error is returned. func ParseRuleFromBody(module *Module, body Body) (*Rule, error) { - - if len(body) != 1 { - return nil, fmt.Errorf("multiple expressions cannot be used for rule head") - } - - return ParseRuleFromExpr(module, body[0]) + return v1.ParseRuleFromBody(module, body) } // ParseRuleFromExpr returns a rule if the expression can be interpreted as a // rule definition. func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) { - - if len(expr.With) > 0 { - return nil, fmt.Errorf("expressions using with keyword cannot be used for rule head") - } - - if expr.Negated { - return nil, fmt.Errorf("negated expressions cannot be used for rule head") - } - - if _, ok := expr.Terms.(*SomeDecl); ok { - return nil, errors.New("'some' declarations cannot be used for rule head") - } - - if term, ok := expr.Terms.(*Term); ok { - switch v := term.Value.(type) { - case Ref: - if len(v) > 2 { // 2+ dots - return ParseCompleteDocRuleWithDotsFromTerm(module, term) - } - return ParsePartialSetDocRuleFromTerm(module, term) - default: - return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(v)) - } - } - - if _, ok := expr.Terms.([]*Term); !ok { - // This is a defensive check in case other kinds of expression terms are - // introduced in the future. - return nil, errors.New("expression cannot be used for rule head") - } - - if expr.IsEquality() { - return parseCompleteRuleFromEq(module, expr) - } else if expr.IsAssignment() { - rule, err := parseCompleteRuleFromEq(module, expr) - if err != nil { - return nil, err - } - rule.Head.Assign = true - return rule, nil - } - - if _, ok := BuiltinMap[expr.Operator().String()]; ok { - return nil, fmt.Errorf("rule name conflicts with built-in function") - } - - return ParseRuleFromCallExpr(module, expr.Terms.([]*Term)) -} - -func parseCompleteRuleFromEq(module *Module, expr *Expr) (rule *Rule, err error) { - - // ensure the rule location is set to the expr location - // the helper functions called below try to set the location based - // on the terms they've been provided but that is not as accurate. - defer func() { - if rule != nil { - rule.Location = expr.Location - rule.Head.Location = expr.Location - } - }() - - lhs, rhs := expr.Operand(0), expr.Operand(1) - if lhs == nil || rhs == nil { - return nil, errors.New("assignment requires two operands") - } - - rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs) - if err == nil { - return rule, nil - } - - rule, err = ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs) - if err == nil { - return rule, nil - } - - return ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) + return v1.ParseRuleFromExpr(module, expr) } // ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can // be interpreted as a complete document definition declared with the assignment // operator. 
func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - - rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) - if err != nil { - return nil, err - } - - rule.Head.Assign = true - - return rule, nil + return v1.ParseCompleteDocRuleFromAssignmentExpr(module, lhs, rhs) } // ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be // interpreted as a complete document definition. func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - var head *Head - - if v, ok := lhs.Value.(Var); ok { - // Modify the code to add the location to the head ref - // and set the head ref's jsonOptions. - head = VarHead(v, lhs.Location, &lhs.jsonOptions) - } else if r, ok := lhs.Value.(Ref); ok { // groundness ? - if _, ok := r[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", r) - } - head = RefHead(r) - if len(r) > 1 && !r[len(r)-1].IsGround() { - return nil, fmt.Errorf("ref not ground") - } - } else { - return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(lhs.Value)) - } - head.Value = rhs - head.Location = lhs.Location - head.setJSONOptions(lhs.jsonOptions) - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) - setJSONOptions(body, &rhs.jsonOptions) - - return &Rule{ - Location: lhs.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: lhs.jsonOptions, - }, nil + return v1.ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) } func ParseCompleteDocRuleWithDotsFromTerm(module *Module, term *Term) (*Rule, error) { - ref, ok := term.Value.(Ref) - if !ok { - return nil, fmt.Errorf("%v cannot be used for rule name", TypeName(term.Value)) - } - - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - head := RefHead(ref, BooleanTerm(true).SetLocation(term.Location)) - head.generatedValue = true - head.Location = term.Location - head.jsonOptions = term.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) - setJSONOptions(body, &term.jsonOptions) - - return &Rule{ - Location: term.Location, - Head: head, - Body: body, - Module: module, - - jsonOptions: term.jsonOptions, - }, nil + return v1.ParseCompleteDocRuleWithDotsFromTerm(module, term) } // ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be // interpreted as a partial object document definition. func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - ref, ok := lhs.Value.(Ref) - if !ok { - return nil, fmt.Errorf("%v cannot be used as rule name", TypeName(lhs.Value)) - } - - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - - head := RefHead(ref, rhs) - if len(ref) == 2 { // backcompat for naked `foo.bar = "baz"` statements - head.Name = ref[0].Value.(Var) - head.Key = ref[1] - } - head.Location = rhs.Location - head.jsonOptions = rhs.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) - setJSONOptions(body, &rhs.jsonOptions) - - rule := &Rule{ - Location: rhs.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: rhs.jsonOptions, - } - - return rule, nil + return v1.ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs) } // ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted // as a partial set document definition. 
func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) { - - ref, ok := term.Value.(Ref) - if !ok || len(ref) == 1 { - return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value)) - } - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - - head := RefHead(ref) - if len(ref) == 2 { - v, ok := ref[0].Value.(Var) - if !ok { - return nil, fmt.Errorf("%vs cannot be used for rule head", TypeName(term.Value)) - } - // Modify the code to add the location to the head ref - // and set the head ref's jsonOptions. - head = VarHead(v, ref[0].Location, &ref[0].jsonOptions) - head.Key = ref[1] - } - head.Location = term.Location - head.jsonOptions = term.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) - setJSONOptions(body, &term.jsonOptions) - - rule := &Rule{ - Location: term.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: term.jsonOptions, - } - - return rule, nil + return v1.ParsePartialSetDocRuleFromTerm(module, term) } // ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a // function definition (e.g., f(x) = y => f(x) = y { true }). func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { - - call, ok := lhs.Value.(Call) - if !ok { - return nil, fmt.Errorf("must be call") - } - - ref, ok := call[0].Value.(Ref) - if !ok { - return nil, fmt.Errorf("%vs cannot be used in function signature", TypeName(call[0].Value)) - } - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - - head := RefHead(ref, rhs) - head.Location = lhs.Location - head.Args = Args(call[1:]) - head.jsonOptions = lhs.jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) - setJSONOptions(body, &rhs.jsonOptions) - - rule := &Rule{ - Location: lhs.Location, - Head: head, - Body: body, - Module: module, - jsonOptions: lhs.jsonOptions, - } - - return rule, nil + return v1.ParseRuleFromCallEqExpr(module, lhs, rhs) } // ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a // function returning true or some value (e.g., f(x) => f(x) = true { true }). func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) { - - if len(terms) <= 1 { - return nil, fmt.Errorf("rule argument list must take at least one argument") - } - - loc := terms[0].Location - ref := terms[0].Value.(Ref) - if _, ok := ref[0].Value.(Var); !ok { - return nil, fmt.Errorf("invalid rule head: %v", ref) - } - head := RefHead(ref, BooleanTerm(true).SetLocation(loc)) - head.Location = loc - head.Args = terms[1:] - head.jsonOptions = terms[0].jsonOptions - - body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc)) - setJSONOptions(body, &terms[0].jsonOptions) - - rule := &Rule{ - Location: loc, - Head: head, - Module: module, - Body: body, - jsonOptions: terms[0].jsonOptions, - } - return rule, nil + return v1.ParseRuleFromCallExpr(module, terms) } // ParseImports returns a slice of Import objects. 
func ParseImports(input string) ([]*Import, error) { - stmts, _, err := ParseStatements("", input) - if err != nil { - return nil, err - } - result := []*Import{} - for _, stmt := range stmts { - if imp, ok := stmt.(*Import); ok { - result = append(result, imp) - } else { - return nil, fmt.Errorf("expected import but got %T", stmt) - } - } - return result, nil + return v1.ParseImports(input) } // ParseModule returns a parsed Module object. @@ -473,11 +193,7 @@ func ParseModule(filename, input string) (*Module, error) { // For details on Module objects and their fields, see policy.go. // Empty input will return nil, nil. func ParseModuleWithOpts(filename, input string, popts ParserOptions) (*Module, error) { - stmts, comments, err := ParseStatementsWithOpts(filename, input, popts) - if err != nil { - return nil, err - } - return parseModule(filename, stmts, comments, popts.RegoVersion) + return v1.ParseModuleWithOpts(filename, input, setDefaultRegoVersion(popts)) } // ParseBody returns exactly one body. @@ -489,28 +205,7 @@ func ParseBody(input string) (Body, error) { // ParseBodyWithOpts returns exactly one body. It does _not_ set SkipRules: true on its own, // but respects whatever ParserOptions it's been given. func ParseBodyWithOpts(input string, popts ParserOptions) (Body, error) { - - stmts, _, err := ParseStatementsWithOpts("", input, popts) - if err != nil { - return nil, err - } - - result := Body{} - - for _, stmt := range stmts { - switch stmt := stmt.(type) { - case Body: - for i := range stmt { - result.Append(stmt[i]) - } - case *Comment: - // skip - default: - return nil, fmt.Errorf("expected body but got %T", stmt) - } - } - - return result, nil + return v1.ParseBodyWithOpts(input, setDefaultRegoVersion(popts)) } // ParseExpr returns exactly one expression. @@ -529,15 +224,7 @@ func ParseExpr(input string) (*Expr, error) { // ParsePackage returns exactly one Package. // If multiple statements are parsed, an error is returned. func ParsePackage(input string) (*Package, error) { - stmt, err := ParseStatement(input) - if err != nil { - return nil, err - } - pkg, ok := stmt.(*Package) - if !ok { - return nil, fmt.Errorf("expected package but got %T", stmt) - } - return pkg, nil + return v1.ParsePackage(input) } // ParseTerm returns exactly one term. @@ -573,18 +260,7 @@ func ParseRef(input string) (Ref, error) { // ParseRuleWithOpts returns exactly one rule. // If multiple rules are parsed, an error is returned. func ParseRuleWithOpts(input string, opts ParserOptions) (*Rule, error) { - stmts, _, err := ParseStatementsWithOpts("", input, opts) - if err != nil { - return nil, err - } - if len(stmts) != 1 { - return nil, fmt.Errorf("expected exactly one statement (rule), got %v = %T, %T", stmts, stmts[0], stmts[1]) - } - rule, ok := stmts[0].(*Rule) - if !ok { - return nil, fmt.Errorf("expected rule but got %T", stmts[0]) - } - return rule, nil + return v1.ParseRuleWithOpts(input, setDefaultRegoVersion(opts)) } // ParseRule returns exactly one rule. @@ -608,6 +284,10 @@ func ParseStatement(input string) (Statement, error) { return stmts[0], nil } +func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error) { + return v1.ParseStatementWithOpts(input, setDefaultRegoVersion(popts)) +} + // ParseStatements is deprecated. Use ParseStatementWithOpts instead. 
func ParseStatements(filename, input string) ([]Statement, []*Comment, error) { return ParseStatementsWithOpts(filename, input, ParserOptions{}) @@ -616,204 +296,15 @@ func ParseStatements(filename, input string) ([]Statement, []*Comment, error) { // ParseStatementsWithOpts returns a slice of parsed statements. This is the // default return value from the parser. func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Statement, []*Comment, error) { - - parser := NewParser(). - WithFilename(filename). - WithReader(bytes.NewBufferString(input)). - WithProcessAnnotation(popts.ProcessAnnotation). - WithFutureKeywords(popts.FutureKeywords...). - WithAllFutureKeywords(popts.AllFutureKeywords). - WithCapabilities(popts.Capabilities). - WithSkipRules(popts.SkipRules). - WithJSONOptions(popts.JSONOptions). - WithRegoVersion(popts.RegoVersion). - withUnreleasedKeywords(popts.unreleasedKeywords) - - stmts, comments, errs := parser.Parse() - - if len(errs) > 0 { - return nil, nil, errs - } - - return stmts, comments, nil -} - -func parseModule(filename string, stmts []Statement, comments []*Comment, regoCompatibilityMode RegoVersion) (*Module, error) { - - if len(stmts) == 0 { - return nil, NewError(ParseErr, &Location{File: filename}, "empty module") - } - - var errs Errors - - pkg, ok := stmts[0].(*Package) - if !ok { - loc := stmts[0].Loc() - errs = append(errs, NewError(ParseErr, loc, "package expected")) - } - - mod := &Module{ - Package: pkg, - stmts: stmts, - } - - // The comments slice only holds comments that were not their own statements. - mod.Comments = append(mod.Comments, comments...) - mod.regoVersion = regoCompatibilityMode - - for i, stmt := range stmts[1:] { - switch stmt := stmt.(type) { - case *Import: - mod.Imports = append(mod.Imports, stmt) - if mod.regoVersion == RegoV0 && Compare(stmt.Path.Value, RegoV1CompatibleRef) == 0 { - mod.regoVersion = RegoV0CompatV1 - } - case *Rule: - setRuleModule(stmt, mod) - mod.Rules = append(mod.Rules, stmt) - case Body: - rule, err := ParseRuleFromBody(mod, stmt) - if err != nil { - errs = append(errs, NewError(ParseErr, stmt[0].Location, err.Error())) - continue - } - rule.generatedBody = true - mod.Rules = append(mod.Rules, rule) - - // NOTE(tsandall): the statement should now be interpreted as a - // rule so update the statement list. This is important for the - // logic below that associates annotations with statements. - stmts[i+1] = rule - case *Package: - errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package")) - case *Annotations: - mod.Annotations = append(mod.Annotations, stmt) - case *Comment: - // Ignore comments, they're handled above. - default: - panic("illegal value") // Indicates grammar is out-of-sync with code. - } - } - - if mod.regoVersion == RegoV0CompatV1 || mod.regoVersion == RegoV1 { - for _, rule := range mod.Rules { - for r := rule; r != nil; r = r.Else { - errs = append(errs, CheckRegoV1(r)...) - } - } - } - - if len(errs) > 0 { - return nil, errs - } - - errs = append(errs, attachAnnotationsNodes(mod)...) 
- - if len(errs) > 0 { - return nil, errs - } - - attachRuleAnnotations(mod) - - return mod, nil -} - -func ruleDeclarationHasKeyword(rule *Rule, keyword tokens.Token) bool { - for _, kw := range rule.Head.keywords { - if kw == keyword { - return true - } - } - return false -} - -func newScopeAttachmentErr(a *Annotations, want string) *Error { - var have string - if a.node != nil { - have = fmt.Sprintf(" (have %v)", TypeName(a.node)) - } - return NewError(ParseErr, a.Loc(), "annotation scope '%v' must be applied to %v%v", a.Scope, want, have) -} - -func setRuleModule(rule *Rule, module *Module) { - rule.Module = module - if rule.Else != nil { - setRuleModule(rule.Else, module) - } -} - -func setJSONOptions(x interface{}, jsonOptions *astJSON.Options) { - vis := NewGenericVisitor(func(x interface{}) bool { - if x, ok := x.(customJSON); ok { - x.setJSONOptions(*jsonOptions) - } - return false - }) - vis.Walk(x) + return v1.ParseStatementsWithOpts(filename, input, setDefaultRegoVersion(popts)) } // ParserErrorDetail holds additional details for parser errors. -type ParserErrorDetail struct { - Line string `json:"line"` - Idx int `json:"idx"` -} - -func newParserErrorDetail(bs []byte, offset int) *ParserErrorDetail { - - // Find first non-space character at or before offset position. - if offset >= len(bs) { - offset = len(bs) - 1 - } else if offset < 0 { - offset = 0 - } - - for offset > 0 && unicode.IsSpace(rune(bs[offset])) { - offset-- - } - - // Find beginning of line containing offset. - begin := offset - - for begin > 0 && !isNewLineChar(bs[begin]) { - begin-- - } - - if isNewLineChar(bs[begin]) { - begin++ - } +type ParserErrorDetail = v1.ParserErrorDetail - // Find end of line containing offset. - end := offset - - for end < len(bs) && !isNewLineChar(bs[end]) { - end++ - } - - if begin > end { - begin = end - } - - // Extract line and compute index of offset byte in line. - line := bs[begin:end] - index := offset - begin - - return &ParserErrorDetail{ - Line: string(line), - Idx: index, +func setDefaultRegoVersion(opts ParserOptions) ParserOptions { + if opts.RegoVersion == RegoUndefined { + opts.RegoVersion = DefaultRegoVersion } -} - -// Lines returns the pretty formatted line output for the error details. -func (d ParserErrorDetail) Lines() []string { - line := strings.TrimLeft(d.Line, "\t") // remove leading tabs - tabCount := len(d.Line) - len(line) - indent := d.Idx - tabCount - if indent < 0 { - indent = 0 - } - return []string{line, strings.Repeat(" ", indent) + "^"} -} - -func isNewLineChar(b byte) bool { - return b == '\r' || b == '\n' + return opts } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/policy.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/policy.go index f07cf7b37..a29f0dcc7 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/policy.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/policy.go @@ -1,182 +1,113 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. package ast import ( - "bytes" - "encoding/json" - "fmt" - "math/rand" - "strings" - "time" - - "github.com/open-policy-agent/opa/ast/internal/tokens" astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) -// Initialize seed for term hashing. 
This is intentionally placed before the -// root document sets are constructed to ensure they use the same hash seed as -// subsequent lookups. If the hash seeds are out of sync, lookups will fail. -var hashSeed = rand.New(rand.NewSource(time.Now().UnixNano())) -var hashSeed0 = (uint64(hashSeed.Uint32()) << 32) | uint64(hashSeed.Uint32()) - // DefaultRootDocument is the default root document. // // All package directives inside source files are implicitly prefixed with the // DefaultRootDocument value. -var DefaultRootDocument = VarTerm("data") +var DefaultRootDocument = v1.DefaultRootDocument // InputRootDocument names the document containing query arguments. -var InputRootDocument = VarTerm("input") +var InputRootDocument = v1.InputRootDocument // SchemaRootDocument names the document containing external data schemas. -var SchemaRootDocument = VarTerm("schema") +var SchemaRootDocument = v1.SchemaRootDocument // FunctionArgRootDocument names the document containing function arguments. // It's only for internal usage, for referencing function arguments between // the index and topdown. -var FunctionArgRootDocument = VarTerm("args") +var FunctionArgRootDocument = v1.FunctionArgRootDocument // FutureRootDocument names the document containing new, to-become-default, // features. -var FutureRootDocument = VarTerm("future") +var FutureRootDocument = v1.FutureRootDocument // RegoRootDocument names the document containing new, to-become-default, // features in a future versioned release. -var RegoRootDocument = VarTerm("rego") +var RegoRootDocument = v1.RegoRootDocument // RootDocumentNames contains the names of top-level documents that can be // referred to in modules and queries. // // Note, the schema document is not currently implemented in the evaluator so it // is not registered as a root document name (yet). -var RootDocumentNames = NewSet( - DefaultRootDocument, - InputRootDocument, -) +var RootDocumentNames = v1.RootDocumentNames // DefaultRootRef is a reference to the root of the default document. // // All refs to data in the policy engine's storage layer are prefixed with this ref. -var DefaultRootRef = Ref{DefaultRootDocument} +var DefaultRootRef = v1.DefaultRootRef // InputRootRef is a reference to the root of the input document. // // All refs to query arguments are prefixed with this ref. -var InputRootRef = Ref{InputRootDocument} +var InputRootRef = v1.InputRootRef // SchemaRootRef is a reference to the root of the schema document. // // All refs to schema documents are prefixed with this ref. Note, the schema // document is not currently implemented in the evaluator so it is not // registered as a root document ref (yet). -var SchemaRootRef = Ref{SchemaRootDocument} +var SchemaRootRef = v1.SchemaRootRef // RootDocumentRefs contains the prefixes of top-level documents that all // non-local references start with. -var RootDocumentRefs = NewSet( - NewTerm(DefaultRootRef), - NewTerm(InputRootRef), -) +var RootDocumentRefs = v1.RootDocumentRefs // SystemDocumentKey is the name of the top-level key that identifies the system // document. -var SystemDocumentKey = String("system") +const SystemDocumentKey = v1.SystemDocumentKey // ReservedVars is the set of names that refer to implicitly ground vars. -var ReservedVars = NewVarSet( - DefaultRootDocument.Value.(Var), - InputRootDocument.Value.(Var), -) +var ReservedVars = v1.ReservedVars // Wildcard represents the wildcard variable as defined in the language. 
-var Wildcard = &Term{Value: Var("_")} +var Wildcard = v1.Wildcard // WildcardPrefix is the special character that all wildcard variables are // prefixed with when the statement they are contained in is parsed. -var WildcardPrefix = "$" +const WildcardPrefix = v1.WildcardPrefix // Keywords contains strings that map to language keywords. -var Keywords = KeywordsV0 +var Keywords = v1.Keywords -var KeywordsV0 = [...]string{ - "not", - "package", - "import", - "as", - "default", - "else", - "with", - "null", - "true", - "false", - "some", -} +var KeywordsV0 = v1.KeywordsV0 -var KeywordsV1 = [...]string{ - "not", - "package", - "import", - "as", - "default", - "else", - "with", - "null", - "true", - "false", - "some", - "if", - "contains", - "in", - "every", +var KeywordsV1 = v1.KeywordsV1 + +func KeywordsForRegoVersion(v RegoVersion) []string { + return v1.KeywordsForRegoVersion(v) } // IsKeyword returns true if s is a language keyword. func IsKeyword(s string) bool { - for _, x := range Keywords { - if x == s { - return true - } - } - return false + return v1.IsKeyword(s) +} + +func IsInKeywords(s string, keywords []string) bool { + return v1.IsInKeywords(s, keywords) } // IsKeywordInRegoVersion returns true if s is a language keyword. func IsKeywordInRegoVersion(s string, regoVersion RegoVersion) bool { - switch regoVersion { - case RegoV0: - for _, x := range KeywordsV0 { - if x == s { - return true - } - } - case RegoV1, RegoV0CompatV1: - for _, x := range KeywordsV1 { - if x == s { - return true - } - } - } - - return false + return v1.IsKeywordInRegoVersion(s, regoVersion) } type ( // Node represents a node in an AST. Nodes may be statements in a policy module // or elements of an ad-hoc query, expression, etc. - Node interface { - fmt.Stringer - Loc() *Location - SetLoc(*Location) - } + Node = v1.Node // Statement represents a single statement in a policy module. - Statement interface { - Node - } + Statement = v1.Statement ) type ( @@ -184,712 +115,72 @@ type ( // Module represents a collection of policies (defined by rules) // within a namespace (defined by the package) and optional // dependencies on external documents (defined by imports). - Module struct { - Package *Package `json:"package"` - Imports []*Import `json:"imports,omitempty"` - Annotations []*Annotations `json:"annotations,omitempty"` - Rules []*Rule `json:"rules,omitempty"` - Comments []*Comment `json:"comments,omitempty"` - stmts []Statement - regoVersion RegoVersion - } + Module = v1.Module // Comment contains the raw text from the comment in the definition. - Comment struct { - // TODO: these fields have inconsistent JSON keys with other structs in this package. - Text []byte - Location *Location - - jsonOptions astJSON.Options - } + Comment = v1.Comment // Package represents the namespace of the documents produced // by rules inside the module. - Package struct { - Path Ref `json:"path"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + Package = v1.Package // Import represents a dependency on a document outside of the policy // namespace. Imports are optional. - Import struct { - Path *Term `json:"path"` - Alias Var `json:"alias,omitempty"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + Import = v1.Import // Rule represents a rule as defined in the language. Rules define the // content of documents that represent policy decisions. 
- Rule struct { - Default bool `json:"default,omitempty"` - Head *Head `json:"head"` - Body Body `json:"body"` - Else *Rule `json:"else,omitempty"` - Location *Location `json:"location,omitempty"` - Annotations []*Annotations `json:"annotations,omitempty"` - - // Module is a pointer to the module containing this rule. If the rule - // was NOT created while parsing/constructing a module, this should be - // left unset. The pointer is not included in any standard operations - // on the rule (e.g., printing, comparison, visiting, etc.) - Module *Module `json:"-"` - - generatedBody bool - jsonOptions astJSON.Options - } + Rule = v1.Rule // Head represents the head of a rule. - Head struct { - Name Var `json:"name,omitempty"` - Reference Ref `json:"ref,omitempty"` - Args Args `json:"args,omitempty"` - Key *Term `json:"key,omitempty"` - Value *Term `json:"value,omitempty"` - Assign bool `json:"assign,omitempty"` - Location *Location `json:"location,omitempty"` - - keywords []tokens.Token - generatedValue bool - jsonOptions astJSON.Options - } + Head = v1.Head // Args represents zero or more arguments to a rule. - Args []*Term + Args = v1.Args // Body represents one or more expressions contained inside a rule or user // function. - Body []*Expr + Body = v1.Body // Expr represents a single expression contained inside the body of a rule. - Expr struct { - With []*With `json:"with,omitempty"` - Terms interface{} `json:"terms"` - Index int `json:"index"` - Generated bool `json:"generated,omitempty"` - Negated bool `json:"negated,omitempty"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - generatedFrom *Expr - generates []*Expr - } + Expr = v1.Expr // SomeDecl represents a variable declaration statement. The symbols are variables. - SomeDecl struct { - Symbols []*Term `json:"symbols"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + SomeDecl = v1.SomeDecl - Every struct { - Key *Term `json:"key"` - Value *Term `json:"value"` - Domain *Term `json:"domain"` - Body Body `json:"body"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + Every = v1.Every // With represents a modifier on an expression. - With struct { - Target *Term `json:"target"` - Value *Term `json:"value"` - Location *Location `json:"location,omitempty"` - - jsonOptions astJSON.Options - } + With = v1.With ) -// Compare returns an integer indicating whether mod is less than, equal to, -// or greater than other. -func (mod *Module) Compare(other *Module) int { - if mod == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := mod.Package.Compare(other.Package); cmp != 0 { - return cmp - } - if cmp := importsCompare(mod.Imports, other.Imports); cmp != 0 { - return cmp - } - if cmp := annotationsCompare(mod.Annotations, other.Annotations); cmp != 0 { - return cmp - } - return rulesCompare(mod.Rules, other.Rules) -} - -// Copy returns a deep copy of mod. 
-func (mod *Module) Copy() *Module { - cpy := *mod - cpy.Rules = make([]*Rule, len(mod.Rules)) - - nodes := make(map[Node]Node, len(mod.Rules)+len(mod.Imports)+1 /* package */) - - for i := range mod.Rules { - cpy.Rules[i] = mod.Rules[i].Copy() - cpy.Rules[i].Module = &cpy - nodes[mod.Rules[i]] = cpy.Rules[i] - } - - cpy.Imports = make([]*Import, len(mod.Imports)) - for i := range mod.Imports { - cpy.Imports[i] = mod.Imports[i].Copy() - nodes[mod.Imports[i]] = cpy.Imports[i] - } - - cpy.Package = mod.Package.Copy() - nodes[mod.Package] = cpy.Package - - cpy.Annotations = make([]*Annotations, len(mod.Annotations)) - for i, a := range mod.Annotations { - cpy.Annotations[i] = a.Copy(nodes[a.node]) - } - - cpy.Comments = make([]*Comment, len(mod.Comments)) - for i := range mod.Comments { - cpy.Comments[i] = mod.Comments[i].Copy() - } - - cpy.stmts = make([]Statement, len(mod.stmts)) - for i := range mod.stmts { - cpy.stmts[i] = nodes[mod.stmts[i]] - } - - return &cpy -} - -// Equal returns true if mod equals other. -func (mod *Module) Equal(other *Module) bool { - return mod.Compare(other) == 0 -} - -func (mod *Module) String() string { - byNode := map[Node][]*Annotations{} - for _, a := range mod.Annotations { - byNode[a.node] = append(byNode[a.node], a) - } - - appendAnnotationStrings := func(buf []string, node Node) []string { - if as, ok := byNode[node]; ok { - for i := range as { - buf = append(buf, "# METADATA") - buf = append(buf, "# "+as[i].String()) - } - } - return buf - } - - buf := []string{} - buf = appendAnnotationStrings(buf, mod.Package) - buf = append(buf, mod.Package.String()) - - if len(mod.Imports) > 0 { - buf = append(buf, "") - for _, imp := range mod.Imports { - buf = appendAnnotationStrings(buf, imp) - buf = append(buf, imp.String()) - } - } - if len(mod.Rules) > 0 { - buf = append(buf, "") - for _, rule := range mod.Rules { - buf = appendAnnotationStrings(buf, rule) - buf = append(buf, rule.String()) - } - } - return strings.Join(buf, "\n") -} - -// RuleSet returns a RuleSet containing named rules in the mod. -func (mod *Module) RuleSet(name Var) RuleSet { - rs := NewRuleSet() - for _, rule := range mod.Rules { - if rule.Head.Name.Equal(name) { - rs.Add(rule) - } - } - return rs -} - -// UnmarshalJSON parses bs and stores the result in mod. The rules in the module -// will have their module pointer set to mod. -func (mod *Module) UnmarshalJSON(bs []byte) error { - - // Declare a new type and use a type conversion to avoid recursively calling - // Module#UnmarshalJSON. - type module Module - - if err := util.UnmarshalJSON(bs, (*module)(mod)); err != nil { - return err - } - - WalkRules(mod, func(rule *Rule) bool { - rule.Module = mod - return false - }) - - return nil -} - -func (mod *Module) regoV1Compatible() bool { - return mod.regoVersion == RegoV1 || mod.regoVersion == RegoV0CompatV1 -} - -func (mod *Module) RegoVersion() RegoVersion { - return mod.regoVersion -} - -// SetRegoVersion sets the RegoVersion for the module. -// Note: Setting a rego-version that does not match the module's rego-version might have unintended consequences. -func (mod *Module) SetRegoVersion(v RegoVersion) { - mod.regoVersion = v -} - // NewComment returns a new Comment object. func NewComment(text []byte) *Comment { - return &Comment{ - Text: text, - } -} - -// Loc returns the location of the comment in the definition. -func (c *Comment) Loc() *Location { - if c == nil { - return nil - } - return c.Location -} - -// SetLoc sets the location on c. 
-func (c *Comment) SetLoc(loc *Location) { - c.Location = loc -} - -func (c *Comment) String() string { - return "#" + string(c.Text) -} - -// Copy returns a deep copy of c. -func (c *Comment) Copy() *Comment { - cpy := *c - cpy.Text = make([]byte, len(c.Text)) - copy(cpy.Text, c.Text) - return &cpy -} - -// Equal returns true if this comment equals the other comment. -// Unlike other equality checks on AST nodes, comment equality -// depends on location. -func (c *Comment) Equal(other *Comment) bool { - return c.Location.Equal(other.Location) && bytes.Equal(c.Text, other.Text) -} - -func (c *Comment) setJSONOptions(opts astJSON.Options) { - // Note: this is not used for location since Comments use default JSON marshaling - // behavior with struct field names in JSON. - c.jsonOptions = opts - if c.Location != nil { - c.Location.JSONOptions = opts - } -} - -// Compare returns an integer indicating whether pkg is less than, equal to, -// or greater than other. -func (pkg *Package) Compare(other *Package) int { - return Compare(pkg.Path, other.Path) -} - -// Copy returns a deep copy of pkg. -func (pkg *Package) Copy() *Package { - cpy := *pkg - cpy.Path = pkg.Path.Copy() - return &cpy -} - -// Equal returns true if pkg is equal to other. -func (pkg *Package) Equal(other *Package) bool { - return pkg.Compare(other) == 0 -} - -// Loc returns the location of the Package in the definition. -func (pkg *Package) Loc() *Location { - if pkg == nil { - return nil - } - return pkg.Location -} - -// SetLoc sets the location on pkg. -func (pkg *Package) SetLoc(loc *Location) { - pkg.Location = loc -} - -func (pkg *Package) String() string { - if pkg == nil { - return "" - } else if len(pkg.Path) <= 1 { - return fmt.Sprintf("package ", pkg.Path) - } - // Omit head as all packages have the DefaultRootDocument prepended at parse time. - path := make(Ref, len(pkg.Path)-1) - path[0] = VarTerm(string(pkg.Path[1].Value.(String))) - copy(path[1:], pkg.Path[2:]) - return fmt.Sprintf("package %v", path) -} - -func (pkg *Package) setJSONOptions(opts astJSON.Options) { - pkg.jsonOptions = opts - if pkg.Location != nil { - pkg.Location.JSONOptions = opts - } -} - -func (pkg *Package) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "path": pkg.Path, - } - - if pkg.jsonOptions.MarshalOptions.IncludeLocation.Package { - if pkg.Location != nil { - data["location"] = pkg.Location - } - } - - return json.Marshal(data) + return v1.NewComment(text) } // IsValidImportPath returns an error indicating if the import path is invalid. // If the import path is valid, err is nil. func IsValidImportPath(v Value) (err error) { - switch v := v.(type) { - case Var: - if !v.Equal(DefaultRootDocument.Value) && !v.Equal(InputRootDocument.Value) { - return fmt.Errorf("invalid path %v: path must begin with input or data", v) - } - case Ref: - if err := IsValidImportPath(v[0].Value); err != nil { - return fmt.Errorf("invalid path %v: path must begin with input or data", v) - } - for _, e := range v[1:] { - if _, ok := e.Value.(String); !ok { - return fmt.Errorf("invalid path %v: path elements must be strings", v) - } - } - default: - return fmt.Errorf("invalid path %v: path must be ref or var", v) - } - return nil -} - -// Compare returns an integer indicating whether imp is less than, equal to, -// or greater than other. 
-func (imp *Import) Compare(other *Import) int { - if imp == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := Compare(imp.Path, other.Path); cmp != 0 { - return cmp - } - return Compare(imp.Alias, other.Alias) -} - -// Copy returns a deep copy of imp. -func (imp *Import) Copy() *Import { - cpy := *imp - cpy.Path = imp.Path.Copy() - return &cpy -} - -// Equal returns true if imp is equal to other. -func (imp *Import) Equal(other *Import) bool { - return imp.Compare(other) == 0 -} - -// Loc returns the location of the Import in the definition. -func (imp *Import) Loc() *Location { - if imp == nil { - return nil - } - return imp.Location -} - -// SetLoc sets the location on imp. -func (imp *Import) SetLoc(loc *Location) { - imp.Location = loc -} - -// Name returns the variable that is used to refer to the imported virtual -// document. This is the alias if defined otherwise the last element in the -// path. -func (imp *Import) Name() Var { - if len(imp.Alias) != 0 { - return imp.Alias - } - switch v := imp.Path.Value.(type) { - case Var: - return v - case Ref: - if len(v) == 1 { - return v[0].Value.(Var) - } - return Var(v[len(v)-1].Value.(String)) - } - panic("illegal import") -} - -func (imp *Import) String() string { - buf := []string{"import", imp.Path.String()} - if len(imp.Alias) > 0 { - buf = append(buf, "as "+imp.Alias.String()) - } - return strings.Join(buf, " ") -} - -func (imp *Import) setJSONOptions(opts astJSON.Options) { - imp.jsonOptions = opts - if imp.Location != nil { - imp.Location.JSONOptions = opts - } -} - -func (imp *Import) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "path": imp.Path, - } - - if len(imp.Alias) != 0 { - data["alias"] = imp.Alias - } - - if imp.jsonOptions.MarshalOptions.IncludeLocation.Import { - if imp.Location != nil { - data["location"] = imp.Location - } - } - - return json.Marshal(data) -} - -// Compare returns an integer indicating whether rule is less than, equal to, -// or greater than other. -func (rule *Rule) Compare(other *Rule) int { - if rule == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := rule.Head.Compare(other.Head); cmp != 0 { - return cmp - } - if cmp := util.Compare(rule.Default, other.Default); cmp != 0 { - return cmp - } - if cmp := rule.Body.Compare(other.Body); cmp != 0 { - return cmp - } - - if cmp := annotationsCompare(rule.Annotations, other.Annotations); cmp != 0 { - return cmp - } - - return rule.Else.Compare(other.Else) -} - -// Copy returns a deep copy of rule. -func (rule *Rule) Copy() *Rule { - cpy := *rule - cpy.Head = rule.Head.Copy() - cpy.Body = rule.Body.Copy() - - cpy.Annotations = make([]*Annotations, len(rule.Annotations)) - for i, a := range rule.Annotations { - cpy.Annotations[i] = a.Copy(&cpy) - } - - if cpy.Else != nil { - cpy.Else = rule.Else.Copy() - } - return &cpy -} - -// Equal returns true if rule is equal to other. -func (rule *Rule) Equal(other *Rule) bool { - return rule.Compare(other) == 0 -} - -// Loc returns the location of the Rule in the definition. -func (rule *Rule) Loc() *Location { - if rule == nil { - return nil - } - return rule.Location -} - -// SetLoc sets the location on rule. -func (rule *Rule) SetLoc(loc *Location) { - rule.Location = loc -} - -// Path returns a ref referring to the document produced by this rule. If rule -// is not contained in a module, this function panics. -// Deprecated: Poor handling of ref rules. 
Use `(*Rule).Ref()` instead. -func (rule *Rule) Path() Ref { - if rule.Module == nil { - panic("assertion failed") - } - return rule.Module.Package.Path.Extend(rule.Head.Ref().GroundPrefix()) -} - -// Ref returns a ref referring to the document produced by this rule. If rule -// is not contained in a module, this function panics. The returned ref may -// contain variables in the last position. -func (rule *Rule) Ref() Ref { - if rule.Module == nil { - panic("assertion failed") - } - return rule.Module.Package.Path.Extend(rule.Head.Ref()) -} - -func (rule *Rule) String() string { - buf := []string{} - if rule.Default { - buf = append(buf, "default") - } - buf = append(buf, rule.Head.String()) - if !rule.Default { - buf = append(buf, "{") - buf = append(buf, rule.Body.String()) - buf = append(buf, "}") - } - if rule.Else != nil { - buf = append(buf, rule.Else.elseString()) - } - return strings.Join(buf, " ") -} - -func (rule *Rule) isFunction() bool { - return len(rule.Head.Args) > 0 -} - -func (rule *Rule) setJSONOptions(opts astJSON.Options) { - rule.jsonOptions = opts - if rule.Location != nil { - rule.Location.JSONOptions = opts - } -} - -func (rule *Rule) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "head": rule.Head, - "body": rule.Body, - } - - if rule.Default { - data["default"] = true - } - - if rule.Else != nil { - data["else"] = rule.Else - } - - if rule.jsonOptions.MarshalOptions.IncludeLocation.Rule { - if rule.Location != nil { - data["location"] = rule.Location - } - } - - if len(rule.Annotations) != 0 { - data["annotations"] = rule.Annotations - } - - return json.Marshal(data) -} - -func (rule *Rule) elseString() string { - var buf []string - - buf = append(buf, "else") - - value := rule.Head.Value - if value != nil { - buf = append(buf, "=") - buf = append(buf, value.String()) - } - - buf = append(buf, "{") - buf = append(buf, rule.Body.String()) - buf = append(buf, "}") - - if rule.Else != nil { - buf = append(buf, rule.Else.elseString()) - } - - return strings.Join(buf, " ") + return v1.IsValidImportPath(v) } // NewHead returns a new Head object. If args are provided, the first will be // used for the key and the second will be used for the value. func NewHead(name Var, args ...*Term) *Head { - head := &Head{ - Name: name, // backcompat - Reference: []*Term{NewTerm(name)}, - } - if len(args) == 0 { - return head - } - head.Key = args[0] - if len(args) == 1 { - return head - } - head.Value = args[1] - if head.Key != nil && head.Value != nil { - head.Reference = head.Reference.Append(args[0]) - } - return head + return v1.NewHead(name, args...) } // VarHead creates a head object, initializes its Name, Location, and Options, // and returns the new head. func VarHead(name Var, location *Location, jsonOpts *astJSON.Options) *Head { - h := NewHead(name) - h.Reference[0].Location = location - if jsonOpts != nil { - h.Reference[0].setJSONOptions(*jsonOpts) - } - return h + return v1.VarHead(name, location, jsonOpts) } // RefHead returns a new Head object with the passed Ref. If args are provided, // the first will be used for the value. func RefHead(ref Ref, args ...*Term) *Head { - head := &Head{} - head.SetRef(ref) - if len(ref) < 2 { - head.Name = ref[0].Value.(Var) - } - if len(args) >= 1 { - head.Value = args[0] - } - return head + return v1.RefHead(ref, args...) } // DocKind represents the collection of document types that can be produced by rules. 
@@ -897,1152 +188,48 @@ type DocKind int const ( // CompleteDoc represents a document that is completely defined by the rule. - CompleteDoc = iota + CompleteDoc = v1.CompleteDoc // PartialSetDoc represents a set document that is partially defined by the rule. - PartialSetDoc + PartialSetDoc = v1.PartialSetDoc // PartialObjectDoc represents an object document that is partially defined by the rule. - PartialObjectDoc -) // TODO(sr): Deprecate? - -// DocKind returns the type of document produced by this rule. -func (head *Head) DocKind() DocKind { - if head.Key != nil { - if head.Value != nil { - return PartialObjectDoc - } - return PartialSetDoc - } - return CompleteDoc -} + PartialObjectDoc = v1.PartialObjectDoc +) -type RuleKind int +type RuleKind = v1.RuleKind const ( - SingleValue = iota - MultiValue + SingleValue = v1.SingleValue + MultiValue = v1.MultiValue ) -// RuleKind returns the type of rule this is -func (head *Head) RuleKind() RuleKind { - // NOTE(sr): This is bit verbose, since the key is irrelevant for single vs - // multi value, but as good a spot as to assert the invariant. - switch { - case head.Value != nil: - return SingleValue - case head.Key != nil: - return MultiValue - default: - panic("unreachable") - } -} - -// Ref returns the Ref of the rule. If it doesn't have one, it's filled in -// via the Head's Name. -func (head *Head) Ref() Ref { - if len(head.Reference) > 0 { - return head.Reference - } - return Ref{&Term{Value: head.Name}} -} - -// SetRef can be used to set a rule head's Reference -func (head *Head) SetRef(r Ref) { - head.Reference = r -} - -// Compare returns an integer indicating whether head is less than, equal to, -// or greater than other. -func (head *Head) Compare(other *Head) int { - if head == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if head.Assign && !other.Assign { - return -1 - } else if !head.Assign && other.Assign { - return 1 - } - if cmp := Compare(head.Args, other.Args); cmp != 0 { - return cmp - } - if cmp := Compare(head.Reference, other.Reference); cmp != 0 { - return cmp - } - if cmp := Compare(head.Name, other.Name); cmp != 0 { - return cmp - } - if cmp := Compare(head.Key, other.Key); cmp != 0 { - return cmp - } - return Compare(head.Value, other.Value) -} - -// Copy returns a deep copy of head. -func (head *Head) Copy() *Head { - cpy := *head - cpy.Reference = head.Reference.Copy() - cpy.Args = head.Args.Copy() - cpy.Key = head.Key.Copy() - cpy.Value = head.Value.Copy() - cpy.keywords = nil - return &cpy -} - -// Equal returns true if this head equals other. 
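The hunk above collapses the Head constructors (NewHead, RefHead) and the DocKind/RuleKind constants into one-line forwards to github.com/open-policy-agent/opa/v1/ast, so existing callers of the v0 package keep compiling. Below is a minimal, self-contained sketch (not taken from this repository) of how those constructors and kind checks are typically used; the rule heads and printed results are made-up examples.

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// Partial set head, q["x"]: the single extra argument becomes the key.
	partial := ast.NewHead(ast.Var("q"), ast.StringTerm("x"))
	fmt.Println(partial.DocKind() == ast.PartialSetDoc) // true
	fmt.Println(partial.RuleKind() == ast.MultiValue)   // true

	// Ref-based head, a.b.c = true: the extra argument becomes the value.
	refHead := ast.RefHead(ast.MustParseRef("a.b.c"), ast.BooleanTerm(true))
	fmt.Println(refHead.RuleKind() == ast.SingleValue) // true
	fmt.Println(refHead)                               // a.b.c = true
}
```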
-func (head *Head) Equal(other *Head) bool { - return head.Compare(other) == 0 -} - -func (head *Head) String() string { - buf := strings.Builder{} - buf.WriteString(head.Ref().String()) - - switch { - case len(head.Args) != 0: - buf.WriteString(head.Args.String()) - case len(head.Reference) == 1 && head.Key != nil: - buf.WriteRune('[') - buf.WriteString(head.Key.String()) - buf.WriteRune(']') - } - if head.Value != nil { - if head.Assign { - buf.WriteString(" := ") - } else { - buf.WriteString(" = ") - } - buf.WriteString(head.Value.String()) - } else if head.Name == "" && head.Key != nil { - buf.WriteString(" contains ") - buf.WriteString(head.Key.String()) - } - return buf.String() -} - -func (head *Head) setJSONOptions(opts astJSON.Options) { - head.jsonOptions = opts - if head.Location != nil { - head.Location.JSONOptions = opts - } -} - -func (head *Head) MarshalJSON() ([]byte, error) { - var loc *Location - includeLoc := head.jsonOptions.MarshalOptions.IncludeLocation - if includeLoc.Head { - if head.Location != nil { - loc = head.Location - } - - for _, term := range head.Reference { - if term.Location != nil { - term.jsonOptions.MarshalOptions.IncludeLocation.Term = includeLoc.Term - } - } - } - - // NOTE(sr): we do this to override the rendering of `head.Reference`. - // It's still what'll be used via the default means of encoding/json - // for unmarshaling a json object into a Head struct! - type h Head - return json.Marshal(struct { - h - Ref Ref `json:"ref"` - Location *Location `json:"location,omitempty"` - }{ - h: h(*head), - Ref: head.Ref(), - Location: loc, - }) -} - -// Vars returns a set of vars found in the head. -func (head *Head) Vars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - // TODO: improve test coverage for this. - if head.Args != nil { - vis.Walk(head.Args) - } - if head.Key != nil { - vis.Walk(head.Key) - } - if head.Value != nil { - vis.Walk(head.Value) - } - if len(head.Reference) > 0 { - vis.Walk(head.Reference[1:]) - } - return vis.vars -} - -// Loc returns the Location of head. -func (head *Head) Loc() *Location { - if head == nil { - return nil - } - return head.Location -} - -// SetLoc sets the location on head. -func (head *Head) SetLoc(loc *Location) { - head.Location = loc -} - -func (head *Head) HasDynamicRef() bool { - pos := head.Reference.Dynamic() - // Ref is dynamic if it has one non-constant term that isn't the first or last term or if it's a partial set rule. - return pos > 0 && (pos < len(head.Reference)-1 || head.RuleKind() == MultiValue) -} - -// Copy returns a deep copy of a. -func (a Args) Copy() Args { - cpy := Args{} - for _, t := range a { - cpy = append(cpy, t.Copy()) - } - return cpy -} - -func (a Args) String() string { - buf := make([]string, 0, len(a)) - for _, t := range a { - buf = append(buf, t.String()) - } - return "(" + strings.Join(buf, ", ") + ")" -} - -// Loc returns the Location of a. -func (a Args) Loc() *Location { - if len(a) == 0 { - return nil - } - return a[0].Location -} - -// SetLoc sets the location on a. -func (a Args) SetLoc(loc *Location) { - if len(a) != 0 { - a[0].SetLocation(loc) - } -} - -// Vars returns a set of vars that appear in a. -func (a Args) Vars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - vis.Walk(a) - return vis.vars -} - // NewBody returns a new Body containing the given expressions. The indices of // the immediate expressions will be reset. 
func NewBody(exprs ...*Expr) Body { - for i, expr := range exprs { - expr.Index = i - } - return Body(exprs) -} - -// MarshalJSON returns JSON encoded bytes representing body. -func (body Body) MarshalJSON() ([]byte, error) { - // Serialize empty Body to empty array. This handles both the empty case and the - // nil case (whereas by default the result would be null if body was nil.) - if len(body) == 0 { - return []byte(`[]`), nil - } - ret, err := json.Marshal([]*Expr(body)) - return ret, err -} - -// Append adds the expr to the body and updates the expr's index accordingly. -func (body *Body) Append(expr *Expr) { - n := len(*body) - expr.Index = n - *body = append(*body, expr) -} - -// Set sets the expr in the body at the specified position and updates the -// expr's index accordingly. -func (body Body) Set(expr *Expr, pos int) { - body[pos] = expr - expr.Index = pos -} - -// Compare returns an integer indicating whether body is less than, equal to, -// or greater than other. -// -// If body is a subset of other, it is considered less than (and vice versa). -func (body Body) Compare(other Body) int { - minLen := len(body) - if len(other) < minLen { - minLen = len(other) - } - for i := 0; i < minLen; i++ { - if cmp := body[i].Compare(other[i]); cmp != 0 { - return cmp - } - } - if len(body) < len(other) { - return -1 - } - if len(other) < len(body) { - return 1 - } - return 0 -} - -// Copy returns a deep copy of body. -func (body Body) Copy() Body { - cpy := make(Body, len(body)) - for i := range body { - cpy[i] = body[i].Copy() - } - return cpy -} - -// Contains returns true if this body contains the given expression. -func (body Body) Contains(x *Expr) bool { - for _, e := range body { - if e.Equal(x) { - return true - } - } - return false -} - -// Equal returns true if this Body is equal to the other Body. -func (body Body) Equal(other Body) bool { - return body.Compare(other) == 0 -} - -// Hash returns the hash code for the Body. -func (body Body) Hash() int { - s := 0 - for _, e := range body { - s += e.Hash() - } - return s -} - -// IsGround returns true if all of the expressions in the Body are ground. -func (body Body) IsGround() bool { - for _, e := range body { - if !e.IsGround() { - return false - } - } - return true -} - -// Loc returns the location of the Body in the definition. -func (body Body) Loc() *Location { - if len(body) == 0 { - return nil - } - return body[0].Location -} - -// SetLoc sets the location on body. -func (body Body) SetLoc(loc *Location) { - if len(body) != 0 { - body[0].SetLocation(loc) - } -} - -func (body Body) String() string { - buf := make([]string, 0, len(body)) - for _, v := range body { - buf = append(buf, v.String()) - } - return strings.Join(buf, "; ") -} - -// Vars returns a VarSet containing variables in body. The params can be set to -// control which vars are included. -func (body Body) Vars(params VarVisitorParams) VarSet { - vis := NewVarVisitor().WithParams(params) - vis.Walk(body) - return vis.Vars() + return v1.NewBody(exprs...) } // NewExpr returns a new Expr object. func NewExpr(terms interface{}) *Expr { - switch terms.(type) { - case *SomeDecl, *Every, *Term, []*Term: // ok - default: - panic("unreachable") - } - return &Expr{ - Negated: false, - Terms: terms, - Index: 0, - With: nil, - } -} - -// Complement returns a copy of this expression with the negation flag flipped. 
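NewBody and NewExpr are likewise reduced to v1 wrappers. A short sketch of the documented behaviour (the indices of the immediate expressions are reset to their position in the body); it assumes the standard ast import and arbitrary example expressions.

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	e1 := ast.MustParseExpr("x = 1")
	e2 := ast.NewExpr(ast.BooleanTerm(true))

	// NewBody resets each expression's Index to its position in the body.
	body := ast.NewBody(e1, e2)
	fmt.Println(body)     // x = 1; true
	fmt.Println(e2.Index) // 1
}
```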
-func (expr *Expr) Complement() *Expr { - cpy := *expr - cpy.Negated = !cpy.Negated - return &cpy -} - -// Equal returns true if this Expr equals the other Expr. -func (expr *Expr) Equal(other *Expr) bool { - return expr.Compare(other) == 0 -} - -// Compare returns an integer indicating whether expr is less than, equal to, -// or greater than other. -// -// Expressions are compared as follows: -// -// 1. Declarations are always less than other expressions. -// 2. Preceding expression (by Index) is always less than the other expression. -// 3. Non-negated expressions are always less than negated expressions. -// 4. Single term expressions are always less than built-in expressions. -// -// Otherwise, the expression terms are compared normally. If both expressions -// have the same terms, the modifiers are compared. -func (expr *Expr) Compare(other *Expr) int { - - if expr == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - - o1 := expr.sortOrder() - o2 := other.sortOrder() - if o1 < o2 { - return -1 - } else if o2 < o1 { - return 1 - } - - switch { - case expr.Index < other.Index: - return -1 - case expr.Index > other.Index: - return 1 - } - - switch { - case expr.Negated && !other.Negated: - return 1 - case !expr.Negated && other.Negated: - return -1 - } - - switch t := expr.Terms.(type) { - case *Term: - if cmp := Compare(t.Value, other.Terms.(*Term).Value); cmp != 0 { - return cmp - } - case []*Term: - if cmp := termSliceCompare(t, other.Terms.([]*Term)); cmp != 0 { - return cmp - } - case *SomeDecl: - if cmp := Compare(t, other.Terms.(*SomeDecl)); cmp != 0 { - return cmp - } - case *Every: - if cmp := Compare(t, other.Terms.(*Every)); cmp != 0 { - return cmp - } - } - - return withSliceCompare(expr.With, other.With) -} - -func (expr *Expr) sortOrder() int { - switch expr.Terms.(type) { - case *SomeDecl: - return 0 - case *Term: - return 1 - case []*Term: - return 2 - case *Every: - return 3 - } - return -1 -} - -// CopyWithoutTerms returns a deep copy of expr without its Terms -func (expr *Expr) CopyWithoutTerms() *Expr { - cpy := *expr - - cpy.With = make([]*With, len(expr.With)) - for i := range expr.With { - cpy.With[i] = expr.With[i].Copy() - } - - return &cpy -} - -// Copy returns a deep copy of expr. -func (expr *Expr) Copy() *Expr { - - cpy := expr.CopyWithoutTerms() - - switch ts := expr.Terms.(type) { - case *SomeDecl: - cpy.Terms = ts.Copy() - case []*Term: - cpyTs := make([]*Term, len(ts)) - for i := range ts { - cpyTs[i] = ts[i].Copy() - } - cpy.Terms = cpyTs - case *Term: - cpy.Terms = ts.Copy() - case *Every: - cpy.Terms = ts.Copy() - } - - return cpy -} - -// Hash returns the hash code of the Expr. -func (expr *Expr) Hash() int { - s := expr.Index - switch ts := expr.Terms.(type) { - case *SomeDecl: - s += ts.Hash() - case []*Term: - for _, t := range ts { - s += t.Value.Hash() - } - case *Term: - s += ts.Value.Hash() - } - if expr.Negated { - s++ - } - for _, w := range expr.With { - s += w.Hash() - } - return s -} - -// IncludeWith returns a copy of expr with the with modifier appended. -func (expr *Expr) IncludeWith(target *Term, value *Term) *Expr { - cpy := *expr - cpy.With = append(cpy.With, &With{Target: target, Value: value}) - return &cpy -} - -// NoWith returns a copy of expr where the with modifier has been removed. -func (expr *Expr) NoWith() *Expr { - cpy := *expr - cpy.With = nil - return &cpy -} - -// IsEquality returns true if this is an equality expression. 
-func (expr *Expr) IsEquality() bool { - return isGlobalBuiltin(expr, Var(Equality.Name)) -} - -// IsAssignment returns true if this an assignment expression. -func (expr *Expr) IsAssignment() bool { - return isGlobalBuiltin(expr, Var(Assign.Name)) -} - -// IsCall returns true if this expression calls a function. -func (expr *Expr) IsCall() bool { - _, ok := expr.Terms.([]*Term) - return ok -} - -// IsEvery returns true if this expression is an 'every' expression. -func (expr *Expr) IsEvery() bool { - _, ok := expr.Terms.(*Every) - return ok -} - -// IsSome returns true if this expression is a 'some' expression. -func (expr *Expr) IsSome() bool { - _, ok := expr.Terms.(*SomeDecl) - return ok -} - -// Operator returns the name of the function or built-in this expression refers -// to. If this expression is not a function call, returns nil. -func (expr *Expr) Operator() Ref { - op := expr.OperatorTerm() - if op == nil { - return nil - } - return op.Value.(Ref) -} - -// OperatorTerm returns the name of the function or built-in this expression -// refers to. If this expression is not a function call, returns nil. -func (expr *Expr) OperatorTerm() *Term { - terms, ok := expr.Terms.([]*Term) - if !ok || len(terms) == 0 { - return nil - } - return terms[0] -} - -// Operand returns the term at the zero-based pos. If the expr does not include -// at least pos+1 terms, this function returns nil. -func (expr *Expr) Operand(pos int) *Term { - terms, ok := expr.Terms.([]*Term) - if !ok { - return nil - } - idx := pos + 1 - if idx < len(terms) { - return terms[idx] - } - return nil -} - -// Operands returns the built-in function operands. -func (expr *Expr) Operands() []*Term { - terms, ok := expr.Terms.([]*Term) - if !ok { - return nil - } - return terms[1:] -} - -// IsGround returns true if all of the expression terms are ground. -func (expr *Expr) IsGround() bool { - switch ts := expr.Terms.(type) { - case []*Term: - for _, t := range ts[1:] { - if !t.IsGround() { - return false - } - } - case *Term: - return ts.IsGround() - } - return true -} - -// SetOperator sets the expr's operator and returns the expr itself. If expr is -// not a call expr, this function will panic. -func (expr *Expr) SetOperator(term *Term) *Expr { - expr.Terms.([]*Term)[0] = term - return expr -} - -// SetLocation sets the expr's location and returns the expr itself. -func (expr *Expr) SetLocation(loc *Location) *Expr { - expr.Location = loc - return expr -} - -// Loc returns the Location of expr. -func (expr *Expr) Loc() *Location { - if expr == nil { - return nil - } - return expr.Location -} - -// SetLoc sets the location on expr. 
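The Expr accessors deleted in this hunk (IsCall, Operator, Operand, Operands, IsEquality, and so on) remain available on the aliased v1 type. A small sketch of how an expression is typically inspected; the parsed query is an arbitrary example and the output comments are expectations.

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	expr := ast.MustParseExpr("count(input.users) > 0")

	fmt.Println(expr.IsCall())        // true
	fmt.Println(expr.Operator())      // gt
	fmt.Println(expr.Operand(0))      // count(input.users)
	fmt.Println(len(expr.Operands())) // 2
	fmt.Println(expr.IsEquality())    // false
}
```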
-func (expr *Expr) SetLoc(loc *Location) { - expr.SetLocation(loc) -} - -func (expr *Expr) String() string { - buf := make([]string, 0, 2+len(expr.With)) - if expr.Negated { - buf = append(buf, "not") - } - switch t := expr.Terms.(type) { - case []*Term: - if expr.IsEquality() && validEqAssignArgCount(expr) { - buf = append(buf, fmt.Sprintf("%v %v %v", t[1], Equality.Infix, t[2])) - } else { - buf = append(buf, Call(t).String()) - } - case fmt.Stringer: - buf = append(buf, t.String()) - } - - for i := range expr.With { - buf = append(buf, expr.With[i].String()) - } - - return strings.Join(buf, " ") -} - -func (expr *Expr) setJSONOptions(opts astJSON.Options) { - expr.jsonOptions = opts - if expr.Location != nil { - expr.Location.JSONOptions = opts - } -} - -func (expr *Expr) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "terms": expr.Terms, - "index": expr.Index, - } - - if len(expr.With) > 0 { - data["with"] = expr.With - } - - if expr.Generated { - data["generated"] = true - } - - if expr.Negated { - data["negated"] = true - } - - if expr.jsonOptions.MarshalOptions.IncludeLocation.Expr { - if expr.Location != nil { - data["location"] = expr.Location - } - } - - return json.Marshal(data) -} - -// UnmarshalJSON parses the byte array and stores the result in expr. -func (expr *Expr) UnmarshalJSON(bs []byte) error { - v := map[string]interface{}{} - if err := util.UnmarshalJSON(bs, &v); err != nil { - return err - } - return unmarshalExpr(expr, v) -} - -// Vars returns a VarSet containing variables in expr. The params can be set to -// control which vars are included. -func (expr *Expr) Vars(params VarVisitorParams) VarSet { - vis := NewVarVisitor().WithParams(params) - vis.Walk(expr) - return vis.Vars() + return v1.NewExpr(terms) } // NewBuiltinExpr creates a new Expr object with the supplied terms. // The builtin operator must be the first term. func NewBuiltinExpr(terms ...*Term) *Expr { - return &Expr{Terms: terms} -} - -func (expr *Expr) CogeneratedExprs() []*Expr { - visited := map[*Expr]struct{}{} - visitCogeneratedExprs(expr, func(e *Expr) bool { - if expr.Equal(e) { - return true - } - if _, ok := visited[e]; ok { - return true - } - visited[e] = struct{}{} - return false - }) - - result := make([]*Expr, 0, len(visited)) - for e := range visited { - result = append(result, e) - } - return result -} - -func (expr *Expr) BaseCogeneratedExpr() *Expr { - if expr.generatedFrom == nil { - return expr - } - return expr.generatedFrom.BaseCogeneratedExpr() -} - -func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) { - if parent := expr.generatedFrom; parent != nil { - if stop := f(parent); !stop { - visitCogeneratedExprs(parent, f) - } - } - for _, child := range expr.generates { - if stop := f(child); !stop { - visitCogeneratedExprs(child, f) - } - } -} - -func (d *SomeDecl) String() string { - if call, ok := d.Symbols[0].Value.(Call); ok { - if len(call) == 4 { - return "some " + call[1].String() + ", " + call[2].String() + " in " + call[3].String() - } - return "some " + call[1].String() + " in " + call[2].String() - } - buf := make([]string, len(d.Symbols)) - for i := range buf { - buf[i] = d.Symbols[i].String() - } - return "some " + strings.Join(buf, ", ") -} - -// SetLoc sets the Location on d. -func (d *SomeDecl) SetLoc(loc *Location) { - d.Location = loc -} - -// Loc returns the Location of d. -func (d *SomeDecl) Loc() *Location { - return d.Location -} - -// Copy returns a deep copy of d. 
-func (d *SomeDecl) Copy() *SomeDecl { - cpy := *d - cpy.Symbols = termSliceCopy(d.Symbols) - return &cpy -} - -// Compare returns an integer indicating whether d is less than, equal to, or -// greater than other. -func (d *SomeDecl) Compare(other *SomeDecl) int { - return termSliceCompare(d.Symbols, other.Symbols) -} - -// Hash returns a hash code of d. -func (d *SomeDecl) Hash() int { - return termSliceHash(d.Symbols) -} - -func (d *SomeDecl) setJSONOptions(opts astJSON.Options) { - d.jsonOptions = opts - if d.Location != nil { - d.Location.JSONOptions = opts - } -} - -func (d *SomeDecl) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "symbols": d.Symbols, - } - - if d.jsonOptions.MarshalOptions.IncludeLocation.SomeDecl { - if d.Location != nil { - data["location"] = d.Location - } - } - - return json.Marshal(data) -} - -func (q *Every) String() string { - if q.Key != nil { - return fmt.Sprintf("every %s, %s in %s { %s }", - q.Key, - q.Value, - q.Domain, - q.Body) - } - return fmt.Sprintf("every %s in %s { %s }", - q.Value, - q.Domain, - q.Body) -} - -func (q *Every) Loc() *Location { - return q.Location -} - -func (q *Every) SetLoc(l *Location) { - q.Location = l -} - -// Copy returns a deep copy of d. -func (q *Every) Copy() *Every { - cpy := *q - cpy.Key = q.Key.Copy() - cpy.Value = q.Value.Copy() - cpy.Domain = q.Domain.Copy() - cpy.Body = q.Body.Copy() - return &cpy -} - -func (q *Every) Compare(other *Every) int { - for _, terms := range [][2]*Term{ - {q.Key, other.Key}, - {q.Value, other.Value}, - {q.Domain, other.Domain}, - } { - if d := Compare(terms[0], terms[1]); d != 0 { - return d - } - } - return q.Body.Compare(other.Body) -} - -// KeyValueVars returns the key and val arguments of an `every` -// expression, if they are non-nil and not wildcards. -func (q *Every) KeyValueVars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - if q.Key != nil { - vis.Walk(q.Key) - } - vis.Walk(q.Value) - return vis.vars -} - -func (q *Every) setJSONOptions(opts astJSON.Options) { - q.jsonOptions = opts - if q.Location != nil { - q.Location.JSONOptions = opts - } -} - -func (q *Every) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "key": q.Key, - "value": q.Value, - "domain": q.Domain, - "body": q.Body, - } - - if q.jsonOptions.MarshalOptions.IncludeLocation.Every { - if q.Location != nil { - data["location"] = q.Location - } - } - - return json.Marshal(data) -} - -func (w *With) String() string { - return "with " + w.Target.String() + " as " + w.Value.String() -} - -// Equal returns true if this With is equals the other With. -func (w *With) Equal(other *With) bool { - return Compare(w, other) == 0 -} - -// Compare returns an integer indicating whether w is less than, equal to, or -// greater than other. -func (w *With) Compare(other *With) int { - if w == nil { - if other == nil { - return 0 - } - return -1 - } else if other == nil { - return 1 - } - if cmp := Compare(w.Target, other.Target); cmp != 0 { - return cmp - } - return Compare(w.Value, other.Value) -} - -// Copy returns a deep copy of w. -func (w *With) Copy() *With { - cpy := *w - cpy.Value = w.Value.Copy() - cpy.Target = w.Target.Copy() - return &cpy -} - -// Hash returns the hash code of the With. -func (w With) Hash() int { - return w.Target.Hash() + w.Value.Hash() -} - -// SetLocation sets the location on w. -func (w *With) SetLocation(loc *Location) *With { - w.Location = loc - return w -} - -// Loc returns the Location of w. 
-func (w *With) Loc() *Location { - if w == nil { - return nil - } - return w.Location -} - -// SetLoc sets the location on w. -func (w *With) SetLoc(loc *Location) { - w.Location = loc -} - -func (w *With) setJSONOptions(opts astJSON.Options) { - w.jsonOptions = opts - if w.Location != nil { - w.Location.JSONOptions = opts - } -} - -func (w *With) MarshalJSON() ([]byte, error) { - data := map[string]interface{}{ - "target": w.Target, - "value": w.Value, - } - - if w.jsonOptions.MarshalOptions.IncludeLocation.With { - if w.Location != nil { - data["location"] = w.Location - } - } - - return json.Marshal(data) + return v1.NewBuiltinExpr(terms...) } // Copy returns a deep copy of the AST node x. If x is not an AST node, x is returned unmodified. func Copy(x interface{}) interface{} { - switch x := x.(type) { - case *Module: - return x.Copy() - case *Package: - return x.Copy() - case *Import: - return x.Copy() - case *Rule: - return x.Copy() - case *Head: - return x.Copy() - case Args: - return x.Copy() - case Body: - return x.Copy() - case *Expr: - return x.Copy() - case *With: - return x.Copy() - case *SomeDecl: - return x.Copy() - case *Every: - return x.Copy() - case *Term: - return x.Copy() - case *ArrayComprehension: - return x.Copy() - case *SetComprehension: - return x.Copy() - case *ObjectComprehension: - return x.Copy() - case Set: - return x.Copy() - case *object: - return x.Copy() - case *Array: - return x.Copy() - case Ref: - return x.Copy() - case Call: - return x.Copy() - case *Comment: - return x.Copy() - } - return x + return v1.Copy(x) } // RuleSet represents a collection of rules that produce a virtual document. -type RuleSet []*Rule +type RuleSet = v1.RuleSet // NewRuleSet returns a new RuleSet containing the given rules. func NewRuleSet(rules ...*Rule) RuleSet { - rs := make(RuleSet, 0, len(rules)) - for _, rule := range rules { - rs.Add(rule) - } - return rs -} - -// Add inserts the rule into rs. -func (rs *RuleSet) Add(rule *Rule) { - for _, exist := range *rs { - if exist.Equal(rule) { - return - } - } - *rs = append(*rs, rule) -} - -// Contains returns true if rs contains rule. -func (rs RuleSet) Contains(rule *Rule) bool { - for i := range rs { - if rs[i].Equal(rule) { - return true - } - } - return false -} - -// Diff returns a new RuleSet containing rules in rs that are not in other. -func (rs RuleSet) Diff(other RuleSet) RuleSet { - result := NewRuleSet() - for i := range rs { - if !other.Contains(rs[i]) { - result.Add(rs[i]) - } - } - return result -} - -// Equal returns true if rs equals other. -func (rs RuleSet) Equal(other RuleSet) bool { - return len(rs.Diff(other)) == 0 && len(other.Diff(rs)) == 0 -} - -// Merge returns a ruleset containing the union of rules from rs an other. -func (rs RuleSet) Merge(other RuleSet) RuleSet { - result := NewRuleSet() - for i := range rs { - result.Add(rs[i]) - } - for i := range other { - result.Add(other[i]) - } - return result -} - -func (rs RuleSet) String() string { - buf := make([]string, 0, len(rs)) - for _, rule := range rs { - buf = append(buf, rule.String()) - } - return "{" + strings.Join(buf, ", ") + "}" -} - -// Returns true if the equality or assignment expression referred to by expr -// has a valid number of arguments. -func validEqAssignArgCount(expr *Expr) bool { - return len(expr.Operands()) == 2 -} - -// this function checks if the expr refers to a non-namespaced (global) built-in -// function like eq, gt, plus, etc. 
-func isGlobalBuiltin(expr *Expr, name Var) bool { - terms, ok := expr.Terms.([]*Term) - if !ok { - return false - } - - // NOTE(tsandall): do not use Term#Equal or Value#Compare to avoid - // allocation here. - ref, ok := terms[0].Value.(Ref) - if !ok || len(ref) != 1 { - return false - } - if head, ok := ref[0].Value.(Var); ok { - return head.Equal(name) - } - return false + return v1.NewRuleSet(rules...) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/pretty.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/pretty.go index b4f05ad50..f2b8104e0 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/pretty.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/pretty.go @@ -5,78 +5,14 @@ package ast import ( - "fmt" "io" - "strings" + + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Pretty writes a pretty representation of the AST rooted at x to w. // // This is function is intended for debug purposes when inspecting ASTs. func Pretty(w io.Writer, x interface{}) { - pp := &prettyPrinter{ - depth: -1, - w: w, - } - NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x) -} - -type prettyPrinter struct { - depth int - w io.Writer -} - -func (pp *prettyPrinter) Before(x interface{}) bool { - switch x.(type) { - case *Term: - default: - pp.depth++ - } - - switch x := x.(type) { - case *Term: - return false - case Args: - if len(x) == 0 { - return false - } - pp.writeType(x) - case *Expr: - extras := []string{} - if x.Negated { - extras = append(extras, "negated") - } - extras = append(extras, fmt.Sprintf("index=%d", x.Index)) - pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " ")) - case Null, Boolean, Number, String, Var: - pp.writeValue(x) - default: - pp.writeType(x) - } - return false -} - -func (pp *prettyPrinter) After(x interface{}) { - switch x.(type) { - case *Term: - default: - pp.depth-- - } -} - -func (pp *prettyPrinter) writeValue(x interface{}) { - pp.writeIndent(fmt.Sprint(x)) -} - -func (pp *prettyPrinter) writeType(x interface{}) { - pp.writeIndent(TypeName(x)) -} - -func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) { - pad := strings.Repeat(" ", pp.depth) - pp.write(pad+f, a...) -} - -func (pp *prettyPrinter) write(f string, a ...interface{}) { - fmt.Fprintf(pp.w, f+"\n", a...) + v1.Pretty(w, x) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/schema.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/schema.go index 8c96ac624..979958a3c 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/schema.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/schema.go @@ -5,59 +5,13 @@ package ast import ( - "fmt" - - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // SchemaSet holds a map from a path to a schema. -type SchemaSet struct { - m *util.HashMap -} +type SchemaSet = v1.SchemaSet // NewSchemaSet returns an empty SchemaSet. func NewSchemaSet() *SchemaSet { - - eqFunc := func(a, b util.T) bool { - return a.(Ref).Equal(b.(Ref)) - } - - hashFunc := func(x util.T) int { return x.(Ref).Hash() } - - return &SchemaSet{ - m: util.NewHashMap(eqFunc, hashFunc), - } -} - -// Put inserts a raw schema into the set. -func (ss *SchemaSet) Put(path Ref, raw interface{}) { - ss.m.Put(path, raw) -} - -// Get returns the raw schema identified by the path. 
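Pretty and SchemaSet follow the same pattern: the v0 symbols now forward to v1. The sketch below shows typical use of both; the schema path and the raw schema map are invented placeholders.

```go
package main

import (
	"fmt"
	"os"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// Pretty writes an indented dump of the AST rooted at the given node.
	ast.Pretty(os.Stdout, ast.MustParseTerm(`{"a": [1, 2]}`))

	// SchemaSet maps a path (a Ref) to a raw schema document.
	ss := ast.NewSchemaSet()
	path := ast.MustParseRef("schema.input")
	ss.Put(path, map[string]interface{}{"type": "object"})
	fmt.Println(ss.Get(path)) // map[type:object]
}
```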
-func (ss *SchemaSet) Get(path Ref) interface{} { - if ss == nil { - return nil - } - x, ok := ss.m.Get(path) - if !ok { - return nil - } - return x -} - -func loadSchema(raw interface{}, allowNet []string) (types.Type, error) { - - jsonSchema, err := compileSchema(raw, allowNet) - if err != nil { - return nil, err - } - - tpe, err := newSchemaParser().parseSchema(jsonSchema.RootSchema) - if err != nil { - return nil, fmt.Errorf("type checking: %w", err) - } - - return tpe, nil + return v1.NewSchemaSet() } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/strings.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/strings.go index e489f6977..ef9354bf7 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/strings.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/strings.go @@ -5,14 +5,10 @@ package ast import ( - "reflect" - "strings" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // TypeName returns a human readable name for the AST element type. func TypeName(x interface{}) string { - if _, ok := x.(*lazyObj); ok { - return "object" - } - return strings.ToLower(reflect.Indirect(reflect.ValueOf(x)).Type().Name()) + return v1.TypeName(x) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/term.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/term.go index 4664bc5da..a5d146ea2 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/term.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/term.go @@ -1,40 +1,22 @@ -// Copyright 2016 The OPA Authors. All rights reserved. +// Copyright 2024 The OPA Authors. All rights reserved. // Use of this source code is governed by an Apache2 // license that can be found in the LICENSE file. -// nolint: deadcode // Public API. package ast import ( - "bytes" "encoding/json" - "errors" - "fmt" "io" - "math" - "math/big" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "sync" - - "github.com/OneOfOne/xxhash" - - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/ast/location" - "github.com/open-policy-agent/opa/util" -) -var errFindNotFound = fmt.Errorf("find: not found") + v1 "github.com/open-policy-agent/opa/v1/ast" +) // Location records a position in source code. -type Location = location.Location +type Location = v1.Location // NewLocation returns a new Location object. func NewLocation(text []byte, file string, row int, col int) *Location { - return location.NewLocation(text, file, row, col) + return v1.NewLocation(text, file, row, col) } // Value declares the common interface for all Term values. Every kind of Term value @@ -45,226 +27,58 @@ func NewLocation(text []byte, file string, row int, col int) *Location { // - Variables, References // - Array, Set, and Object Comprehensions // - Calls -type Value interface { - Compare(other Value) int // Compare returns <0, 0, or >0 if this Value is less than, equal to, or greater than other, respectively. - Find(path Ref) (Value, error) // Find returns value referred to by path or an error if path is not found. - Hash() int // Returns hash code of the value. - IsGround() bool // IsGround returns true if this value is not a variable or contains no variables. - String() string // String returns a human readable string representation of the value. -} +type Value = v1.Value // InterfaceToValue converts a native Go value x to a Value. 
func InterfaceToValue(x interface{}) (Value, error) { - switch x := x.(type) { - case nil: - return Null{}, nil - case bool: - return Boolean(x), nil - case json.Number: - return Number(x), nil - case int64: - return int64Number(x), nil - case uint64: - return uint64Number(x), nil - case float64: - return floatNumber(x), nil - case int: - return intNumber(x), nil - case string: - return String(x), nil - case []interface{}: - r := make([]*Term, len(x)) - for i, e := range x { - e, err := InterfaceToValue(e) - if err != nil { - return nil, err - } - r[i] = &Term{Value: e} - } - return NewArray(r...), nil - case map[string]interface{}: - r := newobject(len(x)) - for k, v := range x { - k, err := InterfaceToValue(k) - if err != nil { - return nil, err - } - v, err := InterfaceToValue(v) - if err != nil { - return nil, err - } - r.Insert(NewTerm(k), NewTerm(v)) - } - return r, nil - case map[string]string: - r := newobject(len(x)) - for k, v := range x { - k, err := InterfaceToValue(k) - if err != nil { - return nil, err - } - v, err := InterfaceToValue(v) - if err != nil { - return nil, err - } - r.Insert(NewTerm(k), NewTerm(v)) - } - return r, nil - default: - ptr := util.Reference(x) - if err := util.RoundTrip(ptr); err != nil { - return nil, fmt.Errorf("ast: interface conversion: %w", err) - } - return InterfaceToValue(*ptr) - } + return v1.InterfaceToValue(x) } // ValueFromReader returns an AST value from a JSON serialized value in the reader. func ValueFromReader(r io.Reader) (Value, error) { - var x interface{} - if err := util.NewJSONDecoder(r).Decode(&x); err != nil { - return nil, err - } - return InterfaceToValue(x) + return v1.ValueFromReader(r) } // As converts v into a Go native type referred to by x. func As(v Value, x interface{}) error { - return util.NewJSONDecoder(bytes.NewBufferString(v.String())).Decode(x) + return v1.As(v, x) } // Resolver defines the interface for resolving references to native Go values. -type Resolver interface { - Resolve(Ref) (interface{}, error) -} +type Resolver = v1.Resolver // ValueResolver defines the interface for resolving references to AST values. -type ValueResolver interface { - Resolve(Ref) (Value, error) -} +type ValueResolver = v1.ValueResolver // UnknownValueErr indicates a ValueResolver was unable to resolve a reference // because the reference refers to an unknown value. -type UnknownValueErr struct{} - -func (UnknownValueErr) Error() string { - return "unknown value" -} +type UnknownValueErr = v1.UnknownValueErr // IsUnknownValueErr returns true if the err is an UnknownValueErr. func IsUnknownValueErr(err error) bool { - _, ok := err.(UnknownValueErr) - return ok -} - -type illegalResolver struct{} - -func (illegalResolver) Resolve(ref Ref) (interface{}, error) { - return nil, fmt.Errorf("illegal value: %v", ref) + return v1.IsUnknownValueErr(err) } // ValueToInterface returns the Go representation of an AST value. The AST // value should not contain any values that require evaluation (e.g., vars, // comprehensions, etc.) 
func ValueToInterface(v Value, resolver Resolver) (interface{}, error) { - return valueToInterface(v, resolver, JSONOpt{}) -} - -func valueToInterface(v Value, resolver Resolver, opt JSONOpt) (interface{}, error) { - switch v := v.(type) { - case Null: - return nil, nil - case Boolean: - return bool(v), nil - case Number: - return json.Number(v), nil - case String: - return string(v), nil - case *Array: - buf := []interface{}{} - for i := 0; i < v.Len(); i++ { - x1, err := valueToInterface(v.Elem(i).Value, resolver, opt) - if err != nil { - return nil, err - } - buf = append(buf, x1) - } - return buf, nil - case *object: - buf := make(map[string]interface{}, v.Len()) - err := v.Iter(func(k, v *Term) error { - ki, err := valueToInterface(k.Value, resolver, opt) - if err != nil { - return err - } - var str string - var ok bool - if str, ok = ki.(string); !ok { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(ki); err != nil { - return err - } - str = strings.TrimSpace(buf.String()) - } - vi, err := valueToInterface(v.Value, resolver, opt) - if err != nil { - return err - } - buf[str] = vi - return nil - }) - if err != nil { - return nil, err - } - return buf, nil - case *lazyObj: - if opt.CopyMaps { - return valueToInterface(v.force(), resolver, opt) - } - return v.native, nil - case Set: - buf := []interface{}{} - iter := func(x *Term) error { - x1, err := valueToInterface(x.Value, resolver, opt) - if err != nil { - return err - } - buf = append(buf, x1) - return nil - } - var err error - if opt.SortSets { - err = v.Sorted().Iter(iter) - } else { - err = v.Iter(iter) - } - if err != nil { - return nil, err - } - return buf, nil - case Ref: - return resolver.Resolve(v) - default: - return nil, fmt.Errorf("%v requires evaluation", TypeName(v)) - } + return v1.ValueToInterface(v, resolver) } // JSON returns the JSON representation of v. The value must not contain any // refs or terms that require evaluation (e.g., vars, comprehensions, etc.) func JSON(v Value) (interface{}, error) { - return JSONWithOpt(v, JSONOpt{}) + return v1.JSON(v) } // JSONOpt defines parameters for AST to JSON conversion. -type JSONOpt struct { - SortSets bool // sort sets before serializing (this makes conversion more expensive) - CopyMaps bool // enforces copying of map[string]interface{} read from the store -} +type JSONOpt = v1.JSONOpt // JSONWithOpt returns the JSON representation of v. The value must not contain any // refs or terms that require evaluation (e.g., vars, comprehensions, etc.) func JSONWithOpt(v Value, opt JSONOpt) (interface{}, error) { - return valueToInterface(v, illegalResolver{}, opt) + return v1.JSONWithOpt(v, opt) } // MustJSON returns the JSON representation of v. The value must not contain any @@ -272,2982 +86,221 @@ func JSONWithOpt(v Value, opt JSONOpt) (interface{}, error) { // the conversion fails, this function will panic. This function is mostly for // test purposes. func MustJSON(v Value) interface{} { - r, err := JSON(v) - if err != nil { - panic(err) - } - return r + return v1.MustJSON(v) } // MustInterfaceToValue converts a native Go value x to a Value. If the // conversion fails, this function will panic. This function is mostly for test // purposes. func MustInterfaceToValue(x interface{}) Value { - v, err := InterfaceToValue(x) - if err != nil { - panic(err) - } - return v + return v1.MustInterfaceToValue(x) } // Term is an argument to a function. 
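The conversion helpers above (InterfaceToValue, ValueToInterface, JSON, JSONWithOpt) keep their documented contracts through the v1 aliases. A minimal round-trip sketch with an arbitrary input value:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	// Native Go value -> AST value.
	v, err := ast.InterfaceToValue(map[string]interface{}{"users": []interface{}{"alice", "bob"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // {"users": ["alice", "bob"]}

	// AST value -> native Go value. The value must not contain vars, refs,
	// comprehensions, or anything else that requires evaluation.
	x, err := ast.JSON(v)
	if err != nil {
		panic(err)
	}
	fmt.Println(x) // map[users:[alice bob]]
}
```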
-type Term struct { - Value Value `json:"value"` // the value of the Term as represented in Go - Location *Location `json:"location,omitempty"` // the location of the Term in the source - - jsonOptions astJSON.Options -} +type Term = v1.Term // NewTerm returns a new Term object. func NewTerm(v Value) *Term { - return &Term{ - Value: v, - } -} - -// SetLocation updates the term's Location and returns the term itself. -func (term *Term) SetLocation(loc *Location) *Term { - term.Location = loc - return term -} - -// Loc returns the Location of term. -func (term *Term) Loc() *Location { - if term == nil { - return nil - } - return term.Location -} - -// SetLoc sets the location on term. -func (term *Term) SetLoc(loc *Location) { - term.SetLocation(loc) -} - -// Copy returns a deep copy of term. -func (term *Term) Copy() *Term { - - if term == nil { - return nil - } - - cpy := *term - - switch v := term.Value.(type) { - case Null, Boolean, Number, String, Var: - cpy.Value = v - case Ref: - cpy.Value = v.Copy() - case *Array: - cpy.Value = v.Copy() - case Set: - cpy.Value = v.Copy() - case *object: - cpy.Value = v.Copy() - case *ArrayComprehension: - cpy.Value = v.Copy() - case *ObjectComprehension: - cpy.Value = v.Copy() - case *SetComprehension: - cpy.Value = v.Copy() - case Call: - cpy.Value = v.Copy() - } - - return &cpy -} - -// Equal returns true if this term equals the other term. Equality is -// defined for each kind of term. -func (term *Term) Equal(other *Term) bool { - if term == nil && other != nil { - return false - } - if term != nil && other == nil { - return false - } - if term == other { - return true - } - - // TODO(tsandall): This early-exit avoids allocations for types that have - // Equal() functions that just use == underneath. We should revisit the - // other types and implement Equal() functions that do not require - // allocations. - switch v := term.Value.(type) { - case Null: - return v.Equal(other.Value) - case Boolean: - return v.Equal(other.Value) - case Number: - return v.Equal(other.Value) - case String: - return v.Equal(other.Value) - case Var: - return v.Equal(other.Value) - } - - return term.Value.Compare(other.Value) == 0 -} - -// Get returns a value referred to by name from the term. -func (term *Term) Get(name *Term) *Term { - switch v := term.Value.(type) { - case *object: - return v.Get(name) - case *Array: - return v.Get(name) - case interface { - Get(*Term) *Term - }: - return v.Get(name) - case Set: - if v.Contains(name) { - return name - } - } - return nil -} - -// Hash returns the hash code of the Term's Value. Its Location -// is ignored. -func (term *Term) Hash() int { - return term.Value.Hash() -} - -// IsGround returns true if this term's Value is ground. -func (term *Term) IsGround() bool { - return term.Value.IsGround() -} - -func (term *Term) setJSONOptions(opts astJSON.Options) { - term.jsonOptions = opts - if term.Location != nil { - term.Location.JSONOptions = opts - } -} - -// MarshalJSON returns the JSON encoding of the term. -// -// Specialized marshalling logic is required to include a type hint for Value. 
-func (term *Term) MarshalJSON() ([]byte, error) { - d := map[string]interface{}{ - "type": TypeName(term.Value), - "value": term.Value, - } - if term.jsonOptions.MarshalOptions.IncludeLocation.Term { - if term.Location != nil { - d["location"] = term.Location - } - } - return json.Marshal(d) -} - -func (term *Term) String() string { - return term.Value.String() -} - -// UnmarshalJSON parses the byte array and stores the result in term. -// Specialized unmarshalling is required to handle Value and Location. -func (term *Term) UnmarshalJSON(bs []byte) error { - v := map[string]interface{}{} - if err := util.UnmarshalJSON(bs, &v); err != nil { - return err - } - val, err := unmarshalValue(v) - if err != nil { - return err - } - term.Value = val - - if loc, ok := v["location"].(map[string]interface{}); ok { - term.Location = &Location{} - err := unmarshalLocation(term.Location, loc) - if err != nil { - return err - } - } - return nil -} - -// Vars returns a VarSet with variables contained in this term. -func (term *Term) Vars() VarSet { - vis := &VarVisitor{vars: VarSet{}} - vis.Walk(term) - return vis.vars + return v1.NewTerm(v) } // IsConstant returns true if the AST value is constant. func IsConstant(v Value) bool { - found := false - vis := GenericVisitor{ - func(x interface{}) bool { - switch x.(type) { - case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: - found = true - return true - } - return false - }, - } - vis.Walk(v) - return !found + return v1.IsConstant(v) } // IsComprehension returns true if the supplied value is a comprehension. func IsComprehension(x Value) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return true - } - return false + return v1.IsComprehension(x) } // ContainsRefs returns true if the Value v contains refs. func ContainsRefs(v interface{}) bool { - found := false - WalkRefs(v, func(Ref) bool { - found = true - return found - }) - return found + return v1.ContainsRefs(v) } // ContainsComprehensions returns true if the Value v contains comprehensions. func ContainsComprehensions(v interface{}) bool { - found := false - WalkClosures(v, func(x interface{}) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - found = true - return found - } - return found - }) - return found + return v1.ContainsComprehensions(v) } // ContainsClosures returns true if the Value v contains closures. func ContainsClosures(v interface{}) bool { - found := false - WalkClosures(v, func(x interface{}) bool { - switch x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: - found = true - return found - } - return found - }) - return found + return v1.ContainsClosures(v) } // IsScalar returns true if the AST value is a scalar. func IsScalar(v Value) bool { - switch v.(type) { - case String: - return true - case Number: - return true - case Boolean: - return true - case Null: - return true - } - return false + return v1.IsScalar(v) } // Null represents the null value defined by JSON. -type Null struct{} +type Null = v1.Null // NullTerm creates a new Term with a Null value. func NullTerm() *Term { - return &Term{Value: Null{}} -} - -// Equal returns true if the other term Value is also Null. -func (null Null) Equal(other Value) bool { - switch other.(type) { - case Null: - return true - default: - return false - } -} - -// Compare compares null to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. 
-func (null Null) Compare(other Value) int { - return Compare(null, other) -} - -// Find returns the current value or a not found error. -func (null Null) Find(path Ref) (Value, error) { - if len(path) == 0 { - return null, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (null Null) Hash() int { - return 0 -} - -// IsGround always returns true. -func (Null) IsGround() bool { - return true -} - -func (null Null) String() string { - return "null" + return v1.NullTerm() } // Boolean represents a boolean value defined by JSON. -type Boolean bool +type Boolean = v1.Boolean // BooleanTerm creates a new Term with a Boolean value. func BooleanTerm(b bool) *Term { - return &Term{Value: Boolean(b)} -} - -// Equal returns true if the other Value is a Boolean and is equal. -func (bol Boolean) Equal(other Value) bool { - switch other := other.(type) { - case Boolean: - return bol == other - default: - return false - } -} - -// Compare compares bol to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (bol Boolean) Compare(other Value) int { - return Compare(bol, other) -} - -// Find returns the current value or a not found error. -func (bol Boolean) Find(path Ref) (Value, error) { - if len(path) == 0 { - return bol, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (bol Boolean) Hash() int { - if bol { - return 1 - } - return 0 -} - -// IsGround always returns true. -func (Boolean) IsGround() bool { - return true -} - -func (bol Boolean) String() string { - return strconv.FormatBool(bool(bol)) + return v1.BooleanTerm(b) } // Number represents a numeric value as defined by JSON. -type Number json.Number +type Number = v1.Number // NumberTerm creates a new Term with a Number value. func NumberTerm(n json.Number) *Term { - return &Term{Value: Number(n)} + return v1.NumberTerm(n) } // IntNumberTerm creates a new Term with an integer Number value. func IntNumberTerm(i int) *Term { - return &Term{Value: Number(strconv.Itoa(i))} + return v1.IntNumberTerm(i) } // UIntNumberTerm creates a new Term with an unsigned integer Number value. func UIntNumberTerm(u uint64) *Term { - return &Term{Value: uint64Number(u)} + return v1.UIntNumberTerm(u) } // FloatNumberTerm creates a new Term with a floating point Number value. func FloatNumberTerm(f float64) *Term { - s := strconv.FormatFloat(f, 'g', -1, 64) - return &Term{Value: Number(s)} -} - -// Equal returns true if the other Value is a Number and is equal. -func (num Number) Equal(other Value) bool { - switch other := other.(type) { - case Number: - return Compare(num, other) == 0 - default: - return false - } -} - -// Compare compares num to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (num Number) Compare(other Value) int { - return Compare(num, other) -} - -// Find returns the current value or a not found error. -func (num Number) Find(path Ref) (Value, error) { - if len(path) == 0 { - return num, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (num Number) Hash() int { - f, err := json.Number(num).Float64() - if err != nil { - bs := []byte(num) - h := xxhash.Checksum64(bs) - return int(h) - } - return int(f) -} - -// Int returns the int representation of num if possible. -func (num Number) Int() (int, bool) { - i64, ok := num.Int64() - return int(i64), ok -} - -// Int64 returns the int64 representation of num if possible. 
-func (num Number) Int64() (int64, bool) { - i, err := json.Number(num).Int64() - if err != nil { - return 0, false - } - return i, true -} - -// Float64 returns the float64 representation of num if possible. -func (num Number) Float64() (float64, bool) { - f, err := json.Number(num).Float64() - if err != nil { - return 0, false - } - return f, true -} - -// IsGround always returns true. -func (Number) IsGround() bool { - return true -} - -// MarshalJSON returns JSON encoded bytes representing num. -func (num Number) MarshalJSON() ([]byte, error) { - return json.Marshal(json.Number(num)) -} - -func (num Number) String() string { - return string(num) -} - -func intNumber(i int) Number { - return Number(strconv.Itoa(i)) -} - -func int64Number(i int64) Number { - return Number(strconv.FormatInt(i, 10)) -} - -func uint64Number(u uint64) Number { - return Number(strconv.FormatUint(u, 10)) -} - -func floatNumber(f float64) Number { - return Number(strconv.FormatFloat(f, 'g', -1, 64)) + return v1.FloatNumberTerm(f) } // String represents a string value as defined by JSON. -type String string +type String = v1.String // StringTerm creates a new Term with a String value. func StringTerm(s string) *Term { - return &Term{Value: String(s)} -} - -// Equal returns true if the other Value is a String and is equal. -func (str String) Equal(other Value) bool { - switch other := other.(type) { - case String: - return str == other - default: - return false - } -} - -// Compare compares str to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (str String) Compare(other Value) int { - return Compare(str, other) -} - -// Find returns the current value or a not found error. -func (str String) Find(path Ref) (Value, error) { - if len(path) == 0 { - return str, nil - } - return nil, errFindNotFound -} - -// IsGround always returns true. -func (String) IsGround() bool { - return true -} - -func (str String) String() string { - return strconv.Quote(string(str)) -} - -// Hash returns the hash code for the Value. -func (str String) Hash() int { - h := xxhash.ChecksumString64S(string(str), hashSeed0) - return int(h) + return v1.StringTerm(s) } // Var represents a variable as defined by the language. -type Var string +type Var = v1.Var // VarTerm creates a new Term with a Variable value. func VarTerm(v string) *Term { - return &Term{Value: Var(v)} -} - -// Equal returns true if the other Value is a Variable and has the same value -// (name). -func (v Var) Equal(other Value) bool { - switch other := other.(type) { - case Var: - return v == other - default: - return false - } -} - -// Compare compares v to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (v Var) Compare(other Value) int { - return Compare(v, other) -} - -// Find returns the current value or a not found error. -func (v Var) Find(path Ref) (Value, error) { - if len(path) == 0 { - return v, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (v Var) Hash() int { - h := xxhash.ChecksumString64S(string(v), hashSeed0) - return int(h) -} - -// IsGround always returns false. -func (Var) IsGround() bool { - return false -} - -// IsWildcard returns true if this is a wildcard variable. -func (v Var) IsWildcard() bool { - return strings.HasPrefix(string(v), WildcardPrefix) -} - -// IsGenerated returns true if this variable was generated during compilation. 
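The scalar and variable term constructors (NumberTerm, IntNumberTerm, FloatNumberTerm, StringTerm, VarTerm, BooleanTerm, NullTerm) all become one-line forwards as well. A quick sketch of their behaviour, with the expected output noted in comments:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	n := ast.IntNumberTerm(42).Value.(ast.Number)
	i, ok := n.Int()
	fmt.Println(i, ok) // 42 true

	fmt.Println(ast.FloatNumberTerm(2.5))              // 2.5
	fmt.Println(ast.StringTerm("hi"))                  // "hi"
	fmt.Println(ast.VarTerm("x"))                      // x
	fmt.Println(ast.BooleanTerm(true), ast.NullTerm()) // true null
}
```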
-func (v Var) IsGenerated() bool { - return strings.HasPrefix(string(v), "__local") -} - -func (v Var) String() string { - // Special case for wildcard so that string representation is parseable. The - // parser mangles wildcard variables to make their names unique and uses an - // illegal variable name character (WildcardPrefix) to avoid conflicts. When - // we serialize the variable here, we need to make sure it's parseable. - if v.IsWildcard() { - return Wildcard.String() - } - return string(v) + return v1.VarTerm(v) } // Ref represents a reference as defined by the language. -type Ref []*Term +type Ref = v1.Ref // EmptyRef returns a new, empty reference. func EmptyRef() Ref { - return Ref([]*Term{}) + return v1.EmptyRef() } // PtrRef returns a new reference against the head for the pointer // s. Path components in the pointer are unescaped. func PtrRef(head *Term, s string) (Ref, error) { - s = strings.Trim(s, "/") - if s == "" { - return Ref{head}, nil - } - parts := strings.Split(s, "/") - if maxLen := math.MaxInt32; len(parts) >= maxLen { - return nil, fmt.Errorf("path too long: %s, %d > %d (max)", s, len(parts), maxLen) - } - ref := make(Ref, uint(len(parts))+1) - ref[0] = head - for i := 0; i < len(parts); i++ { - var err error - parts[i], err = url.PathUnescape(parts[i]) - if err != nil { - return nil, err - } - ref[i+1] = StringTerm(parts[i]) - } - return ref, nil + return v1.PtrRef(head, s) } // RefTerm creates a new Term with a Ref value. func RefTerm(r ...*Term) *Term { - return &Term{Value: Ref(r)} -} - -// Append returns a copy of ref with the term appended to the end. -func (ref Ref) Append(term *Term) Ref { - n := len(ref) - dst := make(Ref, n+1) - copy(dst, ref) - dst[n] = term - return dst -} - -// Insert returns a copy of the ref with x inserted at pos. If pos < len(ref), -// existing elements are shifted to the right. If pos > len(ref)+1 this -// function panics. -func (ref Ref) Insert(x *Term, pos int) Ref { - switch { - case pos == len(ref): - return ref.Append(x) - case pos > len(ref)+1: - panic("illegal index") - } - cpy := make(Ref, len(ref)+1) - copy(cpy, ref[:pos]) - cpy[pos] = x - copy(cpy[pos+1:], ref[pos:]) - return cpy -} - -// Extend returns a copy of ref with the terms from other appended. The head of -// other will be converted to a string. -func (ref Ref) Extend(other Ref) Ref { - dst := make(Ref, len(ref)+len(other)) - copy(dst, ref) - - head := other[0].Copy() - head.Value = String(head.Value.(Var)) - offset := len(ref) - dst[offset] = head - - copy(dst[offset+1:], other[1:]) - return dst -} - -// Concat returns a ref with the terms appended. -func (ref Ref) Concat(terms []*Term) Ref { - if len(terms) == 0 { - return ref - } - cpy := make(Ref, len(ref)+len(terms)) - copy(cpy, ref) - copy(cpy[len(ref):], terms) - return cpy -} - -// Dynamic returns the offset of the first non-constant operand of ref. -func (ref Ref) Dynamic() int { - switch ref[0].Value.(type) { - case Call: - return 0 - } - for i := 1; i < len(ref); i++ { - if !IsConstant(ref[i].Value) { - return i - } - } - return -1 -} - -// Copy returns a deep copy of ref. -func (ref Ref) Copy() Ref { - return termSliceCopy(ref) -} - -// Equal returns true if ref is equal to other. -func (ref Ref) Equal(other Value) bool { - return Compare(ref, other) == 0 -} - -// Compare compares ref to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. 
-func (ref Ref) Compare(other Value) int { - return Compare(ref, other) -} - -// Find returns the current value or a "not found" error. -func (ref Ref) Find(path Ref) (Value, error) { - if len(path) == 0 { - return ref, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (ref Ref) Hash() int { - return termSliceHash(ref) -} - -// HasPrefix returns true if the other ref is a prefix of this ref. -func (ref Ref) HasPrefix(other Ref) bool { - if len(other) > len(ref) { - return false - } - for i := range other { - if !ref[i].Equal(other[i]) { - return false - } - } - return true -} - -// ConstantPrefix returns the constant portion of the ref starting from the head. -func (ref Ref) ConstantPrefix() Ref { - ref = ref.Copy() - - i := ref.Dynamic() - if i < 0 { - return ref - } - return ref[:i] + return v1.RefTerm(r...) } -func (ref Ref) StringPrefix() Ref { - r := ref.Copy() - - for i := 1; i < len(ref); i++ { - switch r[i].Value.(type) { - case String: // pass - default: // cut off - return r[:i] - } - } - - return r -} - -// GroundPrefix returns the ground portion of the ref starting from the head. By -// definition, the head of the reference is always ground. -func (ref Ref) GroundPrefix() Ref { - prefix := make(Ref, 0, len(ref)) - - for i, x := range ref { - if i > 0 && !x.IsGround() { - break - } - prefix = append(prefix, x) - } - - return prefix -} - -func (ref Ref) DynamicSuffix() Ref { - i := ref.Dynamic() - if i < 0 { - return nil - } - return ref[i:] -} - -// IsGround returns true if all of the parts of the Ref are ground. -func (ref Ref) IsGround() bool { - if len(ref) == 0 { - return true - } - return termSliceIsGround(ref[1:]) -} - -// IsNested returns true if this ref contains other Refs. -func (ref Ref) IsNested() bool { - for _, x := range ref { - if _, ok := x.Value.(Ref); ok { - return true - } - } - return false -} - -// Ptr returns a slash-separated path string for this ref. If the ref -// contains non-string terms this function returns an error. Path -// components are escaped. -func (ref Ref) Ptr() (string, error) { - parts := make([]string, 0, len(ref)-1) - for _, term := range ref[1:] { - if str, ok := term.Value.(String); ok { - parts = append(parts, url.PathEscape(string(str))) - } else { - return "", fmt.Errorf("invalid path value type") - } - } - return strings.Join(parts, "/"), nil -} - -var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") - func IsVarCompatibleString(s string) bool { - return varRegexp.MatchString(s) -} - -func (ref Ref) String() string { - if len(ref) == 0 { - return "" - } - buf := []string{ref[0].Value.String()} - path := ref[1:] - for _, p := range path { - switch p := p.Value.(type) { - case String: - str := string(p) - if varRegexp.MatchString(str) && len(buf) > 0 && !IsKeyword(str) { - buf = append(buf, "."+str) - } else { - buf = append(buf, "["+p.String()+"]") - } - default: - buf = append(buf, "["+p.String()+"]") - } - } - return strings.Join(buf, "") -} - -// OutputVars returns a VarSet containing variables that would be bound by evaluating -// this expression in isolation. 
-func (ref Ref) OutputVars() VarSet { - vis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) - vis.Walk(ref) - return vis.Vars() -} - -func (ref Ref) toArray() *Array { - a := NewArray() - for _, term := range ref { - if _, ok := term.Value.(String); ok { - a = a.Append(term) - } else { - a = a.Append(StringTerm(term.Value.String())) - } - } - return a + return v1.IsVarCompatibleString(s) } // QueryIterator defines the interface for querying AST documents with references. -type QueryIterator func(map[Var]Value, Value) error +type QueryIterator = v1.QueryIterator // ArrayTerm creates a new Term with an Array value. func ArrayTerm(a ...*Term) *Term { - return NewTerm(NewArray(a...)) + return v1.ArrayTerm(a...) } // NewArray creates an Array with the terms provided. The array will // use the provided term slice. func NewArray(a ...*Term) *Array { - hs := make([]int, len(a)) - for i, e := range a { - hs[i] = e.Value.Hash() - } - arr := &Array{elems: a, hashs: hs, ground: termSliceIsGround(a)} - arr.rehash() - return arr + return v1.NewArray(a...) } // Array represents an array as defined by the language. Arrays are similar to the // same types as defined by JSON with the exception that they can contain Vars // and References. -type Array struct { - elems []*Term - hashs []int // element hashes - hash int - ground bool -} - -// Copy returns a deep copy of arr. -func (arr *Array) Copy() *Array { - cpy := make([]int, len(arr.elems)) - copy(cpy, arr.hashs) - return &Array{ - elems: termSliceCopy(arr.elems), - hashs: cpy, - hash: arr.hash, - ground: arr.IsGround()} -} - -// Equal returns true if arr is equal to other. -func (arr *Array) Equal(other Value) bool { - return Compare(arr, other) == 0 -} - -// Compare compares arr to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (arr *Array) Compare(other Value) int { - return Compare(arr, other) -} - -// Find returns the value at the index or an out-of-range error. -func (arr *Array) Find(path Ref) (Value, error) { - if len(path) == 0 { - return arr, nil - } - num, ok := path[0].Value.(Number) - if !ok { - return nil, errFindNotFound - } - i, ok := num.Int() - if !ok { - return nil, errFindNotFound - } - if i < 0 || i >= arr.Len() { - return nil, errFindNotFound - } - return arr.Elem(i).Value.Find(path[1:]) -} - -// Get returns the element at pos or nil if not possible. -func (arr *Array) Get(pos *Term) *Term { - num, ok := pos.Value.(Number) - if !ok { - return nil - } - - i, ok := num.Int() - if !ok { - return nil - } - - if i >= 0 && i < len(arr.elems) { - return arr.elems[i] - } - - return nil -} - -// Sorted returns a new Array that contains the sorted elements of arr. -func (arr *Array) Sorted() *Array { - cpy := make([]*Term, len(arr.elems)) - for i := range cpy { - cpy[i] = arr.elems[i] - } - sort.Sort(termSlice(cpy)) - a := NewArray(cpy...) - a.hashs = arr.hashs - return a -} - -// Hash returns the hash code for the Value. -func (arr *Array) Hash() int { - return arr.hash -} - -// IsGround returns true if all of the Array elements are ground. -func (arr *Array) IsGround() bool { - return arr.ground -} - -// MarshalJSON returns JSON encoded bytes representing arr. 
-func (arr *Array) MarshalJSON() ([]byte, error) { - if len(arr.elems) == 0 { - return []byte(`[]`), nil - } - return json.Marshal(arr.elems) -} - -func (arr *Array) String() string { - var b strings.Builder - b.WriteRune('[') - for i, e := range arr.elems { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(e.String()) - } - b.WriteRune(']') - return b.String() -} - -// Len returns the number of elements in the array. -func (arr *Array) Len() int { - return len(arr.elems) -} - -// Elem returns the element i of arr. -func (arr *Array) Elem(i int) *Term { - return arr.elems[i] -} - -// rehash updates the cached hash of arr. -func (arr *Array) rehash() { - arr.hash = 0 - for _, h := range arr.hashs { - arr.hash += h - } -} - -// set sets the element i of arr. -func (arr *Array) set(i int, v *Term) { - arr.ground = arr.ground && v.IsGround() - arr.elems[i] = v - arr.hashs[i] = v.Value.Hash() -} - -// Slice returns a slice of arr starting from i index to j. -1 -// indicates the end of the array. The returned value array is not a -// copy and any modifications to either of arrays may be reflected to -// the other. -func (arr *Array) Slice(i, j int) *Array { - var elems []*Term - var hashs []int - if j == -1 { - elems = arr.elems[i:] - hashs = arr.hashs[i:] - } else { - elems = arr.elems[i:j] - hashs = arr.hashs[i:j] - } - // If arr is ground, the slice is, too. - // If it's not, the slice could still be. - gr := arr.ground || termSliceIsGround(elems) - - s := &Array{elems: elems, hashs: hashs, ground: gr} - s.rehash() - return s -} - -// Iter calls f on each element in arr. If f returns an error, -// iteration stops and the return value is the error. -func (arr *Array) Iter(f func(*Term) error) error { - for i := range arr.elems { - if err := f(arr.elems[i]); err != nil { - return err - } - } - return nil -} - -// Until calls f on each element in arr. If f returns true, iteration stops. -func (arr *Array) Until(f func(*Term) bool) bool { - err := arr.Iter(func(t *Term) error { - if f(t) { - return errStop - } - return nil - }) - return err != nil -} - -// Foreach calls f on each element in arr. -func (arr *Array) Foreach(f func(*Term)) { - _ = arr.Iter(func(t *Term) error { - f(t) - return nil - }) // ignore error -} - -// Append appends a term to arr, returning the appended array. -func (arr *Array) Append(v *Term) *Array { - cpy := *arr - cpy.elems = append(arr.elems, v) - cpy.hashs = append(arr.hashs, v.Value.Hash()) - cpy.hash = arr.hash + v.Value.Hash() - cpy.ground = arr.ground && v.IsGround() - return &cpy -} +type Array = v1.Array // Set represents a set as defined by the language. -type Set interface { - Value - Len() int - Copy() Set - Diff(Set) Set - Intersect(Set) Set - Union(Set) Set - Add(*Term) - Iter(func(*Term) error) error - Until(func(*Term) bool) bool - Foreach(func(*Term)) - Contains(*Term) bool - Map(func(*Term) (*Term, error)) (Set, error) - Reduce(*Term, func(*Term, *Term) (*Term, error)) (*Term, error) - Sorted() *Array - Slice() []*Term -} +type Set = v1.Set // NewSet returns a new Set containing t. func NewSet(t ...*Term) Set { - s := newset(len(t)) - for i := range t { - s.Add(t[i]) - } - return s + return v1.NewSet(t...) } -func newset(n int) *set { - var keys []*Term - if n > 0 { - keys = make([]*Term, 0, n) - } - return &set{ - elems: make(map[int]*Term, n), - keys: keys, - hash: 0, - ground: true, - sortGuard: new(sync.Once), - } -} - -// SetTerm returns a new Term representing a set containing terms t. func SetTerm(t ...*Term) *Term { - set := NewSet(t...) 
- return &Term{ - Value: set, - } -} - -type set struct { - elems map[int]*Term - keys []*Term - hash int - ground bool - sortGuard *sync.Once // Prevents race condition around sorting. -} - -// Copy returns a deep copy of s. -func (s *set) Copy() Set { - cpy := newset(s.Len()) - s.Foreach(func(x *Term) { - cpy.Add(x.Copy()) - }) - cpy.hash = s.hash - cpy.ground = s.ground - return cpy -} - -// IsGround returns true if all terms in s are ground. -func (s *set) IsGround() bool { - return s.ground -} - -// Hash returns a hash code for s. -func (s *set) Hash() int { - return s.hash -} - -func (s *set) String() string { - if s.Len() == 0 { - return "set()" - } - var b strings.Builder - b.WriteRune('{') - for i := range s.sortedKeys() { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(s.keys[i].Value.String()) - } - b.WriteRune('}') - return b.String() -} - -func (s *set) sortedKeys() []*Term { - s.sortGuard.Do(func() { - sort.Sort(termSlice(s.keys)) - }) - return s.keys -} - -// Compare compares s to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (s *set) Compare(other Value) int { - o1 := sortOrder(s) - o2 := sortOrder(other) - if o1 < o2 { - return -1 - } else if o1 > o2 { - return 1 - } - t := other.(*set) - return termSliceCompare(s.sortedKeys(), t.sortedKeys()) -} - -// Find returns the set or dereferences the element itself. -func (s *set) Find(path Ref) (Value, error) { - if len(path) == 0 { - return s, nil - } - if !s.Contains(path[0]) { - return nil, errFindNotFound - } - return path[0].Value.Find(path[1:]) -} - -// Diff returns elements in s that are not in other. -func (s *set) Diff(other Set) Set { - r := NewSet() - s.Foreach(func(x *Term) { - if !other.Contains(x) { - r.Add(x) - } - }) - return r -} - -// Intersect returns the set containing elements in both s and other. -func (s *set) Intersect(other Set) Set { - o := other.(*set) - n, m := s.Len(), o.Len() - ss := s - so := o - if m < n { - ss = o - so = s - n = m - } - - r := newset(n) - ss.Foreach(func(x *Term) { - if so.Contains(x) { - r.Add(x) - } - }) - return r -} - -// Union returns the set containing all elements of s and other. -func (s *set) Union(other Set) Set { - r := NewSet() - s.Foreach(func(x *Term) { - r.Add(x) - }) - other.Foreach(func(x *Term) { - r.Add(x) - }) - return r -} - -// Add updates s to include t. -func (s *set) Add(t *Term) { - s.insert(t) -} - -// Iter calls f on each element in s. If f returns an error, iteration stops -// and the return value is the error. -func (s *set) Iter(f func(*Term) error) error { - for i := range s.sortedKeys() { - if err := f(s.keys[i]); err != nil { - return err - } - } - return nil -} - -var errStop = errors.New("stop") - -// Until calls f on each element in s. If f returns true, iteration stops. -func (s *set) Until(f func(*Term) bool) bool { - err := s.Iter(func(t *Term) error { - if f(t) { - return errStop - } - return nil - }) - return err != nil -} - -// Foreach calls f on each element in s. -func (s *set) Foreach(f func(*Term)) { - _ = s.Iter(func(t *Term) error { - f(t) - return nil - }) // ignore error -} - -// Map returns a new Set obtained by applying f to each value in s. -func (s *set) Map(f func(*Term) (*Term, error)) (Set, error) { - set := NewSet() - err := s.Iter(func(x *Term) error { - term, err := f(x) - if err != nil { - return err - } - set.Add(term) - return nil - }) - if err != nil { - return nil, err - } - return set, nil -} - -// Reduce returns a Term produced by applying f to each value in s. 
The first -// argument to f is the reduced value (starting with i) and the second argument -// to f is the element in s. -func (s *set) Reduce(i *Term, f func(*Term, *Term) (*Term, error)) (*Term, error) { - err := s.Iter(func(x *Term) error { - var err error - i, err = f(i, x) - if err != nil { - return err - } - return nil - }) - return i, err -} - -// Contains returns true if t is in s. -func (s *set) Contains(t *Term) bool { - return s.get(t) != nil -} - -// Len returns the number of elements in the set. -func (s *set) Len() int { - return len(s.keys) -} - -// MarshalJSON returns JSON encoded bytes representing s. -func (s *set) MarshalJSON() ([]byte, error) { - if s.keys == nil { - return []byte(`[]`), nil - } - return json.Marshal(s.sortedKeys()) -} - -// Sorted returns an Array that contains the sorted elements of s. -func (s *set) Sorted() *Array { - cpy := make([]*Term, len(s.keys)) - copy(cpy, s.sortedKeys()) - return NewArray(cpy...) -} - -// Slice returns a slice of terms contained in the set. -func (s *set) Slice() []*Term { - return s.sortedKeys() -} - -// NOTE(philipc): We assume a many-readers, single-writer model here. -// This method should NOT be used concurrently, or else we risk data races. -func (s *set) insert(x *Term) { - hash := x.Hash() - insertHash := hash - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := x.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr, ok := s.elems[insertHash]; ok; { - if equal(curr.Value) { - return - } - - insertHash++ - curr, ok = s.elems[insertHash] - } - - s.elems[insertHash] = x - // O(1) insertion, but we'll have to re-sort the keys later. - s.keys = append(s.keys, x) - // Reset the sync.Once instance. 
- // See https://github.com/golang/go/issues/25955 for why we do it this way. - s.sortGuard = new(sync.Once) - - s.hash += hash - s.ground = s.ground && x.IsGround() -} - -func (s *set) get(x *Term) *Term { - hash := x.Hash() - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := x.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - return false - - } - - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr, ok := s.elems[hash]; ok; { - if equal(curr.Value) { - return curr - } - - hash++ - curr, ok = s.elems[hash] - } - return nil + return v1.SetTerm(t...) } // Object represents an object as defined by the language. -type Object interface { - Value - Len() int - Get(*Term) *Term - Copy() Object - Insert(*Term, *Term) - Iter(func(*Term, *Term) error) error - Until(func(*Term, *Term) bool) bool - Foreach(func(*Term, *Term)) - Map(func(*Term, *Term) (*Term, *Term, error)) (Object, error) - Diff(other Object) Object - Intersect(other Object) [][3]*Term - Merge(other Object) (Object, bool) - MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) - Filter(filter Object) (Object, error) - Keys() []*Term - KeysIterator() ObjectKeysIterator - get(k *Term) *objectElem // To prevent external implementations -} +type Object = v1.Object // NewObject creates a new Object with t. func NewObject(t ...[2]*Term) Object { - obj := newobject(len(t)) - for i := range t { - obj.Insert(t[i][0], t[i][1]) - } - return obj + return v1.NewObject(t...) } // ObjectTerm creates a new Term with an Object value. func ObjectTerm(o ...[2]*Term) *Term { - return &Term{Value: NewObject(o...)} + return v1.ObjectTerm(o...) 
} func LazyObject(blob map[string]interface{}) Object { - return &lazyObj{native: blob, cache: map[string]Value{}} -} - -type lazyObj struct { - strict Object - cache map[string]Value - native map[string]interface{} -} - -func (l *lazyObj) force() Object { - if l.strict == nil { - l.strict = MustInterfaceToValue(l.native).(Object) - // NOTE(jf): a possible performance improvement here would be to check how many - // entries have been realized to AST in the cache, and if some threshold compared to the - // total number of keys is exceeded, realize the remaining entries and set l.strict to l.cache. - l.cache = map[string]Value{} // We don't need the cache anymore; drop it to free up memory. - } - return l.strict -} - -func (l *lazyObj) Compare(other Value) int { - o1 := sortOrder(l) - o2 := sortOrder(other) - if o1 < o2 { - return -1 - } else if o2 < o1 { - return 1 - } - return l.force().Compare(other) -} - -func (l *lazyObj) Copy() Object { - return l -} - -func (l *lazyObj) Diff(other Object) Object { - return l.force().Diff(other) -} - -func (l *lazyObj) Intersect(other Object) [][3]*Term { - return l.force().Intersect(other) -} - -func (l *lazyObj) Iter(f func(*Term, *Term) error) error { - return l.force().Iter(f) -} - -func (l *lazyObj) Until(f func(*Term, *Term) bool) bool { - // NOTE(sr): there could be benefits in not forcing here -- if we abort because - // `f` returns true, we could save us from converting the rest of the object. - return l.force().Until(f) -} - -func (l *lazyObj) Foreach(f func(*Term, *Term)) { - l.force().Foreach(f) -} - -func (l *lazyObj) Filter(filter Object) (Object, error) { - return l.force().Filter(filter) -} - -func (l *lazyObj) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { - return l.force().Map(f) -} - -func (l *lazyObj) MarshalJSON() ([]byte, error) { - return l.force().(*object).MarshalJSON() -} - -func (l *lazyObj) Merge(other Object) (Object, bool) { - return l.force().Merge(other) -} - -func (l *lazyObj) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { - return l.force().MergeWith(other, conflictResolver) -} - -func (l *lazyObj) Len() int { - return len(l.native) + return v1.LazyObject(blob) } -func (l *lazyObj) String() string { - return l.force().String() -} - -// get is merely there to implement the Object interface -- `get` there serves the -// purpose of prohibiting external implementations. It's never called for lazyObj. 
-func (*lazyObj) get(*Term) *objectElem { - return nil -} - -func (l *lazyObj) Get(k *Term) *Term { - if l.strict != nil { - return l.strict.Get(k) - } - if s, ok := k.Value.(String); ok { - if v, ok := l.cache[string(s)]; ok { - return NewTerm(v) - } - - if val, ok := l.native[string(s)]; ok { - var converted Value - switch val := val.(type) { - case map[string]interface{}: - converted = LazyObject(val) - default: - converted = MustInterfaceToValue(val) - } - l.cache[string(s)] = converted - return NewTerm(converted) - } - } - return nil -} - -func (l *lazyObj) Insert(k, v *Term) { - l.force().Insert(k, v) -} - -func (*lazyObj) IsGround() bool { - return true -} - -func (l *lazyObj) Hash() int { - return l.force().Hash() -} - -func (l *lazyObj) Keys() []*Term { - if l.strict != nil { - return l.strict.Keys() - } - ret := make([]*Term, 0, len(l.native)) - for k := range l.native { - ret = append(ret, StringTerm(k)) - } - sort.Sort(termSlice(ret)) - return ret -} - -func (l *lazyObj) KeysIterator() ObjectKeysIterator { - return &lazyObjKeysIterator{keys: l.Keys()} -} - -type lazyObjKeysIterator struct { - current int - keys []*Term -} - -func (ki *lazyObjKeysIterator) Next() (*Term, bool) { - if ki.current == len(ki.keys) { - return nil, false - } - ki.current++ - return ki.keys[ki.current-1], true -} - -func (l *lazyObj) Find(path Ref) (Value, error) { - if l.strict != nil { - return l.strict.Find(path) - } - if len(path) == 0 { - return l, nil - } - if p0, ok := path[0].Value.(String); ok { - if v, ok := l.cache[string(p0)]; ok { - return v.Find(path[1:]) - } - - if v, ok := l.native[string(p0)]; ok { - var converted Value - switch v := v.(type) { - case map[string]interface{}: - converted = LazyObject(v) - default: - converted = MustInterfaceToValue(v) - } - l.cache[string(p0)] = converted - return converted.Find(path[1:]) - } - } - return nil, errFindNotFound -} - -type object struct { - elems map[int]*objectElem - keys objectElemSlice - ground int // number of key and value grounds. Counting is - // required to support insert's key-value replace. - hash int - sortGuard *sync.Once // Prevents race condition around sorting. -} - -func newobject(n int) *object { - var keys objectElemSlice - if n > 0 { - keys = make(objectElemSlice, 0, n) - } - return &object{ - elems: make(map[int]*objectElem, n), - keys: keys, - ground: 0, - hash: 0, - sortGuard: new(sync.Once), - } -} - -type objectElem struct { - key *Term - value *Term - next *objectElem -} - -type objectElemSlice []*objectElem - -func (s objectElemSlice) Less(i, j int) bool { return Compare(s[i].key.Value, s[j].key.Value) < 0 } -func (s objectElemSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } -func (s objectElemSlice) Len() int { return len(s) } - // Item is a helper for constructing an tuple containing two Terms // representing a key/value pair in an Object. func Item(key, value *Term) [2]*Term { - return [2]*Term{key, value} -} - -func (obj *object) sortedKeys() objectElemSlice { - obj.sortGuard.Do(func() { - sort.Sort(obj.keys) - }) - return obj.keys -} - -// Compare compares obj to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (obj *object) Compare(other Value) int { - if x, ok := other.(*lazyObj); ok { - other = x.force() - } - o1 := sortOrder(obj) - o2 := sortOrder(other) - if o1 < o2 { - return -1 - } else if o2 < o1 { - return 1 - } - a := obj - b := other.(*object) - // Ensure that keys are in canonical sorted order before use! 
- akeys := a.sortedKeys() - bkeys := b.sortedKeys() - minLen := len(akeys) - if len(b.keys) < len(akeys) { - minLen = len(bkeys) - } - for i := 0; i < minLen; i++ { - keysCmp := Compare(akeys[i].key, bkeys[i].key) - if keysCmp < 0 { - return -1 - } - if keysCmp > 0 { - return 1 - } - valA := akeys[i].value - valB := bkeys[i].value - valCmp := Compare(valA, valB) - if valCmp != 0 { - return valCmp - } - } - if len(akeys) < len(bkeys) { - return -1 - } - if len(bkeys) < len(akeys) { - return 1 - } - return 0 -} - -// Find returns the value at the key or undefined. -func (obj *object) Find(path Ref) (Value, error) { - if len(path) == 0 { - return obj, nil - } - value := obj.Get(path[0]) - if value == nil { - return nil, errFindNotFound - } - return value.Value.Find(path[1:]) -} - -func (obj *object) Insert(k, v *Term) { - obj.insert(k, v) -} - -// Get returns the value of k in obj if k exists, otherwise nil. -func (obj *object) Get(k *Term) *Term { - if elem := obj.get(k); elem != nil { - return elem.value - } - return nil -} - -// Hash returns the hash code for the Value. -func (obj *object) Hash() int { - return obj.hash -} - -// IsGround returns true if all of the Object key/value pairs are ground. -func (obj *object) IsGround() bool { - return obj.ground == 2*len(obj.keys) -} - -// Copy returns a deep copy of obj. -func (obj *object) Copy() Object { - cpy, _ := obj.Map(func(k, v *Term) (*Term, *Term, error) { - return k.Copy(), v.Copy(), nil - }) - cpy.(*object).hash = obj.hash - return cpy -} - -// Diff returns a new Object that contains only the key/value pairs that exist in obj. -func (obj *object) Diff(other Object) Object { - r := NewObject() - obj.Foreach(func(k, v *Term) { - if other.Get(k) == nil { - r.Insert(k, v) - } - }) - return r -} - -// Intersect returns a slice of term triplets that represent the intersection of keys -// between obj and other. For each intersecting key, the values from obj and other are included -// as the last two terms in the triplet (respectively). -func (obj *object) Intersect(other Object) [][3]*Term { - r := [][3]*Term{} - obj.Foreach(func(k, v *Term) { - if v2 := other.Get(k); v2 != nil { - r = append(r, [3]*Term{k, v, v2}) - } - }) - return r -} - -// Iter calls the function f for each key-value pair in the object. If f -// returns an error, iteration stops and the error is returned. -func (obj *object) Iter(f func(*Term, *Term) error) error { - for _, node := range obj.sortedKeys() { - if err := f(node.key, node.value); err != nil { - return err - } - } - return nil -} - -// Until calls f for each key-value pair in the object. If f returns -// true, iteration stops and Until returns true. Otherwise, return -// false. -func (obj *object) Until(f func(*Term, *Term) bool) bool { - err := obj.Iter(func(k, v *Term) error { - if f(k, v) { - return errStop - } - return nil - }) - return err != nil -} - -// Foreach calls f for each key-value pair in the object. -func (obj *object) Foreach(f func(*Term, *Term)) { - _ = obj.Iter(func(k, v *Term) error { - f(k, v) - return nil - }) // ignore error -} - -// Map returns a new Object constructed by mapping each element in the object -// using the function f. 
-func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { - cpy := newobject(obj.Len()) - err := obj.Iter(func(k, v *Term) error { - var err error - k, v, err = f(k, v) - if err != nil { - return err - } - cpy.insert(k, v) - return nil - }) - if err != nil { - return nil, err - } - return cpy, nil -} - -// Keys returns the keys of obj. -func (obj *object) Keys() []*Term { - keys := make([]*Term, len(obj.keys)) - - for i, elem := range obj.sortedKeys() { - keys[i] = elem.key - } - - return keys -} - -// Returns an iterator over the obj's keys. -func (obj *object) KeysIterator() ObjectKeysIterator { - return newobjectKeysIterator(obj) -} - -// MarshalJSON returns JSON encoded bytes representing obj. -func (obj *object) MarshalJSON() ([]byte, error) { - sl := make([][2]*Term, obj.Len()) - for i, node := range obj.sortedKeys() { - sl[i] = Item(node.key, node.value) - } - return json.Marshal(sl) -} - -// Merge returns a new Object containing the non-overlapping keys of obj and other. If there are -// overlapping keys between obj and other, the values of associated with the keys are merged. Only -// objects can be merged with other objects. If the values cannot be merged, the second turn value -// will be false. -func (obj object) Merge(other Object) (Object, bool) { - return obj.MergeWith(other, func(v1, v2 *Term) (*Term, bool) { - obj1, ok1 := v1.Value.(Object) - obj2, ok2 := v2.Value.(Object) - if !ok1 || !ok2 { - return nil, true - } - obj3, ok := obj1.Merge(obj2) - if !ok { - return nil, true - } - return NewTerm(obj3), false - }) -} - -// MergeWith returns a new Object containing the merged keys of obj and other. -// If there are overlapping keys between obj and other, the conflictResolver -// is called. The conflictResolver can return a merged value and a boolean -// indicating if the merge has failed and should stop. -func (obj object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { - result := NewObject() - stop := obj.Until(func(k, v *Term) bool { - v2 := other.Get(k) - // The key didn't exist in other, keep the original value - if v2 == nil { - result.Insert(k, v) - return false - } - - // The key exists in both, resolve the conflict if possible - merged, stop := conflictResolver(v, v2) - if !stop { - result.Insert(k, merged) - } - return stop - }) - - if stop { - return nil, false - } - - // Copy in any values from other for keys that don't exist in obj - other.Foreach(func(k, v *Term) { - if v2 := obj.Get(k); v2 == nil { - result.Insert(k, v) - } - }) - return result, true -} - -// Filter returns a new object from values in obj where the keys are -// found in filter. Array indices for values can be specified as -// number strings. -func (obj *object) Filter(filter Object) (Object, error) { - filtered, err := filterObject(obj, filter) - if err != nil { - return nil, err - } - return filtered.(Object), nil -} - -// Len returns the number of elements in the object. -func (obj object) Len() int { - return len(obj.keys) -} - -func (obj object) String() string { - var b strings.Builder - b.WriteRune('{') - - for i, elem := range obj.sortedKeys() { - if i > 0 { - b.WriteString(", ") - } - b.WriteString(elem.key.String()) - b.WriteString(": ") - b.WriteString(elem.value.String()) - } - b.WriteRune('}') - return b.String() -} - -func (obj *object) get(k *Term) *objectElem { - hash := k.Hash() - - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. 
Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := k.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. - var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr := obj.elems[hash]; curr != nil; curr = curr.next { - if equal(curr.key.Value) { - return curr - } - } - return nil -} - -// NOTE(philipc): We assume a many-readers, single-writer model here. -// This method should NOT be used concurrently, or else we risk data races. -func (obj *object) insert(k, v *Term) { - hash := k.Hash() - head := obj.elems[hash] - // This `equal` utility is duplicated and manually inlined a number of - // time in this file. Inlining it avoids heap allocations, so it makes - // a big performance difference: some operations like lookup become twice - // as slow without it. - var equal func(v Value) bool - - switch x := k.Value.(type) { - case Null, Boolean, String, Var: - equal = func(y Value) bool { return x == y } - case Number: - if xi, err := json.Number(x).Int64(); err == nil { - equal = func(y Value) bool { - if y, ok := y.(Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { - return xi == yi - } - } - - return false - } - break - } - - // We use big.Rat for comparing big numbers. - // It replaces big.Float due to following reason: - // big.Float comes with a default precision of 64, and setting a - // larger precision results in more memory being allocated - // (regardless of the actual number we are parsing with SetString). - // - // Note: If we're so close to zero that big.Float says we are zero, do - // *not* big.Rat).SetString on the original string it'll potentially - // take very long. 
- var a *big.Rat - fa, ok := new(big.Float).SetString(string(x)) - if !ok { - panic("illegal value") - } - if fa.IsInt() { - if i, _ := fa.Int64(); i == 0 { - a = new(big.Rat).SetInt64(0) - } - } - if a == nil { - a, ok = new(big.Rat).SetString(string(x)) - if !ok { - panic("illegal value") - } - } - - equal = func(b Value) bool { - if bNum, ok := b.(Number); ok { - var b *big.Rat - fb, ok := new(big.Float).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - if fb.IsInt() { - if i, _ := fb.Int64(); i == 0 { - b = new(big.Rat).SetInt64(0) - } - } - if b == nil { - b, ok = new(big.Rat).SetString(string(bNum)) - if !ok { - panic("illegal value") - } - } - - return a.Cmp(b) == 0 - } - - return false - } - default: - equal = func(y Value) bool { return Compare(x, y) == 0 } - } - - for curr := head; curr != nil; curr = curr.next { - if equal(curr.key.Value) { - // The ground bit of the value may change in - // replace, hence adjust the counter per old - // and new value. - - if curr.value.IsGround() { - obj.ground-- - } - if v.IsGround() { - obj.ground++ - } - - curr.value = v - return - } - } - elem := &objectElem{ - key: k, - value: v, - next: head, - } - obj.elems[hash] = elem - // O(1) insertion, but we'll have to re-sort the keys later. - obj.keys = append(obj.keys, elem) - // Reset the sync.Once instance. - // See https://github.com/golang/go/issues/25955 for why we do it this way. - obj.sortGuard = new(sync.Once) - obj.hash += hash + v.Hash() - - if k.IsGround() { - obj.ground++ - } - if v.IsGround() { - obj.ground++ - } -} - -func filterObject(o Value, filter Value) (Value, error) { - if filter.Compare(Null{}) == 0 { - return o, nil - } - - filteredObj, ok := filter.(*object) - if !ok { - return nil, fmt.Errorf("invalid filter value %q, expected an object", filter) - } - - switch v := o.(type) { - case String, Number, Boolean, Null: - return o, nil - case *Array: - values := NewArray() - for i := 0; i < v.Len(); i++ { - subFilter := filteredObj.Get(StringTerm(strconv.Itoa(i))) - if subFilter != nil { - filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value) - if err != nil { - return nil, err - } - values = values.Append(NewTerm(filteredValue)) - } - } - return values, nil - case Set: - values := NewSet() - err := v.Iter(func(t *Term) error { - if filteredObj.Get(t) != nil { - filteredValue, err := filterObject(t.Value, filteredObj.Get(t).Value) - if err != nil { - return err - } - values.Add(NewTerm(filteredValue)) - } - return nil - }) - return values, err - case *object: - values := NewObject() - - iterObj := v - other := filteredObj - if v.Len() < filteredObj.Len() { - iterObj = filteredObj - other = v - } - - err := iterObj.Iter(func(key *Term, _ *Term) error { - if other.Get(key) != nil { - filteredValue, err := filterObject(v.Get(key).Value, filteredObj.Get(key).Value) - if err != nil { - return err - } - values.Insert(key, NewTerm(filteredValue)) - } - return nil - }) - return values, err - default: - return nil, fmt.Errorf("invalid object value type %q", v) - } + return v1.Item(key, value) } // NOTE(philipc): The only way to get an ObjectKeyIterator should be // from an Object. This ensures that the iterator can have implementation- // specific details internally, with no contracts except to the very // limited interface. 
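//
// A usage sketch (illustration only, not part of the upstream file; `obj` is
// assumed to be any Object value):
//
//	ki := obj.KeysIterator()
//	for k, more := ki.Next(); more; k, more = ki.Next() {
//		_ = k // each k is a *Term key of obj, returned in sorted order
//	}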
-type ObjectKeysIterator interface { - Next() (*Term, bool) -} - -type objectKeysIterator struct { - obj *object - numKeys int - index int -} - -func newobjectKeysIterator(o *object) ObjectKeysIterator { - return &objectKeysIterator{ - obj: o, - numKeys: o.Len(), - index: 0, - } -} - -func (oki *objectKeysIterator) Next() (*Term, bool) { - if oki.index == oki.numKeys || oki.numKeys == 0 { - return nil, false - } - oki.index++ - return oki.obj.sortedKeys()[oki.index-1].key, true -} +type ObjectKeysIterator = v1.ObjectKeysIterator // ArrayComprehension represents an array comprehension as defined in the language. -type ArrayComprehension struct { - Term *Term `json:"term"` - Body Body `json:"body"` -} +type ArrayComprehension = v1.ArrayComprehension // ArrayComprehensionTerm creates a new Term with an ArrayComprehension value. func ArrayComprehensionTerm(term *Term, body Body) *Term { - return &Term{ - Value: &ArrayComprehension{ - Term: term, - Body: body, - }, - } -} - -// Copy returns a deep copy of ac. -func (ac *ArrayComprehension) Copy() *ArrayComprehension { - cpy := *ac - cpy.Body = ac.Body.Copy() - cpy.Term = ac.Term.Copy() - return &cpy -} - -// Equal returns true if ac is equal to other. -func (ac *ArrayComprehension) Equal(other Value) bool { - return Compare(ac, other) == 0 -} - -// Compare compares ac to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (ac *ArrayComprehension) Compare(other Value) int { - return Compare(ac, other) -} - -// Find returns the current value or a not found error. -func (ac *ArrayComprehension) Find(path Ref) (Value, error) { - if len(path) == 0 { - return ac, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code of the Value. -func (ac *ArrayComprehension) Hash() int { - return ac.Term.Hash() + ac.Body.Hash() -} - -// IsGround returns true if the Term and Body are ground. -func (ac *ArrayComprehension) IsGround() bool { - return ac.Term.IsGround() && ac.Body.IsGround() -} - -func (ac *ArrayComprehension) String() string { - return "[" + ac.Term.String() + " | " + ac.Body.String() + "]" + return v1.ArrayComprehensionTerm(term, body) } // ObjectComprehension represents an object comprehension as defined in the language. -type ObjectComprehension struct { - Key *Term `json:"key"` - Value *Term `json:"value"` - Body Body `json:"body"` -} +type ObjectComprehension = v1.ObjectComprehension // ObjectComprehensionTerm creates a new Term with an ObjectComprehension value. func ObjectComprehensionTerm(key, value *Term, body Body) *Term { - return &Term{ - Value: &ObjectComprehension{ - Key: key, - Value: value, - Body: body, - }, - } -} - -// Copy returns a deep copy of oc. -func (oc *ObjectComprehension) Copy() *ObjectComprehension { - cpy := *oc - cpy.Body = oc.Body.Copy() - cpy.Key = oc.Key.Copy() - cpy.Value = oc.Value.Copy() - return &cpy -} - -// Equal returns true if oc is equal to other. -func (oc *ObjectComprehension) Equal(other Value) bool { - return Compare(oc, other) == 0 -} - -// Compare compares oc to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (oc *ObjectComprehension) Compare(other Value) int { - return Compare(oc, other) -} - -// Find returns the current value or a not found error. -func (oc *ObjectComprehension) Find(path Ref) (Value, error) { - if len(path) == 0 { - return oc, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code of the Value. 
-func (oc *ObjectComprehension) Hash() int { - return oc.Key.Hash() + oc.Value.Hash() + oc.Body.Hash() -} - -// IsGround returns true if the Key, Value and Body are ground. -func (oc *ObjectComprehension) IsGround() bool { - return oc.Key.IsGround() && oc.Value.IsGround() && oc.Body.IsGround() -} - -func (oc *ObjectComprehension) String() string { - return "{" + oc.Key.String() + ": " + oc.Value.String() + " | " + oc.Body.String() + "}" + return v1.ObjectComprehensionTerm(key, value, body) } // SetComprehension represents a set comprehension as defined in the language. -type SetComprehension struct { - Term *Term `json:"term"` - Body Body `json:"body"` -} +type SetComprehension = v1.SetComprehension // SetComprehensionTerm creates a new Term with an SetComprehension value. func SetComprehensionTerm(term *Term, body Body) *Term { - return &Term{ - Value: &SetComprehension{ - Term: term, - Body: body, - }, - } -} - -// Copy returns a deep copy of sc. -func (sc *SetComprehension) Copy() *SetComprehension { - cpy := *sc - cpy.Body = sc.Body.Copy() - cpy.Term = sc.Term.Copy() - return &cpy -} - -// Equal returns true if sc is equal to other. -func (sc *SetComprehension) Equal(other Value) bool { - return Compare(sc, other) == 0 -} - -// Compare compares sc to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (sc *SetComprehension) Compare(other Value) int { - return Compare(sc, other) -} - -// Find returns the current value or a not found error. -func (sc *SetComprehension) Find(path Ref) (Value, error) { - if len(path) == 0 { - return sc, nil - } - return nil, errFindNotFound -} - -// Hash returns the hash code of the Value. -func (sc *SetComprehension) Hash() int { - return sc.Term.Hash() + sc.Body.Hash() -} - -// IsGround returns true if the Term and Body are ground. -func (sc *SetComprehension) IsGround() bool { - return sc.Term.IsGround() && sc.Body.IsGround() -} - -func (sc *SetComprehension) String() string { - return "{" + sc.Term.String() + " | " + sc.Body.String() + "}" + return v1.SetComprehensionTerm(term, body) } // Call represents as function call in the language. -type Call []*Term +type Call = v1.Call // CallTerm returns a new Term with a Call value defined by terms. The first // term is the operator and the rest are operands. func CallTerm(terms ...*Term) *Term { - return NewTerm(Call(terms)) -} - -// Copy returns a deep copy of c. -func (c Call) Copy() Call { - return termSliceCopy(c) -} - -// Compare compares c to other, return <0, 0, or >0 if it is less than, equal to, -// or greater than other. -func (c Call) Compare(other Value) int { - return Compare(c, other) -} - -// Find returns the current value or a not found error. -func (c Call) Find(Ref) (Value, error) { - return nil, errFindNotFound -} - -// Hash returns the hash code for the Value. -func (c Call) Hash() int { - return termSliceHash(c) -} - -// IsGround returns true if the Value is ground. -func (c Call) IsGround() bool { - return termSliceIsGround(c) -} - -// MakeExpr returns an ew Expr from this call. 
-func (c Call) MakeExpr(output *Term) *Expr { - terms := []*Term(c) - return NewExpr(append(terms, output)) -} - -func (c Call) String() string { - args := make([]string, len(c)-1) - for i := 1; i < len(c); i++ { - args[i-1] = c[i].String() - } - return fmt.Sprintf("%v(%v)", c[0], strings.Join(args, ", ")) -} - -func termSliceCopy(a []*Term) []*Term { - cpy := make([]*Term, len(a)) - for i := range a { - cpy[i] = a[i].Copy() - } - return cpy -} - -func termSliceEqual(a, b []*Term) bool { - if len(a) == len(b) { - for i := range a { - if !a[i].Equal(b[i]) { - return false - } - } - return true - } - return false -} - -func termSliceHash(a []*Term) int { - var hash int - for _, v := range a { - hash += v.Value.Hash() - } - return hash -} - -func termSliceIsGround(a []*Term) bool { - for _, v := range a { - if !v.IsGround() { - return false - } - } - return true -} - -// NOTE(tsandall): The unmarshalling errors in these functions are not -// helpful for callers because they do not identify the source of the -// unmarshalling error. Because OPA doesn't accept JSON describing ASTs -// from callers, this is acceptable (for now). If that changes in the future, -// the error messages should be revisited. The current approach focuses -// on the happy path and treats all errors the same. If better error -// reporting is needed, the error paths will need to be fleshed out. - -func unmarshalBody(b []interface{}) (Body, error) { - buf := Body{} - for _, e := range b { - if m, ok := e.(map[string]interface{}); ok { - expr := &Expr{} - if err := unmarshalExpr(expr, m); err == nil { - buf = append(buf, expr) - continue - } - } - goto unmarshal_error - } - return buf, nil -unmarshal_error: - return nil, fmt.Errorf("ast: unable to unmarshal body") -} - -func unmarshalExpr(expr *Expr, v map[string]interface{}) error { - if x, ok := v["negated"]; ok { - if b, ok := x.(bool); ok { - expr.Negated = b - } else { - return fmt.Errorf("ast: unable to unmarshal negated field with type: %T (expected true or false)", v["negated"]) - } - } - if generatedRaw, ok := v["generated"]; ok { - if b, ok := generatedRaw.(bool); ok { - expr.Generated = b - } else { - return fmt.Errorf("ast: unable to unmarshal generated field with type: %T (expected true or false)", v["generated"]) - } - } - - if err := unmarshalExprIndex(expr, v); err != nil { - return err - } - switch ts := v["terms"].(type) { - case map[string]interface{}: - t, err := unmarshalTerm(ts) - if err != nil { - return err - } - expr.Terms = t - case []interface{}: - terms, err := unmarshalTermSlice(ts) - if err != nil { - return err - } - expr.Terms = terms - default: - return fmt.Errorf(`ast: unable to unmarshal terms field with type: %T (expected {"value": ..., "type": ...} or [{"value": ..., "type": ...}, ...])`, v["terms"]) - } - if x, ok := v["with"]; ok { - if sl, ok := x.([]interface{}); ok { - ws := make([]*With, len(sl)) - for i := range sl { - var err error - ws[i], err = unmarshalWith(sl[i]) - if err != nil { - return err - } - } - expr.With = ws - } - } - if loc, ok := v["location"].(map[string]interface{}); ok { - expr.Location = &Location{} - if err := unmarshalLocation(expr.Location, loc); err != nil { - return err - } - } - return nil -} - -func unmarshalLocation(loc *Location, v map[string]interface{}) error { - if x, ok := v["file"]; ok { - if s, ok := x.(string); ok { - loc.File = s - } else { - return fmt.Errorf("ast: unable to unmarshal file field with type: %T (expected string)", v["file"]) - } - } - if x, ok := v["row"]; ok { - if n, ok := 
x.(json.Number); ok { - i64, err := n.Int64() - if err != nil { - return err - } - loc.Row = int(i64) - } else { - return fmt.Errorf("ast: unable to unmarshal row field with type: %T (expected number)", v["row"]) - } - } - if x, ok := v["col"]; ok { - if n, ok := x.(json.Number); ok { - i64, err := n.Int64() - if err != nil { - return err - } - loc.Col = int(i64) - } else { - return fmt.Errorf("ast: unable to unmarshal col field with type: %T (expected number)", v["col"]) - } - } - - return nil -} - -func unmarshalExprIndex(expr *Expr, v map[string]interface{}) error { - if x, ok := v["index"]; ok { - if n, ok := x.(json.Number); ok { - i, err := n.Int64() - if err == nil { - expr.Index = int(i) - return nil - } - } - } - return fmt.Errorf("ast: unable to unmarshal index field with type: %T (expected integer)", v["index"]) -} - -func unmarshalTerm(m map[string]interface{}) (*Term, error) { - var term Term - - v, err := unmarshalValue(m) - if err != nil { - return nil, err - } - term.Value = v - - if loc, ok := m["location"].(map[string]interface{}); ok { - term.Location = &Location{} - if err := unmarshalLocation(term.Location, loc); err != nil { - return nil, err - } - } - - return &term, nil -} - -func unmarshalTermSlice(s []interface{}) ([]*Term, error) { - buf := []*Term{} - for _, x := range s { - if m, ok := x.(map[string]interface{}); ok { - t, err := unmarshalTerm(m) - if err == nil { - buf = append(buf, t) - continue - } - return nil, err - } - return nil, fmt.Errorf("ast: unable to unmarshal term") - } - return buf, nil -} - -func unmarshalTermSliceValue(d map[string]interface{}) ([]*Term, error) { - if s, ok := d["value"].([]interface{}); ok { - return unmarshalTermSlice(s) - } - return nil, fmt.Errorf(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`) -} - -func unmarshalWith(i interface{}) (*With, error) { - if m, ok := i.(map[string]interface{}); ok { - tgt, _ := m["target"].(map[string]interface{}) - target, err := unmarshalTerm(tgt) - if err == nil { - val, _ := m["value"].(map[string]interface{}) - value, err := unmarshalTerm(val) - if err == nil { - return &With{ - Target: target, - Value: value, - }, nil - } - return nil, err - } - return nil, err - } - return nil, fmt.Errorf(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`) -} - -func unmarshalValue(d map[string]interface{}) (Value, error) { - v := d["value"] - switch d["type"] { - case "null": - return Null{}, nil - case "boolean": - if b, ok := v.(bool); ok { - return Boolean(b), nil - } - case "number": - if n, ok := v.(json.Number); ok { - return Number(n), nil - } - case "string": - if s, ok := v.(string); ok { - return String(s), nil - } - case "var": - if s, ok := v.(string); ok { - return Var(s), nil - } - case "ref": - if s, err := unmarshalTermSliceValue(d); err == nil { - return Ref(s), nil - } - case "array": - if s, err := unmarshalTermSliceValue(d); err == nil { - return NewArray(s...), nil - } - case "set": - if s, err := unmarshalTermSliceValue(d); err == nil { - set := NewSet() - for _, x := range s { - set.Add(x) - } - return set, nil - } - case "object": - if s, ok := v.([]interface{}); ok { - buf := NewObject() - for _, x := range s { - if i, ok := x.([]interface{}); ok && len(i) == 2 { - p, err := unmarshalTermSlice(i) - if err == nil { - buf.Insert(p[0], p[1]) - continue - } - } - goto unmarshal_error - } - return buf, nil - } - case "arraycomprehension", "setcomprehension": - if m, ok := 
v.(map[string]interface{}); ok { - t, ok := m["term"].(map[string]interface{}) - if !ok { - goto unmarshal_error - } - - term, err := unmarshalTerm(t) - if err != nil { - goto unmarshal_error - } - - b, ok := m["body"].([]interface{}) - if !ok { - goto unmarshal_error - } - - body, err := unmarshalBody(b) - if err != nil { - goto unmarshal_error - } - - if d["type"] == "arraycomprehension" { - return &ArrayComprehension{Term: term, Body: body}, nil - } - return &SetComprehension{Term: term, Body: body}, nil - } - case "objectcomprehension": - if m, ok := v.(map[string]interface{}); ok { - k, ok := m["key"].(map[string]interface{}) - if !ok { - goto unmarshal_error - } - - key, err := unmarshalTerm(k) - if err != nil { - goto unmarshal_error - } - - v, ok := m["value"].(map[string]interface{}) - if !ok { - goto unmarshal_error - } - - value, err := unmarshalTerm(v) - if err != nil { - goto unmarshal_error - } - - b, ok := m["body"].([]interface{}) - if !ok { - goto unmarshal_error - } - - body, err := unmarshalBody(b) - if err != nil { - goto unmarshal_error - } - - return &ObjectComprehension{Key: key, Value: value, Body: body}, nil - } - case "call": - if s, err := unmarshalTermSliceValue(d); err == nil { - return Call(s), nil - } - } -unmarshal_error: - return nil, fmt.Errorf("ast: unable to unmarshal term") + return v1.CallTerm(terms...) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/transform.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/transform.go index 391a16486..cfb137813 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/transform.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/transform.go @@ -5,427 +5,42 @@ package ast import ( - "fmt" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // Transformer defines the interface for transforming AST elements. If the // transformer returns nil and does not indicate an error, the AST element will // be set to nil and no transformations will be applied to children of the // element. -type Transformer interface { - Transform(interface{}) (interface{}, error) -} +type Transformer = v1.Transformer // Transform iterates the AST and calls the Transform function on the // Transformer t for x before recursing. 
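//
// A usage sketch (illustration only, not part of the upstream file; `module`
// is assumed to be a previously parsed *Module):
//
//	t := NewGenericTransformer(func(x interface{}) (interface{}, error) {
//		if v, ok := x.(Var); ok {
//			return Var("renamed_" + string(v)), nil // rewrite every variable name
//		}
//		return x, nil // leave all other nodes untouched
//	})
//	y, err := Transform(t, module)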
func Transform(t Transformer, x interface{}) (interface{}, error) { - - if term, ok := x.(*Term); ok { - return Transform(t, term.Value) - } - - y, err := t.Transform(x) - if err != nil { - return x, err - } - - if y == nil { - return nil, nil - } - - var ok bool - switch y := y.(type) { - case *Module: - p, err := Transform(t, y.Package) - if err != nil { - return nil, err - } - if y.Package, ok = p.(*Package); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Package, p) - } - for i := range y.Imports { - imp, err := Transform(t, y.Imports[i]) - if err != nil { - return nil, err - } - if y.Imports[i], ok = imp.(*Import); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Imports[i], imp) - } - } - for i := range y.Rules { - rule, err := Transform(t, y.Rules[i]) - if err != nil { - return nil, err - } - if y.Rules[i], ok = rule.(*Rule); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Rules[i], rule) - } - } - for i := range y.Annotations { - a, err := Transform(t, y.Annotations[i]) - if err != nil { - return nil, err - } - if y.Annotations[i], ok = a.(*Annotations); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Annotations[i], a) - } - } - for i := range y.Comments { - comment, err := Transform(t, y.Comments[i]) - if err != nil { - return nil, err - } - if y.Comments[i], ok = comment.(*Comment); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Comments[i], comment) - } - } - return y, nil - case *Package: - ref, err := Transform(t, y.Path) - if err != nil { - return nil, err - } - if y.Path, ok = ref.(Ref); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Path, ref) - } - return y, nil - case *Import: - y.Path, err = transformTerm(t, y.Path) - if err != nil { - return nil, err - } - if y.Alias, err = transformVar(t, y.Alias); err != nil { - return nil, err - } - return y, nil - case *Rule: - if y.Head, err = transformHead(t, y.Head); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - if y.Else != nil { - rule, err := Transform(t, y.Else) - if err != nil { - return nil, err - } - if y.Else, ok = rule.(*Rule); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.Else, rule) - } - } - return y, nil - case *Head: - if y.Reference, err = transformRef(t, y.Reference); err != nil { - return nil, err - } - if y.Name, err = transformVar(t, y.Name); err != nil { - return nil, err - } - if y.Args, err = transformArgs(t, y.Args); err != nil { - return nil, err - } - if y.Key != nil { - if y.Key, err = transformTerm(t, y.Key); err != nil { - return nil, err - } - } - if y.Value != nil { - if y.Value, err = transformTerm(t, y.Value); err != nil { - return nil, err - } - } - return y, nil - case Args: - for i := range y { - if y[i], err = transformTerm(t, y[i]); err != nil { - return nil, err - } - } - return y, nil - case Body: - for i, e := range y { - e, err := Transform(t, e) - if err != nil { - return nil, err - } - if y[i], ok = e.(*Expr); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y[i], e) - } - } - return y, nil - case *Expr: - switch ts := y.Terms.(type) { - case *SomeDecl: - decl, err := Transform(t, ts) - if err != nil { - return nil, err - } - if y.Terms, ok = decl.(*SomeDecl); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y, decl) - } - return y, nil - case []*Term: - for i := range ts { - if ts[i], err = transformTerm(t, ts[i]); err != nil { - return nil, err - } - } - case *Term: - if 
y.Terms, err = transformTerm(t, ts); err != nil { - return nil, err - } - case *Every: - if ts.Key != nil { - ts.Key, err = transformTerm(t, ts.Key) - if err != nil { - return nil, err - } - } - ts.Value, err = transformTerm(t, ts.Value) - if err != nil { - return nil, err - } - ts.Domain, err = transformTerm(t, ts.Domain) - if err != nil { - return nil, err - } - ts.Body, err = transformBody(t, ts.Body) - if err != nil { - return nil, err - } - y.Terms = ts - } - for i, w := range y.With { - w, err := Transform(t, w) - if err != nil { - return nil, err - } - if y.With[i], ok = w.(*With); !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", y.With[i], w) - } - } - return y, nil - case *With: - if y.Target, err = transformTerm(t, y.Target); err != nil { - return nil, err - } - if y.Value, err = transformTerm(t, y.Value); err != nil { - return nil, err - } - return y, nil - case Ref: - for i, term := range y { - if y[i], err = transformTerm(t, term); err != nil { - return nil, err - } - } - return y, nil - case *object: - return y.Map(func(k, v *Term) (*Term, *Term, error) { - k, err := transformTerm(t, k) - if err != nil { - return nil, nil, err - } - v, err = transformTerm(t, v) - if err != nil { - return nil, nil, err - } - return k, v, nil - }) - case *Array: - for i := 0; i < y.Len(); i++ { - v, err := transformTerm(t, y.Elem(i)) - if err != nil { - return nil, err - } - y.set(i, v) - } - return y, nil - case Set: - y, err = y.Map(func(term *Term) (*Term, error) { - return transformTerm(t, term) - }) - if err != nil { - return nil, err - } - return y, nil - case *ArrayComprehension: - if y.Term, err = transformTerm(t, y.Term); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - return y, nil - case *ObjectComprehension: - if y.Key, err = transformTerm(t, y.Key); err != nil { - return nil, err - } - if y.Value, err = transformTerm(t, y.Value); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - return y, nil - case *SetComprehension: - if y.Term, err = transformTerm(t, y.Term); err != nil { - return nil, err - } - if y.Body, err = transformBody(t, y.Body); err != nil { - return nil, err - } - return y, nil - case Call: - for i := range y { - if y[i], err = transformTerm(t, y[i]); err != nil { - return nil, err - } - } - return y, nil - default: - return y, nil - } + return v1.Transform(t, x) } // TransformRefs calls the function f on all references under x. func TransformRefs(x interface{}, f func(Ref) (Value, error)) (interface{}, error) { - t := &GenericTransformer{func(x interface{}) (interface{}, error) { - if r, ok := x.(Ref); ok { - return f(r) - } - return x, nil - }} - return Transform(t, x) + return v1.TransformRefs(x, f) } // TransformVars calls the function f on all vars under x. func TransformVars(x interface{}, f func(Var) (Value, error)) (interface{}, error) { - t := &GenericTransformer{func(x interface{}) (interface{}, error) { - if v, ok := x.(Var); ok { - return f(v) - } - return x, nil - }} - return Transform(t, x) + return v1.TransformVars(x, f) } // TransformComprehensions calls the functio nf on all comprehensions under x. 
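A minimal sketch of the delegating TransformRefs wrapper above, assuming it behaves like the upstream v1 implementation it now calls into; the module source and the data.roles to data.rbac.roles rewrite are illustrative only.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	module := ast.MustParseModule(`package example

allowed_roles := data.roles
`)

	oldPrefix := ast.MustParseRef("data.roles")
	newPrefix := ast.MustParseRef("data.rbac.roles")

	// Rewrite every reference under the old prefix so it points at the new prefix.
	transformed, err := ast.TransformRefs(module, func(r ast.Ref) (ast.Value, error) {
		if r.HasPrefix(oldPrefix) {
			return append(newPrefix.Copy(), r[len(oldPrefix):]...), nil
		}
		return r, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(transformed.(*ast.Module))
}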
func TransformComprehensions(x interface{}, f func(interface{}) (Value, error)) (interface{}, error) { - t := &GenericTransformer{func(x interface{}) (interface{}, error) { - switch x := x.(type) { - case *ArrayComprehension: - return f(x) - case *SetComprehension: - return f(x) - case *ObjectComprehension: - return f(x) - } - return x, nil - }} - return Transform(t, x) + return v1.TransformComprehensions(x, f) } // GenericTransformer implements the Transformer interface to provide a utility // to transform AST nodes using a closure. -type GenericTransformer struct { - f func(interface{}) (interface{}, error) -} +type GenericTransformer = v1.GenericTransformer // NewGenericTransformer returns a new GenericTransformer that will transform // AST nodes using the function f. func NewGenericTransformer(f func(x interface{}) (interface{}, error)) *GenericTransformer { - return &GenericTransformer{ - f: f, - } -} - -// Transform calls the function f on the GenericTransformer. -func (t *GenericTransformer) Transform(x interface{}) (interface{}, error) { - return t.f(x) -} - -func transformHead(t Transformer, head *Head) (*Head, error) { - y, err := Transform(t, head) - if err != nil { - return nil, err - } - h, ok := y.(*Head) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", head, y) - } - return h, nil -} - -func transformArgs(t Transformer, args Args) (Args, error) { - y, err := Transform(t, args) - if err != nil { - return nil, err - } - a, ok := y.(Args) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", args, y) - } - return a, nil -} - -func transformBody(t Transformer, body Body) (Body, error) { - y, err := Transform(t, body) - if err != nil { - return nil, err - } - r, ok := y.(Body) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", body, y) - } - return r, nil -} - -func transformTerm(t Transformer, term *Term) (*Term, error) { - v, err := transformValue(t, term.Value) - if err != nil { - return nil, err - } - r := &Term{ - Value: v, - Location: term.Location, - } - return r, nil -} - -func transformValue(t Transformer, v Value) (Value, error) { - v1, err := Transform(t, v) - if err != nil { - return nil, err - } - r, ok := v1.(Value) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", v, v1) - } - return r, nil -} - -func transformVar(t Transformer, v Var) (Var, error) { - v1, err := Transform(t, v) - if err != nil { - return "", err - } - r, ok := v1.(Var) - if !ok { - return "", fmt.Errorf("illegal transform: %T != %T", v, v1) - } - return r, nil -} - -func transformRef(t Transformer, r Ref) (Ref, error) { - r1, err := Transform(t, r) - if err != nil { - return nil, err - } - r2, ok := r1.(Ref) - if !ok { - return nil, fmt.Errorf("illegal transform: %T != %T", r, r2) - } - return r2, nil + return v1.NewGenericTransformer(f) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/unify.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/unify.go index 60244974a..3cb260272 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/unify.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/unify.go @@ -4,232 +4,11 @@ package ast -func isRefSafe(ref Ref, safe VarSet) bool { - switch head := ref[0].Value.(type) { - case Var: - return safe.Contains(head) - case Call: - return isCallSafe(head, safe) - default: - for v := range ref[0].Vars() { - if !safe.Contains(v) { - return false - } - } - return true - } -} - -func isCallSafe(call Call, safe VarSet) bool { - vis := 
NewVarVisitor().WithParams(SafetyCheckVisitorParams) - vis.Walk(call) - unsafe := vis.Vars().Diff(safe) - return len(unsafe) == 0 -} +import v1 "github.com/open-policy-agent/opa/v1/ast" // Unify returns a set of variables that will be unified when the equality expression defined by // terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already // unified. func Unify(safe VarSet, a *Term, b *Term) VarSet { - u := &unifier{ - safe: safe, - unified: VarSet{}, - unknown: map[Var]VarSet{}, - } - u.unify(a, b) - return u.unified -} - -type unifier struct { - safe VarSet - unified VarSet - unknown map[Var]VarSet -} - -func (u *unifier) isSafe(x Var) bool { - return u.safe.Contains(x) || u.unified.Contains(x) -} - -func (u *unifier) unify(a *Term, b *Term) { - - switch a := a.Value.(type) { - - case Var: - switch b := b.Value.(type) { - case Var: - if u.isSafe(b) { - u.markSafe(a) - } else if u.isSafe(a) { - u.markSafe(b) - } else { - u.markUnknown(a, b) - u.markUnknown(b, a) - } - case *Array, Object: - u.unifyAll(a, b) - case Ref: - if isRefSafe(b, u.safe) { - u.markSafe(a) - } - case Call: - if isCallSafe(b, u.safe) { - u.markSafe(a) - } - default: - u.markSafe(a) - } - - case Ref: - if isRefSafe(a, u.safe) { - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *Array, Object: - u.markAllSafe(b) - } - } - - case Call: - if isCallSafe(a, u.safe) { - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *Array, Object: - u.markAllSafe(b) - } - } - - case *ArrayComprehension: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *Array: - u.markAllSafe(b) - } - case *ObjectComprehension: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - case *object: - u.markAllSafe(b) - } - case *SetComprehension: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - } - - case *Array: - switch b := b.Value.(type) { - case Var: - u.unifyAll(b, a) - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - u.markAllSafe(a) - case Ref: - if isRefSafe(b, u.safe) { - u.markAllSafe(a) - } - case Call: - if isCallSafe(b, u.safe) { - u.markAllSafe(a) - } - case *Array: - if a.Len() == b.Len() { - for i := 0; i < a.Len(); i++ { - u.unify(a.Elem(i), b.Elem(i)) - } - } - } - - case *object: - switch b := b.Value.(type) { - case Var: - u.unifyAll(b, a) - case Ref: - if isRefSafe(b, u.safe) { - u.markAllSafe(a) - } - case Call: - if isCallSafe(b, u.safe) { - u.markAllSafe(a) - } - case *object: - if a.Len() == b.Len() { - _ = a.Iter(func(k, v *Term) error { - if v2 := b.Get(k); v2 != nil { - u.unify(v, v2) - } - return nil - }) // impossible to return error - } - } - - default: - switch b := b.Value.(type) { - case Var: - u.markSafe(b) - } - } -} - -func (u *unifier) markAllSafe(x Value) { - vis := u.varVisitor() - vis.Walk(x) - for v := range vis.Vars() { - u.markSafe(v) - } -} - -func (u *unifier) markSafe(x Var) { - u.unified.Add(x) - - // Add dependencies of 'x' to safe set - vs := u.unknown[x] - delete(u.unknown, x) - for v := range vs { - u.markSafe(v) - } - - // Add dependants of 'x' to safe set if they have no more - // dependencies. 
- for v, deps := range u.unknown { - if deps.Contains(x) { - delete(deps, x) - if len(deps) == 0 { - u.markSafe(v) - } - } - } -} - -func (u *unifier) markUnknown(a, b Var) { - if _, ok := u.unknown[a]; !ok { - u.unknown[a] = NewVarSet() - } - u.unknown[a].Add(b) -} - -func (u *unifier) unifyAll(a Var, b Value) { - if u.isSafe(a) { - u.markAllSafe(b) - } else { - vis := u.varVisitor() - vis.Walk(b) - unsafe := vis.Vars().Diff(u.safe).Diff(u.unified) - if len(unsafe) == 0 { - u.markSafe(a) - } else { - for v := range unsafe { - u.markUnknown(a, v) - } - } - } -} - -func (u *unifier) varVisitor() *VarVisitor { - return NewVarVisitor().WithParams(VarVisitorParams{ - SkipRefHead: true, - SkipObjectKeys: true, - SkipClosures: true, - }) + return v1.Unify(safe, a, b) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/varset.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/varset.go index 14f531494..9e7db8efd 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/varset.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/varset.go @@ -5,96 +5,13 @@ package ast import ( - "fmt" - "sort" + v1 "github.com/open-policy-agent/opa/v1/ast" ) // VarSet represents a set of variables. -type VarSet map[Var]struct{} +type VarSet = v1.VarSet // NewVarSet returns a new VarSet containing the specified variables. func NewVarSet(vs ...Var) VarSet { - s := VarSet{} - for _, v := range vs { - s.Add(v) - } - return s -} - -// Add updates the set to include the variable "v". -func (s VarSet) Add(v Var) { - s[v] = struct{}{} -} - -// Contains returns true if the set contains the variable "v". -func (s VarSet) Contains(v Var) bool { - _, ok := s[v] - return ok -} - -// Copy returns a shallow copy of the VarSet. -func (s VarSet) Copy() VarSet { - cpy := VarSet{} - for v := range s { - cpy.Add(v) - } - return cpy -} - -// Diff returns a VarSet containing variables in s that are not in vs. -func (s VarSet) Diff(vs VarSet) VarSet { - r := VarSet{} - for v := range s { - if !vs.Contains(v) { - r.Add(v) - } - } - return r -} - -// Equal returns true if s contains exactly the same elements as vs. -func (s VarSet) Equal(vs VarSet) bool { - if len(s.Diff(vs)) > 0 { - return false - } - return len(vs.Diff(s)) == 0 -} - -// Intersect returns a VarSet containing variables in s that are in vs. -func (s VarSet) Intersect(vs VarSet) VarSet { - r := VarSet{} - for v := range s { - if vs.Contains(v) { - r.Add(v) - } - } - return r -} - -// Sorted returns a sorted slice of vars from s. -func (s VarSet) Sorted() []Var { - sorted := make([]Var, 0, len(s)) - for v := range s { - sorted = append(sorted, v) - } - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].Compare(sorted[j]) < 0 - }) - return sorted -} - -// Update merges the other VarSet into this VarSet. -func (s VarSet) Update(vs VarSet) { - for v := range vs { - s.Add(v) - } -} - -func (s VarSet) String() string { - tmp := make([]string, 0, len(s)) - for v := range s { - tmp = append(tmp, string(v)) - } - sort.Strings(tmp) - return fmt.Sprintf("%v", tmp) + return v1.NewVarSet(vs...) 
} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/visit.go b/constraint/vendor/github.com/open-policy-agent/opa/ast/visit.go index d83c31149..94823c6cc 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/visit.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/ast/visit.go @@ -4,780 +4,120 @@ package ast +import v1 "github.com/open-policy-agent/opa/v1/ast" + // Visitor defines the interface for iterating AST elements. The Visit function // can return a Visitor w which will be used to visit the children of the AST // element v. If the Visit function returns nil, the children will not be // visited. // Deprecated: use GenericVisitor or another visitor implementation -type Visitor interface { - Visit(v interface{}) (w Visitor) -} +type Visitor = v1.Visitor // BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before // and after the AST has been visited. // Deprecated: use GenericVisitor or another visitor implementation -type BeforeAndAfterVisitor interface { - Visitor - Before(x interface{}) - After(x interface{}) -} +type BeforeAndAfterVisitor = v1.BeforeAndAfterVisitor // Walk iterates the AST by calling the Visit function on the Visitor // v for x before recursing. // Deprecated: use GenericVisitor.Walk func Walk(v Visitor, x interface{}) { - if bav, ok := v.(BeforeAndAfterVisitor); !ok { - walk(v, x) - } else { - bav.Before(x) - defer bav.After(x) - walk(bav, x) - } + v1.Walk(v, x) } // WalkBeforeAndAfter iterates the AST by calling the Visit function on the // Visitor v for x before recursing. // Deprecated: use GenericVisitor.Walk func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x interface{}) { - Walk(v, x) -} - -func walk(v Visitor, x interface{}) { - w := v.Visit(x) - if w == nil { - return - } - switch x := x.(type) { - case *Module: - Walk(w, x.Package) - for i := range x.Imports { - Walk(w, x.Imports[i]) - } - for i := range x.Rules { - Walk(w, x.Rules[i]) - } - for i := range x.Annotations { - Walk(w, x.Annotations[i]) - } - for i := range x.Comments { - Walk(w, x.Comments[i]) - } - case *Package: - Walk(w, x.Path) - case *Import: - Walk(w, x.Path) - Walk(w, x.Alias) - case *Rule: - Walk(w, x.Head) - Walk(w, x.Body) - if x.Else != nil { - Walk(w, x.Else) - } - case *Head: - Walk(w, x.Name) - Walk(w, x.Args) - if x.Key != nil { - Walk(w, x.Key) - } - if x.Value != nil { - Walk(w, x.Value) - } - case Body: - for i := range x { - Walk(w, x[i]) - } - case Args: - for i := range x { - Walk(w, x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - Walk(w, ts) - case []*Term: - for i := range ts { - Walk(w, ts[i]) - } - } - for i := range x.With { - Walk(w, x.With[i]) - } - case *With: - Walk(w, x.Target) - Walk(w, x.Value) - case *Term: - Walk(w, x.Value) - case Ref: - for i := range x { - Walk(w, x[i]) - } - case *object: - x.Foreach(func(k, vv *Term) { - Walk(w, k) - Walk(w, vv) - }) - case *Array: - x.Foreach(func(t *Term) { - Walk(w, t) - }) - case Set: - x.Foreach(func(t *Term) { - Walk(w, t) - }) - case *ArrayComprehension: - Walk(w, x.Term) - Walk(w, x.Body) - case *ObjectComprehension: - Walk(w, x.Key) - Walk(w, x.Value) - Walk(w, x.Body) - case *SetComprehension: - Walk(w, x.Term) - Walk(w, x.Body) - case Call: - for i := range x { - Walk(w, x[i]) - } - case *Every: - if x.Key != nil { - Walk(w, x.Key) - } - Walk(w, x.Value) - Walk(w, x.Domain) - Walk(w, x.Body) - case *SomeDecl: - for i := range x.Symbols { - Walk(w, x.Symbols[i]) - } - } + v1.WalkBeforeAndAfter(v, x) } // 
WalkVars calls the function f on all vars under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkVars(x interface{}, f func(Var) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if v, ok := x.(Var); ok { - return f(v) - } - return false - }} - vis.Walk(x) + v1.WalkVars(x, f) } // WalkClosures calls the function f on all closures under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkClosures(x interface{}, f func(interface{}) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - switch x := x.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: - return f(x) - } - return false - }} - vis.Walk(x) + v1.WalkClosures(x, f) } // WalkRefs calls the function f on all references under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkRefs(x interface{}, f func(Ref) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if r, ok := x.(Ref); ok { - return f(r) - } - return false - }} - vis.Walk(x) + v1.WalkRefs(x, f) } // WalkTerms calls the function f on all terms under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkTerms(x interface{}, f func(*Term) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if term, ok := x.(*Term); ok { - return f(term) - } - return false - }} - vis.Walk(x) + v1.WalkTerms(x, f) } // WalkWiths calls the function f on all with modifiers under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkWiths(x interface{}, f func(*With) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if w, ok := x.(*With); ok { - return f(w) - } - return false - }} - vis.Walk(x) + v1.WalkWiths(x, f) } // WalkExprs calls the function f on all expressions under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkExprs(x interface{}, f func(*Expr) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if r, ok := x.(*Expr); ok { - return f(r) - } - return false - }} - vis.Walk(x) + v1.WalkExprs(x, f) } // WalkBodies calls the function f on all bodies under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkBodies(x interface{}, f func(Body) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if b, ok := x.(Body); ok { - return f(b) - } - return false - }} - vis.Walk(x) + v1.WalkBodies(x, f) } // WalkRules calls the function f on all rules under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkRules(x interface{}, f func(*Rule) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if r, ok := x.(*Rule); ok { - stop := f(r) - // NOTE(tsandall): since rules cannot be embedded inside of queries - // we can stop early if there is no else block. - if stop || r.Else == nil { - return true - } - } - return false - }} - vis.Walk(x) + v1.WalkRules(x, f) } // WalkNodes calls the function f on all nodes under x. If the function f // returns true, AST nodes under the last node will not be visited. func WalkNodes(x interface{}, f func(Node) bool) { - vis := &GenericVisitor{func(x interface{}) bool { - if n, ok := x.(Node); ok { - return f(n) - } - return false - }} - vis.Walk(x) + v1.WalkNodes(x, f) } // GenericVisitor provides a utility to walk over AST nodes using a // closure. 
If the closure returns true, the visitor will not walk // over AST nodes under x. -type GenericVisitor struct { - f func(x interface{}) bool -} +type GenericVisitor = v1.GenericVisitor // NewGenericVisitor returns a new GenericVisitor that will invoke the function // f on AST nodes. func NewGenericVisitor(f func(x interface{}) bool) *GenericVisitor { - return &GenericVisitor{f} -} - -// Walk iterates the AST by calling the function f on the -// GenericVisitor before recursing. Contrary to the generic Walk, this -// does not require allocating the visitor from heap. -func (vis *GenericVisitor) Walk(x interface{}) { - if vis.f(x) { - return - } - - switch x := x.(type) { - case *Module: - vis.Walk(x.Package) - for i := range x.Imports { - vis.Walk(x.Imports[i]) - } - for i := range x.Rules { - vis.Walk(x.Rules[i]) - } - for i := range x.Annotations { - vis.Walk(x.Annotations[i]) - } - for i := range x.Comments { - vis.Walk(x.Comments[i]) - } - case *Package: - vis.Walk(x.Path) - case *Import: - vis.Walk(x.Path) - vis.Walk(x.Alias) - case *Rule: - vis.Walk(x.Head) - vis.Walk(x.Body) - if x.Else != nil { - vis.Walk(x.Else) - } - case *Head: - vis.Walk(x.Name) - vis.Walk(x.Args) - if x.Key != nil { - vis.Walk(x.Key) - } - if x.Value != nil { - vis.Walk(x.Value) - } - case Body: - for i := range x { - vis.Walk(x[i]) - } - case Args: - for i := range x { - vis.Walk(x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - vis.Walk(ts) - case []*Term: - for i := range ts { - vis.Walk(ts[i]) - } - } - for i := range x.With { - vis.Walk(x.With[i]) - } - case *With: - vis.Walk(x.Target) - vis.Walk(x.Value) - case *Term: - vis.Walk(x.Value) - case Ref: - for i := range x { - vis.Walk(x[i]) - } - case *object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case Object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case *Array: - x.Foreach(func(t *Term) { - vis.Walk(t) - }) - case Set: - xSlice := x.Slice() - for i := range xSlice { - vis.Walk(xSlice[i]) - } - case *ArrayComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case *ObjectComprehension: - vis.Walk(x.Key) - vis.Walk(x.Value) - vis.Walk(x.Body) - case *SetComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case Call: - for i := range x { - vis.Walk(x[i]) - } - case *Every: - if x.Key != nil { - vis.Walk(x.Key) - } - vis.Walk(x.Value) - vis.Walk(x.Domain) - vis.Walk(x.Body) - case *SomeDecl: - for i := range x.Symbols { - vis.Walk(x.Symbols[i]) - } - } + return v1.NewGenericVisitor(f) } // BeforeAfterVisitor provides a utility to walk over AST nodes using // closures. If the before closure returns true, the visitor will not // walk over AST nodes under x. The after closure is invoked always // after visiting a node. -type BeforeAfterVisitor struct { - before func(x interface{}) bool - after func(x interface{}) -} +type BeforeAfterVisitor = v1.BeforeAfterVisitor // NewBeforeAfterVisitor returns a new BeforeAndAfterVisitor that // will invoke the functions before and after AST nodes. func NewBeforeAfterVisitor(before func(x interface{}) bool, after func(x interface{})) *BeforeAfterVisitor { - return &BeforeAfterVisitor{before, after} -} - -// Walk iterates the AST by calling the functions on the -// BeforeAndAfterVisitor before and after recursing. Contrary to the -// generic Walk, this does not require allocating the visitor from -// heap. 
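A minimal sketch of the delegating WalkRefs helper above, assuming the aliased visitors behave like the upstream v1 code they now point at; the query string is illustrative, and note that the refs naming built-in operators are visited too.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	body := ast.MustParseBody(`input.user == data.admins[_]; count(data.admins) > 0`)

	// Collect every reference in the query, including built-in operator refs.
	var refs []ast.Ref
	ast.WalkRefs(body, func(r ast.Ref) bool {
		refs = append(refs, r)
		return false // keep descending into the ref's own terms
	})

	for _, r := range refs {
		fmt.Println(r)
	}
}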
-func (vis *BeforeAfterVisitor) Walk(x interface{}) { - defer vis.after(x) - if vis.before(x) { - return - } - - switch x := x.(type) { - case *Module: - vis.Walk(x.Package) - for i := range x.Imports { - vis.Walk(x.Imports[i]) - } - for i := range x.Rules { - vis.Walk(x.Rules[i]) - } - for i := range x.Annotations { - vis.Walk(x.Annotations[i]) - } - for i := range x.Comments { - vis.Walk(x.Comments[i]) - } - case *Package: - vis.Walk(x.Path) - case *Import: - vis.Walk(x.Path) - vis.Walk(x.Alias) - case *Rule: - vis.Walk(x.Head) - vis.Walk(x.Body) - if x.Else != nil { - vis.Walk(x.Else) - } - case *Head: - if len(x.Reference) > 0 { - vis.Walk(x.Reference) - } else { - vis.Walk(x.Name) - if x.Key != nil { - vis.Walk(x.Key) - } - } - vis.Walk(x.Args) - if x.Value != nil { - vis.Walk(x.Value) - } - case Body: - for i := range x { - vis.Walk(x[i]) - } - case Args: - for i := range x { - vis.Walk(x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - vis.Walk(ts) - case []*Term: - for i := range ts { - vis.Walk(ts[i]) - } - } - for i := range x.With { - vis.Walk(x.With[i]) - } - case *With: - vis.Walk(x.Target) - vis.Walk(x.Value) - case *Term: - vis.Walk(x.Value) - case Ref: - for i := range x { - vis.Walk(x[i]) - } - case *object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case Object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case *Array: - x.Foreach(func(t *Term) { - vis.Walk(t) - }) - case Set: - xSlice := x.Slice() - for i := range xSlice { - vis.Walk(xSlice[i]) - } - case *ArrayComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case *ObjectComprehension: - vis.Walk(x.Key) - vis.Walk(x.Value) - vis.Walk(x.Body) - case *SetComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case Call: - for i := range x { - vis.Walk(x[i]) - } - case *Every: - if x.Key != nil { - vis.Walk(x.Key) - } - vis.Walk(x.Value) - vis.Walk(x.Domain) - vis.Walk(x.Body) - case *SomeDecl: - for i := range x.Symbols { - vis.Walk(x.Symbols[i]) - } - } + return v1.NewBeforeAfterVisitor(before, after) } // VarVisitor walks AST nodes under a given node and collects all encountered // variables. The collected variables can be controlled by specifying // VarVisitorParams when creating the visitor. -type VarVisitor struct { - params VarVisitorParams - vars VarSet -} +type VarVisitor = v1.VarVisitor // VarVisitorParams contains settings for a VarVisitor. -type VarVisitorParams struct { - SkipRefHead bool - SkipRefCallHead bool - SkipObjectKeys bool - SkipClosures bool - SkipWithTarget bool - SkipSets bool -} +type VarVisitorParams = v1.VarVisitorParams // NewVarVisitor returns a new VarVisitor object. func NewVarVisitor() *VarVisitor { - return &VarVisitor{ - vars: NewVarSet(), - } -} - -// WithParams sets the parameters in params on vis. -func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor { - vis.params = params - return vis -} - -// Vars returns a VarSet that contains collected vars. 
-func (vis *VarVisitor) Vars() VarSet { - return vis.vars -} - -// visit determines if the VarVisitor will recurse into x: if it returns `true`, -// the visitor will _skip_ that branch of the AST -func (vis *VarVisitor) visit(v interface{}) bool { - if vis.params.SkipObjectKeys { - if o, ok := v.(Object); ok { - o.Foreach(func(_, v *Term) { - vis.Walk(v) - }) - return true - } - } - if vis.params.SkipRefHead { - if r, ok := v.(Ref); ok { - rSlice := r[1:] - for i := range rSlice { - vis.Walk(rSlice[i]) - } - return true - } - } - if vis.params.SkipClosures { - switch v := v.(type) { - case *ArrayComprehension, *ObjectComprehension, *SetComprehension: - return true - case *Expr: - if ev, ok := v.Terms.(*Every); ok { - vis.Walk(ev.Domain) - // We're _not_ walking ev.Body -- that's the closure here - return true - } - } - } - if vis.params.SkipWithTarget { - if v, ok := v.(*With); ok { - vis.Walk(v.Value) - return true - } - } - if vis.params.SkipSets { - if _, ok := v.(Set); ok { - return true - } - } - if vis.params.SkipRefCallHead { - switch v := v.(type) { - case *Expr: - if terms, ok := v.Terms.([]*Term); ok { - termSlice := terms[0].Value.(Ref)[1:] - for i := range termSlice { - vis.Walk(termSlice[i]) - } - for i := 1; i < len(terms); i++ { - vis.Walk(terms[i]) - } - for i := range v.With { - vis.Walk(v.With[i]) - } - return true - } - case Call: - operator := v[0].Value.(Ref) - for i := 1; i < len(operator); i++ { - vis.Walk(operator[i]) - } - for i := 1; i < len(v); i++ { - vis.Walk(v[i]) - } - return true - case *With: - if ref, ok := v.Target.Value.(Ref); ok { - refSlice := ref[1:] - for i := range refSlice { - vis.Walk(refSlice[i]) - } - } - if ref, ok := v.Value.Value.(Ref); ok { - refSlice := ref[1:] - for i := range refSlice { - vis.Walk(refSlice[i]) - } - } else { - vis.Walk(v.Value) - } - return true - } - } - if v, ok := v.(Var); ok { - vis.vars.Add(v) - } - return false -} - -// Walk iterates the AST by calling the function f on the -// GenericVisitor before recursing. Contrary to the generic Walk, this -// does not require allocating the visitor from heap. 
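A minimal sketch of collecting variables with the aliased VarVisitor and combining the result with VarSet operations, assuming the v1-backed WithParams, Walk and Vars methods referenced in this diff; the query and the "already bound" set are illustrative.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
)

func main() {
	body := ast.MustParseBody(`x = input.users[i]; x.role == "admin"`)

	vis := ast.NewVarVisitor().WithParams(ast.VarVisitorParams{
		SkipRefHead: true, // skip ref heads such as input, collect only the vars inside refs
	})
	vis.Walk(body)

	collected := vis.Vars()                         // the set {x, i}
	remaining := collected.Diff(ast.NewVarSet("x")) // vars not covered by the illustrative bound set
	fmt.Println(collected.Sorted(), remaining.Sorted())
}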
-func (vis *VarVisitor) Walk(x interface{}) { - if vis.visit(x) { - return - } - - switch x := x.(type) { - case *Module: - vis.Walk(x.Package) - for i := range x.Imports { - vis.Walk(x.Imports[i]) - } - for i := range x.Rules { - vis.Walk(x.Rules[i]) - } - for i := range x.Comments { - vis.Walk(x.Comments[i]) - } - case *Package: - vis.Walk(x.Path) - case *Import: - vis.Walk(x.Path) - vis.Walk(x.Alias) - case *Rule: - vis.Walk(x.Head) - vis.Walk(x.Body) - if x.Else != nil { - vis.Walk(x.Else) - } - case *Head: - if len(x.Reference) > 0 { - vis.Walk(x.Reference) - } else { - vis.Walk(x.Name) - if x.Key != nil { - vis.Walk(x.Key) - } - } - vis.Walk(x.Args) - - if x.Value != nil { - vis.Walk(x.Value) - } - case Body: - for i := range x { - vis.Walk(x[i]) - } - case Args: - for i := range x { - vis.Walk(x[i]) - } - case *Expr: - switch ts := x.Terms.(type) { - case *Term, *SomeDecl, *Every: - vis.Walk(ts) - case []*Term: - for i := range ts { - vis.Walk(ts[i]) - } - } - for i := range x.With { - vis.Walk(x.With[i]) - } - case *With: - vis.Walk(x.Target) - vis.Walk(x.Value) - case *Term: - vis.Walk(x.Value) - case Ref: - for i := range x { - vis.Walk(x[i]) - } - case *object: - x.Foreach(func(k, _ *Term) { - vis.Walk(k) - vis.Walk(x.Get(k)) - }) - case *Array: - x.Foreach(func(t *Term) { - vis.Walk(t) - }) - case Set: - xSlice := x.Slice() - for i := range xSlice { - vis.Walk(xSlice[i]) - } - case *ArrayComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case *ObjectComprehension: - vis.Walk(x.Key) - vis.Walk(x.Value) - vis.Walk(x.Body) - case *SetComprehension: - vis.Walk(x.Term) - vis.Walk(x.Body) - case Call: - for i := range x { - vis.Walk(x[i]) - } - case *Every: - if x.Key != nil { - vis.Walk(x.Key) - } - vis.Walk(x.Value) - vis.Walk(x.Domain) - vis.Walk(x.Body) - case *SomeDecl: - for i := range x.Symbols { - vis.Walk(x.Symbols[i]) - } - } + return v1.NewVarVisitor() } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/bundle/bundle.go b/constraint/vendor/github.com/open-policy-agent/opa/bundle/bundle.go index 816f5535f..50ad97349 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/bundle/bundle.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/bundle/bundle.go @@ -6,1380 +6,97 @@ package bundle import ( - "archive/tar" - "bytes" - "compress/gzip" - "encoding/hex" - "encoding/json" - "errors" - "fmt" "io" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "strings" - "github.com/gobwas/glob" "github.com/open-policy-agent/opa/ast" - astJSON "github.com/open-policy-agent/opa/ast/json" - "github.com/open-policy-agent/opa/format" - "github.com/open-policy-agent/opa/internal/file/archive" - "github.com/open-policy-agent/opa/internal/merge" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // Common file extensions and file names. 
const ( - RegoExt = ".rego" - WasmFile = "policy.wasm" - PlanFile = "plan.json" - ManifestExt = ".manifest" - SignaturesFile = "signatures.json" - patchFile = "patch.json" - dataFile = "data.json" - yamlDataFile = "data.yaml" - ymlDataFile = "data.yml" - defaultHashingAlg = "SHA-256" - DefaultSizeLimitBytes = (1024 * 1024 * 1024) // limit bundle reads to 1GB to protect against gzip bombs - DeltaBundleType = "delta" - SnapshotBundleType = "snapshot" + RegoExt = v1.RegoExt + WasmFile = v1.WasmFile + PlanFile = v1.PlanFile + ManifestExt = v1.ManifestExt + SignaturesFile = v1.SignaturesFile + + DefaultSizeLimitBytes = v1.DefaultSizeLimitBytes + DeltaBundleType = v1.DeltaBundleType + SnapshotBundleType = v1.SnapshotBundleType ) // Bundle represents a loaded bundle. The bundle can contain data and policies. -type Bundle struct { - Signatures SignaturesConfig - Manifest Manifest - Data map[string]interface{} - Modules []ModuleFile - Wasm []byte // Deprecated. Use WasmModules instead - WasmModules []WasmModuleFile - PlanModules []PlanModuleFile - Patch Patch - Etag string - Raw []Raw - - lazyLoadingMode bool - sizeLimitBytes int64 -} +type Bundle = v1.Bundle // Raw contains raw bytes representing the bundle's content -type Raw struct { - Path string - Value []byte -} +type Raw = v1.Raw // Patch contains an array of objects wherein each object represents the patch operation to be // applied to the bundle data. -type Patch struct { - Data []PatchOperation `json:"data,omitempty"` -} +type Patch = v1.Patch // PatchOperation models a single patch operation against a document. -type PatchOperation struct { - Op string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value"` -} +type PatchOperation = v1.PatchOperation // SignaturesConfig represents an array of JWTs that encapsulate the signatures for the bundle. -type SignaturesConfig struct { - Signatures []string `json:"signatures,omitempty"` - Plugin string `json:"plugin,omitempty"` -} - -// isEmpty returns if the SignaturesConfig is empty. -func (s SignaturesConfig) isEmpty() bool { - return reflect.DeepEqual(s, SignaturesConfig{}) -} +type SignaturesConfig = v1.SignaturesConfig // DecodedSignature represents the decoded JWT payload. -type DecodedSignature struct { - Files []FileInfo `json:"files"` - KeyID string `json:"keyid"` // Deprecated, use kid in the JWT header instead. - Scope string `json:"scope"` - IssuedAt int64 `json:"iat"` - Issuer string `json:"iss"` -} +type DecodedSignature = v1.DecodedSignature // FileInfo contains the hashing algorithm used, resulting digest etc. -type FileInfo struct { - Name string `json:"name"` - Hash string `json:"hash"` - Algorithm string `json:"algorithm"` -} +type FileInfo = v1.FileInfo // NewFile returns a new FileInfo. func NewFile(name, hash, alg string) FileInfo { - return FileInfo{ - Name: name, - Hash: hash, - Algorithm: alg, - } + return v1.NewFile(name, hash, alg) } // Manifest represents the manifest from a bundle. The manifest may contain // metadata such as the bundle revision. -type Manifest struct { - Revision string `json:"revision"` - Roots *[]string `json:"roots,omitempty"` - WasmResolvers []WasmResolver `json:"wasm,omitempty"` - // RegoVersion is the global Rego version for the bundle described by this Manifest. - // The Rego version of individual files can be overridden in FileRegoVersions. - // We don't use ast.RegoVersion here, as this iota type's order isn't guaranteed to be stable over time. 
- // We use a pointer so that we can support hand-made bundles that don't have an explicit version appropriately. - // E.g. in OPA 0.x if --v1-compatible is used when consuming the bundle, and there is no specified version, - // we should default to v1; if --v1-compatible isn't used, we should default to v0. In OPA 1.0, no --x-compatible - // flag and no explicit bundle version should default to v1. - RegoVersion *int `json:"rego_version,omitempty"` - // FileRegoVersions is a map from file paths to Rego versions. - // This allows individual files to override the global Rego version specified by RegoVersion. - FileRegoVersions map[string]int `json:"file_rego_versions,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - - compiledFileRegoVersions []fileRegoVersion -} - -type fileRegoVersion struct { - path glob.Glob - version int -} +type Manifest = v1.Manifest // WasmResolver maps a wasm module to an entrypoint ref. -type WasmResolver struct { - Entrypoint string `json:"entrypoint,omitempty"` - Module string `json:"module,omitempty"` - Annotations []*ast.Annotations `json:"annotations,omitempty"` -} - -// Init initializes the manifest. If you instantiate a manifest -// manually, call Init to ensure that the roots are set properly. -func (m *Manifest) Init() { - if m.Roots == nil { - defaultRoots := []string{""} - m.Roots = &defaultRoots - } -} - -// AddRoot adds r to the roots of m. This function is idempotent. -func (m *Manifest) AddRoot(r string) { - m.Init() - if !RootPathsContain(*m.Roots, r) { - *m.Roots = append(*m.Roots, r) - } -} - -func (m *Manifest) SetRegoVersion(v ast.RegoVersion) { - m.Init() - regoVersion := 0 - if v == ast.RegoV1 { - regoVersion = 1 - } - m.RegoVersion = ®oVersion -} - -// Equal returns true if m is semantically equivalent to other. -func (m Manifest) Equal(other Manifest) bool { - - // This is safe since both are passed by value. - m.Init() - other.Init() - - if m.Revision != other.Revision { - return false - } - - if m.RegoVersion == nil && other.RegoVersion != nil { - return false - } - if m.RegoVersion != nil && other.RegoVersion == nil { - return false - } - if m.RegoVersion != nil && other.RegoVersion != nil && *m.RegoVersion != *other.RegoVersion { - return false - } - if !reflect.DeepEqual(m.FileRegoVersions, other.FileRegoVersions) { - return false - } - - if !reflect.DeepEqual(m.Metadata, other.Metadata) { - return false - } - - return m.equalWasmResolversAndRoots(other) -} - -func (m Manifest) Empty() bool { - return m.Equal(Manifest{}) -} - -// Copy returns a deep copy of the manifest. 
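A minimal sketch of building a Manifest through the alias above, assuming the Init, AddRoot, SetRegoVersion and Equal methods now provided by the upstream v1 type; the revision and root values are illustrative.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	roots := []string{"policies/authz"}
	m := bundle.Manifest{Revision: "2024-01-15", Roots: &roots}

	m.AddRoot("data/exemptions") // appended: not covered by the existing root
	m.AddRoot("policies/authz")  // no-op: this root is already present
	m.SetRegoVersion(ast.RegoV1) // records rego_version: 1 in the manifest

	fmt.Println(*m.Roots)                   // [policies/authz data/exemptions]
	fmt.Println(m.Equal(bundle.Manifest{})) // false
}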
-func (m Manifest) Copy() Manifest { - m.Init() - roots := make([]string, len(*m.Roots)) - copy(roots, *m.Roots) - m.Roots = &roots - - wasmModules := make([]WasmResolver, len(m.WasmResolvers)) - copy(wasmModules, m.WasmResolvers) - m.WasmResolvers = wasmModules - - metadata := m.Metadata - - if metadata != nil { - m.Metadata = make(map[string]interface{}) - for k, v := range metadata { - m.Metadata[k] = v - } - } - - return m -} - -func (m Manifest) String() string { - m.Init() - if m.RegoVersion != nil { - return fmt.Sprintf("", - m.Revision, *m.RegoVersion, *m.Roots, m.WasmResolvers, m.Metadata) - } - return fmt.Sprintf("", - m.Revision, *m.Roots, m.WasmResolvers, m.Metadata) -} - -func (m Manifest) rootSet() stringSet { - rs := map[string]struct{}{} - - for _, r := range *m.Roots { - rs[r] = struct{}{} - } - - return stringSet(rs) -} - -func (m Manifest) equalWasmResolversAndRoots(other Manifest) bool { - if len(m.WasmResolvers) != len(other.WasmResolvers) { - return false - } - - for i := 0; i < len(m.WasmResolvers); i++ { - if !m.WasmResolvers[i].Equal(&other.WasmResolvers[i]) { - return false - } - } - - return m.rootSet().Equal(other.rootSet()) -} - -func (wr *WasmResolver) Equal(other *WasmResolver) bool { - if wr == nil && other == nil { - return true - } - - if wr == nil || other == nil { - return false - } - - if wr.Module != other.Module { - return false - } - - if wr.Entrypoint != other.Entrypoint { - return false - } - - annotLen := len(wr.Annotations) - if annotLen != len(other.Annotations) { - return false - } - - for i := 0; i < annotLen; i++ { - if wr.Annotations[i].Compare(other.Annotations[i]) != 0 { - return false - } - } - - return true -} - -type stringSet map[string]struct{} - -func (ss stringSet) Equal(other stringSet) bool { - if len(ss) != len(other) { - return false - } - for k := range other { - if _, ok := ss[k]; !ok { - return false - } - } - return true -} - -func (m *Manifest) validateAndInjectDefaults(b Bundle) error { - - m.Init() - - // Validate roots in bundle. - roots := *m.Roots - - // Standardize the roots (no starting or trailing slash) - for i := range roots { - roots[i] = strings.Trim(roots[i], "/") - } - - for i := 0; i < len(roots)-1; i++ { - for j := i + 1; j < len(roots); j++ { - if RootPathsOverlap(roots[i], roots[j]) { - return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j]) - } - } - } - - // Validate modules in bundle. 
- for _, module := range b.Modules { - found := false - if path, err := module.Parsed.Package.Path.Ptr(); err == nil { - found = RootPathsContain(roots, path) - } - if !found { - return fmt.Errorf("manifest roots %v do not permit '%v' in module '%v'", roots, module.Parsed.Package, module.Path) - } - } - - // Build a set of wasm module entrypoints to validate - wasmModuleToEps := map[string]string{} - seenEps := map[string]struct{}{} - for _, wm := range b.WasmModules { - wasmModuleToEps[wm.Path] = "" - } - - for _, wmConfig := range b.Manifest.WasmResolvers { - _, ok := wasmModuleToEps[wmConfig.Module] - if !ok { - return fmt.Errorf("manifest references wasm module '%s' but the module file does not exist", wmConfig.Module) - } - - // Ensure wasm module entrypoint in within bundle roots - if !RootPathsContain(roots, wmConfig.Entrypoint) { - return fmt.Errorf("manifest roots %v do not permit '%v' entrypoint for wasm module '%v'", roots, wmConfig.Entrypoint, wmConfig.Module) - } - - if _, ok := seenEps[wmConfig.Entrypoint]; ok { - return fmt.Errorf("entrypoint '%s' cannot be used by more than one wasm module", wmConfig.Entrypoint) - } - seenEps[wmConfig.Entrypoint] = struct{}{} - - wasmModuleToEps[wmConfig.Module] = wmConfig.Entrypoint - } - - // Validate data patches in bundle. - for _, patch := range b.Patch.Data { - path := strings.Trim(patch.Path, "/") - if !RootPathsContain(roots, path) { - return fmt.Errorf("manifest roots %v do not permit data patch at path '%s'", roots, path) - } - } - - if b.lazyLoadingMode { - return nil - } - - // Validate data in bundle. - return dfs(b.Data, "", func(path string, node interface{}) (bool, error) { - path = strings.Trim(path, "/") - if RootPathsContain(roots, path) { - return true, nil - } - - if _, ok := node.(map[string]interface{}); ok { - for i := range roots { - if RootPathsContain(strings.Split(path, "/"), roots[i]) { - return false, nil - } - } - } - return false, fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, path) - }) -} +type WasmResolver = v1.WasmResolver // ModuleFile represents a single module contained in a bundle. -type ModuleFile struct { - URL string - Path string - RelativePath string - Raw []byte - Parsed *ast.Module -} +type ModuleFile = v1.ModuleFile // WasmModuleFile represents a single wasm module contained in a bundle. -type WasmModuleFile struct { - URL string - Path string - Entrypoints []ast.Ref - Raw []byte -} +type WasmModuleFile = v1.WasmModuleFile // PlanModuleFile represents a single plan module contained in a bundle. // // NOTE(tsandall): currently the plans are just opaque binary blobs. In the // future we could inject the entrypoints so that the plans could be executed // inside of OPA proper like we do for Wasm modules. -type PlanModuleFile struct { - URL string - Path string - Raw []byte -} +type PlanModuleFile = v1.PlanModuleFile // Reader contains the reader to load the bundle from. -type Reader struct { - loader DirectoryLoader - includeManifestInData bool - metrics metrics.Metrics - baseDir string - verificationConfig *VerificationConfig - skipVerify bool - processAnnotations bool - jsonOptions *astJSON.Options - capabilities *ast.Capabilities - files map[string]FileInfo // files in the bundle signature payload - sizeLimitBytes int64 - etag string - lazyLoadingMode bool - name string - persist bool - regoVersion ast.RegoVersion - followSymlinks bool -} +type Reader = v1.Reader // NewReader is deprecated. Use NewCustomReader instead. 
func NewReader(r io.Reader) *Reader { - return NewCustomReader(NewTarballLoader(r)) + return v1.NewReader(r).WithRegoVersion(ast.DefaultRegoVersion) } // NewCustomReader returns a new Reader configured to use the // specified DirectoryLoader. func NewCustomReader(loader DirectoryLoader) *Reader { - nr := Reader{ - loader: loader, - metrics: metrics.New(), - files: make(map[string]FileInfo), - sizeLimitBytes: DefaultSizeLimitBytes + 1, - } - return &nr -} - -// IncludeManifestInData sets whether the manifest metadata should be -// included in the bundle's data. -func (r *Reader) IncludeManifestInData(includeManifestInData bool) *Reader { - r.includeManifestInData = includeManifestInData - return r -} - -// WithMetrics sets the metrics object to be used while loading bundles -func (r *Reader) WithMetrics(m metrics.Metrics) *Reader { - r.metrics = m - return r -} - -// WithBaseDir sets a base directory for file paths of loaded Rego -// modules. This will *NOT* affect the loaded path of data files. -func (r *Reader) WithBaseDir(dir string) *Reader { - r.baseDir = dir - return r -} - -// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle -func (r *Reader) WithBundleVerificationConfig(config *VerificationConfig) *Reader { - r.verificationConfig = config - return r -} - -// WithSkipBundleVerification skips verification of a signed bundle -func (r *Reader) WithSkipBundleVerification(skipVerify bool) *Reader { - r.skipVerify = skipVerify - return r -} - -// WithProcessAnnotations enables annotation processing during .rego file parsing. -func (r *Reader) WithProcessAnnotations(yes bool) *Reader { - r.processAnnotations = yes - return r -} - -// WithCapabilities sets the supported capabilities when loading the files -func (r *Reader) WithCapabilities(caps *ast.Capabilities) *Reader { - r.capabilities = caps - return r -} - -// WithJSONOptions sets the JSONOptions to use when parsing policy files -func (r *Reader) WithJSONOptions(opts *astJSON.Options) *Reader { - r.jsonOptions = opts - return r -} - -// WithSizeLimitBytes sets the size limit to apply to files in the bundle. If files are larger -// than this, an error will be returned by the reader. -func (r *Reader) WithSizeLimitBytes(n int64) *Reader { - r.sizeLimitBytes = n + 1 - return r -} - -// WithBundleEtag sets the given etag value on the bundle -func (r *Reader) WithBundleEtag(etag string) *Reader { - r.etag = etag - return r -} - -// WithBundleName specifies the bundle name -func (r *Reader) WithBundleName(name string) *Reader { - r.name = name - return r -} - -func (r *Reader) WithFollowSymlinks(yes bool) *Reader { - r.followSymlinks = yes - return r -} - -// WithLazyLoadingMode sets the bundle loading mode. If true, -// bundles will be read in lazy mode. In this mode, data files in the bundle will not be -// deserialized and the check to validate that the bundle data does not contain paths -// outside the bundle's roots will not be performed while reading the bundle. -func (r *Reader) WithLazyLoadingMode(yes bool) *Reader { - r.lazyLoadingMode = yes - return r -} - -// WithBundlePersistence specifies if the downloaded bundle will eventually be persisted to disk. 
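A minimal sketch of loading a bundle tarball through the aliased Reader, assuming the v1-backed builder methods referenced above (NewTarballLoader, WithProcessAnnotations, WithSizeLimitBytes, Read); "bundle.tar.gz" and the 64 MiB cap are illustrative.

package main

import (
	"fmt"
	"os"

	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	f, err := os.Open("bundle.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	b, err := bundle.NewCustomReader(bundle.NewTarballLoader(f)).
		WithProcessAnnotations(true).
		WithSizeLimitBytes(64 * 1024 * 1024). // reject anything larger than 64 MiB
		Read()
	if err != nil {
		panic(err)
	}

	fmt.Println(b.Manifest.Revision, len(b.Modules), len(b.Data))
}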
-func (r *Reader) WithBundlePersistence(persist bool) *Reader { - r.persist = persist - return r -} - -func (r *Reader) WithRegoVersion(version ast.RegoVersion) *Reader { - r.regoVersion = version - return r -} - -func (r *Reader) ParserOptions() ast.ParserOptions { - return ast.ParserOptions{ - ProcessAnnotation: r.processAnnotations, - Capabilities: r.capabilities, - JSONOptions: r.jsonOptions, - RegoVersion: r.regoVersion, - } -} - -// Read returns a new Bundle loaded from the reader. -func (r *Reader) Read() (Bundle, error) { - - var bundle Bundle - var descriptors []*Descriptor - var err error - var raw []Raw - - bundle.Signatures, bundle.Patch, descriptors, err = preProcessBundle(r.loader, r.skipVerify, r.sizeLimitBytes) - if err != nil { - return bundle, err - } - - bundle.lazyLoadingMode = r.lazyLoadingMode - bundle.sizeLimitBytes = r.sizeLimitBytes - - if bundle.Type() == SnapshotBundleType { - err = r.checkSignaturesAndDescriptors(bundle.Signatures) - if err != nil { - return bundle, err - } - - bundle.Data = map[string]interface{}{} - } - - var modules []ModuleFile - for _, f := range descriptors { - buf, err := readFile(f, r.sizeLimitBytes) - if err != nil { - return bundle, err - } - - // verify the file content - if bundle.Type() == SnapshotBundleType && !bundle.Signatures.isEmpty() { - path := f.Path() - if r.baseDir != "" { - path = f.URL() - } - path = strings.TrimPrefix(path, "/") - - // check if the file is to be excluded from bundle verification - if r.isFileExcluded(path) { - delete(r.files, path) - } else { - if err = r.verifyBundleFile(path, buf); err != nil { - return bundle, err - } - } - } - - // Normalize the paths to use `/` separators - path := filepath.ToSlash(f.Path()) - - if strings.HasSuffix(path, RegoExt) { - fullPath := r.fullPath(path) - bs := buf.Bytes() - - if r.lazyLoadingMode { - p := fullPath - if r.name != "" { - p = modulePathWithPrefix(r.name, fullPath) - } - - raw = append(raw, Raw{Path: p, Value: bs}) - } - - // Modules are parsed after we've had a chance to read the manifest - mf := ModuleFile{ - URL: f.URL(), - Path: fullPath, - RelativePath: path, - Raw: bs, - } - modules = append(modules, mf) - } else if filepath.Base(path) == WasmFile { - bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{ - URL: f.URL(), - Path: r.fullPath(path), - Raw: buf.Bytes(), - }) - } else if filepath.Base(path) == PlanFile { - bundle.PlanModules = append(bundle.PlanModules, PlanModuleFile{ - URL: f.URL(), - Path: r.fullPath(path), - Raw: buf.Bytes(), - }) - } else if filepath.Base(path) == dataFile { - if r.lazyLoadingMode { - raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) - continue - } - - var value interface{} - - r.metrics.Timer(metrics.RegoDataParse).Start() - err := util.UnmarshalJSON(buf.Bytes(), &value) - r.metrics.Timer(metrics.RegoDataParse).Stop() - - if err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) - } - - if err := insertValue(&bundle, path, value); err != nil { - return bundle, err - } - - } else if filepath.Base(path) == yamlDataFile || filepath.Base(path) == ymlDataFile { - if r.lazyLoadingMode { - raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) - continue - } - - var value interface{} - - r.metrics.Timer(metrics.RegoDataParse).Start() - err := util.Unmarshal(buf.Bytes(), &value) - r.metrics.Timer(metrics.RegoDataParse).Stop() - - if err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) - } - - if err := insertValue(&bundle, path, 
value); err != nil { - return bundle, err - } - - } else if strings.HasSuffix(path, ManifestExt) { - if err := util.NewJSONDecoder(&buf).Decode(&bundle.Manifest); err != nil { - return bundle, fmt.Errorf("bundle load failed on manifest decode: %w", err) - } - } - } - - // Parse modules - popts := r.ParserOptions() - popts.RegoVersion = bundle.RegoVersion(popts.RegoVersion) - for _, mf := range modules { - modulePopts := popts - if modulePopts.RegoVersion, err = bundle.RegoVersionForFile(mf.RelativePath, popts.RegoVersion); err != nil { - return bundle, err - } - r.metrics.Timer(metrics.RegoModuleParse).Start() - mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, string(mf.Raw), modulePopts) - r.metrics.Timer(metrics.RegoModuleParse).Stop() - if err != nil { - return bundle, err - } - bundle.Modules = append(bundle.Modules, mf) - } - - if bundle.Type() == DeltaBundleType { - if len(bundle.Data) != 0 { - return bundle, fmt.Errorf("delta bundle expected to contain only patch file but data files found") - } - - if len(bundle.Modules) != 0 { - return bundle, fmt.Errorf("delta bundle expected to contain only patch file but policy files found") - } - - if len(bundle.WasmModules) != 0 { - return bundle, fmt.Errorf("delta bundle expected to contain only patch file but wasm files found") - } - - if r.persist { - return bundle, fmt.Errorf("'persist' property is true in config. persisting delta bundle to disk is not supported") - } - } - - // check if the bundle signatures specify any files that weren't found in the bundle - if bundle.Type() == SnapshotBundleType && len(r.files) != 0 { - extra := []string{} - for k := range r.files { - extra = append(extra, k) - } - return bundle, fmt.Errorf("file(s) %v specified in bundle signatures but not found in the target bundle", extra) - } - - if err := bundle.Manifest.validateAndInjectDefaults(bundle); err != nil { - return bundle, err - } - - // Inject the wasm module entrypoint refs into the WasmModuleFile structs - epMap := map[string][]string{} - for _, r := range bundle.Manifest.WasmResolvers { - epMap[r.Module] = append(epMap[r.Module], r.Entrypoint) - } - for i := 0; i < len(bundle.WasmModules); i++ { - entrypoints := epMap[bundle.WasmModules[i].Path] - for _, entrypoint := range entrypoints { - ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint) - if err != nil { - return bundle, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err) - } - bundle.WasmModules[i].Entrypoints = append(bundle.WasmModules[i].Entrypoints, ref) - } - } - - if r.includeManifestInData { - var metadata map[string]interface{} - - b, err := json.Marshal(&bundle.Manifest) - if err != nil { - return bundle, fmt.Errorf("bundle load failed on manifest marshal: %w", err) - } - - err = util.UnmarshalJSON(b, &metadata) - if err != nil { - return bundle, fmt.Errorf("bundle load failed on manifest unmarshal: %w", err) - } - - // For backwards compatibility always write to the old unnamed manifest path - // This will *not* be correct if >1 bundle is in use... 
- if err := bundle.insertData(legacyManifestStoragePath, metadata); err != nil { - return bundle, fmt.Errorf("bundle load failed on %v: %w", legacyRevisionStoragePath, err) - } - } - - bundle.Etag = r.etag - bundle.Raw = raw - - return bundle, nil -} - -func (r *Reader) isFileExcluded(path string) bool { - for _, e := range r.verificationConfig.Exclude { - match, _ := filepath.Match(e, path) - if match { - return true - } - } - return false -} - -func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) error { - if r.skipVerify { - return nil - } - - if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" { - return fmt.Errorf("bundle missing .signatures.json file") - } - - if !signatures.isEmpty() { - if r.verificationConfig == nil { - return fmt.Errorf("verification key not provided") - } - - // verify the JWT signatures included in the `.signatures.json` file - if err := r.verifyBundleSignature(signatures); err != nil { - return err - } - } - return nil -} - -func (r *Reader) verifyBundleSignature(sc SignaturesConfig) error { - var err error - r.files, err = VerifyBundleSignature(sc, r.verificationConfig) - return err -} - -func (r *Reader) verifyBundleFile(path string, data bytes.Buffer) error { - return VerifyBundleFile(path, data, r.files) -} - -func (r *Reader) fullPath(path string) string { - if r.baseDir != "" { - path = filepath.Join(r.baseDir, path) - } - return path + return v1.NewCustomReader(loader).WithRegoVersion(ast.DefaultRegoVersion) } // Write is deprecated. Use NewWriter instead. func Write(w io.Writer, bundle Bundle) error { - return NewWriter(w). - UseModulePath(true). - DisableFormat(true). - Write(bundle) + return v1.Write(w, bundle) } // Writer implements bundle serialization. -type Writer struct { - usePath bool - disableFormat bool - w io.Writer -} +type Writer = v1.Writer // NewWriter returns a bundle writer that writes to w. func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - } -} - -// UseModulePath configures the writer to use the module file path instead of the -// module file URL during serialization. This is for backwards compatibility. -func (w *Writer) UseModulePath(yes bool) *Writer { - w.usePath = yes - return w -} - -// DisableFormat configures the writer to just write out raw bytes instead -// of formatting modules before serialization. -func (w *Writer) DisableFormat(yes bool) *Writer { - w.disableFormat = yes - return w -} - -// Write writes the bundle to the writer's output stream. 
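A minimal sketch of serializing a Bundle with the aliased Writer and reading it back, assuming the v1-backed NewWriter, UseModulePath and Write shown above; the module path and policy source are illustrative.

package main

import (
	"bytes"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/bundle"
)

func main() {
	src := "package example\n\nallow := true\n"
	b := bundle.Bundle{
		Manifest: bundle.Manifest{Revision: "abc123"},
		Data:     map[string]interface{}{},
		Modules: []bundle.ModuleFile{{
			URL:    "/example/policy.rego",
			Path:   "/example/policy.rego",
			Raw:    []byte(src),
			Parsed: ast.MustParseModule(src),
		}},
	}

	var buf bytes.Buffer
	if err := bundle.NewWriter(&buf).UseModulePath(true).Write(b); err != nil {
		panic(err)
	}

	roundTripped, err := bundle.NewReader(&buf).Read()
	if err != nil {
		panic(err)
	}
	fmt.Println(roundTripped.Manifest.Revision, len(roundTripped.Modules))
}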
-func (w *Writer) Write(bundle Bundle) error { - gw := gzip.NewWriter(w.w) - tw := tar.NewWriter(gw) - - bundleType := bundle.Type() - - if bundleType == SnapshotBundleType { - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Data); err != nil { - return err - } - - if err := archive.WriteFile(tw, "data.json", buf.Bytes()); err != nil { - return err - } - - for _, module := range bundle.Modules { - path := module.URL - if w.usePath { - path = module.Path - } - - if err := archive.WriteFile(tw, path, module.Raw); err != nil { - return err - } - } - - if err := w.writeWasm(tw, bundle); err != nil { - return err - } - - if err := writeSignatures(tw, bundle); err != nil { - return err - } - - if err := w.writePlan(tw, bundle); err != nil { - return err - } - } else if bundleType == DeltaBundleType { - if err := writePatch(tw, bundle); err != nil { - return err - } - } - - if err := writeManifest(tw, bundle); err != nil { - return err - } - - if err := tw.Close(); err != nil { - return err - } - - return gw.Close() -} - -func (w *Writer) writeWasm(tw *tar.Writer, bundle Bundle) error { - for _, wm := range bundle.WasmModules { - path := wm.URL - if w.usePath { - path = wm.Path - } - - err := archive.WriteFile(tw, path, wm.Raw) - if err != nil { - return err - } - } - - if len(bundle.Wasm) > 0 { - err := archive.WriteFile(tw, "/"+WasmFile, bundle.Wasm) - if err != nil { - return err - } - } - - return nil -} - -func (w *Writer) writePlan(tw *tar.Writer, bundle Bundle) error { - for _, wm := range bundle.PlanModules { - path := wm.URL - if w.usePath { - path = wm.Path - } - - err := archive.WriteFile(tw, path, wm.Raw) - if err != nil { - return err - } - } - - return nil -} - -func writeManifest(tw *tar.Writer, bundle Bundle) error { - - if bundle.Manifest.Empty() { - return nil - } - - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Manifest); err != nil { - return err - } - - return archive.WriteFile(tw, ManifestExt, buf.Bytes()) -} - -func writePatch(tw *tar.Writer, bundle Bundle) error { - - var buf bytes.Buffer - - if err := json.NewEncoder(&buf).Encode(bundle.Patch); err != nil { - return err - } - - return archive.WriteFile(tw, patchFile, buf.Bytes()) -} - -func writeSignatures(tw *tar.Writer, bundle Bundle) error { - - if bundle.Signatures.isEmpty() { - return nil - } - - bs, err := json.MarshalIndent(bundle.Signatures, "", " ") - if err != nil { - return err - } - - return archive.WriteFile(tw, fmt.Sprintf(".%v", SignaturesFile), bs) -} - -func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) { - - files := []FileInfo{} - - bs, err := hash.HashFile(b.Data) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix("data.json", "/"), hex.EncodeToString(bs), defaultHashingAlg)) - - if len(b.Wasm) != 0 { - bs, err := hash.HashFile(b.Wasm) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix(WasmFile, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - - for _, wasmModule := range b.WasmModules { - bs, err := hash.HashFile(wasmModule.Raw) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix(wasmModule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - - for _, planmodule := range b.PlanModules { - bs, err := hash.HashFile(planmodule.Raw) - if err != nil { - return files, err - } - files = append(files, NewFile(strings.TrimPrefix(planmodule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - 
- // If the manifest is essentially empty, don't add it to the signatures since it - // won't be written to the bundle. Otherwise: - // parse the manifest into a JSON structure; - // then recursively order the fields of all objects alphabetically and then apply - // the hash function to result to compute the hash. - if !b.Manifest.Empty() { - mbs, err := json.Marshal(b.Manifest) - if err != nil { - return files, err - } - - var result map[string]interface{} - if err := util.Unmarshal(mbs, &result); err != nil { - return files, err - } - - bs, err = hash.HashFile(result) - if err != nil { - return files, err - } - - files = append(files, NewFile(strings.TrimPrefix(ManifestExt, "/"), hex.EncodeToString(bs), defaultHashingAlg)) - } - - return files, err -} - -// FormatModules formats Rego modules -// Modules will be formatted to comply with rego-v0, but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported). -func (b *Bundle) FormatModules(useModulePath bool) error { - return b.FormatModulesForRegoVersion(ast.RegoV0, true, useModulePath) -} - -// FormatModulesForRegoVersion formats Rego modules to comply with a given Rego version -func (b *Bundle) FormatModulesForRegoVersion(version ast.RegoVersion, preserveModuleRegoVersion bool, useModulePath bool) error { - var err error - - for i, module := range b.Modules { - opts := format.Opts{} - if preserveModuleRegoVersion { - opts.RegoVersion = module.Parsed.RegoVersion() - } else { - opts.RegoVersion = version - } - - if module.Raw == nil { - module.Raw, err = format.AstWithOpts(module.Parsed, opts) - if err != nil { - return err - } - } else { - path := module.URL - if useModulePath { - path = module.Path - } - - module.Raw, err = format.SourceWithOpts(path, module.Raw, opts) - if err != nil { - return err - } - } - b.Modules[i].Raw = module.Raw - } - return nil -} - -// GenerateSignature generates the signature for the given bundle. -func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, useModulePath bool) error { - - hash, err := NewSignatureHasher(HashingAlgorithm(defaultHashingAlg)) - if err != nil { - return err - } - - files := []FileInfo{} - - for _, module := range b.Modules { - bytes, err := hash.HashFile(module.Raw) - if err != nil { - return err - } - - path := module.URL - if useModulePath { - path = module.Path - } - files = append(files, NewFile(strings.TrimPrefix(path, "/"), hex.EncodeToString(bytes), defaultHashingAlg)) - } - - result, err := hashBundleFiles(hash, b) - if err != nil { - return err - } - files = append(files, result...) - - // generate signed token - token, err := GenerateSignedToken(files, signingConfig, keyID) - if err != nil { - return err - } - - if b.Signatures.isEmpty() { - b.Signatures = SignaturesConfig{} - } - - if signingConfig.Plugin != "" { - b.Signatures.Plugin = signingConfig.Plugin - } - - b.Signatures.Signatures = []string{token} - - return nil -} - -// ParsedModules returns a map of parsed modules with names that are -// unique and human readable for the given a bundle name. 
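The signing helpers in this hunk keep their old entry points; only the implementation moves to the v1 package. For reference, a minimal sketch of signing an in-memory bundle through this compatibility layer (the PEM key, key ID and log call are illustrative, not part of this change):

package main

import (
	"log"

	"github.com/open-policy-agent/opa/bundle"
)

// signExample signs b with an RSA private key supplied as a PEM string.
func signExample(b *bundle.Bundle, pemKey string) error {
	// An empty algorithm would fall back to the package default (RS256).
	sc := bundle.NewSigningConfig(pemKey, "RS256", "")

	// "mykey" becomes the "keyid" claim of the generated JWT; false means
	// module URLs rather than paths are recorded in the signed file list.
	if err := b.GenerateSignature(sc, "mykey", false); err != nil {
		return err
	}
	log.Printf("generated %d signature(s)", len(b.Signatures.Signatures))
	return nil
}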
-func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module { - - mods := make(map[string]*ast.Module, len(b.Modules)) - - for _, mf := range b.Modules { - mods[modulePathWithPrefix(bundleName, mf.Path)] = mf.Parsed - } - - return mods -} - -func (b *Bundle) RegoVersion(def ast.RegoVersion) ast.RegoVersion { - if v := b.Manifest.RegoVersion; v != nil { - if *v == 0 { - return ast.RegoV0 - } else if *v == 1 { - return ast.RegoV1 - } - } - return def -} - -func (b *Bundle) SetRegoVersion(v ast.RegoVersion) { - b.Manifest.SetRegoVersion(v) -} - -// RegoVersionForFile returns the rego-version for the specified file path. -// If there is no defined version for the given path, the default version def is returned. -// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned. -func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) { - version, err := b.Manifest.numericRegoVersionForFile(path) - if err != nil { - return def, err - } else if version == nil { - return def, nil - } else if *version == 0 { - return ast.RegoV0, nil - } else if *version == 1 { - return ast.RegoV1, nil - } - return def, fmt.Errorf("unknown bundle rego-version %d for file '%s'", *version, path) -} - -func (m *Manifest) numericRegoVersionForFile(path string) (*int, error) { - var version *int - - if len(m.FileRegoVersions) != len(m.compiledFileRegoVersions) { - m.compiledFileRegoVersions = make([]fileRegoVersion, 0, len(m.FileRegoVersions)) - for pattern, v := range m.FileRegoVersions { - compiled, err := glob.Compile(pattern) - if err != nil { - return nil, fmt.Errorf("failed to compile glob pattern %s: %s", pattern, err) - } - m.compiledFileRegoVersions = append(m.compiledFileRegoVersions, fileRegoVersion{compiled, v}) - } - } - - for _, fv := range m.compiledFileRegoVersions { - if fv.path.Match(path) { - version = &fv.version - break - } - } - - if version == nil { - version = m.RegoVersion - } - return version, nil -} - -// Equal returns true if this bundle's contents equal the other bundle's -// contents. -func (b Bundle) Equal(other Bundle) bool { - if !reflect.DeepEqual(b.Data, other.Data) { - return false - } - - if len(b.Modules) != len(other.Modules) { - return false - } - for i := range b.Modules { - // To support bundles built from rootless filesystems we ignore a "/" prefix - // for URLs and Paths, such that "/file" and "file" are equivalent - if strings.TrimPrefix(b.Modules[i].URL, string(filepath.Separator)) != - strings.TrimPrefix(other.Modules[i].URL, string(filepath.Separator)) { - return false - } - if strings.TrimPrefix(b.Modules[i].Path, string(filepath.Separator)) != - strings.TrimPrefix(other.Modules[i].Path, string(filepath.Separator)) { - return false - } - if !b.Modules[i].Parsed.Equal(other.Modules[i].Parsed) { - return false - } - if !bytes.Equal(b.Modules[i].Raw, other.Modules[i].Raw) { - return false - } - } - if (b.Wasm == nil && other.Wasm != nil) || (b.Wasm != nil && other.Wasm == nil) { - return false - } - - return bytes.Equal(b.Wasm, other.Wasm) -} - -// Copy returns a deep copy of the bundle. -func (b Bundle) Copy() Bundle { - - // Copy data. - var x interface{} = b.Data - - if err := util.RoundTrip(&x); err != nil { - panic(err) - } - - if x != nil { - b.Data = x.(map[string]interface{}) - } - - // Copy modules. 
- for i := range b.Modules { - bs := make([]byte, len(b.Modules[i].Raw)) - copy(bs, b.Modules[i].Raw) - b.Modules[i].Raw = bs - b.Modules[i].Parsed = b.Modules[i].Parsed.Copy() - } - - // Copy manifest. - b.Manifest = b.Manifest.Copy() - - return b -} - -func (b *Bundle) insertData(key []string, value interface{}) error { - // Build an object with the full structure for the value - obj, err := mktree(key, value) - if err != nil { - return err - } - - // Merge the new data in with the current bundle data object - merged, ok := merge.InterfaceMaps(b.Data, obj) - if !ok { - return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...)) - } - - b.Data = merged - - return nil -} - -func (b *Bundle) readData(key []string) *interface{} { - - if len(key) == 0 { - if len(b.Data) == 0 { - return nil - } - var result interface{} = b.Data - return &result - } - - node := b.Data - - for i := 0; i < len(key)-1; i++ { - - child, ok := node[key[i]] - if !ok { - return nil - } - - childObj, ok := child.(map[string]interface{}) - if !ok { - return nil - } - - node = childObj - } - - child, ok := node[key[len(key)-1]] - if !ok { - return nil - } - - return &child -} - -// Type returns the type of the bundle. -func (b *Bundle) Type() string { - if len(b.Patch.Data) != 0 { - return DeltaBundleType - } - return SnapshotBundleType -} - -func mktree(path []string, value interface{}) (map[string]interface{}, error) { - if len(path) == 0 { - // For 0 length path the value is the full tree. - obj, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("root value must be object") - } - return obj, nil - } - - dir := map[string]interface{}{} - for i := len(path) - 1; i > 0; i-- { - dir[path[i]] = value - value = dir - dir = map[string]interface{}{} - } - dir[path[0]] = value - - return dir, nil + return v1.NewWriter(w) } // Merge accepts a set of bundles and merges them into a single result bundle. If there are @@ -1387,7 +104,7 @@ func mktree(path []string, value interface{}) (map[string]interface{}, error) { // will have an empty revision except in the special case where a single bundle is provided // (and in that case the bundle is just returned unmodified.) func Merge(bundles []*Bundle) (*Bundle, error) { - return MergeWithRegoVersion(bundles, ast.RegoV0, false) + return MergeWithRegoVersion(bundles, ast.DefaultRegoVersion, false) } // MergeWithRegoVersion creates a merged bundle from the provided bundles, similar to Merge. @@ -1399,348 +116,19 @@ func Merge(bundles []*Bundle) (*Bundle, error) { // If usePath is true, per-file rego-versions will be calculated using the file's ModuleFile.Path; otherwise, the file's // ModuleFile.URL will be used. func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePath bool) (*Bundle, error) { - - if len(bundles) == 0 { - return nil, errors.New("expected at least one bundle") - } - - if len(bundles) == 1 { - result := bundles[0] - // We respect the bundle rego-version, defaulting to the provided rego version if not set. 
- result.SetRegoVersion(result.RegoVersion(regoVersion)) - fileRegoVersions, err := bundleRegoVersions(result, result.RegoVersion(regoVersion), usePath) - if err != nil { - return nil, err - } - result.Manifest.FileRegoVersions = fileRegoVersions - return result, nil + if regoVersion == ast.RegoUndefined { + regoVersion = ast.DefaultRegoVersion } - var roots []string - var result Bundle - - for _, b := range bundles { - - if b.Manifest.Roots == nil { - return nil, errors.New("bundle manifest not initialized") - } - - roots = append(roots, *b.Manifest.Roots...) - - result.Modules = append(result.Modules, b.Modules...) - - for _, root := range *b.Manifest.Roots { - key := strings.Split(root, "/") - if val := b.readData(key); val != nil { - if err := result.insertData(key, *val); err != nil { - return nil, err - } - } - } - - result.Manifest.WasmResolvers = append(result.Manifest.WasmResolvers, b.Manifest.WasmResolvers...) - result.WasmModules = append(result.WasmModules, b.WasmModules...) - result.PlanModules = append(result.PlanModules, b.PlanModules...) - - if b.Manifest.RegoVersion != nil || len(b.Manifest.FileRegoVersions) > 0 { - if result.Manifest.FileRegoVersions == nil { - result.Manifest.FileRegoVersions = map[string]int{} - } - - fileRegoVersions, err := bundleRegoVersions(b, regoVersion, usePath) - if err != nil { - return nil, err - } - for k, v := range fileRegoVersions { - result.Manifest.FileRegoVersions[k] = v - } - } - } - - // We respect the bundle rego-version, defaulting to the provided rego version if not set. - result.SetRegoVersion(result.RegoVersion(regoVersion)) - - if result.Data == nil { - result.Data = map[string]interface{}{} - } - - result.Manifest.Roots = &roots - - if err := result.Manifest.validateAndInjectDefaults(result); err != nil { - return nil, err - } - - return &result, nil -} - -func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath bool) (map[string]int, error) { - fileRegoVersions := map[string]int{} - - // we drop the bundle-global rego versions and record individual rego versions for each module. - for _, m := range bundle.Modules { - // We fetch rego-version by the path relative to the bundle root, as the complete path of the module might - // contain the path between OPA working directory and the bundle root. - v, err := bundle.RegoVersionForFile(bundleRelativePath(m, usePath), bundle.RegoVersion(regoVersion)) - if err != nil { - return nil, err - } - // only record the rego version if it's different from one applied globally to the result bundle - if v != regoVersion { - // We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path - // to the module inside the merged bundle. - fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int() - } - } - - return fileRegoVersions, nil -} - -func bundleRelativePath(m ModuleFile, usePath bool) string { - p := m.RelativePath - if p == "" { - if usePath { - p = m.Path - } else { - p = m.URL - } - } - return p -} - -func bundleAbsolutePath(m ModuleFile, usePath bool) string { - var p string - if usePath { - p = m.Path - } else { - p = m.URL - } - if !path.IsAbs(p) { - p = "/" + p - } - return path.Clean(p) + return v1.MergeWithRegoVersion(bundles, regoVersion, usePath) } // RootPathsOverlap takes in two bundle root paths and returns true if they overlap. 
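One behavioural detail of this hunk worth noting: Merge previously pinned the result to ast.RegoV0 and now defaults to ast.DefaultRegoVersion, while MergeWithRegoVersion maps ast.RegoUndefined to that default before delegating. A hedged sketch of pinning the merged result explicitly (the two input bundles are assumed to be already loaded):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/bundle"
)

// mergeV0 merges two bundles and pins the result to Rego v0 syntax;
// calling bundle.Merge instead would now apply ast.DefaultRegoVersion.
func mergeV0(a, b *bundle.Bundle) error {
	merged, err := bundle.MergeWithRegoVersion([]*bundle.Bundle{a, b}, ast.RegoV0, false)
	if err != nil {
		return err
	}
	fmt.Println(*merged.Manifest.Roots)
	return nil
}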
func RootPathsOverlap(pathA string, pathB string) bool { - a := rootPathSegments(pathA) - b := rootPathSegments(pathB) - return rootContains(a, b) || rootContains(b, a) + return v1.RootPathsOverlap(pathA, pathB) } // RootPathsContain takes a set of bundle root paths and returns true if the path is contained. func RootPathsContain(roots []string, path string) bool { - segments := rootPathSegments(path) - for i := range roots { - if rootContains(rootPathSegments(roots[i]), segments) { - return true - } - } - return false -} - -func rootPathSegments(path string) []string { - return strings.Split(path, "/") -} - -func rootContains(root []string, other []string) bool { - - // A single segment, empty string root always contains the other. - if len(root) == 1 && root[0] == "" { - return true - } - - if len(root) > len(other) { - return false - } - - for j := range root { - if root[j] != other[j] { - return false - } - } - - return true -} - -func insertValue(b *Bundle, path string, value interface{}) error { - if err := b.insertData(getNormalizedPath(path), value); err != nil { - return fmt.Errorf("bundle load failed on %v: %w", path, err) - } - return nil -} - -func getNormalizedPath(path string) []string { - // Remove leading / and . characters from the directory path. If the bundle - // was written with OPA then the paths will contain a leading slash. On the - // other hand, if the path is empty, filepath.Dir will return '.'. - // Note: filepath.Dir can return paths with '\' separators, always use - // filepath.ToSlash to keep them normalized. - dirpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.") - var key []string - if dirpath != "" { - key = strings.Split(dirpath, "/") - } - return key -} - -func dfs(value interface{}, path string, fn func(string, interface{}) (bool, error)) error { - if stop, err := fn(path, value); err != nil { - return err - } else if stop { - return nil - } - obj, ok := value.(map[string]interface{}) - if !ok { - return nil - } - for key := range obj { - if err := dfs(obj[key], path+"/"+key, fn); err != nil { - return err - } - } - return nil -} - -func modulePathWithPrefix(bundleName string, modulePath string) string { - // Default prefix is just the bundle name - prefix := bundleName - - // Bundle names are sometimes just file paths, some of which - // are full urls (file:///foo/). Parse these and only use the path. - parsed, err := url.Parse(bundleName) - if err == nil { - prefix = filepath.Join(parsed.Host, parsed.Path) - } - - // Note: filepath.Join can return paths with '\' separators, always use - // filepath.ToSlash to keep them normalized. - return normalizePath(filepath.Join(prefix, modulePath)) -} - -// IsStructuredDoc checks if the file name equals a structured file extension ex. 
".json" -func IsStructuredDoc(name string) bool { - return filepath.Base(name) == dataFile || filepath.Base(name) == yamlDataFile || - filepath.Base(name) == SignaturesFile || filepath.Base(name) == ManifestExt -} - -func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (SignaturesConfig, Patch, []*Descriptor, error) { - descriptors := []*Descriptor{} - var signatures SignaturesConfig - var patch Patch - - for { - f, err := loader.NextFile() - if err == io.EOF { - break - } - - if err != nil { - return signatures, patch, nil, fmt.Errorf("bundle read failed: %w", err) - } - - // check for the signatures file - if !skipVerify && strings.HasSuffix(f.Path(), SignaturesFile) { - buf, err := readFile(f, sizeLimitBytes) - if err != nil { - return signatures, patch, nil, err - } - - if err := util.NewJSONDecoder(&buf).Decode(&signatures); err != nil { - return signatures, patch, nil, fmt.Errorf("bundle load failed on signatures decode: %w", err) - } - } else if !strings.HasSuffix(f.Path(), SignaturesFile) { - descriptors = append(descriptors, f) - - if filepath.Base(f.Path()) == patchFile { - - var b bytes.Buffer - tee := io.TeeReader(f.reader, &b) - f.reader = tee - - buf, err := readFile(f, sizeLimitBytes) - if err != nil { - return signatures, patch, nil, err - } - - if err := util.NewJSONDecoder(&buf).Decode(&patch); err != nil { - return signatures, patch, nil, fmt.Errorf("bundle load failed on patch decode: %w", err) - } - - f.reader = &b - } - } - } - return signatures, patch, descriptors, nil -} - -func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) { - // Case for pre-loaded byte buffers, like those from the tarballLoader. - if bb, ok := f.reader.(*bytes.Buffer); ok { - _ = f.Close() // always close, even on error - - if int64(bb.Len()) >= sizeLimitBytes { - return *bb, fmt.Errorf("bundle file '%v' size (%d bytes) exceeded max size (%v bytes)", - strings.TrimPrefix(f.Path(), "/"), bb.Len(), sizeLimitBytes-1) - } - - return *bb, nil - } - - // Case for *lazyFile readers: - if lf, ok := f.reader.(*lazyFile); ok { - var buf bytes.Buffer - if lf.file == nil { - var err error - if lf.file, err = os.Open(lf.path); err != nil { - return buf, fmt.Errorf("failed to open file %s: %w", f.path, err) - } - } - // Bail out if we can't read the whole file-- there's nothing useful we can do at that point! - fileSize, _ := fstatFileSize(lf.file) - if fileSize > sizeLimitBytes { - return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), fileSize, sizeLimitBytes-1) - } - // Prealloc the buffer for the file read. - buffer := make([]byte, fileSize) - _, err := io.ReadFull(lf.file, buffer) - if err != nil { - return buf, err - } - _ = lf.file.Close() // always close, even on error - - // Note(philipc): Replace the lazyFile reader in the *Descriptor with a - // pointer to the wrapping bytes.Buffer, so that we don't re-read the - // file on disk again by accident. - buf = *bytes.NewBuffer(buffer) - f.reader = &buf - return buf, nil - } - - // Fallback case: - var buf bytes.Buffer - n, err := f.Read(&buf, sizeLimitBytes) - _ = f.Close() // always close, even on error - - if err != nil && err != io.EOF { - return buf, err - } else if err == nil && n >= sizeLimitBytes { - return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), n, sizeLimitBytes-1) - } - - return buf, nil -} - -// Takes an already open file handle and invokes the os.Stat system call on it -// to determine the file's size. 
Passes any errors from *File.Stat on up to the -// caller. -func fstatFileSize(f *os.File) (int64, error) { - fileInfo, err := f.Stat() - if err != nil { - return 0, err - } - return fileInfo.Size(), nil -} - -func normalizePath(p string) string { - return filepath.ToSlash(p) + return v1.RootPathsContain(roots, path) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/bundle/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/bundle/doc.go new file mode 100644 index 000000000..7ec7c9b33 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/bundle/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package bundle diff --git a/constraint/vendor/github.com/open-policy-agent/opa/bundle/file.go b/constraint/vendor/github.com/open-policy-agent/opa/bundle/file.go index 80b1a87eb..ccb7b2351 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/bundle/file.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/bundle/file.go @@ -1,508 +1,50 @@ package bundle import ( - "archive/tar" - "bytes" - "compress/gzip" - "fmt" "io" - "io/fs" - "os" - "path/filepath" - "sort" - "strings" - "sync" - - "github.com/open-policy-agent/opa/loader/filter" "github.com/open-policy-agent/opa/storage" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) -const maxSizeLimitBytesErrMsg = "bundle file %s size (%d bytes) exceeds configured size_limit_bytes (%d bytes)" - // Descriptor contains information about a file and // can be used to read the file contents. -type Descriptor struct { - url string - path string - reader io.Reader - closer io.Closer - closeOnce *sync.Once -} - -// lazyFile defers reading the file until the first call of Read -type lazyFile struct { - path string - file *os.File -} - -// newLazyFile creates a new instance of lazyFile -func newLazyFile(path string) *lazyFile { - return &lazyFile{path: path} -} - -// Read implements io.Reader. It will check if the file has been opened -// and open it if it has not before attempting to read using the file's -// read method -func (f *lazyFile) Read(b []byte) (int, error) { - var err error - - if f.file == nil { - if f.file, err = os.Open(f.path); err != nil { - return 0, fmt.Errorf("failed to open file %s: %w", f.path, err) - } - } - - return f.file.Read(b) -} - -// Close closes the lazy file if it has been opened using the file's -// close method -func (f *lazyFile) Close() error { - if f.file != nil { - return f.file.Close() - } - - return nil -} +type Descriptor = v1.Descriptor func NewDescriptor(url, path string, reader io.Reader) *Descriptor { - return &Descriptor{ - url: url, - path: path, - reader: reader, - } -} - -func (d *Descriptor) WithCloser(closer io.Closer) *Descriptor { - d.closer = closer - d.closeOnce = new(sync.Once) - return d -} - -// Path returns the path of the file. -func (d *Descriptor) Path() string { - return d.path -} - -// URL returns the url of the file. 
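Descriptor becomes a type alias here, but its read contract is unchanged. An illustrative sketch wrapping an in-memory reader (the URL, path and 1024-byte limit are made up):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/open-policy-agent/opa/bundle"
)

func descriptorExample() error {
	d := bundle.NewDescriptor("file:///policy.rego", "/policy.rego",
		strings.NewReader("package example\n"))
	defer d.Close()

	var buf bytes.Buffer
	// Read copies at most 1024 bytes; io.EOF here only means the content
	// was shorter than the limit.
	if _, err := d.Read(&buf, 1024); err != nil && !errors.Is(err, io.EOF) {
		return err
	}
	fmt.Println(d.Path(), buf.Len())
	return nil
}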
-func (d *Descriptor) URL() string { - return d.url -} - -// Read will read all the contents from the file the Descriptor refers to -// into the dest writer up n bytes. Will return an io.EOF error -// if EOF is encountered before n bytes are read. -func (d *Descriptor) Read(dest io.Writer, n int64) (int64, error) { - n, err := io.CopyN(dest, d.reader, n) - return n, err + return v1.NewDescriptor(url, path, reader) } -// Close the file, on some Loader implementations this might be a no-op. -// It should *always* be called regardless of file. -func (d *Descriptor) Close() error { - var err error - if d.closer != nil { - d.closeOnce.Do(func() { - err = d.closer.Close() - }) - } - return err -} - -type PathFormat int64 +type PathFormat = v1.PathFormat const ( - Chrooted PathFormat = iota - SlashRooted - Passthrough + Chrooted = v1.Chrooted + SlashRooted = v1.SlashRooted + Passthrough = v1.Passthrough ) // DirectoryLoader defines an interface which can be used to load // files from a directory by iterating over each one in the tree. -type DirectoryLoader interface { - // NextFile must return io.EOF if there is no next value. The returned - // descriptor should *always* be closed when no longer needed. - NextFile() (*Descriptor, error) - WithFilter(filter filter.LoaderFilter) DirectoryLoader - WithPathFormat(PathFormat) DirectoryLoader - WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader - WithFollowSymlinks(followSymlinks bool) DirectoryLoader -} - -type dirLoader struct { - root string - files []string - idx int - filter filter.LoaderFilter - pathFormat PathFormat - maxSizeLimitBytes int64 - followSymlinks bool -} - -// Normalize root directory, ex "./src/bundle" -> "src/bundle" -// We don't need an absolute path, but this makes the joined/trimmed -// paths more uniform. -func normalizeRootDirectory(root string) string { - if len(root) > 1 { - if root[0] == '.' && root[1] == filepath.Separator { - if len(root) == 2 { - root = root[:1] // "./" -> "." - } else { - root = root[2:] // remove leading "./" - } - } - } - return root -} +type DirectoryLoader = v1.DirectoryLoader // NewDirectoryLoader returns a basic DirectoryLoader implementation // that will load files from a given root directory path. 
func NewDirectoryLoader(root string) DirectoryLoader { - d := dirLoader{ - root: normalizeRootDirectory(root), - pathFormat: Chrooted, - } - return &d -} - -// WithFilter specifies the filter object to use to filter files while loading bundles -func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { - d.filter = filter - return d -} - -// WithPathFormat specifies how a path is formatted in a Descriptor -func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { - d.pathFormat = pathFormat - return d -} - -// WithSizeLimitBytes specifies the maximum size of any file in the directory to read -func (d *dirLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { - d.maxSizeLimitBytes = sizeLimitBytes - return d -} - -// WithFollowSymlinks specifies whether to follow symlinks when loading files from the directory -func (d *dirLoader) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { - d.followSymlinks = followSymlinks - return d -} - -func formatPath(fileName string, root string, pathFormat PathFormat) string { - switch pathFormat { - case SlashRooted: - if !strings.HasPrefix(fileName, string(filepath.Separator)) { - return string(filepath.Separator) + fileName - } - return fileName - case Chrooted: - // Trim off the root directory and return path as if chrooted - result := strings.TrimPrefix(fileName, filepath.FromSlash(root)) - if root == "." && filepath.Base(fileName) == ManifestExt { - result = fileName - } - if !strings.HasPrefix(result, string(filepath.Separator)) { - result = string(filepath.Separator) + result - } - return result - case Passthrough: - fallthrough - default: - return fileName - } -} - -// NextFile iterates to the next file in the directory tree -// and returns a file Descriptor for the file. 
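The loader constructors above now return the v1 implementations, but the NextFile iteration contract is the same. A sketch of walking a local bundle directory (the "./bundle" path and size limit are illustrative):

package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/open-policy-agent/opa/bundle"
)

func listBundleFiles() error {
	dl := bundle.NewDirectoryLoader("./bundle").
		WithSizeLimitBytes(1 << 20) // reject files over 1 MiB

	for {
		f, err := dl.NextFile()
		if errors.Is(err, io.EOF) {
			return nil // no more files
		}
		if err != nil {
			return err
		}
		fmt.Println(f.Path())
		f.Close() // descriptors should always be closed
	}
}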
-func (d *dirLoader) NextFile() (*Descriptor, error) { - // build a list of all files we will iterate over and read, but only one time - if d.files == nil { - d.files = []string{} - err := filepath.Walk(d.root, func(path string, info os.FileInfo, _ error) error { - if info == nil { - return nil - } - - if info.Mode().IsRegular() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) - } - d.files = append(d.files, path) - } else if d.followSymlinks && info.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) - } - d.files = append(d.files, path) - } else if info.Mode().IsDir() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { - return filepath.SkipDir - } - } - return nil - }) - if err != nil { - return nil, fmt.Errorf("failed to list files: %w", err) - } - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if d.idx >= len(d.files) { - return nil, io.EOF - } - - fileName := d.files[d.idx] - d.idx++ - fh := newLazyFile(fileName) - - cleanedPath := formatPath(fileName, d.root, d.pathFormat) - f := NewDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).WithCloser(fh) - return f, nil -} - -type tarballLoader struct { - baseURL string - r io.Reader - tr *tar.Reader - files []file - idx int - filter filter.LoaderFilter - skipDir map[string]struct{} - pathFormat PathFormat - maxSizeLimitBytes int64 -} - -type file struct { - name string - reader io.Reader - path storage.Path - raw []byte + return v1.NewDirectoryLoader(root) } // NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead. func NewTarballLoader(r io.Reader) DirectoryLoader { - l := tarballLoader{ - r: r, - pathFormat: Passthrough, - } - return &l + return v1.NewTarballLoader(r) } // NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads // files out of a gzipped tar archive. The file URLs will be prefixed // with the baseURL. func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader { - l := tarballLoader{ - baseURL: strings.TrimSuffix(baseURL, "/"), - r: r, - pathFormat: Passthrough, - } - return &l -} - -// WithFilter specifies the filter object to use to filter files while loading bundles -func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { - t.filter = filter - return t -} - -// WithPathFormat specifies how a path is formatted in a Descriptor -func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { - t.pathFormat = pathFormat - return t -} - -// WithSizeLimitBytes specifies the maximum size of any file in the tarball to read -func (t *tarballLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { - t.maxSizeLimitBytes = sizeLimitBytes - return t -} - -// WithFollowSymlinks is a no-op for tarballLoader -func (t *tarballLoader) WithFollowSymlinks(_ bool) DirectoryLoader { - return t -} - -// NextFile iterates to the next file in the directory tree -// and returns a file Descriptor for the file. 
-func (t *tarballLoader) NextFile() (*Descriptor, error) { - if t.tr == nil { - gr, err := gzip.NewReader(t.r) - if err != nil { - return nil, fmt.Errorf("archive read failed: %w", err) - } - - t.tr = tar.NewReader(gr) - } - - if t.files == nil { - t.files = []file{} - - if t.skipDir == nil { - t.skipDir = map[string]struct{}{} - } - - for { - header, err := t.tr.Next() - - if err == io.EOF { - break - } - - if err != nil { - return nil, err - } - - // Keep iterating on the archive until we find a normal file - if header.Typeflag == tar.TypeReg { - - if t.filter != nil { - - if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), getdepth(header.Name, false)) { - continue - } - - basePath := strings.Trim(filepath.Dir(filepath.ToSlash(header.Name)), "/") - - // check if the directory is to be skipped - if _, ok := t.skipDir[basePath]; ok { - continue - } - - match := false - for p := range t.skipDir { - if strings.HasPrefix(basePath, p) { - match = true - break - } - } - - if match { - continue - } - } - - if t.maxSizeLimitBytes > 0 && header.Size > t.maxSizeLimitBytes { - return nil, fmt.Errorf(maxSizeLimitBytesErrMsg, header.Name, header.Size, t.maxSizeLimitBytes) - } - - f := file{name: header.Name} - - // Note(philipc): We rely on the previous size check in this loop for safety. - buf := bytes.NewBuffer(make([]byte, 0, header.Size)) - if _, err := io.Copy(buf, t.tr); err != nil { - return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err) - } - - f.reader = buf - - t.files = append(t.files, f) - } else if header.Typeflag == tar.TypeDir { - cleanedPath := filepath.ToSlash(header.Name) - if t.filter != nil && t.filter(cleanedPath, header.FileInfo(), getdepth(header.Name, true)) { - t.skipDir[strings.Trim(cleanedPath, "/")] = struct{}{} - } - } - } - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if t.idx >= len(t.files) { - return nil, io.EOF - } - - f := t.files[t.idx] - t.idx++ - - cleanedPath := formatPath(f.name, "", t.pathFormat) - d := NewDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader) - return d, nil -} - -// Next implements the storage.Iterator interface. -// It iterates to the next policy or data file in the directory tree -// and returns a storage.Update for the file. 
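The tarball loader delegates the same way; only the construction differs. A sketch for a gzipped tar archive on disk (file name and base URL are illustrative, and the caller still owns closing the file):

package main

import (
	"os"

	"github.com/open-policy-agent/opa/bundle"
)

// openTarballLoader returns a DirectoryLoader over bundle.tar.gz whose
// descriptor URLs are prefixed with the given base URL; iteration then
// proceeds exactly as in the directory loader sketch above.
func openTarballLoader() (bundle.DirectoryLoader, *os.File, error) {
	f, err := os.Open("bundle.tar.gz")
	if err != nil {
		return nil, nil, err
	}
	return bundle.NewTarballLoaderWithBaseURL(f, "file:///bundles/example"), f, nil
}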
-func (it *iterator) Next() (*storage.Update, error) { - if it.files == nil { - it.files = []file{} - - for _, item := range it.raw { - f := file{name: item.Path} - - fpath := strings.TrimLeft(normalizePath(filepath.Dir(f.name)), "/.") - if strings.HasSuffix(f.name, RegoExt) { - fpath = strings.Trim(normalizePath(f.name), "/") - } - - p, ok := storage.ParsePathEscaped("/" + fpath) - if !ok { - return nil, fmt.Errorf("storage path invalid: %v", f.name) - } - f.path = p - - f.raw = item.Value - - it.files = append(it.files, f) - } - - sortFilePathAscend(it.files) - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if it.idx >= len(it.files) { - return nil, io.EOF - } - - f := it.files[it.idx] - it.idx++ - - isPolicy := false - if strings.HasSuffix(f.name, RegoExt) { - isPolicy = true - } - - return &storage.Update{ - Path: f.path, - Value: f.raw, - IsPolicy: isPolicy, - }, nil -} - -type iterator struct { - raw []Raw - files []file - idx int + return v1.NewTarballLoaderWithBaseURL(r, baseURL) } func NewIterator(raw []Raw) storage.Iterator { - it := iterator{ - raw: raw, - } - return &it -} - -func sortFilePathAscend(files []file) { - sort.Slice(files, func(i, j int) bool { - return len(files[i].path) < len(files[j].path) - }) -} - -func getdepth(path string, isDir bool) int { - if isDir { - cleanedPath := strings.Trim(filepath.ToSlash(path), "/") - return len(strings.Split(cleanedPath, "/")) - } - - basePath := strings.Trim(filepath.Dir(filepath.ToSlash(path)), "/") - return len(strings.Split(basePath, "/")) + return v1.NewIterator(raw) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/bundle/filefs.go b/constraint/vendor/github.com/open-policy-agent/opa/bundle/filefs.go index a3a0dbf20..16e00928d 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/bundle/filefs.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/bundle/filefs.go @@ -4,140 +4,19 @@ package bundle import ( - "fmt" - "io" "io/fs" - "path/filepath" - "sync" - "github.com/open-policy-agent/opa/loader/filter" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) -const ( - defaultFSLoaderRoot = "." 
-) - -type dirLoaderFS struct { - sync.Mutex - filesystem fs.FS - files []string - idx int - filter filter.LoaderFilter - root string - pathFormat PathFormat - maxSizeLimitBytes int64 - followSymlinks bool -} - // NewFSLoader returns a basic DirectoryLoader implementation // that will load files from a fs.FS interface func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) { - return NewFSLoaderWithRoot(filesystem, defaultFSLoaderRoot), nil + return v1.NewFSLoader(filesystem) } // NewFSLoaderWithRoot returns a basic DirectoryLoader implementation // that will load files from a fs.FS interface at the supplied root func NewFSLoaderWithRoot(filesystem fs.FS, root string) DirectoryLoader { - d := dirLoaderFS{ - filesystem: filesystem, - root: normalizeRootDirectory(root), - pathFormat: Chrooted, - } - - return &d -} - -func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error { - if err != nil { - return err - } - - if dirEntry != nil { - info, err := dirEntry.Info() - if err != nil { - return err - } - - if dirEntry.Type().IsRegular() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) - } - - d.files = append(d.files, path) - } else if dirEntry.Type()&fs.ModeSymlink != 0 && d.followSymlinks { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { - return nil - } - - if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { - return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) - } - - d.files = append(d.files, path) - } else if dirEntry.Type().IsDir() { - if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { - return fs.SkipDir - } - } - } - return nil -} - -// WithFilter specifies the filter object to use to filter files while loading bundles -func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader { - d.filter = filter - return d -} - -// WithPathFormat specifies how a path is formatted in a Descriptor -func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader { - d.pathFormat = pathFormat - return d -} - -// WithSizeLimitBytes specifies the maximum size of any file in the filesystem directory to read -func (d *dirLoaderFS) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { - d.maxSizeLimitBytes = sizeLimitBytes - return d -} - -func (d *dirLoaderFS) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { - d.followSymlinks = followSymlinks - return d -} - -// NextFile iterates to the next file in the directory tree -// and returns a file Descriptor for the file. 
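NewFSLoader keeps its (loader, error) signature while deferring the default root to v1. A short sketch over an fs.FS rooted at a local directory (the path is illustrative):

package main

import (
	"errors"
	"io"
	"os"

	"github.com/open-policy-agent/opa/bundle"
)

func countFSFiles() (int, error) {
	dl, err := bundle.NewFSLoader(os.DirFS("./bundle"))
	if err != nil {
		return 0, err
	}
	n := 0
	for {
		f, err := dl.NextFile()
		if errors.Is(err, io.EOF) {
			return n, nil
		}
		if err != nil {
			return n, err
		}
		n++
		f.Close()
	}
}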
-func (d *dirLoaderFS) NextFile() (*Descriptor, error) { - d.Lock() - defer d.Unlock() - - if d.files == nil { - err := fs.WalkDir(d.filesystem, d.root, d.walkDir) - if err != nil { - return nil, fmt.Errorf("failed to list files: %w", err) - } - } - - // If done reading files then just return io.EOF - // errors for each NextFile() call - if d.idx >= len(d.files) { - return nil, io.EOF - } - - fileName := d.files[d.idx] - d.idx++ - - fh, err := d.filesystem.Open(fileName) - if err != nil { - return nil, fmt.Errorf("failed to open file %s: %w", fileName, err) - } - - cleanedPath := formatPath(fileName, d.root, d.pathFormat) - f := NewDescriptor(cleanedPath, cleanedPath, fh).WithCloser(fh) - return f, nil + return v1.NewFSLoaderWithRoot(filesystem, root) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/bundle/hash.go b/constraint/vendor/github.com/open-policy-agent/opa/bundle/hash.go index 021801bb0..d4cc601de 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/bundle/hash.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/bundle/hash.go @@ -5,137 +5,28 @@ package bundle import ( - "bytes" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/json" - "fmt" - "hash" - "io" - "sort" - "strings" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // HashingAlgorithm represents a subset of hashing algorithms implemented in Go -type HashingAlgorithm string +type HashingAlgorithm = v1.HashingAlgorithm // Supported values for HashingAlgorithm const ( - MD5 HashingAlgorithm = "MD5" - SHA1 HashingAlgorithm = "SHA-1" - SHA224 HashingAlgorithm = "SHA-224" - SHA256 HashingAlgorithm = "SHA-256" - SHA384 HashingAlgorithm = "SHA-384" - SHA512 HashingAlgorithm = "SHA-512" - SHA512224 HashingAlgorithm = "SHA-512-224" - SHA512256 HashingAlgorithm = "SHA-512-256" + MD5 = v1.MD5 + SHA1 = v1.SHA1 + SHA224 = v1.SHA224 + SHA256 = v1.SHA256 + SHA384 = v1.SHA384 + SHA512 = v1.SHA512 + SHA512224 = v1.SHA512224 + SHA512256 = v1.SHA512256 ) -// String returns the string representation of a HashingAlgorithm -func (alg HashingAlgorithm) String() string { - return string(alg) -} - // SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy -type SignatureHasher interface { - HashFile(v interface{}) ([]byte, error) -} - -type hasher struct { - h func() hash.Hash // hash function factory -} +type SignatureHasher = v1.SignatureHasher // NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) { - h := &hasher{} - - switch alg { - case MD5: - h.h = md5.New - case SHA1: - h.h = sha1.New - case SHA224: - h.h = sha256.New224 - case SHA256: - h.h = sha256.New - case SHA384: - h.h = sha512.New384 - case SHA512: - h.h = sha512.New - case SHA512224: - h.h = sha512.New512_224 - case SHA512256: - h.h = sha512.New512_256 - default: - return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg) - } - - return h, nil -} - -// HashFile hashes the file content, JSON or binary, both in golang native format. -func (h *hasher) HashFile(v interface{}) ([]byte, error) { - hf := h.h() - walk(v, hf) - return hf.Sum(nil), nil -} - -// walk hashes the file content, JSON or binary, both in golang native format. -// -// Computation for unstructured documents is a hash of the document. 
-// -// Computation for the types of structured JSON document is as follows: -// -// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }. -// -// array: Hash [, then digest of the value, then comma (between items) and finally ]. -func walk(v interface{}, h io.Writer) { - - switch x := v.(type) { - case map[string]interface{}: - _, _ = h.Write([]byte("{")) - - var keys []string - for k := range x { - keys = append(keys, k) - } - sort.Strings(keys) - - for i, key := range keys { - if i > 0 { - _, _ = h.Write([]byte(",")) - } - - _, _ = h.Write(encodePrimitive(key)) - _, _ = h.Write([]byte(":")) - walk(x[key], h) - } - - _, _ = h.Write([]byte("}")) - case []interface{}: - _, _ = h.Write([]byte("[")) - - for i, e := range x { - if i > 0 { - _, _ = h.Write([]byte(",")) - } - walk(e, h) - } - - _, _ = h.Write([]byte("]")) - case []byte: - _, _ = h.Write(x) - default: - _, _ = h.Write(encodePrimitive(x)) - } -} - -func encodePrimitive(v interface{}) []byte { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - encoder.SetEscapeHTML(false) - _ = encoder.Encode(v) - return []byte(strings.Trim(buf.String(), "\n")) + return v1.NewSignatureHasher(alg) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/bundle/keys.go b/constraint/vendor/github.com/open-policy-agent/opa/bundle/keys.go index 810bee4b7..99f9b0f16 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/bundle/keys.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/bundle/keys.go @@ -6,139 +6,25 @@ package bundle import ( - "encoding/pem" - "fmt" - "os" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws/sign" - "github.com/open-policy-agent/opa/keys" - - "github.com/open-policy-agent/opa/util" -) - -const ( - defaultTokenSigningAlg = "RS256" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // KeyConfig holds the keys used to sign or verify bundles and tokens // Moved to own package, alias kept for backwards compatibility -type KeyConfig = keys.Config +type KeyConfig = v1.KeyConfig // VerificationConfig represents the key configuration used to verify a signed bundle -type VerificationConfig struct { - PublicKeys map[string]*KeyConfig - KeyID string `json:"keyid"` - Scope string `json:"scope"` - Exclude []string `json:"exclude_files"` -} +type VerificationConfig = v1.VerificationConfig // NewVerificationConfig return a new VerificationConfig func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig { - return &VerificationConfig{ - PublicKeys: keys, - KeyID: id, - Scope: scope, - Exclude: exclude, - } -} - -// ValidateAndInjectDefaults validates the config and inserts default values -func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error { - vc.PublicKeys = keys - - if vc.KeyID != "" { - found := false - for key := range keys { - if key == vc.KeyID { - found = true - break - } - } - - if !found { - return fmt.Errorf("key id %s not found", vc.KeyID) - } - } - return nil -} - -// GetPublicKey returns the public key corresponding to the given key id -func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) { - var kc *KeyConfig - var ok bool - - if kc, ok = vc.PublicKeys[id]; !ok { - return nil, fmt.Errorf("verification key corresponding to ID %v not found", id) - } - return kc, nil + return v1.NewVerificationConfig(keys, id, scope, exclude) } // SigningConfig represents the 
key configuration used to generate a signed bundle -type SigningConfig struct { - Plugin string - Key string - Algorithm string - ClaimsPath string -} +type SigningConfig = v1.SigningConfig // NewSigningConfig return a new SigningConfig func NewSigningConfig(key, alg, claimsPath string) *SigningConfig { - if alg == "" { - alg = defaultTokenSigningAlg - } - - return &SigningConfig{ - Plugin: defaultSignerID, - Key: key, - Algorithm: alg, - ClaimsPath: claimsPath, - } -} - -// WithPlugin sets the signing plugin in the signing config -func (s *SigningConfig) WithPlugin(plugin string) *SigningConfig { - if plugin != "" { - s.Plugin = plugin - } - return s -} - -// GetPrivateKey returns the private key or secret from the signing config -func (s *SigningConfig) GetPrivateKey() (interface{}, error) { - - block, _ := pem.Decode([]byte(s.Key)) - if block != nil { - return sign.GetSigningKey(s.Key, jwa.SignatureAlgorithm(s.Algorithm)) - } - - var priv string - if _, err := os.Stat(s.Key); err == nil { - bs, err := os.ReadFile(s.Key) - if err != nil { - return nil, err - } - priv = string(bs) - } else if os.IsNotExist(err) { - priv = s.Key - } else { - return nil, err - } - - return sign.GetSigningKey(priv, jwa.SignatureAlgorithm(s.Algorithm)) -} - -// GetClaims returns the claims by reading the file specified in the signing config -func (s *SigningConfig) GetClaims() (map[string]interface{}, error) { - var claims map[string]interface{} - - bs, err := os.ReadFile(s.ClaimsPath) - if err != nil { - return claims, err - } - - if err := util.UnmarshalJSON(bs, &claims); err != nil { - return claims, err - } - return claims, nil + return v1.NewSigningConfig(key, alg, claimsPath) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/bundle/sign.go b/constraint/vendor/github.com/open-policy-agent/opa/bundle/sign.go index cf9a3e183..56e25eec9 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/bundle/sign.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/bundle/sign.go @@ -6,130 +6,30 @@ package bundle import ( - "crypto/rand" - "encoding/json" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) -const defaultSignerID = "_default" - -var signers map[string]Signer - // Signer is the interface expected for implementations that generate bundle signatures. -type Signer interface { - GenerateSignedToken([]FileInfo, *SigningConfig, string) (string, error) -} +type Signer v1.Signer // GenerateSignedToken will retrieve the Signer implementation based on the Plugin specified // in SigningConfig, and call its implementation of GenerateSignedToken. The signer generates // a signed token given the list of files to be included in the payload and the bundle // signing config. The keyID if non-empty, represents the value for the "keyid" claim in the token. func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) { - var plugin string - // for backwards compatibility, check if there is no plugin specified, and use default - if sc.Plugin == "" { - plugin = defaultSignerID - } else { - plugin = sc.Plugin - } - signer, err := GetSigner(plugin) - if err != nil { - return "", err - } - return signer.GenerateSignedToken(files, sc, keyID) + return v1.GenerateSignedToken(files, sc, keyID) } // DefaultSigner is the default bundle signing implementation. It signs bundles by generating // a JWT and signing it using a locally-accessible private key. 
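The hashing constants and SignatureHasher interface are aliases now, so digests produced through this package match the v1 package byte for byte. A small sketch hashing a structured document with SHA-256 (the document literal is made up):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/open-policy-agent/opa/bundle"
)

func hashExample() error {
	h, err := bundle.NewSignatureHasher(bundle.SHA256)
	if err != nil {
		return err
	}
	// Object keys are hashed in sorted order, as the removed walk()
	// comment above describes.
	digest, err := h.HashFile(map[string]interface{}{"a": 1, "b": []interface{}{"x"}})
	if err != nil {
		return err
	}
	fmt.Println(hex.EncodeToString(digest))
	return nil
}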
-type DefaultSigner struct{} - -// GenerateSignedToken generates a signed token given the list of files to be -// included in the payload and the bundle signing config. The keyID if non-empty, -// represents the value for the "keyid" claim in the token -func (*DefaultSigner) GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) { - payload, err := generatePayload(files, sc, keyID) - if err != nil { - return "", err - } - - privateKey, err := sc.GetPrivateKey() - if err != nil { - return "", err - } - - var headers jws.StandardHeaders - - if err := headers.Set(jws.AlgorithmKey, jwa.SignatureAlgorithm(sc.Algorithm)); err != nil { - return "", err - } - - if keyID != "" { - if err := headers.Set(jws.KeyIDKey, keyID); err != nil { - return "", err - } - } - - hdr, err := json.Marshal(headers) - if err != nil { - return "", err - } - - token, err := jws.SignLiteral(payload, - jwa.SignatureAlgorithm(sc.Algorithm), - privateKey, - hdr, - rand.Reader) - if err != nil { - return "", err - } - return string(token), nil -} - -func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte, error) { - payload := make(map[string]interface{}) - payload["files"] = files - - if sc.ClaimsPath != "" { - claims, err := sc.GetClaims() - if err != nil { - return nil, err - } - - for claim, value := range claims { - payload[claim] = value - } - } else { - if keyID != "" { - // keyid claim is deprecated but include it for backwards compatibility. - payload["keyid"] = keyID - } - } - return json.Marshal(payload) -} +type DefaultSigner v1.DefaultSigner // GetSigner returns the Signer registered under the given id func GetSigner(id string) (Signer, error) { - signer, ok := signers[id] - if !ok { - return nil, fmt.Errorf("no signer exists under id %s", id) - } - return signer, nil + return v1.GetSigner(id) } // RegisterSigner registers a Signer under the given id func RegisterSigner(id string, s Signer) error { - if id == defaultSignerID { - return fmt.Errorf("signer id %s is reserved, use a different id", id) - } - signers[id] = s - return nil -} - -func init() { - signers = map[string]Signer{ - defaultSignerID: &DefaultSigner{}, - } + return v1.RegisterSigner(id, s) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/bundle/store.go b/constraint/vendor/github.com/open-policy-agent/opa/bundle/store.go index 45bcf6e55..d73cc7742 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/bundle/store.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/bundle/store.go @@ -6,998 +6,118 @@ package bundle import ( "context" - "encoding/base64" - "encoding/json" - "fmt" - "path/filepath" - "strings" - "github.com/open-policy-agent/opa/ast" - iCompiler "github.com/open-policy-agent/opa/internal/compiler" - "github.com/open-policy-agent/opa/internal/json/patch" - "github.com/open-policy-agent/opa/metrics" "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) // BundlesBasePath is the storage path used for storing bundle metadata -var BundlesBasePath = storage.MustParsePath("/system/bundles") +var BundlesBasePath = v1.BundlesBasePath // Note: As needed these helpers could be memoized. // ManifestStoragePath is the storage path used for the given named bundle manifest. 
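The storage-path helpers still resolve under /system/bundles. A tiny illustrative check (the bundle name "authz" is made up):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/bundle"
)

func printPaths() {
	fmt.Println(bundle.ManifestStoragePath("authz").String()) // /system/bundles/authz/manifest
	fmt.Println(bundle.EtagStoragePath("authz").String())     // /system/bundles/authz/etag
}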
func ManifestStoragePath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest") + return v1.ManifestStoragePath(name) } // EtagStoragePath is the storage path used for the given named bundle etag. func EtagStoragePath(name string) storage.Path { - return append(BundlesBasePath, name, "etag") -} - -func namedBundlePath(name string) storage.Path { - return append(BundlesBasePath, name) -} - -func rootsPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "roots") -} - -func revisionPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "revision") -} - -func wasmModulePath(name string) storage.Path { - return append(BundlesBasePath, name, "wasm") -} - -func wasmEntrypointsPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "wasm") -} - -func metadataPath(name string) storage.Path { - return append(BundlesBasePath, name, "manifest", "metadata") + return v1.EtagStoragePath(name) } // ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored. func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) { - value, err := store.Read(ctx, txn, BundlesBasePath) - if err != nil { - return nil, err - } - - bundleMap, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt manifest roots") - } - - bundles := make([]string, len(bundleMap)) - idx := 0 - for name := range bundleMap { - bundles[idx] = name - idx++ - } - return bundles, nil + return v1.ReadBundleNamesFromStore(ctx, store, txn) } // WriteManifestToStore will write the manifest into the storage. This function is called when // the bundle is activated. func WriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, manifest Manifest) error { - return write(ctx, store, txn, ManifestStoragePath(name), manifest) + return v1.WriteManifestToStore(ctx, store, txn, name, manifest) } // WriteEtagToStore will write the bundle etag into the storage. This function is called when the bundle is activated. func WriteEtagToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name, etag string) error { - return write(ctx, store, txn, EtagStoragePath(name), etag) -} - -func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, value interface{}) error { - if err := util.RoundTrip(&value); err != nil { - return err - } - - var dir []string - if len(path) > 1 { - dir = path[:len(path)-1] - } - - if err := storage.MakeDir(ctx, store, txn, dir); err != nil { - return err - } - - return store.Write(ctx, txn, storage.AddOp, path, value) + return v1.WriteEtagToStore(ctx, store, txn, name, etag) } // EraseManifestFromStore will remove the manifest from storage. This function is called // when the bundle is deactivated. func EraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { - path := namedBundlePath(name) - err := store.Write(ctx, txn, storage.RemoveOp, path, nil) - return suppressNotFound(err) -} - -// eraseBundleEtagFromStore will remove the bundle etag from storage. This function is called -// when the bundle is deactivated. 
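Reading activation metadata back out goes through the same wrappers. A hedged sketch listing activated bundles and their revisions against an arbitrary store (a fresh store simply reports not-found):

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/bundle"
	"github.com/open-policy-agent/opa/storage"
)

func listActivated(ctx context.Context, store storage.Store) error {
	txn, err := store.NewTransaction(ctx)
	if err != nil {
		return err
	}
	defer store.Abort(ctx, txn) // read-only, so abort is fine

	names, err := bundle.ReadBundleNamesFromStore(ctx, store, txn)
	if storage.IsNotFound(err) {
		return nil // nothing activated yet
	} else if err != nil {
		return err
	}
	for _, name := range names {
		rev, err := bundle.ReadBundleRevisionFromStore(ctx, store, txn, name)
		if err != nil && !storage.IsNotFound(err) {
			return err
		}
		fmt.Println(name, rev)
	}
	return nil
}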
-func eraseBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { - path := EtagStoragePath(name) - err := store.Write(ctx, txn, storage.RemoveOp, path, nil) - return suppressNotFound(err) -} - -func suppressNotFound(err error) error { - if err == nil || storage.IsNotFound(err) { - return nil - } - return err -} - -func writeWasmModulesToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, b *Bundle) error { - basePath := wasmModulePath(name) - for _, wm := range b.WasmModules { - path := append(basePath, wm.Path) - err := write(ctx, store, txn, path, base64.StdEncoding.EncodeToString(wm.Raw)) - if err != nil { - return err - } - } - return nil -} - -func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { - path := wasmModulePath(name) - - err := store.Write(ctx, txn, storage.RemoveOp, path, nil) - return suppressNotFound(err) -} - -// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store. -func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) { - path := wasmEntrypointsPath(name) - value, err := store.Read(ctx, txn, path) - if err != nil { - return nil, err - } - - bs, err := json.Marshal(value) - if err != nil { - return nil, fmt.Errorf("corrupt wasm manifest data") - } - - var wasmMetadata []WasmResolver - - err = util.UnmarshalJSON(bs, &wasmMetadata) - if err != nil { - return nil, fmt.Errorf("corrupt wasm manifest data") - } - - return wasmMetadata, nil + return v1.EraseManifestFromStore(ctx, store, txn, name) } // ReadWasmModulesFromStore will write Wasm module resolver metadata from the store. func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) { - path := wasmModulePath(name) - value, err := store.Read(ctx, txn, path) - if err != nil { - return nil, err - } - - encodedModules, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt wasm modules") - } - - rawModules := map[string][]byte{} - for path, enc := range encodedModules { - encStr, ok := enc.(string) - if !ok { - return nil, fmt.Errorf("corrupt wasm modules") - } - bs, err := base64.StdEncoding.DecodeString(encStr) - if err != nil { - return nil, err - } - rawModules[path] = bs - } - return rawModules, nil + return v1.ReadWasmModulesFromStore(ctx, store, txn, name) } // ReadBundleRootsFromStore returns the roots in the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]string, error) { - value, err := store.Read(ctx, txn, rootsPath(name)) - if err != nil { - return nil, err - } - - sl, ok := value.([]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt manifest roots") - } - - roots := make([]string, len(sl)) - - for i := range sl { - roots[i], ok = sl[i].(string) - if !ok { - return nil, fmt.Errorf("corrupt manifest root") - } - } - - return roots, nil + return v1.ReadBundleRootsFromStore(ctx, store, txn, name) } // ReadBundleRevisionFromStore returns the revision in the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. 
func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) { - return readRevisionFromStore(ctx, store, txn, revisionPath(name)) -} - -func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) { - value, err := store.Read(ctx, txn, path) - if err != nil { - return "", err - } - - str, ok := value.(string) - if !ok { - return "", fmt.Errorf("corrupt manifest revision") - } - - return str, nil + return v1.ReadBundleRevisionFromStore(ctx, store, txn, name) } // ReadBundleMetadataFromStore returns the metadata in the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]interface{}, error) { - return readMetadataFromStore(ctx, store, txn, metadataPath(name)) -} - -func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (map[string]interface{}, error) { - value, err := store.Read(ctx, txn, path) - if err != nil { - return nil, suppressNotFound(err) - } - - data, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("corrupt manifest metadata") - } - - return data, nil + return v1.ReadBundleMetadataFromStore(ctx, store, txn, name) } // ReadBundleEtagFromStore returns the etag for the specified bundle. // If the bundle is not activated, this function will return // storage NotFound error. func ReadBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) { - return readEtagFromStore(ctx, store, txn, EtagStoragePath(name)) -} - -func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) { - value, err := store.Read(ctx, txn, path) - if err != nil { - return "", err - } - - str, ok := value.(string) - if !ok { - return "", fmt.Errorf("corrupt bundle etag") - } - - return str, nil + return v1.ReadBundleEtagFromStore(ctx, store, txn, name) } // ActivateOpts defines options for the Activate API call. -type ActivateOpts struct { - Ctx context.Context - Store storage.Store - Txn storage.Transaction - TxnCtx *storage.Context - Compiler *ast.Compiler - Metrics metrics.Metrics - Bundles map[string]*Bundle // Optional - ExtraModules map[string]*ast.Module // Optional - AuthorizationDecisionRef ast.Ref - ParserOptions ast.ParserOptions - - legacy bool -} +type ActivateOpts = v1.ActivateOpts // Activate the bundle(s) by loading into the given Store. This will load policies, data, and record // the manifest in storage. The compiler provided will have had the polices compiled on it. func Activate(opts *ActivateOpts) error { - opts.legacy = false - return activateBundles(opts) + return v1.Activate(opts) } // DeactivateOpts defines options for the Deactivate API call -type DeactivateOpts struct { - Ctx context.Context - Store storage.Store - Txn storage.Transaction - BundleNames map[string]struct{} - ParserOptions ast.ParserOptions -} +type DeactivateOpts = v1.DeactivateOpts // Deactivate the bundle(s). This will erase associated data, policies, and the manifest entry from the store. 
func Deactivate(opts *DeactivateOpts) error { - erase := map[string]struct{}{} - for name := range opts.BundleNames { - roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name) - if suppressNotFound(err) != nil { - return err - } - for _, root := range roots { - erase[root] = struct{}{} - } - } - _, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, opts.BundleNames, erase) - return err -} - -func activateBundles(opts *ActivateOpts) error { - - // Build collections of bundle names, modules, and roots to erase - erase := map[string]struct{}{} - names := map[string]struct{}{} - deltaBundles := map[string]*Bundle{} - snapshotBundles := map[string]*Bundle{} - - for name, b := range opts.Bundles { - if b.Type() == DeltaBundleType { - deltaBundles[name] = b - } else { - snapshotBundles[name] = b - names[name] = struct{}{} - - roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name) - if suppressNotFound(err) != nil { - return err - } - for _, root := range roots { - erase[root] = struct{}{} - } - - // Erase data at new roots to prepare for writing the new data - for _, root := range *b.Manifest.Roots { - erase[root] = struct{}{} - } - } - } - - // Before changing anything make sure the roots don't collide with any - // other bundles that already are activated or other bundles being activated. - err := hasRootsOverlap(opts.Ctx, opts.Store, opts.Txn, opts.Bundles) - if err != nil { - return err - } - - if len(deltaBundles) != 0 { - err := activateDeltaBundles(opts, deltaBundles) - if err != nil { - return err - } - } - - // Erase data and policies at new + old roots, and remove the old - // manifests before activating a new snapshot bundle. - remaining, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, names, erase) - if err != nil { - return err - } - - // Validate data in bundle does not contain paths outside the bundle's roots. - for _, b := range snapshotBundles { - - if b.lazyLoadingMode { - - for _, item := range b.Raw { - path := filepath.ToSlash(item.Path) - - if filepath.Base(path) == dataFile || filepath.Base(path) == yamlDataFile { - var val map[string]json.RawMessage - err = util.Unmarshal(item.Value, &val) - if err == nil { - err = doDFS(val, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots) - if err != nil { - return err - } - } else { - // Build an object for the value - p := getNormalizedPath(path) - - if len(p) == 0 { - return fmt.Errorf("root value must be object") - } - - // verify valid YAML or JSON value - var x interface{} - err := util.Unmarshal(item.Value, &x) - if err != nil { - return err - } - - value := item.Value - dir := map[string]json.RawMessage{} - for i := len(p) - 1; i > 0; i-- { - dir[p[i]] = value - - bs, err := json.Marshal(dir) - if err != nil { - return err - } - - value = bs - dir = map[string]json.RawMessage{} - } - dir[p[0]] = value - - err = doDFS(dir, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots) - if err != nil { - return err - } - } - } - } - } - } - - // Compile the modules all at once to avoid having to re-do work. 
- remainingAndExtra := make(map[string]*ast.Module) - for name, mod := range remaining { - remainingAndExtra[name] = mod - } - for name, mod := range opts.ExtraModules { - remainingAndExtra[name] = mod - } - - err = compileModules(opts.Compiler, opts.Metrics, snapshotBundles, remainingAndExtra, opts.legacy, opts.AuthorizationDecisionRef) - if err != nil { - return err - } - - if err := writeDataAndModules(opts.Ctx, opts.Store, opts.Txn, opts.TxnCtx, snapshotBundles, opts.legacy); err != nil { - return err - } - - if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 { - return err - } - - for name, b := range snapshotBundles { - if err := writeManifestToStore(opts, name, b.Manifest); err != nil { - return err - } - - if err := writeEtagToStore(opts, name, b.Etag); err != nil { - return err - } - - if err := writeWasmModulesToStore(opts.Ctx, opts.Store, opts.Txn, name, b); err != nil { - return err - } - } - - return nil -} - -func doDFS(obj map[string]json.RawMessage, path string, roots []string) error { - if len(roots) == 1 && roots[0] == "" { - return nil - } - - for key := range obj { - - newPath := filepath.Join(strings.Trim(path, "/"), key) - - // Note: filepath.Join can return paths with '\' separators, always use - // filepath.ToSlash to keep them normalized. - newPath = strings.TrimLeft(normalizePath(newPath), "/.") - - contains := false - prefix := false - if RootPathsContain(roots, newPath) { - contains = true - } else { - for i := range roots { - if strings.HasPrefix(strings.Trim(roots[i], "/"), newPath) { - prefix = true - break - } - } - } - - if !contains && !prefix { - return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath) - } - - if contains { - continue - } - - var next map[string]json.RawMessage - err := util.Unmarshal(obj[key], &next) - if err != nil { - return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath) - } - - if err := doDFS(next, newPath, roots); err != nil { - return err - } - } - return nil -} - -func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error { - - // Check that the manifest roots and wasm resolvers in the delta bundle - // match with those currently in the store - for name, b := range bundles { - value, err := opts.Store.Read(opts.Ctx, opts.Txn, ManifestStoragePath(name)) - if err != nil { - if storage.IsNotFound(err) { - continue - } - return err - } - - bs, err := json.Marshal(value) - if err != nil { - return fmt.Errorf("corrupt manifest data: %w", err) - } - - var manifest Manifest - - err = util.UnmarshalJSON(bs, &manifest) - if err != nil { - return fmt.Errorf("corrupt manifest data: %w", err) - } - - if !b.Manifest.equalWasmResolversAndRoots(manifest) { - return fmt.Errorf("delta bundle '%s' has wasm resolvers or manifest roots that are different from those in the store", name) - } - } - - for _, b := range bundles { - err := applyPatches(opts.Ctx, opts.Store, opts.Txn, b.Patch.Data) - if err != nil { - return err - } - } - - if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 { - return err - } - - for name, b := range bundles { - if err := writeManifestToStore(opts, name, b.Manifest); err != nil { - return err - } - - if err := writeEtagToStore(opts, name, b.Etag); err != nil { - return err - } - } - - return nil -} - -// erase bundles by name and roots. 
This will clear all policies and data at its roots and remove its -// manifest from storage. -func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, names map[string]struct{}, roots map[string]struct{}) (map[string]*ast.Module, error) { - - if err := eraseData(ctx, store, txn, roots); err != nil { - return nil, err - } - - remaining, err := erasePolicies(ctx, store, txn, parserOpts, roots) - if err != nil { - return nil, err - } - - for name := range names { - if err := EraseManifestFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { - return nil, err - } - - if err := LegacyEraseManifestFromStore(ctx, store, txn); suppressNotFound(err) != nil { - return nil, err - } - - if err := eraseBundleEtagFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { - return nil, err - } - - if err := eraseWasmModulesFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { - return nil, err - } - } - - return remaining, nil -} - -func eraseData(ctx context.Context, store storage.Store, txn storage.Transaction, roots map[string]struct{}) error { - for root := range roots { - path, ok := storage.ParsePathEscaped("/" + root) - if !ok { - return fmt.Errorf("manifest root path invalid: %v", root) - } - - if len(path) > 0 { - if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); suppressNotFound(err) != nil { - return err - } - } - } - return nil -} - -func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, roots map[string]struct{}) (map[string]*ast.Module, error) { - - ids, err := store.ListPolicies(ctx, txn) - if err != nil { - return nil, err - } - - remaining := map[string]*ast.Module{} - - for _, id := range ids { - bs, err := store.GetPolicy(ctx, txn, id) - if err != nil { - return nil, err - } - module, err := ast.ParseModuleWithOpts(id, string(bs), parserOpts) - if err != nil { - return nil, err - } - path, err := module.Package.Path.Ptr() - if err != nil { - return nil, err - } - deleted := false - for root := range roots { - if RootPathsContain([]string{root}, path) { - if err := store.DeletePolicy(ctx, txn, id); err != nil { - return nil, err - } - deleted = true - break - } - } - if !deleted { - remaining[id] = module - } - } - - return remaining, nil -} - -func writeManifestToStore(opts *ActivateOpts, name string, manifest Manifest) error { - // Always write manifests to the named location. If the plugin is in the older style config - // then also write to the old legacy unnamed location. - if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, manifest); err != nil { - return err - } - - if opts.legacy { - if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, manifest); err != nil { - return err - } - } - - return nil -} - -func writeEtagToStore(opts *ActivateOpts, name, etag string) error { - if err := WriteEtagToStore(opts.Ctx, opts.Store, opts.Txn, name, etag); err != nil { - return err - } - - return nil -} - -func writeDataAndModules(ctx context.Context, store storage.Store, txn storage.Transaction, txnCtx *storage.Context, bundles map[string]*Bundle, legacy bool) error { - params := storage.WriteParams - params.Context = txnCtx - - for name, b := range bundles { - if len(b.Raw) == 0 { - // Write data from each new bundle into the store. Only write under the - // roots contained in their manifest. 
- if err := writeData(ctx, store, txn, *b.Manifest.Roots, b.Data); err != nil { - return err - } - - for _, mf := range b.Modules { - var path string - - // For backwards compatibility, in legacy mode, upsert policies to - // the unprefixed path. - if legacy { - path = mf.Path - } else { - path = modulePathWithPrefix(name, mf.Path) - } - - if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil { - return err - } - } - } else { - params.BasePaths = *b.Manifest.Roots - - err := store.Truncate(ctx, txn, params, NewIterator(b.Raw)) - if err != nil { - return fmt.Errorf("store truncate failed for bundle '%s': %v", name, err) - } - } - } - - return nil + return v1.Deactivate(opts) } -func writeData(ctx context.Context, store storage.Store, txn storage.Transaction, roots []string, data map[string]interface{}) error { - for _, root := range roots { - path, ok := storage.ParsePathEscaped("/" + root) - if !ok { - return fmt.Errorf("manifest root path invalid: %v", root) - } - if value, ok := lookup(path, data); ok { - if len(path) > 0 { - if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { - return err - } - } - if err := store.Write(ctx, txn, storage.AddOp, path, value); err != nil { - return err - } - } - } - return nil -} - -func compileModules(compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool, authorizationDecisionRef ast.Ref) error { - - m.Timer(metrics.RegoModuleCompile).Start() - defer m.Timer(metrics.RegoModuleCompile).Stop() - - modules := map[string]*ast.Module{} - - // preserve any modules already on the compiler - for name, module := range compiler.Modules { - modules[name] = module - } - - // preserve any modules passed in from the store - for name, module := range extraModules { - modules[name] = module - } - - // include all the new bundle modules - for bundleName, b := range bundles { - if legacy { - for _, mf := range b.Modules { - modules[mf.Path] = mf.Parsed - } - } else { - for name, module := range b.ParsedModules(bundleName) { - modules[name] = module - } - } - } - - if compiler.Compile(modules); compiler.Failed() { - return compiler.Errors - } - - if authorizationDecisionRef.Equal(ast.EmptyRef()) { - return nil - } - - return iCompiler.VerifyAuthorizationPolicySchema(compiler, authorizationDecisionRef) -} - -func writeModules(ctx context.Context, store storage.Store, txn storage.Transaction, compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error { - - m.Timer(metrics.RegoModuleCompile).Start() - defer m.Timer(metrics.RegoModuleCompile).Stop() - - modules := map[string]*ast.Module{} - - // preserve any modules already on the compiler - for name, module := range compiler.Modules { - modules[name] = module - } - - // preserve any modules passed in from the store - for name, module := range extraModules { - modules[name] = module - } - - // include all the new bundle modules - for bundleName, b := range bundles { - if legacy { - for _, mf := range b.Modules { - modules[mf.Path] = mf.Parsed - } - } else { - for name, module := range b.ParsedModules(bundleName) { - modules[name] = module - } - } - } - - if compiler.Compile(modules); compiler.Failed() { - return compiler.Errors - } - for bundleName, b := range bundles { - for _, mf := range b.Modules { - var path string - - // For backwards compatibility, in legacy mode, upsert policies to - // the unprefixed path. 
- if legacy { - path = mf.Path - } else { - path = modulePathWithPrefix(bundleName, mf.Path) - } - - if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil { - return err - } - } - } - return nil -} - -func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) { - if len(path) == 0 { - return data, true - } - for i := 0; i < len(path)-1; i++ { - value, ok := data[path[i]] - if !ok { - return nil, false - } - obj, ok := value.(map[string]interface{}) - if !ok { - return nil, false - } - data = obj - } - value, ok := data[path[len(path)-1]] - return value, ok -} - -func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Transaction, bundles map[string]*Bundle) error { - collisions := map[string][]string{} - allBundles, err := ReadBundleNamesFromStore(ctx, store, txn) - if suppressNotFound(err) != nil { - return err - } - - allRoots := map[string][]string{} - - // Build a map of roots for existing bundles already in the system - for _, name := range allBundles { - roots, err := ReadBundleRootsFromStore(ctx, store, txn, name) - if suppressNotFound(err) != nil { - return err - } - allRoots[name] = roots - } - - // Add in any bundles that are being activated, overwrite existing roots - // with new ones where bundles are in both groups. - for name, bundle := range bundles { - allRoots[name] = *bundle.Manifest.Roots - } - - // Now check for each new bundle if it conflicts with any of the others - for name, bundle := range bundles { - for otherBundle, otherRoots := range allRoots { - if name == otherBundle { - // Skip the current bundle being checked - continue - } - - // Compare the "new" roots with other existing (or a different bundles new roots) - for _, newRoot := range *bundle.Manifest.Roots { - for _, otherRoot := range otherRoots { - if RootPathsOverlap(newRoot, otherRoot) { - collisions[otherBundle] = append(collisions[otherBundle], newRoot) - } - } - } - } - } - - if len(collisions) > 0 { - var bundleNames []string - for name := range collisions { - bundleNames = append(bundleNames, name) - } - return fmt.Errorf("detected overlapping roots in bundle manifest with: %s", bundleNames) - } - return nil -} - -func applyPatches(ctx context.Context, store storage.Store, txn storage.Transaction, patches []PatchOperation) error { - for _, pat := range patches { - - // construct patch path - path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/")) - if !ok { - return fmt.Errorf("error parsing patch path") - } - - var op storage.PatchOp - switch pat.Op { - case "upsert": - op = storage.AddOp - - _, err := store.Read(ctx, txn, path[:len(path)-1]) - if err != nil { - if !storage.IsNotFound(err) { - return err - } - - if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { - return err - } - } - case "remove": - op = storage.RemoveOp - case "replace": - op = storage.ReplaceOp - default: - return fmt.Errorf("bad patch operation: %v", pat.Op) - } - - // apply the patch - if err := store.Write(ctx, txn, op, path, pat.Value); err != nil { - return err - } - } - - return nil -} - -// Helpers for the older single (unnamed) bundle style manifest storage. - -// LegacyManifestStoragePath is the older unnamed bundle path for manifests to be stored. -// Deprecated: Use ManifestStoragePath and named bundles instead. 
-var legacyManifestStoragePath = storage.MustParsePath("/system/bundle/manifest") -var legacyRevisionStoragePath = append(legacyManifestStoragePath, "revision") - // LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location. // Deprecated: Use WriteManifestToStore and named bundles instead. func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error { - return write(ctx, store, txn, legacyManifestStoragePath, manifest) + return v1.LegacyWriteManifestToStore(ctx, store, txn, manifest) } // LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location. // Deprecated: Use WriteManifestToStore and named bundles instead. func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error { - err := store.Write(ctx, txn, storage.RemoveOp, legacyManifestStoragePath, nil) - if err != nil { - return err - } - return nil + return v1.LegacyEraseManifestFromStore(ctx, store, txn) } // LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location. // Deprecated: Use ReadBundleRevisionFromStore and named bundles instead. func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) { - return readRevisionFromStore(ctx, store, txn, legacyRevisionStoragePath) + return v1.LegacyReadRevisionFromStore(ctx, store, txn) } // ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location. // Deprecated: Use Activate with named bundles instead. func ActivateLegacy(opts *ActivateOpts) error { - opts.legacy = true - return activateBundles(opts) + return v1.ActivateLegacy(opts) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/bundle/verify.go b/constraint/vendor/github.com/open-policy-agent/opa/bundle/verify.go index e85be835b..ef2e1e32d 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/bundle/verify.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/bundle/verify.go @@ -6,26 +6,11 @@ package bundle import ( - "bytes" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - - "github.com/open-policy-agent/opa/internal/jwx/jwa" - "github.com/open-policy-agent/opa/internal/jwx/jws" - "github.com/open-policy-agent/opa/internal/jwx/jws/verify" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/bundle" ) -const defaultVerifierID = "_default" - -var verifiers map[string]Verifier - // Verifier is the interface expected for implementations that verify bundle signatures. -type Verifier interface { - VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error) -} +type Verifier v1.Verifier // VerifyBundleSignature will retrieve the Verifier implementation based // on the Plugin specified in SignaturesConfig, and call its implementation @@ -33,199 +18,19 @@ type Verifier interface { // using the given public keys or secret. 
If a signature is verified, it keeps // track of the files specified in the JWT payload func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) { - // default implementation does not return a nil for map, so don't - // do it here either - files := make(map[string]FileInfo) - var plugin string - // for backwards compatibility, check if there is no plugin specified, and use default - if sc.Plugin == "" { - plugin = defaultVerifierID - } else { - plugin = sc.Plugin - } - verifier, err := GetVerifier(plugin) - if err != nil { - return files, err - } - return verifier.VerifyBundleSignature(sc, bvc) + return v1.VerifyBundleSignature(sc, bvc) } // DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking // the JWT signature using a locally-accessible public key. -type DefaultVerifier struct{} - -// VerifyBundleSignature verifies the bundle signature using the given public keys or secret. -// If a signature is verified, it keeps track of the files specified in the JWT payload -func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) { - files := make(map[string]FileInfo) - - if len(sc.Signatures) == 0 { - return files, fmt.Errorf(".signatures.json: missing JWT (expected exactly one)") - } - - if len(sc.Signatures) > 1 { - return files, fmt.Errorf(".signatures.json: multiple JWTs not supported (expected exactly one)") - } - - for _, token := range sc.Signatures { - payload, err := verifyJWTSignature(token, bvc) - if err != nil { - return files, err - } - - for _, file := range payload.Files { - files[file.Name] = file - } - } - return files, nil -} - -func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) { - // decode JWT to check if the header specifies the key to use and/or if claims have the scope. - - parts, err := jws.SplitCompact(token) - if err != nil { - return nil, err - } - - var decodedHeader []byte - if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil { - return nil, fmt.Errorf("failed to base64 decode JWT headers: %w", err) - } - - var hdr jws.StandardHeaders - if err := json.Unmarshal(decodedHeader, &hdr); err != nil { - return nil, fmt.Errorf("failed to parse JWT headers: %w", err) - } - - payload, err := base64.RawURLEncoding.DecodeString(parts[1]) - if err != nil { - return nil, err - } - - var ds DecodedSignature - if err := json.Unmarshal(payload, &ds); err != nil { - return nil, err - } - - // check for the id of the key to use for JWT signature verification - // first in the OPA config. If not found, then check the JWT kid. - keyID := bvc.KeyID - if keyID == "" { - keyID = hdr.KeyID - } - if keyID == "" { - // If header has no key id, check the deprecated key claim. 
- keyID = ds.KeyID - } - - if keyID == "" { - return nil, fmt.Errorf("verification key ID is empty") - } - - // now that we have the keyID, fetch the actual key - keyConfig, err := bvc.GetPublicKey(keyID) - if err != nil { - return nil, err - } - - // verify JWT signature - alg := jwa.SignatureAlgorithm(keyConfig.Algorithm) - key, err := verify.GetSigningKey(keyConfig.Key, alg) - if err != nil { - return nil, err - } - - _, err = jws.Verify([]byte(token), alg, key) - if err != nil { - return nil, err - } - - // verify the scope - scope := bvc.Scope - if scope == "" { - scope = keyConfig.Scope - } - - if ds.Scope != scope { - return nil, fmt.Errorf("scope mismatch") - } - return &ds, nil -} - -// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature -func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error { - var file FileInfo - var ok bool - - if file, ok = files[path]; !ok { - return fmt.Errorf("file %v not included in bundle signature", path) - } - - if file.Algorithm == "" { - return fmt.Errorf("no hashing algorithm provided for file %v", path) - } - - hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm)) - if err != nil { - return err - } - - // hash the file content - // For unstructured files, hash the byte stream of the file - // For structured files, read the byte stream and parse into a JSON structure; - // then recursively order the fields of all objects alphabetically and then apply - // the hash function to result to compute the hash. This ensures that the digital signature is - // independent of whitespace and other non-semantic JSON features. - var value interface{} - if IsStructuredDoc(path) { - err := util.Unmarshal(data.Bytes(), &value) - if err != nil { - return err - } - } else { - value = data.Bytes() - } - - bs, err := hash.HashFile(value) - if err != nil { - return err - } - - // compare file hash with same file in the JWT payloads - fb, err := hex.DecodeString(file.Hash) - if err != nil { - return err - } - - if !bytes.Equal(fb, bs) { - return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs) - } - - delete(files, path) - return nil -} +type DefaultVerifier = v1.DefaultVerifier // GetVerifier returns the Verifier registered under the given id func GetVerifier(id string) (Verifier, error) { - verifier, ok := verifiers[id] - if !ok { - return nil, fmt.Errorf("no verifier exists under id %s", id) - } - return verifier, nil + return v1.GetVerifier(id) } // RegisterVerifier registers a Verifier under the given id func RegisterVerifier(id string, v Verifier) error { - if id == defaultVerifierID { - return fmt.Errorf("verifier id %s is reserved, use a different id", id) - } - verifiers[id] = v - return nil -} - -func init() { - verifiers = map[string]Verifier{ - defaultVerifierID: &DefaultVerifier{}, - } + return v1.RegisterVerifier(id, v) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/capabilities/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/doc.go new file mode 100644 index 000000000..189c2e727 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package capabilities diff --git a/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v0.69.0.json b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v0.69.0.json new file mode 100644 index 000000000..862a4555f --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v0.69.0.json @@ -0,0 +1,4843 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + 
"name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + 
"result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + 
"key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, 
+ { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, 
+ { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + 
"of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + 
"args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } 
+ }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + 
}, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + 
"type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + 
"type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v0.70.0.json b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v0.70.0.json new file mode 100644 index 000000000..862a4555f --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v0.70.0.json @@ -0,0 +1,4843 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": 
"set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + 
} + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + 
"type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + 
"args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + 
"type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + 
}, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": 
{ + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + 
"type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": 
{ + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + 
"type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": 
"function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + 
"type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1_import" + ] +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json new file mode 100644 index 000000000..48a87b0c3 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.0.json @@ -0,0 +1,4835 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + 
"type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + 
"type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + 
"type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": 
"any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + 
"type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": 
"boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { 
+ "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, 
+ { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + 
"key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + 
"dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": 
"string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { 
+ "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + 
"type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json new file mode 100644 index 000000000..48a87b0c3 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v1.0.1.json @@ -0,0 +1,4835 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + 
}, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": 
{ + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + 
"dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { 
+ "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + 
"dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + 
"dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" 
+ } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" 
+ }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + 
}, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + 
"type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, 
+ { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + 
"type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + 
"name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { 
+ "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json new file mode 100644 index 000000000..48a87b0c3 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/capabilities/v1.1.0.json @@ -0,0 +1,4835 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + 
"type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + 
}, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": 
"div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": 
"in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } 
+ }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, 
+ "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + 
"result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + 
}, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": 
"object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + 
}, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + 
"of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": 
"string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + 
"decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rego_v1" + ] +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/config/config.go b/constraint/vendor/github.com/open-policy-agent/opa/config/config.go deleted file mode 100644 index 87ab10911..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/config/config.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package config implements OPA configuration file parsing and validation. -package config - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "reflect" - "sort" - "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/ref" - "github.com/open-policy-agent/opa/util" - "github.com/open-policy-agent/opa/version" -) - -// Config represents the configuration file that OPA can be started with. 
-type Config struct { - Services json.RawMessage `json:"services,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Discovery json.RawMessage `json:"discovery,omitempty"` - Bundle json.RawMessage `json:"bundle,omitempty"` // Deprecated: Use `bundles` instead - Bundles json.RawMessage `json:"bundles,omitempty"` - DecisionLogs json.RawMessage `json:"decision_logs,omitempty"` - Status json.RawMessage `json:"status,omitempty"` - Plugins map[string]json.RawMessage `json:"plugins,omitempty"` - Keys json.RawMessage `json:"keys,omitempty"` - DefaultDecision *string `json:"default_decision,omitempty"` - DefaultAuthorizationDecision *string `json:"default_authorization_decision,omitempty"` - Caching json.RawMessage `json:"caching,omitempty"` - NDBuiltinCache bool `json:"nd_builtin_cache,omitempty"` - PersistenceDirectory *string `json:"persistence_directory,omitempty"` - DistributedTracing json.RawMessage `json:"distributed_tracing,omitempty"` - Server *struct { - Encoding json.RawMessage `json:"encoding,omitempty"` - Decoding json.RawMessage `json:"decoding,omitempty"` - Metrics json.RawMessage `json:"metrics,omitempty"` - } `json:"server,omitempty"` - Storage *struct { - Disk json.RawMessage `json:"disk,omitempty"` - } `json:"storage,omitempty"` - Extra map[string]json.RawMessage `json:"-"` -} - -// ParseConfig returns a valid Config object with defaults injected. The id -// and version parameters will be set in the labels map. -func ParseConfig(raw []byte, id string) (*Config, error) { - // NOTE(sr): based on https://stackoverflow.com/a/33499066/993018 - var result Config - objValue := reflect.ValueOf(&result).Elem() - knownFields := map[string]reflect.Value{} - for i := 0; i != objValue.NumField(); i++ { - jsonName := strings.Split(objValue.Type().Field(i).Tag.Get("json"), ",")[0] - knownFields[jsonName] = objValue.Field(i) - } - - if err := util.Unmarshal(raw, &result.Extra); err != nil { - return nil, err - } - - for key, chunk := range result.Extra { - if field, found := knownFields[key]; found { - if err := util.Unmarshal(chunk, field.Addr().Interface()); err != nil { - return nil, err - } - delete(result.Extra, key) - } - } - if len(result.Extra) == 0 { - result.Extra = nil - } - return &result, result.validateAndInjectDefaults(id) -} - -// PluginNames returns a sorted list of names of enabled plugins. -func (c Config) PluginNames() (result []string) { - if c.Bundle != nil || c.Bundles != nil { - result = append(result, "bundles") - } - if c.Status != nil { - result = append(result, "status") - } - if c.DecisionLogs != nil { - result = append(result, "decision_logs") - } - for name := range c.Plugins { - result = append(result, name) - } - sort.Strings(result) - return result -} - -// PluginsEnabled returns true if one or more plugin features are enabled. -// -// Deprecated. Use PluginNames instead. -func (c Config) PluginsEnabled() bool { - return c.Bundle != nil || c.Bundles != nil || c.DecisionLogs != nil || c.Status != nil || len(c.Plugins) > 0 -} - -// DefaultDecisionRef returns the default decision as a reference. -func (c Config) DefaultDecisionRef() ast.Ref { - r, _ := ref.ParseDataPath(*c.DefaultDecision) - return r -} - -// DefaultAuthorizationDecisionRef returns the default authorization decision -// as a reference. -func (c Config) DefaultAuthorizationDecisionRef() ast.Ref { - r, _ := ref.ParseDataPath(*c.DefaultAuthorizationDecision) - return r -} - -// NDBuiltinCacheEnabled returns if the ND builtins cache should be used. 
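
The `config` package being dropped from this vendor tree parses OPA's configuration file and injects defaults, as the `ParseConfig` signature above shows; the deletion here only reflects that this module no longer imports it. A minimal usage sketch, with a hypothetical configuration document and an assumed import path (the v0-compatible path shown above; OPA 1.x presumably also ships a counterpart under `.../opa/v1/config`):

```go
package main

import (
	"fmt"

	// Assumed import path: the v0-compatible package from upstream OPA,
	// not part of this vendor tree after this change.
	"github.com/open-policy-agent/opa/config"
)

func main() {
	raw := []byte(`{
		"services": {"registry": {"url": "https://bundles.example.com"}},
		"bundles": {"main": {"service": "registry", "resource": "bundle.tar.gz"}},
		"decision_logs": {"console": true}
	}`)

	// The id lands in cfg.Labels["id"]; the OPA version in cfg.Labels["version"].
	cfg, err := config.ParseConfig(raw, "instance-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.PluginNames()) // [bundles decision_logs]
}
```
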
-func (c Config) NDBuiltinCacheEnabled() bool { - return c.NDBuiltinCache -} - -func (c *Config) validateAndInjectDefaults(id string) error { - - if c.DefaultDecision == nil { - s := defaultDecisionPath - c.DefaultDecision = &s - } - - _, err := ref.ParseDataPath(*c.DefaultDecision) - if err != nil { - return err - } - - if c.DefaultAuthorizationDecision == nil { - s := defaultAuthorizationDecisionPath - c.DefaultAuthorizationDecision = &s - } - - _, err = ref.ParseDataPath(*c.DefaultAuthorizationDecision) - if err != nil { - return err - } - - if c.Labels == nil { - c.Labels = map[string]string{} - } - - c.Labels["id"] = id - c.Labels["version"] = version.Version - - return nil -} - -// GetPersistenceDirectory returns the configured persistence directory, or $PWD/.opa if none is configured -func (c Config) GetPersistenceDirectory() (string, error) { - if c.PersistenceDirectory == nil { - pwd, err := os.Getwd() - if err != nil { - return "", err - } - return filepath.Join(pwd, ".opa"), nil - } - return *c.PersistenceDirectory, nil -} - -// ActiveConfig returns OPA's active configuration -// with the credentials and crypto keys removed -func (c *Config) ActiveConfig() (interface{}, error) { - bs, err := json.Marshal(c) - if err != nil { - return nil, err - } - - var result map[string]interface{} - if err := util.UnmarshalJSON(bs, &result); err != nil { - return nil, err - } - for k, e := range c.Extra { - var v any - if err := util.UnmarshalJSON(e, &v); err != nil { - return nil, err - } - result[k] = v - } - - if err := removeServiceCredentials(result["services"]); err != nil { - return nil, err - } - - if err := removeCryptoKeys(result["keys"]); err != nil { - return nil, err - } - - return result, nil -} - -func removeServiceCredentials(x interface{}) error { - switch x := x.(type) { - case nil: - return nil - case []interface{}: - for _, v := range x { - err := removeKey(v, "credentials") - if err != nil { - return err - } - } - - case map[string]interface{}: - for _, v := range x { - err := removeKey(v, "credentials") - if err != nil { - return err - } - } - default: - return fmt.Errorf("illegal service config type: %T", x) - } - - return nil -} - -func removeCryptoKeys(x interface{}) error { - switch x := x.(type) { - case nil: - return nil - case map[string]interface{}: - for _, v := range x { - err := removeKey(v, "key", "private_key") - if err != nil { - return err - } - } - default: - return fmt.Errorf("illegal keys config type: %T", x) - } - - return nil -} - -func removeKey(x interface{}, keys ...string) error { - val, ok := x.(map[string]interface{}) - if !ok { - return fmt.Errorf("type assertion error") - } - - for _, key := range keys { - delete(val, key) - } - - return nil -} - -const ( - defaultDecisionPath = "/system/main" - defaultAuthorizationDecisionPath = "/system/authz/allow" -) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/format/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/format/doc.go new file mode 100644 index 000000000..ba514fffb --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/format/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. 
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package format diff --git a/constraint/vendor/github.com/open-policy-agent/opa/format/format.go b/constraint/vendor/github.com/open-policy-agent/opa/format/format.go index e4c9afaeb..ad09cea84 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/format/format.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/format/format.go @@ -6,80 +6,38 @@ package format import ( - "bytes" - "fmt" - "regexp" - "sort" - "strings" - "unicode" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/future" "github.com/open-policy-agent/opa/types" + v1 "github.com/open-policy-agent/opa/v1/format" ) // Opts lets you control the code formatting via `AstWithOpts()`. -type Opts struct { - // IgnoreLocations instructs the formatter not to use the AST nodes' locations - // into account when laying out the code: notably, when the input is the result - // of partial evaluation, arguments maybe have been shuffled around, but still - // carry along their original source locations. - IgnoreLocations bool - - // RegoVersion is the version of Rego to format code for. - RegoVersion ast.RegoVersion - - // ParserOptions is the parser options used when parsing the module to be formatted. - ParserOptions *ast.ParserOptions -} - -// defaultLocationFile is the file name used in `Ast()` for terms -// without a location, as could happen when pretty-printing the -// results of partial eval. -const defaultLocationFile = "__format_default__" +type Opts = v1.Opts // Source formats a Rego source file. The bytes provided must describe a complete // Rego module. If they don't, Source will return an error resulting from the attempt // to parse the bytes. func Source(filename string, src []byte) ([]byte, error) { - return SourceWithOpts(filename, src, Opts{}) + return SourceWithOpts(filename, src, Opts{ + RegoVersion: ast.DefaultRegoVersion, + ParserOptions: &ast.ParserOptions{ + RegoVersion: ast.DefaultRegoVersion, + }, + }) } func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { - var parserOpts ast.ParserOptions - if opts.ParserOptions != nil { - parserOpts = *opts.ParserOptions - } else { - if opts.RegoVersion == ast.RegoV1 { - // If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported. - // Otherwise, we'll default to RegoV0 - parserOpts.RegoVersion = ast.RegoV1 - } + if opts.RegoVersion == ast.RegoUndefined { + opts.RegoVersion = ast.DefaultRegoVersion } - - module, err := ast.ParseModuleWithOpts(filename, string(src), parserOpts) - if err != nil { - return nil, err - } - - if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 { - checkOpts := ast.NewRegoCheckOptions() - // The module is parsed as v0, so we need to disable checks that will be automatically amended by the AstWithOpts call anyways. 
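
After this change, `Source` and `SourceWithOpts` only normalize the Rego version (falling back to `ast.DefaultRegoVersion` when callers leave it unset) and delegate to `v1/format`. A minimal sketch of formatting a complete module through the shim; the module text is illustrative:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/format"
)

func main() {
	// A complete but untidy module. The rego.v1 import keeps it parseable
	// regardless of which Rego version the shim defaults to.
	src := []byte("package example\nimport rego.v1\nallow if {input.user==\"admin\"}\n")

	formatted, err := format.Source("example.rego", src)
	if err != nil {
		panic(err) // parse errors surface here, per the doc comment above
	}
	fmt.Print(string(formatted))
}
```
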
- checkOpts.RequireIfKeyword = false - checkOpts.RequireContainsKeyword = false - checkOpts.RequireRuleBodyOrValue = false - errors := ast.CheckRegoV1WithOptions(module, checkOpts) - if len(errors) > 0 { - return nil, errors - } + if opts.ParserOptions == nil { + opts.ParserOptions = &ast.ParserOptions{} } - - formatted, err := AstWithOpts(module, opts) - if err != nil { - return nil, fmt.Errorf("%s: %v", filename, err) + if opts.ParserOptions.RegoVersion == ast.RegoUndefined { + opts.ParserOptions.RegoVersion = ast.DefaultRegoVersion } - return formatted, nil + return v1.SourceWithOpts(filename, src, opts) } // MustAst is a helper function to format a Rego AST element. If any errors @@ -92,1491 +50,37 @@ func MustAst(x interface{}) []byte { return bs } -// Ast formats a Rego AST element. If the passed value is not a valid AST -// element, Ast returns nil and an error. If AST nodes are missing locations -// an arbitrary location will be used. -func Ast(x interface{}) ([]byte, error) { - return AstWithOpts(x, Opts{}) -} - -type fmtOpts struct { - // When the future keyword "contains" is imported, all the pretty-printed - // modules will use that format for partial sets. - // NOTE(sr): For ref-head rules, this will be the default behaviour, since - // we need "contains" to disambiguate complete rules from partial sets. - contains bool - - // Same logic applies as for "contains": if `future.keywords.if` (or all - // future keywords) is imported, we'll render rules that can use `if` with - // `if`. - ifs bool - - // We check all rule ref heads to see if any of them _requires_ support - // for ref heads -- if they do, we'll print all of them in a different way - // than if they don't. - refHeads bool - - regoV1 bool -} - -func AstWithOpts(x interface{}, opts Opts) ([]byte, error) { - // The node has to be deep copied because it may be mutated below. Alternatively, - // we could avoid the copy by checking if mutation will occur first. For now, - // since format is not latency sensitive, just deep copy in all cases. - x = ast.Copy(x) - - wildcards := map[ast.Var]*ast.Term{} - - // NOTE(sr): When the formatter encounters a call to internal.member_2 - // or internal.member_3, it will sugarize them into usage of the `in` - // operator. It has to ensure that the proper future keyword import is - // present. - extraFutureKeywordImports := map[string]struct{}{} - - o := fmtOpts{} - - if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 { - o.regoV1 = true - o.ifs = true - o.contains = true - } - - // Preprocess the AST. Set any required defaults and calculate - // values required for printing the formatted output. 
- ast.WalkNodes(x, func(x ast.Node) bool { - switch n := x.(type) { - case ast.Body: - if len(n) == 0 { - return false - } - case *ast.Term: - unmangleWildcardVar(wildcards, n) - - case *ast.Expr: - switch { - case n.IsCall() && ast.Member.Ref().Equal(n.Operator()) || ast.MemberWithKey.Ref().Equal(n.Operator()): - extraFutureKeywordImports["in"] = struct{}{} - case n.IsEvery(): - extraFutureKeywordImports["every"] = struct{}{} - } - - case *ast.Import: - switch { - case isRegoV1Compatible(n): - o.contains = true - o.ifs = true - case future.IsAllFutureKeywords(n): - o.contains = true - o.ifs = true - case future.IsFutureKeyword(n, "contains"): - o.contains = true - case future.IsFutureKeyword(n, "if"): - o.ifs = true - } - - case *ast.Rule: - if len(n.Head.Ref()) > 2 { - o.refHeads = true - } - if len(n.Head.Ref()) == 2 && n.Head.Key != nil && n.Head.Value == nil { // p.q contains "x" - o.refHeads = true - } - } - - if opts.IgnoreLocations || x.Loc() == nil { - x.SetLoc(defaultLocation(x)) - } - return false - }) - - w := &writer{ - indent: "\t", - errs: make([]*ast.Error, 0), - } - - switch x := x.(type) { - case *ast.Module: - if opts.RegoVersion == ast.RegoV1 { - x.Imports = filterRegoV1Import(x.Imports) - } else if opts.RegoVersion == ast.RegoV0CompatV1 { - x.Imports = ensureRegoV1Import(x.Imports) - } - - if opts.RegoVersion == ast.RegoV0CompatV1 || opts.RegoVersion == ast.RegoV1 || moduleIsRegoV1Compatible(x) { - x.Imports = future.FilterFutureImports(x.Imports) - } else { - for kw := range extraFutureKeywordImports { - x.Imports = ensureFutureKeywordImport(x.Imports, kw) - } - } - w.writeModule(x, o) - case *ast.Package: - w.writePackage(x, nil) - case *ast.Import: - w.writeImports([]*ast.Import{x}, nil) - case *ast.Rule: - w.writeRule(x, false /* isElse */, o, nil) - case *ast.Head: - w.writeHead(x, - false, // isDefault - false, // isExpandedConst - o, - nil) - case ast.Body: - w.writeBody(x, nil) - case *ast.Expr: - w.writeExpr(x, nil) - case *ast.With: - w.writeWith(x, nil, false) - case *ast.Term: - w.writeTerm(x, nil) - case ast.Value: - w.writeTerm(&ast.Term{Value: x, Location: &ast.Location{}}, nil) - case *ast.Comment: - w.writeComments([]*ast.Comment{x}) - default: - return nil, fmt.Errorf("not an ast element: %v", x) - } - - if len(w.errs) > 0 { - return nil, w.errs - } - return squashTrailingNewlines(w.buf.Bytes()), nil -} - -func unmangleWildcardVar(wildcards map[ast.Var]*ast.Term, n *ast.Term) { - - v, ok := n.Value.(ast.Var) - if !ok || !v.IsWildcard() { - return - } - - first, ok := wildcards[v] - if !ok { - wildcards[v] = n - return - } - - w := v[len(ast.WildcardPrefix):] - - // Prepend an underscore to ensure the variable will parse. - if len(w) == 0 || w[0] != '_' { - w = "_" + w - } - - if first != nil { - first.Value = w - wildcards[v] = nil - } - - n.Value = w -} - -func squashTrailingNewlines(bs []byte) []byte { - if bytes.HasSuffix(bs, []byte("\n")) { - return append(bytes.TrimRight(bs, "\n"), '\n') +// MustAstWithOpts is a helper function to format a Rego AST element. If any errors +// occurs this function will panic. 
This is mostly used for test +func MustAstWithOpts(x interface{}, opts Opts) []byte { + bs, err := AstWithOpts(x, opts) + if err != nil { + panic(err) } return bs } -func defaultLocation(x ast.Node) *ast.Location { - return ast.NewLocation([]byte(x.String()), defaultLocationFile, 1, 1) -} - -type writer struct { - buf bytes.Buffer - - indent string - level int - inline bool - beforeEnd *ast.Comment - delay bool - errs ast.Errors -} - -func (w *writer) writeModule(module *ast.Module, o fmtOpts) { - var pkg *ast.Package - var others []interface{} - var comments []*ast.Comment - visitor := ast.NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case *ast.Comment: - comments = append(comments, x) - return true - case *ast.Import, *ast.Rule: - others = append(others, x) - return true - case *ast.Package: - pkg = x - return true - default: - return false - } - }) - visitor.Walk(module) - - sort.Slice(comments, func(i, j int) bool { - return locLess(comments[i], comments[j]) - }) - - sort.Slice(others, func(i, j int) bool { - return locLess(others[i], others[j]) - }) - - comments = trimTrailingWhitespaceInComments(comments) - - comments = w.writePackage(pkg, comments) - var imports []*ast.Import - var rules []*ast.Rule - for len(others) > 0 { - imports, others = gatherImports(others) - comments = w.writeImports(imports, comments) - rules, others = gatherRules(others) - comments = w.writeRules(rules, o, comments) - } - - for i, c := range comments { - w.writeLine(c.String()) - if i == len(comments)-1 { - w.write("\n") - } - } -} - -func trimTrailingWhitespaceInComments(comments []*ast.Comment) []*ast.Comment { - for _, c := range comments { - c.Text = bytes.TrimRightFunc(c.Text, unicode.IsSpace) - } - - return comments -} - -func (w *writer) writePackage(pkg *ast.Package, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, pkg.Location) - - w.startLine() - w.write(pkg.String()) - w.blankLine() - - return comments -} - -func (w *writer) writeComments(comments []*ast.Comment) { - for i := 0; i < len(comments); i++ { - if i > 0 && locCmp(comments[i], comments[i-1]) > 1 { - w.blankLine() - } - w.writeLine(comments[i].String()) - } -} - -func (w *writer) writeRules(rules []*ast.Rule, o fmtOpts, comments []*ast.Comment) []*ast.Comment { - for _, rule := range rules { - comments = w.insertComments(comments, rule.Location) - comments = w.writeRule(rule, false, o, comments) - w.blankLine() - } - return comments -} - -func (w *writer) writeRule(rule *ast.Rule, isElse bool, o fmtOpts, comments []*ast.Comment) []*ast.Comment { - if rule == nil { - return comments - } - - if !isElse { - w.startLine() - } - - if rule.Default { - w.write("default ") - } - - // OPA transforms lone bodies like `foo = {"a": "b"}` into rules of the form - // `foo = {"a": "b"} { true }` in the AST. We want to preserve that notation - // in the formatted code instead of expanding the bodies into rules, so we - // pretend that the rule has no body in this case. 
- isExpandedConst := rule.Body.Equal(ast.NewBody(ast.NewExpr(ast.BooleanTerm(true)))) && rule.Else == nil - - comments = w.writeHead(rule.Head, rule.Default, isExpandedConst, o, comments) - - // this excludes partial sets UNLESS `contains` is used - partialSetException := o.contains || rule.Head.Value != nil - - if len(rule.Body) == 0 || isExpandedConst { - w.endLine() - return comments - } - - if (o.regoV1 || o.ifs) && partialSetException { - w.write(" if") - if len(rule.Body) == 1 { - if rule.Body[0].Location.Row == rule.Head.Location.Row { - w.write(" ") - comments = w.writeExpr(rule.Body[0], comments) - w.endLine() - if rule.Else != nil { - comments = w.writeElse(rule, o, comments) - } - return comments - } - } - } - w.write(" {") - w.endLine() - w.up() - - comments = w.writeBody(rule.Body, comments) - - var closeLoc *ast.Location - - if len(rule.Head.Args) > 0 { - closeLoc = closingLoc('(', ')', '{', '}', rule.Location) - } else if rule.Head.Key != nil { - closeLoc = closingLoc('[', ']', '{', '}', rule.Location) - } else { - closeLoc = closingLoc(0, 0, '{', '}', rule.Location) - } - - comments = w.insertComments(comments, closeLoc) - - w.down() - w.startLine() - w.write("}") - if rule.Else != nil { - comments = w.writeElse(rule, o, comments) - } - return comments -} - -func (w *writer) writeElse(rule *ast.Rule, o fmtOpts, comments []*ast.Comment) []*ast.Comment { - // If there was nothing else on the line before the "else" starts - // then preserve this style of else block, otherwise it will be - // started as an "inline" else eg: - // - // p { - // ... - // } - // - // else { - // ... - // } - // - // versus - // - // p { - // ... - // } else { - // ... - // } - // - // Note: This doesn't use the `close` as it currently isn't accurate for all - // types of values. Checking the actual line text is the most consistent approach. - wasInline := false - ruleLines := bytes.Split(rule.Location.Text, []byte("\n")) - relativeElseRow := rule.Else.Location.Row - rule.Location.Row - if relativeElseRow > 0 && relativeElseRow < len(ruleLines) { - elseLine := ruleLines[relativeElseRow] - if !bytes.HasPrefix(bytes.TrimSpace(elseLine), []byte("else")) { - wasInline = true - } - } - - // If there are any comments between the closing brace of the previous rule and the start - // of the else block we will always insert a new blank line between them. - hasCommentAbove := len(comments) > 0 && comments[0].Location.Row-rule.Else.Head.Location.Row < 0 || w.beforeEnd != nil - - if !hasCommentAbove && wasInline { - w.write(" ") - } else { - w.blankLine() - w.startLine() - } - - rule.Else.Head.Name = "else" // NOTE(sr): whaaat - rule.Else.Head.Reference = ast.Ref{ast.VarTerm("else")} - rule.Else.Head.Args = nil - comments = w.insertComments(comments, rule.Else.Head.Location) - - if hasCommentAbove && !wasInline { - // The comments would have ended the line, be sure to start one again - // before writing the rest of the "else" rule. 
- w.startLine() - } - - // For backwards compatibility adjust the rule head value location - // TODO: Refactor the logic for inserting comments, or special - // case comments in a rule head value so this can be removed - if rule.Else.Head.Value != nil { - rule.Else.Head.Value.Location = rule.Else.Head.Location - } - - return w.writeRule(rule.Else, true, o, comments) -} - -func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, o fmtOpts, comments []*ast.Comment) []*ast.Comment { - ref := head.Ref() - if head.Key != nil && head.Value == nil && !head.HasDynamicRef() { - ref = ref.GroundPrefix() - } - if o.refHeads || len(ref) == 1 { - w.write(ref.String()) - } else { - w.write(ref[0].String()) - w.write("[") - w.write(ref[1].String()) - w.write("]") - } - - if len(head.Args) > 0 { - w.write("(") - var args []interface{} - for _, arg := range head.Args { - args = append(args, arg) - } - comments = w.writeIterable(args, head.Location, closingLoc(0, 0, '(', ')', head.Location), comments, w.listWriter()) - w.write(")") - } - if head.Key != nil { - if o.contains && head.Value == nil { - w.write(" contains ") - comments = w.writeTerm(head.Key, comments) - } else if head.Value == nil { // no `if` for p[x] notation - w.write("[") - comments = w.writeTerm(head.Key, comments) - w.write("]") - } - } - - if head.Value != nil && - (head.Key != nil || ast.Compare(head.Value, ast.BooleanTerm(true)) != 0 || isExpandedConst || isDefault) { - - // in rego v1, explicitly print value for ref-head constants that aren't partial set assignments, e.g.: - // * a -> parser error, won't reach here - // * a.b -> a contains "b" - // * a.b.c -> a.b.c := true - // * a.b.c.d -> a.b.c.d := true - isRegoV1RefConst := o.regoV1 && isExpandedConst && head.Key == nil && len(head.Args) == 0 - - if len(head.Args) > 0 && - head.Location == head.Value.Location && - head.Name != "else" && - ast.Compare(head.Value, ast.BooleanTerm(true)) == 0 && - !isRegoV1RefConst { - // If the value location is the same as the location of the head, - // we know that the value is generated, i.e. f(1) - // Don't print the value (` = true`) as it is implied. - return comments - } - - if head.Assign || o.regoV1 { - // preserve assignment operator, and enforce it if formatting for Rego v1 - w.write(" := ") - } else { - w.write(" = ") - } - comments = w.writeTerm(head.Value, comments) - } - return comments -} - -func (w *writer) insertComments(comments []*ast.Comment, loc *ast.Location) []*ast.Comment { - before, at, comments := partitionComments(comments, loc) - w.writeComments(before) - if len(before) > 0 && loc.Row-before[len(before)-1].Location.Row > 1 { - w.blankLine() - } - - w.beforeLineEnd(at) - return comments -} - -func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, body.Loc()) - for i, expr := range body { - // Insert a blank line in before the expression if it was not right - // after the previous expression. 
- if i > 0 { - lastRow := body[i-1].Location.Row - for _, c := range body[i-1].Location.Text { - if c == '\n' { - lastRow++ - } - } - if expr.Location.Row > lastRow+1 { - w.blankLine() - } - } - w.startLine() - - comments = w.writeExpr(expr, comments) - w.endLine() - } - return comments -} - -func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, expr.Location) - if !w.inline { - w.startLine() - } - - if expr.Negated { - w.write("not ") - } - - switch t := expr.Terms.(type) { - case *ast.SomeDecl: - comments = w.writeSomeDecl(t, comments) - case *ast.Every: - comments = w.writeEvery(t, comments) - case []*ast.Term: - comments = w.writeFunctionCall(expr, comments) - case *ast.Term: - comments = w.writeTerm(t, comments) - } - - var indented bool - for i, with := range expr.With { - if i == 0 || with.Location.Row == expr.With[i-1].Location.Row { // we're on the same line - comments = w.writeWith(with, comments, false) - } else { // we're on a new line - if !indented { - indented = true - - w.up() - defer w.down() - } - w.endLine() - w.startLine() - comments = w.writeWith(with, comments, true) - } - } - - return comments -} - -func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, decl.Location) - w.write("some ") - - row := decl.Location.Row - - for i, term := range decl.Symbols { - switch val := term.Value.(type) { - case ast.Var: - if term.Location.Row > row { - w.endLine() - w.startLine() - w.write(w.indent) - row = term.Location.Row - } else if i > 0 { - w.write(" ") - } - - comments = w.writeTerm(term, comments) - - if i < len(decl.Symbols)-1 { - w.write(",") - } - case ast.Call: - comments = w.writeInOperator(false, val[1:], comments, decl.Location, ast.BuiltinMap[val[0].String()].Decl) - } - } - - return comments -} - -func (w *writer) writeEvery(every *ast.Every, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, every.Location) - w.write("every ") - if every.Key != nil { - comments = w.writeTerm(every.Key, comments) - w.write(", ") - } - comments = w.writeTerm(every.Value, comments) - w.write(" in ") - comments = w.writeTerm(every.Domain, comments) - w.write(" {") - comments = w.writeComprehensionBody('{', '}', every.Body, every.Loc(), every.Loc(), comments) - - if len(every.Body) == 1 && - every.Body[0].Location.Row == every.Location.Row { - w.write(" ") - } - w.write("}") - return comments -} - -func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment { - - terms := expr.Terms.([]*ast.Term) - operator := terms[0].Value.String() - - switch operator { - case ast.Member.Name, ast.MemberWithKey.Name: - return w.writeInOperator(false, terms[1:], comments, terms[0].Location, ast.BuiltinMap[terms[0].String()].Decl) - } - - bi, ok := ast.BuiltinMap[operator] - if !ok || bi.Infix == "" { - return w.writeFunctionCallPlain(terms, comments) - } - - numDeclArgs := len(bi.Decl.Args()) - numCallArgs := len(terms) - 1 - - switch numCallArgs { - case numDeclArgs: // Print infix where result is unassigned (e.g., x != y) - comments = w.writeTerm(terms[1], comments) - w.write(" " + bi.Infix + " ") - return w.writeTerm(terms[2], comments) - - case numDeclArgs + 1: // Print infix where result is assigned (e.g., z = x + y) - comments = w.writeTerm(terms[3], comments) - w.write(" " + ast.Equality.Infix + " ") - comments = w.writeTerm(terms[1], comments) - w.write(" " + bi.Infix + " ") - comments 
= w.writeTerm(terms[2], comments) - return comments - } - // NOTE(Trolloldem): in this point we are operating with a built-in function with the - // wrong arity even when the assignment notation is used - w.errs = append(w.errs, ArityFormatMismatchError(terms[1:], terms[0].String(), terms[0].Location, bi.Decl)) - return w.writeFunctionCallPlain(terms, comments) -} - -func (w *writer) writeFunctionCallPlain(terms []*ast.Term, comments []*ast.Comment) []*ast.Comment { - w.write(terms[0].String() + "(") - defer w.write(")") - args := make([]interface{}, len(terms)-1) - for i, t := range terms[1:] { - args[i] = t - } - loc := terms[0].Location - return w.writeIterable(args, loc, closingLoc(0, 0, '(', ')', loc), comments, w.listWriter()) -} - -func (w *writer) writeWith(with *ast.With, comments []*ast.Comment, indented bool) []*ast.Comment { - comments = w.insertComments(comments, with.Location) - if !indented { - w.write(" ") - } - w.write("with ") - comments = w.writeTerm(with.Target, comments) - w.write(" as ") - return w.writeTerm(with.Value, comments) -} - -func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) []*ast.Comment { - return w.writeTermParens(false, term, comments) -} - -func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Comment) []*ast.Comment { - comments = w.insertComments(comments, term.Location) - if !w.inline { - w.startLine() - } - - switch x := term.Value.(type) { - case ast.Ref: - w.writeRef(x) - case ast.Object: - comments = w.writeObject(x, term.Location, comments) - case *ast.Array: - comments = w.writeArray(x, term.Location, comments) - case ast.Set: - comments = w.writeSet(x, term.Location, comments) - case *ast.ArrayComprehension: - comments = w.writeArrayComprehension(x, term.Location, comments) - case *ast.ObjectComprehension: - comments = w.writeObjectComprehension(x, term.Location, comments) - case *ast.SetComprehension: - comments = w.writeSetComprehension(x, term.Location, comments) - case ast.String: - if term.Location.Text[0] == '`' { - // To preserve raw strings, we need to output the original text, - // not what x.String() would give us. - w.write(string(term.Location.Text)) - } else { - w.write(x.String()) - } - case ast.Var: - w.write(w.formatVar(x)) - case ast.Call: - comments = w.writeCall(parens, x, term.Location, comments) - case fmt.Stringer: - w.write(x.String()) - } - - if !w.inline { - w.startLine() - } - return comments -} - -func (w *writer) writeRef(x ast.Ref) { - if len(x) > 0 { - parens := false - _, ok := x[0].Value.(ast.Call) - if ok { - parens = x[0].Location.Text[0] == 40 // Starts with "(" - } - w.writeTermParens(parens, x[0], nil) - path := x[1:] - for _, t := range path { - switch p := t.Value.(type) { - case ast.String: - w.writeRefStringPath(p) - case ast.Var: - w.writeBracketed(w.formatVar(p)) - default: - w.write("[") - w.writeTerm(t, nil) - w.write("]") - } - } - } -} - -func (w *writer) writeBracketed(str string) { - w.write("[" + str + "]") -} - -var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") - -func (w *writer) writeRefStringPath(s ast.String) { - str := string(s) - if varRegexp.MatchString(str) && !ast.IsKeyword(str) { - w.write("." 
+ str) - } else { - w.writeBracketed(s.String()) - } -} - -func (w *writer) formatVar(v ast.Var) string { - if v.IsWildcard() { - return ast.Wildcard.String() - } - return v.String() -} - -func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - bi, ok := ast.BuiltinMap[x[0].String()] - if !ok || bi.Infix == "" { - return w.writeFunctionCallPlain(x, comments) - } - - if bi.Infix == "in" { - // NOTE(sr): `in` requires special handling, mirroring what happens in the parser, - // since there can be one or two lhs arguments. - return w.writeInOperator(true, x[1:], comments, loc, bi.Decl) - } - - // TODO(tsandall): improve to consider precedence? - if parens { - w.write("(") - } - - // NOTE(Trolloldem): writeCall is only invoked when the function call is a term - // of another function. The only valid arity is the one of the - // built-in function - if len(bi.Decl.Args()) != len(x)-1 { - w.errs = append(w.errs, ArityFormatMismatchError(x[1:], x[0].String(), loc, bi.Decl)) - return comments - } - - comments = w.writeTermParens(true, x[1], comments) - w.write(" " + bi.Infix + " ") - comments = w.writeTermParens(true, x[2], comments) - if parens { - w.write(")") - } - - return comments -} - -func (w *writer) writeInOperator(parens bool, operands []*ast.Term, comments []*ast.Comment, loc *ast.Location, f *types.Function) []*ast.Comment { - - if len(operands) != len(f.Args()) { - // The number of operands does not math the arity of the `in` operator - operator := ast.Member.Name - if len(f.Args()) == 3 { - operator = ast.MemberWithKey.Name - } - w.errs = append(w.errs, ArityFormatMismatchError(operands, operator, loc, f)) - return comments - } - kw := "in" - switch len(operands) { - case 2: - comments = w.writeTermParens(true, operands[0], comments) - w.write(" ") - w.write(kw) - w.write(" ") - comments = w.writeTermParens(true, operands[1], comments) - case 3: - if parens { - w.write("(") - defer w.write(")") - } - comments = w.writeTermParens(true, operands[0], comments) - w.write(", ") - comments = w.writeTermParens(true, operands[1], comments) - w.write(" ") - w.write(kw) - w.write(" ") - comments = w.writeTermParens(true, operands[2], comments) - } - return comments -} - -func (w *writer) writeObject(obj ast.Object, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - w.write("{") - defer w.write("}") - - var s []interface{} - obj.Foreach(func(k, v *ast.Term) { - s = append(s, ast.Item(k, v)) - }) - return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.objectWriter()) -} - -func (w *writer) writeArray(arr *ast.Array, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - w.write("[") - defer w.write("]") - - var s []interface{} - arr.Foreach(func(t *ast.Term) { - s = append(s, t) - }) - return w.writeIterable(s, loc, closingLoc(0, 0, '[', ']', loc), comments, w.listWriter()) -} - -func (w *writer) writeSet(set ast.Set, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - - if set.Len() == 0 { - w.write("set()") - return w.insertComments(comments, closingLoc(0, 0, '(', ')', loc)) - } - - w.write("{") - defer w.write("}") - - var s []interface{} - set.Foreach(func(t *ast.Term) { - s = append(s, t) - }) - return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.listWriter()) -} - -func (w *writer) writeArrayComprehension(arr *ast.ArrayComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - w.write("[") - defer w.write("]") - - return 
w.writeComprehension('[', ']', arr.Term, arr.Body, loc, comments) -} - -func (w *writer) writeSetComprehension(set *ast.SetComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - w.write("{") - defer w.write("}") - - return w.writeComprehension('{', '}', set.Term, set.Body, loc, comments) -} - -func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - w.write("{") - defer w.write("}") - - object.Value.Location = object.Key.Location // Ensure the value is not written on the next line. - if object.Key.Location.Row-loc.Row > 1 { - w.endLine() - w.startLine() - } - - comments = w.writeTerm(object.Key, comments) - w.write(": ") - return w.writeComprehension('{', '}', object.Value, object.Body, loc, comments) -} - -func (w *writer) writeComprehension(open, close byte, term *ast.Term, body ast.Body, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { - if term.Location.Row-loc.Row >= 1 { - w.endLine() - w.startLine() - } - - parens := false - _, ok := term.Value.(ast.Call) - if ok { - parens = term.Location.Text[0] == 40 // Starts with "(" - } - comments = w.writeTermParens(parens, term, comments) - w.write(" |") - - return w.writeComprehensionBody(open, close, body, term.Location, loc, comments) -} - -func (w *writer) writeComprehensionBody(open, close byte, body ast.Body, term, compr *ast.Location, comments []*ast.Comment) []*ast.Comment { - exprs := make([]interface{}, 0, len(body)) - for _, expr := range body { - exprs = append(exprs, expr) - } - lines := groupIterable(exprs, term) - - if body.Loc().Row-term.Row > 0 || len(lines) > 1 { - w.endLine() - w.up() - defer w.startLine() - defer w.down() - - comments = w.writeBody(body, comments) - } else { - w.write(" ") - i := 0 - for ; i < len(body)-1; i++ { - comments = w.writeExpr(body[i], comments) - w.write("; ") - } - comments = w.writeExpr(body[i], comments) - } - - return w.insertComments(comments, closingLoc(0, 0, open, close, compr)) -} - -func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) []*ast.Comment { - m, comments := mapImportsToComments(imports, comments) - - groups := groupImports(imports) - for _, group := range groups { - comments = w.insertComments(comments, group[0].Loc()) - - // Sort imports within a newline grouping. 
- sort.Slice(group, func(i, j int) bool { - a := group[i] - b := group[j] - return a.Compare(b) < 0 - }) - for _, i := range group { - w.startLine() - w.write(i.String()) - if c, ok := m[i]; ok { - w.write(" " + c.String()) - } - w.endLine() - } - w.blankLine() - } - - return comments -} - -type entryWriter func(interface{}, []*ast.Comment) []*ast.Comment - -func (w *writer) writeIterable(elements []interface{}, last *ast.Location, close *ast.Location, comments []*ast.Comment, fn entryWriter) []*ast.Comment { - lines := groupIterable(elements, last) - if len(lines) > 1 { - w.delayBeforeEnd() - w.startMultilineSeq() - } - - i := 0 - for ; i < len(lines)-1; i++ { - comments = w.writeIterableLine(lines[i], comments, fn) - w.write(",") - - w.endLine() - w.startLine() - } - - comments = w.writeIterableLine(lines[i], comments, fn) - - if len(lines) > 1 { - w.write(",") - w.endLine() - comments = w.insertComments(comments, close) - w.down() - w.startLine() - } - - return comments -} - -func (w *writer) writeIterableLine(elements []interface{}, comments []*ast.Comment, fn entryWriter) []*ast.Comment { - if len(elements) == 0 { - return comments - } - - i := 0 - for ; i < len(elements)-1; i++ { - comments = fn(elements[i], comments) - w.write(", ") - } - - return fn(elements[i], comments) -} - -func (w *writer) objectWriter() entryWriter { - return func(x interface{}, comments []*ast.Comment) []*ast.Comment { - entry := x.([2]*ast.Term) - - call, isCall := entry[0].Value.(ast.Call) - - paren := false - if isCall && ast.Or.Ref().Equal(call[0].Value) && entry[0].Location.Text[0] == 40 { // Starts with "(" - paren = true - w.write("(") - } - - comments = w.writeTerm(entry[0], comments) - if paren { - w.write(")") - } - - w.write(": ") - - call, isCall = entry[1].Value.(ast.Call) - if isCall && ast.Or.Ref().Equal(call[0].Value) && entry[1].Location.Text[0] == 40 { // Starts with "(" - w.write("(") - defer w.write(")") - } - - return w.writeTerm(entry[1], comments) - } -} - -func (w *writer) listWriter() entryWriter { - return func(x interface{}, comments []*ast.Comment) []*ast.Comment { - t, ok := x.(*ast.Term) - if ok { - call, isCall := t.Value.(ast.Call) - if isCall && ast.Or.Ref().Equal(call[0].Value) && t.Location.Text[0] == 40 { // Starts with "(" - w.write("(") - defer w.write(")") - } - } - - return w.writeTerm(t, comments) - } -} - -// groupIterable will group the `elements` slice into slices according to their -// location: anything on the same line will be put into a slice. -func groupIterable(elements []interface{}, last *ast.Location) [][]interface{} { - // Generated vars occur in the AST when we're rendering the result of - // partial evaluation in a bundle build with optimization. - // Those variables, and wildcard variables have the "default location", - // set in `Ast()`). That is no proper file location, and the grouping - // based on source location will yield a bad result. - // Another case is generated variables: they do have proper file locations, - // but their row/col information may no longer match their AST location. - // So, for generated variables, we also don't trust the location, but - // keep them ungrouped. - def := false // default location found? 
- for _, elem := range elements { - ast.WalkTerms(elem, func(t *ast.Term) bool { - if t.Location.File == defaultLocationFile { - def = true - return true - } - return false - }) - ast.WalkVars(elem, func(v ast.Var) bool { - if v.IsGenerated() { - def = true - return true - } - return false - }) - if def { // return as-is - return [][]interface{}{elements} - } - } - sort.Slice(elements, func(i, j int) bool { - return locLess(elements[i], elements[j]) +// Ast formats a Rego AST element. If the passed value is not a valid AST +// element, Ast returns nil and an error. If AST nodes are missing locations +// an arbitrary location will be used. +func Ast(x interface{}) ([]byte, error) { + return AstWithOpts(x, Opts{ + RegoVersion: ast.DefaultRegoVersion, }) - - var lines [][]interface{} - cur := make([]interface{}, 0, len(elements)) - for i, t := range elements { - elem := t - loc := getLoc(elem) - lineDiff := loc.Row - last.Row - if lineDiff > 0 && i > 0 { - lines = append(lines, cur) - cur = nil - } - - last = loc - cur = append(cur, elem) - } - return append(lines, cur) -} - -func mapImportsToComments(imports []*ast.Import, comments []*ast.Comment) (map[*ast.Import]*ast.Comment, []*ast.Comment) { - var leftovers []*ast.Comment - m := map[*ast.Import]*ast.Comment{} - - for _, c := range comments { - matched := false - for _, i := range imports { - if c.Loc().Row == i.Loc().Row { - m[i] = c - matched = true - break - } - } - if !matched { - leftovers = append(leftovers, c) - } - } - - return m, leftovers -} - -func groupImports(imports []*ast.Import) [][]*ast.Import { - switch len(imports) { // shortcuts - case 0: - return nil - case 1: - return [][]*ast.Import{imports} - } - // there are >=2 imports to group - - var groups [][]*ast.Import - group := []*ast.Import{imports[0]} - - for _, i := range imports[1:] { - last := group[len(group)-1] - - // nil-location imports have been sorted up to come first - if i.Loc() != nil && last.Loc() != nil && // first import with a location, or - i.Loc().Row-last.Loc().Row > 1 { // more than one row apart from previous import - - // start a new group - groups = append(groups, group) - group = []*ast.Import{} - } - group = append(group, i) - } - if len(group) > 0 { - groups = append(groups, group) - } - - return groups -} - -func partitionComments(comments []*ast.Comment, l *ast.Location) (before []*ast.Comment, at *ast.Comment, after []*ast.Comment) { - for _, c := range comments { - switch cmp := c.Location.Row - l.Row; { - case cmp < 0: - before = append(before, c) - case cmp > 0: - after = append(after, c) - case cmp == 0: - at = c - } - } - - return before, at, after -} - -func gatherImports(others []interface{}) (imports []*ast.Import, rest []interface{}) { - i := 0 -loop: - for ; i < len(others); i++ { - switch x := others[i].(type) { - case *ast.Import: - imports = append(imports, x) - case *ast.Rule: - break loop - } - } - return imports, others[i:] -} - -func gatherRules(others []interface{}) (rules []*ast.Rule, rest []interface{}) { - i := 0 -loop: - for ; i < len(others); i++ { - switch x := others[i].(type) { - case *ast.Rule: - rules = append(rules, x) - case *ast.Import: - break loop - } - } - return rules, others[i:] -} - -func locLess(a, b interface{}) bool { - return locCmp(a, b) < 0 -} - -func locCmp(a, b interface{}) int { - al := getLoc(a) - bl := getLoc(b) - switch { - case al == nil && bl == nil: - return 0 - case al == nil: - return -1 - case bl == nil: - return 1 - } - - if cmp := al.Row - bl.Row; cmp != 0 { - return cmp - - } - return 
al.Col - bl.Col -} - -func getLoc(x interface{}) *ast.Location { - switch x := x.(type) { - case ast.Node: // *ast.Head, *ast.Expr, *ast.With, *ast.Term - return x.Loc() - case *ast.Location: - return x - case [2]*ast.Term: // Special case to allow for easy printing of objects. - return x[0].Location - default: - panic("Not reached") - } -} - -func closingLoc(skipOpen, skipClose, open, close byte, loc *ast.Location) *ast.Location { - i, offset := 0, 0 - - // Skip past parens/brackets/braces in rule heads. - if skipOpen > 0 { - i, offset = skipPast(skipOpen, skipClose, loc) - } - - for ; i < len(loc.Text); i++ { - if loc.Text[i] == open { - break - } - } - - if i >= len(loc.Text) { - return &ast.Location{Row: -1} - } - - state := 1 - for state > 0 { - i++ - if i >= len(loc.Text) { - return &ast.Location{Row: -1} - } - - switch loc.Text[i] { - case open: - state++ - case close: - state-- - case '\n': - offset++ - } - } - - return &ast.Location{Row: loc.Row + offset} -} - -func skipPast(open, close byte, loc *ast.Location) (int, int) { - i := 0 - for ; i < len(loc.Text); i++ { - if loc.Text[i] == open { - break - } - } - - state := 1 - offset := 0 - for state > 0 { - i++ - if i >= len(loc.Text) { - return i, offset - } - - switch loc.Text[i] { - case open: - state++ - case close: - state-- - case '\n': - offset++ - } - } - - return i, offset -} - -// startLine begins a line with the current indentation level. -func (w *writer) startLine() { - w.inline = true - for i := 0; i < w.level; i++ { - w.write(w.indent) - } -} - -// endLine ends a line with a newline. -func (w *writer) endLine() { - w.inline = false - if w.beforeEnd != nil && !w.delay { - w.write(" " + w.beforeEnd.String()) - w.beforeEnd = nil - } - w.delay = false - w.write("\n") -} - -// beforeLineEnd registers a comment to be printed at the end of the current line. -func (w *writer) beforeLineEnd(c *ast.Comment) { - if w.beforeEnd != nil { - if c == nil { - return - } - panic("overwriting non-nil beforeEnd") - } - w.beforeEnd = c -} - -func (w *writer) delayBeforeEnd() { - w.delay = true } -// line prints a blank line. If the writer is currently in the middle of a line, -// line ends it and then prints a blank one. -func (w *writer) blankLine() { - if w.inline { - w.endLine() - } - w.write("\n") -} - -// write the input string and writes it to the buffer. -func (w *writer) write(s string) { - w.buf.WriteString(s) -} - -// writeLine writes the string on a newly started line, then terminate the line. -func (w *writer) writeLine(s string) { - if !w.inline { - w.startLine() - } - w.write(s) - w.endLine() -} - -func (w *writer) startMultilineSeq() { - w.endLine() - w.up() - w.startLine() -} - -// up increases the indentation level -func (w *writer) up() { - w.level++ -} - -// down decreases the indentation level -func (w *writer) down() { - if w.level == 0 { - panic("negative indentation level") - } - w.level-- -} - -func ensureFutureKeywordImport(imps []*ast.Import, kw string) []*ast.Import { - for _, imp := range imps { - if future.IsAllFutureKeywords(imp) || - future.IsFutureKeyword(imp, kw) || - (future.IsFutureKeyword(imp, "every") && kw == "in") { // "every" implies "in", so we don't need to add both - return imps - } - } - imp := &ast.Import{ - Path: ast.MustParseTerm("future.keywords." 
+ kw), - } - imp.Location = defaultLocation(imp) - return append(imps, imp) -} - -func ensureRegoV1Import(imps []*ast.Import) []*ast.Import { - return ensureImport(imps, ast.RegoV1CompatibleRef) -} - -func filterRegoV1Import(imps []*ast.Import) []*ast.Import { - var ret []*ast.Import - for _, imp := range imps { - path := imp.Path.Value.(ast.Ref) - if !ast.RegoV1CompatibleRef.Equal(path) { - ret = append(ret, imp) - } +func AstWithOpts(x interface{}, opts Opts) ([]byte, error) { + if opts.RegoVersion == ast.RegoUndefined { + opts.RegoVersion = ast.DefaultRegoVersion } - return ret -} -func ensureImport(imps []*ast.Import, path ast.Ref) []*ast.Import { - for _, imp := range imps { - p := imp.Path.Value.(ast.Ref) - if p.Equal(path) { - return imps - } - } - imp := &ast.Import{ - Path: ast.NewTerm(path), - } - imp.Location = defaultLocation(imp) - return append(imps, imp) + return v1.AstWithOpts(x, opts) } // ArgErrDetail but for `fmt` checks since compiler has not run yet. -type ArityFormatErrDetail struct { - Have []string `json:"have"` - Want []string `json:"want"` -} +type ArityFormatErrDetail = v1.ArityFormatErrDetail // arityMismatchError but for `fmt` checks since the compiler has not run yet. func ArityFormatMismatchError(operands []*ast.Term, operator string, loc *ast.Location, f *types.Function) *ast.Error { - want := make([]string, len(f.Args())) - for i := range f.Args() { - want[i] = types.Sprint(f.Args()[i]) - } - - have := make([]string, len(operands)) - for i := 0; i < len(operands); i++ { - have[i] = ast.TypeName(operands[i].Value) - } - err := ast.NewError(ast.TypeErr, loc, "%s: %s", operator, "arity mismatch") - err.Details = &ArityFormatErrDetail{ - Have: have, - Want: want, - } - return err -} - -// Lines returns the string representation of the detail. 
-func (d *ArityFormatErrDetail) Lines() []string { - return []string{ - "have: " + "(" + strings.Join(d.Have, ",") + ")", - "want: " + "(" + strings.Join(d.Want, ",") + ")", - } -} - -func moduleIsRegoV1Compatible(m *ast.Module) bool { - for _, imp := range m.Imports { - if isRegoV1Compatible(imp) { - return true - } - } - return false -} - -// isRegoV1Compatible returns true if the passed *ast.Import is `rego.v1` -func isRegoV1Compatible(imp *ast.Import) bool { - path := imp.Path.Value.(ast.Ref) - return len(path) == 2 && - ast.RegoRootDocument.Equal(path[0]) && - path[1].Equal(ast.StringTerm("v1")) + return v1.ArityFormatMismatchError(operands, operator, loc, f) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go index 064649733..3d67d5692 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/bundle/utils.go @@ -11,10 +11,10 @@ import ( "os" "path/filepath" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/bundle" - "github.com/open-policy-agent/opa/resolver/wasm" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/resolver/wasm" + "github.com/open-policy-agent/opa/v1/storage" ) // LoadWasmResolversFromStore will lookup all Wasm modules from the store along with the diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go index 4d80aeeef..dfb781e19 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/utils.go @@ -5,9 +5,9 @@ package compiler import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/schemas" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/schemas" + "github.com/open-policy-agent/opa/v1/util" ) type SchemaFile string @@ -32,7 +32,10 @@ func VerifyAuthorizationPolicySchema(compiler *ast.Compiler, ref ast.Ref) error schemaSet := ast.NewSchemaSet() schemaSet.Put(ast.SchemaRootRef, schemaDefinitions[AuthorizationPolicySchema]) - errs := ast.NewCompiler().WithSchemas(schemaSet).PassesTypeCheckRules(rules) + errs := ast.NewCompiler(). + WithDefaultRegoVersion(compiler.DefaultRegoVersion()). + WithSchemas(schemaSet). 
+ PassesTypeCheckRules(rules) if len(errs) > 0 { return errs diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv b/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv index 48c809d65..473497abb 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/callgraph.csv @@ -749,12 +749,12 @@ opa_set_get,opa_value_hash opa_set_get,opa_value_compare opa_number_try_int,opa_atoi64 opa_number_try_int,opa_abort -opa_value_get,opa_abort opa_value_get,opa_atoi64 opa_value_get,opa_value_hash opa_value_get,opa_value_compare -opa_value_compare_number,opa_atoi64 +opa_value_get,opa_abort opa_value_compare_number,opa_abort +opa_value_compare_number,opa_atoi64 opa_value_compare_number,opa_number_to_bf opa_value_compare_number,mpd_qcmp opa_value_compare_number,mpd_del @@ -779,10 +779,10 @@ opa_value_compare_set,opa_value_compare_set opa_value_compare_set,opa_abort opa_number_hash,opa_atof64 opa_number_hash,opa_abort -opa_value_iter,opa_abort opa_value_iter,opa_atoi64 opa_value_iter,opa_value_hash opa_value_iter,opa_value_compare +opa_value_iter,opa_abort opa_object_keys,opa_malloc opa_object_keys,opa_free opa_object_keys,opa_value_compare @@ -817,7 +817,6 @@ opa_value_merge,opa_malloc opa_value_merge,opa_value_get opa_value_merge,__opa_object_insert opa_value_merge,opa_value_merge -opa_value_merge,opa_abort opa_value_merge,opa_atoi64 opa_value_merge,opa_value_hash opa_value_merge,opa_value_compare_number @@ -825,6 +824,7 @@ opa_value_merge,opa_strncmp opa_value_merge,opa_value_compare opa_value_merge,opa_value_compare_object opa_value_merge,opa_value_compare_set +opa_value_merge,opa_abort __opa_object_insert,opa_value_hash __opa_object_insert,opa_value_compare __opa_object_insert,__opa_value_free @@ -904,8 +904,8 @@ opa_value_remove_path,opa_value_get opa_value_remove_path,opa_object_remove opa_lookup,opa_value_get opa_lookup,opa_value_iter -opa_lookup,opa_atoi64 opa_lookup,opa_abort +opa_lookup,opa_atoi64 opa_mapping_init,opa_json_parse opa_mapping_lookup,opa_lookup node::re2\28std::__1::basic_string\2c\20std::__1::allocator\20>\20const&\29,std::__1::basic_string\2c\20std::__1::allocator\20>::assign\28char\20const*\29 diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm b/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm index eb3147b8a..667b9cdd4 100644 Binary files a/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm and b/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/opa/opa.wasm differ diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go index 9a5cebec5..08dfe4486 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/compiler/wasm/wasm.go @@ -12,7 +12,6 @@ import ( "fmt" "io" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/compiler/wasm/opa" "github.com/open-policy-agent/opa/internal/debug" "github.com/open-policy-agent/opa/internal/wasm/encoding" @@ -20,8 +19,9 @@ import ( "github.com/open-policy-agent/opa/internal/wasm/module" 
"github.com/open-policy-agent/opa/internal/wasm/types" "github.com/open-policy-agent/opa/internal/wasm/util" - "github.com/open-policy-agent/opa/ir" - opatypes "github.com/open-policy-agent/opa/types" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ir" + opatypes "github.com/open-policy-agent/opa/v1/types" ) // Record Wasm ABI version in exported global variable diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/config/config.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/config/config.go index b1a9731f6..fdac48772 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/config/config.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/config/config.go @@ -15,11 +15,11 @@ import ( "sigs.k8s.io/yaml" "github.com/open-policy-agent/opa/internal/strvals" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" - "github.com/open-policy-agent/opa/plugins/rest" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/keys" + "github.com/open-policy-agent/opa/v1/logging" + "github.com/open-policy-agent/opa/v1/plugins/rest" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/util" ) // ServiceOptions stores the options passed to ParseServicesConfig diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go index 9cfaee8ba..4a4f8101f 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/edittree/edittree.go @@ -146,14 +146,13 @@ package edittree import ( - "encoding/json" "fmt" "math/big" "sort" "strings" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/edittree/bitvector" + "github.com/open-policy-agent/opa/v1/ast" ) // Deletions are encoded with a nil value pointer. @@ -213,10 +212,10 @@ func (e *EditTree) getKeyHash(key *ast.Term) (int, bool) { case ast.Null, ast.Boolean, ast.String, ast.Var: equal = func(y ast.Value) bool { return x == y } case ast.Number: - if xi, err := json.Number(x).Int64(); err == nil { + if xi, ok := x.Int64(); ok { equal = func(y ast.Value) bool { if y, ok := y.(ast.Number); ok { - if yi, err := json.Number(y).Int64(); err == nil { + if yi, ok := y.Int64(); ok { return xi == yi } } @@ -725,9 +724,9 @@ func (e *EditTree) Unfold(path ast.Ref) (*EditTree, error) { // Fall back to looking up the key in e.value. // Extend the tree if key is present. Error otherwise. - if v, err := x.Find(ast.Ref{ast.IntNumberTerm(idx)}); err == nil { + if v, err := x.Find(ast.Ref{ast.InternedIntNumberTerm(idx)}); err == nil { // TODO: Consider a more efficient "Replace" function that special-cases this for arrays instead? - _, err := e.Delete(ast.IntNumberTerm(idx)) + _, err := e.Delete(ast.InternedIntNumberTerm(idx)) if err != nil { return nil, err } @@ -1026,8 +1025,7 @@ func (e *EditTree) Exists(path ast.Ref) bool { } // Fallback if child lookup failed. // We have to ensure that the lookup term is a number here, or Find will fail. - k := ast.Ref{ast.IntNumberTerm(idx)}.Concat(path[1:]) - _, err = x.Find(k) + _, err = x.Find(ast.Ref{ast.InternedIntNumberTerm(idx)}.Concat(path[1:])) return err == nil default: // Catch all primitive types. 
diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go index 2863aad4e..eb6091cc6 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/future/filter_imports.go @@ -4,7 +4,7 @@ package future -import "github.com/open-policy-agent/opa/ast" +import "github.com/open-policy-agent/opa/v1/ast" // FilterFutureImports filters OUT any future imports from the passed slice of // `*ast.Import`s. @@ -35,3 +35,15 @@ func IsFutureKeyword(imp *ast.Import, kw string) bool { path[1].Equal(ast.StringTerm("keywords")) && path[2].Equal(ast.StringTerm(kw)) } + +func WhichFutureKeyword(imp *ast.Import) (string, bool) { + path := imp.Path.Value.(ast.Ref) + if len(path) == 3 && + ast.FutureRootDocument.Equal(path[0]) && + path[1].Equal(ast.StringTerm("keywords")) { + if str, ok := path[2].Value.(ast.String); ok { + return string(str), true + } + } + return "", false +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go index 804702b94..84a529287 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/future/parser_opts.go @@ -7,7 +7,7 @@ package future import ( "fmt" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // ParserOptionsFromFutureImports transforms a slice of `ast.Import`s into the diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go index 876419f56..515702095 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/schemaReferencePool.go @@ -25,10 +25,6 @@ package gojsonschema -import ( - "fmt" -) - type schemaReferencePool struct { documents map[string]*SubSchema } @@ -44,7 +40,7 @@ func newSchemaReferencePool() *schemaReferencePool { func (p *schemaReferencePool) Get(ref string) (r *SubSchema, o bool) { if internalLogEnabled { - internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) + internalLog("Schema Reference ( %s )", ref) } if sch, ok := p.documents[ref]; ok { @@ -60,7 +56,7 @@ func (p *schemaReferencePool) Get(ref string) (r *SubSchema, o bool) { func (p *schemaReferencePool) Add(ref string, sch *SubSchema) { if internalLogEnabled { - internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) + internalLog("Add Schema Reference %s to pool", ref) } if _, ok := p.documents[ref]; !ok { p.documents[ref] = sch diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go index 7c86e3724..efdea58b6 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/gojsonschema/validation.go @@ -348,7 +348,7 @@ func (v *SubSchema) validateSchema(currentSubSchema *SubSchema, currentNode inte } } - if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { + if 
len(currentSubSchema.dependencies) > 0 { if currentNodeMap, ok := currentNode.(map[string]interface{}); ok { for elementKey := range currentNodeMap { if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { @@ -469,7 +469,7 @@ func (v *SubSchema) validateArray(currentSubSchema *SubSchema, value []interface result.mergeErrors(validationResult) } } else { - if currentSubSchema.ItemsChildren != nil && len(currentSubSchema.ItemsChildren) > 0 { + if len(currentSubSchema.ItemsChildren) > 0 { nbItems := len(currentSubSchema.ItemsChildren) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go index d536e5e5f..f68176747 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fields_on_correct_type.go @@ -27,7 +27,7 @@ func init() { } addError( - Message(message), + Message(message), //nolint:govet At(field.Position), ) }) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go index 66bd348c4..861e3b16c 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/gqlparser/validator/rules/fragments_on_composite_types.go @@ -20,7 +20,7 @@ func init() { message := fmt.Sprintf(`Fragment cannot condition on non composite type "%s".`, inlineFragment.TypeCondition) addError( - Message(message), + Message(message), //nolint:govet At(inlineFragment.Position), ) }) @@ -33,7 +33,7 @@ func init() { message := fmt.Sprintf(`Fragment "%s" cannot condition on non composite type "%s".`, fragment.Name, fragment.TypeCondition) addError( - Message(message), + Message(message), //nolint:govet At(fragment.Position), ) }) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go index 31c89869d..550618079 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/json/patch/patch.go @@ -7,7 +7,7 @@ package patch import ( "strings" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/storage" ) // ParsePatchPathEscaped returns a new path for the given escaped str. 
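Aside: ParsePatchPathEscaped above works on JSON-Pointer-style paths, where RFC 6901 escapes "/" as "~1" and "~" as "~0", and decoding must replace "~1" before "~0" so that "~01" decodes back to "~1". A standalone sketch of that rule (not the vendored implementation, which this hunk does not show):

package main

import (
	"fmt"
	"strings"
)

// unescapeToken applies the RFC 6901 decoding rules in the required order:
// "~1" -> "/" first, then "~0" -> "~".
func unescapeToken(s string) string {
	s = strings.ReplaceAll(s, "~1", "/")
	return strings.ReplaceAll(s, "~0", "~")
}

func main() {
	path := "/a~1b/m~0n"
	var parts []string
	for _, tok := range strings.Split(strings.TrimPrefix(path, "/"), "/") {
		parts = append(parts, unescapeToken(tok))
	}
	fmt.Println(parts) // [a/b m~n]
}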
diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go index b75d26dda..160775c0e 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/planner.go @@ -11,10 +11,10 @@ import ( "io" "sort" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/ast/location" "github.com/open-policy-agent/opa/internal/debug" - "github.com/open-policy-agent/opa/ir" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ast/location" + "github.com/open-policy-agent/opa/v1/ir" ) // QuerySet represents the input to the planner. @@ -1037,7 +1037,7 @@ func (p *Planner) planExprCall(e *ast.Expr, iter planiter) error { args = p.defaultOperands() } else if decl, ok := p.decls[operator]; ok { relation = decl.Relation - arity = len(decl.Decl.Args()) + arity = decl.Decl.Arity() void = decl.Decl.Result() == nil name = operator p.externs[operator] = decl @@ -1519,7 +1519,7 @@ func (p *Planner) planValue(t ast.Value, loc *ast.Location, iter planiter) error p.loc = loc return p.planObjectComprehension(v, iter) default: - return fmt.Errorf("%v term not implemented", ast.TypeName(v)) + return fmt.Errorf("%v term not implemented", ast.ValueName(v)) } } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go index f5d6f3fc6..2f424da52 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/rules.go @@ -4,7 +4,7 @@ import ( "fmt" "sort" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // funcstack implements a simple map structure used to keep track of virtual diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go index dccff1b5c..0df6bcd8b 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/planner/varstack.go @@ -5,8 +5,8 @@ package planner import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/ir" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ir" ) type varstack []map[ast.Var]ir.Local diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go index 179b5b5d5..55e587e9f 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/ecr.go @@ -11,7 +11,7 @@ import ( "time" "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) // Values taken from diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go index 77c0bc934..6dfb06a49 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go +++ 
b/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/kms.go @@ -10,7 +10,7 @@ import ( "time" "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) // Values taken from diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go index bfb780754..1e50d01f9 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/signing_v4.go @@ -13,13 +13,13 @@ import ( "io" "net/http" "net/url" - "sort" "strings" "time" v4 "github.com/open-policy-agent/opa/internal/providers/aws/v4" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) func stringFromTerm(t *ast.Term) string { @@ -67,19 +67,6 @@ func sha256MAC(message string, key []byte) []byte { return mac.Sum(nil) } -func sortKeys(strMap map[string][]string) []string { - keys := make([]string, len(strMap)) - - i := 0 - for k := range strMap { - keys[i] = k - i++ - } - sort.Strings(keys) - - return keys -} - // SignRequest modifies an http.Request to include an AWS V4 signature based on the provided credentials. func SignRequest(req *http.Request, service string, creds Credentials, theTime time.Time, sigVersion string) error { // General ref. https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html @@ -168,7 +155,7 @@ func SignV4(headers map[string][]string, method string, theURL *url.URL, body [] canonicalReq += theURL.RawQuery + "\n" // RAW Query String // include the values for the signed headers - orderedKeys := sortKeys(headersToSign) + orderedKeys := util.KeysSorted(headersToSign) for _, k := range orderedKeys { canonicalReq += k + ":" + strings.Join(headersToSign[k], ",") + "\n" } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go index e033da746..9ce9af90d 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/providers/aws/util.go @@ -5,7 +5,7 @@ import ( "io" "net/http" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) // DoRequestWithClient is a convenience function to get the body of an HTTP response with diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go index 6e84df4b0..173b5a3c1 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/ref/ref.go @@ -9,8 +9,8 @@ import ( "errors" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" ) // ParseDataPath returns a ref from the slash separated path s rooted at data. 
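The signing_v4.go hunk above replaces the file-local sortKeys helper with a shared util.KeysSorted call from the v1 util package. A minimal sketch of what such a sorted-keys helper can look like (the name and signature below are illustrative, not OPA's actual API):

package main

import (
	"fmt"
	"sort"
)

// keysSorted is an illustrative stand-in for the util.KeysSorted call in the
// signing_v4.go hunk; the real helper in opa/v1/util may have a different,
// more general signature.
func keysSorted[V any](m map[string]V) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	headers := map[string][]string{
		"host":       {"example.com"},
		"x-amz-date": {"20250101T000000Z"},
	}
	// Iterating a Go map directly is randomized, so canonical request
	// construction needs a deterministic, sorted key order.
	for _, k := range keysSorted(headers) {
		fmt.Println(k + ":" + headers[k][0])
	}
}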
diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go index ea1e339c1..072e37667 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/rego/opa/options.go @@ -4,11 +4,11 @@ import ( "io" "time" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" ) // Result holds the evaluation result. @@ -18,13 +18,14 @@ type Result struct { // EvalOpts define options for performing an evaluation. type EvalOpts struct { - Input *interface{} - Metrics metrics.Metrics - Entrypoint int32 - Time time.Time - Seed io.Reader - InterQueryBuiltinCache cache.InterQueryCache - NDBuiltinCache builtins.NDBCache - PrintHook print.Hook - Capabilities *ast.Capabilities + Input *interface{} + Metrics metrics.Metrics + Entrypoint int32 + Time time.Time + Seed io.Reader + InterQueryBuiltinCache cache.InterQueryCache + InterQueryBuiltinValueCache cache.InterQueryValueCache + NDBuiltinCache builtins.NDBCache + PrintHook print.Hook + Capabilities *ast.Capabilities } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/report/report.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/report/report.go index 145d0a946..55f4cfe21 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/report/report.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/report/report.go @@ -17,12 +17,12 @@ import ( "sync" "time" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/keys" + "github.com/open-policy-agent/opa/v1/logging" + "github.com/open-policy-agent/opa/v1/version" - "github.com/open-policy-agent/opa/plugins/rest" - "github.com/open-policy-agent/opa/util" - "github.com/open-policy-agent/opa/version" + "github.com/open-policy-agent/opa/v1/plugins/rest" + "github.com/open-policy-agent/opa/v1/util" ) // ExternalServiceURL is the base HTTP URL for a telemetry service. 
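The EvalOpts hunk above adds an InterQueryBuiltinValueCache field alongside the existing InterQueryBuiltinCache. Because option structs like this are built with keyed field literals, growing them is backward compatible: untouched call sites keep compiling and the new field stays at its zero value. An illustrative sketch with made-up field names:

package main

import "fmt"

// evalOpts mirrors the shape of an options struct such as EvalOpts in the
// hunk above; the field names here are illustrative, not OPA's.
type evalOpts struct {
	Entrypoint int32
	Seed       string
	ValueCache map[string]any // newly added option; nil means "not configured"
}

func eval(opts evalOpts) {
	fmt.Printf("entrypoint=%d value-cache=%v\n", opts.Entrypoint, opts.ValueCache != nil)
}

func main() {
	// A pre-existing call site with keyed fields keeps compiling after the
	// struct grows a field; the new field simply stays at its zero value.
	eval(evalOpts{Entrypoint: 1})
}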
diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go index b1a5b7157..814847a12 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/runtime/init/init.go @@ -12,12 +12,12 @@ import ( "path/filepath" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/bundle" storedversion "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/loader" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/storage" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/loader" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" ) // InsertAndCompileOptions contains the input for the operation. @@ -53,6 +53,7 @@ func InsertAndCompile(ctx context.Context, opts InsertAndCompileOptions) (*Inser } compiler := ast.NewCompiler(). + WithDefaultRegoVersion(opts.ParserOptions.RegoVersion). SetErrorLimit(opts.MaxErrors). WithPathConflictsCheck(storage.NonEmpty(ctx, opts.Store, opts.Txn)). WithEnablePrintStatements(opts.EnablePrintStatements) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go index 1fc07f68c..1eceb83df 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/strvals/parser.go @@ -31,7 +31,7 @@ var ErrNotList = errors.New("not a list") // MaxIndex is the maximum index that will be allowed by setIndex. // The default value 65536 = 1024 * 64 -var MaxIndex = 65536 +const MaxIndex = 65536 // ToYAML takes a string of arguments and converts to a YAML document. 
func ToYAML(s string) (string, error) { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/version/version.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/version/version.go index 1c2e9ecd0..dc52733fc 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/version/version.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/version/version.go @@ -10,8 +10,8 @@ import ( "fmt" "runtime" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/version" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/version" ) var versionPath = storage.MustParsePath("/system/version") diff --git a/constraint/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go b/constraint/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go index 35e6059c7..7120392ce 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/internal/wasm/encoding/reader.go @@ -809,19 +809,19 @@ func readLimits(r io.Reader, l *module.Limit) error { return err } - min, err := leb128.ReadVarUint32(r) + minLim, err := leb128.ReadVarUint32(r) if err != nil { return err } - l.Min = min + l.Min = minLim if b == 1 { - max, err := leb128.ReadVarUint32(r) + maxLim, err := leb128.ReadVarUint32(r) if err != nil { return err } - l.Max = &max + l.Max = &maxLim } else if b != 0 { return fmt.Errorf("illegal limit flag") } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/keys/keys.go b/constraint/vendor/github.com/open-policy-agent/opa/keys/keys.go deleted file mode 100644 index de0349694..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/keys/keys.go +++ /dev/null @@ -1,99 +0,0 @@ -package keys - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/open-policy-agent/opa/util" -) - -const defaultSigningAlgorithm = "RS256" - -var supportedAlgos = map[string]struct{}{ - "ES256": {}, "ES384": {}, "ES512": {}, - "HS256": {}, "HS384": {}, "HS512": {}, - "PS256": {}, "PS384": {}, "PS512": {}, - "RS256": {}, "RS384": {}, "RS512": {}, -} - -// IsSupportedAlgorithm true if provided alg is supported -func IsSupportedAlgorithm(alg string) bool { - _, ok := supportedAlgos[alg] - return ok -} - -// Config holds the keys used to sign or verify bundles and tokens -type Config struct { - Key string `json:"key"` - PrivateKey string `json:"private_key"` - Algorithm string `json:"algorithm"` - Scope string `json:"scope"` -} - -// Equal returns true if this key config is equal to the other. 
-func (k *Config) Equal(other *Config) bool { - return other != nil && *k == *other -} - -func (k *Config) validateAndInjectDefaults(id string) error { - if k.Key == "" && k.PrivateKey == "" { - return fmt.Errorf("invalid keys configuration: no keys provided for key ID %v", id) - } - - if k.Algorithm == "" { - k.Algorithm = defaultSigningAlgorithm - } - - if !IsSupportedAlgorithm(k.Algorithm) { - return fmt.Errorf("unsupported algorithm '%v'", k.Algorithm) - } - - return nil -} - -// NewKeyConfig return a new Config -func NewKeyConfig(key, alg, scope string) (*Config, error) { - var pubKey string - if _, err := os.Stat(key); err == nil { - bs, err := os.ReadFile(key) - if err != nil { - return nil, err - } - pubKey = string(bs) - } else if os.IsNotExist(err) { - pubKey = key - } else { - return nil, err - } - - return &Config{ - Key: pubKey, - Algorithm: alg, - Scope: scope, - }, nil -} - -// ParseKeysConfig returns a map containing the key and the signing algorithm -func ParseKeysConfig(raw json.RawMessage) (map[string]*Config, error) { - keys := map[string]*Config{} - var obj map[string]json.RawMessage - - if err := util.Unmarshal(raw, &obj); err == nil { - for k := range obj { - var keyConfig Config - if err = util.Unmarshal(obj[k], &keyConfig); err != nil { - return nil, err - } - - if err = keyConfig.validateAndInjectDefaults(k); err != nil { - return nil, err - } - - keys[k] = &keyConfig - } - } else { - return nil, err - } - return keys, nil -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/loader/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/loader/doc.go new file mode 100644 index 000000000..9f60920d9 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/loader/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package loader diff --git a/constraint/vendor/github.com/open-policy-agent/opa/loader/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/loader/errors.go index b8aafb142..8dc70b867 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/loader/errors.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/loader/errors.go @@ -5,58 +5,8 @@ package loader import ( - "fmt" - "strings" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/loader" ) // Errors is a wrapper for multiple loader errors. 
-type Errors []error - -func (e Errors) Error() string { - if len(e) == 0 { - return "no error(s)" - } - if len(e) == 1 { - return "1 error occurred during loading: " + e[0].Error() - } - buf := make([]string, len(e)) - for i := range buf { - buf[i] = e[i].Error() - } - return fmt.Sprintf("%v errors occurred during loading:\n", len(e)) + strings.Join(buf, "\n") -} - -func (e *Errors) add(err error) { - if errs, ok := err.(ast.Errors); ok { - for i := range errs { - *e = append(*e, errs[i]) - } - } else { - *e = append(*e, err) - } -} - -type unsupportedDocumentType string - -func (path unsupportedDocumentType) Error() string { - return string(path) + ": document must be of type object" -} - -type unrecognizedFile string - -func (path unrecognizedFile) Error() string { - return string(path) + ": can't recognize file type" -} - -func isUnrecognizedFile(err error) bool { - _, ok := err.(unrecognizedFile) - return ok -} - -type mergeError string - -func (e mergeError) Error() string { - return string(e) + ": merge error" -} +type Errors = v1.Errors diff --git a/constraint/vendor/github.com/open-policy-agent/opa/loader/loader.go b/constraint/vendor/github.com/open-policy-agent/opa/loader/loader.go index 461639ed1..9b2f91d4e 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/loader/loader.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/loader/loader.go @@ -6,478 +6,74 @@ package loader import ( - "bytes" - "fmt" - "io" "io/fs" "os" - "path/filepath" - "sort" "strings" - "sigs.k8s.io/yaml" - "github.com/open-policy-agent/opa/ast" - astJSON "github.com/open-policy-agent/opa/ast/json" "github.com/open-policy-agent/opa/bundle" - fileurl "github.com/open-policy-agent/opa/internal/file/url" - "github.com/open-policy-agent/opa/internal/merge" - "github.com/open-policy-agent/opa/loader/filter" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/inmem" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/loader" ) // Result represents the result of successfully loading zero or more files. -type Result struct { - Documents map[string]interface{} - Modules map[string]*RegoFile - path []string -} - -// ParsedModules returns the parsed modules stored on the result. -func (l *Result) ParsedModules() map[string]*ast.Module { - modules := make(map[string]*ast.Module) - for _, module := range l.Modules { - modules[module.Name] = module.Parsed - } - return modules -} - -// Compiler returns a Compiler object with the compiled modules from this loader -// result. -func (l *Result) Compiler() (*ast.Compiler, error) { - compiler := ast.NewCompiler() - compiler.Compile(l.ParsedModules()) - if compiler.Failed() { - return nil, compiler.Errors - } - return compiler, nil -} - -// Store returns a Store object with the documents from this loader result. -func (l *Result) Store() (storage.Store, error) { - return l.StoreWithOpts() -} - -// StoreWithOpts returns a Store object with the documents from this loader result, -// instantiated with the passed options. -func (l *Result) StoreWithOpts(opts ...inmem.Opt) (storage.Store, error) { - return inmem.NewFromObjectWithOpts(l.Documents, opts...), nil -} +type Result = v1.Result // RegoFile represents the result of loading a single Rego source file. -type RegoFile struct { - Name string - Parsed *ast.Module - Raw []byte -} +type RegoFile = v1.RegoFile // Filter defines the interface for filtering files during loading. 
If the // filter returns true, the file should be excluded from the result. -type Filter = filter.LoaderFilter +type Filter = v1.Filter // GlobExcludeName excludes files and directories whose names do not match the // shell style pattern at minDepth or greater. func GlobExcludeName(pattern string, minDepth int) Filter { - return func(_ string, info fs.FileInfo, depth int) bool { - match, _ := filepath.Match(pattern, info.Name()) - return match && depth >= minDepth - } + return v1.GlobExcludeName(pattern, minDepth) } // FileLoader defines an interface for loading OPA data files // and Rego policies. -type FileLoader interface { - All(paths []string) (*Result, error) - Filtered(paths []string, filter Filter) (*Result, error) - AsBundle(path string) (*bundle.Bundle, error) - WithReader(io.Reader) FileLoader - WithFS(fs.FS) FileLoader - WithMetrics(metrics.Metrics) FileLoader - WithFilter(Filter) FileLoader - WithBundleVerificationConfig(*bundle.VerificationConfig) FileLoader - WithSkipBundleVerification(bool) FileLoader - WithProcessAnnotation(bool) FileLoader - WithCapabilities(*ast.Capabilities) FileLoader - WithJSONOptions(*astJSON.Options) FileLoader - WithRegoVersion(ast.RegoVersion) FileLoader - WithFollowSymlinks(bool) FileLoader -} +type FileLoader = v1.FileLoader // NewFileLoader returns a new FileLoader instance. func NewFileLoader() FileLoader { - return &fileLoader{ - metrics: metrics.New(), - files: make(map[string]bundle.FileInfo), - } -} - -type fileLoader struct { - metrics metrics.Metrics - filter Filter - bvc *bundle.VerificationConfig - skipVerify bool - files map[string]bundle.FileInfo - opts ast.ParserOptions - fsys fs.FS - reader io.Reader - followSymlinks bool -} - -// WithFS provides an fs.FS to use for loading files. You can pass nil to -// use plain IO calls (e.g. os.Open, os.Stat, etc.), this is the default -// behaviour. -func (fl *fileLoader) WithFS(fsys fs.FS) FileLoader { - fl.fsys = fsys - return fl -} - -// WithReader provides an io.Reader to use for loading the bundle tarball. -// An io.Reader passed via WithReader takes precedence over an fs.FS passed -// via WithFS. 
-func (fl *fileLoader) WithReader(rdr io.Reader) FileLoader { - fl.reader = rdr - return fl -} - -// WithMetrics provides the metrics instance to use while loading -func (fl *fileLoader) WithMetrics(m metrics.Metrics) FileLoader { - fl.metrics = m - return fl -} - -// WithFilter specifies the filter object to use to filter files while loading -func (fl *fileLoader) WithFilter(filter Filter) FileLoader { - fl.filter = filter - return fl -} - -// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle -func (fl *fileLoader) WithBundleVerificationConfig(config *bundle.VerificationConfig) FileLoader { - fl.bvc = config - return fl -} - -// WithSkipBundleVerification skips verification of a signed bundle -func (fl *fileLoader) WithSkipBundleVerification(skipVerify bool) FileLoader { - fl.skipVerify = skipVerify - return fl -} - -// WithProcessAnnotation enables or disables processing of schema annotations on rules -func (fl *fileLoader) WithProcessAnnotation(processAnnotation bool) FileLoader { - fl.opts.ProcessAnnotation = processAnnotation - return fl -} - -// WithCapabilities sets the supported capabilities when loading the files -func (fl *fileLoader) WithCapabilities(caps *ast.Capabilities) FileLoader { - fl.opts.Capabilities = caps - return fl -} - -// WithJSONOptions sets the JSONOptions for use when parsing files -func (fl *fileLoader) WithJSONOptions(opts *astJSON.Options) FileLoader { - fl.opts.JSONOptions = opts - return fl -} - -// WithRegoVersion sets the ast.RegoVersion to use when parsing and compiling modules. -func (fl *fileLoader) WithRegoVersion(version ast.RegoVersion) FileLoader { - fl.opts.RegoVersion = version - return fl -} - -// WithFollowSymlinks enables or disables following symlinks when loading files -func (fl *fileLoader) WithFollowSymlinks(followSymlinks bool) FileLoader { - fl.followSymlinks = followSymlinks - return fl -} - -// All returns a Result object loaded (recursively) from the specified paths. -func (fl fileLoader) All(paths []string) (*Result, error) { - return fl.Filtered(paths, nil) -} - -// Filtered returns a Result object loaded (recursively) from the specified -// paths while applying the given filters. If any filter returns true, the -// file/directory is excluded. -func (fl fileLoader) Filtered(paths []string, filter Filter) (*Result, error) { - return all(fl.fsys, paths, filter, func(curr *Result, path string, depth int) error { - - var ( - bs []byte - err error - ) - if fl.fsys != nil { - bs, err = fs.ReadFile(fl.fsys, path) - } else { - bs, err = os.ReadFile(path) - } - if err != nil { - return err - } - - result, err := loadKnownTypes(path, bs, fl.metrics, fl.opts) - if err != nil { - if !isUnrecognizedFile(err) { - return err - } - if depth > 0 { - return nil - } - result, err = loadFileForAnyType(path, bs, fl.metrics, fl.opts) - if err != nil { - return err - } - } - - return curr.merge(path, result) - }) -} - -// AsBundle loads a path as a bundle. If it is a single file -// it will be treated as a normal tarball bundle. If a directory -// is supplied it will be loaded as an unzipped bundle tree. 
-func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { - path, err := fileurl.Clean(path) - if err != nil { - return nil, err - } - - if err := checkForUNCPath(path); err != nil { - return nil, err - } - - var bundleLoader bundle.DirectoryLoader - var isDir bool - if fl.reader != nil { - bundleLoader = bundle.NewTarballLoaderWithBaseURL(fl.reader, path).WithFilter(fl.filter) - } else { - bundleLoader, isDir, err = GetBundleDirectoryLoaderFS(fl.fsys, path, fl.filter) - } - - if err != nil { - return nil, err - } - bundleLoader = bundleLoader.WithFollowSymlinks(fl.followSymlinks) - - br := bundle.NewCustomReader(bundleLoader). - WithMetrics(fl.metrics). - WithBundleVerificationConfig(fl.bvc). - WithSkipBundleVerification(fl.skipVerify). - WithProcessAnnotations(fl.opts.ProcessAnnotation). - WithCapabilities(fl.opts.Capabilities). - WithJSONOptions(fl.opts.JSONOptions). - WithFollowSymlinks(fl.followSymlinks). - WithRegoVersion(fl.opts.RegoVersion) - - // For bundle directories add the full path in front of module file names - // to simplify debugging. - if isDir { - br.WithBaseDir(path) - } - - b, err := br.Read() - if err != nil { - err = fmt.Errorf("bundle %s: %w", path, err) - } - - return &b, err + return v1.NewFileLoader().WithRegoVersion(ast.DefaultRegoVersion) } // GetBundleDirectoryLoader returns a bundle directory loader which can be used to load // files in the directory func GetBundleDirectoryLoader(path string) (bundle.DirectoryLoader, bool, error) { - return GetBundleDirectoryLoaderFS(nil, path, nil) + return v1.GetBundleDirectoryLoader(path) } // GetBundleDirectoryLoaderWithFilter returns a bundle directory loader which can be used to load // files in the directory after applying the given filter. func GetBundleDirectoryLoaderWithFilter(path string, filter Filter) (bundle.DirectoryLoader, bool, error) { - return GetBundleDirectoryLoaderFS(nil, path, filter) + return v1.GetBundleDirectoryLoaderWithFilter(path, filter) } // GetBundleDirectoryLoaderFS returns a bundle directory loader which can be used to load // files in the directory. func GetBundleDirectoryLoaderFS(fsys fs.FS, path string, filter Filter) (bundle.DirectoryLoader, bool, error) { - path, err := fileurl.Clean(path) - if err != nil { - return nil, false, err - } - - if err := checkForUNCPath(path); err != nil { - return nil, false, err - } - - var fi fs.FileInfo - if fsys != nil { - fi, err = fs.Stat(fsys, path) - } else { - fi, err = os.Stat(path) - } - if err != nil { - return nil, false, fmt.Errorf("error reading %q: %s", path, err) - } - - var bundleLoader bundle.DirectoryLoader - if fi.IsDir() { - if fsys != nil { - bundleLoader = bundle.NewFSLoaderWithRoot(fsys, path) - } else { - bundleLoader = bundle.NewDirectoryLoader(path) - } - } else { - var fh fs.File - if fsys != nil { - fh, err = fsys.Open(path) - } else { - fh, err = os.Open(path) - } - if err != nil { - return nil, false, err - } - bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path) - } - - if filter != nil { - bundleLoader = bundleLoader.WithFilter(filter) - } - return bundleLoader, fi.IsDir(), nil + return v1.GetBundleDirectoryLoaderFS(fsys, path, filter) } // FilteredPaths is the same as FilterPathsFS using the current diretory file // system func FilteredPaths(paths []string, filter Filter) ([]string, error) { - return FilteredPathsFS(nil, paths, filter) + return v1.FilteredPaths(paths, filter) } // FilteredPathsFS return a list of files from the specified // paths while applying the given filters. 
If any filter returns true, the // file/directory is excluded. func FilteredPathsFS(fsys fs.FS, paths []string, filter Filter) ([]string, error) { - result := []string{} - - _, err := all(fsys, paths, filter, func(_ *Result, path string, _ int) error { - result = append(result, path) - return nil - }) - if err != nil { - return nil, err - } - return result, nil + return v1.FilteredPathsFS(fsys, paths, filter) } // Schemas loads a schema set from the specified file path. func Schemas(schemaPath string) (*ast.SchemaSet, error) { - - var errs Errors - ss, err := loadSchemas(schemaPath) - if err != nil { - errs.add(err) - return nil, errs - } - - return ss, nil -} - -func loadSchemas(schemaPath string) (*ast.SchemaSet, error) { - - if schemaPath == "" { - return nil, nil - } - - ss := ast.NewSchemaSet() - path, err := fileurl.Clean(schemaPath) - if err != nil { - return nil, err - } - - info, err := os.Stat(path) - if err != nil { - return nil, err - } - - // Handle single file case. - if !info.IsDir() { - schema, err := loadOneSchema(path) - if err != nil { - return nil, err - } - ss.Put(ast.SchemaRootRef, schema) - return ss, nil - - } - - // Handle directory case. - rootDir := path - - err = filepath.Walk(path, - func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } else if info.IsDir() { - return nil - } - - schema, err := loadOneSchema(path) - if err != nil { - return err - } - - relPath, err := filepath.Rel(rootDir, path) - if err != nil { - return err - } - - key := getSchemaSetByPathKey(relPath) - ss.Put(key, schema) - return nil - }) - - if err != nil { - return nil, err - } - - return ss, nil -} - -func getSchemaSetByPathKey(path string) ast.Ref { - - front := filepath.Dir(path) - last := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) - - var parts []string - - if front != "." { - parts = append(strings.Split(filepath.ToSlash(front), "/"), last) - } else { - parts = []string{last} - } - - key := make(ast.Ref, 1+len(parts)) - key[0] = ast.SchemaRootDocument - for i := range parts { - key[i+1] = ast.StringTerm(parts[i]) - } - - return key -} - -func loadOneSchema(path string) (interface{}, error) { - bs, err := os.ReadFile(path) - if err != nil { - return nil, err - } - - var schema interface{} - if err := util.Unmarshal(bs, &schema); err != nil { - return nil, fmt.Errorf("%s: %w", path, err) - } - - return schema, nil + return v1.Schemas(schemaPath) } // All returns a Result object loaded (recursively) from the specified paths. @@ -517,321 +113,33 @@ func Rego(path string) (*RegoFile, error) { // RegoWithOpts returns a RegoFile object loaded from the given path. func RegoWithOpts(path string, opts ast.ParserOptions) (*RegoFile, error) { - path, err := fileurl.Clean(path) - if err != nil { - return nil, err + if opts.RegoVersion == ast.RegoUndefined { + opts.RegoVersion = ast.DefaultRegoVersion } - bs, err := os.ReadFile(path) - if err != nil { - return nil, err - } - return loadRego(path, bs, metrics.New(), opts) + + return v1.RegoWithOpts(path, opts) } // CleanPath returns the normalized version of a path that can be used as an identifier. func CleanPath(path string) string { - return strings.Trim(path, "/") + return v1.CleanPath(path) } // Paths returns a sorted list of files contained at path. If recurse is true // and path is a directory, then Paths will walk the directory structure // recursively and list files at each level. 
func Paths(path string, recurse bool) (paths []string, err error) { - path, err = fileurl.Clean(path) - if err != nil { - return nil, err - } - err = filepath.Walk(path, func(f string, _ os.FileInfo, _ error) error { - if !recurse { - if path != f && path != filepath.Dir(f) { - return filepath.SkipDir - } - } - paths = append(paths, f) - return nil - }) - return paths, err + return v1.Paths(path, recurse) } // Dirs resolves filepaths to directories. It will return a list of unique // directories. func Dirs(paths []string) []string { - unique := map[string]struct{}{} - - for _, path := range paths { - // TODO: /dir/dir will register top level directory /dir - dir := filepath.Dir(path) - unique[dir] = struct{}{} - } - - u := make([]string, 0, len(unique)) - for k := range unique { - u = append(u, k) - } - sort.Strings(u) - return u + return v1.Dirs(paths) } // SplitPrefix returns a tuple specifying the document prefix and the file // path. func SplitPrefix(path string) ([]string, string) { - // Non-prefixed URLs can be returned without modification and their contents - // can be rooted directly under data. - if strings.Index(path, "://") == strings.Index(path, ":") { - return nil, path - } - parts := strings.SplitN(path, ":", 2) - if len(parts) == 2 && len(parts[0]) > 0 { - return strings.Split(parts[0], "."), parts[1] - } - return nil, path -} - -func (l *Result) merge(path string, result interface{}) error { - switch result := result.(type) { - case bundle.Bundle: - for _, module := range result.Modules { - l.Modules[module.Path] = &RegoFile{ - Name: module.Path, - Parsed: module.Parsed, - Raw: module.Raw, - } - } - return l.mergeDocument(path, result.Data) - case *RegoFile: - l.Modules[CleanPath(path)] = result - return nil - default: - return l.mergeDocument(path, result) - } -} - -func (l *Result) mergeDocument(path string, doc interface{}) error { - obj, ok := makeDir(l.path, doc) - if !ok { - return unsupportedDocumentType(path) - } - merged, ok := merge.InterfaceMaps(l.Documents, obj) - if !ok { - return mergeError(path) - } - for k := range merged { - l.Documents[k] = merged[k] - } - return nil -} - -func (l *Result) withParent(p string) *Result { - path := append(l.path, p) - return &Result{ - Documents: l.Documents, - Modules: l.Modules, - path: path, - } -} - -func newResult() *Result { - return &Result{ - Documents: map[string]interface{}{}, - Modules: map[string]*RegoFile{}, - } -} - -func all(fsys fs.FS, paths []string, filter Filter, f func(*Result, string, int) error) (*Result, error) { - errs := Errors{} - root := newResult() - - for _, path := range paths { - - // Paths can be prefixed with a string that specifies where content should be - // loaded under data. E.g., foo.bar:/path/to/some.json will load the content - // of some.json under {"foo": {"bar": ...}}. 
- loaded := root - prefix, path := SplitPrefix(path) - if len(prefix) > 0 { - for _, part := range prefix { - loaded = loaded.withParent(part) - } - } - - allRec(fsys, path, filter, &errs, loaded, 0, f) - } - - if len(errs) > 0 { - return nil, errs - } - - return root, nil -} - -func allRec(fsys fs.FS, path string, filter Filter, errors *Errors, loaded *Result, depth int, f func(*Result, string, int) error) { - - path, err := fileurl.Clean(path) - if err != nil { - errors.add(err) - return - } - - if err := checkForUNCPath(path); err != nil { - errors.add(err) - return - } - - var info fs.FileInfo - if fsys != nil { - info, err = fs.Stat(fsys, path) - } else { - info, err = os.Stat(path) - } - - if err != nil { - errors.add(err) - return - } - - if filter != nil && filter(path, info, depth) { - return - } - - if !info.IsDir() { - if err := f(loaded, path, depth); err != nil { - errors.add(err) - } - return - } - - // If we are recursing on directories then content must be loaded under path - // specified by directory hierarchy. - if depth > 0 { - loaded = loaded.withParent(info.Name()) - } - - var files []fs.DirEntry - if fsys != nil { - files, err = fs.ReadDir(fsys, path) - } else { - files, err = os.ReadDir(path) - } - if err != nil { - errors.add(err) - return - } - - for _, file := range files { - allRec(fsys, filepath.Join(path, file.Name()), filter, errors, loaded, depth+1, f) - } -} - -func loadKnownTypes(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) { - switch filepath.Ext(path) { - case ".json": - return loadJSON(path, bs, m) - case ".rego": - return loadRego(path, bs, m, opts) - case ".yaml", ".yml": - return loadYAML(path, bs, m) - default: - if strings.HasSuffix(path, ".tar.gz") { - r, err := loadBundleFile(path, bs, m, opts) - if err != nil { - err = fmt.Errorf("bundle %s: %w", path, err) - } - return r, err - } - } - return nil, unrecognizedFile(path) -} - -func loadFileForAnyType(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) { - module, err := loadRego(path, bs, m, opts) - if err == nil { - return module, nil - } - doc, err := loadJSON(path, bs, m) - if err == nil { - return doc, nil - } - doc, err = loadYAML(path, bs, m) - if err == nil { - return doc, nil - } - return nil, unrecognizedFile(path) -} - -func loadBundleFile(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (bundle.Bundle, error) { - tl := bundle.NewTarballLoaderWithBaseURL(bytes.NewBuffer(bs), path) - br := bundle.NewCustomReader(tl). - WithRegoVersion(opts.RegoVersion). - WithJSONOptions(opts.JSONOptions). - WithProcessAnnotations(opts.ProcessAnnotation). - WithMetrics(m). - WithSkipBundleVerification(true). 
- IncludeManifestInData(true) - return br.Read() -} - -func loadRego(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (*RegoFile, error) { - m.Timer(metrics.RegoModuleParse).Start() - var module *ast.Module - var err error - module, err = ast.ParseModuleWithOpts(path, string(bs), opts) - m.Timer(metrics.RegoModuleParse).Stop() - if err != nil { - return nil, err - } - result := &RegoFile{ - Name: path, - Parsed: module, - Raw: bs, - } - return result, nil -} - -func loadJSON(path string, bs []byte, m metrics.Metrics) (interface{}, error) { - m.Timer(metrics.RegoDataParse).Start() - var x interface{} - err := util.UnmarshalJSON(bs, &x) - m.Timer(metrics.RegoDataParse).Stop() - - if err != nil { - return nil, fmt.Errorf("%s: %w", path, err) - } - return x, nil -} - -func loadYAML(path string, bs []byte, m metrics.Metrics) (interface{}, error) { - m.Timer(metrics.RegoDataParse).Start() - bs, err := yaml.YAMLToJSON(bs) - m.Timer(metrics.RegoDataParse).Stop() - if err != nil { - return nil, fmt.Errorf("%v: error converting YAML to JSON: %v", path, err) - } - return loadJSON(path, bs, m) -} - -func makeDir(path []string, x interface{}) (map[string]interface{}, bool) { - if len(path) == 0 { - obj, ok := x.(map[string]interface{}) - if !ok { - return nil, false - } - return obj, true - } - return makeDir(path[:len(path)-1], map[string]interface{}{path[len(path)-1]: x}) -} - -// isUNC reports whether path is a UNC path. -func isUNC(path string) bool { - return len(path) > 1 && isSlash(path[0]) && isSlash(path[1]) -} - -func isSlash(c uint8) bool { - return c == '\\' || c == '/' -} - -func checkForUNCPath(path string) error { - if isUNC(path) { - return fmt.Errorf("UNC path read is not allowed: %s", path) - } - return nil + return v1.SplitPrefix(path) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/plugins/plugins.go b/constraint/vendor/github.com/open-policy-agent/opa/plugins/plugins.go deleted file mode 100644 index bacdd1507..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/plugins/plugins.go +++ /dev/null @@ -1,1098 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package plugins implements plugin management for the policy engine. -package plugins - -import ( - "context" - "errors" - "fmt" - mr "math/rand" - "sync" - "time" - - "github.com/open-policy-agent/opa/internal/report" - "github.com/prometheus/client_golang/prometheus" - "go.opentelemetry.io/otel/sdk/trace" - - "github.com/gorilla/mux" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/bundle" - "github.com/open-policy-agent/opa/config" - "github.com/open-policy-agent/opa/hooks" - bundleUtils "github.com/open-policy-agent/opa/internal/bundle" - cfg "github.com/open-policy-agent/opa/internal/config" - initload "github.com/open-policy-agent/opa/internal/runtime/init" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/loader" - "github.com/open-policy-agent/opa/logging" - "github.com/open-policy-agent/opa/plugins/rest" - "github.com/open-policy-agent/opa/resolver/wasm" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" -) - -// Factory defines the interface OPA uses to instantiate your plugin. 
-// -// When OPA processes it's configuration it looks for factories that -// have been registered by calling runtime.RegisterPlugin. Factories -// are registered to a name which is used to key into the -// configuration blob. If your plugin has not been configured, your -// factory will not be invoked. -// -// plugins: -// my_plugin1: -// some_key: foo -// # my_plugin2: -// # some_key2: bar -// -// If OPA was started with the configuration above and received two -// calls to runtime.RegisterPlugins (one with NAME "my_plugin1" and -// one with NAME "my_plugin2"), it would only invoke the factory for -// for my_plugin1. -// -// OPA instantiates and reconfigures plugins in two steps. First, OPA -// will call Validate to check the configuration. Assuming the -// configuration is valid, your factory should return a configuration -// value that can be used to construct your plugin. Second, OPA will -// call New to instantiate your plugin providing the configuration -// value returned from the Validate call. -// -// Validate receives a slice of bytes representing plugin -// configuration and returns a configuration value that can be used to -// instantiate your plugin. The manager is provided to give access to -// the OPA's compiler, storage layer, and global configuration. Your -// Validate function will typically: -// -// 1. Deserialize the raw config bytes -// 2. Validate the deserialized config for semantic errors -// 3. Inject default values -// 4. Return a deserialized/parsed config -// -// New receives a valid configuration for your plugin and returns a -// plugin object. Your New function will typically: -// -// 1. Cast the config value to it's own type -// 2. Instantiate a plugin object -// 3. Return the plugin object -// 4. Update status via `plugins.Manager#UpdatePluginStatus` -// -// After a plugin has been created subsequent status updates can be -// send anytime the plugin enters a ready or error state. -type Factory interface { - Validate(manager *Manager, config []byte) (interface{}, error) - New(manager *Manager, config interface{}) Plugin -} - -// Plugin defines the interface OPA uses to manage your plugin. -// -// When OPA starts it will start all of the plugins it was configured -// to instantiate. Each time a new plugin is configured (via -// discovery), OPA will start it. You can use the Start call to spawn -// additional goroutines or perform initialization tasks. -// -// Currently OPA will not call Stop on plugins. -// -// When OPA receives new configuration for your plugin via discovery -// it will first Validate the configuration using your factory and -// then call Reconfigure. -type Plugin interface { - Start(ctx context.Context) error - Stop(ctx context.Context) - Reconfigure(ctx context.Context, config interface{}) -} - -// Triggerable defines the interface plugins use for manual plugin triggers. -type Triggerable interface { - Trigger(context.Context) error -} - -// State defines the state that a Plugin instance is currently -// in with pre-defined states. -type State string - -const ( - // StateNotReady indicates that the Plugin is not in an error state, but isn't - // ready for normal operation yet. This should only happen at - // initialization time. - StateNotReady State = "NOT_READY" - - // StateOK signifies that the Plugin is operating normally. - StateOK State = "OK" - - // StateErr indicates that the Plugin is in an error state and should not - // be considered as functional. 
- StateErr State = "ERROR" - - // StateWarn indicates the Plugin is operating, but in a potentially dangerous or - // degraded state. It may be used to indicate manual remediation is needed, or to - // alert admins of some other noteworthy state. - StateWarn State = "WARN" -) - -// TriggerMode defines the trigger mode utilized by a Plugin for bundle download, -// log upload etc. -type TriggerMode string - -const ( - // TriggerPeriodic represents periodic polling mechanism - TriggerPeriodic TriggerMode = "periodic" - - // TriggerManual represents manual triggering mechanism - TriggerManual TriggerMode = "manual" - - // DefaultTriggerMode represents default trigger mechanism - DefaultTriggerMode TriggerMode = "periodic" -) - -// default interval between OPA report uploads -var defaultUploadIntervalSec = int64(3600) - -// Status has a Plugin's current status plus an optional Message. -type Status struct { - State State `json:"state"` - Message string `json:"message,omitempty"` -} - -func (s *Status) String() string { - return fmt.Sprintf("{%v %q}", s.State, s.Message) -} - -// StatusListener defines a handler to register for status updates. -type StatusListener func(status map[string]*Status) - -// Manager implements lifecycle management of plugins and gives plugins access -// to engine-wide components like storage. -type Manager struct { - Store storage.Store - Config *config.Config - Info *ast.Term - ID string - - compiler *ast.Compiler - compilerMux sync.RWMutex - wasmResolvers []*wasm.Resolver - wasmResolversMtx sync.RWMutex - services map[string]rest.Client - keys map[string]*keys.Config - plugins []namedplugin - registeredTriggers []func(storage.Transaction) - mtx sync.Mutex - pluginStatus map[string]*Status - pluginStatusListeners map[string]StatusListener - initBundles map[string]*bundle.Bundle - initFiles loader.Result - maxErrors int - initialized bool - interQueryBuiltinCacheConfig *cache.Config - gracefulShutdownPeriod int - registeredCacheTriggers []func(*cache.Config) - logger logging.Logger - consoleLogger logging.Logger - serverInitialized chan struct{} - serverInitializedOnce sync.Once - printHook print.Hook - enablePrintStatements bool - router *mux.Router - prometheusRegister prometheus.Registerer - tracerProvider *trace.TracerProvider - distributedTacingOpts tracing.Options - registeredNDCacheTriggers []func(bool) - registeredTelemetryGatherers map[string]report.Gatherer - bootstrapConfigLabels map[string]string - hooks hooks.Hooks - enableTelemetry bool - reporter *report.Reporter - opaReportNotifyCh chan struct{} - stop chan chan struct{} - parserOptions ast.ParserOptions -} - -type managerContextKey string -type managerWasmResolverKey string - -const managerCompilerContextKey = managerContextKey("compiler") -const managerWasmResolverContextKey = managerWasmResolverKey("wasmResolvers") - -// SetCompilerOnContext puts the compiler into the storage context. Calling this -// function before committing updated policies to storage allows the manager to -// skip parsing and compiling of modules. Instead, the manager will use the -// compiler that was stored on the context. -func SetCompilerOnContext(context *storage.Context, compiler *ast.Compiler) { - context.Put(managerCompilerContextKey, compiler) -} - -// GetCompilerOnContext gets the compiler cached on the storage context. 
-func GetCompilerOnContext(context *storage.Context) *ast.Compiler { - compiler, ok := context.Get(managerCompilerContextKey).(*ast.Compiler) - if !ok { - return nil - } - return compiler -} - -// SetWasmResolversOnContext puts a set of Wasm Resolvers into the storage -// context. Calling this function before committing updated wasm modules to -// storage allows the manager to skip initializing modules before using them. -// Instead, the manager will use the compiler that was stored on the context. -func SetWasmResolversOnContext(context *storage.Context, rs []*wasm.Resolver) { - context.Put(managerWasmResolverContextKey, rs) -} - -// getWasmResolversOnContext gets the resolvers cached on the storage context. -func getWasmResolversOnContext(context *storage.Context) []*wasm.Resolver { - resolvers, ok := context.Get(managerWasmResolverContextKey).([]*wasm.Resolver) - if !ok { - return nil - } - return resolvers -} - -func validateTriggerMode(mode TriggerMode) error { - switch mode { - case TriggerPeriodic, TriggerManual: - return nil - default: - return fmt.Errorf("invalid trigger mode %q (want %q or %q)", mode, TriggerPeriodic, TriggerManual) - } -} - -// ValidateAndInjectDefaultsForTriggerMode validates the trigger mode and injects default values -func ValidateAndInjectDefaultsForTriggerMode(a, b *TriggerMode) (*TriggerMode, error) { - - if a == nil && b != nil { - err := validateTriggerMode(*b) - if err != nil { - return nil, err - } - return b, nil - } else if a != nil && b == nil { - err := validateTriggerMode(*a) - if err != nil { - return nil, err - } - return a, nil - } else if a != nil && b != nil { - if *a != *b { - return nil, fmt.Errorf("trigger mode mismatch: %s and %s (hint: check discovery configuration)", *a, *b) - } - err := validateTriggerMode(*a) - if err != nil { - return nil, err - } - return a, nil - } - - t := DefaultTriggerMode - return &t, nil -} - -type namedplugin struct { - name string - plugin Plugin -} - -// Info sets the runtime information on the manager. The runtime information is -// propagated to opa.runtime() built-in function calls. -func Info(term *ast.Term) func(*Manager) { - return func(m *Manager) { - m.Info = term - } -} - -// InitBundles provides the initial set of bundles to load. -func InitBundles(b map[string]*bundle.Bundle) func(*Manager) { - return func(m *Manager) { - m.initBundles = b - } -} - -// InitFiles provides the initial set of other data/policy files to load. -func InitFiles(f loader.Result) func(*Manager) { - return func(m *Manager) { - m.initFiles = f - } -} - -// MaxErrors sets the error limit for the manager's shared compiler. -func MaxErrors(n int) func(*Manager) { - return func(m *Manager) { - m.maxErrors = n - } -} - -// GracefulShutdownPeriod passes the configured graceful shutdown period to plugins -func GracefulShutdownPeriod(gracefulShutdownPeriod int) func(*Manager) { - return func(m *Manager) { - m.gracefulShutdownPeriod = gracefulShutdownPeriod - } -} - -// Logger configures the passed logger on the plugin manager (useful to -// configure default fields) -func Logger(logger logging.Logger) func(*Manager) { - return func(m *Manager) { - m.logger = logger - } -} - -// ConsoleLogger sets the passed logger to be used by plugins that are -// configured with console logging enabled. 
-func ConsoleLogger(logger logging.Logger) func(*Manager) { - return func(m *Manager) { - m.consoleLogger = logger - } -} - -func EnablePrintStatements(yes bool) func(*Manager) { - return func(m *Manager) { - m.enablePrintStatements = yes - } -} - -func PrintHook(h print.Hook) func(*Manager) { - return func(m *Manager) { - m.printHook = h - } -} - -func WithRouter(r *mux.Router) func(*Manager) { - return func(m *Manager) { - m.router = r - } -} - -// WithPrometheusRegister sets the passed prometheus.Registerer to be used by plugins -func WithPrometheusRegister(prometheusRegister prometheus.Registerer) func(*Manager) { - return func(m *Manager) { - m.prometheusRegister = prometheusRegister - } -} - -// WithTracerProvider sets the passed *trace.TracerProvider to be used by plugins -func WithTracerProvider(tracerProvider *trace.TracerProvider) func(*Manager) { - return func(m *Manager) { - m.tracerProvider = tracerProvider - } -} - -// WithDistributedTracingOpts sets the options to be used by distributed tracing. -func WithDistributedTracingOpts(tr tracing.Options) func(*Manager) { - return func(m *Manager) { - m.distributedTacingOpts = tr - } -} - -// WithHooks allows passing hooks to the plugin manager. -func WithHooks(hs hooks.Hooks) func(*Manager) { - return func(m *Manager) { - m.hooks = hs - } -} - -// WithParserOptions sets the parser options to be used by the plugin manager. -func WithParserOptions(opts ast.ParserOptions) func(*Manager) { - return func(m *Manager) { - m.parserOptions = opts - } -} - -// WithEnableTelemetry controls whether OPA will send telemetry reports to an external service. -func WithEnableTelemetry(enableTelemetry bool) func(*Manager) { - return func(m *Manager) { - m.enableTelemetry = enableTelemetry - } -} - -// WithTelemetryGatherers allows registration of telemetry gatherers which enable injection of additional data in the -// telemetry report -func WithTelemetryGatherers(gs map[string]report.Gatherer) func(*Manager) { - return func(m *Manager) { - m.registeredTelemetryGatherers = gs - } -} - -// New creates a new Manager using config. 
-func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*Manager, error) { - - parsedConfig, err := config.ParseConfig(raw, id) - if err != nil { - return nil, err - } - - m := &Manager{ - Store: store, - Config: parsedConfig, - ID: id, - pluginStatus: map[string]*Status{}, - pluginStatusListeners: map[string]StatusListener{}, - maxErrors: -1, - serverInitialized: make(chan struct{}), - bootstrapConfigLabels: parsedConfig.Labels, - } - - for _, f := range opts { - f(m) - } - - if m.logger == nil { - m.logger = logging.Get() - } - - if m.consoleLogger == nil { - m.consoleLogger = logging.New() - } - - m.hooks.Each(func(h hooks.Hook) { - if f, ok := h.(hooks.ConfigHook); ok { - if c, e := f.OnConfig(context.Background(), parsedConfig); e != nil { - err = errors.Join(err, e) - } else { - parsedConfig = c - } - } - }) - if err != nil { - return nil, err - } - - // do after options and overrides - m.keys, err = keys.ParseKeysConfig(parsedConfig.Keys) - if err != nil { - return nil, err - } - - m.interQueryBuiltinCacheConfig, err = cache.ParseCachingConfig(parsedConfig.Caching) - if err != nil { - return nil, err - } - - serviceOpts := cfg.ServiceOptions{ - Raw: parsedConfig.Services, - AuthPlugin: m.AuthPlugin, - Keys: m.keys, - Logger: m.logger, - DistributedTacingOpts: m.distributedTacingOpts, - } - - m.services, err = cfg.ParseServicesConfig(serviceOpts) - if err != nil { - return nil, err - } - - if m.enableTelemetry { - reporter, err := report.New(id, report.Options{Logger: m.logger}) - if err != nil { - return nil, err - } - m.reporter = reporter - - m.reporter.RegisterGatherer("min_compatible_version", func(_ context.Context) (any, error) { - var minimumCompatibleVersion string - if m.compiler != nil && m.compiler.Required != nil { - minimumCompatibleVersion, _ = m.compiler.Required.MinimumCompatibleVersion() - } - return minimumCompatibleVersion, nil - }) - - // register any additional gatherers - for k, g := range m.registeredTelemetryGatherers { - m.reporter.RegisterGatherer(k, g) - } - } - - return m, nil -} - -// Init returns an error if the manager could not initialize itself. Init() should -// be called before Start(). Init() is idempotent. -func (m *Manager) Init(ctx context.Context) error { - - if m.initialized { - return nil - } - - params := storage.TransactionParams{ - Write: true, - Context: storage.NewContext(), - } - - if m.enableTelemetry { - m.opaReportNotifyCh = make(chan struct{}) - m.stop = make(chan chan struct{}) - go m.sendOPAUpdateLoop(ctx) - } - - err := storage.Txn(ctx, m.Store, params, func(txn storage.Transaction) error { - - result, err := initload.InsertAndCompile(ctx, initload.InsertAndCompileOptions{ - Store: m.Store, - Txn: txn, - Files: m.initFiles, - Bundles: m.initBundles, - MaxErrors: m.maxErrors, - EnablePrintStatements: m.enablePrintStatements, - }) - - if err != nil { - return err - } - - SetCompilerOnContext(params.Context, result.Compiler) - - resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil) - if err != nil { - return err - } - SetWasmResolversOnContext(params.Context, resolvers) - - _, err = m.Store.Register(ctx, txn, storage.TriggerConfig{OnCommit: m.onCommit}) - return err - }) - - if err != nil { - if m.stop != nil { - done := make(chan struct{}) - m.stop <- done - <-done - } - - return err - } - - m.initialized = true - return nil -} - -// Labels returns the set of labels from the configuration. 
-func (m *Manager) Labels() map[string]string { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.Config.Labels -} - -// InterQueryBuiltinCacheConfig returns the configuration for the inter-query cache. -func (m *Manager) InterQueryBuiltinCacheConfig() *cache.Config { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.interQueryBuiltinCacheConfig -} - -// Register adds a plugin to the manager. When the manager is started, all of -// the plugins will be started. -func (m *Manager) Register(name string, plugin Plugin) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.plugins = append(m.plugins, namedplugin{ - name: name, - plugin: plugin, - }) - if _, ok := m.pluginStatus[name]; !ok { - m.pluginStatus[name] = &Status{State: StateNotReady} - } -} - -// Plugins returns the list of plugins registered with the manager. -func (m *Manager) Plugins() []string { - m.mtx.Lock() - defer m.mtx.Unlock() - result := make([]string, len(m.plugins)) - for i := range m.plugins { - result[i] = m.plugins[i].name - } - return result -} - -// Plugin returns the plugin registered with name or nil if name is not found. -func (m *Manager) Plugin(name string) Plugin { - m.mtx.Lock() - defer m.mtx.Unlock() - for i := range m.plugins { - if m.plugins[i].name == name { - return m.plugins[i].plugin - } - } - return nil -} - -// AuthPlugin returns the HTTPAuthPlugin registered with name or nil if name is not found. -func (m *Manager) AuthPlugin(name string) rest.HTTPAuthPlugin { - m.mtx.Lock() - defer m.mtx.Unlock() - for i := range m.plugins { - if m.plugins[i].name == name { - return m.plugins[i].plugin.(rest.HTTPAuthPlugin) - } - } - return nil -} - -// GetCompiler returns the manager's compiler. -func (m *Manager) GetCompiler() *ast.Compiler { - m.compilerMux.RLock() - defer m.compilerMux.RUnlock() - return m.compiler -} - -func (m *Manager) setCompiler(compiler *ast.Compiler) { - m.compilerMux.Lock() - defer m.compilerMux.Unlock() - m.compiler = compiler -} - -// GetRouter returns the managers router if set -func (m *Manager) GetRouter() *mux.Router { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.router -} - -// RegisterCompilerTrigger registers for change notifications when the compiler -// is changed. -func (m *Manager) RegisterCompilerTrigger(f func(storage.Transaction)) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.registeredTriggers = append(m.registeredTriggers, f) -} - -// GetWasmResolvers returns the manager's set of Wasm Resolvers. -func (m *Manager) GetWasmResolvers() []*wasm.Resolver { - m.wasmResolversMtx.RLock() - defer m.wasmResolversMtx.RUnlock() - return m.wasmResolvers -} - -func (m *Manager) setWasmResolvers(rs []*wasm.Resolver) { - m.wasmResolversMtx.Lock() - defer m.wasmResolversMtx.Unlock() - m.wasmResolvers = rs -} - -// Start starts the manager. Init() should be called once before Start(). -func (m *Manager) Start(ctx context.Context) error { - - if m == nil { - return nil - } - - if !m.initialized { - if err := m.Init(ctx); err != nil { - return err - } - } - - var toStart []Plugin - - func() { - m.mtx.Lock() - defer m.mtx.Unlock() - toStart = make([]Plugin, len(m.plugins)) - for i := range m.plugins { - toStart[i] = m.plugins[i].plugin - } - }() - - for i := range toStart { - if err := toStart[i].Start(ctx); err != nil { - return err - } - } - - return nil -} - -// Stop stops the manager, stopping all the plugins registered with it. -// Any plugin that needs to perform cleanup should do so within the duration -// of the graceful shutdown period passed with the context as a timeout. 
-// Note that a graceful shutdown period configured with the Manager instance -// will override the timeout of the passed in context (if applicable). -func (m *Manager) Stop(ctx context.Context) { - var toStop []Plugin - - func() { - m.mtx.Lock() - defer m.mtx.Unlock() - toStop = make([]Plugin, len(m.plugins)) - for i := range m.plugins { - toStop[i] = m.plugins[i].plugin - } - }() - - var cancel context.CancelFunc - if m.gracefulShutdownPeriod > 0 { - ctx, cancel = context.WithTimeout(ctx, time.Duration(m.gracefulShutdownPeriod)*time.Second) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer cancel() - for i := range toStop { - toStop[i].Stop(ctx) - } - if c, ok := m.Store.(interface{ Close(context.Context) error }); ok { - if err := c.Close(ctx); err != nil { - m.logger.Error("Error closing store: %v", err) - } - } - - if m.stop != nil { - done := make(chan struct{}) - m.stop <- done - <-done - } -} - -// Reconfigure updates the configuration on the manager. -func (m *Manager) Reconfigure(config *config.Config) error { - opts := cfg.ServiceOptions{ - Raw: config.Services, - AuthPlugin: m.AuthPlugin, - Logger: m.logger, - DistributedTacingOpts: m.distributedTacingOpts, - } - - keys, err := keys.ParseKeysConfig(config.Keys) - if err != nil { - return err - } - opts.Keys = keys - - services, err := cfg.ParseServicesConfig(opts) - if err != nil { - return err - } - - interQueryBuiltinCacheConfig, err := cache.ParseCachingConfig(config.Caching) - if err != nil { - return err - } - - m.mtx.Lock() - defer m.mtx.Unlock() - - // don't overwrite existing labels, only allow additions - always based on the boostrap config - if config.Labels == nil { - config.Labels = m.bootstrapConfigLabels - } else { - for label, value := range m.bootstrapConfigLabels { - config.Labels[label] = value - } - } - - // don't erase persistence directory - if config.PersistenceDirectory == nil { - config.PersistenceDirectory = m.Config.PersistenceDirectory - } - - m.Config = config - m.interQueryBuiltinCacheConfig = interQueryBuiltinCacheConfig - for name, client := range services { - m.services[name] = client - } - - for name, key := range keys { - m.keys[name] = key - } - - for _, trigger := range m.registeredCacheTriggers { - trigger(interQueryBuiltinCacheConfig) - } - - for _, trigger := range m.registeredNDCacheTriggers { - trigger(config.NDBuiltinCache) - } - - return nil -} - -// PluginStatus returns the current statuses of any plugins registered. -func (m *Manager) PluginStatus() map[string]*Status { - m.mtx.Lock() - defer m.mtx.Unlock() - - return m.copyPluginStatus() -} - -// RegisterPluginStatusListener registers a StatusListener to be -// called when plugin status updates occur. -func (m *Manager) RegisterPluginStatusListener(name string, listener StatusListener) { - m.mtx.Lock() - defer m.mtx.Unlock() - - m.pluginStatusListeners[name] = listener -} - -// UnregisterPluginStatusListener removes a StatusListener registered with the -// same name. -func (m *Manager) UnregisterPluginStatusListener(name string) { - m.mtx.Lock() - defer m.mtx.Unlock() - - delete(m.pluginStatusListeners, name) -} - -// UpdatePluginStatus updates a named plugins status. Any registered -// listeners will be called with a copy of the new state of all -// plugins. 
-func (m *Manager) UpdatePluginStatus(pluginName string, status *Status) { - - var toNotify map[string]StatusListener - var statuses map[string]*Status - - func() { - m.mtx.Lock() - defer m.mtx.Unlock() - m.pluginStatus[pluginName] = status - toNotify = make(map[string]StatusListener, len(m.pluginStatusListeners)) - for k, v := range m.pluginStatusListeners { - toNotify[k] = v - } - statuses = m.copyPluginStatus() - }() - - for _, l := range toNotify { - l(statuses) - } -} - -func (m *Manager) copyPluginStatus() map[string]*Status { - statusCpy := map[string]*Status{} - for k, v := range m.pluginStatus { - var cpy *Status - if v != nil { - cpy = &Status{ - State: v.State, - Message: v.Message, - } - } - statusCpy[k] = cpy - } - return statusCpy -} - -func (m *Manager) onCommit(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) { - - compiler := GetCompilerOnContext(event.Context) - - // If the context does not contain the compiler fallback to loading the - // compiler from the store. Currently the bundle plugin sets the - // compiler on the context but the server does not (nor would users - // implementing their own policy loading.) - if compiler == nil && event.PolicyChanged() { - compiler, _ = loadCompilerFromStore(ctx, m.Store, txn, m.enablePrintStatements, m.ParserOptions()) - } - - if compiler != nil { - m.setCompiler(compiler) - - if m.enableTelemetry && event.PolicyChanged() { - m.opaReportNotifyCh <- struct{}{} - } - - for _, f := range m.registeredTriggers { - f(txn) - } - } - - // Similar to the compiler, look for a set of resolvers on the transaction - // context. If they are not set we may need to reload from the store. - resolvers := getWasmResolversOnContext(event.Context) - if resolvers != nil { - m.setWasmResolvers(resolvers) - - } else if event.DataChanged() { - if requiresWasmResolverReload(event) { - resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil) - if err != nil { - panic(err) - } - m.setWasmResolvers(resolvers) - } else { - err := m.updateWasmResolversData(ctx, event) - if err != nil { - panic(err) - } - } - } -} - -func loadCompilerFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, enablePrintStatements bool, popts ast.ParserOptions) (*ast.Compiler, error) { - policies, err := store.ListPolicies(ctx, txn) - if err != nil { - return nil, err - } - modules := map[string]*ast.Module{} - - for _, policy := range policies { - bs, err := store.GetPolicy(ctx, txn, policy) - if err != nil { - return nil, err - } - module, err := ast.ParseModuleWithOpts(policy, string(bs), popts) - if err != nil { - return nil, err - } - modules[policy] = module - } - - compiler := ast.NewCompiler().WithEnablePrintStatements(enablePrintStatements) - compiler.Compile(modules) - return compiler, nil -} - -func requiresWasmResolverReload(event storage.TriggerEvent) bool { - // If the data changes touched the bundle path (which includes - // the wasm modules) we will reload them. Otherwise update - // data for each module already on the manager. 
- for _, dataEvent := range event.Data { - if dataEvent.Path.HasPrefix(bundle.BundlesBasePath) { - return true - } - } - return false -} - -func (m *Manager) updateWasmResolversData(ctx context.Context, event storage.TriggerEvent) error { - m.wasmResolversMtx.Lock() - defer m.wasmResolversMtx.Unlock() - - for _, resolver := range m.wasmResolvers { - for _, dataEvent := range event.Data { - var err error - if dataEvent.Removed { - err = resolver.RemoveDataPath(ctx, dataEvent.Path) - } else { - err = resolver.SetDataPath(ctx, dataEvent.Path, dataEvent.Data) - } - if err != nil { - return fmt.Errorf("failed to update wasm runtime data: %s", err) - } - } - } - return nil -} - -// PublicKeys returns a public keys that can be used for verifying signed bundles. -func (m *Manager) PublicKeys() map[string]*keys.Config { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.keys -} - -// Client returns a client for communicating with a remote service. -func (m *Manager) Client(name string) rest.Client { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.services[name] -} - -// Services returns a list of services that m can provide clients for. -func (m *Manager) Services() []string { - m.mtx.Lock() - defer m.mtx.Unlock() - s := make([]string, 0, len(m.services)) - for name := range m.services { - s = append(s, name) - } - return s -} - -// Logger gets the standard logger for this plugin manager. -func (m *Manager) Logger() logging.Logger { - return m.logger -} - -// ConsoleLogger gets the console logger for this plugin manager. -func (m *Manager) ConsoleLogger() logging.Logger { - return m.consoleLogger -} - -func (m *Manager) PrintHook() print.Hook { - return m.printHook -} - -func (m *Manager) EnablePrintStatements() bool { - return m.enablePrintStatements -} - -// ServerInitialized signals a channel indicating that the OPA -// server has finished initialization. -func (m *Manager) ServerInitialized() { - m.serverInitializedOnce.Do(func() { close(m.serverInitialized) }) -} - -// ServerInitializedChannel returns a receive-only channel that -// is closed when the OPA server has finished initialization. -// Be aware that the socket of the server listener may not be -// open by the time this channel is closed. There is a very -// small window where the socket may still be closed, due to -// a race condition. -func (m *Manager) ServerInitializedChannel() <-chan struct{} { - return m.serverInitialized -} - -// RegisterCacheTrigger accepts a func that receives new inter-query cache config generated by -// a reconfigure of the plugin manager, so that it can be propagated to existing inter-query caches. -func (m *Manager) RegisterCacheTrigger(trigger func(*cache.Config)) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.registeredCacheTriggers = append(m.registeredCacheTriggers, trigger) -} - -// PrometheusRegister gets the prometheus.Registerer for this plugin manager. -func (m *Manager) PrometheusRegister() prometheus.Registerer { - return m.prometheusRegister -} - -// TracerProvider gets the *trace.TracerProvider for this plugin manager. 
-func (m *Manager) TracerProvider() *trace.TracerProvider { - return m.tracerProvider -} - -func (m *Manager) RegisterNDCacheTrigger(trigger func(bool)) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.registeredNDCacheTriggers = append(m.registeredNDCacheTriggers, trigger) -} - -func (m *Manager) sendOPAUpdateLoop(ctx context.Context) { - ticker := time.NewTicker(time.Duration(int64(time.Second) * defaultUploadIntervalSec)) - mr.New(mr.NewSource(time.Now().UnixNano())) - - ctx, cancel := context.WithCancel(ctx) - - var opaReportNotify bool - - for { - select { - case <-m.opaReportNotifyCh: - opaReportNotify = true - case <-ticker.C: - ticker.Stop() - - if opaReportNotify { - opaReportNotify = false - _, err := m.reporter.SendReport(ctx) - if err != nil { - m.logger.WithFields(map[string]interface{}{"err": err}).Debug("Unable to send OPA telemetry report.") - } - } - - newInterval := mr.Int63n(defaultUploadIntervalSec) + defaultUploadIntervalSec - ticker = time.NewTicker(time.Duration(int64(time.Second) * newInterval)) - case done := <-m.stop: - cancel() - ticker.Stop() - done <- struct{}{} - return - } - } -} - -func (m *Manager) ParserOptions() ast.ParserOptions { - return m.parserOptions -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go b/constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go deleted file mode 100644 index 6a85dea68..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/azure.go +++ /dev/null @@ -1,157 +0,0 @@ -package rest - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "time" -) - -var ( - azureIMDSEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - defaultAPIVersion = "2018-02-01" - defaultResource = "https://storage.azure.com/" - timeout = 5 * time.Second -) - -// azureManagedIdentitiesToken holds a token for managed identities for Azure resources -type azureManagedIdentitiesToken struct { - AccessToken string `json:"access_token"` - ExpiresIn string `json:"expires_in"` - ExpiresOn string `json:"expires_on"` - NotBefore string `json:"not_before"` - Resource string `json:"resource"` - TokenType string `json:"token_type"` -} - -// azureManagedIdentitiesError represents an error fetching an azureManagedIdentitiesToken -type azureManagedIdentitiesError struct { - Err string `json:"error"` - Description string `json:"error_description"` - Endpoint string - StatusCode int -} - -func (e *azureManagedIdentitiesError) Error() string { - return fmt.Sprintf("%v %s retrieving azure token from %s: %s", e.StatusCode, e.Err, e.Endpoint, e.Description) -} - -// azureManagedIdentitiesAuthPlugin uses an azureManagedIdentitiesToken.AccessToken for bearer authorization -type azureManagedIdentitiesAuthPlugin struct { - Endpoint string `json:"endpoint"` - APIVersion string `json:"api_version"` - Resource string `json:"resource"` - ObjectID string `json:"object_id"` - ClientID string `json:"client_id"` - MiResID string `json:"mi_res_id"` -} - -func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, error) { - if c.Type == "oci" { - return nil, errors.New("azure managed identities auth: OCI service not supported") - } - - if ap.Endpoint == "" { - ap.Endpoint = azureIMDSEndpoint - } - - if ap.Resource == "" { - ap.Resource = defaultResource - } - - if ap.APIVersion == "" { - ap.APIVersion = defaultAPIVersion - } - - t, err := DefaultTLSConfig(c) - if err != nil { - return nil, err - } - - return DefaultRoundTripperClient(t, 
*c.ResponseHeaderTimeoutSeconds), nil
-}
-
-func (ap *azureManagedIdentitiesAuthPlugin) Prepare(req *http.Request) error {
-	token, err := azureManagedIdentitiesTokenRequest(
-		ap.Endpoint, ap.APIVersion, ap.Resource,
-		ap.ObjectID, ap.ClientID, ap.MiResID,
-	)
-	if err != nil {
-		return err
-	}
-
-	req.Header.Add("Authorization", "Bearer "+token.AccessToken)
-	return nil
-}
-
-// azureManagedIdentitiesTokenRequest fetches an azureManagedIdentitiesToken
-func azureManagedIdentitiesTokenRequest(
-	endpoint, apiVersion, resource, objectID, clientID, miResID string,
-) (azureManagedIdentitiesToken, error) {
-	var token azureManagedIdentitiesToken
-	e := buildAzureManagedIdentitiesRequestPath(endpoint, apiVersion, resource, objectID, clientID, miResID)
-
-	request, err := http.NewRequest("GET", e, nil)
-	if err != nil {
-		return token, err
-	}
-	request.Header.Add("Metadata", "true")
-
-	httpClient := http.Client{Timeout: timeout}
-	response, err := httpClient.Do(request)
-	if err != nil {
-		return token, err
-	}
-	defer response.Body.Close()
-
-	data, err := io.ReadAll(response.Body)
-	if err != nil {
-		return token, err
-	}
-
-	if s := response.StatusCode; s != http.StatusOK {
-		var azureError azureManagedIdentitiesError
-		err = json.Unmarshal(data, &azureError)
-		if err != nil {
-			return token, err
-		}
-
-		azureError.Endpoint = e
-		azureError.StatusCode = s
-		return token, &azureError
-	}
-
-	err = json.Unmarshal(data, &token)
-	if err != nil {
-		return token, err
-	}
-
-	return token, nil
-}
-
-// buildAzureManagedIdentitiesRequestPath constructs the request URL for an Azure managed identities token request
-func buildAzureManagedIdentitiesRequestPath(
-	endpoint, apiVersion, resource, objectID, clientID, miResID string,
-) string {
-	params := url.Values{
-		"api-version": []string{apiVersion},
-		"resource":    []string{resource},
-	}
-
-	if objectID != "" {
-		params.Add("object_id", objectID)
-	}
-
-	if clientID != "" {
-		params.Add("client_id", clientID)
-	}
-
-	if miResID != "" {
-		params.Add("mi_res_id", miResID)
-	}
-
-	return endpoint + "?" + params.Encode()
-}
diff --git a/constraint/vendor/github.com/open-policy-agent/opa/rego/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/rego/doc.go
new file mode 100644
index 000000000..febe75696
--- /dev/null
+++ b/constraint/vendor/github.com/open-policy-agent/opa/rego/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2024 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended.
+// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead.
+// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information.
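The deprecation notice above points callers at the v1 API surface that the shims in this patch delegate to. As a minimal, hypothetical sketch (not part of the vendored diff; the module name, query, and input below are invented for illustration), a consumer migrating off the legacy rego package would import github.com/open-policy-agent/opa/v1/rego directly and keep the same option/prepare/eval flow:

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/v1/rego"
)

func main() {
	ctx := context.Background()

	// Illustrative policy only: with the v1 package, Rego v1 syntax
	// (the `if` keyword, no rego.v1 import) is the default.
	r := rego.New(
		rego.Query("data.example.allow"),
		rego.Module("example.rego", "package example\n\nallow if input.user == \"alice\"\n"),
	)

	// Prepare once, then evaluate with per-call input.
	pq, err := r.PrepareForEval(ctx)
	if err != nil {
		panic(err)
	}

	rs, err := pq.Eval(ctx, rego.EvalInput(map[string]interface{}{"user": "alice"}))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(rs) > 0 && rs[0].Expressions[0].Value == true) // prints: true
}

The wrapper functions in the remainder of this patch forward to these same v1 entry points, so existing v0-style callers continue to compile while new code can target v1 directly.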
+package rego diff --git a/constraint/vendor/github.com/open-policy-agent/opa/rego/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/rego/errors.go index dcc5e2679..bcbd2efed 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/rego/errors.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/rego/errors.go @@ -1,24 +1,17 @@ package rego +import v1 "github.com/open-policy-agent/opa/v1/rego" + // HaltError is an error type to return from a custom function implementation // that will abort the evaluation process (analogous to topdown.Halt). -type HaltError struct { - err error -} - -// Error delegates to the wrapped error -func (h *HaltError) Error() string { - return h.err.Error() -} +type HaltError = v1.HaltError // NewHaltError wraps an error such that the evaluation process will stop // when it occurs. func NewHaltError(err error) error { - return &HaltError{err: err} + return v1.NewHaltError(err) } // ErrorDetails interface is satisfied by an error that provides further // details. -type ErrorDetails interface { - Lines() []string -} +type ErrorDetails = v1.ErrorDetails diff --git a/constraint/vendor/github.com/open-policy-agent/opa/rego/plugins.go b/constraint/vendor/github.com/open-policy-agent/opa/rego/plugins.go index abaa91034..38ef84416 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/rego/plugins.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/rego/plugins.go @@ -5,39 +5,13 @@ package rego import ( - "context" - "sync" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/ir" + v1 "github.com/open-policy-agent/opa/v1/rego" ) -var targetPlugins = map[string]TargetPlugin{} -var pluginMtx sync.Mutex - -type TargetPlugin interface { - IsTarget(string) bool - PrepareForEval(context.Context, *ir.Policy, ...PrepareOption) (TargetPluginEval, error) -} - -type TargetPluginEval interface { - Eval(context.Context, *EvalContext, ast.Value) (ast.Value, error) -} +type TargetPlugin = v1.TargetPlugin -func (r *Rego) targetPlugin(tgt string) TargetPlugin { - for _, p := range targetPlugins { - if p.IsTarget(tgt) { - return p - } - } - return nil -} +type TargetPluginEval = v1.TargetPluginEval func RegisterPlugin(name string, p TargetPlugin) { - pluginMtx.Lock() - defer pluginMtx.Unlock() - if _, ok := targetPlugins[name]; ok { - panic("plugin already registered " + name) - } - targetPlugins[name] = p + v1.RegisterPlugin(name, p) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/rego/rego.go b/constraint/vendor/github.com/open-policy-agent/opa/rego/rego.go index 5a5ea0d12..e6af30c39 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/rego/rego.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/rego/rego.go @@ -6,943 +6,367 @@ package rego import ( - "bytes" - "context" - "errors" - "fmt" "io" - "strings" "time" "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/bundle" - bundleUtils "github.com/open-policy-agent/opa/internal/bundle" - "github.com/open-policy-agent/opa/internal/compiler/wasm" - "github.com/open-policy-agent/opa/internal/future" - "github.com/open-policy-agent/opa/internal/planner" - "github.com/open-policy-agent/opa/internal/rego/opa" - "github.com/open-policy-agent/opa/internal/wasm/encoding" - "github.com/open-policy-agent/opa/ir" "github.com/open-policy-agent/opa/loader" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/plugins" - "github.com/open-policy-agent/opa/resolver" 
"github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/inmem" - "github.com/open-policy-agent/opa/topdown" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/types" - "github.com/open-policy-agent/opa/util" -) - -const ( - defaultPartialNamespace = "partial" - wasmVarPrefix = "^" -) - -// nolint: deadcode,varcheck -const ( - targetWasm = "wasm" - targetRego = "rego" + "github.com/open-policy-agent/opa/v1/metrics" + v1 "github.com/open-policy-agent/opa/v1/rego" + "github.com/open-policy-agent/opa/v1/resolver" + "github.com/open-policy-agent/opa/v1/topdown" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" ) // CompileResult represents the result of compiling a Rego query, zero or more // Rego modules, and arbitrary contextual data into an executable. -type CompileResult struct { - Bytes []byte `json:"bytes"` -} +type CompileResult = v1.CompileResult // PartialQueries contains the queries and support modules produced by partial // evaluation. -type PartialQueries struct { - Queries []ast.Body `json:"queries,omitempty"` - Support []*ast.Module `json:"modules,omitempty"` -} +type PartialQueries = v1.PartialQueries // PartialResult represents the result of partial evaluation. The result can be // used to generate a new query that can be run when inputs are known. -type PartialResult struct { - compiler *ast.Compiler - store storage.Store - body ast.Body - builtinDecls map[string]*ast.Builtin - builtinFuncs map[string]*topdown.Builtin -} - -// Rego returns an object that can be evaluated to produce a query result. -func (pr PartialResult) Rego(options ...func(*Rego)) *Rego { - options = append(options, Compiler(pr.compiler), Store(pr.store), ParsedQuery(pr.body)) - r := New(options...) - - // Propagate any custom builtins. - for k, v := range pr.builtinDecls { - r.builtinDecls[k] = v - } - for k, v := range pr.builtinFuncs { - r.builtinFuncs[k] = v - } - return r -} - -// preparedQuery is a wrapper around a Rego object which has pre-processed -// state stored on it. Once prepared there are a more limited number of actions -// that can be taken with it. It will, however, be able to evaluate faster since -// it will not have to re-parse or compile as much. -type preparedQuery struct { - r *Rego - cfg *PrepareConfig -} +type PartialResult = v1.PartialResult // EvalContext defines the set of options allowed to be set at evaluation // time. Any other options will need to be set on a new Rego object. 
-type EvalContext struct { - hasInput bool - time time.Time - seed io.Reader - rawInput *interface{} - parsedInput ast.Value - metrics metrics.Metrics - txn storage.Transaction - instrument bool - instrumentation *topdown.Instrumentation - partialNamespace string - queryTracers []topdown.QueryTracer - compiledQuery compiledQuery - unknowns []string - disableInlining []ast.Ref - parsedUnknowns []*ast.Term - indexing bool - earlyExit bool - interQueryBuiltinCache cache.InterQueryCache - ndBuiltinCache builtins.NDBCache - resolvers []refResolver - sortSets bool - copyMaps bool - printHook print.Hook - capabilities *ast.Capabilities - strictBuiltinErrors bool - virtualCache topdown.VirtualCache -} - -func (e *EvalContext) RawInput() *interface{} { - return e.rawInput -} - -func (e *EvalContext) ParsedInput() ast.Value { - return e.parsedInput -} - -func (e *EvalContext) Time() time.Time { - return e.time -} - -func (e *EvalContext) Seed() io.Reader { - return e.seed -} - -func (e *EvalContext) InterQueryBuiltinCache() cache.InterQueryCache { - return e.interQueryBuiltinCache -} - -func (e *EvalContext) PrintHook() print.Hook { - return e.printHook -} - -func (e *EvalContext) Metrics() metrics.Metrics { - return e.metrics -} - -func (e *EvalContext) StrictBuiltinErrors() bool { - return e.strictBuiltinErrors -} - -func (e *EvalContext) NDBCache() builtins.NDBCache { - return e.ndBuiltinCache -} - -func (e *EvalContext) CompiledQuery() ast.Body { - return e.compiledQuery.query -} - -func (e *EvalContext) Capabilities() *ast.Capabilities { - return e.capabilities -} - -func (e *EvalContext) Transaction() storage.Transaction { - return e.txn -} +type EvalContext = v1.EvalContext // EvalOption defines a function to set an option on an EvalConfig -type EvalOption func(*EvalContext) +type EvalOption = v1.EvalOption // EvalInput configures the input for a Prepared Query's evaluation func EvalInput(input interface{}) EvalOption { - return func(e *EvalContext) { - e.rawInput = &input - e.hasInput = true - } + return v1.EvalInput(input) } // EvalParsedInput configures the input for a Prepared Query's evaluation func EvalParsedInput(input ast.Value) EvalOption { - return func(e *EvalContext) { - e.parsedInput = input - e.hasInput = true - } + return v1.EvalParsedInput(input) } // EvalMetrics configures the metrics for a Prepared Query's evaluation func EvalMetrics(metric metrics.Metrics) EvalOption { - return func(e *EvalContext) { - e.metrics = metric - } + return v1.EvalMetrics(metric) } // EvalTransaction configures the Transaction for a Prepared Query's evaluation func EvalTransaction(txn storage.Transaction) EvalOption { - return func(e *EvalContext) { - e.txn = txn - } + return v1.EvalTransaction(txn) } // EvalInstrument enables or disables instrumenting for a Prepared Query's evaluation func EvalInstrument(instrument bool) EvalOption { - return func(e *EvalContext) { - e.instrument = instrument - } + return v1.EvalInstrument(instrument) } // EvalTracer configures a tracer for a Prepared Query's evaluation // Deprecated: Use EvalQueryTracer instead. 
func EvalTracer(tracer topdown.Tracer) EvalOption { - return func(e *EvalContext) { - if tracer != nil { - e.queryTracers = append(e.queryTracers, topdown.WrapLegacyTracer(tracer)) - } - } + return v1.EvalTracer(tracer) } // EvalQueryTracer configures a tracer for a Prepared Query's evaluation func EvalQueryTracer(tracer topdown.QueryTracer) EvalOption { - return func(e *EvalContext) { - if tracer != nil { - e.queryTracers = append(e.queryTracers, tracer) - } - } + return v1.EvalQueryTracer(tracer) } // EvalPartialNamespace returns an argument that sets the namespace to use for // partial evaluation results. The namespace must be a valid package path // component. func EvalPartialNamespace(ns string) EvalOption { - return func(e *EvalContext) { - e.partialNamespace = ns - } + return v1.EvalPartialNamespace(ns) } // EvalUnknowns returns an argument that sets the values to treat as // unknown during partial evaluation. func EvalUnknowns(unknowns []string) EvalOption { - return func(e *EvalContext) { - e.unknowns = unknowns - } + return v1.EvalUnknowns(unknowns) } // EvalDisableInlining returns an argument that adds a set of paths to exclude from // partial evaluation inlining. func EvalDisableInlining(paths []ast.Ref) EvalOption { - return func(e *EvalContext) { - e.disableInlining = paths - } + return v1.EvalDisableInlining(paths) } // EvalParsedUnknowns returns an argument that sets the values to treat // as unknown during partial evaluation. func EvalParsedUnknowns(unknowns []*ast.Term) EvalOption { - return func(e *EvalContext) { - e.parsedUnknowns = unknowns - } + return v1.EvalParsedUnknowns(unknowns) } // EvalRuleIndexing will disable indexing optimizations for the // evaluation. This should only be used when tracing in debug mode. func EvalRuleIndexing(enabled bool) EvalOption { - return func(e *EvalContext) { - e.indexing = enabled - } + return v1.EvalRuleIndexing(enabled) } // EvalEarlyExit will disable 'early exit' optimizations for the // evaluation. This should only be used when tracing in debug mode. func EvalEarlyExit(enabled bool) EvalOption { - return func(e *EvalContext) { - e.earlyExit = enabled - } + return v1.EvalEarlyExit(enabled) } // EvalTime sets the wall clock time to use during policy evaluation. // time.now_ns() calls will return this value. func EvalTime(x time.Time) EvalOption { - return func(e *EvalContext) { - e.time = x - } + return v1.EvalTime(x) } // EvalSeed sets a reader that will seed randomization required by built-in functions. // If a seed is not provided crypto/rand.Reader is used. func EvalSeed(r io.Reader) EvalOption { - return func(e *EvalContext) { - e.seed = r - } + return v1.EvalSeed(r) } // EvalInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize // during evaluation. func EvalInterQueryBuiltinCache(c cache.InterQueryCache) EvalOption { - return func(e *EvalContext) { - e.interQueryBuiltinCache = c - } + return v1.EvalInterQueryBuiltinCache(c) +} + +// EvalInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize +// during evaluation. +func EvalInterQueryBuiltinValueCache(c cache.InterQueryValueCache) EvalOption { + return v1.EvalInterQueryBuiltinValueCache(c) } // EvalNDBuiltinCache sets the non-deterministic builtin cache that built-in functions can // use during evaluation. 
func EvalNDBuiltinCache(c builtins.NDBCache) EvalOption { - return func(e *EvalContext) { - e.ndBuiltinCache = c - } + return v1.EvalNDBuiltinCache(c) } // EvalResolver sets a Resolver for a specified ref path for this evaluation. func EvalResolver(ref ast.Ref, r resolver.Resolver) EvalOption { - return func(e *EvalContext) { - e.resolvers = append(e.resolvers, refResolver{ref, r}) - } + return v1.EvalResolver(ref, r) } // EvalSortSets causes the evaluator to sort sets before returning them as JSON arrays. func EvalSortSets(yes bool) EvalOption { - return func(e *EvalContext) { - e.sortSets = yes - } + return v1.EvalSortSets(yes) } // EvalCopyMaps causes the evaluator to copy `map[string]interface{}`s before returning them. func EvalCopyMaps(yes bool) EvalOption { - return func(e *EvalContext) { - e.copyMaps = yes - } + return v1.EvalCopyMaps(yes) } // EvalPrintHook sets the object to use for handling print statement outputs. func EvalPrintHook(ph print.Hook) EvalOption { - return func(e *EvalContext) { - e.printHook = ph - } + return v1.EvalPrintHook(ph) } // EvalVirtualCache sets the topdown.VirtualCache to use for evaluation. This is // optional, and if not set, the default cache is used. func EvalVirtualCache(vc topdown.VirtualCache) EvalOption { - return func(e *EvalContext) { - e.virtualCache = vc - } -} - -func (pq preparedQuery) Modules() map[string]*ast.Module { - mods := make(map[string]*ast.Module) - - for name, mod := range pq.r.parsedModules { - mods[name] = mod - } - - for _, b := range pq.r.bundles { - for _, mod := range b.Modules { - mods[mod.Path] = mod.Parsed - } - } - - return mods -} - -// newEvalContext creates a new EvalContext overlaying any EvalOptions over top -// the Rego object on the preparedQuery. The returned function should be called -// once the evaluation is complete to close any transactions that might have -// been opened. -func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption) (*EvalContext, func(context.Context), error) { - ectx := &EvalContext{ - hasInput: false, - rawInput: nil, - parsedInput: nil, - metrics: nil, - txn: nil, - instrument: false, - instrumentation: nil, - partialNamespace: pq.r.partialNamespace, - queryTracers: nil, - unknowns: pq.r.unknowns, - parsedUnknowns: pq.r.parsedUnknowns, - compiledQuery: compiledQuery{}, - indexing: true, - earlyExit: true, - resolvers: pq.r.resolvers, - printHook: pq.r.printHook, - capabilities: pq.r.capabilities, - strictBuiltinErrors: pq.r.strictBuiltinErrors, - } - - for _, o := range options { - o(ectx) - } - - if ectx.metrics == nil { - ectx.metrics = metrics.New() - } - - if ectx.instrument { - ectx.instrumentation = topdown.NewInstrumentation(ectx.metrics) - } - - // Default to an empty "finish" function - finishFunc := func(context.Context) {} - - var err error - ectx.disableInlining, err = parseStringsToRefs(pq.r.disableInlining) - if err != nil { - return nil, finishFunc, err - } - - if ectx.txn == nil { - ectx.txn, err = pq.r.store.NewTransaction(ctx) - if err != nil { - return nil, finishFunc, err - } - finishFunc = func(ctx context.Context) { - pq.r.store.Abort(ctx, ectx.txn) - } - } - - // If we didn't get an input specified in the Eval options - // then fall back to the Rego object's input fields. 
- if !ectx.hasInput { - ectx.rawInput = pq.r.rawInput - ectx.parsedInput = pq.r.parsedInput - } - - if ectx.parsedInput == nil { - if ectx.rawInput == nil { - // Fall back to the original Rego objects input if none was specified - // Note that it could still be nil - ectx.rawInput = pq.r.rawInput - } - - if pq.r.targetPlugin(pq.r.target) == nil && // no plugin claims this target - pq.r.target != targetWasm { - ectx.parsedInput, err = pq.r.parseRawInput(ectx.rawInput, ectx.metrics) - if err != nil { - return nil, finishFunc, err - } - } - } - - return ectx, finishFunc, nil + return v1.EvalVirtualCache(vc) } // PreparedEvalQuery holds the prepared Rego state that has been pre-processed // for subsequent evaluations. -type PreparedEvalQuery struct { - preparedQuery -} - -// Eval evaluates this PartialResult's Rego object with additional eval options -// and returns a ResultSet. -// If options are provided they will override the original Rego options respective value. -// The original Rego object transaction will *not* be re-used. A new transaction will be opened -// if one is not provided with an EvalOption. -func (pq PreparedEvalQuery) Eval(ctx context.Context, options ...EvalOption) (ResultSet, error) { - ectx, finish, err := pq.newEvalContext(ctx, options) - if err != nil { - return nil, err - } - defer finish(ctx) - - ectx.compiledQuery = pq.r.compiledQueries[evalQueryType] - - return pq.r.eval(ctx, ectx) -} +type PreparedEvalQuery = v1.PreparedEvalQuery // PreparedPartialQuery holds the prepared Rego state that has been pre-processed // for partial evaluations. -type PreparedPartialQuery struct { - preparedQuery -} - -// Partial runs partial evaluation on the prepared query and returns the result. -// The original Rego object transaction will *not* be re-used. A new transaction will be opened -// if one is not provided with an EvalOption. -func (pq PreparedPartialQuery) Partial(ctx context.Context, options ...EvalOption) (*PartialQueries, error) { - ectx, finish, err := pq.newEvalContext(ctx, options) - if err != nil { - return nil, err - } - defer finish(ctx) - - ectx.compiledQuery = pq.r.compiledQueries[partialQueryType] - - return pq.r.partial(ctx, ectx) -} +type PreparedPartialQuery = v1.PreparedPartialQuery // Errors represents a collection of errors returned when evaluating Rego. -type Errors []error - -func (errs Errors) Error() string { - if len(errs) == 0 { - return "no error" - } - if len(errs) == 1 { - return fmt.Sprintf("1 error occurred: %v", errs[0].Error()) - } - buf := []string{fmt.Sprintf("%v errors occurred", len(errs))} - for _, err := range errs { - buf = append(buf, err.Error()) - } - return strings.Join(buf, "\n") -} - -var errPartialEvaluationNotEffective = errors.New("partial evaluation not effective") +type Errors = v1.Errors // IsPartialEvaluationNotEffectiveErr returns true if err is an error returned by // this package to indicate that partial evaluation was ineffective. func IsPartialEvaluationNotEffectiveErr(err error) bool { - errs, ok := err.(Errors) - if !ok { - return false - } - return len(errs) == 1 && errs[0] == errPartialEvaluationNotEffective -} - -type compiledQuery struct { - query ast.Body - compiler ast.QueryCompiler -} - -type queryType int - -// Define a query type for each of the top level Rego -// API's that compile queries differently. 
-const ( - evalQueryType queryType = iota - partialResultQueryType - partialQueryType - compileQueryType -) - -type loadPaths struct { - paths []string - filter loader.Filter + return v1.IsPartialEvaluationNotEffectiveErr(err) } // Rego constructs a query and can be evaluated to obtain results. -type Rego struct { - query string - parsedQuery ast.Body - compiledQueries map[queryType]compiledQuery - pkg string - parsedPackage *ast.Package - imports []string - parsedImports []*ast.Import - rawInput *interface{} - parsedInput ast.Value - unknowns []string - parsedUnknowns []*ast.Term - disableInlining []string - shallowInlining bool - skipPartialNamespace bool - partialNamespace string - modules []rawModule - parsedModules map[string]*ast.Module - compiler *ast.Compiler - store storage.Store - ownStore bool - txn storage.Transaction - metrics metrics.Metrics - queryTracers []topdown.QueryTracer - tracebuf *topdown.BufferTracer - trace bool - instrumentation *topdown.Instrumentation - instrument bool - capture map[*ast.Expr]ast.Var // map exprs to generated capture vars - termVarID int - dump io.Writer - runtime *ast.Term - time time.Time - seed io.Reader - capabilities *ast.Capabilities - builtinDecls map[string]*ast.Builtin - builtinFuncs map[string]*topdown.Builtin - unsafeBuiltins map[string]struct{} - loadPaths loadPaths - bundlePaths []string - bundles map[string]*bundle.Bundle - skipBundleVerification bool - interQueryBuiltinCache cache.InterQueryCache - ndBuiltinCache builtins.NDBCache - strictBuiltinErrors bool - builtinErrorList *[]topdown.Error - resolvers []refResolver - schemaSet *ast.SchemaSet - target string // target type (wasm, rego, etc.) - opa opa.EvalEngine - generateJSON func(*ast.Term, *EvalContext) (interface{}, error) - printHook print.Hook - enablePrintStatements bool - distributedTacingOpts tracing.Options - strict bool - pluginMgr *plugins.Manager - plugins []TargetPlugin - targetPrepState TargetPluginEval - regoVersion ast.RegoVersion -} +type Rego = v1.Rego // Function represents a built-in function that is callable in Rego. -type Function struct { - Name string - Description string - Decl *types.Function - Memoize bool - Nondeterministic bool -} +type Function = v1.Function // BuiltinContext contains additional attributes from the evaluator that // built-in functions can use, e.g., the request context.Context, caches, etc. -type BuiltinContext = topdown.BuiltinContext +type BuiltinContext = v1.BuiltinContext type ( // Builtin1 defines a built-in function that accepts 1 argument. - Builtin1 func(bctx BuiltinContext, op1 *ast.Term) (*ast.Term, error) + Builtin1 = v1.Builtin1 // Builtin2 defines a built-in function that accepts 2 arguments. - Builtin2 func(bctx BuiltinContext, op1, op2 *ast.Term) (*ast.Term, error) + Builtin2 = v1.Builtin2 // Builtin3 defines a built-in function that accepts 3 argument. - Builtin3 func(bctx BuiltinContext, op1, op2, op3 *ast.Term) (*ast.Term, error) + Builtin3 = v1.Builtin3 // Builtin4 defines a built-in function that accepts 4 argument. - Builtin4 func(bctx BuiltinContext, op1, op2, op3, op4 *ast.Term) (*ast.Term, error) + Builtin4 = v1.Builtin4 // BuiltinDyn defines a built-in function that accepts a list of arguments. - BuiltinDyn func(bctx BuiltinContext, terms []*ast.Term) (*ast.Term, error) + BuiltinDyn = v1.BuiltinDyn ) // RegisterBuiltin1 adds a built-in function globally inside the OPA runtime. 
func RegisterBuiltin1(decl *Function, impl Builtin1) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin1(decl, impl) } // RegisterBuiltin2 adds a built-in function globally inside the OPA runtime. func RegisterBuiltin2(decl *Function, impl Builtin2) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin2(decl, impl) } // RegisterBuiltin3 adds a built-in function globally inside the OPA runtime. func RegisterBuiltin3(decl *Function, impl Builtin3) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin3(decl, impl) } // RegisterBuiltin4 adds a built-in function globally inside the OPA runtime. func RegisterBuiltin4(decl *Function, impl Builtin4) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2], terms[3]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltin4(decl, impl) } // RegisterBuiltinDyn adds a built-in function globally inside the OPA runtime. func RegisterBuiltinDyn(decl *Function, impl BuiltinDyn) { - ast.RegisterBuiltin(&ast.Builtin{ - Name: decl.Name, - Description: decl.Description, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - }) - topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + v1.RegisterBuiltinDyn(decl, impl) } // Function1 returns an option that adds a built-in function to the Rego object. 
func Function1(decl *Function, f Builtin1) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function1(decl, f) } // Function2 returns an option that adds a built-in function to the Rego object. func Function2(decl *Function, f Builtin2) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function2(decl, f) } // Function3 returns an option that adds a built-in function to the Rego object. func Function3(decl *Function, f Builtin3) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function3(decl, f) } // Function4 returns an option that adds a built-in function to the Rego object. func Function4(decl *Function, f Builtin4) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2], terms[3]) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.Function4(decl, f) } // FunctionDyn returns an option that adds a built-in function to the Rego object. func FunctionDyn(decl *Function, f BuiltinDyn) func(*Rego) { - return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { - result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms) }) - return finishFunction(decl.Name, bctx, result, err, iter) - }) + return v1.FunctionDyn(decl, f) } // FunctionDecl returns an option that adds a custom-built-in function // __declaration__. NO implementation is provided. This is used for // non-interpreter execution envs (e.g., Wasm). func FunctionDecl(decl *Function) func(*Rego) { - return newDecl(decl) -} - -func newDecl(decl *Function) func(*Rego) { - return func(r *Rego) { - r.builtinDecls[decl.Name] = &ast.Builtin{ - Name: decl.Name, - Decl: decl.Decl, - } - } -} - -type memo struct { - term *ast.Term - err error -} - -type memokey string - -func memoize(decl *Function, bctx BuiltinContext, terms []*ast.Term, ifEmpty func() (*ast.Term, error)) (*ast.Term, error) { - - if !decl.Memoize { - return ifEmpty() - } - - // NOTE(tsandall): we assume memoization is applied to infrequent built-in - // calls that do things like fetch data from remote locations. As such, - // converting the terms to strings is acceptable for now. - var b strings.Builder - if _, err := b.WriteString(decl.Name); err != nil { - return nil, err - } - - // The term slice _may_ include an output term depending on how the caller - // referred to the built-in function. Only use the arguments as the cache - // key. Unification ensures we don't get false positive matches. 
- for i := 0; i < len(decl.Decl.Args()); i++ { - if _, err := b.WriteString(terms[i].String()); err != nil { - return nil, err - } - } - - key := memokey(b.String()) - hit, ok := bctx.Cache.Get(key) - var m memo - if ok { - m = hit.(memo) - } else { - m.term, m.err = ifEmpty() - bctx.Cache.Put(key, m) - } - - return m.term, m.err + return v1.FunctionDecl(decl) } // Dump returns an argument that sets the writer to dump debugging information to. func Dump(w io.Writer) func(r *Rego) { - return func(r *Rego) { - r.dump = w - } + return v1.Dump(w) } // Query returns an argument that sets the Rego query. func Query(q string) func(r *Rego) { - return func(r *Rego) { - r.query = q - } + return v1.Query(q) } // ParsedQuery returns an argument that sets the Rego query. func ParsedQuery(q ast.Body) func(r *Rego) { - return func(r *Rego) { - r.parsedQuery = q - } + return v1.ParsedQuery(q) } // Package returns an argument that sets the Rego package on the query's // context. func Package(p string) func(r *Rego) { - return func(r *Rego) { - r.pkg = p - } + return v1.Package(p) } // ParsedPackage returns an argument that sets the Rego package on the query's // context. func ParsedPackage(pkg *ast.Package) func(r *Rego) { - return func(r *Rego) { - r.parsedPackage = pkg - } + return v1.ParsedPackage(pkg) } // Imports returns an argument that adds a Rego import to the query's context. func Imports(p []string) func(r *Rego) { - return func(r *Rego) { - r.imports = append(r.imports, p...) - } + return v1.Imports(p) } // ParsedImports returns an argument that adds Rego imports to the query's // context. func ParsedImports(imp []*ast.Import) func(r *Rego) { - return func(r *Rego) { - r.parsedImports = append(r.parsedImports, imp...) - } + return v1.ParsedImports(imp) } // Input returns an argument that sets the Rego input document. Input should be // a native Go value representing the input document. func Input(x interface{}) func(r *Rego) { - return func(r *Rego) { - r.rawInput = &x - } + return v1.Input(x) } // ParsedInput returns an argument that sets the Rego input document. func ParsedInput(x ast.Value) func(r *Rego) { - return func(r *Rego) { - r.parsedInput = x - } + return v1.ParsedInput(x) } // Unknowns returns an argument that sets the values to treat as unknown during // partial evaluation. func Unknowns(unknowns []string) func(r *Rego) { - return func(r *Rego) { - r.unknowns = unknowns - } + return v1.Unknowns(unknowns) } // ParsedUnknowns returns an argument that sets the values to treat as unknown // during partial evaluation. func ParsedUnknowns(unknowns []*ast.Term) func(r *Rego) { - return func(r *Rego) { - r.parsedUnknowns = unknowns - } + return v1.ParsedUnknowns(unknowns) } // DisableInlining adds a set of paths to exclude from partial evaluation inlining. func DisableInlining(paths []string) func(r *Rego) { - return func(r *Rego) { - r.disableInlining = paths - } + return v1.DisableInlining(paths) } // ShallowInlining prevents rules that depend on unknown values from being inlined. // Rules that only depend on known values are inlined. func ShallowInlining(yes bool) func(r *Rego) { - return func(r *Rego) { - r.shallowInlining = yes - } + return v1.ShallowInlining(yes) } // SkipPartialNamespace disables namespacing of partial evalution results for support // rules generated from policy. Synthetic support rules are still namespaced. 
func SkipPartialNamespace(yes bool) func(r *Rego) { - return func(r *Rego) { - r.skipPartialNamespace = yes - } + return v1.SkipPartialNamespace(yes) } // PartialNamespace returns an argument that sets the namespace to use for // partial evaluation results. The namespace must be a valid package path // component. func PartialNamespace(ns string) func(r *Rego) { - return func(r *Rego) { - r.partialNamespace = ns - } + return v1.PartialNamespace(ns) } // Module returns an argument that adds a Rego module. func Module(filename, input string) func(r *Rego) { - return func(r *Rego) { - r.modules = append(r.modules, rawModule{ - filename: filename, - module: input, - }) - } + return v1.Module(filename, input) } // ParsedModule returns an argument that adds a parsed Rego module. If a string // module with the same filename name is added, it will override the parsed // module. func ParsedModule(module *ast.Module) func(*Rego) { - return func(r *Rego) { - var filename string - if module.Package.Location != nil { - filename = module.Package.Location.File - } else { - filename = fmt.Sprintf("module_%p.rego", module) - } - r.parsedModules[filename] = module - } + return v1.ParsedModule(module) } // Load returns an argument that adds a filesystem path to load data @@ -953,9 +377,7 @@ func ParsedModule(module *ast.Module) func(*Rego) { // The Load option can only be used once. // Note: Loading files will require a write transaction on the store. func Load(paths []string, filter loader.Filter) func(r *Rego) { - return func(r *Rego) { - r.loadPaths = loadPaths{paths, filter} - } + return v1.Load(paths, filter) } // LoadBundle returns an argument that adds a filesystem path to load @@ -963,23 +385,17 @@ func Load(paths []string, filter loader.Filter) func(r *Rego) { // to be loaded as a bundle. // Note: Loading bundles will require a write transaction on the store. func LoadBundle(path string) func(r *Rego) { - return func(r *Rego) { - r.bundlePaths = append(r.bundlePaths, path) - } + return v1.LoadBundle(path) } // ParsedBundle returns an argument that adds a bundle to be loaded. func ParsedBundle(name string, b *bundle.Bundle) func(r *Rego) { - return func(r *Rego) { - r.bundles[name] = b - } + return v1.ParsedBundle(name, b) } // Compiler returns an argument that sets the Rego compiler. func Compiler(c *ast.Compiler) func(r *Rego) { - return func(r *Rego) { - r.compiler = c - } + return v1.Compiler(c) } // Store returns an argument that sets the policy engine's data storage layer. @@ -988,9 +404,14 @@ func Compiler(c *ast.Compiler) func(r *Rego) { // must also be provided via the Transaction() option. After loading files // or bundles the transaction should be aborted or committed. func Store(s storage.Store) func(r *Rego) { - return func(r *Rego) { - r.store = s - } + return v1.Store(s) +} + +// StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values. +// +// Only applicable when no store has been set on the Rego object through the Store option. +func StoreReadAST(enabled bool) func(r *Rego) { + return v1.StoreReadAST(enabled) } // Transaction returns an argument that sets the transaction to use for storage @@ -1000,93 +421,65 @@ func Store(s storage.Store) func(r *Rego) { // Store() option. If using Load(), LoadBundle(), or ParsedBundle() options // the transaction will likely require write params. 
func Transaction(txn storage.Transaction) func(r *Rego) { - return func(r *Rego) { - r.txn = txn - } + return v1.Transaction(txn) } // Metrics returns an argument that sets the metrics collection. func Metrics(m metrics.Metrics) func(r *Rego) { - return func(r *Rego) { - r.metrics = m - } + return v1.Metrics(m) } // Instrument returns an argument that enables instrumentation for diagnosing // performance issues. func Instrument(yes bool) func(r *Rego) { - return func(r *Rego) { - r.instrument = yes - } + return v1.Instrument(yes) } // Trace returns an argument that enables tracing on r. func Trace(yes bool) func(r *Rego) { - return func(r *Rego) { - r.trace = yes - } + return v1.Trace(yes) } // Tracer returns an argument that adds a query tracer to r. // Deprecated: Use QueryTracer instead. func Tracer(t topdown.Tracer) func(r *Rego) { - return func(r *Rego) { - if t != nil { - r.queryTracers = append(r.queryTracers, topdown.WrapLegacyTracer(t)) - } - } + return v1.Tracer(t) } // QueryTracer returns an argument that adds a query tracer to r. func QueryTracer(t topdown.QueryTracer) func(r *Rego) { - return func(r *Rego) { - if t != nil { - r.queryTracers = append(r.queryTracers, t) - } - } + return v1.QueryTracer(t) } // Runtime returns an argument that sets the runtime data to provide to the // evaluation engine. func Runtime(term *ast.Term) func(r *Rego) { - return func(r *Rego) { - r.runtime = term - } + return v1.Runtime(term) } // Time sets the wall clock time to use during policy evaluation. Prepared queries // do not inherit this parameter. Use EvalTime to set the wall clock time when // executing a prepared query. func Time(x time.Time) func(r *Rego) { - return func(r *Rego) { - r.time = x - } + return v1.Time(x) } // Seed sets a reader that will seed randomization required by built-in functions. // If a seed is not provided crypto/rand.Reader is used. func Seed(r io.Reader) func(*Rego) { - return func(e *Rego) { - e.seed = r - } + return v1.Seed(r) } // PrintTrace is a helper function to write a human-readable version of the // trace to the writer w. func PrintTrace(w io.Writer, r *Rego) { - if r == nil || r.tracebuf == nil { - return - } - topdown.PrettyTrace(w, *r.tracebuf) + v1.PrintTrace(w, r) } // PrintTraceWithLocation is a helper function to write a human-readable version of the // trace to the writer w. func PrintTraceWithLocation(w io.Writer, r *Rego) { - if r == nil || r.tracebuf == nil { - return - } - topdown.PrettyTraceWithLocation(w, *r.tracebuf) + v1.PrintTraceWithLocation(w, r) } // UnsafeBuiltins sets the built-in functions to treat as unsafe and not allow. @@ -1094,96 +487,76 @@ func PrintTraceWithLocation(w io.Writer, r *Rego) { // compiler. This option is always honored for query compilation. Provide an // empty (non-nil) map to disable checks on queries. func UnsafeBuiltins(unsafeBuiltins map[string]struct{}) func(r *Rego) { - return func(r *Rego) { - r.unsafeBuiltins = unsafeBuiltins - } + return v1.UnsafeBuiltins(unsafeBuiltins) } // SkipBundleVerification skips verification of a signed bundle. func SkipBundleVerification(yes bool) func(r *Rego) { - return func(r *Rego) { - r.skipBundleVerification = yes - } + return v1.SkipBundleVerification(yes) } // InterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize // during evaluation. 
func InterQueryBuiltinCache(c cache.InterQueryCache) func(r *Rego) { - return func(r *Rego) { - r.interQueryBuiltinCache = c - } + return v1.InterQueryBuiltinCache(c) +} + +// InterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize +// during evaluation. +func InterQueryBuiltinValueCache(c cache.InterQueryValueCache) func(r *Rego) { + return v1.InterQueryBuiltinValueCache(c) } // NDBuiltinCache sets the non-deterministic builtins cache. func NDBuiltinCache(c builtins.NDBCache) func(r *Rego) { - return func(r *Rego) { - r.ndBuiltinCache = c - } + return v1.NDBuiltinCache(c) } // StrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors. func StrictBuiltinErrors(yes bool) func(r *Rego) { - return func(r *Rego) { - r.strictBuiltinErrors = yes - } + return v1.StrictBuiltinErrors(yes) } // BuiltinErrorList supplies an error slice to store built-in function errors. func BuiltinErrorList(list *[]topdown.Error) func(r *Rego) { - return func(r *Rego) { - r.builtinErrorList = list - } + return v1.BuiltinErrorList(list) } // Resolver sets a Resolver for a specified ref path. func Resolver(ref ast.Ref, r resolver.Resolver) func(r *Rego) { - return func(rego *Rego) { - rego.resolvers = append(rego.resolvers, refResolver{ref, r}) - } + return v1.Resolver(ref, r) } // Schemas sets the schemaSet func Schemas(x *ast.SchemaSet) func(r *Rego) { - return func(r *Rego) { - r.schemaSet = x - } + return v1.Schemas(x) } // Capabilities configures the underlying compiler's capabilities. // This option is ignored for module compilation if the caller supplies the // compiler. func Capabilities(c *ast.Capabilities) func(r *Rego) { - return func(r *Rego) { - r.capabilities = c - } + return v1.Capabilities(c) } // Target sets the runtime to exercise. func Target(t string) func(r *Rego) { - return func(r *Rego) { - r.target = t - } + return v1.Target(t) } // GenerateJSON sets the AST to JSON converter for the results. func GenerateJSON(f func(*ast.Term, *EvalContext) (interface{}, error)) func(r *Rego) { - return func(r *Rego) { - r.generateJSON = f - } + return v1.GenerateJSON(f) } // PrintHook sets the object to use for handling print statement outputs. func PrintHook(h print.Hook) func(r *Rego) { - return func(r *Rego) { - r.printHook = h - } + return v1.PrintHook(h) } // DistributedTracingOpts sets the options to be used by distributed tracing. func DistributedTracingOpts(tr tracing.Options) func(r *Rego) { - return func(r *Rego) { - r.distributedTacingOpts = tr - } + return v1.DistributedTracingOpts(tr) } // EnablePrintStatements enables print() calls. If this option is not provided, @@ -1191,1667 +564,65 @@ func DistributedTracingOpts(tr tracing.Options) func(r *Rego) { // queries and policies that passed as raw strings, i.e., this function will not // have any affect if the caller supplies the ast.Compiler instance. func EnablePrintStatements(yes bool) func(r *Rego) { - return func(r *Rego) { - r.enablePrintStatements = yes - } + return v1.EnablePrintStatements(yes) } // Strict enables or disables strict-mode in the compiler func Strict(yes bool) func(r *Rego) { - return func(r *Rego) { - r.strict = yes - } + return v1.Strict(yes) } func SetRegoVersion(version ast.RegoVersion) func(r *Rego) { - return func(r *Rego) { - r.regoVersion = version - } + return v1.SetRegoVersion(version) } // New returns a new Rego object. 
func New(options ...func(r *Rego)) *Rego { - - r := &Rego{ - parsedModules: map[string]*ast.Module{}, - capture: map[*ast.Expr]ast.Var{}, - compiledQueries: map[queryType]compiledQuery{}, - builtinDecls: map[string]*ast.Builtin{}, - builtinFuncs: map[string]*topdown.Builtin{}, - bundles: map[string]*bundle.Bundle{}, - } - - for _, option := range options { - option(r) - } - - if r.compiler == nil { - r.compiler = ast.NewCompiler(). - WithUnsafeBuiltins(r.unsafeBuiltins). - WithBuiltins(r.builtinDecls). - WithDebug(r.dump). - WithSchemas(r.schemaSet). - WithCapabilities(r.capabilities). - WithEnablePrintStatements(r.enablePrintStatements). - WithStrict(r.strict). - WithUseTypeCheckAnnotations(true) - - // topdown could be target "" or "rego", but both could be overridden by - // a target plugin (checked below) - if r.target == targetWasm { - r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) + opts := make([]func(r *Rego), 0, len(options)+1) + opts = append(opts, options...) + opts = append(opts, func(r *Rego) { + if r.RegoVersion() == ast.RegoUndefined { + SetRegoVersion(ast.DefaultRegoVersion)(r) } - } - - if r.store == nil { - r.store = inmem.New() - r.ownStore = true - } else { - r.ownStore = false - } - - if r.metrics == nil { - r.metrics = metrics.New() - } - - if r.instrument { - r.instrumentation = topdown.NewInstrumentation(r.metrics) - r.compiler.WithMetrics(r.metrics) - } - - if r.trace { - r.tracebuf = topdown.NewBufferTracer() - r.queryTracers = append(r.queryTracers, r.tracebuf) - } - - if r.partialNamespace == "" { - r.partialNamespace = defaultPartialNamespace - } - - if r.generateJSON == nil { - r.generateJSON = generateJSON - } - - if r.pluginMgr != nil { - for _, name := range r.pluginMgr.Plugins() { - p := r.pluginMgr.Plugin(name) - if p0, ok := p.(TargetPlugin); ok { - r.plugins = append(r.plugins, p0) - } - } - } - - if t := r.targetPlugin(r.target); t != nil { - r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) - } - - return r -} - -// Eval evaluates this Rego object and returns a ResultSet. -func (r *Rego) Eval(ctx context.Context) (ResultSet, error) { - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return nil, err - } - - pq, err := r.PrepareForEval(ctx) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return nil, err - } - - evalArgs := []EvalOption{ - EvalTransaction(r.txn), - EvalMetrics(r.metrics), - EvalInstrument(r.instrument), - EvalTime(r.time), - EvalInterQueryBuiltinCache(r.interQueryBuiltinCache), - EvalSeed(r.seed), - } - - if r.ndBuiltinCache != nil { - evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache)) - } - - for _, qt := range r.queryTracers { - evalArgs = append(evalArgs, EvalQueryTracer(qt)) - } - - for i := range r.resolvers { - evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r)) - } - - rs, err := pq.Eval(ctx, evalArgs...) - txnErr := txnClose(ctx, err) // Always call closer - if err == nil { - err = txnErr - } - return rs, err -} - -// PartialEval has been deprecated and renamed to PartialResult. -func (r *Rego) PartialEval(ctx context.Context) (PartialResult, error) { - return r.PartialResult(ctx) -} - -// PartialResult partially evaluates this Rego object and returns a PartialResult. 
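The shim's New above adds only one piece of logic of its own: if the caller never supplied SetRegoVersion, it pins ast.DefaultRegoVersion before delegating to v1.New. A hedged sketch of how a caller that depends on legacy syntax might pin the version explicitly instead; the query and file name are illustrative:

package policy

import (
	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/rego"
)

// newLegacyQuery builds a Rego object that keeps parsing modules as Rego v0.
func newLegacyQuery(moduleSrc string) *rego.Rego {
	return rego.New(
		rego.Query("data.policy.allow"),
		rego.Module("policy.rego", moduleSrc),
		// Without this option, New falls back to ast.DefaultRegoVersion.
		rego.SetRegoVersion(ast.RegoV0),
	)
}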
-func (r *Rego) PartialResult(ctx context.Context) (PartialResult, error) { - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return PartialResult{}, err - } - - pq, err := r.PrepareForEval(ctx, WithPartialEval()) - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return PartialResult{}, err - } - if txnErr != nil { - return PartialResult{}, txnErr - } - - pr := PartialResult{ - compiler: pq.r.compiler, - store: pq.r.store, - body: pq.r.parsedQuery, - builtinDecls: pq.r.builtinDecls, - builtinFuncs: pq.r.builtinFuncs, - } - - return pr, nil -} - -// Partial runs partial evaluation on r and returns the result. -func (r *Rego) Partial(ctx context.Context) (*PartialQueries, error) { - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return nil, err - } - - pq, err := r.PrepareForPartial(ctx) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return nil, err - } - - evalArgs := []EvalOption{ - EvalTransaction(r.txn), - EvalMetrics(r.metrics), - EvalInstrument(r.instrument), - EvalInterQueryBuiltinCache(r.interQueryBuiltinCache), - } - - if r.ndBuiltinCache != nil { - evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache)) - } - - for _, t := range r.queryTracers { - evalArgs = append(evalArgs, EvalQueryTracer(t)) - } - - for i := range r.resolvers { - evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r)) - } - - pqs, err := pq.Partial(ctx, evalArgs...) - txnErr := txnClose(ctx, err) // Always call closer - if err == nil { - err = txnErr - } - return pqs, err + }) + + return v1.New(opts...) } // CompileOption defines a function to set options on Compile calls. -type CompileOption func(*CompileContext) +type CompileOption = v1.CompileOption // CompileContext contains options for Compile calls. -type CompileContext struct { - partial bool -} +type CompileContext = v1.CompileContext // CompilePartial defines an option to control whether partial evaluation is run // before the query is planned and compiled. func CompilePartial(yes bool) CompileOption { - return func(cfg *CompileContext) { - cfg.partial = yes - } -} - -// Compile returns a compiled policy query. -func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResult, error) { - - var cfg CompileContext - - for _, opt := range opts { - opt(&cfg) - } - - var queries []ast.Body - modules := make([]*ast.Module, 0, len(r.compiler.Modules)) - - if cfg.partial { - - pq, err := r.Partial(ctx) - if err != nil { - return nil, err - } - if r.dump != nil { - if len(pq.Queries) != 0 { - msg := fmt.Sprintf("QUERIES (%d total):", len(pq.Queries)) - fmt.Fprintln(r.dump, msg) - fmt.Fprintln(r.dump, strings.Repeat("-", len(msg))) - for i := range pq.Queries { - fmt.Println(pq.Queries[i]) - } - fmt.Fprintln(r.dump) - } - if len(pq.Support) != 0 { - msg := fmt.Sprintf("SUPPORT (%d total):", len(pq.Support)) - fmt.Fprintln(r.dump, msg) - fmt.Fprintln(r.dump, strings.Repeat("-", len(msg))) - for i := range pq.Support { - fmt.Println(pq.Support[i]) - } - fmt.Fprintln(r.dump) - } - } - - queries = pq.Queries - modules = pq.Support - - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - } else { - var err error - // If creating a new transaction it should be closed before calling the - // planner to avoid holding open the transaction longer than needed. 
- // - // TODO(tsandall): in future, planner could make use of store, in which - // case this will need to change. - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return nil, err - } - - err = r.prepare(ctx, compileQueryType, nil) - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return nil, err - } - if txnErr != nil { - return nil, err - } - - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - - queries = []ast.Body{r.compiledQueries[compileQueryType].query} - } - - if tgt := r.targetPlugin(r.target); tgt != nil { - return nil, fmt.Errorf("unsupported for rego target plugins") - } - - return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here -} - -func (r *Rego) compileWasm(_ []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) { - policy, err := r.planQuery(queries, qType) - if err != nil { - return nil, err - } - - m, err := wasm.New().WithPolicy(policy).Compile() - if err != nil { - return nil, err - } - - var out bytes.Buffer - if err := encoding.WriteModule(&out, m); err != nil { - return nil, err - } - - return &CompileResult{ - Bytes: out.Bytes(), - }, nil + return v1.CompilePartial(yes) } // PrepareOption defines a function to set an option to control // the behavior of the Prepare call. -type PrepareOption func(*PrepareConfig) +type PrepareOption = v1.PrepareOption // PrepareConfig holds settings to control the behavior of the // Prepare call. -type PrepareConfig struct { - doPartialEval bool - disableInlining *[]string - builtinFuncs map[string]*topdown.Builtin -} +type PrepareConfig = v1.PrepareConfig // WithPartialEval configures an option for PrepareForEval // which will have it perform partial evaluation while preparing // the query (similar to rego.Rego#PartialResult) func WithPartialEval() PrepareOption { - return func(p *PrepareConfig) { - p.doPartialEval = true - } + return v1.WithPartialEval() } // WithNoInline adds a set of paths to exclude from partial evaluation inlining. func WithNoInline(paths []string) PrepareOption { - return func(p *PrepareConfig) { - p.disableInlining = &paths - } + return v1.WithNoInline(paths) } // WithBuiltinFuncs carries the rego.Function{1,2,3} per-query function definitions // to the target plugins. func WithBuiltinFuncs(bis map[string]*topdown.Builtin) PrepareOption { - return func(p *PrepareConfig) { - if p.builtinFuncs == nil { - p.builtinFuncs = make(map[string]*topdown.Builtin, len(bis)) - } - for k, v := range bis { - p.builtinFuncs[k] = v - } - } -} - -// BuiltinFuncs allows retrieving the builtin funcs set via PrepareOption -// WithBuiltinFuncs. -func (p *PrepareConfig) BuiltinFuncs() map[string]*topdown.Builtin { - return p.builtinFuncs -} - -// PrepareForEval will parse inputs, modules, and query arguments in preparation -// of evaluating them. -func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (PreparedEvalQuery, error) { - if !r.hasQuery() { - return PreparedEvalQuery{}, fmt.Errorf("cannot evaluate empty query") - } - - pCfg := &PrepareConfig{} - for _, o := range opts { - o(pCfg) - } - - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return PreparedEvalQuery{}, err - } - - // If the caller wanted to do partial evaluation as part of preparation - // do it now and use the new Rego object. 
- if pCfg.doPartialEval { - - pr, err := r.partialResult(ctx, pCfg) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - // Prepare the new query using the result of partial evaluation - pq, err := pr.Rego(Transaction(r.txn)).PrepareForEval(ctx) - txnErr := txnClose(ctx, err) - if err != nil { - return pq, err - } - return pq, txnErr - } - - err = r.prepare(ctx, evalQueryType, []extraStage{ - { - after: "ResolveRefs", - stage: ast.QueryCompilerStageDefinition{ - Name: "RewriteToCaptureValue", - MetricName: "query_compile_stage_rewrite_to_capture_value", - Stage: r.rewriteQueryToCaptureValue, - }, - }, - }) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - switch r.target { - case targetWasm: // TODO(sr): make wasm a target plugin, too - - if r.hasWasmModule() { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, fmt.Errorf("wasm target not supported") - } - - var modules []*ast.Module - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - - queries := []ast.Body{r.compiledQueries[evalQueryType].query} - - e, err := opa.LookupEngine(targetWasm) - if err != nil { - return PreparedEvalQuery{}, err - } - - // nolint: staticcheck // SA4006 false positive - cr, err := r.compileWasm(modules, queries, evalQueryType) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - // nolint: staticcheck // SA4006 false positive - data, err := r.store.Read(ctx, r.txn, storage.Path{}) - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - - o, err := e.New().WithPolicyBytes(cr.Bytes).WithDataJSON(data).Init() - if err != nil { - _ = txnClose(ctx, err) // Ignore error - return PreparedEvalQuery{}, err - } - r.opa = o - - case targetRego: // do nothing, don't lookup default plugin - default: // either a specific plugin target, or one that is default - if tgt := r.targetPlugin(r.target); tgt != nil { - queries := []ast.Body{r.compiledQueries[evalQueryType].query} - pol, err := r.planQuery(queries, evalQueryType) - if err != nil { - return PreparedEvalQuery{}, err - } - // always add the builtins provided via rego.FunctionN options - opts = append(opts, WithBuiltinFuncs(r.builtinFuncs)) - r.targetPrepState, err = tgt.PrepareForEval(ctx, pol, opts...) - if err != nil { - return PreparedEvalQuery{}, err - } - } - } - - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return PreparedEvalQuery{}, err - } - if txnErr != nil { - return PreparedEvalQuery{}, txnErr - } - - return PreparedEvalQuery{preparedQuery{r, pCfg}}, err -} - -// PrepareForPartial will parse inputs, modules, and query arguments in preparation -// of partially evaluating them. 
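The main reason to reach for PrepareForEval is to pay the parse/compile cost once and then evaluate many inputs against the prepared query. A rough usage sketch, assuming hypothetical module source and inputs:

package policy

import (
	"context"

	"github.com/open-policy-agent/opa/rego"
)

// evalAll prepares the query once and reuses it for every input.
func evalAll(ctx context.Context, moduleSrc string, inputs []interface{}) ([]bool, error) {
	pq, err := rego.New(
		rego.Query("data.example.allow"),
		rego.Module("example.rego", moduleSrc),
	).PrepareForEval(ctx)
	if err != nil {
		return nil, err
	}

	out := make([]bool, 0, len(inputs))
	for _, in := range inputs {
		rs, err := pq.Eval(ctx, rego.EvalInput(in))
		if err != nil {
			return nil, err
		}
		out = append(out, rs.Allowed())
	}
	return out, nil
}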
-func (r *Rego) PrepareForPartial(ctx context.Context, opts ...PrepareOption) (PreparedPartialQuery, error) { - if !r.hasQuery() { - return PreparedPartialQuery{}, fmt.Errorf("cannot evaluate empty query") - } - - pCfg := &PrepareConfig{} - for _, o := range opts { - o(pCfg) - } - - var err error - var txnClose transactionCloser - r.txn, txnClose, err = r.getTxn(ctx) - if err != nil { - return PreparedPartialQuery{}, err - } - - err = r.prepare(ctx, partialQueryType, []extraStage{ - { - after: "CheckSafety", - stage: ast.QueryCompilerStageDefinition{ - Name: "RewriteEquals", - MetricName: "query_compile_stage_rewrite_equals", - Stage: r.rewriteEqualsForPartialQueryCompile, - }, - }, - }) - txnErr := txnClose(ctx, err) // Always call closer - if err != nil { - return PreparedPartialQuery{}, err - } - if txnErr != nil { - return PreparedPartialQuery{}, txnErr - } - - return PreparedPartialQuery{preparedQuery{r, pCfg}}, err -} - -func (r *Rego) prepare(ctx context.Context, qType queryType, extras []extraStage) error { - var err error - - r.parsedInput, err = r.parseInput() - if err != nil { - return err - } - - err = r.loadFiles(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - err = r.loadBundles(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - err = r.parseModules(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - // Compile the modules *before* the query, else functions - // defined in the module won't be found... - err = r.compileModules(ctx, r.txn, r.metrics) - if err != nil { - return err - } - - imports, err := r.prepareImports() - if err != nil { - return err - } - - queryImports := []*ast.Import{} - for _, imp := range imports { - path := imp.Path.Value.(ast.Ref) - if path.HasPrefix([]*ast.Term{ast.FutureRootDocument}) || path.HasPrefix([]*ast.Term{ast.RegoRootDocument}) { - queryImports = append(queryImports, imp) - } - } - - r.parsedQuery, err = r.parseQuery(queryImports, r.metrics) - if err != nil { - return err - } - - err = r.compileAndCacheQuery(qType, r.parsedQuery, imports, r.metrics, extras) - if err != nil { - return err - } - - return nil -} - -func (r *Rego) parseModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { - if len(r.modules) == 0 { - return nil - } - - ids, err := r.store.ListPolicies(ctx, txn) - if err != nil { - return err - } - - m.Timer(metrics.RegoModuleParse).Start() - defer m.Timer(metrics.RegoModuleParse).Stop() - var errs Errors - - // Parse any modules that are saved to the store, but only if - // another compile step is going to occur (ie. we have parsed modules - // that need to be compiled). 
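PrepareForPartial and Partial are the partial-evaluation entry points: everything except the declared unknowns is evaluated away and the residual queries (plus any support modules) are returned. A small sketch, assuming a hypothetical policy where only input is unknown:

package policy

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/rego"
)

func residuals(ctx context.Context, moduleSrc string) error {
	pq, err := rego.New(
		rego.Query("data.example.allow == true"),
		rego.Module("example.rego", moduleSrc),
		rego.Unknowns([]string{"input"}), // treat only input as unknown
	).Partial(ctx)
	if err != nil {
		return err
	}
	for _, q := range pq.Queries {
		fmt.Println(q) // residual conditions over the unknown input
	}
	for _, m := range pq.Support {
		fmt.Println(m) // support modules generated during partial evaluation
	}
	return nil
}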
- for _, id := range ids { - // if it is already on the compiler we're using - // then don't bother to re-parse it from source - if _, haveMod := r.compiler.Modules[id]; haveMod { - continue - } - - bs, err := r.store.GetPolicy(ctx, txn, id) - if err != nil { - return err - } - - parsed, err := ast.ParseModuleWithOpts(id, string(bs), ast.ParserOptions{RegoVersion: r.regoVersion}) - if err != nil { - errs = append(errs, err) - } - - r.parsedModules[id] = parsed - } - - // Parse any passed in as arguments to the Rego object - for _, module := range r.modules { - p, err := module.ParseWithOpts(ast.ParserOptions{RegoVersion: r.regoVersion}) - if err != nil { - switch errorWithType := err.(type) { - case ast.Errors: - for _, e := range errorWithType { - errs = append(errs, e) - } - default: - errs = append(errs, errorWithType) - } - } - r.parsedModules[module.filename] = p - } - - if len(errs) > 0 { - return errs - } - - return nil -} - -func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { - if len(r.loadPaths.paths) == 0 { - return nil - } - - m.Timer(metrics.RegoLoadFiles).Start() - defer m.Timer(metrics.RegoLoadFiles).Stop() - - result, err := loader.NewFileLoader(). - WithMetrics(m). - WithProcessAnnotation(true). - WithRegoVersion(r.regoVersion). - Filtered(r.loadPaths.paths, r.loadPaths.filter) - if err != nil { - return err - } - for name, mod := range result.Modules { - r.parsedModules[name] = mod.Parsed - } - - if len(result.Documents) > 0 { - err = r.store.Write(ctx, txn, storage.AddOp, storage.Path{}, result.Documents) - if err != nil { - return err - } - } - return nil -} - -func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.Metrics) error { - if len(r.bundlePaths) == 0 { - return nil - } - - m.Timer(metrics.RegoLoadBundles).Start() - defer m.Timer(metrics.RegoLoadBundles).Stop() - - for _, path := range r.bundlePaths { - bndl, err := loader.NewFileLoader(). - WithMetrics(m). - WithProcessAnnotation(true). - WithSkipBundleVerification(r.skipBundleVerification). - WithRegoVersion(r.regoVersion). - AsBundle(path) - if err != nil { - return fmt.Errorf("loading error: %s", err) - } - r.bundles[path] = bndl - } - return nil -} - -func (r *Rego) parseInput() (ast.Value, error) { - if r.parsedInput != nil { - return r.parsedInput, nil - } - return r.parseRawInput(r.rawInput, r.metrics) -} - -func (r *Rego) parseRawInput(rawInput *interface{}, m metrics.Metrics) (ast.Value, error) { - var input ast.Value - - if rawInput == nil { - return input, nil - } - - m.Timer(metrics.RegoInputParse).Start() - defer m.Timer(metrics.RegoInputParse).Stop() - - rawPtr := util.Reference(rawInput) - - // roundtrip through json: this turns slices (e.g. 
[]string, []bool) into - // []interface{}, the only array type ast.InterfaceToValue can work with - if err := util.RoundTrip(rawPtr); err != nil { - return nil, err - } - - return ast.InterfaceToValue(*rawPtr) -} - -func (r *Rego) parseQuery(queryImports []*ast.Import, m metrics.Metrics) (ast.Body, error) { - if r.parsedQuery != nil { - return r.parsedQuery, nil - } - - m.Timer(metrics.RegoQueryParse).Start() - defer m.Timer(metrics.RegoQueryParse).Stop() - - popts, err := future.ParserOptionsFromFutureImports(queryImports) - if err != nil { - return nil, err - } - popts, err = parserOptionsFromRegoVersionImport(queryImports, popts) - if err != nil { - return nil, err - } - popts.SkipRules = true - return ast.ParseBodyWithOpts(r.query, popts) -} - -func parserOptionsFromRegoVersionImport(imports []*ast.Import, popts ast.ParserOptions) (ast.ParserOptions, error) { - for _, imp := range imports { - path := imp.Path.Value.(ast.Ref) - if ast.Compare(path, ast.RegoV1CompatibleRef) == 0 { - popts.RegoVersion = ast.RegoV1 - return popts, nil - } - } - return popts, nil -} - -func (r *Rego) compileModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { - - // Only compile again if there are new modules. - if len(r.bundles) > 0 || len(r.parsedModules) > 0 { - - // The bundle.Activate call will activate any bundles passed in - // (ie compile + handle data store changes), and include any of - // the additional modules passed in. If no bundles are provided - // it will only compile the passed in modules. - // Use this as the single-point of compiling everything only a - // single time. - opts := &bundle.ActivateOpts{ - Ctx: ctx, - Store: r.store, - Txn: txn, - Compiler: r.compilerForTxn(ctx, r.store, txn), - Metrics: m, - Bundles: r.bundles, - ExtraModules: r.parsedModules, - ParserOptions: ast.ParserOptions{RegoVersion: r.regoVersion}, - } - err := bundle.Activate(opts) - if err != nil { - return err - } - } - - // Ensure all configured resolvers from the store are loaded. Skip if any were explicitly provided. - if len(r.resolvers) == 0 { - resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, r.store, txn, r.bundles) - if err != nil { - return err - } - - for _, rslvr := range resolvers { - for _, ep := range rslvr.Entrypoints() { - r.resolvers = append(r.resolvers, refResolver{ep, rslvr}) - } - } - } - return nil -} - -func (r *Rego) compileAndCacheQuery(qType queryType, query ast.Body, imports []*ast.Import, m metrics.Metrics, extras []extraStage) error { - m.Timer(metrics.RegoQueryCompile).Start() - defer m.Timer(metrics.RegoQueryCompile).Stop() - - cachedQuery, ok := r.compiledQueries[qType] - if ok && cachedQuery.query != nil && cachedQuery.compiler != nil { - return nil - } - - qc, compiled, err := r.compileQuery(query, imports, m, extras) - if err != nil { - return err - } - - // cache the query for future use - r.compiledQueries[qType] = compiledQuery{ - query: compiled, - compiler: qc, - } - return nil -} - -func (r *Rego) prepareImports() ([]*ast.Import, error) { - imports := r.parsedImports - - if len(r.imports) > 0 { - s := make([]string, len(r.imports)) - for i := range r.imports { - s[i] = fmt.Sprintf("import %v", r.imports[i]) - } - parsed, err := ast.ParseImports(strings.Join(s, "\n")) - if err != nil { - return nil, err - } - imports = append(imports, parsed...) 
- } - return imports, nil -} - -func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) { - var pkg *ast.Package - - if r.pkg != "" { - var err error - pkg, err = ast.ParsePackage(fmt.Sprintf("package %v", r.pkg)) - if err != nil { - return nil, nil, err - } - } else { - pkg = r.parsedPackage - } - - qctx := ast.NewQueryContext(). - WithPackage(pkg). - WithImports(imports) - - qc := r.compiler.QueryCompiler(). - WithContext(qctx). - WithUnsafeBuiltins(r.unsafeBuiltins). - WithEnablePrintStatements(r.enablePrintStatements). - WithStrict(false) - - for _, extra := range extras { - qc = qc.WithStageAfter(extra.after, extra.stage) - } - - compiled, err := qc.Compile(query) - - return qc, compiled, err - -} - -func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) { - switch { - case r.targetPrepState != nil: // target plugin flow - var val ast.Value - if r.runtime != nil { - val = r.runtime.Value - } - s, err := r.targetPrepState.Eval(ctx, ectx, val) - if err != nil { - return nil, err - } - return r.valueToQueryResult(s, ectx) - case r.target == targetWasm: - return r.evalWasm(ctx, ectx) - case r.target == targetRego: // continue - } - - q := topdown.NewQuery(ectx.compiledQuery.query). - WithQueryCompiler(ectx.compiledQuery.compiler). - WithCompiler(r.compiler). - WithStore(r.store). - WithTransaction(ectx.txn). - WithBuiltins(r.builtinFuncs). - WithMetrics(ectx.metrics). - WithInstrumentation(ectx.instrumentation). - WithRuntime(r.runtime). - WithIndexing(ectx.indexing). - WithEarlyExit(ectx.earlyExit). - WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). - WithStrictBuiltinErrors(r.strictBuiltinErrors). - WithBuiltinErrorList(r.builtinErrorList). - WithSeed(ectx.seed). - WithPrintHook(ectx.printHook). - WithDistributedTracingOpts(r.distributedTacingOpts). - WithVirtualCache(ectx.virtualCache) - - if !ectx.time.IsZero() { - q = q.WithTime(ectx.time) - } - - if ectx.ndBuiltinCache != nil { - q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) - } - - for i := range ectx.queryTracers { - q = q.WithQueryTracer(ectx.queryTracers[i]) - } - - if ectx.parsedInput != nil { - q = q.WithInput(ast.NewTerm(ectx.parsedInput)) - } - - for i := range ectx.resolvers { - q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) - } - - // Cancel query if context is cancelled or deadline is reached. 
- c := topdown.NewCancel() - q = q.WithCancel(c) - exit := make(chan struct{}) - defer close(exit) - go waitForDone(ctx, exit, func() { - c.Cancel() - }) - - var rs ResultSet - err := q.Iter(ctx, func(qr topdown.QueryResult) error { - result, err := r.generateResult(qr, ectx) - if err != nil { - return err - } - rs = append(rs, result) - return nil - }) - - if err != nil { - return nil, err - } - - if len(rs) == 0 { - return nil, nil - } - - return rs, nil -} - -func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, error) { - - input := ectx.rawInput - if ectx.parsedInput != nil { - i := interface{}(ectx.parsedInput) - input = &i - } - result, err := r.opa.Eval(ctx, opa.EvalOpts{ - Metrics: r.metrics, - Input: input, - Time: ectx.time, - Seed: ectx.seed, - InterQueryBuiltinCache: ectx.interQueryBuiltinCache, - NDBuiltinCache: ectx.ndBuiltinCache, - PrintHook: ectx.printHook, - Capabilities: ectx.capabilities, - }) - if err != nil { - return nil, err - } - - parsed, err := ast.ParseTerm(string(result.Result)) - if err != nil { - return nil, err - } - - return r.valueToQueryResult(parsed.Value, ectx) -} - -func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet, error) { - resultSet, ok := res.(ast.Set) - if !ok { - return nil, fmt.Errorf("illegal result type") - } - - if resultSet.Len() == 0 { - return nil, nil - } - - var rs ResultSet - err := resultSet.Iter(func(term *ast.Term) error { - obj, ok := term.Value.(ast.Object) - if !ok { - return fmt.Errorf("illegal result type") - } - qr := topdown.QueryResult{} - obj.Foreach(func(k, v *ast.Term) { - kvt := ast.VarTerm(string(k.Value.(ast.String))) - qr[kvt.Value.(ast.Var)] = v - }) - result, err := r.generateResult(qr, ectx) - if err != nil { - return err - } - rs = append(rs, result) - return nil - }) - - return rs, err -} - -func (r *Rego) generateResult(qr topdown.QueryResult, ectx *EvalContext) (Result, error) { - - rewritten := ectx.compiledQuery.compiler.RewrittenVars() - - result := newResult() - for k, term := range qr { - v, err := r.generateJSON(term, ectx) - if err != nil { - return result, err - } - - if rw, ok := rewritten[k]; ok { - k = rw - } - if isTermVar(k) || isTermWasmVar(k) || k.IsGenerated() || k.IsWildcard() { - continue - } - result.Bindings[string(k)] = v - } - - for _, expr := range ectx.compiledQuery.query { - if expr.Generated { - continue - } - - if k, ok := r.capture[expr]; ok { - v, err := r.generateJSON(qr[k], ectx) - if err != nil { - return result, err - } - result.Expressions = append(result.Expressions, newExpressionValue(expr, v)) - } else { - result.Expressions = append(result.Expressions, newExpressionValue(expr, true)) - } - - } - return result, nil -} - -func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialResult, error) { - - err := r.prepare(ctx, partialResultQueryType, []extraStage{ - { - after: "ResolveRefs", - stage: ast.QueryCompilerStageDefinition{ - Name: "RewriteForPartialEval", - MetricName: "query_compile_stage_rewrite_for_partial_eval", - Stage: r.rewriteQueryForPartialEval, - }, - }, - }) - if err != nil { - return PartialResult{}, err - } - - ectx := &EvalContext{ - parsedInput: r.parsedInput, - metrics: r.metrics, - txn: r.txn, - partialNamespace: r.partialNamespace, - queryTracers: r.queryTracers, - compiledQuery: r.compiledQueries[partialResultQueryType], - instrumentation: r.instrumentation, - indexing: true, - resolvers: r.resolvers, - capabilities: r.capabilities, - strictBuiltinErrors: r.strictBuiltinErrors, - 
} - - disableInlining := r.disableInlining - - if pCfg.disableInlining != nil { - disableInlining = *pCfg.disableInlining - } - - ectx.disableInlining, err = parseStringsToRefs(disableInlining) - if err != nil { - return PartialResult{}, err - } - - pq, err := r.partial(ctx, ectx) - if err != nil { - return PartialResult{}, err - } - - // Construct module for queries. - id := fmt.Sprintf("__partialresult__%s__", ectx.partialNamespace) - - module, err := ast.ParseModule(id, "package "+ectx.partialNamespace) - if err != nil { - return PartialResult{}, fmt.Errorf("bad partial namespace") - } - - module.Rules = make([]*ast.Rule, len(pq.Queries)) - for i, body := range pq.Queries { - rule := &ast.Rule{ - Head: ast.NewHead(ast.Var("__result__"), nil, ast.Wildcard), - Body: body, - Module: module, - } - module.Rules[i] = rule - if checkPartialResultForRecursiveRefs(body, rule.Path()) { - return PartialResult{}, Errors{errPartialEvaluationNotEffective} - } - } - - // Update compiler with partial evaluation output. - r.compiler.Modules[id] = module - for i, module := range pq.Support { - r.compiler.Modules[fmt.Sprintf("__partialsupport__%s__%d__", ectx.partialNamespace, i)] = module - } - - r.metrics.Timer(metrics.RegoModuleCompile).Start() - r.compilerForTxn(ctx, r.store, r.txn).Compile(r.compiler.Modules) - r.metrics.Timer(metrics.RegoModuleCompile).Stop() - - if r.compiler.Failed() { - return PartialResult{}, r.compiler.Errors - } - - result := PartialResult{ - compiler: r.compiler, - store: r.store, - body: ast.MustParseBody(fmt.Sprintf("data.%v.__result__", ectx.partialNamespace)), - builtinDecls: r.builtinDecls, - builtinFuncs: r.builtinFuncs, - } - - return result, nil -} - -func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, error) { - - var unknowns []*ast.Term - - switch { - case ectx.parsedUnknowns != nil: - unknowns = ectx.parsedUnknowns - case ectx.unknowns != nil: - unknowns = make([]*ast.Term, len(ectx.unknowns)) - for i := range ectx.unknowns { - var err error - unknowns[i], err = ast.ParseTerm(ectx.unknowns[i]) - if err != nil { - return nil, err - } - } - default: - // Use input document as unknown if caller has not specified any. - unknowns = []*ast.Term{ast.NewTerm(ast.InputRootRef)} - } - - q := topdown.NewQuery(ectx.compiledQuery.query). - WithQueryCompiler(ectx.compiledQuery.compiler). - WithCompiler(r.compiler). - WithStore(r.store). - WithTransaction(ectx.txn). - WithBuiltins(r.builtinFuncs). - WithMetrics(ectx.metrics). - WithInstrumentation(ectx.instrumentation). - WithUnknowns(unknowns). - WithDisableInlining(ectx.disableInlining). - WithRuntime(r.runtime). - WithIndexing(ectx.indexing). - WithEarlyExit(ectx.earlyExit). - WithPartialNamespace(ectx.partialNamespace). - WithSkipPartialNamespace(r.skipPartialNamespace). - WithShallowInlining(r.shallowInlining). - WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). - WithStrictBuiltinErrors(ectx.strictBuiltinErrors). - WithSeed(ectx.seed). - WithPrintHook(ectx.printHook) - - if !ectx.time.IsZero() { - q = q.WithTime(ectx.time) - } - - if ectx.ndBuiltinCache != nil { - q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) - } - - for i := range ectx.queryTracers { - q = q.WithQueryTracer(ectx.queryTracers[i]) - } - - if ectx.parsedInput != nil { - q = q.WithInput(ast.NewTerm(ectx.parsedInput)) - } - - for i := range ectx.resolvers { - q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) - } - - // Cancel query if context is cancelled or deadline is reached. 
- c := topdown.NewCancel() - q = q.WithCancel(c) - exit := make(chan struct{}) - defer close(exit) - go waitForDone(ctx, exit, func() { - c.Cancel() - }) - - queries, support, err := q.PartialRun(ctx) - if err != nil { - return nil, err - } - - if r.regoVersion == ast.RegoV0 && (r.capabilities == nil || r.capabilities.ContainsFeature(ast.FeatureRegoV1Import)) { - // If the target rego-version in v0, and the rego.v1 import is available, then we attempt to apply it to support modules. - - for i, mod := range support { - if mod.RegoVersion() != ast.RegoV0 { - continue - } - - // We can't apply the RegoV0CompatV1 version to the support module if it contains rules or vars that - // conflict with future keywords. - applyRegoVersion := true - - ast.WalkRules(mod, func(r *ast.Rule) bool { - name := r.Head.Name - if name == "" && len(r.Head.Reference) > 0 { - name = r.Head.Reference[0].Value.(ast.Var) - } - if ast.IsFutureKeyword(name.String()) { - applyRegoVersion = false - return true - } - return false - }) - - if applyRegoVersion { - ast.WalkVars(mod, func(v ast.Var) bool { - if ast.IsFutureKeyword(v.String()) { - applyRegoVersion = false - return true - } - return false - }) - } - - if applyRegoVersion { - support[i].SetRegoVersion(ast.RegoV0CompatV1) - } else { - support[i].SetRegoVersion(r.regoVersion) - } - } - } else { - // If the target rego-version is not v0, then we apply the target rego-version to the support modules. - for i := range support { - support[i].SetRegoVersion(r.regoVersion) - } - } - - pq := &PartialQueries{ - Queries: queries, - Support: support, - } - - return pq, nil -} - -func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { - - checkCapture := iteration(query) || len(query) > 1 - - for _, expr := range query { - - if expr.Negated { - continue - } - - if expr.IsAssignment() || expr.IsEquality() { - continue - } - - var capture *ast.Term - - // If the expression can be evaluated as a function, rewrite it to - // capture the return value. E.g., neq(1,2) becomes neq(1,2,x) but - // plus(1,2,x) does not get rewritten. - switch terms := expr.Terms.(type) { - case *ast.Term: - capture = r.generateTermVar() - expr.Terms = ast.Equality.Expr(terms, capture).Terms - r.capture[expr] = capture.Value.(ast.Var) - case []*ast.Term: - tpe := r.compiler.TypeEnv.Get(terms[0]) - if !types.Void(tpe) && types.Arity(tpe) == len(terms)-1 { - capture = r.generateTermVar() - expr.Terms = append(terms, capture) - r.capture[expr] = capture.Value.(ast.Var) - } - } - - if capture != nil && checkCapture { - cpy := expr.Copy() - cpy.Terms = capture - cpy.Generated = true - cpy.With = nil - query.Append(cpy) - } - } - - return query, nil -} - -func (r *Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { - if len(query) != 1 { - return nil, fmt.Errorf("partial evaluation requires single ref (not multiple expressions)") - } - - term, ok := query[0].Terms.(*ast.Term) - if !ok { - return nil, fmt.Errorf("partial evaluation requires ref (not expression)") - } - - ref, ok := term.Value.(ast.Ref) - if !ok { - return nil, fmt.Errorf("partial evaluation requires ref (not %v)", ast.TypeName(term.Value)) - } - - if !ref.IsGround() { - return nil, fmt.Errorf("partial evaluation requires ground ref") - } - - return ast.NewBody(ast.Equality.Expr(ast.Wildcard, term)), nil -} - -// rewriteEqualsForPartialQueryCompile will rewrite == to = in queries. 
Normally -// this wouldn't be done, except for handling queries with the `Partial` API -// where rewriting them can substantially simplify the result, and it is unlikely -// that the caller would need expression values. -func (r *Rego) rewriteEqualsForPartialQueryCompile(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { - doubleEq := ast.Equal.Ref() - unifyOp := ast.Equality.Ref() - ast.WalkExprs(query, func(x *ast.Expr) bool { - if x.IsCall() { - operator := x.Operator() - if operator.Equal(doubleEq) && len(x.Operands()) == 2 { - x.SetOperator(ast.NewTerm(unifyOp)) - } - } - return false - }) - return query, nil -} - -func (r *Rego) generateTermVar() *ast.Term { - r.termVarID++ - prefix := ast.WildcardPrefix - if p := r.targetPlugin(r.target); p != nil { - prefix = wasmVarPrefix - } else if r.target == targetWasm { - prefix = wasmVarPrefix - } - return ast.VarTerm(fmt.Sprintf("%sterm%v", prefix, r.termVarID)) -} - -func (r Rego) hasQuery() bool { - return len(r.query) != 0 || len(r.parsedQuery) != 0 -} - -func (r Rego) hasWasmModule() bool { - for _, b := range r.bundles { - if len(b.WasmModules) > 0 { - return true - } - } - return false -} - -type transactionCloser func(ctx context.Context, err error) error - -// getTxn will conditionally create a read or write transaction suitable for -// the configured Rego object. The returned function should be used to close the txn -// regardless of status. -func (r *Rego) getTxn(ctx context.Context) (storage.Transaction, transactionCloser, error) { - - noopCloser := func(_ context.Context, _ error) error { - return nil // no-op default - } - - if r.txn != nil { - // Externally provided txn - return r.txn, noopCloser, nil - } - - // Create a new transaction.. - params := storage.TransactionParams{} - - // Bundles and data paths may require writing data files or manifests to storage - if len(r.bundles) > 0 || len(r.bundlePaths) > 0 || len(r.loadPaths.paths) > 0 { - - // If we were given a store we will *not* write to it, only do that on one - // which was created automatically on behalf of the user. - if !r.ownStore { - return nil, noopCloser, errors.New("unable to start write transaction when store was provided") - } - - params.Write = true - } - - txn, err := r.store.NewTransaction(ctx, params) - if err != nil { - return nil, noopCloser, err - } - - // Setup a closer function that will abort or commit as needed. - closer := func(ctx context.Context, txnErr error) error { - var err error - - if txnErr == nil && params.Write { - err = r.store.Commit(ctx, txn) - } else { - r.store.Abort(ctx, txn) - } - - // Clear the auto created transaction now that it is closed. - r.txn = nil - - return err - } - - return txn, closer, nil -} - -func (r *Rego) compilerForTxn(ctx context.Context, store storage.Store, txn storage.Transaction) *ast.Compiler { - // Update the compiler to have a valid path conflict check - // for the current context and transaction. 
- return r.compiler.WithPathConflictsCheck(storage.NonEmpty(ctx, store, txn)) -} - -func checkPartialResultForRecursiveRefs(body ast.Body, path ast.Ref) bool { - var stop bool - ast.WalkRefs(body, func(x ast.Ref) bool { - if !stop { - if path.HasPrefix(x) { - stop = true - } - } - return stop - }) - return stop -} - -func isTermVar(v ast.Var) bool { - return strings.HasPrefix(string(v), ast.WildcardPrefix+"term") -} - -func isTermWasmVar(v ast.Var) bool { - return strings.HasPrefix(string(v), wasmVarPrefix+"term") -} - -func waitForDone(ctx context.Context, exit chan struct{}, f func()) { - select { - case <-exit: - return - case <-ctx.Done(): - f() - return - } -} - -type rawModule struct { - filename string - module string -} - -func (m rawModule) Parse() (*ast.Module, error) { - return ast.ParseModule(m.filename, m.module) -} - -func (m rawModule) ParseWithOpts(opts ast.ParserOptions) (*ast.Module, error) { - return ast.ParseModuleWithOpts(m.filename, m.module, opts) -} - -type extraStage struct { - after string - stage ast.QueryCompilerStageDefinition -} - -type refResolver struct { - ref ast.Ref - r resolver.Resolver -} - -func iteration(x interface{}) bool { - - var stopped bool - - vis := ast.NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case *ast.Term: - if ast.IsComprehension(x.Value) { - return true - } - case ast.Ref: - if !stopped { - if bi := ast.BuiltinMap[x.String()]; bi != nil { - if bi.Relation { - stopped = true - return stopped - } - } - for i := 1; i < len(x); i++ { - if _, ok := x[i].Value.(ast.Var); ok { - stopped = true - return stopped - } - } - } - return stopped - } - return stopped - }) - - vis.Walk(x) - - return stopped -} - -func parseStringsToRefs(s []string) ([]ast.Ref, error) { - - refs := make([]ast.Ref, len(s)) - for i := range refs { - var err error - refs[i], err = ast.ParseRef(s[i]) - if err != nil { - return nil, err - } - } - - return refs, nil -} - -// helper function to finish a built-in function call. If an error occurred, -// wrap the error and return it. Otherwise, invoke the iterator if the result -// was defined. -func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, err error, iter func(*ast.Term) error) error { - if err != nil { - var e *HaltError - if errors.As(err, &e) { - tdErr := &topdown.Error{ - Code: topdown.BuiltinErr, - Message: fmt.Sprintf("%v: %v", name, e.Error()), - Location: bctx.Location, - } - return topdown.Halt{Err: tdErr.Wrap(e)} - } - tdErr := &topdown.Error{ - Code: topdown.BuiltinErr, - Message: fmt.Sprintf("%v: %v", name, err.Error()), - Location: bctx.Location, - } - return tdErr.Wrap(err) - } - if result == nil { - return nil - } - return iter(result) -} - -// helper function to return an option that sets a custom built-in function. 
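newFunction below is the internal half of the rego.Function1/Function2/Function3 options used to register custom built-ins. A hedged sketch of the caller-facing side; the builtin name and its behaviour are invented for illustration:

package policy

import (
	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/types"
)

// isAdminOption registers a hypothetical custom builtin example.is_admin(user).
func isAdminOption() func(*rego.Rego) {
	return rego.Function1(
		&rego.Function{
			Name: "example.is_admin",
			Decl: types.NewFunction(types.Args(types.S), types.B),
		},
		func(_ rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
			if s, ok := a.Value.(ast.String); ok {
				return ast.BooleanTerm(string(s) == "admin"), nil
			}
			return nil, nil // undefined for non-string arguments
		},
	)
}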
-func newFunction(decl *Function, f topdown.BuiltinFunc) func(*Rego) { - return func(r *Rego) { - r.builtinDecls[decl.Name] = &ast.Builtin{ - Name: decl.Name, - Decl: decl.Decl, - Nondeterministic: decl.Nondeterministic, - } - r.builtinFuncs[decl.Name] = &topdown.Builtin{ - Decl: r.builtinDecls[decl.Name], - Func: f, - } - } -} - -func generateJSON(term *ast.Term, ectx *EvalContext) (interface{}, error) { - return ast.JSONWithOpt(term.Value, - ast.JSONOpt{ - SortSets: ectx.sortSets, - CopyMaps: ectx.copyMaps, - }) -} - -func (r *Rego) planQuery(queries []ast.Body, evalQueryType queryType) (*ir.Policy, error) { - modules := make([]*ast.Module, 0, len(r.compiler.Modules)) - for _, module := range r.compiler.Modules { - modules = append(modules, module) - } - - decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap)) - - for k, v := range ast.BuiltinMap { - decls[k] = v - } - - for k, v := range r.builtinDecls { - decls[k] = v - } - - const queryName = "eval" // NOTE(tsandall): the query name is arbitrary - - p := planner.New(). - WithQueries([]planner.QuerySet{ - { - Name: queryName, - Queries: queries, - RewrittenVars: r.compiledQueries[evalQueryType].compiler.RewrittenVars(), - }, - }). - WithModules(modules). - WithBuiltinDecls(decls). - WithDebug(r.dump) - - policy, err := p.Plan() - if err != nil { - return nil, err - } - if r.dump != nil { - fmt.Fprintln(r.dump, "PLAN:") - fmt.Fprintln(r.dump, "-----") - err = ir.Pretty(r.dump, policy) - if err != nil { - return nil, err - } - fmt.Fprintln(r.dump) - } - return policy, nil + return v1.WithBuiltinFuncs(bis) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/rego/resultset.go b/constraint/vendor/github.com/open-policy-agent/opa/rego/resultset.go index e60fa6fbe..5c03360df 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/rego/resultset.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/rego/resultset.go @@ -1,90 +1,22 @@ package rego import ( - "fmt" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/rego" ) // ResultSet represents a collection of output from Rego evaluation. An empty // result set represents an undefined query. -type ResultSet []Result +type ResultSet = v1.ResultSet // Vars represents a collection of variable bindings. The keys are the variable // names and the values are the binding values. -type Vars map[string]interface{} - -// WithoutWildcards returns a copy of v with wildcard variables removed. -func (v Vars) WithoutWildcards() Vars { - n := Vars{} - for k, v := range v { - if ast.Var(k).IsWildcard() || ast.Var(k).IsGenerated() { - continue - } - n[k] = v - } - return n -} +type Vars = v1.Vars // Result defines the output of Rego evaluation. -type Result struct { - Expressions []*ExpressionValue `json:"expressions"` - Bindings Vars `json:"bindings,omitempty"` -} - -func newResult() Result { - return Result{ - Bindings: Vars{}, - } -} +type Result = v1.Result // Location defines a position in a Rego query or module. -type Location struct { - Row int `json:"row"` - Col int `json:"col"` -} +type Location = v1.Location // ExpressionValue defines the value of an expression in a Rego query. 
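ResultSet, Vars, Result and the related types become aliases for the v1 types here, so the way results are consumed does not change. A short sketch of the usual consumption pattern, with the binding names invented for the example:

package policy

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/rego"
)

func report(ctx context.Context, pq rego.PreparedEvalQuery, input interface{}) error {
	rs, err := pq.Eval(ctx, rego.EvalInput(input))
	if err != nil {
		return err
	}
	if len(rs) == 0 {
		fmt.Println("undefined") // an empty ResultSet means the query was undefined
		return nil
	}
	for _, result := range rs {
		for name, value := range result.Bindings {
			fmt.Printf("binding %s = %v\n", name, value)
		}
		for _, expr := range result.Expressions {
			fmt.Printf("expr %q = %v\n", expr.Text, expr.Value)
		}
	}
	// For boolean policies with no bindings, rs.Allowed() collapses the check.
	fmt.Println("allowed:", rs.Allowed())
	return nil
}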
-type ExpressionValue struct { - Value interface{} `json:"value"` - Text string `json:"text"` - Location *Location `json:"location"` -} - -func newExpressionValue(expr *ast.Expr, value interface{}) *ExpressionValue { - result := &ExpressionValue{ - Value: value, - } - if expr.Location != nil { - result.Text = string(expr.Location.Text) - result.Location = &Location{ - Row: expr.Location.Row, - Col: expr.Location.Col, - } - } - return result -} - -func (ev *ExpressionValue) String() string { - return fmt.Sprint(ev.Value) -} - -// Allowed is a helper method that'll return true if all of these conditions hold: -// - the result set only has one element -// - there is only one expression in the result set's only element -// - that expression has the value `true` -// - there are no bindings. -// -// If bindings are present, this will yield `false`: it would be a pitfall to -// return `true` for a query like `data.authz.allow = x`, which always has result -// set element with value true, but could also have a binding `x: false`. -func (rs ResultSet) Allowed() bool { - if len(rs) == 1 && len(rs[0].Bindings) == 0 { - if exprs := rs[0].Expressions; len(exprs) == 1 { - if b, ok := exprs[0].Value.(bool); ok { - return b - } - } - } - return false -} +type ExpressionValue = v1.ExpressionValue diff --git a/constraint/vendor/github.com/open-policy-agent/opa/resolver/interface.go b/constraint/vendor/github.com/open-policy-agent/opa/resolver/interface.go deleted file mode 100644 index fc02329f5..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/resolver/interface.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package resolver - -import ( - "context" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" -) - -// Resolver defines an external value resolver for OPA evaluations. -type Resolver interface { - Eval(context.Context, Input) (Result, error) -} - -// Input as provided to a Resolver instance when evaluating. -type Input struct { - Ref ast.Ref - Input *ast.Term - Metrics metrics.Metrics -} - -// Result of resolving a ref. -type Result struct { - Value ast.Value -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/doc.go index 6fa2f86d9..c33db689e 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/storage/doc.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/storage/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package storage exposes the policy engine's storage layer. +// +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. 
package storage diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/errors.go index 8c789052e..1403b3a98 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/storage/errors.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/storage/errors.go @@ -5,118 +5,69 @@ package storage import ( - "fmt" + v1 "github.com/open-policy-agent/opa/v1/storage" ) const ( // InternalErr indicates an unknown, internal error has occurred. - InternalErr = "storage_internal_error" + InternalErr = v1.InternalErr // NotFoundErr indicates the path used in the storage operation does not // locate a document. - NotFoundErr = "storage_not_found_error" + NotFoundErr = v1.NotFoundErr // WriteConflictErr indicates a write on the path enocuntered a conflicting // value inside the transaction. - WriteConflictErr = "storage_write_conflict_error" + WriteConflictErr = v1.WriteConflictErr // InvalidPatchErr indicates an invalid patch/write was issued. The patch // was rejected. - InvalidPatchErr = "storage_invalid_patch_error" + InvalidPatchErr = v1.InvalidPatchErr // InvalidTransactionErr indicates an invalid operation was performed // inside of the transaction. - InvalidTransactionErr = "storage_invalid_txn_error" + InvalidTransactionErr = v1.InvalidTransactionErr // TriggersNotSupportedErr indicates the caller attempted to register a // trigger against a store that does not support them. - TriggersNotSupportedErr = "storage_triggers_not_supported_error" + TriggersNotSupportedErr = v1.TriggersNotSupportedErr // WritesNotSupportedErr indicate the caller attempted to perform a write // against a store that does not support them. - WritesNotSupportedErr = "storage_writes_not_supported_error" + WritesNotSupportedErr = v1.WritesNotSupportedErr // PolicyNotSupportedErr indicate the caller attempted to perform a policy // management operation against a store that does not support them. - PolicyNotSupportedErr = "storage_policy_not_supported_error" + PolicyNotSupportedErr = v1.PolicyNotSupportedErr ) // Error is the error type returned by the storage layer. -type Error struct { - Code string `json:"code"` - Message string `json:"message"` -} - -func (err *Error) Error() string { - if err.Message != "" { - return fmt.Sprintf("%v: %v", err.Code, err.Message) - } - return err.Code -} +type Error = v1.Error // IsNotFound returns true if this error is a NotFoundErr. func IsNotFound(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == NotFoundErr - } - return false + return v1.IsNotFound(err) } // IsWriteConflictError returns true if this error a WriteConflictErr. func IsWriteConflictError(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == WriteConflictErr - } - return false + return v1.IsWriteConflictError(err) } // IsInvalidPatch returns true if this error is a InvalidPatchErr. func IsInvalidPatch(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == InvalidPatchErr - } - return false + return v1.IsInvalidPatch(err) } // IsInvalidTransaction returns true if this error is a InvalidTransactionErr. func IsInvalidTransaction(err error) bool { - switch err := err.(type) { - case *Error: - return err.Code == InvalidTransactionErr - } - return false + return v1.IsInvalidTransaction(err) } // IsIndexingNotSupported is a stub for backwards-compatibility. 
// // Deprecated: We no longer return IndexingNotSupported errors, so it is // unnecessary to check for them. -func IsIndexingNotSupported(error) bool { return false } - -func writeConflictError(path Path) *Error { - return &Error{ - Code: WriteConflictErr, - Message: path.String(), - } -} - -func triggersNotSupportedError() *Error { - return &Error{ - Code: TriggersNotSupportedErr, - } -} - -func writesNotSupportedError() *Error { - return &Error{ - Code: WritesNotSupportedErr, - } -} - -func policyNotSupportedError() *Error { - return &Error{ - Code: PolicyNotSupportedErr, - } +func IsIndexingNotSupported(err error) bool { + return v1.IsIndexingNotSupported(err) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/doc.go new file mode 100644 index 000000000..5f536b66d --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/doc.go @@ -0,0 +1,8 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package inmem diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go index b6433795a..0a41b9d0d 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/inmem.go @@ -16,391 +16,41 @@ package inmem import ( - "context" - "fmt" "io" - "path/filepath" - "strings" - "sync" - "sync/atomic" - "github.com/open-policy-agent/opa/internal/merge" "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/storage/inmem" ) // New returns an empty in-memory store. func New() storage.Store { - return NewWithOpts() + return v1.New() } // NewWithOpts returns an empty in-memory store, with extra options passed. func NewWithOpts(opts ...Opt) storage.Store { - s := &store{ - data: map[string]interface{}{}, - triggers: map[*handle]storage.TriggerConfig{}, - policies: map[string][]byte{}, - roundTripOnWrite: true, - } - - for _, opt := range opts { - opt(s) - } - - return s + return v1.NewWithOpts(opts...) } // NewFromObject returns a new in-memory store from the supplied data object. func NewFromObject(data map[string]interface{}) storage.Store { - return NewFromObjectWithOpts(data) + return v1.NewFromObject(data) } -// NewFromObject returns a new in-memory store from the supplied data object, with the +// NewFromObjectWithOpts returns a new in-memory store from the supplied data object, with the // options passed. func NewFromObjectWithOpts(data map[string]interface{}, opts ...Opt) storage.Store { - db := NewWithOpts(opts...) 
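The inmem constructors and the storage error helpers are likewise plain pass-throughs now. A minimal sketch of creating a store from a document and checking for a not-found error; the data and path are made up for the example:

package policy

import (
	"context"

	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

func lookupRole(ctx context.Context) (interface{}, error) {
	store := inmem.NewFromObject(map[string]interface{}{
		"roles": map[string]interface{}{"alice": "admin"},
	})

	txn, err := store.NewTransaction(ctx) // read-only transaction
	if err != nil {
		return nil, err
	}
	defer store.Abort(ctx, txn)

	v, err := store.Read(ctx, txn, storage.MustParsePath("/roles/bob"))
	if storage.IsNotFound(err) {
		return nil, nil // bob has no role; not an error for the caller
	}
	return v, err
}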
- ctx := context.Background() - txn, err := db.NewTransaction(ctx, storage.WriteParams) - if err != nil { - panic(err) - } - if err := db.Write(ctx, txn, storage.AddOp, storage.Path{}, data); err != nil { - panic(err) - } - if err := db.Commit(ctx, txn); err != nil { - panic(err) - } - return db + return v1.NewFromObjectWithOpts(data, opts...) } // NewFromReader returns a new in-memory store from a reader that produces a // JSON serialized object. This function is for test purposes. func NewFromReader(r io.Reader) storage.Store { - return NewFromReaderWithOpts(r) + return v1.NewFromReader(r) } // NewFromReader returns a new in-memory store from a reader that produces a // JSON serialized object, with extra options. This function is for test purposes. func NewFromReaderWithOpts(r io.Reader, opts ...Opt) storage.Store { - d := util.NewJSONDecoder(r) - var data map[string]interface{} - if err := d.Decode(&data); err != nil { - panic(err) - } - return NewFromObjectWithOpts(data, opts...) -} - -type store struct { - rmu sync.RWMutex // reader-writer lock - wmu sync.Mutex // writer lock - xid uint64 // last generated transaction id - data map[string]interface{} // raw data - policies map[string][]byte // raw policies - triggers map[*handle]storage.TriggerConfig // registered triggers - - // roundTripOnWrite, if true, means that every call to Write round trips the - // data through JSON before adding the data to the store. Defaults to true. - roundTripOnWrite bool -} - -type handle struct { - db *store -} - -func (db *store) NewTransaction(_ context.Context, params ...storage.TransactionParams) (storage.Transaction, error) { - var write bool - var ctx *storage.Context - if len(params) > 0 { - write = params[0].Write - ctx = params[0].Context - } - xid := atomic.AddUint64(&db.xid, uint64(1)) - if write { - db.wmu.Lock() - } else { - db.rmu.RLock() - } - return newTransaction(xid, write, ctx, db), nil -} - -// Truncate implements the storage.Store interface. This method must be called within a transaction. -func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params storage.TransactionParams, it storage.Iterator) error { - var update *storage.Update - var err error - mergedData := map[string]interface{}{} - - underlying, err := db.underlying(txn) - if err != nil { - return err - } - - for { - update, err = it.Next() - if err != nil { - break - } - - if update.IsPolicy { - err = underlying.UpsertPolicy(strings.TrimLeft(update.Path.String(), "/"), update.Value) - if err != nil { - return err - } - } else { - var value interface{} - err = util.Unmarshal(update.Value, &value) - if err != nil { - return err - } - - var key []string - dirpath := strings.TrimLeft(update.Path.String(), "/") - if len(dirpath) > 0 { - key = strings.Split(dirpath, "/") - } - - if value != nil { - obj, err := mktree(key, value) - if err != nil { - return err - } - - merged, ok := merge.InterfaceMaps(mergedData, obj) - if !ok { - return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...)) - } - mergedData = merged - } - } - } - - if err != nil && err != io.EOF { - return err - } - - // For backwards compatibility, check if `RootOverwrite` was configured. 
- if params.RootOverwrite { - newPath, ok := storage.ParsePathEscaped("/") - if !ok { - return fmt.Errorf("storage path invalid: %v", newPath) - } - return underlying.Write(storage.AddOp, newPath, mergedData) - } - - for _, root := range params.BasePaths { - newPath, ok := storage.ParsePathEscaped("/" + root) - if !ok { - return fmt.Errorf("storage path invalid: %v", newPath) - } - - if value, ok := lookup(newPath, mergedData); ok { - if len(newPath) > 0 { - if err := storage.MakeDir(ctx, db, txn, newPath[:len(newPath)-1]); err != nil { - return err - } - } - if err := underlying.Write(storage.AddOp, newPath, value); err != nil { - return err - } - } - } - return nil -} - -func (db *store) Commit(ctx context.Context, txn storage.Transaction) error { - underlying, err := db.underlying(txn) - if err != nil { - return err - } - if underlying.write { - db.rmu.Lock() - event := underlying.Commit() - db.runOnCommitTriggers(ctx, txn, event) - // Mark the transaction stale after executing triggers, so they can - // perform store operations if needed. - underlying.stale = true - db.rmu.Unlock() - db.wmu.Unlock() - } else { - db.rmu.RUnlock() - } - return nil -} - -func (db *store) Abort(_ context.Context, txn storage.Transaction) { - underlying, err := db.underlying(txn) - if err != nil { - panic(err) - } - underlying.stale = true - if underlying.write { - db.wmu.Unlock() - } else { - db.rmu.RUnlock() - } -} - -func (db *store) ListPolicies(_ context.Context, txn storage.Transaction) ([]string, error) { - underlying, err := db.underlying(txn) - if err != nil { - return nil, err - } - return underlying.ListPolicies(), nil -} - -func (db *store) GetPolicy(_ context.Context, txn storage.Transaction, id string) ([]byte, error) { - underlying, err := db.underlying(txn) - if err != nil { - return nil, err - } - return underlying.GetPolicy(id) -} - -func (db *store) UpsertPolicy(_ context.Context, txn storage.Transaction, id string, bs []byte) error { - underlying, err := db.underlying(txn) - if err != nil { - return err - } - return underlying.UpsertPolicy(id, bs) -} - -func (db *store) DeletePolicy(_ context.Context, txn storage.Transaction, id string) error { - underlying, err := db.underlying(txn) - if err != nil { - return err - } - if _, err := underlying.GetPolicy(id); err != nil { - return err - } - return underlying.DeletePolicy(id) -} - -func (db *store) Register(_ context.Context, txn storage.Transaction, config storage.TriggerConfig) (storage.TriggerHandle, error) { - underlying, err := db.underlying(txn) - if err != nil { - return nil, err - } - if !underlying.write { - return nil, &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "triggers must be registered with a write transaction", - } - } - h := &handle{db} - db.triggers[h] = config - return h, nil -} - -func (db *store) Read(_ context.Context, txn storage.Transaction, path storage.Path) (interface{}, error) { - underlying, err := db.underlying(txn) - if err != nil { - return nil, err - } - return underlying.Read(path) -} - -func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.PatchOp, path storage.Path, value interface{}) error { - underlying, err := db.underlying(txn) - if err != nil { - return err - } - val := util.Reference(value) - if db.roundTripOnWrite { - if err := util.RoundTrip(val); err != nil { - return err - } - } - return underlying.Write(op, path, *val) -} - -func (h *handle) Unregister(_ context.Context, txn storage.Transaction) { - underlying, err := h.db.underlying(txn) - if err 
!= nil { - panic(err) - } - if !underlying.write { - panic(&storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "triggers must be unregistered with a write transaction", - }) - } - delete(h.db.triggers, h) -} - -func (db *store) runOnCommitTriggers(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) { - for _, t := range db.triggers { - t.OnCommit(ctx, txn, event) - } -} - -func (db *store) underlying(txn storage.Transaction) (*transaction, error) { - underlying, ok := txn.(*transaction) - if !ok { - return nil, &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: fmt.Sprintf("unexpected transaction type %T", txn), - } - } - if underlying.db != db { - return nil, &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "unknown transaction", - } - } - if underlying.stale { - return nil, &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "stale transaction", - } - } - return underlying, nil -} - -const rootMustBeObjectMsg = "root must be object" -const rootCannotBeRemovedMsg = "root cannot be removed" - -func invalidPatchError(f string, a ...interface{}) *storage.Error { - return &storage.Error{ - Code: storage.InvalidPatchErr, - Message: fmt.Sprintf(f, a...), - } -} - -func mktree(path []string, value interface{}) (map[string]interface{}, error) { - if len(path) == 0 { - // For 0 length path the value is the full tree. - obj, ok := value.(map[string]interface{}) - if !ok { - return nil, invalidPatchError(rootMustBeObjectMsg) - } - return obj, nil - } - - dir := map[string]interface{}{} - for i := len(path) - 1; i > 0; i-- { - dir[path[i]] = value - value = dir - dir = map[string]interface{}{} - } - dir[path[0]] = value - - return dir, nil -} - -func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) { - if len(path) == 0 { - return data, true - } - for i := 0; i < len(path)-1; i++ { - value, ok := data[path[i]] - if !ok { - return nil, false - } - obj, ok := value.(map[string]interface{}) - if !ok { - return nil, false - } - data = obj - } - value, ok := data[path[len(path)-1]] - return value, ok + return v1.NewFromReaderWithOpts(r, opts...) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go index fb8dc8e2b..43f03ef27 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/opts.go @@ -1,7 +1,9 @@ package inmem +import v1 "github.com/open-policy-agent/opa/v1/storage/inmem" + // An Opt modifies store at instantiation. -type Opt func(*store) +type Opt = v1.Opt // OptRoundTripOnWrite sets whether incoming objects written to store are // round-tripped through JSON to ensure they are serializable to JSON. @@ -19,7 +21,15 @@ type Opt func(*store) // and that mutations happening to the objects after they have been passed into // Write() don't affect their logic. func OptRoundTripOnWrite(enabled bool) Opt { - return func(s *store) { - s.roundTripOnWrite = enabled - } + return v1.OptRoundTripOnWrite(enabled) +} + +// OptReturnASTValuesOnRead sets whether data values added to the store should be +// eagerly converted to AST values, which are then returned on read. +// +// When enabled, this feature does not sanity check data before converting it to AST values, +// which may result in panics if the data is not valid. 
Callers should ensure that passed data +// can be serialized to AST values; otherwise, it's recommended to also enable OptRoundTripOnWrite. +func OptReturnASTValuesOnRead(enabled bool) Opt { + return v1.OptReturnASTValuesOnRead(enabled) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go deleted file mode 100644 index 3a6101829..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/storage/inmem/txn.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2017 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package inmem - -import ( - "container/list" - "encoding/json" - "strconv" - - "github.com/open-policy-agent/opa/internal/deepcopy" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/internal/errors" - "github.com/open-policy-agent/opa/storage/internal/ptr" -) - -// transaction implements the low-level read/write operations on the in-memory -// store and contains the state required for pending transactions. -// -// For write transactions, the struct contains a logical set of updates -// performed by write operations in the transaction. Each write operation -// compacts the set such that two updates never overlap: -// -// - If new update path is a prefix of existing update path, existing update is -// removed, new update is added. -// -// - If existing update path is a prefix of new update path, existing update is -// modified. -// -// - Otherwise, new update is added. -// -// Read transactions do not require any special handling and simply passthrough -// to the underlying store. Read transactions do not support upgrade. -type transaction struct { - xid uint64 - write bool - stale bool - db *store - updates *list.List - policies map[string]policyUpdate - context *storage.Context -} - -type policyUpdate struct { - value []byte - remove bool -} - -func newTransaction(xid uint64, write bool, context *storage.Context, db *store) *transaction { - return &transaction{ - xid: xid, - write: write, - db: db, - policies: map[string]policyUpdate{}, - updates: list.New(), - context: context, - } -} - -func (txn *transaction) ID() uint64 { - return txn.xid -} - -func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value interface{}) error { - - if !txn.write { - return &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "data write during read transaction", - } - } - - if len(path) == 0 { - return txn.updateRoot(op, value) - } - - for curr := txn.updates.Front(); curr != nil; { - update := curr.Value.(*update) - - // Check if new update masks existing update exactly. In this case, the - // existing update can be removed and no other updates have to be - // visited (because no two updates overlap.) - if update.path.Equal(path) { - if update.remove { - if op != storage.AddOp { - return errors.NewNotFoundError(path) - } - } - txn.updates.Remove(curr) - break - } - - // Check if new update masks existing update. In this case, the - // existing update has to be removed but other updates may overlap, so - // we must continue. - if update.path.HasPrefix(path) { - remove := curr - curr = curr.Next() - txn.updates.Remove(remove) - continue - } - - // Check if new update modifies existing update. In this case, the - // existing update is mutated. 
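Editor's note: the two options wrapped above can be combined at construction time. A small sketch (illustrative; OptReturnASTValuesOnRead is newly exposed by the v1-backed store and is not exercised elsewhere in this diff):

```go
package main

import (
	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

func newStores() (fast, astBacked storage.Store) {
	// Skip the JSON round-trip when the caller guarantees the written data is
	// already JSON-serializable and is not mutated after Write().
	fast = inmem.NewWithOpts(inmem.OptRoundTripOnWrite(false))

	// Keep data as AST values internally so reads return ast.Value. As the doc
	// comment above warns, leave round-tripping on unless the input is known safe.
	astBacked = inmem.NewWithOpts(
		inmem.OptRoundTripOnWrite(true),
		inmem.OptReturnASTValuesOnRead(true),
	)
	return fast, astBacked
}
```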
- if path.HasPrefix(update.path) { - if update.remove { - return errors.NewNotFoundError(path) - } - suffix := path[len(update.path):] - newUpdate, err := newUpdate(update.value, op, suffix, 0, value) - if err != nil { - return err - } - update.value = newUpdate.Apply(update.value) - return nil - } - - curr = curr.Next() - } - - update, err := newUpdate(txn.db.data, op, path, 0, value) - if err != nil { - return err - } - - txn.updates.PushFront(update) - return nil -} - -func (txn *transaction) updateRoot(op storage.PatchOp, value interface{}) error { - if op == storage.RemoveOp { - return invalidPatchError(rootCannotBeRemovedMsg) - } - if _, ok := value.(map[string]interface{}); !ok { - return invalidPatchError(rootMustBeObjectMsg) - } - txn.updates.Init() - txn.updates.PushFront(&update{ - path: storage.Path{}, - remove: false, - value: value, - }) - return nil -} - -func (txn *transaction) Commit() (result storage.TriggerEvent) { - result.Context = txn.context - for curr := txn.updates.Front(); curr != nil; curr = curr.Next() { - action := curr.Value.(*update) - updated := action.Apply(txn.db.data) - txn.db.data = updated.(map[string]interface{}) - - result.Data = append(result.Data, storage.DataEvent{ - Path: action.path, - Data: action.value, - Removed: action.remove, - }) - } - for id, update := range txn.policies { - if update.remove { - delete(txn.db.policies, id) - } else { - txn.db.policies[id] = update.value - } - - result.Policy = append(result.Policy, storage.PolicyEvent{ - ID: id, - Data: update.value, - Removed: update.remove, - }) - } - return result -} - -func (txn *transaction) Read(path storage.Path) (interface{}, error) { - - if !txn.write { - return ptr.Ptr(txn.db.data, path) - } - - merge := []*update{} - - for curr := txn.updates.Front(); curr != nil; curr = curr.Next() { - - update := curr.Value.(*update) - - if path.HasPrefix(update.path) { - if update.remove { - return nil, errors.NewNotFoundError(path) - } - return ptr.Ptr(update.value, path[len(update.path):]) - } - - if update.path.HasPrefix(path) { - merge = append(merge, update) - } - } - - data, err := ptr.Ptr(txn.db.data, path) - - if err != nil { - return nil, err - } - - if len(merge) == 0 { - return data, nil - } - - cpy := deepcopy.DeepCopy(data) - - for _, update := range merge { - cpy = update.Relative(path).Apply(cpy) - } - - return cpy, nil -} - -func (txn *transaction) ListPolicies() []string { - var ids []string - for id := range txn.db.policies { - if _, ok := txn.policies[id]; !ok { - ids = append(ids, id) - } - } - for id, update := range txn.policies { - if !update.remove { - ids = append(ids, id) - } - } - return ids -} - -func (txn *transaction) GetPolicy(id string) ([]byte, error) { - if update, ok := txn.policies[id]; ok { - if !update.remove { - return update.value, nil - } - return nil, errors.NewNotFoundErrorf("policy id %q", id) - } - if exist, ok := txn.db.policies[id]; ok { - return exist, nil - } - return nil, errors.NewNotFoundErrorf("policy id %q", id) -} - -func (txn *transaction) UpsertPolicy(id string, bs []byte) error { - if !txn.write { - return &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "policy write during read transaction", - } - } - txn.policies[id] = policyUpdate{bs, false} - return nil -} - -func (txn *transaction) DeletePolicy(id string) error { - if !txn.write { - return &storage.Error{ - Code: storage.InvalidTransactionErr, - Message: "policy write during read transaction", - } - } - txn.policies[id] = policyUpdate{nil, true} - return nil -} - 
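Editor's note: the update-compaction rules described in the deleted comments are observable through the public API: writes inside one transaction that target nested paths are folded into a single pending update, and in-transaction reads see the merged result. A sketch under that assumption:

```go
package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.New()

	err := storage.Txn(ctx, store, storage.WriteParams, func(txn storage.Transaction) error {
		if err := store.Write(ctx, txn, storage.AddOp, storage.MustParsePath("/a"),
			map[string]interface{}{"b": 1}); err != nil {
			return err
		}
		// Targets a child of the pending update at /a, so the existing update is
		// modified rather than duplicated.
		if err := store.Write(ctx, txn, storage.AddOp, storage.MustParsePath("/a/c"), 2); err != nil {
			return err
		}
		v, err := store.Read(ctx, txn, storage.MustParsePath("/a"))
		if err != nil {
			return err
		}
		fmt.Println(v) // map[b:1 c:2]
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```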
-// update contains state associated with an update to be applied to the -// in-memory data store. -type update struct { - path storage.Path // data path modified by update - remove bool // indicates whether update removes the value at path - value interface{} // value to add/replace at path (ignored if remove is true) -} - -func newUpdate(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (*update, error) { - - switch data := data.(type) { - case map[string]interface{}: - return newUpdateObject(data, op, path, idx, value) - - case []interface{}: - return newUpdateArray(data, op, path, idx, value) - - case nil, bool, json.Number, string: - return nil, errors.NewNotFoundError(path) - } - - return nil, &storage.Error{ - Code: storage.InternalErr, - Message: "invalid data value encountered", - } -} - -func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (*update, error) { - - if idx == len(path)-1 { - if path[idx] == "-" || path[idx] == strconv.Itoa(len(data)) { - if op != storage.AddOp { - return nil, invalidPatchError("%v: invalid patch path", path) - } - cpy := make([]interface{}, len(data)+1) - copy(cpy, data) - cpy[len(data)] = value - return &update{path[:len(path)-1], false, cpy}, nil - } - - pos, err := ptr.ValidateArrayIndex(data, path[idx], path) - if err != nil { - return nil, err - } - - switch op { - case storage.AddOp: - cpy := make([]interface{}, len(data)+1) - copy(cpy[:pos], data[:pos]) - copy(cpy[pos+1:], data[pos:]) - cpy[pos] = value - return &update{path[:len(path)-1], false, cpy}, nil - - case storage.RemoveOp: - cpy := make([]interface{}, len(data)-1) - copy(cpy[:pos], data[:pos]) - copy(cpy[pos:], data[pos+1:]) - return &update{path[:len(path)-1], false, cpy}, nil - - default: - cpy := make([]interface{}, len(data)) - copy(cpy, data) - cpy[pos] = value - return &update{path[:len(path)-1], false, cpy}, nil - } - } - - pos, err := ptr.ValidateArrayIndex(data, path[idx], path) - if err != nil { - return nil, err - } - - return newUpdate(data[pos], op, path, idx+1, value) -} - -func newUpdateObject(data map[string]interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (*update, error) { - - if idx == len(path)-1 { - switch op { - case storage.ReplaceOp, storage.RemoveOp: - if _, ok := data[path[idx]]; !ok { - return nil, errors.NewNotFoundError(path) - } - } - return &update{path, op == storage.RemoveOp, value}, nil - } - - if data, ok := data[path[idx]]; ok { - return newUpdate(data, op, path, idx+1, value) - } - - return nil, errors.NewNotFoundError(path) -} -func (u *update) Apply(data interface{}) interface{} { - if len(u.path) == 0 { - return u.value - } - parent, err := ptr.Ptr(data, u.path[:len(u.path)-1]) - if err != nil { - panic(err) - } - key := u.path[len(u.path)-1] - if u.remove { - obj := parent.(map[string]interface{}) - delete(obj, key) - return data - } - switch parent := parent.(type) { - case map[string]interface{}: - if parent == nil { - parent = make(map[string]interface{}, 1) - } - parent[key] = u.value - case []interface{}: - idx, err := strconv.Atoi(key) - if err != nil { - panic(err) - } - parent[idx] = u.value - } - return data -} - -func (u *update) Relative(path storage.Path) *update { - cpy := *u - cpy.path = cpy.path[len(path):] - return &cpy -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/interface.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/interface.go index 6baca9a59..0192c459c 
100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/storage/interface.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/storage/interface.go @@ -5,243 +5,82 @@ package storage import ( - "context" - - "github.com/open-policy-agent/opa/metrics" + v1 "github.com/open-policy-agent/opa/v1/storage" ) // Transaction defines the interface that identifies a consistent snapshot over // the policy engine's storage layer. -type Transaction interface { - ID() uint64 -} +type Transaction = v1.Transaction // Store defines the interface for the storage layer's backend. -type Store interface { - Trigger - Policy - - // NewTransaction is called create a new transaction in the store. - NewTransaction(context.Context, ...TransactionParams) (Transaction, error) - - // Read is called to fetch a document referred to by path. - Read(context.Context, Transaction, Path) (interface{}, error) - - // Write is called to modify a document referred to by path. - Write(context.Context, Transaction, PatchOp, Path, interface{}) error - - // Commit is called to finish the transaction. If Commit returns an error, the - // transaction must be automatically aborted by the Store implementation. - Commit(context.Context, Transaction) error - - // Truncate is called to make a copy of the underlying store, write documents in the new store - // by creating multiple transactions in the new store as needed and finally swapping - // over to the new storage instance. This method must be called within a transaction on the original store. - Truncate(context.Context, Transaction, TransactionParams, Iterator) error - - // Abort is called to cancel the transaction. - Abort(context.Context, Transaction) -} +type Store = v1.Store // MakeDirer defines the interface a Store could realize to override the // generic MakeDir functionality in storage.MakeDir -type MakeDirer interface { - MakeDir(context.Context, Transaction, Path) error -} +type MakeDirer = v1.MakeDirer // TransactionParams describes a new transaction. -type TransactionParams struct { - - // BasePaths indicates the top-level paths where write operations will be performed in this transaction. - BasePaths []string - - // RootOverwrite is deprecated. Use BasePaths instead. - RootOverwrite bool - - // Write indicates if this transaction will perform any write operations. - Write bool - - // Context contains key/value pairs passed to triggers. - Context *Context -} +type TransactionParams = v1.TransactionParams // Context is a simple container for key/value pairs. -type Context struct { - values map[interface{}]interface{} -} +type Context = v1.Context // NewContext returns a new context object. func NewContext() *Context { - return &Context{ - values: map[interface{}]interface{}{}, - } -} - -// Get returns the key value in the context. -func (ctx *Context) Get(key interface{}) interface{} { - if ctx == nil { - return nil - } - return ctx.values[key] -} - -// Put adds a key/value pair to the context. -func (ctx *Context) Put(key, value interface{}) { - ctx.values[key] = value -} - -var metricsKey = struct{}{} - -// WithMetrics allows passing metrics via the Context. -// It puts the metrics object in the ctx, and returns the same -// ctx (not a copy) for convenience. -func (ctx *Context) WithMetrics(m metrics.Metrics) *Context { - ctx.values[metricsKey] = m - return ctx -} - -// Metrics() allows using a Context's metrics. Returns nil if metrics -// were not attached to the Context. 
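Editor's note: because Store and Transaction become plain type aliases, the explicit transaction lifecycle is unchanged for callers. A minimal sketch (paths and values invented for illustration):

```go
package main

import (
	"context"

	"github.com/open-policy-agent/opa/storage"
)

func addServer(ctx context.Context, store storage.Store) error {
	txn, err := store.NewTransaction(ctx, storage.WriteParams)
	if err != nil {
		return err
	}
	if err := store.Write(ctx, txn, storage.AddOp, storage.MustParsePath("/servers"),
		[]interface{}{map[string]interface{}{"id": "s1"}}); err != nil {
		store.Abort(ctx, txn) // release the transaction explicitly on failure
		return err
	}
	// Per the Store contract, a failed Commit is aborted by the implementation.
	return store.Commit(ctx, txn)
}
```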
-func (ctx *Context) Metrics() metrics.Metrics { - if m, ok := ctx.values[metricsKey]; ok { - if met, ok := m.(metrics.Metrics); ok { - return met - } - } - return nil + return v1.NewContext() } // WriteParams specifies the TransactionParams for a write transaction. -var WriteParams = TransactionParams{ - Write: true, -} +var WriteParams = v1.WriteParams // PatchOp is the enumeration of supposed modifications. -type PatchOp int +type PatchOp = v1.PatchOp // Patch supports add, remove, and replace operations. const ( - AddOp PatchOp = iota - RemoveOp = iota - ReplaceOp = iota + AddOp = v1.AddOp + RemoveOp = v1.RemoveOp + ReplaceOp = v1.ReplaceOp ) // WritesNotSupported provides a default implementation of the write // interface which may be used if the backend does not support writes. -type WritesNotSupported struct{} - -func (WritesNotSupported) Write(context.Context, Transaction, PatchOp, Path, interface{}) error { - return writesNotSupportedError() -} +type WritesNotSupported = v1.WritesNotSupported // Policy defines the interface for policy module storage. -type Policy interface { - ListPolicies(context.Context, Transaction) ([]string, error) - GetPolicy(context.Context, Transaction, string) ([]byte, error) - UpsertPolicy(context.Context, Transaction, string, []byte) error - DeletePolicy(context.Context, Transaction, string) error -} +type Policy = v1.Policy // PolicyNotSupported provides a default implementation of the policy interface // which may be used if the backend does not support policy storage. -type PolicyNotSupported struct{} - -// ListPolicies always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) ListPolicies(context.Context, Transaction) ([]string, error) { - return nil, policyNotSupportedError() -} - -// GetPolicy always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) GetPolicy(context.Context, Transaction, string) ([]byte, error) { - return nil, policyNotSupportedError() -} - -// UpsertPolicy always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) UpsertPolicy(context.Context, Transaction, string, []byte) error { - return policyNotSupportedError() -} - -// DeletePolicy always returns a PolicyNotSupportedErr. -func (PolicyNotSupported) DeletePolicy(context.Context, Transaction, string) error { - return policyNotSupportedError() -} +type PolicyNotSupported = v1.PolicyNotSupported // PolicyEvent describes a change to a policy. -type PolicyEvent struct { - ID string - Data []byte - Removed bool -} +type PolicyEvent = v1.PolicyEvent // DataEvent describes a change to a base data document. -type DataEvent struct { - Path Path - Data interface{} - Removed bool -} +type DataEvent = v1.DataEvent // TriggerEvent describes the changes that caused the trigger to be invoked. -type TriggerEvent struct { - Policy []PolicyEvent - Data []DataEvent - Context *Context -} - -// IsZero returns true if the TriggerEvent indicates no changes occurred. This -// function is primarily for test purposes. -func (e TriggerEvent) IsZero() bool { - return !e.PolicyChanged() && !e.DataChanged() -} - -// PolicyChanged returns true if the trigger was caused by a policy change. -func (e TriggerEvent) PolicyChanged() bool { - return len(e.Policy) > 0 -} - -// DataChanged returns true if the trigger was caused by a data change. -func (e TriggerEvent) DataChanged() bool { - return len(e.Data) > 0 -} +type TriggerEvent = v1.TriggerEvent // TriggerConfig contains the trigger registration configuration. 
-type TriggerConfig struct { - - // OnCommit is invoked when a transaction is successfully committed. The - // callback is invoked with a handle to the write transaction that - // successfully committed before other clients see the changes. - OnCommit func(context.Context, Transaction, TriggerEvent) -} +type TriggerConfig = v1.TriggerConfig // Trigger defines the interface that stores implement to register for change // notifications when the store is changed. -type Trigger interface { - Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) -} +type Trigger = v1.Trigger // TriggersNotSupported provides default implementations of the Trigger // interface which may be used if the backend does not support triggers. -type TriggersNotSupported struct{} - -// Register always returns an error indicating triggers are not supported. -func (TriggersNotSupported) Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) { - return nil, triggersNotSupportedError() -} +type TriggersNotSupported = v1.TriggersNotSupported // TriggerHandle defines the interface that can be used to unregister triggers that have // been registered on a Store. -type TriggerHandle interface { - Unregister(context.Context, Transaction) -} +type TriggerHandle = v1.TriggerHandle // Iterator defines the interface that can be used to read files from a directory starting with // files at the base of the directory, then sub-directories etc. -type Iterator interface { - Next() (*Update, error) -} +type Iterator = v1.Iterator // Update contains information about a file -type Update struct { - Path Path - Value []byte - IsPolicy bool -} +type Update = v1.Update diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go deleted file mode 100644 index 0bba74b90..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/storage/internal/errors/errors.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2021 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package errors contains reusable error-related code for the storage layer. -package errors - -import ( - "fmt" - - "github.com/open-policy-agent/opa/storage" -) - -const ArrayIndexTypeMsg = "array index must be integer" -const DoesNotExistMsg = "document does not exist" -const OutOfRangeMsg = "array index out of range" - -func NewNotFoundError(path storage.Path) *storage.Error { - return NewNotFoundErrorWithHint(path, DoesNotExistMsg) -} - -func NewNotFoundErrorWithHint(path storage.Path, hint string) *storage.Error { - return NewNotFoundErrorf("%v: %v", path.String(), hint) -} - -func NewNotFoundErrorf(f string, a ...interface{}) *storage.Error { - msg := fmt.Sprintf(f, a...) - return &storage.Error{ - Code: storage.NotFoundErr, - Message: msg, - } -} - -func NewWriteConflictError(p storage.Path) *storage.Error { - return &storage.Error{ - Code: storage.WriteConflictErr, - Message: p.String(), - } -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go deleted file mode 100644 index 56772f797..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/storage/internal/ptr/ptr.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2021 The OPA Authors. All rights reserved. 
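Editor's note: TriggerConfig and TriggerHandle are likewise aliases, so trigger registration against the v1-backed store reads the same as before. A sketch (the callback body is illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.New()

	// Triggers must be registered inside a write transaction.
	err := storage.Txn(ctx, store, storage.WriteParams, func(txn storage.Transaction) error {
		_, err := store.Register(ctx, txn, storage.TriggerConfig{
			OnCommit: func(_ context.Context, _ storage.Transaction, event storage.TriggerEvent) {
				if event.DataChanged() {
					fmt.Printf("%d data paths changed\n", len(event.Data))
				}
			},
		})
		return err
	})
	if err != nil {
		panic(err)
	}

	// Subsequent commits invoke the callback before other clients see the change.
	_ = storage.WriteOne(ctx, store, storage.AddOp, storage.MustParsePath("/x"), 1)
}
```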
-// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package ptr provides utilities for pointer operations using storage layer paths. -package ptr - -import ( - "strconv" - - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/storage/internal/errors" -) - -func Ptr(data interface{}, path storage.Path) (interface{}, error) { - node := data - for i := range path { - key := path[i] - switch curr := node.(type) { - case map[string]interface{}: - var ok bool - if node, ok = curr[key]; !ok { - return nil, errors.NewNotFoundError(path) - } - case []interface{}: - pos, err := ValidateArrayIndex(curr, key, path) - if err != nil { - return nil, err - } - node = curr[pos] - default: - return nil, errors.NewNotFoundError(path) - } - } - - return node, nil -} - -func ValidateArrayIndex(arr []interface{}, s string, path storage.Path) (int, error) { - idx, ok := isInt(s) - if !ok { - return 0, errors.NewNotFoundErrorWithHint(path, errors.ArrayIndexTypeMsg) - } - return inRange(idx, arr, path) -} - -// ValidateArrayIndexForWrite also checks that `s` is a valid way to address an -// array element like `ValidateArrayIndex`, but returns a `resource_conflict` error -// if it is not. -func ValidateArrayIndexForWrite(arr []interface{}, s string, i int, path storage.Path) (int, error) { - idx, ok := isInt(s) - if !ok { - return 0, errors.NewWriteConflictError(path[:i-1]) - } - return inRange(idx, arr, path) -} - -func isInt(s string) (int, bool) { - idx, err := strconv.Atoi(s) - return idx, err == nil -} - -func inRange(i int, arr []interface{}, path storage.Path) (int, error) { - if i < 0 || i >= len(arr) { - return 0, errors.NewNotFoundErrorWithHint(path, errors.OutOfRangeMsg) - } - return i, nil -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/path.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/path.go index 02ef4cab4..91d4f34f2 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/storage/path.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/storage/path.go @@ -5,150 +5,30 @@ package storage import ( - "fmt" - "net/url" - "strconv" - "strings" - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/storage" ) // Path refers to a document in storage. -type Path []string +type Path = v1.Path // ParsePath returns a new path for the given str. func ParsePath(str string) (path Path, ok bool) { - if len(str) == 0 { - return nil, false - } - if str[0] != '/' { - return nil, false - } - if len(str) == 1 { - return Path{}, true - } - parts := strings.Split(str[1:], "/") - return parts, true + return v1.ParsePath(str) } // ParsePathEscaped returns a new path for the given escaped str. func ParsePathEscaped(str string) (path Path, ok bool) { - path, ok = ParsePath(str) - if !ok { - return - } - for i := range path { - segment, err := url.PathUnescape(path[i]) - if err == nil { - path[i] = segment - } - } - return + return v1.ParsePathEscaped(str) } // NewPathForRef returns a new path for the given ref. 
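Editor's note: path parsing is forwarded to v1 unchanged. A small sketch of the helpers kept by this shim:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/storage"
)

func main() {
	p, ok := storage.ParsePath("/users/alice")
	fmt.Println(p, ok) // [users alice] true

	// ParsePathEscaped additionally URL-unescapes each segment.
	esc, _ := storage.ParsePathEscaped("/a%2Fb/c")
	fmt.Println(esc) // [a/b c]

	fmt.Println(p.String())                         // "/users/alice"
	fmt.Println(p.HasPrefix(storage.Path{"users"})) // true
}
```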
func NewPathForRef(ref ast.Ref) (path Path, err error) { - - if len(ref) == 0 { - return nil, fmt.Errorf("empty reference (indicates error in caller)") - } - - if len(ref) == 1 { - return Path{}, nil - } - - path = make(Path, 0, len(ref)-1) - - for _, term := range ref[1:] { - switch v := term.Value.(type) { - case ast.String: - path = append(path, string(v)) - case ast.Number: - path = append(path, v.String()) - case ast.Boolean, ast.Null: - return nil, &Error{ - Code: NotFoundErr, - Message: fmt.Sprintf("%v: does not exist", ref), - } - case *ast.Array, ast.Object, ast.Set: - return nil, fmt.Errorf("composites cannot be base document keys: %v", ref) - default: - return nil, fmt.Errorf("unresolved reference (indicates error in caller): %v", ref) - } - } - - return path, nil -} - -// Compare performs lexigraphical comparison on p and other and returns -1 if p -// is less than other, 0 if p is equal to other, or 1 if p is greater than -// other. -func (p Path) Compare(other Path) (cmp int) { - min := len(p) - if len(other) < min { - min = len(other) - } - for i := 0; i < min; i++ { - if cmp := strings.Compare(p[i], other[i]); cmp != 0 { - return cmp - } - } - if len(p) < len(other) { - return -1 - } - if len(p) == len(other) { - return 0 - } - return 1 -} - -// Equal returns true if p is the same as other. -func (p Path) Equal(other Path) bool { - return p.Compare(other) == 0 -} - -// HasPrefix returns true if p starts with other. -func (p Path) HasPrefix(other Path) bool { - if len(other) > len(p) { - return false - } - for i := range other { - if p[i] != other[i] { - return false - } - } - return true -} - -// Ref returns a ref that represents p rooted at head. -func (p Path) Ref(head *ast.Term) (ref ast.Ref) { - ref = make(ast.Ref, len(p)+1) - ref[0] = head - for i := range p { - idx, err := strconv.ParseInt(p[i], 10, 64) - if err == nil { - ref[i+1] = ast.UIntNumberTerm(uint64(idx)) - } else { - ref[i+1] = ast.StringTerm(p[i]) - } - } - return ref -} - -func (p Path) String() string { - buf := make([]string, len(p)) - for i := range buf { - buf[i] = url.PathEscape(p[i]) - } - return "/" + strings.Join(buf, "/") + return v1.NewPathForRef(ref) } // MustParsePath returns a new Path for s. If s cannot be parsed, this function // will panic. This is mostly for test purposes. func MustParsePath(s string) Path { - path, ok := ParsePath(s) - if !ok { - panic(s) - } - return path + return v1.MustParsePath(s) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/storage/storage.go b/constraint/vendor/github.com/open-policy-agent/opa/storage/storage.go index 1e290c50b..c02773d98 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/storage/storage.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/storage/storage.go @@ -6,79 +6,35 @@ package storage import ( "context" + + v1 "github.com/open-policy-agent/opa/v1/storage" ) // NewTransactionOrDie is a helper function to create a new transaction. If the // storage layer cannot create a new transaction, this function will panic. This // function should only be used for tests. func NewTransactionOrDie(ctx context.Context, store Store, params ...TransactionParams) Transaction { - txn, err := store.NewTransaction(ctx, params...) - if err != nil { - panic(err) - } - return txn + return v1.NewTransactionOrDie(ctx, store, params...) } // ReadOne is a convenience function to read a single value from the provided Store. 
It // will create a new Transaction to perform the read with, and clean up after itself // should an error occur. func ReadOne(ctx context.Context, store Store, path Path) (interface{}, error) { - txn, err := store.NewTransaction(ctx) - if err != nil { - return nil, err - } - defer store.Abort(ctx, txn) - - return store.Read(ctx, txn, path) + return v1.ReadOne(ctx, store, path) } // WriteOne is a convenience function to write a single value to the provided Store. It // will create a new Transaction to perform the write with, and clean up after itself // should an error occur. func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value interface{}) error { - txn, err := store.NewTransaction(ctx, WriteParams) - if err != nil { - return err - } - - if err := store.Write(ctx, txn, op, path, value); err != nil { - store.Abort(ctx, txn) - return err - } - - return store.Commit(ctx, txn) + return v1.WriteOne(ctx, store, op, path, value) } // MakeDir inserts an empty object at path. If the parent path does not exist, // MakeDir will create it recursively. func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error { - - // Allow the Store implementation to deal with this in its own way. - if md, ok := store.(MakeDirer); ok { - return md.MakeDir(ctx, txn, path) - } - - if len(path) == 0 { - return nil - } - - node, err := store.Read(ctx, txn, path) - if err != nil { - if !IsNotFound(err) { - return err - } - - if err := MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { - return err - } - - return store.Write(ctx, txn, AddOp, path, map[string]interface{}{}) - } - - if _, ok := node.(map[string]interface{}); ok { - return nil - } - return writeConflictError(path) + return v1.MakeDir(ctx, store, txn, path) } // Txn is a convenience function that executes f inside a new transaction @@ -86,41 +42,12 @@ func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error // aborted and the error is returned. Otherwise, the transaction is committed // and the result of the commit is returned. func Txn(ctx context.Context, store Store, params TransactionParams, f func(Transaction) error) error { - - txn, err := store.NewTransaction(ctx, params) - if err != nil { - return err - } - - if err := f(txn); err != nil { - store.Abort(ctx, txn) - return err - } - - return store.Commit(ctx, txn) + return v1.Txn(ctx, store, params, f) } // NonEmpty returns a function that tests if a path is non-empty. A // path is non-empty if a Read on the path returns a value or a Read // on any of the path prefixes returns a non-object value. 
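Editor's note: the convenience helpers (ReadOne, WriteOne, MakeDir, Txn) keep their single-call semantics; only their bodies now delegate. A usage sketch with invented paths:

```go
package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.NewFromObject(map[string]interface{}{
		"config": map[string]interface{}{"mode": "audit"},
	})

	// Single-shot helpers manage their own transactions.
	if err := storage.WriteOne(ctx, store, storage.AddOp, storage.MustParsePath("/config/level"), "debug"); err != nil {
		panic(err)
	}
	v, _ := storage.ReadOne(ctx, store, storage.MustParsePath("/config"))
	fmt.Println(v)

	// Txn groups several operations into one commit; MakeDir creates parents as needed.
	err := storage.Txn(ctx, store, storage.WriteParams, func(txn storage.Transaction) error {
		if err := storage.MakeDir(ctx, store, txn, storage.MustParsePath("/state/nodes")); err != nil {
			return err
		}
		return store.Write(ctx, txn, storage.AddOp, storage.MustParsePath("/state/nodes/n1"),
			map[string]interface{}{"ready": true})
	})
	if err != nil {
		panic(err)
	}
}
```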
func NonEmpty(ctx context.Context, store Store, txn Transaction) func([]string) (bool, error) { - return func(path []string) (bool, error) { - if _, err := store.Read(ctx, txn, Path(path)); err == nil { - return true, nil - } else if !IsNotFound(err) { - return false, err - } - for i := len(path) - 1; i > 0; i-- { - val, err := store.Read(ctx, txn, Path(path[:i])) - if err != nil && !IsNotFound(err) { - return false, err - } else if err == nil { - if _, ok := val.(map[string]interface{}); ok { - return false, nil - } - return true, nil - } - } - return false, nil - } + return v1.NonEmpty(ctx, store, txn) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/builtins.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/builtins.go index 30c488050..f28c6c795 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/builtins.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/builtins.go @@ -5,218 +5,63 @@ package topdown import ( - "context" - "encoding/binary" - "fmt" - "io" - "math/rand" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) type ( // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. - FunctionalBuiltin1 func(op1 ast.Value) (output ast.Value, err error) + FunctionalBuiltin1 = v1.FunctionalBuiltin1 //nolint:staticcheck // SA1019: Intentional use of deprecated type. // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. - FunctionalBuiltin2 func(op1, op2 ast.Value) (output ast.Value, err error) + FunctionalBuiltin2 = v1.FunctionalBuiltin2 //nolint:staticcheck // SA1019: Intentional use of deprecated type. // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. - FunctionalBuiltin3 func(op1, op2, op3 ast.Value) (output ast.Value, err error) + FunctionalBuiltin3 = v1.FunctionalBuiltin3 //nolint:staticcheck // SA1019: Intentional use of deprecated type. // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. - FunctionalBuiltin4 func(op1, op2, op3, op4 ast.Value) (output ast.Value, err error) + FunctionalBuiltin4 = v1.FunctionalBuiltin4 //nolint:staticcheck // SA1019: Intentional use of deprecated type. // BuiltinContext contains context from the evaluator that may be used by // built-in functions. 
- BuiltinContext struct { - Context context.Context // request context that was passed when query started - Metrics metrics.Metrics // metrics registry for recording built-in specific metrics - Seed io.Reader // randomization source - Time *ast.Term // wall clock time - Cancel Cancel // atomic value that signals evaluation to halt - Runtime *ast.Term // runtime information on the OPA instance - Cache builtins.Cache // built-in function state cache - InterQueryBuiltinCache cache.InterQueryCache // cross-query built-in function state cache - NDBuiltinCache builtins.NDBCache // cache for non-deterministic built-in state - Location *ast.Location // location of built-in call - Tracers []Tracer // Deprecated: Use QueryTracers instead - QueryTracers []QueryTracer // tracer objects for trace() built-in function - TraceEnabled bool // indicates whether tracing is enabled for the evaluation - QueryID uint64 // identifies query being evaluated - ParentID uint64 // identifies parent of query being evaluated - PrintHook print.Hook // provides callback function to use for printing - DistributedTracingOpts tracing.Options // options to be used by distributed tracing. - rand *rand.Rand // randomization source for non-security-sensitive operations - Capabilities *ast.Capabilities - } + BuiltinContext = v1.BuiltinContext // BuiltinFunc defines an interface for implementing built-in functions. // The built-in function is called with the plugged operands from the call // (including the output operands.) The implementation should evaluate the // operands and invoke the iterator for each successful/defined output // value. - BuiltinFunc func(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error + BuiltinFunc = v1.BuiltinFunc ) -// Rand returns a random number generator based on the Seed for this built-in -// context. The random number will be re-used across multiple calls to this -// function. If a random number generator cannot be created, an error is -// returned. -func (bctx *BuiltinContext) Rand() (*rand.Rand, error) { - - if bctx.rand != nil { - return bctx.rand, nil - } - - seed, err := readInt64(bctx.Seed) - if err != nil { - return nil, err - } - - bctx.rand = rand.New(rand.NewSource(seed)) - return bctx.rand, nil -} - // RegisterBuiltinFunc adds a new built-in function to the evaluation engine. func RegisterBuiltinFunc(name string, f BuiltinFunc) { - builtinFunctions[name] = builtinErrorWrapper(name, f) + v1.RegisterBuiltinFunc(name, f) } // Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. func RegisterFunctionalBuiltin1(name string, fun FunctionalBuiltin1) { - builtinFunctions[name] = functionalWrapper1(name, fun) + v1.RegisterFunctionalBuiltin1(name, fun) } // Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. func RegisterFunctionalBuiltin2(name string, fun FunctionalBuiltin2) { - builtinFunctions[name] = functionalWrapper2(name, fun) + v1.RegisterFunctionalBuiltin2(name, fun) } // Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. func RegisterFunctionalBuiltin3(name string, fun FunctionalBuiltin3) { - builtinFunctions[name] = functionalWrapper3(name, fun) + v1.RegisterFunctionalBuiltin3(name, fun) } // Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. 
func RegisterFunctionalBuiltin4(name string, fun FunctionalBuiltin4) { - builtinFunctions[name] = functionalWrapper4(name, fun) + v1.RegisterFunctionalBuiltin4(name, fun) } // GetBuiltin returns a built-in function implementation, nil if no built-in found. func GetBuiltin(name string) BuiltinFunc { - return builtinFunctions[name] + return v1.GetBuiltin(name) } // Deprecated: The BuiltinEmpty type is no longer needed. Use nil return values instead. -type BuiltinEmpty struct{} - -func (BuiltinEmpty) Error() string { - return "" -} - -var builtinFunctions = map[string]BuiltinFunc{} - -func builtinErrorWrapper(name string, fn BuiltinFunc) BuiltinFunc { - return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { - err := fn(bctx, args, iter) - if err == nil { - return nil - } - return handleBuiltinErr(name, bctx.Location, err) - } -} - -func functionalWrapper1(name string, fn FunctionalBuiltin1) BuiltinFunc { - return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { - result, err := fn(args[0].Value) - if err == nil { - return iter(ast.NewTerm(result)) - } - return handleBuiltinErr(name, bctx.Location, err) - } -} - -func functionalWrapper2(name string, fn FunctionalBuiltin2) BuiltinFunc { - return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { - result, err := fn(args[0].Value, args[1].Value) - if err == nil { - return iter(ast.NewTerm(result)) - } - return handleBuiltinErr(name, bctx.Location, err) - } -} - -func functionalWrapper3(name string, fn FunctionalBuiltin3) BuiltinFunc { - return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { - result, err := fn(args[0].Value, args[1].Value, args[2].Value) - if err == nil { - return iter(ast.NewTerm(result)) - } - return handleBuiltinErr(name, bctx.Location, err) - } -} - -func functionalWrapper4(name string, fn FunctionalBuiltin4) BuiltinFunc { - return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { - result, err := fn(args[0].Value, args[1].Value, args[2].Value, args[3].Value) - if err == nil { - return iter(ast.NewTerm(result)) - } - if _, empty := err.(BuiltinEmpty); empty { - return nil - } - return handleBuiltinErr(name, bctx.Location, err) - } -} - -func handleBuiltinErr(name string, loc *ast.Location, err error) error { - switch err := err.(type) { - case BuiltinEmpty: - return nil - case *Error, Halt: - return err - case builtins.ErrOperand: - e := &Error{ - Code: TypeErr, - Message: fmt.Sprintf("%v: %v", name, err.Error()), - Location: loc, - } - return e.Wrap(err) - default: - e := &Error{ - Code: BuiltinErr, - Message: fmt.Sprintf("%v: %v", name, err.Error()), - Location: loc, - } - return e.Wrap(err) - } -} - -func readInt64(r io.Reader) (int64, error) { - bs := make([]byte, 8) - n, err := io.ReadFull(r, bs) - if n != len(bs) || err != nil { - return 0, err - } - return int64(binary.BigEndian.Uint64(bs)), nil -} - -// Used to get older-style (ast.Term, error) tuples out of newer functions. 
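Editor's note: RegisterBuiltinFunc and the deprecated functional variants now register into the shared v1 registry, so a built-in registered through either import path is visible to both. A sketch of the BuiltinFunc style, assuming the usual pairing with ast.RegisterBuiltin for the declaration (the "example.upper" name is invented):

```go
package main

import (
	"strings"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/topdown"
	"github.com/open-policy-agent/opa/types"
)

func init() {
	// Declare the built-in so the compiler accepts calls to it from Rego.
	ast.RegisterBuiltin(&ast.Builtin{
		Name: "example.upper",
		Decl: types.NewFunction(types.Args(types.S), types.S),
	})

	// Implement it; iter is invoked once per defined output value.
	topdown.RegisterBuiltinFunc("example.upper",
		func(_ topdown.BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
			s, ok := operands[0].Value.(ast.String)
			if !ok {
				return nil // undefined for non-string input in this sketch
			}
			return iter(ast.StringTerm(strings.ToUpper(string(s))))
		})
}

func main() {}
```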
-func getResult(fn BuiltinFunc, operands ...*ast.Term) (*ast.Term, error) { - var result *ast.Term - extractionFn := func(r *ast.Term) error { - result = r - return nil - } - err := fn(BuiltinContext{}, operands, extractionFn) - if err != nil { - return nil, err - } - return result, nil -} +type BuiltinEmpty = v1.Builtin diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go deleted file mode 100644 index 353f95684..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/builtins/builtins.go +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package builtins contains utilities for implementing built-in functions. -package builtins - -import ( - "encoding/json" - "fmt" - "math/big" - "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" -) - -// Cache defines the built-in cache used by the top-down evaluation. The keys -// must be comparable and should not be of type string. -type Cache map[interface{}]interface{} - -// Put updates the cache for the named built-in. -func (c Cache) Put(k, v interface{}) { - c[k] = v -} - -// Get returns the cached value for k. -func (c Cache) Get(k interface{}) (interface{}, bool) { - v, ok := c[k] - return v, ok -} - -// We use an ast.Object for the cached keys/values because a naive -// map[ast.Value]ast.Value will not correctly detect value equality of -// the member keys. -type NDBCache map[string]ast.Object - -func (c NDBCache) AsValue() ast.Value { - out := ast.NewObject() - for bname, obj := range c { - out.Insert(ast.StringTerm(bname), ast.NewTerm(obj)) - } - return out -} - -// Put updates the cache for the named built-in. -// Automatically creates the 2-level hierarchy as needed. -func (c NDBCache) Put(name string, k, v ast.Value) { - if _, ok := c[name]; !ok { - c[name] = ast.NewObject() - } - c[name].Insert(ast.NewTerm(k), ast.NewTerm(v)) -} - -// Get returns the cached value for k for the named builtin. -func (c NDBCache) Get(name string, k ast.Value) (ast.Value, bool) { - if m, ok := c[name]; ok { - v := m.Get(ast.NewTerm(k)) - if v != nil { - return v.Value, true - } - return nil, false - } - return nil, false -} - -// Convenience functions for serializing the data structure. -func (c NDBCache) MarshalJSON() ([]byte, error) { - v, err := ast.JSON(c.AsValue()) - if err != nil { - return nil, err - } - return json.Marshal(v) -} - -func (c *NDBCache) UnmarshalJSON(data []byte) error { - out := map[string]ast.Object{} - var incoming interface{} - - // Note: We use util.Unmarshal instead of json.Unmarshal to get - // correct deserialization of number types. - err := util.Unmarshal(data, &incoming) - if err != nil { - return err - } - - // Convert interface types back into ast.Value types. - nestedObject, err := ast.InterfaceToValue(incoming) - if err != nil { - return err - } - - // Reconstruct NDBCache from nested ast.Object structure. 
- if source, ok := nestedObject.(ast.Object); ok { - err = source.Iter(func(k, v *ast.Term) error { - if obj, ok := v.Value.(ast.Object); ok { - out[string(k.Value.(ast.String))] = obj - return nil - } - return fmt.Errorf("expected Object, got other Value type in conversion") - }) - if err != nil { - return err - } - } - - *c = out - - return nil -} - -// ErrOperand represents an invalid operand has been passed to a built-in -// function. Built-ins should return ErrOperand to indicate a type error has -// occurred. -type ErrOperand string - -func (err ErrOperand) Error() string { - return string(err) -} - -// NewOperandErr returns a generic operand error. -func NewOperandErr(pos int, f string, a ...interface{}) error { - f = fmt.Sprintf("operand %v ", pos) + f - return ErrOperand(fmt.Sprintf(f, a...)) -} - -// NewOperandTypeErr returns an operand error indicating the operand's type was wrong. -func NewOperandTypeErr(pos int, got ast.Value, expected ...string) error { - - if len(expected) == 1 { - return NewOperandErr(pos, "must be %v but got %v", expected[0], ast.TypeName(got)) - } - - return NewOperandErr(pos, "must be one of {%v} but got %v", strings.Join(expected, ", "), ast.TypeName(got)) -} - -// NewOperandElementErr returns an operand error indicating an element in the -// composite operand was wrong. -func NewOperandElementErr(pos int, composite ast.Value, got ast.Value, expected ...string) error { - - tpe := ast.TypeName(composite) - - if len(expected) == 1 { - return NewOperandErr(pos, "must be %v of %vs but got %v containing %v", tpe, expected[0], tpe, ast.TypeName(got)) - } - - return NewOperandErr(pos, "must be %v of (any of) {%v} but got %v containing %v", tpe, strings.Join(expected, ", "), tpe, ast.TypeName(got)) -} - -// NewOperandEnumErr returns an operand error indicating a value was wrong. -func NewOperandEnumErr(pos int, expected ...string) error { - - if len(expected) == 1 { - return NewOperandErr(pos, "must be %v", expected[0]) - } - - return NewOperandErr(pos, "must be one of {%v}", strings.Join(expected, ", ")) -} - -// IntOperand converts x to an int. If the cast fails, a descriptive error is -// returned. -func IntOperand(x ast.Value, pos int) (int, error) { - n, ok := x.(ast.Number) - if !ok { - return 0, NewOperandTypeErr(pos, x, "number") - } - - i, ok := n.Int() - if !ok { - return 0, NewOperandErr(pos, "must be integer number but got floating-point number") - } - - return i, nil -} - -// BigIntOperand converts x to a big int. If the cast fails, a descriptive error -// is returned. -func BigIntOperand(x ast.Value, pos int) (*big.Int, error) { - n, err := NumberOperand(x, 1) - if err != nil { - return nil, NewOperandTypeErr(pos, x, "integer") - } - bi, err := NumberToInt(n) - if err != nil { - return nil, NewOperandErr(pos, "must be integer number but got floating-point number") - } - - return bi, nil -} - -// NumberOperand converts x to a number. If the cast fails, a descriptive error is -// returned. -func NumberOperand(x ast.Value, pos int) (ast.Number, error) { - n, ok := x.(ast.Number) - if !ok { - return ast.Number(""), NewOperandTypeErr(pos, x, "number") - } - return n, nil -} - -// SetOperand converts x to a set. If the cast fails, a descriptive error is -// returned. -func SetOperand(x ast.Value, pos int) (ast.Set, error) { - s, ok := x.(ast.Set) - if !ok { - return nil, NewOperandTypeErr(pos, x, "set") - } - return s, nil -} - -// StringOperand converts x to a string. If the cast fails, a descriptive error is -// returned. 
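Editor's note: the operand helpers removed from this vendor tree still exist under the v1 module path, which is the natural place to call them from now. A sketch of a built-in body using them (the "repeat" built-in is invented; import paths assume the v1 layout):

```go
package main

import (
	"strings"

	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/topdown"
	"github.com/open-policy-agent/opa/v1/topdown/builtins"
)

// repeat(s, n) concatenates s with itself n times.
func builtinRepeat(_ topdown.BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
	s, err := builtins.StringOperand(operands[0].Value, 1)
	if err != nil {
		return err // surfaces as "operand 1 must be string but got ..."
	}
	n, err := builtins.IntOperand(operands[1].Value, 2)
	if err != nil {
		return err
	}
	if n < 0 {
		return builtins.NewOperandErr(2, "must be a non-negative integer")
	}
	return iter(ast.StringTerm(strings.Repeat(string(s), n)))
}

func main() {
	topdown.RegisterBuiltinFunc("example.repeat", builtinRepeat)
}
```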
-func StringOperand(x ast.Value, pos int) (ast.String, error) { - s, ok := x.(ast.String) - if !ok { - return ast.String(""), NewOperandTypeErr(pos, x, "string") - } - return s, nil -} - -// ObjectOperand converts x to an object. If the cast fails, a descriptive -// error is returned. -func ObjectOperand(x ast.Value, pos int) (ast.Object, error) { - o, ok := x.(ast.Object) - if !ok { - return nil, NewOperandTypeErr(pos, x, "object") - } - return o, nil -} - -// ArrayOperand converts x to an array. If the cast fails, a descriptive -// error is returned. -func ArrayOperand(x ast.Value, pos int) (*ast.Array, error) { - a, ok := x.(*ast.Array) - if !ok { - return ast.NewArray(), NewOperandTypeErr(pos, x, "array") - } - return a, nil -} - -// NumberToFloat converts n to a big float. -func NumberToFloat(n ast.Number) *big.Float { - r, ok := new(big.Float).SetString(string(n)) - if !ok { - panic("illegal value") - } - return r -} - -// FloatToNumber converts f to a number. -func FloatToNumber(f *big.Float) ast.Number { - var format byte = 'g' - if f.IsInt() { - format = 'f' - } - return ast.Number(f.Text(format, -1)) -} - -// NumberToInt converts n to a big int. -// If n cannot be converted to an big int, an error is returned. -func NumberToInt(n ast.Number) (*big.Int, error) { - f := NumberToFloat(n) - r, accuracy := f.Int(nil) - if accuracy != big.Exact { - return nil, fmt.Errorf("illegal value") - } - return r, nil -} - -// IntToNumber converts i to a number. -func IntToNumber(i *big.Int) ast.Number { - return ast.Number(i.String()) -} - -// StringSliceOperand converts x to a []string. If the cast fails, a descriptive error is -// returned. -func StringSliceOperand(a ast.Value, pos int) ([]string, error) { - type iterable interface { - Iter(func(*ast.Term) error) error - Len() int - } - - strs, ok := a.(iterable) - if !ok { - return nil, NewOperandTypeErr(pos, a, "array", "set") - } - - var outStrs = make([]string, 0, strs.Len()) - if err := strs.Iter(func(x *ast.Term) error { - s, ok := x.Value.(ast.String) - if !ok { - return NewOperandElementErr(pos, a, x.Value, "string") - } - outStrs = append(outStrs, string(s)) - return nil - }); err != nil { - return nil, err - } - - return outStrs, nil -} - -// RuneSliceOperand converts x to a []rune. If the cast fails, a descriptive error is -// returned. -func RuneSliceOperand(x ast.Value, pos int) ([]rune, error) { - a, err := ArrayOperand(x, pos) - if err != nil { - return nil, err - } - - var f = make([]rune, a.Len()) - for k := 0; k < a.Len(); k++ { - b := a.Elem(k) - c, ok := b.Value.(ast.String) - if !ok { - return nil, NewOperandElementErr(pos, x, b.Value, "string") - } - - d := []rune(string(c)) - if len(d) != 1 { - return nil, NewOperandElementErr(pos, x, b.Value, "rune") - } - - f[k] = d[0] - } - - return f, nil -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/cache.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/cache.go index 265457e02..bb39df03e 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/cache.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/cache.go @@ -5,348 +5,15 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // VirtualCache defines the interface for a cache that stores the results of // evaluated virtual documents (rules). // The cache is a stack of frames, where each frame is a mapping from references // to values. 
-type VirtualCache interface { - // Push pushes a new, empty frame of value mappings onto the stack. - Push() - - // Pop pops the top frame of value mappings from the stack, removing all associated entries. - Pop() - - // Get returns the value associated with the given reference. The second return value - // indicates whether the reference has a recorded 'undefined' result. - Get(ref ast.Ref) (*ast.Term, bool) - - // Put associates the given reference with the given value. If the value is nil, the reference - // is marked as having an 'undefined' result. - Put(ref ast.Ref, value *ast.Term) - - // Keys returns the set of keys that have been cached for the active frame. - Keys() []ast.Ref -} - -type virtualCache struct { - stack []*virtualCacheElem -} - -type virtualCacheElem struct { - value *ast.Term - children *util.HashMap - undefined bool -} +type VirtualCache = v1.VirtualCache func NewVirtualCache() VirtualCache { - cache := &virtualCache{} - cache.Push() - return cache -} - -func (c *virtualCache) Push() { - c.stack = append(c.stack, newVirtualCacheElem()) -} - -func (c *virtualCache) Pop() { - c.stack = c.stack[:len(c.stack)-1] -} - -// Returns the resolved value of the AST term and a flag indicating if the value -// should be interpretted as undefined: -// -// nil, true indicates the ref is undefined -// ast.Term, false indicates the ref is defined -// nil, false indicates the ref has not been cached -// ast.Term, true is impossible -func (c *virtualCache) Get(ref ast.Ref) (*ast.Term, bool) { - node := c.stack[len(c.stack)-1] - for i := 0; i < len(ref); i++ { - x, ok := node.children.Get(ref[i]) - if !ok { - return nil, false - } - node = x.(*virtualCacheElem) - } - if node.undefined { - return nil, true - } - - return node.value, false -} - -// If value is a nil pointer, set the 'undefined' flag on the cache element to -// indicate that the Ref has resolved to undefined. -func (c *virtualCache) Put(ref ast.Ref, value *ast.Term) { - node := c.stack[len(c.stack)-1] - for i := 0; i < len(ref); i++ { - x, ok := node.children.Get(ref[i]) - if ok { - node = x.(*virtualCacheElem) - } else { - next := newVirtualCacheElem() - node.children.Put(ref[i], next) - node = next - } - } - if value != nil { - node.value = value - } else { - node.undefined = true - } -} - -func (c *virtualCache) Keys() []ast.Ref { - node := c.stack[len(c.stack)-1] - return keysRecursive(nil, node) -} - -func keysRecursive(root ast.Ref, node *virtualCacheElem) []ast.Ref { - var keys []ast.Ref - node.children.Iter(func(k, v util.T) bool { - ref := root.Append(k.(*ast.Term)) - if v.(*virtualCacheElem).value != nil { - keys = append(keys, ref) - } - if v.(*virtualCacheElem).children.Len() > 0 { - keys = append(keys, keysRecursive(ref, v.(*virtualCacheElem))...) - } - return false - }) - return keys -} - -func newVirtualCacheElem() *virtualCacheElem { - return &virtualCacheElem{children: newVirtualCacheHashMap()} -} - -func newVirtualCacheHashMap() *util.HashMap { - return util.NewHashMap(func(a, b util.T) bool { - return a.(*ast.Term).Equal(b.(*ast.Term)) - }, func(x util.T) int { - return x.(*ast.Term).Hash() - }) -} - -// baseCache implements a trie structure to cache base documents read out of -// storage. Values inserted into the cache may contain other values that were -// previously inserted. In this case, the previous values are erased from the -// structure. 
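Editor's note: NewVirtualCache now returns the v1 implementation, but the contract documented above is unchanged: Put with a nil term records "undefined", Get reports that flag, and Push/Pop scope entries to the active frame. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/topdown"
)

func main() {
	vc := topdown.NewVirtualCache()

	allow := ast.MustParseRef("data.example.allow")
	vc.Put(allow, ast.BooleanTerm(true))               // cache a defined result
	vc.Put(ast.MustParseRef("data.example.deny"), nil) // recorded as undefined

	if term, undefined := vc.Get(allow); !undefined {
		fmt.Println("cached:", term) // cached: true
	}

	vc.Push() // a fresh frame: entries from the outer frame are not visible here
	if term, undefined := vc.Get(allow); term == nil && !undefined {
		fmt.Println("not cached in this frame")
	}
	vc.Pop()
}
```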
-type baseCache struct { - root *baseCacheElem -} - -func newBaseCache() *baseCache { - return &baseCache{ - root: newBaseCacheElem(), - } -} - -func (c *baseCache) Get(ref ast.Ref) ast.Value { - node := c.root - for i := 0; i < len(ref); i++ { - node = node.children[ref[i].Value] - if node == nil { - return nil - } else if node.value != nil { - result, err := node.value.Find(ref[i+1:]) - if err != nil { - return nil - } - return result - } - } - return nil -} - -func (c *baseCache) Put(ref ast.Ref, value ast.Value) { - node := c.root - for i := 0; i < len(ref); i++ { - if child, ok := node.children[ref[i].Value]; ok { - node = child - } else { - child := newBaseCacheElem() - node.children[ref[i].Value] = child - node = child - } - } - node.set(value) -} - -type baseCacheElem struct { - value ast.Value - children map[ast.Value]*baseCacheElem -} - -func newBaseCacheElem() *baseCacheElem { - return &baseCacheElem{ - children: map[ast.Value]*baseCacheElem{}, - } -} - -func (e *baseCacheElem) set(value ast.Value) { - e.value = value - e.children = map[ast.Value]*baseCacheElem{} -} - -type refStack struct { - sl []refStackElem -} - -type refStackElem struct { - refs []ast.Ref -} - -func newRefStack() *refStack { - return &refStack{} -} - -func (s *refStack) Push(refs []ast.Ref) { - s.sl = append(s.sl, refStackElem{refs: refs}) -} - -func (s *refStack) Pop() { - s.sl = s.sl[:len(s.sl)-1] -} - -func (s *refStack) Prefixed(ref ast.Ref) bool { - if s != nil { - for i := len(s.sl) - 1; i >= 0; i-- { - for j := range s.sl[i].refs { - if ref.HasPrefix(s.sl[i].refs[j]) { - return true - } - } - } - } - return false -} - -type comprehensionCache struct { - stack []map[*ast.Term]*comprehensionCacheElem -} - -type comprehensionCacheElem struct { - value *ast.Term - children *util.HashMap -} - -func newComprehensionCache() *comprehensionCache { - cache := &comprehensionCache{} - cache.Push() - return cache -} - -func (c *comprehensionCache) Push() { - c.stack = append(c.stack, map[*ast.Term]*comprehensionCacheElem{}) -} - -func (c *comprehensionCache) Pop() { - c.stack = c.stack[:len(c.stack)-1] -} - -func (c *comprehensionCache) Elem(t *ast.Term) (*comprehensionCacheElem, bool) { - elem, ok := c.stack[len(c.stack)-1][t] - return elem, ok -} - -func (c *comprehensionCache) Set(t *ast.Term, elem *comprehensionCacheElem) { - c.stack[len(c.stack)-1][t] = elem -} - -func newComprehensionCacheElem() *comprehensionCacheElem { - return &comprehensionCacheElem{children: newComprehensionCacheHashMap()} -} - -func (c *comprehensionCacheElem) Get(key []*ast.Term) *ast.Term { - node := c - for i := 0; i < len(key); i++ { - x, ok := node.children.Get(key[i]) - if !ok { - return nil - } - node = x.(*comprehensionCacheElem) - } - return node.value -} - -func (c *comprehensionCacheElem) Put(key []*ast.Term, value *ast.Term) { - node := c - for i := 0; i < len(key); i++ { - x, ok := node.children.Get(key[i]) - if ok { - node = x.(*comprehensionCacheElem) - } else { - next := newComprehensionCacheElem() - node.children.Put(key[i], next) - node = next - } - } - node.value = value -} - -func newComprehensionCacheHashMap() *util.HashMap { - return util.NewHashMap(func(a, b util.T) bool { - return a.(*ast.Term).Equal(b.(*ast.Term)) - }, func(x util.T) int { - return x.(*ast.Term).Hash() - }) -} - -type functionMocksStack struct { - stack []*functionMocksElem -} - -type functionMocksElem []frame - -type frame map[string]*ast.Term - -func newFunctionMocksStack() *functionMocksStack { - stack := &functionMocksStack{} - 
stack.Push() - return stack -} - -func newFunctionMocksElem() *functionMocksElem { - return &functionMocksElem{} -} - -func (s *functionMocksStack) Push() { - s.stack = append(s.stack, newFunctionMocksElem()) -} - -func (s *functionMocksStack) Pop() { - s.stack = s.stack[:len(s.stack)-1] -} - -func (s *functionMocksStack) PopPairs() { - current := s.stack[len(s.stack)-1] - *current = (*current)[:len(*current)-1] -} - -func (s *functionMocksStack) PutPairs(mocks [][2]*ast.Term) { - el := frame{} - for i := range mocks { - el[mocks[i][0].Value.String()] = mocks[i][1] - } - s.Put(el) -} - -func (s *functionMocksStack) Put(el frame) { - current := s.stack[len(s.stack)-1] - *current = append(*current, el) -} - -func (s *functionMocksStack) Get(f ast.Ref) (*ast.Term, bool) { - current := *s.stack[len(s.stack)-1] - for i := len(current) - 1; i >= 0; i-- { - if r, ok := current[i][f.String()]; ok { - return r, true - } - } - return nil, false + return v1.NewVirtualCache() } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go deleted file mode 100644 index c83c9828b..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/cache/cache.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -// Package cache defines the inter-query cache interface that can cache data across queries -package cache - -import ( - "container/list" - "context" - "fmt" - "math" - "sync" - "time" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" -) - -const ( - defaultMaxSizeBytes = int64(0) // unlimited - defaultForcedEvictionThresholdPercentage = int64(100) // trigger at max_size_bytes - defaultStaleEntryEvictionPeriodSeconds = int64(0) // never -) - -// Config represents the configuration of the inter-query cache. -type Config struct { - InterQueryBuiltinCache InterQueryBuiltinCacheConfig `json:"inter_query_builtin_cache"` -} - -// InterQueryBuiltinCacheConfig represents the configuration of the inter-query cache that built-in functions can utilize. -// MaxSizeBytes - max capacity of cache in bytes -// ForcedEvictionThresholdPercentage - capacity usage in percentage after which forced FIFO eviction starts -// StaleEntryEvictionPeriodSeconds - time period between end of previous and start of new stale entry eviction routine -type InterQueryBuiltinCacheConfig struct { - MaxSizeBytes *int64 `json:"max_size_bytes,omitempty"` - ForcedEvictionThresholdPercentage *int64 `json:"forced_eviction_threshold_percentage,omitempty"` - StaleEntryEvictionPeriodSeconds *int64 `json:"stale_entry_eviction_period_seconds,omitempty"` -} - -// ParseCachingConfig returns the config for the inter-query cache. 
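For reference, a sketch of the input this function accepts; the numbers are illustrative, and omitting a field falls back to the defaults above (unlimited size, forced eviction at 100% of capacity, no stale-entry sweep):

	raw := []byte(`{
	  "inter_query_builtin_cache": {
	    "max_size_bytes": 20000000,
	    "forced_eviction_threshold_percentage": 80,
	    "stale_entry_eviction_period_seconds": 10
	  }
	}`)
	cfg, err := ParseCachingConfig(raw)
	if err != nil {
		// out-of-range threshold or negative period values are rejected here
	}
	_ = cfg
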
-func ParseCachingConfig(raw []byte) (*Config, error) { - if raw == nil { - maxSize := new(int64) - *maxSize = defaultMaxSizeBytes - threshold := new(int64) - *threshold = defaultForcedEvictionThresholdPercentage - period := new(int64) - *period = defaultStaleEntryEvictionPeriodSeconds - return &Config{InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{MaxSizeBytes: maxSize, ForcedEvictionThresholdPercentage: threshold, StaleEntryEvictionPeriodSeconds: period}}, nil - } - - var config Config - - if err := util.Unmarshal(raw, &config); err == nil { - if err = config.validateAndInjectDefaults(); err != nil { - return nil, err - } - } else { - return nil, err - } - - return &config, nil -} - -func (c *Config) validateAndInjectDefaults() error { - if c.InterQueryBuiltinCache.MaxSizeBytes == nil { - maxSize := new(int64) - *maxSize = defaultMaxSizeBytes - c.InterQueryBuiltinCache.MaxSizeBytes = maxSize - } - if c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage == nil { - threshold := new(int64) - *threshold = defaultForcedEvictionThresholdPercentage - c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage = threshold - } else { - threshold := *c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage - if threshold < 0 || threshold > 100 { - return fmt.Errorf("invalid forced_eviction_threshold_percentage %v", threshold) - } - } - if c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds == nil { - period := new(int64) - *period = defaultStaleEntryEvictionPeriodSeconds - c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds = period - } else { - period := *c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds - if period < 0 { - return fmt.Errorf("invalid stale_entry_eviction_period_seconds %v", period) - } - } - return nil -} - -// InterQueryCacheValue defines the interface for the data that the inter-query cache holds. -type InterQueryCacheValue interface { - SizeInBytes() int64 - Clone() (InterQueryCacheValue, error) -} - -// InterQueryCache defines the interface for the inter-query cache. -type InterQueryCache interface { - Get(key ast.Value) (value InterQueryCacheValue, found bool) - Insert(key ast.Value, value InterQueryCacheValue) int - InsertWithExpiry(key ast.Value, value InterQueryCacheValue, expiresAt time.Time) int - Delete(key ast.Value) - UpdateConfig(config *Config) - Clone(value InterQueryCacheValue) (InterQueryCacheValue, error) -} - -// NewInterQueryCache returns a new inter-query cache. -// The cache uses a FIFO eviction policy when it reaches the forced eviction threshold. -// Parameters: -// -// config - to configure the InterQueryCache -func NewInterQueryCache(config *Config) InterQueryCache { - return newCache(config) -} - -// NewInterQueryCacheWithContext returns a new inter-query cache with context. -// The cache uses a combination of FIFO eviction policy when it reaches the forced eviction threshold -// and a periodic cleanup routine to remove stale entries that exceed their expiration time, if specified. -// If configured with a zero stale_entry_eviction_period_seconds value, the stale entry cleanup routine is disabled. 
-// -// Parameters: -// -// ctx - used to control lifecycle of the stale entry cleanup routine -// config - to configure the InterQueryCache -func NewInterQueryCacheWithContext(ctx context.Context, config *Config) InterQueryCache { - iqCache := newCache(config) - if iqCache.staleEntryEvictionTimePeriodSeconds() > 0 { - cleanupTicker := time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) - go func() { - for { - select { - case <-cleanupTicker.C: - cleanupTicker.Stop() - iqCache.cleanStaleValues() - cleanupTicker = time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) - case <-ctx.Done(): - cleanupTicker.Stop() - return - } - } - }() - } - - return iqCache -} - -type cacheItem struct { - value InterQueryCacheValue - expiresAt time.Time - keyElement *list.Element -} - -type cache struct { - items map[string]cacheItem - usage int64 - config *Config - l *list.List - mtx sync.Mutex -} - -func newCache(config *Config) *cache { - return &cache{ - items: map[string]cacheItem{}, - usage: 0, - config: config, - l: list.New(), - } -} - -// InsertWithExpiry inserts a key k into the cache with value v with an expiration time expiresAt. -// A zero time value for expiresAt indicates no expiry -func (c *cache) InsertWithExpiry(k ast.Value, v InterQueryCacheValue, expiresAt time.Time) (dropped int) { - c.mtx.Lock() - defer c.mtx.Unlock() - return c.unsafeInsert(k, v, expiresAt) -} - -// Insert inserts a key k into the cache with value v with no expiration time. -func (c *cache) Insert(k ast.Value, v InterQueryCacheValue) (dropped int) { - return c.InsertWithExpiry(k, v, time.Time{}) -} - -// Get returns the value in the cache for k. -func (c *cache) Get(k ast.Value) (InterQueryCacheValue, bool) { - c.mtx.Lock() - defer c.mtx.Unlock() - cacheItem, ok := c.unsafeGet(k) - - if ok { - return cacheItem.value, true - } - return nil, false -} - -// Delete deletes the value in the cache for k. 
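For reference, a minimal sketch of the context-aware constructor documented above; ctx, cfg, key and value are assumed to come from the caller (cfg from ParseCachingConfig, key an ast.Value, value an InterQueryCacheValue implementation):

	iq := NewInterQueryCacheWithContext(ctx, cfg) // the stale-entry sweep stops when ctx is done
	dropped := iq.InsertWithExpiry(key, value, time.Now().Add(30*time.Second))
	_ = dropped // number of entries evicted to make room
	if cached, ok := iq.Get(key); ok {
		_ = cached
	}
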
-func (c *cache) Delete(k ast.Value) { - c.mtx.Lock() - defer c.mtx.Unlock() - c.unsafeDelete(k) -} - -func (c *cache) UpdateConfig(config *Config) { - if config == nil { - return - } - c.mtx.Lock() - defer c.mtx.Unlock() - c.config = config -} - -func (c *cache) Clone(value InterQueryCacheValue) (InterQueryCacheValue, error) { - c.mtx.Lock() - defer c.mtx.Unlock() - return c.unsafeClone(value) -} - -func (c *cache) unsafeInsert(k ast.Value, v InterQueryCacheValue, expiresAt time.Time) (dropped int) { - size := v.SizeInBytes() - limit := int64(math.Ceil(float64(c.forcedEvictionThresholdPercentage())/100.0) * (float64(c.maxSizeBytes()))) - if limit > 0 { - if size > limit { - dropped++ - return dropped - } - - for key := c.l.Front(); key != nil && (c.usage+size > limit); key = c.l.Front() { - dropKey := key.Value.(ast.Value) - c.unsafeDelete(dropKey) - dropped++ - } - } - - // By deleting the old value, if it exists, we ensure the usage variable stays correct - c.unsafeDelete(k) - - c.items[k.String()] = cacheItem{ - value: v, - expiresAt: expiresAt, - keyElement: c.l.PushBack(k), - } - c.usage += size - return dropped -} - -func (c *cache) unsafeGet(k ast.Value) (cacheItem, bool) { - value, ok := c.items[k.String()] - return value, ok -} - -func (c *cache) unsafeDelete(k ast.Value) { - cacheItem, ok := c.unsafeGet(k) - if !ok { - return - } - - c.usage -= cacheItem.value.SizeInBytes() - delete(c.items, k.String()) - c.l.Remove(cacheItem.keyElement) -} - -func (c *cache) unsafeClone(value InterQueryCacheValue) (InterQueryCacheValue, error) { - return value.Clone() -} - -func (c *cache) maxSizeBytes() int64 { - if c.config == nil { - return defaultMaxSizeBytes - } - return *c.config.InterQueryBuiltinCache.MaxSizeBytes -} - -func (c *cache) forcedEvictionThresholdPercentage() int64 { - if c.config == nil { - return defaultForcedEvictionThresholdPercentage - } - return *c.config.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage -} - -func (c *cache) staleEntryEvictionTimePeriodSeconds() int64 { - if c.config == nil { - return defaultStaleEntryEvictionPeriodSeconds - } - return *c.config.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds -} - -func (c *cache) cleanStaleValues() (dropped int) { - c.mtx.Lock() - defer c.mtx.Unlock() - for key := c.l.Front(); key != nil; { - nextKey := key.Next() - // if expiresAt is zero, the item doesn't have an expiry - if ea := c.items[(key.Value.(ast.Value)).String()].expiresAt; !ea.IsZero() && ea.Before(time.Now()) { - c.unsafeDelete(key.Value.(ast.Value)) - dropped++ - } - key = nextKey - } - return dropped -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/cancel.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/cancel.go index 534e0799a..395a14a80 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/cancel.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/cancel.go @@ -5,29 +5,14 @@ package topdown import ( - "sync/atomic" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // Cancel defines the interface for cancelling topdown queries. Cancel // operations are thread-safe and idempotent. -type Cancel interface { - Cancel() - Cancelled() bool -} - -type cancel struct { - flag int32 -} +type Cancel = v1.Cancel // NewCancel returns a new Cancel object. 
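For reference, the Cancel alias keeps its two-method contract; a usage sketch in which ctx is assumed to come from the caller:

	c := NewCancel()
	go func() {
		<-ctx.Done()
		c.Cancel() // idempotent and safe to call from any goroutine
	}()
	// the evaluator polls c.Cancelled() and stops with a cancel error once it reports true
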
func NewCancel() Cancel { - return &cancel{} -} - -func (c *cancel) Cancel() { - atomic.StoreInt32(&c.flag, 1) -} - -func (c *cancel) Cancelled() bool { - return atomic.LoadInt32(&c.flag) != 0 + return v1.NewCancel() } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/doc.go index 9aa7aa45c..a303ef788 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/doc.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/doc.go @@ -7,4 +7,8 @@ // The topdown implementation is a modified version of the standard top-down // evaluation algorithm used in Datalog. References and comprehensions are // evaluated eagerly while all other terms are evaluated lazily. +// +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. package topdown diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/errors.go index 918df6c85..47853ec6d 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/errors.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/errors.go @@ -5,145 +5,50 @@ package topdown import ( - "errors" - "fmt" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // Halt is a special error type that built-in function implementations return to indicate // that policy evaluation should stop immediately. -type Halt struct { - Err error -} - -func (h Halt) Error() string { - return h.Err.Error() -} - -func (h Halt) Unwrap() error { return h.Err } +type Halt = v1.Halt // Error is the error type returned by the Eval and Query functions when // an evaluation error occurs. -type Error struct { - Code string `json:"code"` - Message string `json:"message"` - Location *ast.Location `json:"location,omitempty"` - err error `json:"-"` -} +type Error = v1.Error const ( // InternalErr represents an unknown evaluation error. - InternalErr string = "eval_internal_error" + InternalErr = v1.InternalErr // CancelErr indicates the evaluation process was cancelled. - CancelErr string = "eval_cancel_error" + CancelErr = v1.CancelErr // ConflictErr indicates a conflict was encountered during evaluation. For // instance, a conflict occurs if a rule produces multiple, differing values // for the same key in an object. Conflict errors indicate the policy does // not account for the data loaded into the policy engine. - ConflictErr string = "eval_conflict_error" + ConflictErr = v1.ConflictErr // TypeErr indicates evaluation stopped because an expression was applied to // a value of an inappropriate type. - TypeErr string = "eval_type_error" + TypeErr = v1.TypeErr // BuiltinErr indicates a built-in function received a semantically invalid // input or encountered some kind of runtime error, e.g., connection // timeout, connection refused, etc. - BuiltinErr string = "eval_builtin_error" + BuiltinErr = v1.BuiltinErr // WithMergeErr indicates that the real and replacement data could not be merged. 
- WithMergeErr string = "eval_with_merge_error" + WithMergeErr = v1.WithMergeErr ) // IsError returns true if the err is an Error. func IsError(err error) bool { - var e *Error - return errors.As(err, &e) + return v1.IsError(err) } // IsCancel returns true if err was caused by cancellation. func IsCancel(err error) bool { - return errors.Is(err, &Error{Code: CancelErr}) -} - -// Is allows matching topdown errors using errors.Is (see IsCancel). -func (e *Error) Is(target error) bool { - var t *Error - if errors.As(target, &t) { - return (t.Code == "" || e.Code == t.Code) && - (t.Message == "" || e.Message == t.Message) && - (t.Location == nil || t.Location.Compare(e.Location) == 0) - } - return false -} - -func (e *Error) Error() string { - msg := fmt.Sprintf("%v: %v", e.Code, e.Message) - - if e.Location != nil { - msg = e.Location.String() + ": " + msg - } - - return msg -} - -func (e *Error) Wrap(err error) *Error { - e.err = err - return e -} - -func (e *Error) Unwrap() error { - return e.err -} - -func functionConflictErr(loc *ast.Location) error { - return &Error{ - Code: ConflictErr, - Location: loc, - Message: "functions must not produce multiple outputs for same inputs", - } -} - -func completeDocConflictErr(loc *ast.Location) error { - return &Error{ - Code: ConflictErr, - Location: loc, - Message: "complete rules must not produce multiple outputs", - } -} - -func objectDocKeyConflictErr(loc *ast.Location) error { - return &Error{ - Code: ConflictErr, - Location: loc, - Message: "object keys must be unique", - } -} - -func unsupportedBuiltinErr(loc *ast.Location) error { - return &Error{ - Code: InternalErr, - Location: loc, - Message: "unsupported built-in", - } -} - -func mergeConflictErr(loc *ast.Location) error { - return &Error{ - Code: WithMergeErr, - Location: loc, - Message: "real and replacement data could not be merged", - } -} - -func internalErr(loc *ast.Location, msg string) error { - return &Error{ - Code: InternalErr, - Location: loc, - Message: msg, - } + return v1.IsCancel(err) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/glob.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/glob.go deleted file mode 100644 index 116602db7..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/glob.go +++ /dev/null @@ -1,95 +0,0 @@ -package topdown - -import ( - "strings" - "sync" - - "github.com/gobwas/glob" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -const globCacheMaxSize = 100 - -var globCacheLock = sync.Mutex{} -var globCache map[string]glob.Glob - -func builtinGlobMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - pattern, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - var delimiters []rune - switch operands[1].Value.(type) { - case ast.Null: - delimiters = []rune{} - case *ast.Array: - delimiters, err = builtins.RuneSliceOperand(operands[1].Value, 2) - if err != nil { - return err - } - if len(delimiters) == 0 { - delimiters = []rune{'.'} - } - default: - return builtins.NewOperandTypeErr(2, operands[1].Value, "array", "null") - } - - match, err := builtins.StringOperand(operands[2].Value, 3) - if err != nil { - return err - } - - builder := strings.Builder{} - builder.WriteString(string(pattern)) - builder.WriteRune('-') - for _, v := range delimiters { - builder.WriteRune(v) - } - id := builder.String() - - m, err := globCompileAndMatch(id, string(pattern), string(match), 
delimiters) - if err != nil { - return err - } - return iter(ast.BooleanTerm(m)) -} - -func globCompileAndMatch(id, pattern, match string, delimiters []rune) (bool, error) { - globCacheLock.Lock() - defer globCacheLock.Unlock() - p, ok := globCache[id] - if !ok { - var err error - if p, err = glob.Compile(pattern, delimiters...); err != nil { - return false, err - } - if len(globCache) >= globCacheMaxSize { - // Delete a (semi-)random key to make room for the new one. - for k := range globCache { - delete(globCache, k) - break - } - } - globCache[id] = p - } - out := p.Match(match) - return out, nil -} - -func builtinGlobQuoteMeta(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - pattern, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - return iter(ast.StringTerm(glob.QuoteMeta(string(pattern)))) -} - -func init() { - globCache = map[string]glob.Glob{} - RegisterBuiltinFunc(ast.GlobMatch.Name, builtinGlobMatch) - RegisterBuiltinFunc(ast.GlobQuoteMeta.Name, builtinGlobQuoteMeta) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/graphql.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/graphql.go index 8fb1b58a7..0d6ebda0a 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/graphql.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/graphql.go @@ -16,8 +16,8 @@ import ( // Side-effecting import. Triggers GraphQL library's validation rule init() functions. _ "github.com/open-policy-agent/opa/internal/gqlparser/validator/rules" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) // Parses a GraphQL schema, and returns the GraphQL AST for the schema. @@ -295,7 +295,7 @@ func builtinGraphQLParseAndVerify(_ BuiltinContext, operands []*ast.Term, iter f var err error unverified := ast.ArrayTerm( - ast.BooleanTerm(false), + ast.InternedBooleanTerm(false), ast.NewTerm(ast.NewObject()), ast.NewTerm(ast.NewObject()), ) @@ -353,7 +353,7 @@ func builtinGraphQLParseAndVerify(_ BuiltinContext, operands []*ast.Term, iter f // Construct return value. verified := ast.ArrayTerm( - ast.BooleanTerm(true), + ast.InternedBooleanTerm(true), ast.NewTerm(queryResult), ast.NewTerm(querySchema), ) @@ -421,10 +421,10 @@ func builtinGraphQLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*as queryDoc, err = objectToQueryDocument(x) default: // Error if wrong type. - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } switch x := operands[1].Value.(type) { @@ -434,23 +434,23 @@ func builtinGraphQLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*as schemaDoc, err = objectToSchemaDocument(x) default: // Error if wrong type. - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } // Validate the query against the schema, erroring if there's an issue. 
schema, err := convertSchema(schemaDoc) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } if err := validateQuery(schema, queryDoc); err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } // If we got this far, the GraphQL query passed validation. - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) } func builtinGraphQLSchemaIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -464,15 +464,15 @@ func builtinGraphQLSchemaIsValid(_ BuiltinContext, operands []*ast.Term, iter fu schemaDoc, err = objectToSchemaDocument(x) default: // Error if wrong type. - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } // Validate the schema, this determines the result _, err = convertSchema(schemaDoc) - return iter(ast.BooleanTerm(err == nil)) + return iter(ast.InternedBooleanTerm(err == nil)) } func init() { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/http.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/http.go index 9d01bc14b..693ea4048 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/http.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/http.go @@ -5,1614 +5,13 @@ package topdown import ( - "bytes" - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "math" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "time" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) -type cachingMode string - const ( - defaultHTTPRequestTimeoutEnv = "HTTP_SEND_TIMEOUT" - defaultCachingMode cachingMode = "serialized" - cachingModeDeserialized cachingMode = "deserialized" -) - -var defaultHTTPRequestTimeout = time.Second * 5 - -var allowedKeyNames = [...]string{ - "method", - "url", - "body", - "enable_redirect", - "force_json_decode", - "force_yaml_decode", - "headers", - "raw_body", - "tls_use_system_certs", - "tls_ca_cert", - "tls_ca_cert_file", - "tls_ca_cert_env_variable", - "tls_client_cert", - "tls_client_cert_file", - "tls_client_cert_env_variable", - "tls_client_key", - "tls_client_key_file", - "tls_client_key_env_variable", - "tls_insecure_skip_verify", - "tls_server_name", - "timeout", - "cache", - "force_cache", - "force_cache_duration_seconds", - "raise_error", - "caching_mode", - "max_retry_attempts", - "cache_ignored_headers", -} - -// ref: https://www.rfc-editor.org/rfc/rfc7231#section-6.1 -var cacheableHTTPStatusCodes = [...]int{ - http.StatusOK, - http.StatusNonAuthoritativeInfo, - http.StatusNoContent, - http.StatusPartialContent, - http.StatusMultipleChoices, - http.StatusMovedPermanently, - http.StatusNotFound, - http.StatusMethodNotAllowed, - http.StatusGone, - http.StatusRequestURITooLong, - http.StatusNotImplemented, -} - -var ( - allowedKeys = ast.NewSet() - cacheableCodes = ast.NewSet() - requiredKeys = ast.NewSet(ast.StringTerm("method"), ast.StringTerm("url")) - httpSendLatencyMetricKey = "rego_builtin_" + strings.ReplaceAll(ast.HTTPSend.Name, ".", "_") - httpSendInterQueryCacheHits = 
httpSendLatencyMetricKey + "_interquery_cache_hits" -) - -type httpSendKey string - -const ( - // httpSendBuiltinCacheKey is the key in the builtin context cache that - // points to the http.send() specific cache resides at. - httpSendBuiltinCacheKey httpSendKey = "HTTP_SEND_CACHE_KEY" - // HTTPSendInternalErr represents a runtime evaluation error. - HTTPSendInternalErr string = "eval_http_send_internal_error" + HTTPSendInternalErr = v1.HTTPSendInternalErr // HTTPSendNetworkErr represents a network error. - HTTPSendNetworkErr string = "eval_http_send_network_error" - - // minRetryDelay is amount of time to backoff after the first failure. - minRetryDelay = time.Millisecond * 100 - - // maxRetryDelay is the upper bound of backoff delay. - maxRetryDelay = time.Second * 60 + HTTPSendNetworkErr = v1.HTTPSendNetworkErr ) - -func builtinHTTPSend(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - obj, err := builtins.ObjectOperand(operands[0].Value, 1) - if err != nil { - return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) - } - - raiseError, err := getRaiseErrorValue(obj) - if err != nil { - return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) - } - - req, err := validateHTTPRequestOperand(operands[0], 1) - if err != nil { - if raiseError { - return handleHTTPSendErr(bctx, err) - } - - return iter(generateRaiseErrorResult(handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err))) - } - - result, err := getHTTPResponse(bctx, req) - if err != nil { - if raiseError { - return handleHTTPSendErr(bctx, err) - } - - result = generateRaiseErrorResult(err) - } - return iter(result) -} - -func generateRaiseErrorResult(err error) *ast.Term { - obj := ast.NewObject() - obj.Insert(ast.StringTerm("status_code"), ast.IntNumberTerm(0)) - - errObj := ast.NewObject() - - switch err.(type) { - case *url.Error: - errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendNetworkErr)) - default: - errObj.Insert(ast.StringTerm("code"), ast.StringTerm(HTTPSendInternalErr)) - } - - errObj.Insert(ast.StringTerm("message"), ast.StringTerm(err.Error())) - obj.Insert(ast.StringTerm("error"), ast.NewTerm(errObj)) - - return ast.NewTerm(obj) -} - -func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) { - - bctx.Metrics.Timer(httpSendLatencyMetricKey).Start() - - key, err := getKeyFromRequest(req) - if err != nil { - return nil, err - } - - reqExecutor, err := newHTTPRequestExecutor(bctx, req, key) - if err != nil { - return nil, err - } - // Check if cache already has a response for this query - // set headers to exclude cache_ignored_headers - resp, err := reqExecutor.CheckCache() - if err != nil { - return nil, err - } - - if resp == nil { - httpResp, err := reqExecutor.ExecuteHTTPRequest() - if err != nil { - reqExecutor.InsertErrorIntoCache(err) - return nil, err - } - defer util.Close(httpResp) - // Add result to intra/inter-query cache. 
- resp, err = reqExecutor.InsertIntoCache(httpResp) - if err != nil { - return nil, err - } - } - - bctx.Metrics.Timer(httpSendLatencyMetricKey).Stop() - - return ast.NewTerm(resp), nil -} - -// getKeyFromRequest returns a key to be used for caching HTTP responses -// deletes headers from request object mentioned in cache_ignored_headers -func getKeyFromRequest(req ast.Object) (ast.Object, error) { - // deep copy so changes to key do not reflect in the request object - key := req.Copy() - cacheIgnoredHeadersTerm := req.Get(ast.StringTerm("cache_ignored_headers")) - allHeadersTerm := req.Get(ast.StringTerm("headers")) - // skip because no headers to delete - if cacheIgnoredHeadersTerm == nil || allHeadersTerm == nil { - // need to explicitly set cache_ignored_headers to null - // equivalent requests might have different sets of exclusion lists - key.Insert(ast.StringTerm("cache_ignored_headers"), ast.NullTerm()) - return key, nil - } - var cacheIgnoredHeaders []string - var allHeaders map[string]interface{} - err := ast.As(cacheIgnoredHeadersTerm.Value, &cacheIgnoredHeaders) - if err != nil { - return nil, err - } - err = ast.As(allHeadersTerm.Value, &allHeaders) - if err != nil { - return nil, err - } - for _, header := range cacheIgnoredHeaders { - delete(allHeaders, header) - } - val, err := ast.InterfaceToValue(allHeaders) - if err != nil { - return nil, err - } - key.Insert(ast.StringTerm("headers"), ast.NewTerm(val)) - // remove cache_ignored_headers key - key.Insert(ast.StringTerm("cache_ignored_headers"), ast.NullTerm()) - return key, nil -} - -func init() { - createAllowedKeys() - createCacheableHTTPStatusCodes() - initDefaults() - RegisterBuiltinFunc(ast.HTTPSend.Name, builtinHTTPSend) -} - -func handleHTTPSendErr(bctx BuiltinContext, err error) error { - // Return HTTP client timeout errors in a generic error message to avoid confusion about what happened. - // Do not do this if the builtin context was cancelled and is what caused the request to stop. - if urlErr, ok := err.(*url.Error); ok && urlErr.Timeout() && bctx.Context.Err() == nil { - err = fmt.Errorf("%s %s: request timed out", urlErr.Op, urlErr.URL) - } - if err := bctx.Context.Err(); err != nil { - return Halt{ - Err: &Error{ - Code: CancelErr, - Message: fmt.Sprintf("http.send: timed out (%s)", err.Error()), - }, - } - } - return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) -} - -func initDefaults() { - timeoutDuration := os.Getenv(defaultHTTPRequestTimeoutEnv) - if timeoutDuration != "" { - var err error - defaultHTTPRequestTimeout, err = time.ParseDuration(timeoutDuration) - if err != nil { - // If it is set to something not valid don't let the process continue in a state - // that will almost definitely give unexpected results by having it set at 0 - // which means no timeout.. - // This environment variable isn't considered part of the public API. - // TODO(patrick-east): Remove the environment variable - panic(fmt.Sprintf("invalid value for HTTP_SEND_TIMEOUT: %s", err)) - } - } -} - -func validateHTTPRequestOperand(term *ast.Term, pos int) (ast.Object, error) { - - obj, err := builtins.ObjectOperand(term.Value, pos) - if err != nil { - return nil, err - } - - requestKeys := ast.NewSet(obj.Keys()...) 
- - invalidKeys := requestKeys.Diff(allowedKeys) - if invalidKeys.Len() != 0 { - return nil, builtins.NewOperandErr(pos, "invalid request parameters(s): %v", invalidKeys) - } - - missingKeys := requiredKeys.Diff(requestKeys) - if missingKeys.Len() != 0 { - return nil, builtins.NewOperandErr(pos, "missing required request parameters(s): %v", missingKeys) - } - - return obj, nil - -} - -// canonicalizeHeaders returns a copy of the headers where the keys are in -// canonical HTTP form. -func canonicalizeHeaders(headers map[string]interface{}) map[string]interface{} { - canonicalized := map[string]interface{}{} - - for k, v := range headers { - canonicalized[http.CanonicalHeaderKey(k)] = v - } - - return canonicalized -} - -// useSocket examines the url for "unix://" and returns a *http.Transport with -// a DialContext that opens a socket (specified in the http call). -// The url is expected to contain socket=/path/to/socket (url encoded) -// Ex. "unix://localhost/end/point?socket=%2Ftmp%2Fhttp.sock" -func useSocket(rawURL string, tlsConfig *tls.Config) (bool, string, *http.Transport) { - u, err := url.Parse(rawURL) - if err != nil { - return false, "", nil - } - - if u.Scheme != "unix" || u.RawQuery == "" { - return false, rawURL, nil - } - - v, err := url.ParseQuery(u.RawQuery) - if err != nil { - return false, rawURL, nil - } - - // Rewrite URL targeting the UNIX domain socket. - u.Scheme = "http" - - // Extract the path to the socket. - // Only retrieve the first value. Subsequent values are ignored and removed - // to prevent HTTP parameter pollution. - socket := v.Get("socket") - v.Del("socket") - u.RawQuery = v.Encode() - - tr := http.DefaultTransport.(*http.Transport).Clone() - tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { - return http.DefaultTransport.(*http.Transport).DialContext(ctx, "unix", socket) - } - tr.TLSClientConfig = tlsConfig - tr.DisableKeepAlives = true - - return true, u.String(), tr -} - -func verifyHost(bctx BuiltinContext, host string) error { - if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil { - return nil - } - - for _, allowed := range bctx.Capabilities.AllowNet { - if allowed == host { - return nil - } - } - - return fmt.Errorf("unallowed host: %s", host) -} - -func verifyURLHost(bctx BuiltinContext, unverifiedURL string) error { - // Eager return to avoid unnecessary URL parsing - if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil { - return nil - } - - parsedURL, err := url.Parse(unverifiedURL) - if err != nil { - return err - } - - host := strings.Split(parsedURL.Host, ":")[0] - - return verifyHost(bctx, host) -} - -func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *http.Client, error) { - var url string - var method string - - // Additional CA certificates loading options. - var tlsCaCert []byte - var tlsCaCertEnvVar string - var tlsCaCertFile string - - // Client TLS certificate and key options. Each input source - // comes in a matched pair. 
- var tlsClientCert []byte - var tlsClientKey []byte - - var tlsClientCertEnvVar string - var tlsClientKeyEnvVar string - - var tlsClientCertFile string - var tlsClientKeyFile string - - var tlsServerName string - var body *bytes.Buffer - var rawBody *bytes.Buffer - var enableRedirect bool - var tlsUseSystemCerts *bool - var tlsConfig tls.Config - var customHeaders map[string]interface{} - var tlsInsecureSkipVerify bool - var timeout = defaultHTTPRequestTimeout - - for _, val := range obj.Keys() { - key, err := ast.JSON(val.Value) - if err != nil { - return nil, nil, err - } - - key = key.(string) - - var strVal string - - if s, ok := obj.Get(val).Value.(ast.String); ok { - strVal = strings.Trim(string(s), "\"") - } else { - // Most parameters are strings, so consolidate the type checking. - switch key { - case "method", - "url", - "raw_body", - "tls_ca_cert", - "tls_ca_cert_file", - "tls_ca_cert_env_variable", - "tls_client_cert", - "tls_client_cert_file", - "tls_client_cert_env_variable", - "tls_client_key", - "tls_client_key_file", - "tls_client_key_env_variable", - "tls_server_name": - return nil, nil, fmt.Errorf("%q must be a string", key) - } - } - - switch key { - case "method": - method = strings.ToUpper(strVal) - case "url": - err := verifyURLHost(bctx, strVal) - if err != nil { - return nil, nil, err - } - url = strVal - case "enable_redirect": - enableRedirect, err = strconv.ParseBool(obj.Get(val).String()) - if err != nil { - return nil, nil, err - } - case "body": - bodyVal := obj.Get(val).Value - bodyValInterface, err := ast.JSON(bodyVal) - if err != nil { - return nil, nil, err - } - - bodyValBytes, err := json.Marshal(bodyValInterface) - if err != nil { - return nil, nil, err - } - body = bytes.NewBuffer(bodyValBytes) - case "raw_body": - rawBody = bytes.NewBuffer([]byte(strVal)) - case "tls_use_system_certs": - tempTLSUseSystemCerts, err := strconv.ParseBool(obj.Get(val).String()) - if err != nil { - return nil, nil, err - } - tlsUseSystemCerts = &tempTLSUseSystemCerts - case "tls_ca_cert": - tlsCaCert = []byte(strVal) - case "tls_ca_cert_file": - tlsCaCertFile = strVal - case "tls_ca_cert_env_variable": - tlsCaCertEnvVar = strVal - case "tls_client_cert": - tlsClientCert = []byte(strVal) - case "tls_client_cert_file": - tlsClientCertFile = strVal - case "tls_client_cert_env_variable": - tlsClientCertEnvVar = strVal - case "tls_client_key": - tlsClientKey = []byte(strVal) - case "tls_client_key_file": - tlsClientKeyFile = strVal - case "tls_client_key_env_variable": - tlsClientKeyEnvVar = strVal - case "tls_server_name": - tlsServerName = strVal - case "headers": - headersVal := obj.Get(val).Value - headersValInterface, err := ast.JSON(headersVal) - if err != nil { - return nil, nil, err - } - var ok bool - customHeaders, ok = headersValInterface.(map[string]interface{}) - if !ok { - return nil, nil, fmt.Errorf("invalid type for headers key") - } - case "tls_insecure_skip_verify": - tlsInsecureSkipVerify, err = strconv.ParseBool(obj.Get(val).String()) - if err != nil { - return nil, nil, err - } - case "timeout": - timeout, err = parseTimeout(obj.Get(val).Value) - if err != nil { - return nil, nil, err - } - case "cache", "caching_mode", - "force_cache", "force_cache_duration_seconds", - "force_json_decode", "force_yaml_decode", - "raise_error", "max_retry_attempts", "cache_ignored_headers": // no-op - default: - return nil, nil, fmt.Errorf("invalid parameter %q", key) - } - } - - isTLS := false - client := &http.Client{ - Timeout: timeout, - CheckRedirect: 
func(*http.Request, []*http.Request) error { - return http.ErrUseLastResponse - }, - } - - if tlsInsecureSkipVerify { - isTLS = true - tlsConfig.InsecureSkipVerify = tlsInsecureSkipVerify - } - - if len(tlsClientCert) > 0 && len(tlsClientKey) > 0 { - cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey) - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - - if tlsClientCertFile != "" && tlsClientKeyFile != "" { - cert, err := tls.LoadX509KeyPair(tlsClientCertFile, tlsClientKeyFile) - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - - if tlsClientCertEnvVar != "" && tlsClientKeyEnvVar != "" { - cert, err := tls.X509KeyPair( - []byte(os.Getenv(tlsClientCertEnvVar)), - []byte(os.Getenv(tlsClientKeyEnvVar))) - if err != nil { - return nil, nil, fmt.Errorf("cannot extract public/private key pair from envvars %q, %q: %w", - tlsClientCertEnvVar, tlsClientKeyEnvVar, err) - } - - isTLS = true - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - - // Use system certs if no CA cert is provided - // or system certs flag is not set - if len(tlsCaCert) == 0 && tlsCaCertFile == "" && tlsCaCertEnvVar == "" && tlsUseSystemCerts == nil { - trueValue := true - tlsUseSystemCerts = &trueValue - } - - // Check the system certificates config first so that we - // load additional certificated into the correct pool. - if tlsUseSystemCerts != nil && *tlsUseSystemCerts && runtime.GOOS != "windows" { - pool, err := x509.SystemCertPool() - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.RootCAs = pool - } - - if len(tlsCaCert) != 0 { - tlsCaCert = bytes.Replace(tlsCaCert, []byte("\\n"), []byte("\n"), -1) - pool, err := addCACertsFromBytes(tlsConfig.RootCAs, tlsCaCert) - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.RootCAs = pool - } - - if tlsCaCertFile != "" { - pool, err := addCACertsFromFile(tlsConfig.RootCAs, tlsCaCertFile) - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.RootCAs = pool - } - - if tlsCaCertEnvVar != "" { - pool, err := addCACertsFromEnv(tlsConfig.RootCAs, tlsCaCertEnvVar) - if err != nil { - return nil, nil, err - } - - isTLS = true - tlsConfig.RootCAs = pool - } - - if isTLS { - if ok, parsedURL, tr := useSocket(url, &tlsConfig); ok { - client.Transport = tr - url = parsedURL - } else { - tr := http.DefaultTransport.(*http.Transport).Clone() - tr.TLSClientConfig = &tlsConfig - tr.DisableKeepAlives = true - client.Transport = tr - } - } else { - if ok, parsedURL, tr := useSocket(url, nil); ok { - client.Transport = tr - url = parsedURL - } - } - - // check if redirects are enabled - if enableRedirect { - client.CheckRedirect = func(req *http.Request, _ []*http.Request) error { - return verifyURLHost(bctx, req.URL.String()) - } - } - - if rawBody != nil { - body = rawBody - } else if body == nil { - body = bytes.NewBufferString("") - } - - // create the http request, use the builtin context's context to ensure - // the request is cancelled if evaluation is cancelled. 
- req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, nil, err - } - - req = req.WithContext(bctx.Context) - - // Add custom headers - if len(customHeaders) != 0 { - customHeaders = canonicalizeHeaders(customHeaders) - - for k, v := range customHeaders { - header, ok := v.(string) - if !ok { - return nil, nil, fmt.Errorf("invalid type for headers value %q", v) - } - - req.Header.Add(k, header) - } - - // Don't overwrite or append to one that was set in the custom headers - if _, hasUA := customHeaders["User-Agent"]; !hasUA { - req.Header.Add("User-Agent", version.UserAgent) - } - - // If the caller specifies the Host header, use it for the HTTP - // request host and the TLS server name. - if host, hasHost := customHeaders["Host"]; hasHost { - host := host.(string) // We already checked that it's a string. - req.Host = host - - // Only default the ServerName if the caller has - // specified the host. If we don't specify anything, - // Go will default to the target hostname. This name - // is not the same as the default that Go populates - // `req.Host` with, which is why we don't just set - // this unconditionally. - tlsConfig.ServerName = host - } - } - - if tlsServerName != "" { - tlsConfig.ServerName = tlsServerName - } - - if len(bctx.DistributedTracingOpts) > 0 { - client.Transport = tracing.NewTransport(client.Transport, bctx.DistributedTracingOpts) - } - - return req, client, nil -} - -func executeHTTPRequest(req *http.Request, client *http.Client, inputReqObj ast.Object) (*http.Response, error) { - var err error - var retry int - - retry, err = getNumberValFromReqObj(inputReqObj, ast.StringTerm("max_retry_attempts")) - if err != nil { - return nil, err - } - - for i := 0; true; i++ { - - var resp *http.Response - resp, err = client.Do(req) - if err == nil { - return resp, nil - } - - // final attempt - if i == retry { - break - } - - if err == context.Canceled { - return nil, err - } - - select { - case <-time.After(util.DefaultBackoff(float64(minRetryDelay), float64(maxRetryDelay), i)): - case <-req.Context().Done(): - return nil, context.Canceled - } - } - return nil, err -} - -func isContentType(header http.Header, typ ...string) bool { - for _, t := range typ { - if strings.Contains(header.Get("Content-Type"), t) { - return true - } - } - return false -} - -type httpSendCacheEntry struct { - response *ast.Value - error error -} - -// The httpSendCache is used for intra-query caching of http.send results. -type httpSendCache struct { - entries *util.HashMap -} - -func newHTTPSendCache() *httpSendCache { - return &httpSendCache{ - entries: util.NewHashMap(valueEq, valueHash), - } -} - -func valueHash(v util.T) int { - return ast.StringTerm(v.(ast.Value).String()).Hash() -} - -func valueEq(a, b util.T) bool { - av := a.(ast.Value) - bv := b.(ast.Value) - return av.String() == bv.String() -} - -func (cache *httpSendCache) get(k ast.Value) *httpSendCacheEntry { - if v, ok := cache.entries.Get(k); ok { - v := v.(httpSendCacheEntry) - return &v - } - return nil -} - -func (cache *httpSendCache) putResponse(k ast.Value, v *ast.Value) { - cache.entries.Put(k, httpSendCacheEntry{response: v}) -} - -func (cache *httpSendCache) putError(k ast.Value, v error) { - cache.entries.Put(k, httpSendCacheEntry{error: v}) -} - -// In the BuiltinContext cache we only store a single entry that points to -// our ValueMap which is the "real" http.send() cache. 
-func getHTTPSendCache(bctx BuiltinContext) *httpSendCache { - raw, ok := bctx.Cache.Get(httpSendBuiltinCacheKey) - if !ok { - // Initialize if it isn't there - c := newHTTPSendCache() - bctx.Cache.Put(httpSendBuiltinCacheKey, c) - return c - } - - c, ok := raw.(*httpSendCache) - if !ok { - return nil - } - return c -} - -// checkHTTPSendCache checks for the given key's value in the cache -func checkHTTPSendCache(bctx BuiltinContext, key ast.Object) (ast.Value, error) { - requestCache := getHTTPSendCache(bctx) - if requestCache == nil { - return nil, nil - } - - v := requestCache.get(key) - if v != nil { - if v.error != nil { - return nil, v.error - } - if v.response != nil { - return *v.response, nil - } - // This should never happen - } - - return nil, nil -} - -func insertIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, value ast.Value) { - requestCache := getHTTPSendCache(bctx) - if requestCache == nil { - // Should never happen.. if it does just skip caching the value - // FIXME: return error instead, to prevent inconsistencies? - return - } - requestCache.putResponse(key, &value) -} - -func insertErrorIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, err error) { - requestCache := getHTTPSendCache(bctx) - if requestCache == nil { - // Should never happen.. if it does just skip caching the value - // FIXME: return error instead, to prevent inconsistencies? - return - } - requestCache.putError(key, err) -} - -// checkHTTPSendInterQueryCache checks for the given key's value in the inter-query cache -func (c *interQueryCache) checkHTTPSendInterQueryCache() (ast.Value, error) { - requestCache := c.bctx.InterQueryBuiltinCache - - cachedValue, found := requestCache.Get(c.key) - if !found { - return nil, nil - } - - value, cerr := requestCache.Clone(cachedValue) - if cerr != nil { - return nil, handleHTTPSendErr(c.bctx, cerr) - } - - c.bctx.Metrics.Counter(httpSendInterQueryCacheHits).Incr() - var cachedRespData *interQueryCacheData - - switch v := value.(type) { - case *interQueryCacheValue: - var err error - cachedRespData, err = v.copyCacheData() - if err != nil { - return nil, err - } - case *interQueryCacheData: - cachedRespData = v - default: - return nil, nil - } - - if getCurrentTime(c.bctx).Before(cachedRespData.ExpiresAt) { - return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode) - } - - var err error - c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.key) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - headers := parseResponseHeaders(cachedRespData.Headers) - - // check with the server if the stale response is still up-to-date. - // If server returns a new response (ie. status_code=200), update the cache with the new response - // If server returns an unmodified response (ie. 
status_code=304), update the headers for the existing response - result, modified, err := revalidateCachedResponse(c.httpReq, c.httpClient, c.key, headers) - requestCache.Delete(c.key) - if err != nil || result == nil { - return nil, err - } - - defer result.Body.Close() - - if !modified { - // update the headers in the cached response with their corresponding values from the 304 (Not Modified) response - for headerName, values := range result.Header { - cachedRespData.Headers.Del(headerName) - for _, v := range values { - cachedRespData.Headers.Add(headerName, v) - } - } - - if forceCaching(c.forceCacheParams) { - createdAt := getCurrentTime(c.bctx) - cachedRespData.ExpiresAt = createdAt.Add(time.Second * time.Duration(c.forceCacheParams.forceCacheDurationSeconds)) - } else { - expiresAt, err := expiryFromHeaders(result.Header) - if err != nil { - return nil, err - } - cachedRespData.ExpiresAt = expiresAt - } - - cachingMode, err := getCachingMode(c.key) - if err != nil { - return nil, err - } - - var pcv cache.InterQueryCacheValue - - if cachingMode == defaultCachingMode { - pcv, err = cachedRespData.toCacheValue() - if err != nil { - return nil, err - } - } else { - pcv = cachedRespData - } - - c.bctx.InterQueryBuiltinCache.InsertWithExpiry(c.key, pcv, cachedRespData.ExpiresAt) - - return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode) - } - - newValue, respBody, err := formatHTTPResponseToAST(result, c.forceJSONDecode, c.forceYAMLDecode) - if err != nil { - return nil, err - } - - if err := insertIntoHTTPSendInterQueryCache(c.bctx, c.key, result, respBody, c.forceCacheParams); err != nil { - return nil, err - } - - return newValue, nil -} - -// insertIntoHTTPSendInterQueryCache inserts given key and value in the inter-query cache -func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) error { - if resp == nil || (!forceCaching(cacheParams) && !canStore(resp.Header)) || !cacheableCodes.Contains(ast.IntNumberTerm(resp.StatusCode)) { - return nil - } - - requestCache := bctx.InterQueryBuiltinCache - - obj, ok := key.(ast.Object) - if !ok { - return fmt.Errorf("interface conversion error") - } - - cachingMode, err := getCachingMode(obj) - if err != nil { - return err - } - - var pcv cache.InterQueryCacheValue - var pcvData *interQueryCacheData - if cachingMode == defaultCachingMode { - pcv, pcvData, err = newInterQueryCacheValue(bctx, resp, respBody, cacheParams) - } else { - pcvData, err = newInterQueryCacheData(bctx, resp, respBody, cacheParams) - pcv = pcvData - } - - if err != nil { - return err - } - - requestCache.InsertWithExpiry(key, pcv, pcvData.ExpiresAt) - return nil -} - -func createAllowedKeys() { - for _, element := range allowedKeyNames { - allowedKeys.Add(ast.StringTerm(element)) - } -} - -func createCacheableHTTPStatusCodes() { - for _, element := range cacheableHTTPStatusCodes { - cacheableCodes.Add(ast.IntNumberTerm(element)) - } -} - -func parseTimeout(timeoutVal ast.Value) (time.Duration, error) { - var timeout time.Duration - switch t := timeoutVal.(type) { - case ast.Number: - timeoutInt, ok := t.Int64() - if !ok { - return timeout, fmt.Errorf("invalid timeout number value %v, must be int64", timeoutVal) - } - return time.Duration(timeoutInt), nil - case ast.String: - // Support strings without a unit, treat them the same as just a number value (ns) - var err error - timeoutInt, err := strconv.ParseInt(string(t), 10, 64) - if err == nil { - return 
time.Duration(timeoutInt), nil - } - - // Try parsing it as a duration (requires a supported units suffix) - timeout, err = time.ParseDuration(string(t)) - if err != nil { - return timeout, fmt.Errorf("invalid timeout value %v: %s", timeoutVal, err) - } - return timeout, nil - default: - return timeout, builtins.NewOperandErr(1, "'timeout' must be one of {string, number} but got %s", ast.TypeName(t)) - } -} - -func getBoolValFromReqObj(req ast.Object, key *ast.Term) (bool, error) { - var b ast.Boolean - var ok bool - if v := req.Get(key); v != nil { - if b, ok = v.Value.(ast.Boolean); !ok { - return false, fmt.Errorf("invalid value for %v field", key.String()) - } - } - return bool(b), nil -} - -func getNumberValFromReqObj(req ast.Object, key *ast.Term) (int, error) { - term := req.Get(key) - if term == nil { - return 0, nil - } - - if t, ok := term.Value.(ast.Number); ok { - num, ok := t.Int() - if !ok || num < 0 { - return 0, fmt.Errorf("invalid value %v for field %v", t.String(), key.String()) - } - return num, nil - } - - return 0, fmt.Errorf("invalid value %v for field %v", term.String(), key.String()) -} - -func getCachingMode(req ast.Object) (cachingMode, error) { - key := ast.StringTerm("caching_mode") - var s ast.String - var ok bool - if v := req.Get(key); v != nil { - if s, ok = v.Value.(ast.String); !ok { - return "", fmt.Errorf("invalid value for %v field", key.String()) - } - - switch cachingMode(s) { - case defaultCachingMode, cachingModeDeserialized: - return cachingMode(s), nil - default: - return "", fmt.Errorf("invalid value specified for %v field: %v", key.String(), string(s)) - } - } - return defaultCachingMode, nil -} - -type interQueryCacheValue struct { - Data []byte -} - -func newInterQueryCacheValue(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheValue, *interQueryCacheData, error) { - data, err := newInterQueryCacheData(bctx, resp, respBody, cacheParams) - if err != nil { - return nil, nil, err - } - - b, err := json.Marshal(data) - if err != nil { - return nil, nil, err - } - return &interQueryCacheValue{Data: b}, data, nil -} - -func (cb interQueryCacheValue) Clone() (cache.InterQueryCacheValue, error) { - dup := make([]byte, len(cb.Data)) - copy(dup, cb.Data) - return &interQueryCacheValue{Data: dup}, nil -} - -func (cb interQueryCacheValue) SizeInBytes() int64 { - return int64(len(cb.Data)) -} - -func (cb *interQueryCacheValue) copyCacheData() (*interQueryCacheData, error) { - var res interQueryCacheData - err := util.UnmarshalJSON(cb.Data, &res) - if err != nil { - return nil, err - } - return &res, nil -} - -type interQueryCacheData struct { - RespBody []byte - Status string - StatusCode int - Headers http.Header - ExpiresAt time.Time -} - -func forceCaching(cacheParams *forceCacheParams) bool { - return cacheParams != nil && cacheParams.forceCacheDurationSeconds > 0 -} - -func expiryFromHeaders(headers http.Header) (time.Time, error) { - var expiresAt time.Time - maxAge, err := parseMaxAgeCacheDirective(parseCacheControlHeader(headers)) - if err != nil { - return time.Time{}, err - } - if maxAge != -1 { - createdAt, err := getResponseHeaderDate(headers) - if err != nil { - return time.Time{}, err - } - expiresAt = createdAt.Add(time.Second * time.Duration(maxAge)) - } else { - expiresAt = getResponseHeaderExpires(headers) - } - return expiresAt, nil -} - -func newInterQueryCacheData(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheData, 
error) { - var expiresAt time.Time - - if forceCaching(cacheParams) { - createdAt := getCurrentTime(bctx) - expiresAt = createdAt.Add(time.Second * time.Duration(cacheParams.forceCacheDurationSeconds)) - } else { - var err error - expiresAt, err = expiryFromHeaders(resp.Header) - if err != nil { - return nil, err - } - } - - cv := interQueryCacheData{ - ExpiresAt: expiresAt, - RespBody: respBody, - Status: resp.Status, - StatusCode: resp.StatusCode, - Headers: resp.Header} - - return &cv, nil -} - -func (c *interQueryCacheData) formatToAST(forceJSONDecode, forceYAMLDecode bool) (ast.Value, error) { - return prepareASTResult(c.Headers, forceJSONDecode, forceYAMLDecode, c.RespBody, c.Status, c.StatusCode) -} - -func (c *interQueryCacheData) toCacheValue() (*interQueryCacheValue, error) { - b, err := json.Marshal(c) - if err != nil { - return nil, err - } - return &interQueryCacheValue{Data: b}, nil -} - -func (c *interQueryCacheData) SizeInBytes() int64 { - return 0 -} - -func (c *interQueryCacheData) Clone() (cache.InterQueryCacheValue, error) { - dup := make([]byte, len(c.RespBody)) - copy(dup, c.RespBody) - - return &interQueryCacheData{ - ExpiresAt: c.ExpiresAt, - RespBody: dup, - Status: c.Status, - StatusCode: c.StatusCode, - Headers: c.Headers.Clone()}, nil -} - -type responseHeaders struct { - etag string // identifier for a specific version of the response - lastModified string // date and time response was last modified as per origin server -} - -// deltaSeconds specifies a non-negative integer, representing -// time in seconds: http://tools.ietf.org/html/rfc7234#section-1.2.1 -type deltaSeconds int32 - -func parseResponseHeaders(headers http.Header) *responseHeaders { - result := responseHeaders{} - - result.etag = headers.Get("etag") - - result.lastModified = headers.Get("last-modified") - - return &result -} - -func revalidateCachedResponse(req *http.Request, client *http.Client, inputReqObj ast.Object, headers *responseHeaders) (*http.Response, bool, error) { - etag := headers.etag - lastModified := headers.lastModified - - if etag == "" && lastModified == "" { - return nil, false, nil - } - - cloneReq := req.Clone(req.Context()) - - if etag != "" { - cloneReq.Header.Set("if-none-match", etag) - } - - if lastModified != "" { - cloneReq.Header.Set("if-modified-since", lastModified) - } - - response, err := executeHTTPRequest(cloneReq, client, inputReqObj) - if err != nil { - return nil, false, err - } - - switch response.StatusCode { - case http.StatusOK: - return response, true, nil - - case http.StatusNotModified: - return response, false, nil - } - util.Close(response) - return nil, false, nil -} - -func canStore(headers http.Header) bool { - ccHeaders := parseCacheControlHeader(headers) - - // Check "no-store" cache directive - // The "no-store" response directive indicates that a cache MUST NOT - // store any part of either the immediate request or response. 
- if _, ok := ccHeaders["no-store"]; ok { - return false - } - return true -} - -func getCurrentTime(bctx BuiltinContext) time.Time { - var current time.Time - - value, err := ast.JSON(bctx.Time.Value) - if err != nil { - return current - } - - valueNum, ok := value.(json.Number) - if !ok { - return current - } - - valueNumInt, err := valueNum.Int64() - if err != nil { - return current - } - - current = time.Unix(0, valueNumInt).UTC() - return current -} - -func parseCacheControlHeader(headers http.Header) map[string]string { - ccDirectives := map[string]string{} - ccHeader := headers.Get("cache-control") - - for _, part := range strings.Split(ccHeader, ",") { - part = strings.Trim(part, " ") - if part == "" { - continue - } - if strings.ContainsRune(part, '=') { - items := strings.Split(part, "=") - if len(items) != 2 { - continue - } - ccDirectives[strings.Trim(items[0], " ")] = strings.Trim(items[1], ",") - } else { - ccDirectives[part] = "" - } - } - - return ccDirectives -} - -func getResponseHeaderDate(headers http.Header) (date time.Time, err error) { - dateHeader := headers.Get("date") - if dateHeader == "" { - err = fmt.Errorf("no date header") - return - } - return http.ParseTime(dateHeader) -} - -func getResponseHeaderExpires(headers http.Header) time.Time { - expiresHeader := headers.Get("expires") - if expiresHeader == "" { - return time.Time{} - } - - date, err := http.ParseTime(expiresHeader) - if err != nil { - // servers can set `Expires: 0` which is an invalid date to indicate expired content - return time.Time{} - } - - return date -} - -// parseMaxAgeCacheDirective parses the max-age directive expressed in delta-seconds as per -// https://tools.ietf.org/html/rfc7234#section-1.2.1 -func parseMaxAgeCacheDirective(cc map[string]string) (deltaSeconds, error) { - maxAge, ok := cc["max-age"] - if !ok { - return deltaSeconds(-1), nil - } - - val, err := strconv.ParseUint(maxAge, 10, 32) - if err != nil { - if numError, ok := err.(*strconv.NumError); ok { - if numError.Err == strconv.ErrRange { - return deltaSeconds(math.MaxInt32), nil - } - } - return deltaSeconds(-1), err - } - - if val > math.MaxInt32 { - return deltaSeconds(math.MaxInt32), nil - } - return deltaSeconds(val), nil -} - -func formatHTTPResponseToAST(resp *http.Response, forceJSONDecode, forceYAMLDecode bool) (ast.Value, []byte, error) { - - resultRawBody, err := io.ReadAll(resp.Body) - if err != nil { - return nil, nil, err - } - - resultObj, err := prepareASTResult(resp.Header, forceJSONDecode, forceYAMLDecode, resultRawBody, resp.Status, resp.StatusCode) - if err != nil { - return nil, nil, err - } - - return resultObj, resultRawBody, nil -} - -func prepareASTResult(headers http.Header, forceJSONDecode, forceYAMLDecode bool, body []byte, status string, statusCode int) (ast.Value, error) { - var resultBody interface{} - - // If the response body cannot be JSON/YAML decoded, - // an error will not be returned. Instead, the "body" field - // in the result will be null. 
- switch { - case forceJSONDecode || isContentType(headers, "application/json"): - _ = util.UnmarshalJSON(body, &resultBody) - case forceYAMLDecode || isContentType(headers, "application/yaml", "application/x-yaml"): - _ = util.Unmarshal(body, &resultBody) - } - - result := make(map[string]interface{}) - result["status"] = status - result["status_code"] = statusCode - result["body"] = resultBody - result["raw_body"] = string(body) - result["headers"] = getResponseHeaders(headers) - - resultObj, err := ast.InterfaceToValue(result) - if err != nil { - return nil, err - } - - return resultObj, nil -} - -func getResponseHeaders(headers http.Header) map[string]interface{} { - respHeaders := map[string]interface{}{} - for headerName, values := range headers { - var respValues []interface{} - for _, v := range values { - respValues = append(respValues, v) - } - respHeaders[strings.ToLower(headerName)] = respValues - } - return respHeaders -} - -// httpRequestExecutor defines an interface for the http send cache -type httpRequestExecutor interface { - CheckCache() (ast.Value, error) - InsertIntoCache(value *http.Response) (ast.Value, error) - InsertErrorIntoCache(err error) - ExecuteHTTPRequest() (*http.Response, error) -} - -// newHTTPRequestExecutor returns a new HTTP request executor that wraps either an inter-query or -// intra-query cache implementation -func newHTTPRequestExecutor(bctx BuiltinContext, req ast.Object, key ast.Object) (httpRequestExecutor, error) { - useInterQueryCache, forceCacheParams, err := useInterQueryCache(req) - if err != nil { - return nil, handleHTTPSendErr(bctx, err) - } - - if useInterQueryCache && bctx.InterQueryBuiltinCache != nil { - return newInterQueryCache(bctx, req, key, forceCacheParams) - } - return newIntraQueryCache(bctx, req, key) -} - -type interQueryCache struct { - bctx BuiltinContext - req ast.Object - key ast.Object - httpReq *http.Request - httpClient *http.Client - forceJSONDecode bool - forceYAMLDecode bool - forceCacheParams *forceCacheParams -} - -func newInterQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object, forceCacheParams *forceCacheParams) (*interQueryCache, error) { - return &interQueryCache{bctx: bctx, req: req, key: key, forceCacheParams: forceCacheParams}, nil -} - -// CheckCache checks the cache for the value of the key set on this object -func (c *interQueryCache) CheckCache() (ast.Value, error) { - var err error - - // Checking the intra-query cache first ensures consistency of errors and HTTP responses within a query. - resp, err := checkHTTPSendCache(c.bctx, c.key) - if err != nil { - return nil, err - } - if resp != nil { - return resp, nil - } - - c.forceJSONDecode, err = getBoolValFromReqObj(c.key, ast.StringTerm("force_json_decode")) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - c.forceYAMLDecode, err = getBoolValFromReqObj(c.key, ast.StringTerm("force_yaml_decode")) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - resp, err = c.checkHTTPSendInterQueryCache() - // Always insert the result of the inter-query cache into the intra-query cache, to maintain consistency within the same query. 
- if err != nil { - insertErrorIntoHTTPSendCache(c.bctx, c.key, err) - } - if resp != nil { - insertIntoHTTPSendCache(c.bctx, c.key, resp) - } - return resp, err -} - -// InsertIntoCache inserts the key set on this object into the cache with the given value -func (c *interQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) { - result, respBody, err := formatHTTPResponseToAST(value, c.forceJSONDecode, c.forceYAMLDecode) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - // Always insert into the intra-query cache, to maintain consistency within the same query. - insertIntoHTTPSendCache(c.bctx, c.key, result) - - // We ignore errors when populating the inter-query cache, because we've already populated the intra-cache, - // and query consistency is our primary concern. - _ = insertIntoHTTPSendInterQueryCache(c.bctx, c.key, value, respBody, c.forceCacheParams) - return result, nil -} - -func (c *interQueryCache) InsertErrorIntoCache(err error) { - insertErrorIntoHTTPSendCache(c.bctx, c.key, err) -} - -// ExecuteHTTPRequest executes a HTTP request -func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) { - var err error - c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.req) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - return executeHTTPRequest(c.httpReq, c.httpClient, c.req) -} - -type intraQueryCache struct { - bctx BuiltinContext - req ast.Object - key ast.Object -} - -func newIntraQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object) (*intraQueryCache, error) { - return &intraQueryCache{bctx: bctx, req: req, key: key}, nil -} - -// CheckCache checks the cache for the value of the key set on this object -func (c *intraQueryCache) CheckCache() (ast.Value, error) { - return checkHTTPSendCache(c.bctx, c.key) -} - -// InsertIntoCache inserts the key set on this object into the cache with the given value -func (c *intraQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) { - forceJSONDecode, err := getBoolValFromReqObj(c.key, ast.StringTerm("force_json_decode")) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - forceYAMLDecode, err := getBoolValFromReqObj(c.key, ast.StringTerm("force_yaml_decode")) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - result, _, err := formatHTTPResponseToAST(value, forceJSONDecode, forceYAMLDecode) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - - if cacheableCodes.Contains(ast.IntNumberTerm(value.StatusCode)) { - insertIntoHTTPSendCache(c.bctx, c.key, result) - } - - return result, nil -} - -func (c *intraQueryCache) InsertErrorIntoCache(err error) { - insertErrorIntoHTTPSendCache(c.bctx, c.key, err) -} - -// ExecuteHTTPRequest executes a HTTP request -func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) { - httpReq, httpClient, err := createHTTPRequest(c.bctx, c.req) - if err != nil { - return nil, handleHTTPSendErr(c.bctx, err) - } - return executeHTTPRequest(httpReq, httpClient, c.req) -} - -func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) { - value, err := getBoolValFromReqObj(req, ast.StringTerm("cache")) - if err != nil { - return false, nil, err - } - - valueForceCache, err := getBoolValFromReqObj(req, ast.StringTerm("force_cache")) - if err != nil { - return false, nil, err - } - - if valueForceCache { - forceCacheParams, err := newForceCacheParams(req) - return true, forceCacheParams, err - } - - return value, nil, nil -} - -type 
forceCacheParams struct { - forceCacheDurationSeconds int32 -} - -func newForceCacheParams(req ast.Object) (*forceCacheParams, error) { - term := req.Get(ast.StringTerm("force_cache_duration_seconds")) - if term == nil { - return nil, fmt.Errorf("'force_cache' set but 'force_cache_duration_seconds' parameter is missing") - } - - forceCacheDurationSeconds := term.String() - - value, err := strconv.ParseInt(forceCacheDurationSeconds, 10, 32) - if err != nil { - return nil, err - } - - return &forceCacheParams{forceCacheDurationSeconds: int32(value)}, nil -} - -func getRaiseErrorValue(req ast.Object) (bool, error) { - result := ast.Boolean(true) - var ok bool - if v := req.Get(ast.StringTerm("raise_error")); v != nil { - if result, ok = v.Value.(ast.Boolean); !ok { - return false, fmt.Errorf("invalid value for raise_error field") - } - } - return bool(result), nil -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go index 6eacc338e..845f8da61 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/instrumentation.go @@ -4,60 +4,18 @@ package topdown -import "github.com/open-policy-agent/opa/metrics" - -const ( - evalOpPlug = "eval_op_plug" - evalOpResolve = "eval_op_resolve" - evalOpRuleIndex = "eval_op_rule_index" - evalOpBuiltinCall = "eval_op_builtin_call" - evalOpVirtualCacheHit = "eval_op_virtual_cache_hit" - evalOpVirtualCacheMiss = "eval_op_virtual_cache_miss" - evalOpBaseCacheHit = "eval_op_base_cache_hit" - evalOpBaseCacheMiss = "eval_op_base_cache_miss" - evalOpComprehensionCacheSkip = "eval_op_comprehension_cache_skip" - evalOpComprehensionCacheBuild = "eval_op_comprehension_cache_build" - evalOpComprehensionCacheHit = "eval_op_comprehension_cache_hit" - evalOpComprehensionCacheMiss = "eval_op_comprehension_cache_miss" - partialOpSaveUnify = "partial_op_save_unify" - partialOpSaveSetContains = "partial_op_save_set_contains" - partialOpSaveSetContainsRec = "partial_op_save_set_contains_rec" - partialOpCopyPropagation = "partial_op_copy_propagation" +import ( + "github.com/open-policy-agent/opa/v1/metrics" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // Instrumentation implements helper functions to instrument query evaluation // to diagnose performance issues. Instrumentation may be expensive in some // cases, so it is disabled by default. -type Instrumentation struct { - m metrics.Metrics -} +type Instrumentation = v1.Instrumentation // NewInstrumentation returns a new Instrumentation object. Performance // diagnostics recorded on this Instrumentation object will stored in m. 
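Since Instrumentation is now an alias for the v1 type, existing callers that wire it up through the legacy topdown import path keep working unchanged. The sketch below is illustrative only: it assumes the deprecated v0 import paths (which, per the hunks in this diff, now delegate to github.com/open-policy-agent/opa/v1), uses a trivial query, and the exact entries recorded depend on what the evaluator actually does.

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/metrics"
	"github.com/open-policy-agent/opa/topdown"
)

func main() {
	ctx := context.Background()

	// Performance diagnostics are recorded into this metrics store.
	m := metrics.New()
	instr := topdown.NewInstrumentation(m)

	// Attach both the metrics store and the instrumentation to a query.
	q := topdown.NewQuery(ast.MustParseBody(`x = 1`)).
		WithMetrics(m).
		WithInstrumentation(instr)

	if _, err := q.Run(ctx); err != nil {
		panic(err)
	}

	// With instrumentation enabled this may include eval_op_* timers and
	// counters in addition to the standard rego_* timers.
	fmt.Println(m.All())
}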
func NewInstrumentation(m metrics.Metrics) *Instrumentation { - return &Instrumentation{ - m: m, - } -} - -func (instr *Instrumentation) startTimer(name string) { - if instr == nil { - return - } - instr.m.Timer(name).Start() -} - -func (instr *Instrumentation) stopTimer(name string) { - if instr == nil { - return - } - delta := instr.m.Timer(name).Stop() - instr.m.Histogram(name).Update(delta) -} - -func (instr *Instrumentation) counterIncr(name string) { - if instr == nil { - return - } - instr.m.Counter(name).Incr() + return v1.NewInstrumentation(m) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/json.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/json.go deleted file mode 100644 index 8a5d23283..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/json.go +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright 2019 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "fmt" - "strconv" - "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" - - "github.com/open-policy-agent/opa/internal/edittree" -) - -func builtinJSONRemove(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Expect an object and a string or array/set of strings - _, err := builtins.ObjectOperand(operands[0].Value, 1) - if err != nil { - return err - } - - // Build a list of json pointers to remove - paths, err := getJSONPaths(operands[1].Value) - if err != nil { - return err - } - - newObj, err := jsonRemove(operands[0], ast.NewTerm(pathsToObject(paths))) - if err != nil { - return err - } - - if newObj == nil { - return nil - } - - return iter(newObj) -} - -// jsonRemove returns a new term that is the result of walking -// through a and omitting removing any values that are in b but -// have ast.Null values (ie leaf nodes for b). -func jsonRemove(a *ast.Term, b *ast.Term) (*ast.Term, error) { - if b == nil { - // The paths diverged, return a - return a, nil - } - - var bObj ast.Object - switch bValue := b.Value.(type) { - case ast.Object: - bObj = bValue - case ast.Null: - // Means we hit a leaf node on "b", dont add the value for a - return nil, nil - default: - // The paths diverged, return a - return a, nil - } - - switch aValue := a.Value.(type) { - case ast.String, ast.Number, ast.Boolean, ast.Null: - return a, nil - case ast.Object: - newObj := ast.NewObject() - err := aValue.Iter(func(k *ast.Term, v *ast.Term) error { - // recurse and add the diff of sub objects as needed - diffValue, err := jsonRemove(v, bObj.Get(k)) - if err != nil || diffValue == nil { - return err - } - newObj.Insert(k, diffValue) - return nil - }) - if err != nil { - return nil, err - } - return ast.NewTerm(newObj), nil - case ast.Set: - newSet := ast.NewSet() - err := aValue.Iter(func(v *ast.Term) error { - // recurse and add the diff of sub objects as needed - diffValue, err := jsonRemove(v, bObj.Get(v)) - if err != nil || diffValue == nil { - return err - } - newSet.Add(diffValue) - return nil - }) - if err != nil { - return nil, err - } - return ast.NewTerm(newSet), nil - case *ast.Array: - // When indexes are removed we shift left to close empty spots in the array - // as per the JSON patch spec. 
- newArray := ast.NewArray() - for i := 0; i < aValue.Len(); i++ { - v := aValue.Elem(i) - // recurse and add the diff of sub objects as needed - // Note: Keys in b will be strings for the index, eg path /a/1/b => {"a": {"1": {"b": null}}} - diffValue, err := jsonRemove(v, bObj.Get(ast.StringTerm(strconv.Itoa(i)))) - if err != nil { - return nil, err - } - if diffValue != nil { - newArray = newArray.Append(diffValue) - } - } - return ast.NewTerm(newArray), nil - default: - return nil, fmt.Errorf("invalid value type %T", a) - } -} - -func builtinJSONFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Ensure we have the right parameters, expect an object and a string or array/set of strings - obj, err := builtins.ObjectOperand(operands[0].Value, 1) - if err != nil { - return err - } - - // Build a list of filter strings - filters, err := getJSONPaths(operands[1].Value) - if err != nil { - return err - } - - // Actually do the filtering - filterObj := pathsToObject(filters) - r, err := obj.Filter(filterObj) - if err != nil { - return err - } - - return iter(ast.NewTerm(r)) -} - -func getJSONPaths(operand ast.Value) ([]ast.Ref, error) { - var paths []ast.Ref - - switch v := operand.(type) { - case *ast.Array: - for i := 0; i < v.Len(); i++ { - filter, err := parsePath(v.Elem(i)) - if err != nil { - return nil, err - } - paths = append(paths, filter) - } - case ast.Set: - err := v.Iter(func(f *ast.Term) error { - filter, err := parsePath(f) - if err != nil { - return err - } - paths = append(paths, filter) - return nil - }) - if err != nil { - return nil, err - } - default: - return nil, builtins.NewOperandTypeErr(2, v, "set", "array") - } - - return paths, nil -} - -func parsePath(path *ast.Term) (ast.Ref, error) { - // paths can either be a `/` separated json path or - // an array or set of values - var pathSegments ast.Ref - switch p := path.Value.(type) { - case ast.String: - if p == "" { - return ast.Ref{}, nil - } - parts := strings.Split(strings.TrimLeft(string(p), "/"), "/") - for _, part := range parts { - part = strings.ReplaceAll(strings.ReplaceAll(part, "~1", "/"), "~0", "~") - pathSegments = append(pathSegments, ast.StringTerm(part)) - } - case *ast.Array: - p.Foreach(func(term *ast.Term) { - pathSegments = append(pathSegments, term) - }) - default: - return nil, builtins.NewOperandErr(2, "must be one of {set, array} containing string paths or array of path segments but got %v", ast.TypeName(p)) - } - - return pathSegments, nil -} - -func pathsToObject(paths []ast.Ref) ast.Object { - root := ast.NewObject() - - for _, path := range paths { - node := root - var done bool - - // If the path is an empty JSON path, skip all further processing. - if len(path) == 0 { - done = true - } - - // Otherwise, we should have 1+ path segments to work with. 
- for i := 0; i < len(path)-1 && !done; i++ { - - k := path[i] - child := node.Get(k) - - if child == nil { - obj := ast.NewObject() - node.Insert(k, ast.NewTerm(obj)) - node = obj - continue - } - - switch v := child.Value.(type) { - case ast.Null: - done = true - case ast.Object: - node = v - default: - panic("unreachable") - } - } - - if !done { - node.Insert(path[len(path)-1], ast.NullTerm()) - } - } - - return root -} - -type jsonPatch struct { - op string - path *ast.Term - from *ast.Term - value *ast.Term -} - -func getPatch(o ast.Object) (jsonPatch, error) { - validOps := map[string]struct{}{"add": {}, "remove": {}, "replace": {}, "move": {}, "copy": {}, "test": {}} - var out jsonPatch - var ok bool - getAttribute := func(attr string) (*ast.Term, error) { - if term := o.Get(ast.StringTerm(attr)); term != nil { - return term, nil - } - - return nil, fmt.Errorf("missing '%s' attribute", attr) - } - - opTerm, err := getAttribute("op") - if err != nil { - return out, err - } - op, ok := opTerm.Value.(ast.String) - if !ok { - return out, fmt.Errorf("attribute 'op' must be a string") - } - out.op = string(op) - if _, found := validOps[out.op]; !found { - out.op = "" - return out, fmt.Errorf("unrecognized op '%s'", string(op)) - } - - pathTerm, err := getAttribute("path") - if err != nil { - return out, err - } - out.path = pathTerm - - // Only fetch the "from" parameter for move/copy ops. - switch out.op { - case "move", "copy": - fromTerm, err := getAttribute("from") - if err != nil { - return out, err - } - out.from = fromTerm - } - - // Only fetch the "value" parameter for add/replace/test ops. - switch out.op { - case "add", "replace", "test": - valueTerm, err := getAttribute("value") - if err != nil { - return out, err - } - out.value = valueTerm - } - - return out, nil -} - -func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) { - et := edittree.NewEditTree(source) - for i := 0; i < operations.Len(); i++ { - object, ok := operations.Elem(i).Value.(ast.Object) - if !ok { - return nil, fmt.Errorf("must be an array of JSON-Patch objects, but at least one element is not an object") - } - patch, err := getPatch(object) - if err != nil { - return nil, err - } - path, err := parsePath(patch.path) - if err != nil { - return nil, err - } - - switch patch.op { - case "add": - _, err = et.InsertAtPath(path, patch.value) - if err != nil { - return nil, err - } - case "remove": - _, err = et.DeleteAtPath(path) - if err != nil { - return nil, err - } - case "replace": - _, err = et.DeleteAtPath(path) - if err != nil { - return nil, err - } - _, err = et.InsertAtPath(path, patch.value) - if err != nil { - return nil, err - } - case "move": - from, err := parsePath(patch.from) - if err != nil { - return nil, err - } - chunk, err := et.RenderAtPath(from) - if err != nil { - return nil, err - } - _, err = et.DeleteAtPath(from) - if err != nil { - return nil, err - } - _, err = et.InsertAtPath(path, chunk) - if err != nil { - return nil, err - } - case "copy": - from, err := parsePath(patch.from) - if err != nil { - return nil, err - } - chunk, err := et.RenderAtPath(from) - if err != nil { - return nil, err - } - _, err = et.InsertAtPath(path, chunk) - if err != nil { - return nil, err - } - case "test": - chunk, err := et.RenderAtPath(path) - if err != nil { - return nil, err - } - if !chunk.Equal(patch.value) { - return nil, fmt.Errorf("value from EditTree != patch value.\n\nExpected: %v\n\nFound: %v", patch.value, chunk) - } - } - } - final := et.Render() - // TODO: Nil 
check here? - return final, nil -} - -func builtinJSONPatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // JSON patch supports arrays, objects as well as values as the target. - target := ast.NewTerm(operands[0].Value) - - // Expect an array of operations. - operations, err := builtins.ArrayOperand(operands[1].Value, 2) - if err != nil { - return err - } - - patched, err := applyPatches(target, operations) - if err != nil { - return nil - } - return iter(patched) -} - -func init() { - RegisterBuiltinFunc(ast.JSONFilter.Name, builtinJSONFilter) - RegisterBuiltinFunc(ast.JSONRemove.Name, builtinJSONRemove) - RegisterBuiltinFunc(ast.JSONPatch.Name, builtinJSONPatch) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/numbers.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/numbers.go deleted file mode 100644 index 27f3156b8..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/numbers.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "fmt" - "math/big" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -type randIntCachingKey string - -var one = big.NewInt(1) - -func builtinNumbersRange(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - x, err := builtins.BigIntOperand(operands[0].Value, 1) - if err != nil { - return err - } - - y, err := builtins.BigIntOperand(operands[1].Value, 2) - if err != nil { - return err - } - - ast, err := generateRange(bctx, x, y, one, "numbers.range") - if err != nil { - return err - } - - return iter(ast) -} - -func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - x, err := builtins.BigIntOperand(operands[0].Value, 1) - if err != nil { - return err - } - - y, err := builtins.BigIntOperand(operands[1].Value, 2) - if err != nil { - return err - } - - step, err := builtins.BigIntOperand(operands[2].Value, 3) - if err != nil { - return err - } - - if step.Cmp(big.NewInt(0)) <= 0 { - return fmt.Errorf("numbers.range_step: step must be a positive number above zero") - } - - ast, err := generateRange(bctx, x, y, step, "numbers.range_step") - if err != nil { - return err - } - - return iter(ast) -} - -func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, funcName string) (*ast.Term, error) { - - cmp := x.Cmp(y) - - comp := func(i *big.Int, y *big.Int) bool { return i.Cmp(y) <= 0 } - iter := func(i *big.Int) *big.Int { return i.Add(i, step) } - - if cmp > 0 { - comp = func(i *big.Int, y *big.Int) bool { return i.Cmp(y) >= 0 } - iter = func(i *big.Int) *big.Int { return i.Sub(i, step) } - } - - result := ast.NewArray() - haltErr := Halt{ - Err: &Error{ - Code: CancelErr, - Message: fmt.Sprintf("%s: timed out before generating all numbers in range", funcName), - }, - } - - for i := new(big.Int).Set(x); comp(i, y); i = iter(i) { - if bctx.Cancel != nil && bctx.Cancel.Cancelled() { - return nil, haltErr - } - result = result.Append(ast.NewTerm(builtins.IntToNumber(i))) - } - - return ast.NewTerm(result), nil -} - -func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - strOp, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - - } - - n, err := 
builtins.IntOperand(operands[1].Value, 2) - if err != nil { - return err - } - - if n == 0 { - return iter(ast.IntNumberTerm(0)) - } - - if n < 0 { - n = -n - } - - var key = randIntCachingKey(fmt.Sprintf("%s-%d", strOp, n)) - - if val, ok := bctx.Cache.Get(key); ok { - return iter(val.(*ast.Term)) - } - - r, err := bctx.Rand() - if err != nil { - return err - } - result := ast.IntNumberTerm(r.Intn(n)) - bctx.Cache.Put(key, result) - - return iter(result) -} - -func init() { - RegisterBuiltinFunc(ast.NumbersRange.Name, builtinNumbersRange) - RegisterBuiltinFunc(ast.NumbersRangeStep.Name, builtinNumbersRangeStep) - RegisterBuiltinFunc(ast.RandIntn.Name, builtinRandIntn) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/print.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/print.go index 765b344b3..5eacd180d 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/print.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/print.go @@ -5,82 +5,12 @@ package topdown import ( - "fmt" "io" - "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" "github.com/open-policy-agent/opa/topdown/print" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) func NewPrintHook(w io.Writer) print.Hook { - return printHook{w: w} -} - -type printHook struct { - w io.Writer -} - -func (h printHook) Print(_ print.Context, msg string) error { - _, err := fmt.Fprintln(h.w, msg) - return err -} - -func builtinPrint(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - if bctx.PrintHook == nil { - return iter(nil) - } - - arr, err := builtins.ArrayOperand(operands[0].Value, 1) - if err != nil { - return err - } - - buf := make([]string, arr.Len()) - - err = builtinPrintCrossProductOperands(bctx, buf, arr, 0, func(buf []string) error { - pctx := print.Context{ - Context: bctx.Context, - Location: bctx.Location, - } - return bctx.PrintHook.Print(pctx, strings.Join(buf, " ")) - }) - if err != nil { - return err - } - - return iter(nil) -} - -func builtinPrintCrossProductOperands(bctx BuiltinContext, buf []string, operands *ast.Array, i int, f func([]string) error) error { - - if i >= operands.Len() { - return f(buf) - } - - xs, ok := operands.Elem(i).Value.(ast.Set) - if !ok { - return Halt{Err: internalErr(bctx.Location, fmt.Sprintf("illegal argument type: %v", ast.TypeName(operands.Elem(i).Value)))} - } - - if xs.Len() == 0 { - buf[i] = "" - return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f) - } - - return xs.Iter(func(x *ast.Term) error { - switch v := x.Value.(type) { - case ast.String: - buf[i] = string(v) - default: - buf[i] = v.String() - } - return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f) - }) -} - -func init() { - RegisterBuiltinFunc(ast.InternalPrint.Name, builtinPrint) + return v1.NewPrintHook(w) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go new file mode 100644 index 000000000..c2ee0eca7 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/print/doc.go @@ -0,0 +1,8 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
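Because print.Hook and print.Context are now aliases for their v1 counterparts (see the print.go and print/print.go hunks nearby), hooks written against the legacy import paths still satisfy the interface. A minimal sketch follows; prefixHook is a hypothetical custom implementation, while topdown.NewPrintHook (shown above) simply writes each message to the given io.Writer.

package main

import (
	"fmt"
	"os"

	"github.com/open-policy-agent/opa/topdown"
	"github.com/open-policy-agent/opa/topdown/print"
)

// prefixHook is a hypothetical Hook implementation that tags every print()
// message with the location of the call before writing it to stderr.
type prefixHook struct{}

func (prefixHook) Print(pctx print.Context, msg string) error {
	_, err := fmt.Fprintf(os.Stderr, "%v: %s\n", pctx.Location, msg)
	return err
}

func main() {
	// The ready-made hook writes each message, one per line, to the writer.
	stdout := topdown.NewPrintHook(os.Stdout)

	// Either hook can be passed to Query.WithPrintHook (see the query.go hunk
	// later in this diff) so that print() output from policies reaches it.
	_ = stdout
	_ = prefixHook{}
}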
+ +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package print diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/print/print.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/print/print.go index 0fb6abdca..66ffbb176 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/print/print.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/print/print.go @@ -1,21 +1,14 @@ package print import ( - "context" - - "github.com/open-policy-agent/opa/ast" + v1 "github.com/open-policy-agent/opa/v1/topdown/print" ) // Context provides the Hook implementation context about the print() call. -type Context struct { - Context context.Context // request context passed when query executed - Location *ast.Location // location of print call -} +type Context = v1.Context // Hook defines the interface that callers can implement to receive print // statement outputs. If the hook returns an error, it will be surfaced if // strict builtin error checking is enabled (otherwise, it will not halt // execution.) -type Hook interface { - Print(Context, string) error -} +type Hook = v1.Hook diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/query.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/query.go index bbb4ba58f..d24060991 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/query.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/query.go @@ -1,590 +1,24 @@ package topdown import ( - "context" - "crypto/rand" - "io" - "sort" - "time" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/resolver" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/copypropagation" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" + "github.com/open-policy-agent/opa/v1/ast" + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // QueryResultSet represents a collection of results returned by a query. -type QueryResultSet []QueryResult +type QueryResultSet = v1.QueryResultSet // QueryResult represents a single result returned by a query. The result // contains bindings for all variables that appear in the query. -type QueryResult map[ast.Var]*ast.Term +type QueryResult = v1.QueryResult // Query provides a configurable interface for performing query evaluation. 
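Query, QueryResultSet and QueryResult likewise become aliases, so the builder API removed below keeps working through the v1 implementation. A minimal, illustrative sketch of evaluating a query against a compiled module via the deprecated v0 import paths; the module, file name, and input are made up for the example, and the policy uses the pre-1.0 Rego syntax these compatibility packages default to.

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
	"github.com/open-policy-agent/opa/topdown"
)

func main() {
	ctx := context.Background()

	// A tiny policy: allow is true when the input user is "alice".
	compiler, err := ast.CompileModules(map[string]string{
		"example.rego": `package example

allow { input.user == "alice" }`,
	})
	if err != nil {
		panic(err)
	}

	store := inmem.New()
	txn := storage.NewTransactionOrDie(ctx, store)
	defer store.Abort(ctx, txn)

	q := topdown.NewQuery(ast.MustParseBody(`data.example.allow = x`)).
		WithCompiler(compiler).
		WithStore(store).
		WithTransaction(txn).
		WithInput(ast.MustParseTerm(`{"user": "alice"}`))

	rs, err := q.Run(ctx)
	if err != nil {
		panic(err)
	}
	for _, result := range rs {
		// Each QueryResult maps query variables to their bound terms.
		fmt.Println(result[ast.Var("x")]) // true
	}
}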
-type Query struct { - seed io.Reader - time time.Time - cancel Cancel - query ast.Body - queryCompiler ast.QueryCompiler - compiler *ast.Compiler - store storage.Store - txn storage.Transaction - input *ast.Term - external *resolverTrie - tracers []QueryTracer - plugTraceVars bool - unknowns []*ast.Term - partialNamespace string - skipSaveNamespace bool - metrics metrics.Metrics - instr *Instrumentation - disableInlining []ast.Ref - shallowInlining bool - genvarprefix string - runtime *ast.Term - builtins map[string]*Builtin - indexing bool - earlyExit bool - interQueryBuiltinCache cache.InterQueryCache - ndBuiltinCache builtins.NDBCache - strictBuiltinErrors bool - builtinErrorList *[]Error - strictObjects bool - printHook print.Hook - tracingOpts tracing.Options - virtualCache VirtualCache -} +type Query = v1.Query // Builtin represents a built-in function that queries can call. -type Builtin struct { - Decl *ast.Builtin - Func BuiltinFunc -} +type Builtin = v1.Builtin // NewQuery returns a new Query object that can be run. func NewQuery(query ast.Body) *Query { - return &Query{ - query: query, - genvarprefix: ast.WildcardPrefix, - indexing: true, - earlyExit: true, - external: newResolverTrie(), - } -} - -// WithQueryCompiler sets the queryCompiler used for the query. -func (q *Query) WithQueryCompiler(queryCompiler ast.QueryCompiler) *Query { - q.queryCompiler = queryCompiler - return q -} - -// WithCompiler sets the compiler to use for the query. -func (q *Query) WithCompiler(compiler *ast.Compiler) *Query { - q.compiler = compiler - return q -} - -// WithStore sets the store to use for the query. -func (q *Query) WithStore(store storage.Store) *Query { - q.store = store - return q -} - -// WithTransaction sets the transaction to use for the query. All queries -// should be performed over a consistent snapshot of the storage layer. -func (q *Query) WithTransaction(txn storage.Transaction) *Query { - q.txn = txn - return q -} - -// WithCancel sets the cancellation object to use for the query. Set this if -// you need to abort queries based on a deadline. This is optional. -func (q *Query) WithCancel(cancel Cancel) *Query { - q.cancel = cancel - return q -} - -// WithInput sets the input object to use for the query. References rooted at -// input will be evaluated against this value. This is optional. -func (q *Query) WithInput(input *ast.Term) *Query { - q.input = input - return q -} - -// WithTracer adds a query tracer to use during evaluation. This is optional. -// Deprecated: Use WithQueryTracer instead. -func (q *Query) WithTracer(tracer Tracer) *Query { - qt, ok := tracer.(QueryTracer) - if !ok { - qt = WrapLegacyTracer(tracer) - } - return q.WithQueryTracer(qt) -} - -// WithQueryTracer adds a query tracer to use during evaluation. This is optional. -// Disabled QueryTracers will be ignored. -func (q *Query) WithQueryTracer(tracer QueryTracer) *Query { - if !tracer.Enabled() { - return q - } - - q.tracers = append(q.tracers, tracer) - - // If *any* of the tracers require local variable metadata we need to - // enabled plugging local trace variables. - conf := tracer.Config() - if conf.PlugLocalVars { - q.plugTraceVars = true - } - - return q -} - -// WithMetrics sets the metrics collection to add evaluation metrics to. This -// is optional. -func (q *Query) WithMetrics(m metrics.Metrics) *Query { - q.metrics = m - return q -} - -// WithInstrumentation sets the instrumentation configuration to enable on the -// evaluation process. By default, instrumentation is turned off. 
-func (q *Query) WithInstrumentation(instr *Instrumentation) *Query { - q.instr = instr - return q -} - -// WithUnknowns sets the initial set of variables or references to treat as -// unknown during query evaluation. This is required for partial evaluation. -func (q *Query) WithUnknowns(terms []*ast.Term) *Query { - q.unknowns = terms - return q -} - -// WithPartialNamespace sets the namespace to use for supporting rules -// generated as part of the partial evaluation process. The ns value must be a -// valid package path component. -func (q *Query) WithPartialNamespace(ns string) *Query { - q.partialNamespace = ns - return q -} - -// WithSkipPartialNamespace disables namespacing of saved support rules that are generated -// from the original policy (rules which are completely synthetic are still namespaced.) -func (q *Query) WithSkipPartialNamespace(yes bool) *Query { - q.skipSaveNamespace = yes - return q -} - -// WithDisableInlining adds a set of paths to the query that should be excluded from -// inlining. Inlining during partial evaluation can be expensive in some cases -// (e.g., when a cross-product is computed.) Disabling inlining avoids expensive -// computation at the cost of generating support rules. -func (q *Query) WithDisableInlining(paths []ast.Ref) *Query { - q.disableInlining = paths - return q -} - -// WithShallowInlining disables aggressive inlining performed during partial evaluation. -// When shallow inlining is enabled rules that depend (transitively) on unknowns are not inlined. -// Only rules/values that are completely known will be inlined. -func (q *Query) WithShallowInlining(yes bool) *Query { - q.shallowInlining = yes - return q -} - -// WithRuntime sets the runtime data to execute the query with. The runtime data -// can be returned by the `opa.runtime` built-in function. -func (q *Query) WithRuntime(runtime *ast.Term) *Query { - q.runtime = runtime - return q -} - -// WithBuiltins adds a set of built-in functions that can be called by the -// query. -func (q *Query) WithBuiltins(builtins map[string]*Builtin) *Query { - q.builtins = builtins - return q -} - -// WithIndexing will enable or disable using rule indexing for the evaluation -// of the query. The default is enabled. -func (q *Query) WithIndexing(enabled bool) *Query { - q.indexing = enabled - return q -} - -// WithEarlyExit will enable or disable using 'early exit' for the evaluation -// of the query. The default is enabled. -func (q *Query) WithEarlyExit(enabled bool) *Query { - q.earlyExit = enabled - return q -} - -// WithSeed sets a reader that will seed randomization required by built-in functions. -// If a seed is not provided crypto/rand.Reader is used. -func (q *Query) WithSeed(r io.Reader) *Query { - q.seed = r - return q -} - -// WithTime sets the time that will be returned by the time.now_ns() built-in function. -func (q *Query) WithTime(x time.Time) *Query { - q.time = x - return q -} - -// WithInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize. -func (q *Query) WithInterQueryBuiltinCache(c cache.InterQueryCache) *Query { - q.interQueryBuiltinCache = c - return q -} - -// WithNDBuiltinCache sets the non-deterministic builtin cache. -func (q *Query) WithNDBuiltinCache(c builtins.NDBCache) *Query { - q.ndBuiltinCache = c - return q -} - -// WithStrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors. 
-func (q *Query) WithStrictBuiltinErrors(yes bool) *Query { - q.strictBuiltinErrors = yes - return q -} - -// WithBuiltinErrorList supplies a pointer to an Error slice to store built-in function errors -// encountered during evaluation. This error slice can be inspected after evaluation to determine -// which built-in function errors occurred. -func (q *Query) WithBuiltinErrorList(list *[]Error) *Query { - q.builtinErrorList = list - return q -} - -// WithResolver configures an external resolver to use for the given ref. -func (q *Query) WithResolver(ref ast.Ref, r resolver.Resolver) *Query { - q.external.Put(ref, r) - return q -} - -func (q *Query) WithPrintHook(h print.Hook) *Query { - q.printHook = h - return q -} - -// WithDistributedTracingOpts sets the options to be used by distributed tracing. -func (q *Query) WithDistributedTracingOpts(tr tracing.Options) *Query { - q.tracingOpts = tr - return q -} - -// WithStrictObjects tells the evaluator to avoid the "lazy object" optimization -// applied when reading objects from the store. It will result in higher memory -// usage and should only be used temporarily while adjusting code that breaks -// because of the optimization. -func (q *Query) WithStrictObjects(yes bool) *Query { - q.strictObjects = yes - return q -} - -// WithVirtualCache sets the VirtualCache to use during evaluation. This is -// optional, and if not set, the default cache is used. -func (q *Query) WithVirtualCache(vc VirtualCache) *Query { - q.virtualCache = vc - return q -} - -// PartialRun executes partial evaluation on the query with respect to unknown -// values. Partial evaluation attempts to evaluate as much of the query as -// possible without requiring values for the unknowns set on the query. The -// result of partial evaluation is a new set of queries that can be evaluated -// once the unknown value is known. In addition to new queries, partial -// evaluation may produce additional support modules that should be used in -// conjunction with the partially evaluated queries. 
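A sketch of the partial-evaluation entry point described above, again through the deprecated v0 paths that now forward to v1. Marking input as unknown asks PartialRun to return residual queries (and possibly support modules) instead of a result set; the policy, file name, and namespace here are illustrative, and the exact residual output depends on inlining.

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/storage"
	"github.com/open-policy-agent/opa/storage/inmem"
	"github.com/open-policy-agent/opa/topdown"
)

func main() {
	ctx := context.Background()

	compiler, err := ast.CompileModules(map[string]string{
		"authz.rego": `package authz

allow { input.user == "alice" }`,
	})
	if err != nil {
		panic(err)
	}

	store := inmem.New()
	txn := storage.NewTransactionOrDie(ctx, store)
	defer store.Abort(ctx, txn)

	q := topdown.NewQuery(ast.MustParseBody(`data.authz.allow = x`)).
		WithCompiler(compiler).
		WithStore(store).
		WithTransaction(txn).
		WithUnknowns([]*ast.Term{ast.MustParseTerm("input")}).
		WithPartialNamespace("partial")

	partials, support, err := q.PartialRun(ctx)
	if err != nil {
		panic(err)
	}

	// Residual queries still reference the unknown input document.
	for _, body := range partials {
		fmt.Println(body)
	}
	// Support modules (if any) are generated under the "partial" namespace.
	for _, mod := range support {
		fmt.Println(mod)
	}
}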
-func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support []*ast.Module, err error) { - if q.partialNamespace == "" { - q.partialNamespace = "partial" // lazily initialize partial namespace - } - if q.seed == nil { - q.seed = rand.Reader - } - if !q.time.IsZero() { - q.time = time.Now() - } - if q.metrics == nil { - q.metrics = metrics.New() - } - - f := &queryIDFactory{} - b := newBindings(0, q.instr) - - var vc VirtualCache - if q.virtualCache != nil { - vc = q.virtualCache - } else { - vc = NewVirtualCache() - } - - e := &eval{ - ctx: ctx, - metrics: q.metrics, - seed: q.seed, - time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())), - cancel: q.cancel, - query: q.query, - queryCompiler: q.queryCompiler, - queryIDFact: f, - queryID: f.Next(), - bindings: b, - compiler: q.compiler, - store: q.store, - baseCache: newBaseCache(), - targetStack: newRefStack(), - txn: q.txn, - input: q.input, - external: q.external, - tracers: q.tracers, - traceEnabled: len(q.tracers) > 0, - plugTraceVars: q.plugTraceVars, - instr: q.instr, - builtins: q.builtins, - builtinCache: builtins.Cache{}, - functionMocks: newFunctionMocksStack(), - interQueryBuiltinCache: q.interQueryBuiltinCache, - ndBuiltinCache: q.ndBuiltinCache, - virtualCache: vc, - comprehensionCache: newComprehensionCache(), - saveSet: newSaveSet(q.unknowns, b, q.instr), - saveStack: newSaveStack(), - saveSupport: newSaveSupport(), - saveNamespace: ast.StringTerm(q.partialNamespace), - skipSaveNamespace: q.skipSaveNamespace, - inliningControl: &inliningControl{ - shallow: q.shallowInlining, - }, - genvarprefix: q.genvarprefix, - runtime: q.runtime, - indexing: q.indexing, - earlyExit: q.earlyExit, - builtinErrors: &builtinErrors{}, - printHook: q.printHook, - strictObjects: q.strictObjects, - } - - if len(q.disableInlining) > 0 { - e.inliningControl.PushDisable(q.disableInlining, false) - } - - e.caller = e - q.metrics.Timer(metrics.RegoPartialEval).Start() - defer q.metrics.Timer(metrics.RegoPartialEval).Stop() - - livevars := ast.NewVarSet() - for _, t := range q.unknowns { - switch v := t.Value.(type) { - case ast.Var: - livevars.Add(v) - case ast.Ref: - livevars.Add(v[0].Value.(ast.Var)) - } - } - - ast.WalkVars(q.query, func(x ast.Var) bool { - if !x.IsGenerated() { - livevars.Add(x) - } - return false - }) - - p := copypropagation.New(livevars).WithCompiler(q.compiler) - - err = e.Run(func(e *eval) error { - - // Build output from saved expressions. - body := ast.NewBody() - - for _, elem := range e.saveStack.Stack[len(e.saveStack.Stack)-1] { - body.Append(elem.Plug(e.bindings)) - } - - // Include bindings as exprs so that when caller evals the result, they - // can obtain values for the vars in their query. - bindingExprs := []*ast.Expr{} - _ = e.bindings.Iter(e.bindings, func(a, b *ast.Term) error { - bindingExprs = append(bindingExprs, ast.Equality.Expr(a, b)) - return nil - }) // cannot return error - - // Sort binding expressions so that results are deterministic. - sort.Slice(bindingExprs, func(i, j int) bool { - return bindingExprs[i].Compare(bindingExprs[j]) < 0 - }) - - for i := range bindingExprs { - body.Append(bindingExprs[i]) - } - - // Skip this rule body if it fails to type-check. - // Type-checking failure means the rule body will never succeed. 
- if !e.compiler.PassesTypeCheck(body) { - return nil - } - - if !q.shallowInlining { - body = applyCopyPropagation(p, e.instr, body) - } - - partials = append(partials, body) - return nil - }) - - support = e.saveSupport.List() - - if len(e.builtinErrors.errs) > 0 { - if q.strictBuiltinErrors { - err = e.builtinErrors.errs[0] - } else if q.builtinErrorList != nil { - // If a builtinErrorList has been supplied, we must use pointer indirection - // to append to it. builtinErrorList is a slice pointer so that errors can be - // appended to it without returning a new slice and changing the interface - // of PartialRun. - for _, err := range e.builtinErrors.errs { - if tdError, ok := err.(*Error); ok { - *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError) - } else { - *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{ - Code: BuiltinErr, - Message: err.Error(), - }) - } - } - } - } - - for i := range support { - sort.Slice(support[i].Rules, func(j, k int) bool { - return support[i].Rules[j].Compare(support[i].Rules[k]) < 0 - }) - } - - return partials, support, err -} - -// Run is a wrapper around Iter that accumulates query results and returns them -// in one shot. -func (q *Query) Run(ctx context.Context) (QueryResultSet, error) { - qrs := QueryResultSet{} - return qrs, q.Iter(ctx, func(qr QueryResult) error { - qrs = append(qrs, qr) - return nil - }) -} - -// Iter executes the query and invokes the iter function with query results -// produced by evaluating the query. -func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { - // Query evaluation must not be allowed if the compiler has errors and is in an undefined, possibly inconsistent state - if q.compiler != nil && len(q.compiler.Errors) > 0 { - return &Error{ - Code: InternalErr, - Message: "compiler has errors", - } - } - - if q.seed == nil { - q.seed = rand.Reader - } - if q.time.IsZero() { - q.time = time.Now() - } - if q.metrics == nil { - q.metrics = metrics.New() - } - - f := &queryIDFactory{} - - var vc VirtualCache - if q.virtualCache != nil { - vc = q.virtualCache - } else { - vc = NewVirtualCache() - } - - e := &eval{ - ctx: ctx, - metrics: q.metrics, - seed: q.seed, - time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())), - cancel: q.cancel, - query: q.query, - queryCompiler: q.queryCompiler, - queryIDFact: f, - queryID: f.Next(), - bindings: newBindings(0, q.instr), - compiler: q.compiler, - store: q.store, - baseCache: newBaseCache(), - targetStack: newRefStack(), - txn: q.txn, - input: q.input, - external: q.external, - tracers: q.tracers, - traceEnabled: len(q.tracers) > 0, - plugTraceVars: q.plugTraceVars, - instr: q.instr, - builtins: q.builtins, - builtinCache: builtins.Cache{}, - functionMocks: newFunctionMocksStack(), - interQueryBuiltinCache: q.interQueryBuiltinCache, - ndBuiltinCache: q.ndBuiltinCache, - virtualCache: vc, - comprehensionCache: newComprehensionCache(), - genvarprefix: q.genvarprefix, - runtime: q.runtime, - indexing: q.indexing, - earlyExit: q.earlyExit, - builtinErrors: &builtinErrors{}, - printHook: q.printHook, - tracingOpts: q.tracingOpts, - strictObjects: q.strictObjects, - } - e.caller = e - q.metrics.Timer(metrics.RegoQueryEval).Start() - err := e.Run(func(e *eval) error { - qr := QueryResult{} - _ = e.bindings.Iter(nil, func(k, v *ast.Term) error { - qr[k.Value.(ast.Var)] = v - return nil - }) // cannot return error - return iter(qr) - }) - - if len(e.builtinErrors.errs) > 0 { - if q.strictBuiltinErrors { - err = e.builtinErrors.errs[0] - } 
else if q.builtinErrorList != nil { - // If a builtinErrorList has been supplied, we must use pointer indirection - // to append to it. builtinErrorList is a slice pointer so that errors can be - // appended to it without returning a new slice and changing the interface - // of Iter. - for _, err := range e.builtinErrors.errs { - if tdError, ok := err.(*Error); ok { - *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError) - } else { - *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{ - Code: BuiltinErr, - Message: err.Error(), - }) - } - } - } - } - - q.metrics.Timer(metrics.RegoQueryEval).Stop() - return err + return v1.NewQuery(query) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/regex.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/regex.go deleted file mode 100644 index 877f19e23..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/regex.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "fmt" - "regexp" - "sync" - - gintersect "github.com/yashtewari/glob-intersection" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -const regexCacheMaxSize = 100 - -var regexpCacheLock = sync.Mutex{} -var regexpCache map[string]*regexp.Regexp - -func builtinRegexIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return iter(ast.BooleanTerm(false)) - } - - _, err = regexp.Compile(string(s)) - if err != nil { - return iter(ast.BooleanTerm(false)) - } - - return iter(ast.BooleanTerm(true)) -} - -func builtinRegexMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s1, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - s2, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - re, err := getRegexp(string(s1)) - if err != nil { - return err - } - return iter(ast.BooleanTerm(re.MatchString(string(s2)))) -} - -func builtinRegexMatchTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - pattern, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - match, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - start, err := builtins.StringOperand(operands[2].Value, 3) - if err != nil { - return err - } - end, err := builtins.StringOperand(operands[3].Value, 4) - if err != nil { - return err - } - if len(start) != 1 { - return fmt.Errorf("start delimiter has to be exactly one character long but is %d long", len(start)) - } - if len(end) != 1 { - return fmt.Errorf("end delimiter has to be exactly one character long but is %d long", len(start)) - } - re, err := getRegexpTemplate(string(pattern), string(start)[0], string(end)[0]) - if err != nil { - return err - } - return iter(ast.BooleanTerm(re.MatchString(string(match)))) -} - -func builtinRegexSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s1, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - s2, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - re, err := getRegexp(string(s1)) - if err != nil { - return err - } - - elems := 
re.Split(string(s2), -1) - arr := make([]*ast.Term, len(elems)) - for i := range elems { - arr[i] = ast.StringTerm(elems[i]) - } - return iter(ast.NewTerm(ast.NewArray(arr...))) -} - -func getRegexp(pat string) (*regexp.Regexp, error) { - regexpCacheLock.Lock() - defer regexpCacheLock.Unlock() - re, ok := regexpCache[pat] - if !ok { - var err error - re, err = regexp.Compile(pat) - if err != nil { - return nil, err - } - if len(regexpCache) >= regexCacheMaxSize { - // Delete a (semi-)random key to make room for the new one. - for k := range regexpCache { - delete(regexpCache, k) - break - } - } - regexpCache[pat] = re - } - return re, nil -} - -func getRegexpTemplate(pat string, delimStart, delimEnd byte) (*regexp.Regexp, error) { - regexpCacheLock.Lock() - defer regexpCacheLock.Unlock() - re, ok := regexpCache[pat] - if !ok { - var err error - re, err = compileRegexTemplate(pat, delimStart, delimEnd) - if err != nil { - return nil, err - } - regexpCache[pat] = re - } - return re, nil -} - -func builtinGlobsMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s1, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - s2, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - ne, err := gintersect.NonEmpty(string(s1), string(s2)) - if err != nil { - return err - } - return iter(ast.BooleanTerm(ne)) -} - -func builtinRegexFind(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s1, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - s2, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - n, err := builtins.IntOperand(operands[2].Value, 3) - if err != nil { - return err - } - re, err := getRegexp(string(s1)) - if err != nil { - return err - } - - elems := re.FindAllString(string(s2), n) - arr := make([]*ast.Term, len(elems)) - for i := range elems { - arr[i] = ast.StringTerm(elems[i]) - } - return iter(ast.NewTerm(ast.NewArray(arr...))) -} - -func builtinRegexFindAllStringSubmatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s1, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - s2, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - n, err := builtins.IntOperand(operands[2].Value, 3) - if err != nil { - return err - } - - re, err := getRegexp(string(s1)) - if err != nil { - return err - } - matches := re.FindAllStringSubmatch(string(s2), n) - - outer := make([]*ast.Term, len(matches)) - for i := range matches { - inner := make([]*ast.Term, len(matches[i])) - for j := range matches[i] { - inner[j] = ast.StringTerm(matches[i][j]) - } - outer[i] = ast.NewTerm(ast.NewArray(inner...)) - } - - return iter(ast.NewTerm(ast.NewArray(outer...))) -} - -func builtinRegexReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - base, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - pattern, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - value, err := builtins.StringOperand(operands[2].Value, 3) - if err != nil { - return err - } - - re, err := getRegexp(string(pattern)) - if err != nil { - return err - } - - res := re.ReplaceAllString(string(base), string(value)) - - return iter(ast.StringTerm(res)) -} - -func init() { - regexpCache = map[string]*regexp.Regexp{} - 
RegisterBuiltinFunc(ast.RegexIsValid.Name, builtinRegexIsValid) - RegisterBuiltinFunc(ast.RegexMatch.Name, builtinRegexMatch) - RegisterBuiltinFunc(ast.RegexMatchDeprecated.Name, builtinRegexMatch) - RegisterBuiltinFunc(ast.RegexSplit.Name, builtinRegexSplit) - RegisterBuiltinFunc(ast.GlobsMatch.Name, builtinGlobsMatch) - RegisterBuiltinFunc(ast.RegexTemplateMatch.Name, builtinRegexMatchTemplate) - RegisterBuiltinFunc(ast.RegexFind.Name, builtinRegexFind) - RegisterBuiltinFunc(ast.RegexFindAllStringSubmatch.Name, builtinRegexFindAllStringSubmatch) - RegisterBuiltinFunc(ast.RegexReplace.Name, builtinRegexReplace) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/resolver.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/resolver.go deleted file mode 100644 index 5ed6c1e44..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/resolver.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/resolver" -) - -type resolverTrie struct { - r resolver.Resolver - children map[ast.Value]*resolverTrie -} - -func newResolverTrie() *resolverTrie { - return &resolverTrie{children: map[ast.Value]*resolverTrie{}} -} - -func (t *resolverTrie) Put(ref ast.Ref, r resolver.Resolver) { - node := t - for _, t := range ref { - child, ok := node.children[t.Value] - if !ok { - child = &resolverTrie{children: map[ast.Value]*resolverTrie{}} - node.children[t.Value] = child - } - node = child - } - node.r = r -} - -func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) { - e.metrics.Timer(metrics.RegoExternalResolve).Start() - defer e.metrics.Timer(metrics.RegoExternalResolve).Stop() - node := t - for i, t := range ref { - child, ok := node.children[t.Value] - if !ok { - return nil, nil - } - node = child - if node.r != nil { - in := resolver.Input{ - Ref: ref[:i+1], - Input: e.input, - Metrics: e.metrics, - } - e.traceWasm(e.query[e.index], &in.Ref) - if e.data != nil { - return nil, errInScopeWithStmt - } - result, err := node.r.Eval(e.ctx, in) - if err != nil { - return nil, err - } - if result.Value == nil { - return nil, nil - } - val, err := result.Value.Find(ref[i+1:]) - if err != nil { - return nil, nil - } - return val, nil - } - } - return node.mktree(e, resolver.Input{ - Ref: ref, - Input: e.input, - Metrics: e.metrics, - }) -} - -func (t *resolverTrie) mktree(e *eval, in resolver.Input) (ast.Value, error) { - if t.r != nil { - e.traceWasm(e.query[e.index], &in.Ref) - if e.data != nil { - return nil, errInScopeWithStmt - } - result, err := t.r.Eval(e.ctx, in) - if err != nil { - return nil, err - } - if result.Value == nil { - return nil, nil - } - return result.Value, nil - } - obj := ast.NewObject() - for k, child := range t.children { - v, err := child.mktree(e, resolver.Input{Ref: append(in.Ref, ast.NewTerm(k)), Input: in.Input, Metrics: in.Metrics}) - if err != nil { - return nil, err - } - if v != nil { - obj.Insert(ast.NewTerm(k), ast.NewTerm(v)) - } - } - return obj, nil -} - -var errInScopeWithStmt = &Error{ - Code: InternalErr, - Message: "wasm cannot be executed when 'with' statements are in-scope", -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/semver.go 
b/constraint/vendor/github.com/open-policy-agent/opa/topdown/semver.go deleted file mode 100644 index 7bb7b9c18..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/semver.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "fmt" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/semver" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -func builtinSemVerCompare(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - versionStringA, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - versionStringB, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - versionA, err := semver.NewVersion(string(versionStringA)) - if err != nil { - return fmt.Errorf("operand 1: string %s is not a valid SemVer", versionStringA) - } - versionB, err := semver.NewVersion(string(versionStringB)) - if err != nil { - return fmt.Errorf("operand 2: string %s is not a valid SemVer", versionStringB) - } - - result := versionA.Compare(*versionB) - - return iter(ast.IntNumberTerm(result)) -} - -func builtinSemVerIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - versionString, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return iter(ast.BooleanTerm(false)) - } - - result := true - - _, err = semver.NewVersion(string(versionString)) - if err != nil { - result = false - } - - return iter(ast.BooleanTerm(result)) -} - -func init() { - RegisterBuiltinFunc(ast.SemVerCompare.Name, builtinSemVerCompare) - RegisterBuiltinFunc(ast.SemVerIsValid.Name, builtinSemVerIsValid) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/strings.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/strings.go deleted file mode 100644 index d9e4a55e5..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/strings.go +++ /dev/null @@ -1,610 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "fmt" - "math/big" - "sort" - "strings" - - "github.com/tchap/go-patricia/v2/patricia" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - a, b := operands[0].Value, operands[1].Value - - var strs []string - switch a := a.(type) { - case ast.String: - strs = []string{string(a)} - case *ast.Array, ast.Set: - var err error - strs, err = builtins.StringSliceOperand(a, 1) - if err != nil { - return err - } - default: - return builtins.NewOperandTypeErr(1, a, "string", "set", "array") - } - - var prefixes []string - switch b := b.(type) { - case ast.String: - prefixes = []string{string(b)} - case *ast.Array, ast.Set: - var err error - prefixes, err = builtins.StringSliceOperand(b, 2) - if err != nil { - return err - } - default: - return builtins.NewOperandTypeErr(2, b, "string", "set", "array") - } - - return iter(ast.BooleanTerm(anyStartsWithAny(strs, prefixes))) -} - -func builtinAnySuffixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - a, b := operands[0].Value, operands[1].Value - - var strsReversed []string - switch a := a.(type) { - case ast.String: - strsReversed = []string{reverseString(string(a))} - case *ast.Array, ast.Set: - strs, err := builtins.StringSliceOperand(a, 1) - if err != nil { - return err - } - strsReversed = make([]string, len(strs)) - for i := range strs { - strsReversed[i] = reverseString(strs[i]) - } - default: - return builtins.NewOperandTypeErr(1, a, "string", "set", "array") - } - - var suffixesReversed []string - switch b := b.(type) { - case ast.String: - suffixesReversed = []string{reverseString(string(b))} - case *ast.Array, ast.Set: - suffixes, err := builtins.StringSliceOperand(b, 2) - if err != nil { - return err - } - suffixesReversed = make([]string, len(suffixes)) - for i := range suffixes { - suffixesReversed[i] = reverseString(suffixes[i]) - } - default: - return builtins.NewOperandTypeErr(2, b, "string", "set", "array") - } - - return iter(ast.BooleanTerm(anyStartsWithAny(strsReversed, suffixesReversed))) -} - -func anyStartsWithAny(strs []string, prefixes []string) bool { - if len(strs) == 0 || len(prefixes) == 0 { - return false - } - if len(strs) == 1 && len(prefixes) == 1 { - return strings.HasPrefix(strs[0], prefixes[0]) - } - - trie := patricia.NewTrie() - for i := 0; i < len(strs); i++ { - trie.Insert([]byte(strs[i]), true) - } - - for i := 0; i < len(prefixes); i++ { - if trie.MatchSubtree([]byte(prefixes[i])) { - return true - } - } - - return false -} - -func builtinFormatInt(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - input, err := builtins.NumberOperand(operands[0].Value, 1) - if err != nil { - return err - } - - base, err := builtins.NumberOperand(operands[1].Value, 2) - if err != nil { - return err - } - - var format string - switch base { - case ast.Number("2"): - format = "%b" - case ast.Number("8"): - format = "%o" - case ast.Number("10"): - format = "%d" - case ast.Number("16"): - format = "%x" - default: - return builtins.NewOperandEnumErr(2, "2", "8", "10", "16") - } - - f := builtins.NumberToFloat(input) - i, _ := f.Int(nil) - - return iter(ast.StringTerm(fmt.Sprintf(format, i))) -} - -func builtinConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - join, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } 
- - strs := []string{} - - switch b := operands[1].Value.(type) { - case *ast.Array: - err := b.Iter(func(x *ast.Term) error { - s, ok := x.Value.(ast.String) - if !ok { - return builtins.NewOperandElementErr(2, operands[1].Value, x.Value, "string") - } - strs = append(strs, string(s)) - return nil - }) - if err != nil { - return err - } - case ast.Set: - err := b.Iter(func(x *ast.Term) error { - s, ok := x.Value.(ast.String) - if !ok { - return builtins.NewOperandElementErr(2, operands[1].Value, x.Value, "string") - } - strs = append(strs, string(s)) - return nil - }) - if err != nil { - return err - } - default: - return builtins.NewOperandTypeErr(2, operands[1].Value, "set", "array") - } - - return iter(ast.StringTerm(strings.Join(strs, string(join)))) -} - -func runesEqual(a, b []rune) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true -} - -func builtinIndexOf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - base, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - search, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - if len(string(search)) == 0 { - return fmt.Errorf("empty search character") - } - - baseRunes := []rune(string(base)) - searchRunes := []rune(string(search)) - searchLen := len(searchRunes) - - for i, r := range baseRunes { - if len(baseRunes) >= i+searchLen { - if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) { - return iter(ast.IntNumberTerm(i)) - } - } else { - break - } - } - - return iter(ast.IntNumberTerm(-1)) -} - -func builtinIndexOfN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - base, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - search, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - if len(string(search)) == 0 { - return fmt.Errorf("empty search character") - } - - baseRunes := []rune(string(base)) - searchRunes := []rune(string(search)) - searchLen := len(searchRunes) - - var arr []*ast.Term - for i, r := range baseRunes { - if len(baseRunes) >= i+searchLen { - if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) { - arr = append(arr, ast.IntNumberTerm(i)) - } - } else { - break - } - } - - return iter(ast.ArrayTerm(arr...)) -} - -func builtinSubstring(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - base, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - runes := []rune(base) - - startIndex, err := builtins.IntOperand(operands[1].Value, 2) - if err != nil { - return err - } else if startIndex >= len(runes) { - return iter(ast.StringTerm("")) - } else if startIndex < 0 { - return fmt.Errorf("negative offset") - } - - length, err := builtins.IntOperand(operands[2].Value, 3) - if err != nil { - return err - } - - var s ast.String - if length < 0 { - s = ast.String(runes[startIndex:]) - } else { - upto := startIndex + length - if len(runes) < upto { - upto = len(runes) - } - s = ast.String(runes[startIndex:upto]) - } - - return iter(ast.NewTerm(s)) -} - -func builtinContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - substr, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - 
return iter(ast.BooleanTerm(strings.Contains(string(s), string(substr)))) -} - -func builtinStringCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - substr, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - baseTerm := string(s) - searchTerm := string(substr) - - count := strings.Count(baseTerm, searchTerm) - - return iter(ast.IntNumberTerm(count)) -} - -func builtinStartsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - prefix, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.BooleanTerm(strings.HasPrefix(string(s), string(prefix)))) -} - -func builtinEndsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - suffix, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.BooleanTerm(strings.HasSuffix(string(s), string(suffix)))) -} - -func builtinLower(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.ToLower(string(s)))) -} - -func builtinUpper(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.ToUpper(string(s)))) -} - -func builtinSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - d, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - elems := strings.Split(string(s), string(d)) - arr := make([]*ast.Term, len(elems)) - for i := range elems { - arr[i] = ast.StringTerm(elems[i]) - } - return iter(ast.ArrayTerm(arr...)) -} - -func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - old, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - n, err := builtins.StringOperand(operands[2].Value, 3) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.Replace(string(s), string(old), string(n), -1))) -} - -func builtinReplaceN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - patterns, err := builtins.ObjectOperand(operands[0].Value, 1) - if err != nil { - return err - } - keys := patterns.Keys() - sort.Slice(keys, func(i, j int) bool { return ast.Compare(keys[i].Value, keys[j].Value) < 0 }) - - s, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - oldnewArr := make([]string, 0, len(keys)*2) - for _, k := range keys { - keyVal, ok := k.Value.(ast.String) - if !ok { - return builtins.NewOperandErr(1, "non-string key found in pattern object") - } - val := patterns.Get(k) // cannot be nil - strVal, ok := val.Value.(ast.String) - if !ok { - return builtins.NewOperandErr(1, "non-string value found in pattern object") - } - oldnewArr = append(oldnewArr, 
string(keyVal), string(strVal)) - } - if err != nil { - return err - } - - r := strings.NewReplacer(oldnewArr...) - replaced := r.Replace(string(s)) - - return iter(ast.StringTerm(replaced)) -} - -func builtinTrim(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - c, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.Trim(string(s), string(c)))) -} - -func builtinTrimLeft(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - c, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.TrimLeft(string(s), string(c)))) -} - -func builtinTrimPrefix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - pre, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.TrimPrefix(string(s), string(pre)))) -} - -func builtinTrimRight(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - c, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.TrimRight(string(s), string(c)))) -} - -func builtinTrimSuffix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - suf, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.TrimSuffix(string(s), string(suf)))) -} - -func builtinTrimSpace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - return iter(ast.StringTerm(strings.TrimSpace(string(s)))) -} - -func builtinSprintf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - astArr, ok := operands[1].Value.(*ast.Array) - if !ok { - return builtins.NewOperandTypeErr(2, operands[1].Value, "array") - } - - args := make([]interface{}, astArr.Len()) - - for i := range args { - switch v := astArr.Elem(i).Value.(type) { - case ast.Number: - if n, ok := v.Int(); ok { - args[i] = n - } else if b, ok := new(big.Int).SetString(v.String(), 10); ok { - args[i] = b - } else if f, ok := v.Float64(); ok { - args[i] = f - } else { - args[i] = v.String() - } - case ast.String: - args[i] = string(v) - default: - args[i] = astArr.Elem(i).String() - } - } - - return iter(ast.StringTerm(fmt.Sprintf(string(s), args...))) -} - -func builtinReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - s, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - return iter(ast.StringTerm(reverseString(string(s)))) -} - -func reverseString(str string) string { - sRunes := []rune(str) - length := len(sRunes) - reversedRunes := make([]rune, length) - - for index, r := range sRunes { - reversedRunes[length-index-1] = r 
- } - - return string(reversedRunes) -} - -func init() { - RegisterBuiltinFunc(ast.FormatInt.Name, builtinFormatInt) - RegisterBuiltinFunc(ast.Concat.Name, builtinConcat) - RegisterBuiltinFunc(ast.IndexOf.Name, builtinIndexOf) - RegisterBuiltinFunc(ast.IndexOfN.Name, builtinIndexOfN) - RegisterBuiltinFunc(ast.Substring.Name, builtinSubstring) - RegisterBuiltinFunc(ast.Contains.Name, builtinContains) - RegisterBuiltinFunc(ast.StringCount.Name, builtinStringCount) - RegisterBuiltinFunc(ast.StartsWith.Name, builtinStartsWith) - RegisterBuiltinFunc(ast.EndsWith.Name, builtinEndsWith) - RegisterBuiltinFunc(ast.Upper.Name, builtinUpper) - RegisterBuiltinFunc(ast.Lower.Name, builtinLower) - RegisterBuiltinFunc(ast.Split.Name, builtinSplit) - RegisterBuiltinFunc(ast.Replace.Name, builtinReplace) - RegisterBuiltinFunc(ast.ReplaceN.Name, builtinReplaceN) - RegisterBuiltinFunc(ast.Trim.Name, builtinTrim) - RegisterBuiltinFunc(ast.TrimLeft.Name, builtinTrimLeft) - RegisterBuiltinFunc(ast.TrimPrefix.Name, builtinTrimPrefix) - RegisterBuiltinFunc(ast.TrimRight.Name, builtinTrimRight) - RegisterBuiltinFunc(ast.TrimSuffix.Name, builtinTrimSuffix) - RegisterBuiltinFunc(ast.TrimSpace.Name, builtinTrimSpace) - RegisterBuiltinFunc(ast.Sprintf.Name, builtinSprintf) - RegisterBuiltinFunc(ast.AnyPrefixMatch.Name, builtinAnyPrefixMatch) - RegisterBuiltinFunc(ast.AnySuffixMatch.Name, builtinAnySuffixMatch) - RegisterBuiltinFunc(ast.StringReverse.Name, builtinReverse) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/time.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/time.go deleted file mode 100644 index ba3efc75d..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/time.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2017 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "encoding/json" - "fmt" - "math" - "math/big" - "strconv" - "sync" - "time" - _ "time/tzdata" // this is needed to have LoadLocation when no filesystem tzdata is available - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -var tzCache map[string]*time.Location -var tzCacheMutex *sync.Mutex - -// 1677-09-21T00:12:43.145224192-00:00 -var minDateAllowedForNsConversion = time.Unix(0, math.MinInt64) - -// 2262-04-11T23:47:16.854775807-00:00 -var maxDateAllowedForNsConversion = time.Unix(0, math.MaxInt64) - -func toSafeUnixNano(t time.Time, iter func(*ast.Term) error) error { - if t.Before(minDateAllowedForNsConversion) || t.After(maxDateAllowedForNsConversion) { - return fmt.Errorf("time outside of valid range") - } - - return iter(ast.NewTerm(ast.Number(int64ToJSONNumber(t.UnixNano())))) -} - -func builtinTimeNowNanos(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error { - return iter(bctx.Time) -} - -func builtinTimeParseNanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - format, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - value, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - - formatStr := string(format) - // look for the formatStr in our acceptedTimeFormats and - // use the constant instead if it matches - if f, ok := acceptedTimeFormats[formatStr]; ok { - formatStr = f - } - result, err := time.Parse(formatStr, string(value)) - if err != nil { - return err - } - - return toSafeUnixNano(result, iter) -} - -func builtinTimeParseRFC3339Nanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - value, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - result, err := time.Parse(time.RFC3339, string(value)) - if err != nil { - return err - } - - return toSafeUnixNano(result, iter) -} -func builtinParseDurationNanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - duration, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - value, err := time.ParseDuration(string(duration)) - if err != nil { - return err - } - return iter(ast.NumberTerm(int64ToJSONNumber(int64(value)))) -} - -// Represent exposed constants for formatting from the stdlib time pkg -var acceptedTimeFormats = map[string]string{ - "ANSIC": time.ANSIC, - "UnixDate": time.UnixDate, - "RubyDate": time.RubyDate, - "RFC822": time.RFC822, - "RFC822Z": time.RFC822Z, - "RFC850": time.RFC850, - "RFC1123": time.RFC1123, - "RFC1123Z": time.RFC1123Z, - "RFC3339": time.RFC3339, - "RFC3339Nano": time.RFC3339Nano, -} - -func builtinFormat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t, layout, err := tzTime(operands[0].Value) - if err != nil { - return err - } - // Using RFC3339Nano time formatting as default - if layout == "" { - layout = time.RFC3339Nano - } else if layoutStr, ok := acceptedTimeFormats[layout]; ok { - // if we can find a constant specified, use the constant - layout = layoutStr - } - // otherwise try to treat the fmt string as a datetime fmt string - - timestamp := t.Format(layout) - return iter(ast.StringTerm(timestamp)) -} - -func builtinDate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t, _, err := tzTime(operands[0].Value) - if err != nil { - return err - } - year, month, day := t.Date() - result := 
ast.NewArray(ast.IntNumberTerm(year), ast.IntNumberTerm(int(month)), ast.IntNumberTerm(day)) - return iter(ast.NewTerm(result)) -} - -func builtinClock(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t, _, err := tzTime(operands[0].Value) - if err != nil { - return err - } - hour, minute, second := t.Clock() - result := ast.NewArray(ast.IntNumberTerm(hour), ast.IntNumberTerm(minute), ast.IntNumberTerm(second)) - return iter(ast.NewTerm(result)) -} - -func builtinWeekday(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t, _, err := tzTime(operands[0].Value) - if err != nil { - return err - } - weekday := t.Weekday().String() - return iter(ast.StringTerm(weekday)) -} - -func builtinAddDate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t, _, err := tzTime(operands[0].Value) - if err != nil { - return err - } - - years, err := builtins.IntOperand(operands[1].Value, 2) - if err != nil { - return err - } - - months, err := builtins.IntOperand(operands[2].Value, 3) - if err != nil { - return err - } - - days, err := builtins.IntOperand(operands[3].Value, 4) - if err != nil { - return err - } - - result := t.AddDate(years, months, days) - - return toSafeUnixNano(result, iter) -} - -func builtinDiff(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - t1, _, err := tzTime(operands[0].Value) - if err != nil { - return err - } - t2, _, err := tzTime(operands[1].Value) - if err != nil { - return err - } - - // The following implementation of this function is taken - // from https://github.com/icza/gox licensed under Apache 2.0. - // The only modification made is to variable names. - // - // For details, see https://stackoverflow.com/a/36531443/1705598 - // - // Copyright 2021 icza - // BEGIN REDISTRIBUTION FROM APACHE 2.0 LICENSED PROJECT - if t1.Location() != t2.Location() { - t2 = t2.In(t1.Location()) - } - if t1.After(t2) { - t1, t2 = t2, t1 - } - y1, M1, d1 := t1.Date() - y2, M2, d2 := t2.Date() - - h1, m1, s1 := t1.Clock() - h2, m2, s2 := t2.Clock() - - year := y2 - y1 - month := int(M2 - M1) - day := d2 - d1 - hour := h2 - h1 - min := m2 - m1 - sec := s2 - s1 - - // Normalize negative values - if sec < 0 { - sec += 60 - min-- - } - if min < 0 { - min += 60 - hour-- - } - if hour < 0 { - hour += 24 - day-- - } - if day < 0 { - // Days in month: - t := time.Date(y1, M1, 32, 0, 0, 0, 0, time.UTC) - day += 32 - t.Day() - month-- - } - if month < 0 { - month += 12 - year-- - } - // END REDISTRIBUTION FROM APACHE 2.0 LICENSED PROJECT - - return iter(ast.ArrayTerm(ast.IntNumberTerm(year), ast.IntNumberTerm(month), ast.IntNumberTerm(day), - ast.IntNumberTerm(hour), ast.IntNumberTerm(min), ast.IntNumberTerm(sec))) -} - -func tzTime(a ast.Value) (t time.Time, lay string, err error) { - var nVal ast.Value - loc := time.UTC - layout := "" - switch va := a.(type) { - case *ast.Array: - if va.Len() == 0 { - return time.Time{}, layout, builtins.NewOperandTypeErr(1, a, "either number (ns) or [number (ns), string (tz)]") - } - - nVal, err = builtins.NumberOperand(va.Elem(0).Value, 1) - if err != nil { - return time.Time{}, layout, err - } - - if va.Len() > 1 { - tzVal, err := builtins.StringOperand(va.Elem(1).Value, 1) - if err != nil { - return time.Time{}, layout, err - } - - tzName := string(tzVal) - - switch tzName { - case "", "UTC": - // loc is already UTC - - case "Local": - loc = time.Local - - default: - var ok bool - - tzCacheMutex.Lock() - loc, ok = tzCache[tzName] - - if !ok { - loc, 
err = time.LoadLocation(tzName) - if err != nil { - tzCacheMutex.Unlock() - return time.Time{}, layout, err - } - tzCache[tzName] = loc - } - tzCacheMutex.Unlock() - } - } - - if va.Len() > 2 { - lay, err := builtins.StringOperand(va.Elem(2).Value, 1) - if err != nil { - return time.Time{}, layout, err - } - layout = string(lay) - } - - case ast.Number: - nVal = a - - default: - return time.Time{}, layout, builtins.NewOperandTypeErr(1, a, "either number (ns) or [number (ns), string (tz)]") - } - - value, err := builtins.NumberOperand(nVal, 1) - if err != nil { - return time.Time{}, layout, err - } - - f := builtins.NumberToFloat(value) - i64, acc := f.Int64() - if acc != big.Exact { - return time.Time{}, layout, fmt.Errorf("timestamp too big") - } - - t = time.Unix(0, i64).In(loc) - - return t, layout, nil -} - -func int64ToJSONNumber(i int64) json.Number { - return json.Number(strconv.FormatInt(i, 10)) -} - -func init() { - RegisterBuiltinFunc(ast.NowNanos.Name, builtinTimeNowNanos) - RegisterBuiltinFunc(ast.ParseRFC3339Nanos.Name, builtinTimeParseRFC3339Nanos) - RegisterBuiltinFunc(ast.ParseNanos.Name, builtinTimeParseNanos) - RegisterBuiltinFunc(ast.ParseDurationNanos.Name, builtinParseDurationNanos) - RegisterBuiltinFunc(ast.Format.Name, builtinFormat) - RegisterBuiltinFunc(ast.Date.Name, builtinDate) - RegisterBuiltinFunc(ast.Clock.Name, builtinClock) - RegisterBuiltinFunc(ast.Weekday.Name, builtinWeekday) - RegisterBuiltinFunc(ast.AddDate.Name, builtinAddDate) - RegisterBuiltinFunc(ast.Diff.Name, builtinDiff) - tzCacheMutex = &sync.Mutex{} - tzCache = make(map[string]*time.Location) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/trace.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/trace.go index 277c94b62..4d4cc295e 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/trace.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/topdown/trace.go @@ -5,898 +5,108 @@ package topdown import ( - "bytes" - "fmt" "io" - "slices" - "strings" - iStrs "github.com/open-policy-agent/opa/internal/strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -const ( - minLocationWidth = 5 // len("query") - maxIdealLocationWidth = 64 - columnPadding = 4 - maxExprVarWidth = 32 - maxPrettyExprVarWidth = 64 + v1 "github.com/open-policy-agent/opa/v1/topdown" ) // Op defines the types of tracing events. -type Op string +type Op = v1.Op const ( // EnterOp is emitted when a new query is about to be evaluated. - EnterOp Op = "Enter" + EnterOp = v1.EnterOp // ExitOp is emitted when a query has evaluated to true. - ExitOp Op = "Exit" + ExitOp = v1.ExitOp // EvalOp is emitted when an expression is about to be evaluated. - EvalOp Op = "Eval" + EvalOp = v1.EvalOp // RedoOp is emitted when an expression, rule, or query is being re-evaluated. - RedoOp Op = "Redo" + RedoOp = v1.RedoOp // SaveOp is emitted when an expression is saved instead of evaluated // during partial evaluation. - SaveOp Op = "Save" + SaveOp = v1.SaveOp // FailOp is emitted when an expression evaluates to false. - FailOp Op = "Fail" + FailOp = v1.FailOp // DuplicateOp is emitted when a query has produced a duplicate value. The search // will stop at the point where the duplicate was emitted and backtrack. - DuplicateOp Op = "Duplicate" + DuplicateOp = v1.DuplicateOp // NoteOp is emitted when an expression invokes a tracing built-in function. 
- NoteOp Op = "Note" + NoteOp = v1.NoteOp // IndexOp is emitted during an expression evaluation to represent lookup // matches. - IndexOp Op = "Index" + IndexOp = v1.IndexOp // WasmOp is emitted when resolving a ref using an external // Resolver. - WasmOp Op = "Wasm" + WasmOp = v1.WasmOp // UnifyOp is emitted when two terms are unified. Node will be set to an // equality expression with the two terms. This Node will not have location // info. - UnifyOp Op = "Unify" - FailedAssertionOp Op = "FailedAssertion" + UnifyOp = v1.UnifyOp + FailedAssertionOp = v1.FailedAssertionOp ) // VarMetadata provides some user facing information about // a variable in some policy. -type VarMetadata struct { - Name ast.Var `json:"name"` - Location *ast.Location `json:"location"` -} +type VarMetadata = v1.VarMetadata // Event contains state associated with a tracing event. -type Event struct { - Op Op // Identifies type of event. - Node ast.Node // Contains AST node relevant to the event. - Location *ast.Location // The location of the Node this event relates to. - QueryID uint64 // Identifies the query this event belongs to. - ParentID uint64 // Identifies the parent query this event belongs to. - Locals *ast.ValueMap // Contains local variable bindings from the query context. Nil if variables were not included in the trace event. - LocalMetadata map[ast.Var]VarMetadata // Contains metadata for the local variable bindings. Nil if variables were not included in the trace event. - Message string // Contains message for Note events. - Ref *ast.Ref // Identifies the subject ref for the event. Only applies to Index and Wasm operations. - - input *ast.Term - bindings *bindings - localVirtualCacheSnapshot *ast.ValueMap -} - -func (evt *Event) WithInput(input *ast.Term) *Event { - evt.input = input - return evt -} - -// HasRule returns true if the Event contains an ast.Rule. -func (evt *Event) HasRule() bool { - _, ok := evt.Node.(*ast.Rule) - return ok -} - -// HasBody returns true if the Event contains an ast.Body. -func (evt *Event) HasBody() bool { - _, ok := evt.Node.(ast.Body) - return ok -} - -// HasExpr returns true if the Event contains an ast.Expr. -func (evt *Event) HasExpr() bool { - _, ok := evt.Node.(*ast.Expr) - return ok -} - -// Equal returns true if this event is equal to the other event. -func (evt *Event) Equal(other *Event) bool { - if evt.Op != other.Op { - return false - } - if evt.QueryID != other.QueryID { - return false - } - if evt.ParentID != other.ParentID { - return false - } - if !evt.equalNodes(other) { - return false - } - return evt.Locals.Equal(other.Locals) -} - -func (evt *Event) String() string { - return fmt.Sprintf("%v %v %v (qid=%v, pqid=%v)", evt.Op, evt.Node, evt.Locals, evt.QueryID, evt.ParentID) -} - -// Input returns the input object as it was at the event. -func (evt *Event) Input() *ast.Term { - return evt.input -} - -// Plug plugs event bindings into the provided ast.Term. Because bindings are mutable, this only makes sense to do when -// the event is emitted rather than on recorded trace events as the bindings are going to be different by then. 
-func (evt *Event) Plug(term *ast.Term) *ast.Term { - return evt.bindings.Plug(term) -} - -func (evt *Event) equalNodes(other *Event) bool { - switch a := evt.Node.(type) { - case ast.Body: - if b, ok := other.Node.(ast.Body); ok { - return a.Equal(b) - } - case *ast.Rule: - if b, ok := other.Node.(*ast.Rule); ok { - return a.Equal(b) - } - case *ast.Expr: - if b, ok := other.Node.(*ast.Expr); ok { - return a.Equal(b) - } - case nil: - return other.Node == nil - } - return false -} +type Event = v1.Event // Tracer defines the interface for tracing in the top-down evaluation engine. // Deprecated: Use QueryTracer instead. -type Tracer interface { - Enabled() bool - Trace(*Event) -} +type Tracer = v1.Tracer // QueryTracer defines the interface for tracing in the top-down evaluation engine. // The implementation can provide additional configuration to modify the tracing // behavior for query evaluations. -type QueryTracer interface { - Enabled() bool - TraceEvent(Event) - Config() TraceConfig -} +type QueryTracer = v1.QueryTracer // TraceConfig defines some common configuration for Tracer implementations -type TraceConfig struct { - PlugLocalVars bool // Indicate whether to plug local variable bindings before calling into the tracer. -} - -// legacyTracer Implements the QueryTracer interface by wrapping an older Tracer instance. -type legacyTracer struct { - t Tracer -} - -func (l *legacyTracer) Enabled() bool { - return l.t.Enabled() -} - -func (l *legacyTracer) Config() TraceConfig { - return TraceConfig{ - PlugLocalVars: true, // For backwards compatibility old tracers will plug local variables - } -} - -func (l *legacyTracer) TraceEvent(evt Event) { - l.t.Trace(&evt) -} +type TraceConfig = v1.TraceConfig // WrapLegacyTracer will create a new QueryTracer which wraps an // older Tracer instance. func WrapLegacyTracer(tracer Tracer) QueryTracer { - return &legacyTracer{t: tracer} + return v1.WrapLegacyTracer(tracer) } // BufferTracer implements the Tracer and QueryTracer interface by // simply buffering all events received. -type BufferTracer []*Event +type BufferTracer = v1.BufferTracer // NewBufferTracer returns a new BufferTracer. func NewBufferTracer() *BufferTracer { - return &BufferTracer{} -} - -// Enabled always returns true if the BufferTracer is instantiated. -func (b *BufferTracer) Enabled() bool { - return b != nil -} - -// Trace adds the event to the buffer. -// Deprecated: Use TraceEvent instead. -func (b *BufferTracer) Trace(evt *Event) { - *b = append(*b, evt) -} - -// TraceEvent adds the event to the buffer. -func (b *BufferTracer) TraceEvent(evt Event) { - *b = append(*b, &evt) -} - -// Config returns the Tracers standard configuration -func (b *BufferTracer) Config() TraceConfig { - return TraceConfig{PlugLocalVars: true} + return v1.NewBufferTracer() } // PrettyTrace pretty prints the trace to the writer. 
func PrettyTrace(w io.Writer, trace []*Event) { - PrettyTraceWithOpts(w, trace, PrettyTraceOptions{}) + v1.PrettyTrace(w, trace) } // PrettyTraceWithLocation prints the trace to the writer and includes location information func PrettyTraceWithLocation(w io.Writer, trace []*Event) { - PrettyTraceWithOpts(w, trace, PrettyTraceOptions{Locations: true}) -} - -type PrettyTraceOptions struct { - Locations bool // Include location information - ExprVariables bool // Include variables found in the expression - LocalVariables bool // Include all local variables -} - -type traceRow []string - -func (r *traceRow) add(s string) { - *r = append(*r, s) -} - -type traceTable struct { - rows []traceRow - maxWidths []int + v1.PrettyTraceWithLocation(w, trace) } -func (t *traceTable) add(row traceRow) { - t.rows = append(t.rows, row) - for i := range row { - if i >= len(t.maxWidths) { - t.maxWidths = append(t.maxWidths, len(row[i])) - } else if len(row[i]) > t.maxWidths[i] { - t.maxWidths[i] = len(row[i]) - } - } -} - -func (t *traceTable) write(w io.Writer, padding int) { - for _, row := range t.rows { - for i, cell := range row { - width := t.maxWidths[i] + padding - if i < len(row)-1 { - _, _ = fmt.Fprintf(w, "%-*s ", width, cell) - } else { - _, _ = fmt.Fprintf(w, "%s", cell) - } - } - _, _ = fmt.Fprintln(w) - } -} +type PrettyTraceOptions = v1.PrettyTraceOptions func PrettyTraceWithOpts(w io.Writer, trace []*Event, opts PrettyTraceOptions) { - depths := depths{} - - // FIXME: Can we shorten each location as we process each trace event instead of beforehand? - filePathAliases, _ := getShortenedFileNames(trace) - - table := traceTable{} - - for _, event := range trace { - depth := depths.GetOrSet(event.QueryID, event.ParentID) - row := traceRow{} - - if opts.Locations { - location := formatLocation(event, filePathAliases) - row.add(location) - } - - row.add(formatEvent(event, depth)) - - if opts.ExprVariables { - vars := exprLocalVars(event) - keys := sortedKeys(vars) - - buf := new(bytes.Buffer) - buf.WriteString("{") - for i, k := range keys { - if i > 0 { - buf.WriteString(", ") - } - _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(vars.Get(k).String(), maxExprVarWidth)) - } - buf.WriteString("}") - row.add(buf.String()) - } - - if opts.LocalVariables { - if locals := event.Locals; locals != nil { - keys := sortedKeys(locals) - - buf := new(bytes.Buffer) - buf.WriteString("{") - for i, k := range keys { - if i > 0 { - buf.WriteString(", ") - } - _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(locals.Get(k).String(), maxExprVarWidth)) - } - buf.WriteString("}") - row.add(buf.String()) - } else { - row.add("{}") - } - } - - table.add(row) - } - - table.write(w, columnPadding) -} - -func sortedKeys(vm *ast.ValueMap) []ast.Value { - keys := make([]ast.Value, 0, vm.Len()) - vm.Iter(func(k, _ ast.Value) bool { - keys = append(keys, k) - return false - }) - slices.SortFunc(keys, func(a, b ast.Value) int { - return strings.Compare(a.String(), b.String()) - }) - return keys -} - -func exprLocalVars(e *Event) *ast.ValueMap { - vars := ast.NewValueMap() - - findVars := func(term *ast.Term) bool { - //if r, ok := term.Value.(ast.Ref); ok { - // fmt.Printf("ref: %v\n", r) - // //return true - //} - if name, ok := term.Value.(ast.Var); ok { - if meta, ok := e.LocalMetadata[name]; ok { - if val := e.Locals.Get(name); val != nil { - vars.Put(meta.Name, val) - } - } - } - return false - } - - if r, ok := e.Node.(*ast.Rule); ok { - // We're only interested in vars in the head, not the body - 
ast.WalkTerms(r.Head, findVars) - return vars - } - - // The local cache snapshot only contains a snapshot for those refs present in the event node, - // so they can all be added to the vars map. - e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool { - vars.Put(k, v) - return false - }) - - ast.WalkTerms(e.Node, findVars) - - return vars -} - -func formatEvent(event *Event, depth int) string { - padding := formatEventPadding(event, depth) - if event.Op == NoteOp { - return fmt.Sprintf("%v%v %q", padding, event.Op, event.Message) - } - - var details interface{} - if node, ok := event.Node.(*ast.Rule); ok { - details = node.Path() - } else if event.Ref != nil { - details = event.Ref - } else { - details = rewrite(event).Node - } - - template := "%v%v %v" - opts := []interface{}{padding, event.Op, details} - - if event.Message != "" { - template += " %v" - opts = append(opts, event.Message) - } - - return fmt.Sprintf(template, opts...) + v1.PrettyTraceWithOpts(w, trace, opts) } -func formatEventPadding(event *Event, depth int) string { - spaces := formatEventSpaces(event, depth) - if spaces > 1 { - return strings.Repeat("| ", spaces-1) - } - return "" -} - -func formatEventSpaces(event *Event, depth int) int { - switch event.Op { - case EnterOp: - return depth - case RedoOp: - if _, ok := event.Node.(*ast.Expr); !ok { - return depth - } - } - return depth + 1 -} - -// getShortenedFileNames will return a map of file paths to shortened aliases -// that were found in the trace. It also returns the longest location expected -func getShortenedFileNames(trace []*Event) (map[string]string, int) { - // Get a deduplicated list of all file paths - // and the longest file path size - fpAliases := map[string]string{} - var canShorten []string - longestLocation := 0 - for _, event := range trace { - if event.Location != nil { - if event.Location.File != "" { - // length of ":" - curLen := len(event.Location.File) + numDigits10(event.Location.Row) + 1 - if curLen > longestLocation { - longestLocation = curLen - } - - if _, ok := fpAliases[event.Location.File]; ok { - continue - } - - canShorten = append(canShorten, event.Location.File) - - // Default to just alias their full path - fpAliases[event.Location.File] = event.Location.File - } else { - // length of ":" - curLen := minLocationWidth + numDigits10(event.Location.Row) + 1 - if curLen > longestLocation { - longestLocation = curLen - } - } - } - } - - if len(canShorten) > 0 && longestLocation > maxIdealLocationWidth { - fpAliases, longestLocation = iStrs.TruncateFilePaths(maxIdealLocationWidth, longestLocation, canShorten...) - } - - return fpAliases, longestLocation -} - -func numDigits10(n int) int { - if n < 10 { - return 1 - } - return numDigits10(n/10) + 1 -} - -func formatLocation(event *Event, fileAliases map[string]string) string { - - location := event.Location - if location == nil { - return "" - } - - if location.File == "" { - return fmt.Sprintf("query:%v", location.Row) - } - - return fmt.Sprintf("%v:%v", fileAliases[location.File], location.Row) -} - -// depths is a helper for computing the depth of an event. Events within the -// same query all have the same depth. The depth of query is -// depth(parent(query))+1. 
-type depths map[uint64]int - -func (ds depths) GetOrSet(qid uint64, pqid uint64) int { - depth := ds[qid] - if depth == 0 { - depth = ds[pqid] - depth++ - ds[qid] = depth - } - return depth -} - -func builtinTrace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - str, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return handleBuiltinErr(ast.Trace.Name, bctx.Location, err) - } - - if !bctx.TraceEnabled { - return iter(ast.BooleanTerm(true)) - } - - evt := Event{ - Op: NoteOp, - Location: bctx.Location, - QueryID: bctx.QueryID, - ParentID: bctx.ParentID, - Message: string(str), - } - - for i := range bctx.QueryTracers { - bctx.QueryTracers[i].TraceEvent(evt) - } - - return iter(ast.BooleanTerm(true)) -} - -func rewrite(event *Event) *Event { - - cpy := *event - - var node ast.Node - - switch v := event.Node.(type) { - case *ast.Expr: - expr := v.Copy() - - // Hide generated local vars in 'key' position that have not been - // rewritten. - if ev, ok := v.Terms.(*ast.Every); ok { - if kv, ok := ev.Key.Value.(ast.Var); ok { - if rw, ok := cpy.LocalMetadata[kv]; !ok || rw.Name.IsGenerated() { - expr.Terms.(*ast.Every).Key = nil - } - } - } - node = expr - case ast.Body: - node = v.Copy() - case *ast.Rule: - node = v.Copy() - } - - _, _ = ast.TransformVars(node, func(v ast.Var) (ast.Value, error) { - if meta, ok := cpy.LocalMetadata[v]; ok { - return meta.Name, nil - } - return v, nil - }) - - cpy.Node = node - - return &cpy -} - -type varInfo struct { - VarMetadata - val ast.Value - exprLoc *ast.Location - col int // 0-indexed column -} - -func (v varInfo) Value() string { - if v.val != nil { - return v.val.String() - } - return "undefined" -} - -func (v varInfo) Title() string { - if v.exprLoc != nil && v.exprLoc.Text != nil { - return string(v.exprLoc.Text) - } - return string(v.Name) -} - -func padLocationText(loc *ast.Location) string { - if loc == nil { - return "" - } - - text := string(loc.Text) - - if loc.Col == 0 { - return text - } - - buf := new(bytes.Buffer) - j := 0 - for i := 1; i < loc.Col; i++ { - if len(loc.Tabs) > 0 && j < len(loc.Tabs) && loc.Tabs[j] == i { - buf.WriteString("\t") - j++ - } else { - buf.WriteString(" ") - } - } - - buf.WriteString(text) - return buf.String() -} - -type PrettyEventOpts struct { - PrettyVars bool -} - -func walkTestTerms(x interface{}, f func(*ast.Term) bool) { - var vis *ast.GenericVisitor - vis = ast.NewGenericVisitor(func(x interface{}) bool { - switch x := x.(type) { - case ast.Call: - for _, t := range x[1:] { - vis.Walk(t) - } - return true - case *ast.Expr: - if x.IsCall() { - for _, o := range x.Operands() { - vis.Walk(o) - } - for i := range x.With { - vis.Walk(x.With[i]) - } - return true - } - case *ast.Term: - return f(x) - case *ast.With: - vis.Walk(x.Value) - return true - } - return false - }) - vis.Walk(x) -} +type PrettyEventOpts = v1.PrettyEventOpts func PrettyEvent(w io.Writer, e *Event, opts PrettyEventOpts) error { - if !opts.PrettyVars { - _, _ = fmt.Fprintln(w, padLocationText(e.Location)) - return nil - } - - buf := new(bytes.Buffer) - exprVars := map[string]varInfo{} - - findVars := func(unknownAreUndefined bool) func(term *ast.Term) bool { - return func(term *ast.Term) bool { - if term.Location == nil { - return false - } - - switch v := term.Value.(type) { - case *ast.ArrayComprehension, *ast.SetComprehension, *ast.ObjectComprehension: - // we don't report on the internals of a comprehension, as it's already evaluated, and we won't have the local vars. 
- return true - case ast.Var: - var info *varInfo - if meta, ok := e.LocalMetadata[v]; ok { - info = &varInfo{ - VarMetadata: meta, - val: e.Locals.Get(v), - exprLoc: term.Location, - } - } else if unknownAreUndefined { - info = &varInfo{ - VarMetadata: VarMetadata{Name: v}, - exprLoc: term.Location, - col: term.Location.Col, - } - } - - if info != nil { - if v, exists := exprVars[info.Title()]; !exists || v.val == nil { - if term.Location != nil { - info.col = term.Location.Col - } - exprVars[info.Title()] = *info - } - } - } - return false - } - } - - expr, ok := e.Node.(*ast.Expr) - if !ok || expr == nil { - return nil - } - - base := expr.BaseCogeneratedExpr() - exprText := padLocationText(base.Location) - buf.WriteString(exprText) - - e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool { - var info *varInfo - switch k := k.(type) { - case ast.Ref: - info = &varInfo{ - VarMetadata: VarMetadata{Name: ast.Var(k.String())}, - val: v, - exprLoc: k[0].Location, - col: k[0].Location.Col, - } - case *ast.ArrayComprehension: - info = &varInfo{ - VarMetadata: VarMetadata{Name: ast.Var(k.String())}, - val: v, - exprLoc: k.Term.Location, - col: k.Term.Location.Col, - } - case *ast.SetComprehension: - info = &varInfo{ - VarMetadata: VarMetadata{Name: ast.Var(k.String())}, - val: v, - exprLoc: k.Term.Location, - col: k.Term.Location.Col, - } - case *ast.ObjectComprehension: - info = &varInfo{ - VarMetadata: VarMetadata{Name: ast.Var(k.String())}, - val: v, - exprLoc: k.Key.Location, - col: k.Key.Location.Col, - } - } - - if info != nil { - exprVars[info.Title()] = *info - } - - return false - }) - - // If the expression is negated, we can't confidently assert that vars with unknown values are 'undefined', - // since the compiler might have opted out of the necessary rewrite. - walkTestTerms(expr, findVars(!expr.Negated)) - coExprs := expr.CogeneratedExprs() - for _, coExpr := range coExprs { - // Only the current "co-expr" can have undefined vars, if we don't know the value for a var in any other co-expr, - // it's unknown, not undefined. A var can be unknown if it hasn't been assigned a value yet, because the co-expr - // hasn't been evaluated yet (the fail happened before it). - walkTestTerms(coExpr, findVars(false)) - } - - printPrettyVars(buf, exprVars) - _, _ = fmt.Fprint(w, buf.String()) - return nil -} - -func printPrettyVars(w *bytes.Buffer, exprVars map[string]varInfo) { - containsTabs := false - varRows := make(map[int]interface{}) - for _, info := range exprVars { - if len(info.exprLoc.Tabs) > 0 { - containsTabs = true - } - varRows[info.exprLoc.Row] = nil - } - - if containsTabs && len(varRows) > 1 { - // We can't (currently) reliably point to var locations when they are on different rows that contain tabs. - // So we'll just print them in alphabetical order instead. 
- byName := make([]varInfo, 0, len(exprVars)) - for _, info := range exprVars { - byName = append(byName, info) - } - slices.SortStableFunc(byName, func(a, b varInfo) int { - return strings.Compare(a.Title(), b.Title()) - }) - - w.WriteString("\n\nWhere:\n") - for _, info := range byName { - w.WriteString(fmt.Sprintf("\n%s: %s", info.Title(), iStrs.Truncate(info.Value(), maxPrettyExprVarWidth))) - } - - return - } - - byCol := make([]varInfo, 0, len(exprVars)) - for _, info := range exprVars { - byCol = append(byCol, info) - } - slices.SortFunc(byCol, func(a, b varInfo) int { - // sort first by column, then by reverse row (to present vars in the same order they appear in the expr) - if a.col == b.col { - if a.exprLoc.Row == b.exprLoc.Row { - return strings.Compare(a.Title(), b.Title()) - } - return b.exprLoc.Row - a.exprLoc.Row - } - return a.col - b.col - }) - - if len(byCol) == 0 { - return - } - - w.WriteString("\n") - printArrows(w, byCol, -1) - for i := len(byCol) - 1; i >= 0; i-- { - w.WriteString("\n") - printArrows(w, byCol, i) - } -} - -func printArrows(w *bytes.Buffer, l []varInfo, printValueAt int) { - prevCol := 0 - var slice []varInfo - if printValueAt >= 0 { - slice = l[:printValueAt+1] - } else { - slice = l - } - isFirst := true - for i, info := range slice { - - isLast := i >= len(slice)-1 - col := info.col - - if !isLast && col == l[i+1].col { - // We're sharing the same column with another, subsequent var - continue - } - - spaces := col - 1 - if i > 0 && !isFirst { - spaces = (col - prevCol) - 1 - } - - for j := 0; j < spaces; j++ { - tab := false - for _, t := range info.exprLoc.Tabs { - if t == j+prevCol+1 { - w.WriteString("\t") - tab = true - break - } - } - if !tab { - w.WriteString(" ") - } - } - - if isLast && printValueAt >= 0 { - valueStr := iStrs.Truncate(info.Value(), maxPrettyExprVarWidth) - if (i > 0 && col == l[i-1].col) || (i < len(l)-1 && col == l[i+1].col) { - // There is another var on this column, so we need to include the name to differentiate them. - w.WriteString(fmt.Sprintf("%s: %s", info.Title(), valueStr)) - } else { - w.WriteString(valueStr) - } - } else { - w.WriteString("|") - } - prevCol = col - isFirst = false - } -} - -func init() { - RegisterBuiltinFunc(ast.Trace.Name, builtinTrace) + return v1.PrettyEvent(w, e, opts) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/type.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/type.go deleted file mode 100644 index dab5c853c..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/type.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2022 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "github.com/open-policy-agent/opa/ast" -) - -func builtinIsNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Number: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.String: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Boolean: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case *ast.Array: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Set: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsObject(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Object: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func builtinIsNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Null: - return iter(ast.BooleanTerm(true)) - default: - return iter(ast.BooleanTerm(false)) - } -} - -func init() { - RegisterBuiltinFunc(ast.IsNumber.Name, builtinIsNumber) - RegisterBuiltinFunc(ast.IsString.Name, builtinIsString) - RegisterBuiltinFunc(ast.IsBoolean.Name, builtinIsBoolean) - RegisterBuiltinFunc(ast.IsArray.Name, builtinIsArray) - RegisterBuiltinFunc(ast.IsSet.Name, builtinIsSet) - RegisterBuiltinFunc(ast.IsObject.Name, builtinIsObject) - RegisterBuiltinFunc(ast.IsNull.Name, builtinIsNull) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/type_name.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/type_name.go deleted file mode 100644 index 0a8b44aed..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/type_name.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. 
- -package topdown - -import ( - "fmt" - - "github.com/open-policy-agent/opa/ast" -) - -func builtinTypeName(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - switch operands[0].Value.(type) { - case ast.Null: - return iter(ast.StringTerm("null")) - case ast.Boolean: - return iter(ast.StringTerm("boolean")) - case ast.Number: - return iter(ast.StringTerm("number")) - case ast.String: - return iter(ast.StringTerm("string")) - case *ast.Array: - return iter(ast.StringTerm("array")) - case ast.Object: - return iter(ast.StringTerm("object")) - case ast.Set: - return iter(ast.StringTerm("set")) - } - - return fmt.Errorf("illegal value") -} - -func init() { - RegisterBuiltinFunc(ast.TypeNameBuiltin.Name, builtinTypeName) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/uuid.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/uuid.go deleted file mode 100644 index d3a7a5f90..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/uuid.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2020 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/internal/uuid" - "github.com/open-policy-agent/opa/topdown/builtins" -) - -type uuidCachingKey string - -func builtinUUIDRFC4122(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - - var key = uuidCachingKey(operands[0].Value.String()) - - val, ok := bctx.Cache.Get(key) - if ok { - return iter(val.(*ast.Term)) - } - - s, err := uuid.New(bctx.Seed) - if err != nil { - return err - } - - result := ast.NewTerm(ast.String(s)) - bctx.Cache.Put(key, result) - - return iter(result) -} - -func builtinUUIDParse(_ BuiltinContext, operands []*ast.Term, iter func(term *ast.Term) error) error { - str, err := builtins.StringOperand(operands[0].Value, 1) - if err != nil { - return err - } - - parsed, err := uuid.Parse(string(str)) - if err != nil { - return nil - } - val, err := ast.InterfaceToValue(parsed) - if err != nil { - return err - } - - return iter(ast.NewTerm(val)) -} - -func init() { - RegisterBuiltinFunc(ast.UUIDRFC4122.Name, builtinUUIDRFC4122) - RegisterBuiltinFunc(ast.UUIDParse.Name, builtinUUIDParse) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/walk.go b/constraint/vendor/github.com/open-policy-agent/opa/topdown/walk.go deleted file mode 100644 index 0f3b3544b..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/walk.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package topdown - -import ( - "github.com/open-policy-agent/opa/ast" -) - -func evalWalk(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - input := operands[0] - - if pathIsWildcard(operands) { - // When the path assignment is a wildcard: walk(input, [_, value]) - // we may skip the path construction entirely, and simply return - // same pointer in each iteration. This is a much more efficient - // path when only the values are needed. 
- return walkNoPath(input, iter) - } - - filter := getOutputPath(operands) - return walk(filter, nil, input, iter) -} - -func walk(filter, path *ast.Array, input *ast.Term, iter func(*ast.Term) error) error { - - if filter == nil || filter.Len() == 0 { - if path == nil { - path = ast.NewArray() - } - - if err := iter(ast.ArrayTerm(ast.NewTerm(path.Copy()), input)); err != nil { - return err - } - } - - if filter != nil && filter.Len() > 0 { - key := filter.Elem(0) - filter = filter.Slice(1, -1) - if key.IsGround() { - if term := input.Get(key); term != nil { - path = pathAppend(path, key) - return walk(filter, path, term, iter) - } - return nil - } - } - - switch v := input.Value.(type) { - case *ast.Array: - for i := 0; i < v.Len(); i++ { - path = pathAppend(path, ast.IntNumberTerm(i)) - if err := walk(filter, path, v.Elem(i), iter); err != nil { - return err - } - path = path.Slice(0, path.Len()-1) - } - case ast.Object: - return v.Iter(func(k, v *ast.Term) error { - path = pathAppend(path, k) - if err := walk(filter, path, v, iter); err != nil { - return err - } - path = path.Slice(0, path.Len()-1) - return nil - }) - case ast.Set: - return v.Iter(func(elem *ast.Term) error { - path = pathAppend(path, elem) - if err := walk(filter, path, elem, iter); err != nil { - return err - } - path = path.Slice(0, path.Len()-1) - return nil - }) - } - - return nil -} - -var emptyArr = ast.ArrayTerm() - -func walkNoPath(input *ast.Term, iter func(*ast.Term) error) error { - if err := iter(ast.ArrayTerm(emptyArr, input)); err != nil { - return err - } - - switch v := input.Value.(type) { - case ast.Object: - return v.Iter(func(_, v *ast.Term) error { - return walkNoPath(v, iter) - }) - case *ast.Array: - for i := 0; i < v.Len(); i++ { - if err := walkNoPath(v.Elem(i), iter); err != nil { - return err - } - } - case ast.Set: - return v.Iter(func(elem *ast.Term) error { - return walkNoPath(elem, iter) - }) - } - - return nil -} - -func pathAppend(path *ast.Array, key *ast.Term) *ast.Array { - if path == nil { - return ast.NewArray(key) - } - - return path.Append(key) -} - -func getOutputPath(operands []*ast.Term) *ast.Array { - if len(operands) == 2 { - if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 { - if path, ok := arr.Elem(0).Value.(*ast.Array); ok { - return path - } - } - } - return nil -} - -func pathIsWildcard(operands []*ast.Term) bool { - if len(operands) == 2 { - if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 { - if v, ok := arr.Elem(0).Value.(ast.Var); ok { - return v.IsWildcard() - } - } - } - return false -} - -func init() { - RegisterBuiltinFunc(ast.WalkBuiltin.Name, evalWalk) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/types/decode.go b/constraint/vendor/github.com/open-policy-agent/opa/types/decode.go index a6bd9ea03..ae04b38ff 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/types/decode.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/types/decode.go @@ -5,187 +5,10 @@ package types import ( - "encoding/json" - "fmt" - - "github.com/open-policy-agent/opa/util" -) - -const ( - typeNull = "null" - typeBoolean = "boolean" - typeNumber = "number" - typeString = "string" - typeArray = "array" - typeSet = "set" - typeObject = "object" - typeAny = "any" - typeFunction = "function" + v1 "github.com/open-policy-agent/opa/v1/types" ) // Unmarshal deserializes bs and returns the resulting type. 
func Unmarshal(bs []byte) (result Type, err error) { - - var hint rawtype - - if err = util.UnmarshalJSON(bs, &hint); err == nil { - switch hint.Type { - case typeNull: - result = NewNull() - case typeBoolean: - result = NewBoolean() - case typeNumber: - result = NewNumber() - case typeString: - result = NewString() - case typeArray: - var arr rawarray - if err = util.UnmarshalJSON(bs, &arr); err == nil { - var err error - var static []Type - var dynamic Type - if static, err = unmarshalSlice(arr.Static); err != nil { - return nil, err - } - if len(arr.Dynamic) != 0 { - if dynamic, err = Unmarshal(arr.Dynamic); err != nil { - return nil, err - } - } - result = NewArray(static, dynamic) - } - case typeObject: - var obj rawobject - if err = util.UnmarshalJSON(bs, &obj); err == nil { - var err error - var static []*StaticProperty - var dynamic *DynamicProperty - if static, err = unmarshalStaticPropertySlice(obj.Static); err != nil { - return nil, err - } - if dynamic, err = unmarshalDynamicProperty(obj.Dynamic); err != nil { - return nil, err - } - result = NewObject(static, dynamic) - } - case typeSet: - var set rawset - if err = util.UnmarshalJSON(bs, &set); err == nil { - var of Type - if of, err = Unmarshal(set.Of); err == nil { - result = NewSet(of) - } - } - case typeAny: - var union rawunion - if err = util.UnmarshalJSON(bs, &union); err == nil { - var of []Type - if of, err = unmarshalSlice(union.Of); err == nil { - result = NewAny(of...) - } - } - case typeFunction: - var decl rawdecl - if err = util.UnmarshalJSON(bs, &decl); err == nil { - args, err := unmarshalSlice(decl.Args) - if err != nil { - return nil, err - } - var ret Type - if len(decl.Result) > 0 { - ret, err = Unmarshal(decl.Result) - if err != nil { - return nil, err - } - } - if len(decl.Variadic) > 0 { - varargs, err := Unmarshal(decl.Variadic) - if err != nil { - return nil, err - } - result = NewVariadicFunction(args, varargs, ret) - } else { - result = NewFunction(args, ret) - } - } - default: - err = fmt.Errorf("unsupported type '%v'", hint.Type) - } - } - - return result, err -} - -type rawtype struct { - Type string `json:"type"` -} - -type rawarray struct { - Static []json.RawMessage `json:"static"` - Dynamic json.RawMessage `json:"dynamic"` -} - -type rawobject struct { - Static []rawstaticproperty `json:"static"` - Dynamic rawdynamicproperty `json:"dynamic"` -} - -type rawstaticproperty struct { - Key interface{} `json:"key"` - Value json.RawMessage `json:"value"` -} - -type rawdynamicproperty struct { - Key json.RawMessage `json:"key"` - Value json.RawMessage `json:"value"` -} - -type rawset struct { - Of json.RawMessage `json:"of"` -} - -type rawunion struct { - Of []json.RawMessage `json:"of"` -} - -type rawdecl struct { - Args []json.RawMessage `json:"args"` - Result json.RawMessage `json:"result"` - Variadic json.RawMessage `json:"variadic"` -} - -func unmarshalSlice(elems []json.RawMessage) (result []Type, err error) { - result = make([]Type, len(elems)) - for i := range elems { - if result[i], err = Unmarshal(elems[i]); err != nil { - return nil, err - } - } - return result, err -} - -func unmarshalStaticPropertySlice(elems []rawstaticproperty) (result []*StaticProperty, err error) { - result = make([]*StaticProperty, len(elems)) - for i := range elems { - value, err := Unmarshal(elems[i].Value) - if err != nil { - return nil, err - } - result[i] = NewStaticProperty(elems[i].Key, value) - } - return result, err -} - -func unmarshalDynamicProperty(x rawdynamicproperty) (result *DynamicProperty, err 
error) { - if len(x.Key) == 0 { - return nil, nil - } - var key Type - if key, err = Unmarshal(x.Key); err == nil { - var value Type - if value, err = Unmarshal(x.Value); err == nil { - return NewDynamicProperty(key, value), nil - } - } - return nil, err + return v1.Unmarshal(bs) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/types/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/types/doc.go new file mode 100644 index 000000000..bfa068e66 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/types/doc.go @@ -0,0 +1,8 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Deprecated: This package is intended for older projects transitioning from OPA v0.x and will remain for the lifetime of OPA v1.x, but its use is not recommended. +// For newer features and behaviours, such as defaulting to the Rego v1 syntax, use the corresponding components in the [github.com/open-policy-agent/opa/v1] package instead. +// See https://www.openpolicyagent.org/docs/latest/v0-compatibility/ for more information. +package types diff --git a/constraint/vendor/github.com/open-policy-agent/opa/types/types.go b/constraint/vendor/github.com/open-policy-agent/opa/types/types.go index 2a050927d..b888b27b6 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/types/types.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/types/types.go @@ -7,1197 +7,194 @@ package types import ( - "encoding/json" - "fmt" - "sort" - "strings" - - "github.com/open-policy-agent/opa/util" + v1 "github.com/open-policy-agent/opa/v1/types" ) // Sprint returns the string representation of the type. func Sprint(x Type) string { - if x == nil { - return "???" - } - return x.String() + return v1.Sprint(x) } // Type represents a type of a term in the language. -type Type interface { - String() string - typeMarker() string - json.Marshaler -} - -func (Null) typeMarker() string { return typeNull } -func (Boolean) typeMarker() string { return typeBoolean } -func (Number) typeMarker() string { return typeNumber } -func (String) typeMarker() string { return typeString } -func (*Array) typeMarker() string { return typeArray } -func (*Object) typeMarker() string { return typeObject } -func (*Set) typeMarker() string { return typeSet } -func (Any) typeMarker() string { return typeAny } -func (Function) typeMarker() string { return typeFunction } +type Type = v1.Type // Null represents the null type. -type Null struct{} +type Null = v1.Null // NewNull returns a new Null type. func NewNull() Null { - return Null{} + return v1.NewNull() } // NamedType represents a type alias with an arbitrary name and description. // This is useful for generating documentation for built-in functions. 
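The new doc.go above marks this package as a thin compatibility layer: existing callers can keep importing github.com/open-policy-agent/opa/types, while new code is expected to use github.com/open-policy-agent/opa/v1/types directly. A minimal, hypothetical sketch of what direct v1 usage looks like with the constructors wrapped in this file (illustrative only, not part of this diff):

	package main

	import (
		"fmt"

		types "github.com/open-policy-agent/opa/v1/types"
	)

	func main() {
		// object<name: string>[string: number], built with the v1 constructors
		// that the v0 wrappers in this file now delegate to.
		obj := types.NewObject(
			[]*types.StaticProperty{types.NewStaticProperty("name", types.NewString())},
			types.NewDynamicProperty(types.NewString(), types.NewNumber()),
		)
		fmt.Println(types.Sprint(obj)) // e.g. "object<name: string>[string: number]"
	}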
-type NamedType struct { - Name, Descr string - Type Type -} - -func (n *NamedType) typeMarker() string { return n.Type.typeMarker() } -func (n *NamedType) String() string { return n.Name + ": " + n.Type.String() } -func (n *NamedType) MarshalJSON() ([]byte, error) { - var obj map[string]interface{} - switch x := n.Type.(type) { - case interface{ toMap() map[string]interface{} }: - obj = x.toMap() - default: - obj = map[string]interface{}{ - "type": n.Type.typeMarker(), - } - } - obj["name"] = n.Name - if n.Descr != "" { - obj["description"] = n.Descr - } - return json.Marshal(obj) -} - -func (n *NamedType) Description(d string) *NamedType { - n.Descr = d - return n -} +type NamedType = v1.NamedType // Named returns the passed type as a named type. // Named types are only valid at the top level of built-in functions. // Note that nested named types cause panic. func Named(name string, t Type) *NamedType { - return &NamedType{ - Type: t, - Name: name, - } -} - -// MarshalJSON returns the JSON encoding of t. -func (t Null) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "type": t.typeMarker(), - }) -} - -func unwrap(t Type) Type { - switch t := t.(type) { - case *NamedType: - return t.Type - default: - return t - } -} - -func (t Null) String() string { - return typeNull + return v1.Named(name, t) } // Boolean represents the boolean type. -type Boolean struct{} +type Boolean = v1.Boolean // B represents an instance of the boolean type. var B = NewBoolean() // NewBoolean returns a new Boolean type. func NewBoolean() Boolean { - return Boolean{} -} - -// MarshalJSON returns the JSON encoding of t. -func (t Boolean) MarshalJSON() ([]byte, error) { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - return json.Marshal(repr) -} - -func (t Boolean) String() string { - return t.typeMarker() + return v1.NewBoolean() } // String represents the string type. -type String struct{} +type String = v1.String // S represents an instance of the string type. var S = NewString() // NewString returns a new String type. func NewString() String { - return String{} -} - -// MarshalJSON returns the JSON encoding of t. -func (t String) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "type": t.typeMarker(), - }) -} - -func (String) String() string { - return typeString + return v1.NewString() } // Number represents the number type. -type Number struct{} +type Number = v1.Number // N represents an instance of the number type. var N = NewNumber() // NewNumber returns a new Number type. func NewNumber() Number { - return Number{} -} - -// MarshalJSON returns the JSON encoding of t. -func (t Number) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "type": t.typeMarker(), - }) -} - -func (Number) String() string { - return typeNumber + return v1.NewNumber() } // Array represents the array type. -type Array struct { - static []Type // static items - dynamic Type // dynamic items -} +type Array = v1.Array // NewArray returns a new Array type. func NewArray(static []Type, dynamic Type) *Array { - return &Array{ - static: static, - dynamic: dynamic, - } -} - -// MarshalJSON returns the JSON encoding of t. 
-func (t *Array) MarshalJSON() ([]byte, error) { - return json.Marshal(t.toMap()) -} - -func (t *Array) toMap() map[string]interface{} { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - if len(t.static) != 0 { - repr["static"] = t.static - } - if t.dynamic != nil { - repr["dynamic"] = t.dynamic - } - return repr -} - -func (t *Array) String() string { - prefix := "array" - buf := []string{} - for _, tpe := range t.static { - buf = append(buf, Sprint(tpe)) - } - repr := prefix - if len(buf) > 0 { - repr += "<" + strings.Join(buf, ", ") + ">" - } - if t.dynamic != nil { - repr += "[" + t.dynamic.String() + "]" - } - return repr -} - -// Dynamic returns the type of the array's dynamic elements. -func (t *Array) Dynamic() Type { - return t.dynamic -} - -// Len returns the number of static array elements. -func (t *Array) Len() int { - return len(t.static) -} - -// Select returns the type of element at the zero-based pos. -func (t *Array) Select(pos int) Type { - if pos >= 0 { - if len(t.static) > pos { - return t.static[pos] - } - if t.dynamic != nil { - return t.dynamic - } - } - return nil + return v1.NewArray(static, dynamic) } // Set represents the set type. -type Set struct { - of Type -} +type Set = v1.Set // NewSet returns a new Set type. func NewSet(of Type) *Set { - return &Set{ - of: of, - } -} - -func (t *Set) Of() Type { - return t.of -} - -// MarshalJSON returns the JSON encoding of t. -func (t *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(t.toMap()) -} - -func (t *Set) toMap() map[string]interface{} { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - if t.of != nil { - repr["of"] = t.of - } - return repr -} - -func (t *Set) String() string { - prefix := typeSet - return prefix + "[" + Sprint(t.of) + "]" + return v1.NewSet(of) } // StaticProperty represents a static object property. -type StaticProperty struct { - Key interface{} - Value Type -} +type StaticProperty = v1.StaticProperty // NewStaticProperty returns a new StaticProperty object. func NewStaticProperty(key interface{}, value Type) *StaticProperty { - return &StaticProperty{ - Key: key, - Value: value, - } -} - -// MarshalJSON returns the JSON encoding of p. -func (p *StaticProperty) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "key": p.Key, - "value": p.Value, - }) + return v1.NewStaticProperty(key, value) } // DynamicProperty represents a dynamic object property. -type DynamicProperty struct { - Key Type - Value Type -} +type DynamicProperty = v1.DynamicProperty // NewDynamicProperty returns a new DynamicProperty object. func NewDynamicProperty(key, value Type) *DynamicProperty { - return &DynamicProperty{ - Key: key, - Value: value, - } -} - -// MarshalJSON returns the JSON encoding of p. -func (p *DynamicProperty) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]interface{}{ - "key": p.Key, - "value": p.Value, - }) -} - -func (p *DynamicProperty) String() string { - return fmt.Sprintf("%s: %s", Sprint(p.Key), Sprint(p.Value)) + return v1.NewDynamicProperty(key, value) } // Object represents the object type. -type Object struct { - static []*StaticProperty // constant properties - dynamic *DynamicProperty // dynamic properties -} +type Object = v1.Object // NewObject returns a new Object type. 
func NewObject(static []*StaticProperty, dynamic *DynamicProperty) *Object { - sort.Slice(static, func(i, j int) bool { - cmp := util.Compare(static[i].Key, static[j].Key) - return cmp == -1 - }) - return &Object{ - static: static, - dynamic: dynamic, - } -} - -func (t *Object) String() string { - prefix := "object" - buf := make([]string, 0, len(t.static)) - for _, p := range t.static { - buf = append(buf, fmt.Sprintf("%v: %v", p.Key, Sprint(p.Value))) - } - repr := prefix - if len(buf) > 0 { - repr += "<" + strings.Join(buf, ", ") + ">" - } - if t.dynamic != nil { - repr += "[" + t.dynamic.String() + "]" - } - return repr -} - -// DynamicValue returns the type of the object's dynamic elements. -func (t *Object) DynamicValue() Type { - if t.dynamic == nil { - return nil - } - return t.dynamic.Value -} - -// DynamicProperties returns the type of the object's dynamic elements. -func (t *Object) DynamicProperties() *DynamicProperty { - return t.dynamic -} - -// StaticProperties returns the type of the object's static elements. -func (t *Object) StaticProperties() []*StaticProperty { - return t.static -} - -// Keys returns the keys of the object's static elements. -func (t *Object) Keys() []interface{} { - sl := make([]interface{}, 0, len(t.static)) - for _, p := range t.static { - sl = append(sl, p.Key) - } - return sl -} - -// MarshalJSON returns the JSON encoding of t. -func (t *Object) MarshalJSON() ([]byte, error) { - return json.Marshal(t.toMap()) -} - -func (t *Object) toMap() map[string]interface{} { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - if len(t.static) != 0 { - repr["static"] = t.static - } - if t.dynamic != nil { - repr["dynamic"] = t.dynamic - } - return repr -} - -// Select returns the type of the named property. 
-func (t *Object) Select(name interface{}) Type { - pos := sort.Search(len(t.static), func(x int) bool { - return util.Compare(t.static[x].Key, name) >= 0 - }) - - if pos < len(t.static) && util.Compare(t.static[pos].Key, name) == 0 { - return t.static[pos].Value - } - - if t.dynamic != nil { - if Contains(t.dynamic.Key, TypeOf(name)) { - return t.dynamic.Value - } - } - - return nil -} - -func (t *Object) Merge(other Type) *Object { - if otherObj, ok := other.(*Object); ok { - return mergeObjects(t, otherObj) - } - - var typeK Type - var typeV Type - dynProps := t.DynamicProperties() - if dynProps != nil { - typeK = Or(Keys(other), dynProps.Key) - typeV = Or(Values(other), dynProps.Value) - dynProps = NewDynamicProperty(typeK, typeV) - } else { - typeK = Keys(other) - typeV = Values(other) - if typeK != nil && typeV != nil { - dynProps = NewDynamicProperty(typeK, typeV) - } - } - - return NewObject(t.StaticProperties(), dynProps) -} - -func mergeObjects(a, b *Object) *Object { - var dynamicProps *DynamicProperty - if a.dynamic != nil && b.dynamic != nil { - typeK := Or(a.dynamic.Key, b.dynamic.Key) - var typeV Type - aObj, aIsObj := a.dynamic.Value.(*Object) - bObj, bIsObj := b.dynamic.Value.(*Object) - if aIsObj && bIsObj { - typeV = mergeObjects(aObj, bObj) - } else { - typeV = Or(a.dynamic.Value, b.dynamic.Value) - } - dynamicProps = NewDynamicProperty(typeK, typeV) - } else if a.dynamic != nil { - dynamicProps = a.dynamic - } else { - dynamicProps = b.dynamic - } - - staticPropsMap := make(map[interface{}]Type) - - for _, sp := range a.static { - staticPropsMap[sp.Key] = sp.Value - } - - for _, sp := range b.static { - currV := staticPropsMap[sp.Key] - if currV != nil { - currVObj, currVIsObj := currV.(*Object) - spVObj, spVIsObj := sp.Value.(*Object) - if currVIsObj && spVIsObj { - staticPropsMap[sp.Key] = mergeObjects(currVObj, spVObj) - } else { - staticPropsMap[sp.Key] = Or(currV, sp.Value) - } - } else { - staticPropsMap[sp.Key] = sp.Value - } - } - - staticProps := make([]*StaticProperty, 0, len(staticPropsMap)) - for k, v := range staticPropsMap { - staticProps = append(staticProps, NewStaticProperty(k, v)) - } - - return NewObject(staticProps, dynamicProps) + return v1.NewObject(static, dynamic) } // Any represents a dynamic type. -type Any []Type +type Any = v1.Any // A represents the superset of all types. var A = NewAny() // NewAny returns a new Any type. func NewAny(of ...Type) Any { - sl := make(Any, len(of)) - copy(sl, of) - sort.Sort(typeSlice(sl)) - return sl -} - -// Contains returns true if t is a superset of other. -func (t Any) Contains(other Type) bool { - if _, ok := other.(*Function); ok { - return false - } - // Note(philipc): We used to do this as a linear search. - // Since this is always sorted, we can use a binary search instead. - i := sort.Search(len(t), func(i int) bool { - return Compare(t[i], other) >= 0 - }) - if i < len(t) && Compare(t[i], other) == 0 { - // x is present at t[i] - return true - } - return len(t) == 0 -} - -// MarshalJSON returns the JSON encoding of t. -func (t Any) MarshalJSON() ([]byte, error) { - return json.Marshal(t.toMap()) -} - -func (t Any) toMap() map[string]interface{} { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - if len(t) != 0 { - repr["of"] = []Type(t) - } - return repr -} - -// Merge return a new Any type that is the superset of t and other. 
-func (t Any) Merge(other Type) Any { - if otherAny, ok := other.(Any); ok { - return t.Union(otherAny) - } - if t.Contains(other) { - return t - } - cpy := make(Any, len(t)+1) - idx := sort.Search(len(t), func(i int) bool { - return Compare(t[i], other) >= 0 - }) - copy(cpy, t[:idx]) - cpy[idx] = other - copy(cpy[idx+1:], t[idx:]) - return cpy -} - -// Union returns a new Any type that is the union of the two Any types. -// Note(philipc): The two Any slices MUST be sorted before running Union, -// or else this method will fail to merge the two slices correctly. -func (t Any) Union(other Any) Any { - lenT := len(t) - lenOther := len(other) - // Return the more general (blank) Any type if present. - if lenT == 0 { - return t - } - if lenOther == 0 { - return other - } - // Prealloc the output list. - maxLen := lenT - if lenT < lenOther { - maxLen = lenOther - } - merged := make(Any, 0, maxLen) - // Note(philipc): Create a merged slice, doing the minimum number of - // comparisons along the way. We treat this as a problem of merging two - // sorted lists that might have duplicates. This specifically saves us - // from cases where one list might be *much* longer than the other. - // Algorithm: - // Assume: - // - List A - // - List B - // - List Output - // - Idx_a, Idx_b - // Procedure: - // - While Idx_a < len(A) and Idx_b < len(B) - // - Compare head(A) and head(B) - // - Cases: - // - A < B: Append head(A) to Output, advance Idx_a - // - A == B: Append head(A) to Output, advance Idx_a, Idx_b - // - A > B: Append head(B) to Output, advance Idx_b - // - Return output - idxA := 0 - idxB := 0 - for idxA < lenT || idxB < lenOther { - // Early-exit cases: - if idxA == lenT { - // Ran out of elements in t. Copy over what's left from other. - merged = append(merged, other[idxB:]...) - break - } else if idxB == lenOther { - // Ran out of elements in other. Copy over what's left from t. - merged = append(merged, t[idxA:]...) - break - } - // Normal selection of next element to merge: - switch Compare(t[idxA], other[idxB]) { - // A < B: - case -1: - merged = append(merged, t[idxA]) - idxA++ - // A == B: - case 0: - merged = append(merged, t[idxA]) - idxA++ - idxB++ - // A > B: - case 1: - merged = append(merged, other[idxB]) - idxB++ - } - } - return merged -} - -func (t Any) String() string { - prefix := "any" - if len(t) == 0 { - return prefix - } - buf := make([]string, len(t)) - for i := range t { - buf[i] = Sprint(t[i]) - } - return prefix + "<" + strings.Join(buf, ", ") + ">" + return v1.NewAny(of...) } // Function represents a function type. -type Function struct { - args []Type - result Type - variadic Type -} +type Function = v1.Function // Args returns an argument list. func Args(x ...Type) []Type { - return x + return v1.Args(x...) } // Void returns true if the function has no return value. This function returns // false if x is not a function. func Void(x Type) bool { - f, ok := x.(*Function) - return ok && f.Result() == nil + return v1.Void(x) } // Arity returns the number of arguments in the function signature or zero if x // is not a function. If the type is unknown, this function returns -1. func Arity(x Type) int { - if x == nil { - return -1 - } - f, ok := x.(*Function) - if !ok { - return 0 - } - return len(f.FuncArgs().Args) + return v1.Arity(x) } // NewFunction returns a new Function object of the given argument and result types. 
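The Merge and Union code removed above keeps every Any union sorted, so Contains can binary-search and two unions can be merged in a single pass. Callers normally reach this through Or, which the wrapper later in this file now forwards to v1.Or; a small hypothetical usage sketch (not part of this diff):

	import (
		"fmt"

		types "github.com/open-policy-agent/opa/v1/types"
	)

	func unionExample() {
		// Or of two distinct scalar types produces an any<...> union; folding
		// in further types keeps the union sorted and de-duplicated.
		u := types.Or(types.NewString(), types.NewNumber())
		u = types.Or(u, types.NewBoolean())
		fmt.Println(types.Sprint(u)) // e.g. "any<boolean, number, string>"
	}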
func NewFunction(args []Type, result Type) *Function { - return &Function{ - args: args, - result: result, - } + return v1.NewFunction(args, result) } // NewVariadicFunction returns a new Function object. This function sets the // variadic bit on the signature. Non-void variadic functions are not currently // supported. func NewVariadicFunction(args []Type, varargs Type, result Type) *Function { - if result != nil { - panic("illegal value: non-void variadic functions not supported") - } - return &Function{ - args: args, - variadic: varargs, - result: nil, - } -} - -// FuncArgs returns the function's arguments. -func (t *Function) FuncArgs() FuncArgs { - return FuncArgs{Args: t.Args(), Variadic: unwrap(t.variadic)} -} - -// NamedFuncArgs returns the function's arguments, with a name and -// description if available. -func (t *Function) NamedFuncArgs() FuncArgs { - args := make([]Type, len(t.args)) - copy(args, t.args) - return FuncArgs{Args: args, Variadic: t.variadic} -} - -// Args returns the function's arguments as a slice, ignoring variadic arguments. -// Deprecated: Use FuncArgs instead. -func (t *Function) Args() []Type { - cpy := make([]Type, len(t.args)) - for i := range t.args { - cpy[i] = unwrap(t.args[i]) - } - return cpy -} - -// Result returns the function's result type. -func (t *Function) Result() Type { - return unwrap(t.result) -} - -// Result returns the function's result type, without stripping name and description. -func (t *Function) NamedResult() Type { - return t.result -} - -func (t *Function) String() string { - return fmt.Sprintf("%v => %v", t.FuncArgs(), Sprint(t.Result())) -} - -// MarshalJSON returns the JSON encoding of t. -func (t *Function) MarshalJSON() ([]byte, error) { - repr := map[string]interface{}{ - "type": t.typeMarker(), - } - if len(t.args) > 0 { - repr["args"] = t.args - } - if t.result != nil { - repr["result"] = t.result - } - if t.variadic != nil { - repr["variadic"] = t.variadic - } - return json.Marshal(repr) -} - -// UnmarshalJSON decodes the JSON serialized function declaration. -func (t *Function) UnmarshalJSON(bs []byte) error { - tpe, err := Unmarshal(bs) - if err != nil { - return err - } - - f, ok := tpe.(*Function) - if !ok { - return fmt.Errorf("invalid type") - } - - *t = *f - return nil -} - -// Union returns a new function representing the union of t and other. Functions -// must have the same arity to be unioned. -func (t *Function) Union(other *Function) *Function { - if other == nil { - return t - } - if t == nil { - return other - } - - a := t.Args() - b := other.Args() - if len(a) != len(b) { - return nil - } - - aIsVariadic := t.FuncArgs().Variadic != nil - bIsVariadic := other.FuncArgs().Variadic != nil - - if aIsVariadic && !bIsVariadic { - return nil - } else if bIsVariadic && !aIsVariadic { - return nil - } - - args := make([]Type, len(a)) - for i := range a { - args[i] = Or(a[i], b[i]) - } - - result := NewFunction(args, Or(t.Result(), other.Result())) - result.variadic = Or(t.FuncArgs().Variadic, other.FuncArgs().Variadic) - - return result + return v1.NewVariadicFunction(args, varargs, result) } // FuncArgs represents the arguments that can be passed to a function. 
-type FuncArgs struct { - Args []Type `json:"args,omitempty"` - Variadic Type `json:"variadic,omitempty"` -} - -func (a FuncArgs) String() string { - buf := make([]string, 0, len(a.Args)+1) - for i := range a.Args { - buf = append(buf, Sprint(a.Args[i])) - } - if a.Variadic != nil { - buf = append(buf, Sprint(a.Variadic)+"...") - } - return "(" + strings.Join(buf, ", ") + ")" -} - -// Arg returns the nth argument's type. -func (a FuncArgs) Arg(x int) Type { - if x < len(a.Args) { - return a.Args[x] - } - return a.Variadic -} +type FuncArgs = v1.FuncArgs // Compare returns -1, 0, 1 based on comparison between a and b. func Compare(a, b Type) int { - a, b = unwrap(a), unwrap(b) - x := typeOrder(a) - y := typeOrder(b) - if x > y { - return 1 - } else if x < y { - return -1 - } - switch a.(type) { - case nil, Null, Boolean, Number, String: - return 0 - case *Array: - arrA := a.(*Array) - arrB := b.(*Array) - if arrA.dynamic != nil && arrB.dynamic == nil { - return 1 - } else if arrB.dynamic != nil && arrA.dynamic == nil { - return -1 - } - if arrB.dynamic != nil && arrA.dynamic != nil { - if cmp := Compare(arrA.dynamic, arrB.dynamic); cmp != 0 { - return cmp - } - } - return typeSliceCompare(arrA.static, arrB.static) - case *Object: - objA := a.(*Object) - objB := b.(*Object) - if objA.dynamic != nil && objB.dynamic == nil { - return 1 - } else if objB.dynamic != nil && objA.dynamic == nil { - return -1 - } - if objA.dynamic != nil && objB.dynamic != nil { - if cmp := Compare(objA.dynamic.Key, objB.dynamic.Key); cmp != 0 { - return cmp - } - if cmp := Compare(objA.dynamic.Value, objB.dynamic.Value); cmp != 0 { - return cmp - } - } - - lenStaticA := len(objA.static) - lenStaticB := len(objB.static) - - minLen := lenStaticA - if lenStaticB < minLen { - minLen = lenStaticB - } - - for i := 0; i < minLen; i++ { - if cmp := util.Compare(objA.static[i].Key, objB.static[i].Key); cmp != 0 { - return cmp - } - if cmp := Compare(objA.static[i].Value, objB.static[i].Value); cmp != 0 { - return cmp - } - } - - if lenStaticA < lenStaticB { - return -1 - } else if lenStaticB < lenStaticA { - return 1 - } - - return 0 - case *Set: - setA := a.(*Set) - setB := b.(*Set) - if setA.of == nil && setB.of == nil { - return 0 - } else if setA.of == nil { - return -1 - } else if setB.of == nil { - return 1 - } - return Compare(setA.of, setB.of) - case Any: - sl1 := typeSlice(a.(Any)) - sl2 := typeSlice(b.(Any)) - return typeSliceCompare(sl1, sl2) - case *Function: - fA := a.(*Function) - fB := b.(*Function) - if len(fA.args) < len(fB.args) { - return -1 - } else if len(fA.args) > len(fB.args) { - return 1 - } - for i := 0; i < len(fA.args); i++ { - if cmp := Compare(fA.args[i], fB.args[i]); cmp != 0 { - return cmp - } - } - if cmp := Compare(fA.result, fB.result); cmp != 0 { - return cmp - } - return Compare(fA.variadic, fB.variadic) - default: - panic("unreachable") - } + return v1.Compare(a, b) } // Contains returns true if a is a superset or equal to b. func Contains(a, b Type) bool { - if x, ok := unwrap(a).(Any); ok { - return x.Contains(b) - } - return Compare(a, b) == 0 + return v1.Contains(a, b) } // Or returns a type that represents the union of a and b. If one type is a // superset of the other, the superset is returned unchanged. 
func Or(a, b Type) Type { - a, b = unwrap(a), unwrap(b) - if a == nil { - return b - } else if b == nil { - return a - } - fA, ok1 := a.(*Function) - fB, ok2 := b.(*Function) - if ok1 && ok2 { - return fA.Union(fB) - } else if ok1 || ok2 { - return nil - } - anyA, ok1 := a.(Any) - anyB, ok2 := b.(Any) - if ok1 { - return anyA.Merge(b) - } - if ok2 { - return anyB.Merge(a) - } - if Compare(a, b) == 0 { - return a - } - return NewAny(a, b) + return v1.Or(a, b) } // Select returns a property or item of a. func Select(a Type, x interface{}) Type { - switch a := unwrap(a).(type) { - case *Array: - n, ok := x.(json.Number) - if !ok { - return nil - } - pos, err := n.Int64() - if err != nil { - return nil - } - return a.Select(int(pos)) - case *Object: - return a.Select(x) - case *Set: - tpe := TypeOf(x) - if Compare(a.of, tpe) == 0 { - return a.of - } - if x, ok := a.of.(Any); ok { - if x.Contains(tpe) { - return tpe - } - } - return nil - case Any: - if Compare(a, A) == 0 { - return A - } - var tpe Type - for i := range a { - // TODO(tsandall): test nil/nil - tpe = Or(Select(a[i], x), tpe) - } - return tpe - default: - return nil - } + return v1.Select(a, x) } // Keys returns the type of keys that can be enumerated for a. For arrays, the // keys are always number types, for objects the keys are always string types, // and for sets the keys are always the type of the set element. func Keys(a Type) Type { - switch a := unwrap(a).(type) { - case *Array: - return N - case *Object: - var tpe Type - for _, k := range a.Keys() { - tpe = Or(tpe, TypeOf(k)) - } - if a.dynamic != nil { - tpe = Or(tpe, a.dynamic.Key) - } - return tpe - case *Set: - return a.of - case Any: - // TODO(tsandall): ditto test - if Compare(a, A) == 0 { - return A - } - var tpe Type - for i := range a { - tpe = Or(Keys(a[i]), tpe) - } - return tpe - } - return nil + return v1.Keys(a) } // Values returns the type of values that can be enumerated for a. func Values(a Type) Type { - switch a := unwrap(a).(type) { - case *Array: - var tpe Type - for i := range a.static { - tpe = Or(tpe, a.static[i]) - } - return Or(tpe, a.dynamic) - case *Object: - var tpe Type - for i := range a.static { - tpe = Or(tpe, a.static[i].Value) - } - if a.dynamic != nil { - tpe = Or(tpe, a.dynamic.Value) - } - return tpe - case *Set: - return a.of - case Any: - if Compare(a, A) == 0 { - return A - } - var tpe Type - for i := range a { - tpe = Or(Values(a[i]), tpe) - } - return tpe - } - return nil + return v1.Values(a) } // Nil returns true if a's type is unknown. func Nil(a Type) bool { - switch a := unwrap(a).(type) { - case nil: - return true - case *Function: - for i := range a.args { - if Nil(a.args[i]) { - return true - } - } - return Nil(a.result) - case *Array: - for i := range a.static { - if Nil(a.static[i]) { - return true - } - } - if a.dynamic != nil { - return Nil(a.dynamic) - } - case *Object: - for i := range a.static { - if Nil(a.static[i].Value) { - return true - } - } - if a.dynamic != nil { - return Nil(a.dynamic.Key) || Nil(a.dynamic.Value) - } - case *Set: - return Nil(a.of) - } - return false + return v1.Nil(a) } // TypeOf returns the type of the Golang native value. 
func TypeOf(x interface{}) Type { - switch x := x.(type) { - case nil: - return NewNull() - case bool: - return B - case string: - return S - case json.Number: - return N - case map[string]interface{}: - // The ast.ValueToInterface() function returns ast.Object values as map[string]interface{} - // so map[string]interface{} must be handled here because the type checker uses the value - // to interface conversion when inferring object types. - static := make([]*StaticProperty, 0, len(x)) - for k, v := range x { - static = append(static, NewStaticProperty(k, TypeOf(v))) - } - return NewObject(static, nil) - case map[interface{}]interface{}: - static := make([]*StaticProperty, 0, len(x)) - for k, v := range x { - static = append(static, NewStaticProperty(k, TypeOf(v))) - } - return NewObject(static, nil) - case []interface{}: - static := make([]Type, len(x)) - for i := range x { - static[i] = TypeOf(x[i]) - } - return NewArray(static, nil) - } - panic("unreachable") -} - -type typeSlice []Type - -func (s typeSlice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 } -func (s typeSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } -func (s typeSlice) Len() int { return len(s) } - -func typeSliceCompare(a, b []Type) int { - minLen := len(a) - if len(b) < minLen { - minLen = len(b) - } - for i := 0; i < minLen; i++ { - if cmp := Compare(a[i], b[i]); cmp != 0 { - return cmp - } - } - if len(a) < len(b) { - return -1 - } else if len(b) < len(a) { - return 1 - } - return 0 -} - -func typeOrder(x Type) int { - switch unwrap(x).(type) { - case Null: - return 0 - case Boolean: - return 1 - case Number: - return 2 - case String: - return 3 - case *Array: - return 4 - case *Object: - return 5 - case *Set: - return 6 - case Any: - return 7 - case *Function: - return 8 - case nil: - return -1 - } - panic("unreachable") + return v1.TypeOf(x) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/backoff.go b/constraint/vendor/github.com/open-policy-agent/opa/util/backoff.go deleted file mode 100644 index 6fbf63ef7..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/util/backoff.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package util - -import ( - "math/rand" - "time" -) - -func init() { - // NOTE(sr): We don't need good random numbers here; it's used for jittering - // the backup timing a bit. But anyways, let's make it random enough; without - // a call to rand.Seed() we'd get the same stream of numbers for each program - // run. (Or not, if some other packages happens to seed the global randomness - // source.) - // Note(philipc): rand.Seed() was deprecated in Go 1.20, so we've switched to - // using the recommended rand.New(rand.NewSource(seed)) style. - rand.New(rand.NewSource(time.Now().UnixNano())) -} - -// DefaultBackoff returns a delay with an exponential backoff based on the -// number of retries. -func DefaultBackoff(base, max float64, retries int) time.Duration { - return Backoff(base, max, .2, 1.6, retries) -} - -// Backoff returns a delay with an exponential backoff based on the number of -// retries. Same algorithm used in gRPC. 
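The deleted util/backoff.go implemented capped exponential backoff with +/-20% jitter, the same scheme gRPC uses. A self-contained sketch of that formula and how a caller would use it, for reference while reviewing the removal (hypothetical names, not an OPA API):

	import (
		"math/rand"
		"time"
	)

	// jitteredBackoff mirrors the deleted util.DefaultBackoff(base, max, retries):
	// multiply the delay by 1.6 per retry, cap it at max, then randomize by
	// +/-20% so a burst of clients does not retry in lockstep.
	func jitteredBackoff(base, max time.Duration, retries int) time.Duration {
		if retries == 0 {
			return 0
		}
		d := float64(base)
		for i := 0; i < retries && d < float64(max); i++ {
			d *= 1.6
		}
		if d > float64(max) {
			d = float64(max)
		}
		d *= 1 + 0.2*(rand.Float64()*2-1)
		if d < 0 {
			return 0
		}
		return time.Duration(d)
	}

	// Typical use: time.Sleep(jitteredBackoff(500*time.Millisecond, time.Minute, attempt))
	// before retrying attempt 1, 2, 3, ...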
-func Backoff(base, max, jitter, factor float64, retries int) time.Duration { - if retries == 0 { - return 0 - } - - backoff, max := base, max - for backoff < max && retries > 0 { - backoff *= factor - retries-- - } - if backoff > max { - backoff = max - } - - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + jitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - - return time.Duration(backoff) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/compare.go b/constraint/vendor/github.com/open-policy-agent/opa/util/compare.go deleted file mode 100644 index 8ae775369..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/util/compare.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package util - -import ( - "encoding/json" - "fmt" - "math/big" - "sort" -) - -// Compare returns 0 if a equals b, -1 if a is less than b, and 1 if b is than a. -// -// For comparison between values of different types, the following ordering is used: -// nil < bool < int, float64 < string < []interface{} < map[string]interface{}. Slices and maps -// are compared recursively. If one slice or map is a subset of the other slice or map -// it is considered "less than". Nil is always equal to nil. -func Compare(a, b interface{}) int { - aSortOrder := sortOrder(a) - bSortOrder := sortOrder(b) - if aSortOrder < bSortOrder { - return -1 - } else if bSortOrder < aSortOrder { - return 1 - } - switch a := a.(type) { - case nil: - return 0 - case bool: - switch b := b.(type) { - case bool: - if a == b { - return 0 - } - if !a { - return -1 - } - return 1 - } - case json.Number: - switch b := b.(type) { - case json.Number: - return compareJSONNumber(a, b) - } - case int: - switch b := b.(type) { - case int: - if a == b { - return 0 - } else if a < b { - return -1 - } - return 1 - } - case float64: - switch b := b.(type) { - case float64: - if a == b { - return 0 - } else if a < b { - return -1 - } - return 1 - } - case string: - switch b := b.(type) { - case string: - if a == b { - return 0 - } else if a < b { - return -1 - } - return 1 - } - case []interface{}: - switch b := b.(type) { - case []interface{}: - bLen := len(b) - aLen := len(a) - minLen := aLen - if bLen < minLen { - minLen = bLen - } - for i := 0; i < minLen; i++ { - cmp := Compare(a[i], b[i]) - if cmp != 0 { - return cmp - } - } - if aLen == bLen { - return 0 - } else if aLen < bLen { - return -1 - } - return 1 - } - case map[string]interface{}: - switch b := b.(type) { - case map[string]interface{}: - var aKeys []string - for k := range a { - aKeys = append(aKeys, k) - } - var bKeys []string - for k := range b { - bKeys = append(bKeys, k) - } - sort.Strings(aKeys) - sort.Strings(bKeys) - aLen := len(aKeys) - bLen := len(bKeys) - minLen := aLen - if bLen < minLen { - minLen = bLen - } - for i := 0; i < minLen; i++ { - if aKeys[i] < bKeys[i] { - return -1 - } else if bKeys[i] < aKeys[i] { - return 1 - } - aVal := a[aKeys[i]] - bVal := b[bKeys[i]] - cmp := Compare(aVal, bVal) - if cmp != 0 { - return cmp - } - } - if aLen == bLen { - return 0 - } else if aLen < bLen { - return -1 - } - return 1 - } - } - - panic(fmt.Sprintf("illegal arguments of type %T and type %T", a, b)) -} - -const ( - nilSort = iota - boolSort = iota - numberSort = iota - stringSort = iota - arraySort = iota - 
objectSort = iota -) - -func compareJSONNumber(a, b json.Number) int { - bigA, ok := new(big.Float).SetString(string(a)) - if !ok { - panic("illegal value") - } - bigB, ok := new(big.Float).SetString(string(b)) - if !ok { - panic("illegal value") - } - return bigA.Cmp(bigB) -} - -func sortOrder(v interface{}) int { - switch v.(type) { - case nil: - return nilSort - case bool: - return boolSort - case json.Number: - return numberSort - case int: - return numberSort - case float64: - return numberSort - case string: - return stringSort - case []interface{}: - return arraySort - case map[string]interface{}: - return objectSort - } - panic(fmt.Sprintf("illegal argument of type %T", v)) -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/hashmap.go b/constraint/vendor/github.com/open-policy-agent/opa/util/hashmap.go deleted file mode 100644 index 8875a6323..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/util/hashmap.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package util - -import ( - "fmt" - "strings" -) - -// T is a concise way to refer to T. -type T interface{} - -type hashEntry struct { - k T - v T - next *hashEntry -} - -// HashMap represents a key/value map. -type HashMap struct { - eq func(T, T) bool - hash func(T) int - table map[int]*hashEntry - size int -} - -// NewHashMap returns a new empty HashMap. -func NewHashMap(eq func(T, T) bool, hash func(T) int) *HashMap { - return &HashMap{ - eq: eq, - hash: hash, - table: make(map[int]*hashEntry), - size: 0, - } -} - -// Copy returns a shallow copy of this HashMap. -func (h *HashMap) Copy() *HashMap { - cpy := NewHashMap(h.eq, h.hash) - h.Iter(func(k, v T) bool { - cpy.Put(k, v) - return false - }) - return cpy -} - -// Equal returns true if this HashMap equals the other HashMap. -// Two hash maps are equal if they contain the same key/value pairs. -func (h *HashMap) Equal(other *HashMap) bool { - if h.Len() != other.Len() { - return false - } - return !h.Iter(func(k, v T) bool { - ov, ok := other.Get(k) - if !ok { - return true - } - return !h.eq(v, ov) - }) -} - -// Get returns the value for k. -func (h *HashMap) Get(k T) (T, bool) { - hash := h.hash(k) - for entry := h.table[hash]; entry != nil; entry = entry.next { - if h.eq(entry.k, k) { - return entry.v, true - } - } - return nil, false -} - -// Delete removes the key k. -func (h *HashMap) Delete(k T) { - hash := h.hash(k) - var prev *hashEntry - for entry := h.table[hash]; entry != nil; entry = entry.next { - if h.eq(entry.k, k) { - if prev != nil { - prev.next = entry.next - } else { - h.table[hash] = entry.next - } - h.size-- - return - } - prev = entry - } -} - -// Hash returns the hash code for this hash map. -func (h *HashMap) Hash() int { - var hash int - h.Iter(func(k, v T) bool { - hash += h.hash(k) + h.hash(v) - return false - }) - return hash -} - -// Iter invokes the iter function for each element in the HashMap. -// If the iter function returns true, iteration stops and the return value is true. -// If the iter function never returns true, iteration proceeds through all elements -// and the return value is false. -func (h *HashMap) Iter(iter func(T, T) bool) bool { - for _, entry := range h.table { - for ; entry != nil; entry = entry.next { - if iter(entry.k, entry.v) { - return true - } - } - } - return false -} - -// Len returns the current size of this HashMap. 
-func (h *HashMap) Len() int { - return h.size -} - -// Put inserts a key/value pair into this HashMap. If the key is already present, the existing -// value is overwritten. -func (h *HashMap) Put(k T, v T) { - hash := h.hash(k) - head := h.table[hash] - for entry := head; entry != nil; entry = entry.next { - if h.eq(entry.k, k) { - entry.v = v - return - } - } - h.table[hash] = &hashEntry{k: k, v: v, next: head} - h.size++ -} - -func (h *HashMap) String() string { - var buf []string - h.Iter(func(k T, v T) bool { - buf = append(buf, fmt.Sprintf("%v: %v", k, v)) - return false - }) - return "{" + strings.Join(buf, ", ") + "}" -} - -// Update returns a new HashMap with elements from the other HashMap put into this HashMap. -// If the other HashMap contains elements with the same key as this HashMap, the value -// from the other HashMap overwrites the value from this HashMap. -func (h *HashMap) Update(other *HashMap) *HashMap { - updated := h.Copy() - other.Iter(func(k, v T) bool { - updated.Put(k, v) - return false - }) - return updated -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/json.go b/constraint/vendor/github.com/open-policy-agent/opa/util/json.go deleted file mode 100644 index 4f1e14513..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/util/json.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2016 The OPA Authors. All rights reserved. -// Use of this source code is governed by an Apache2 -// license that can be found in the LICENSE file. - -package util - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - - "sigs.k8s.io/yaml" - - "github.com/open-policy-agent/opa/loader/extension" -) - -// UnmarshalJSON parses the JSON encoded data and stores the result in the value -// pointed to by x. -// -// This function is intended to be used in place of the standard json.Marshal -// function when json.Number is required. -func UnmarshalJSON(bs []byte, x interface{}) error { - return unmarshalJSON(bs, x, true) -} - -func unmarshalJSON(bs []byte, x interface{}, ext bool) error { - buf := bytes.NewBuffer(bs) - decoder := NewJSONDecoder(buf) - if err := decoder.Decode(x); err != nil { - if handler := extension.FindExtension(".json"); handler != nil && ext { - return handler(bs, x) - } - return err - } - - // Since decoder.Decode validates only the first json structure in bytes, - // check if decoder has more bytes to consume to validate whole input bytes. - tok, err := decoder.Token() - if tok != nil { - return fmt.Errorf("error: invalid character '%s' after top-level value", tok) - } - if err != nil && err != io.EOF { - return err - } - return nil -} - -// NewJSONDecoder returns a new decoder that reads from r. -// -// This function is intended to be used in place of the standard json.NewDecoder -// when json.Number is required. -func NewJSONDecoder(r io.Reader) *json.Decoder { - decoder := json.NewDecoder(r) - decoder.UseNumber() - return decoder -} - -// MustUnmarshalJSON parse the JSON encoded data and returns the result. -// -// If the data cannot be decoded, this function will panic. This function is for -// test purposes. -func MustUnmarshalJSON(bs []byte) interface{} { - var x interface{} - if err := UnmarshalJSON(bs, &x); err != nil { - panic(err) - } - return x -} - -// MustMarshalJSON returns the JSON encoding of x -// -// If the data cannot be encoded, this function will panic. This function is for -// test purposes. 
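The util/json.go helpers deleted above exist mainly to force json.Number decoding: the standard library otherwise turns every JSON number into float64 when decoding into interface{}, so large integers silently lose precision. A minimal standard-library illustration of the difference (not OPA code; error handling elided):

	import (
		"bytes"
		"encoding/json"
		"fmt"
	)

	func numberExample() {
		raw := []byte(`{"id": 9007199254740993}`) // 2^53 + 1

		var lossy map[string]interface{}
		_ = json.Unmarshal(raw, &lossy) // id becomes float64 and rounds to 9007199254740992

		var exact map[string]interface{}
		dec := json.NewDecoder(bytes.NewReader(raw))
		dec.UseNumber() // what NewJSONDecoder / UnmarshalJSON did above
		_ = dec.Decode(&exact) // id stays json.Number("9007199254740993")

		fmt.Printf("%v vs %v\n", lossy["id"], exact["id"])
	}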
-func MustMarshalJSON(x interface{}) []byte { - bs, err := json.Marshal(x) - if err != nil { - panic(err) - } - return bs -} - -// RoundTrip encodes to JSON, and decodes the result again. -// -// Thereby, it is converting its argument to the representation expected by -// rego.Input and inmem's Write operations. Works with both references and -// values. -func RoundTrip(x *interface{}) error { - bs, err := json.Marshal(x) - if err != nil { - return err - } - return UnmarshalJSON(bs, x) -} - -// Reference returns a pointer to its argument unless the argument already is -// a pointer. If the argument is **t, or ***t, etc, it will return *t. -// -// Used for preparing Go types (including pointers to structs) into values to be -// put through util.RoundTrip(). -func Reference(x interface{}) *interface{} { - var y interface{} - rv := reflect.ValueOf(x) - if rv.Kind() == reflect.Ptr { - return Reference(rv.Elem().Interface()) - } - if rv.Kind() != reflect.Invalid { - y = rv.Interface() - return &y - } - return &x -} - -// Unmarshal decodes a YAML, JSON or JSON extension value into the specified type. -func Unmarshal(bs []byte, v interface{}) error { - if json.Valid(bs) { - return unmarshalJSON(bs, v, false) - } - nbs, err := yaml.YAMLToJSON(bs) - if err == nil { - return unmarshalJSON(nbs, v, false) - } - // not json or yaml: try extensions - if handler := extension.FindExtension(".json"); handler != nil { - return handler(bs, v) - } - return err -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/maps.go b/constraint/vendor/github.com/open-policy-agent/opa/util/maps.go deleted file mode 100644 index d943b4d0a..000000000 --- a/constraint/vendor/github.com/open-policy-agent/opa/util/maps.go +++ /dev/null @@ -1,10 +0,0 @@ -package util - -// Values returns a slice of values from any map. Copied from golang.org/x/exp/maps. -func Values[M ~map[K]V, K comparable, V any](m M) []V { - r := make([]V, 0, len(m)) - for _, v := range m { - r = append(r, v) - } - return r -} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go new file mode 100644 index 000000000..297c6907c --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/annotations.go @@ -0,0 +1,976 @@ +// Copyright 2022 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "encoding/json" + "fmt" + "net/url" + "sort" + "strings" + + "github.com/open-policy-agent/opa/internal/deepcopy" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + annotationScopePackage = "package" + annotationScopeImport = "import" + annotationScopeRule = "rule" + annotationScopeDocument = "document" + annotationScopeSubpackages = "subpackages" +) + +type ( + // Annotations represents metadata attached to other AST nodes such as rules. 
+ Annotations struct { + Scope string `json:"scope"` + Title string `json:"title,omitempty"` + Entrypoint bool `json:"entrypoint,omitempty"` + Description string `json:"description,omitempty"` + Organizations []string `json:"organizations,omitempty"` + RelatedResources []*RelatedResourceAnnotation `json:"related_resources,omitempty"` + Authors []*AuthorAnnotation `json:"authors,omitempty"` + Schemas []*SchemaAnnotation `json:"schemas,omitempty"` + Custom map[string]interface{} `json:"custom,omitempty"` + Location *Location `json:"location,omitempty"` + + comments []*Comment + node Node + } + + // SchemaAnnotation contains a schema declaration for the document identified by the path. + SchemaAnnotation struct { + Path Ref `json:"path"` + Schema Ref `json:"schema,omitempty"` + Definition *interface{} `json:"definition,omitempty"` + } + + AuthorAnnotation struct { + Name string `json:"name"` + Email string `json:"email,omitempty"` + } + + RelatedResourceAnnotation struct { + Ref url.URL `json:"ref"` + Description string `json:"description,omitempty"` + } + + AnnotationSet struct { + byRule map[*Rule][]*Annotations + byPackage map[int]*Annotations + byPath *annotationTreeNode + modules []*Module // Modules this set was constructed from + } + + annotationTreeNode struct { + Value *Annotations + Children map[Value]*annotationTreeNode // we assume key elements are hashable (vars and strings only!) + } + + AnnotationsRef struct { + Path Ref `json:"path"` // The path of the node the annotations are applied to + Annotations *Annotations `json:"annotations,omitempty"` + Location *Location `json:"location,omitempty"` // The location of the node the annotations are applied to + + node Node // The node the annotations are applied to + } + + AnnotationsRefSet []*AnnotationsRef + + FlatAnnotationsRefSet AnnotationsRefSet +) + +func (a *Annotations) String() string { + bs, _ := a.MarshalJSON() + return string(bs) +} + +// Loc returns the location of this annotation. +func (a *Annotations) Loc() *Location { + return a.Location +} + +// SetLoc updates the location of this annotation. +func (a *Annotations) SetLoc(l *Location) { + a.Location = l +} + +// EndLoc returns the location of this annotation's last comment line. +func (a *Annotations) EndLoc() *Location { + count := len(a.comments) + if count == 0 { + return a.Location + } + return a.comments[count-1].Location +} + +// Compare returns an integer indicating if a is less than, equal to, or greater +// than other. 
+func (a *Annotations) Compare(other *Annotations) int { + + if a == nil && other == nil { + return 0 + } + + if a == nil { + return -1 + } + + if other == nil { + return 1 + } + + if cmp := scopeCompare(a.Scope, other.Scope); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.Title, other.Title); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.Description, other.Description); cmp != 0 { + return cmp + } + + if cmp := compareStringLists(a.Organizations, other.Organizations); cmp != 0 { + return cmp + } + + if cmp := compareRelatedResources(a.RelatedResources, other.RelatedResources); cmp != 0 { + return cmp + } + + if cmp := compareAuthors(a.Authors, other.Authors); cmp != 0 { + return cmp + } + + if cmp := compareSchemas(a.Schemas, other.Schemas); cmp != 0 { + return cmp + } + + if a.Entrypoint != other.Entrypoint { + if a.Entrypoint { + return 1 + } + return -1 + } + + if cmp := util.Compare(a.Custom, other.Custom); cmp != 0 { + return cmp + } + + return 0 +} + +// GetTargetPath returns the path of the node these Annotations are applied to (the target) +func (a *Annotations) GetTargetPath() Ref { + switch n := a.node.(type) { + case *Package: + return n.Path + case *Rule: + return n.Ref().GroundPrefix() + default: + return nil + } +} + +func (a *Annotations) MarshalJSON() ([]byte, error) { + if a == nil { + return []byte(`{"scope":""}`), nil + } + + data := map[string]interface{}{ + "scope": a.Scope, + } + + if a.Title != "" { + data["title"] = a.Title + } + + if a.Description != "" { + data["description"] = a.Description + } + + if a.Entrypoint { + data["entrypoint"] = a.Entrypoint + } + + if len(a.Organizations) > 0 { + data["organizations"] = a.Organizations + } + + if len(a.RelatedResources) > 0 { + data["related_resources"] = a.RelatedResources + } + + if len(a.Authors) > 0 { + data["authors"] = a.Authors + } + + if len(a.Schemas) > 0 { + data["schemas"] = a.Schemas + } + + if len(a.Custom) > 0 { + data["custom"] = a.Custom + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Annotations { + if a.Location != nil { + data["location"] = a.Location + } + } + + return json.Marshal(data) +} + +func NewAnnotationsRef(a *Annotations) *AnnotationsRef { + var loc *Location + if a.node != nil { + loc = a.node.Loc() + } + + return &AnnotationsRef{ + Location: loc, + Path: a.GetTargetPath(), + Annotations: a, + node: a.node, + } +} + +func (ar *AnnotationsRef) GetPackage() *Package { + switch n := ar.node.(type) { + case *Package: + return n + case *Rule: + return n.Module.Package + default: + return nil + } +} + +func (ar *AnnotationsRef) GetRule() *Rule { + switch n := ar.node.(type) { + case *Rule: + return n + default: + return nil + } +} + +func (ar *AnnotationsRef) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "path": ar.Path, + } + + if ar.Annotations != nil { + data["annotations"] = ar.Annotations + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.AnnotationsRef { + if ar.Location != nil { + data["location"] = ar.Location + } + + // The location set for the schema ref terms is wrong (always set to + // row 1) and not really useful anyway.. 
so strip it out before marshalling + for _, schema := range ar.Annotations.Schemas { + if schema.Path != nil { + for _, term := range schema.Path { + term.Location = nil + } + } + } + } + + return json.Marshal(data) +} + +func scopeCompare(s1, s2 string) int { + + o1 := scopeOrder(s1) + o2 := scopeOrder(s2) + + if o2 < o1 { + return 1 + } else if o2 > o1 { + return -1 + } + + if s1 < s2 { + return -1 + } else if s2 < s1 { + return 1 + } + + return 0 +} + +func scopeOrder(s string) int { + switch s { + case annotationScopeRule: + return 1 + } + return 0 +} + +func compareAuthors(a, b []*AuthorAnnotation) int { + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + for i := 0; i < len(a); i++ { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + + return 0 +} + +func compareRelatedResources(a, b []*RelatedResourceAnnotation) int { + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + for i := 0; i < len(a); i++ { + if cmp := strings.Compare(a[i].String(), b[i].String()); cmp != 0 { + return cmp + } + } + + return 0 +} + +func compareSchemas(a, b []*SchemaAnnotation) int { + maxLen := len(a) + if len(b) < maxLen { + maxLen = len(b) + } + + for i := 0; i < maxLen; i++ { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + return 0 +} + +func compareStringLists(a, b []string) int { + if len(a) > len(b) { + return 1 + } else if len(a) < len(b) { + return -1 + } + + for i := 0; i < len(a); i++ { + if cmp := strings.Compare(a[i], b[i]); cmp != 0 { + return cmp + } + } + + return 0 +} + +// Copy returns a deep copy of s. +func (a *Annotations) Copy(node Node) *Annotations { + cpy := *a + + cpy.Organizations = make([]string, len(a.Organizations)) + copy(cpy.Organizations, a.Organizations) + + cpy.RelatedResources = make([]*RelatedResourceAnnotation, len(a.RelatedResources)) + for i := range a.RelatedResources { + cpy.RelatedResources[i] = a.RelatedResources[i].Copy() + } + + cpy.Authors = make([]*AuthorAnnotation, len(a.Authors)) + for i := range a.Authors { + cpy.Authors[i] = a.Authors[i].Copy() + } + + cpy.Schemas = make([]*SchemaAnnotation, len(a.Schemas)) + for i := range a.Schemas { + cpy.Schemas[i] = a.Schemas[i].Copy() + } + + cpy.Custom = deepcopy.Map(a.Custom) + + cpy.node = node + + return &cpy +} + +// toObject constructs an AST Object from the annotation. 
+func (a *Annotations) toObject() (*Object, *Error) { + obj := NewObject() + + if a == nil { + return &obj, nil + } + + if len(a.Scope) > 0 { + obj.Insert(StringTerm("scope"), StringTerm(a.Scope)) + } + + if len(a.Title) > 0 { + obj.Insert(StringTerm("title"), StringTerm(a.Title)) + } + + if a.Entrypoint { + obj.Insert(StringTerm("entrypoint"), BooleanTerm(true)) + } + + if len(a.Description) > 0 { + obj.Insert(StringTerm("description"), StringTerm(a.Description)) + } + + if len(a.Organizations) > 0 { + orgs := make([]*Term, 0, len(a.Organizations)) + for _, org := range a.Organizations { + orgs = append(orgs, StringTerm(org)) + } + obj.Insert(StringTerm("organizations"), ArrayTerm(orgs...)) + } + + if len(a.RelatedResources) > 0 { + rrs := make([]*Term, 0, len(a.RelatedResources)) + for _, rr := range a.RelatedResources { + rrObj := NewObject(Item(StringTerm("ref"), StringTerm(rr.Ref.String()))) + if len(rr.Description) > 0 { + rrObj.Insert(StringTerm("description"), StringTerm(rr.Description)) + } + rrs = append(rrs, NewTerm(rrObj)) + } + obj.Insert(StringTerm("related_resources"), ArrayTerm(rrs...)) + } + + if len(a.Authors) > 0 { + as := make([]*Term, 0, len(a.Authors)) + for _, author := range a.Authors { + aObj := NewObject() + if len(author.Name) > 0 { + aObj.Insert(StringTerm("name"), StringTerm(author.Name)) + } + if len(author.Email) > 0 { + aObj.Insert(StringTerm("email"), StringTerm(author.Email)) + } + as = append(as, NewTerm(aObj)) + } + obj.Insert(StringTerm("authors"), ArrayTerm(as...)) + } + + if len(a.Schemas) > 0 { + ss := make([]*Term, 0, len(a.Schemas)) + for _, s := range a.Schemas { + sObj := NewObject() + if len(s.Path) > 0 { + sObj.Insert(StringTerm("path"), NewTerm(s.Path.toArray())) + } + if len(s.Schema) > 0 { + sObj.Insert(StringTerm("schema"), NewTerm(s.Schema.toArray())) + } + if s.Definition != nil { + def, err := InterfaceToValue(s.Definition) + if err != nil { + return nil, NewError(CompileErr, a.Location, "invalid definition in schema annotation: %s", err.Error()) + } + sObj.Insert(StringTerm("definition"), NewTerm(def)) + } + ss = append(ss, NewTerm(sObj)) + } + obj.Insert(StringTerm("schemas"), ArrayTerm(ss...)) + } + + if len(a.Custom) > 0 { + c, err := InterfaceToValue(a.Custom) + if err != nil { + return nil, NewError(CompileErr, a.Location, "invalid custom annotation %s", err.Error()) + } + obj.Insert(StringTerm("custom"), NewTerm(c)) + } + + return &obj, nil +} + +func attachRuleAnnotations(mod *Module) { + // make a copy of the annotations + cpy := make([]*Annotations, len(mod.Annotations)) + for i, a := range mod.Annotations { + cpy[i] = a.Copy(a.node) + } + + for _, rule := range mod.Rules { + var j int + var found bool + for i, a := range cpy { + if rule.Ref().GroundPrefix().Equal(a.GetTargetPath()) { + if a.Scope == annotationScopeDocument { + rule.Annotations = append(rule.Annotations, a) + } else if a.Scope == annotationScopeRule && rule.Loc().Row > a.Location.Row { + j = i + found = true + rule.Annotations = append(rule.Annotations, a) + } + } + } + + if found && j < len(cpy) { + cpy = append(cpy[:j], cpy[j+1:]...) + } + } +} + +func attachAnnotationsNodes(mod *Module) Errors { + var errs Errors + + // Find first non-annotation statement following each annotation and attach + // the annotation to that statement. 
+ for _, a := range mod.Annotations { + for _, stmt := range mod.stmts { + _, ok := stmt.(*Annotations) + if !ok { + if stmt.Loc().Row > a.Location.Row { + a.node = stmt + break + } + } + } + + if a.Scope == "" { + switch a.node.(type) { + case *Rule: + if a.Entrypoint { + a.Scope = annotationScopeDocument + } else { + a.Scope = annotationScopeRule + } + case *Package: + a.Scope = annotationScopePackage + case *Import: + a.Scope = annotationScopeImport + } + } + + if err := validateAnnotationScopeAttachment(a); err != nil { + errs = append(errs, err) + } + + if err := validateAnnotationEntrypointAttachment(a); err != nil { + errs = append(errs, err) + } + } + + return errs +} + +func validateAnnotationScopeAttachment(a *Annotations) *Error { + + switch a.Scope { + case annotationScopeRule, annotationScopeDocument: + if _, ok := a.node.(*Rule); ok { + return nil + } + return newScopeAttachmentErr(a, "rule") + case annotationScopePackage, annotationScopeSubpackages: + if _, ok := a.node.(*Package); ok { + return nil + } + return newScopeAttachmentErr(a, "package") + } + + return NewError(ParseErr, a.Loc(), "invalid annotation scope '%v'. Use one of '%s', '%s', '%s', or '%s'", + a.Scope, annotationScopeRule, annotationScopeDocument, annotationScopePackage, annotationScopeSubpackages) +} + +func validateAnnotationEntrypointAttachment(a *Annotations) *Error { + if a.Entrypoint && !(a.Scope == annotationScopeDocument || a.Scope == annotationScopePackage) { + return NewError( + ParseErr, a.Loc(), "annotation entrypoint applied to non-document or package scope '%v'", a.Scope) + } + return nil +} + +// Copy returns a deep copy of a. +func (a *AuthorAnnotation) Copy() *AuthorAnnotation { + cpy := *a + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. +func (a *AuthorAnnotation) Compare(other *AuthorAnnotation) int { + if cmp := strings.Compare(a.Name, other.Name); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(a.Email, other.Email); cmp != 0 { + return cmp + } + + return 0 +} + +func (a *AuthorAnnotation) String() string { + if len(a.Email) == 0 { + return a.Name + } else if len(a.Name) == 0 { + return fmt.Sprintf("<%s>", a.Email) + } + return fmt.Sprintf("%s <%s>", a.Name, a.Email) +} + +// Copy returns a deep copy of rr. +func (rr *RelatedResourceAnnotation) Copy() *RelatedResourceAnnotation { + cpy := *rr + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. +func (rr *RelatedResourceAnnotation) Compare(other *RelatedResourceAnnotation) int { + if cmp := strings.Compare(rr.Description, other.Description); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(rr.Ref.String(), other.Ref.String()); cmp != 0 { + return cmp + } + + return 0 +} + +func (rr *RelatedResourceAnnotation) String() string { + bs, _ := json.Marshal(rr) + return string(bs) +} + +func (rr *RelatedResourceAnnotation) MarshalJSON() ([]byte, error) { + d := map[string]interface{}{ + "ref": rr.Ref.String(), + } + + if len(rr.Description) > 0 { + d["description"] = rr.Description + } + + return json.Marshal(d) +} + +// Copy returns a deep copy of s. +func (s *SchemaAnnotation) Copy() *SchemaAnnotation { + cpy := *s + return &cpy +} + +// Compare returns an integer indicating if s is less than, equal to, or greater +// than other. 
+func (s *SchemaAnnotation) Compare(other *SchemaAnnotation) int { + + if cmp := s.Path.Compare(other.Path); cmp != 0 { + return cmp + } + + if cmp := s.Schema.Compare(other.Schema); cmp != 0 { + return cmp + } + + if s.Definition != nil && other.Definition == nil { + return -1 + } else if s.Definition == nil && other.Definition != nil { + return 1 + } else if s.Definition != nil && other.Definition != nil { + return util.Compare(*s.Definition, *other.Definition) + } + + return 0 +} + +func (s *SchemaAnnotation) String() string { + bs, _ := json.Marshal(s) + return string(bs) +} + +func newAnnotationSet() *AnnotationSet { + return &AnnotationSet{ + byRule: map[*Rule][]*Annotations{}, + byPackage: map[int]*Annotations{}, + byPath: newAnnotationTree(), + } +} + +func BuildAnnotationSet(modules []*Module) (*AnnotationSet, Errors) { + as := newAnnotationSet() + var errs Errors + for _, m := range modules { + for _, a := range m.Annotations { + if err := as.add(a); err != nil { + errs = append(errs, err) + } + } + } + if len(errs) > 0 { + return nil, errs + } + as.modules = modules + return as, nil +} + +// NOTE(philipc): During copy propagation, the underlying Nodes can be +// stripped away from the annotations, leading to nil deref panics. We +// silently ignore these cases for now, as a workaround. +func (as *AnnotationSet) add(a *Annotations) *Error { + switch a.Scope { + case annotationScopeRule: + if rule, ok := a.node.(*Rule); ok { + as.byRule[rule] = append(as.byRule[rule], a) + } + case annotationScopePackage: + if pkg, ok := a.node.(*Package); ok { + hash := pkg.Path.Hash() + if exist, ok := as.byPackage[hash]; ok { + return errAnnotationRedeclared(a, exist.Location) + } + as.byPackage[hash] = a + } + case annotationScopeDocument: + if rule, ok := a.node.(*Rule); ok { + path := rule.Ref().GroundPrefix() + x := as.byPath.get(path) + if x != nil { + return errAnnotationRedeclared(a, x.Value.Location) + } + as.byPath.insert(path, a) + } + case annotationScopeSubpackages: + if pkg, ok := a.node.(*Package); ok { + x := as.byPath.get(pkg.Path) + if x != nil && x.Value != nil { + return errAnnotationRedeclared(a, x.Value.Location) + } + as.byPath.insert(pkg.Path, a) + } + } + return nil +} + +func (as *AnnotationSet) GetRuleScope(r *Rule) []*Annotations { + if as == nil { + return nil + } + return as.byRule[r] +} + +func (as *AnnotationSet) GetSubpackagesScope(path Ref) []*Annotations { + if as == nil { + return nil + } + return as.byPath.ancestors(path) +} + +func (as *AnnotationSet) GetDocumentScope(path Ref) *Annotations { + if as == nil { + return nil + } + if node := as.byPath.get(path); node != nil { + return node.Value + } + return nil +} + +func (as *AnnotationSet) GetPackageScope(pkg *Package) *Annotations { + if as == nil { + return nil + } + return as.byPackage[pkg.Path.Hash()] +} + +// Flatten returns a flattened list view of this AnnotationSet. +// The returned slice is sorted, first by the annotations' target path, then by their target location +func (as *AnnotationSet) Flatten() FlatAnnotationsRefSet { + // This preallocation often won't be optimal, but it's superior to starting with a nil slice. 
+	refs := make([]*AnnotationsRef, 0, len(as.byPath.Children)+len(as.byRule)+len(as.byPackage))
+
+	refs = as.byPath.flatten(refs)
+
+	for _, a := range as.byPackage {
+		refs = append(refs, NewAnnotationsRef(a))
+	}
+
+	for _, as := range as.byRule {
+		for _, a := range as {
+			refs = append(refs, NewAnnotationsRef(a))
+		}
+	}
+
+	// Sort by path, then annotation location, for stable output
+	sort.SliceStable(refs, func(i, j int) bool {
+		return refs[i].Compare(refs[j]) < 0
+	})
+
+	return refs
+}
+
+// Chain returns the chain of annotations leading up to the given rule.
+// The returned slice is ordered as follows:
+// 0. Entries for the given rule, ordered from the METADATA block declared immediately above the rule, to the block declared farthest away (always at least one entry)
+// 1. The 'document' scope entry, if any
+// 2. The 'package' scope entry, if any
+// 3. Entries for the 'subpackages' scope, if any; ordered from the closest package path to the farthest. E.g.: 'do.re.mi', 'do.re', 'do'
+// The returned slice is guaranteed to always contain at least one entry, corresponding to the given rule.
+func (as *AnnotationSet) Chain(rule *Rule) AnnotationsRefSet {
+	var refs []*AnnotationsRef
+
+	ruleAnnots := as.GetRuleScope(rule)
+
+	if len(ruleAnnots) >= 1 {
+		for _, a := range ruleAnnots {
+			refs = append(refs, NewAnnotationsRef(a))
+		}
+	} else {
+		// Make sure there is always a leading entry representing the passed rule, even if it has no annotations
+		refs = append(refs, &AnnotationsRef{
+			Location: rule.Location,
+			Path: rule.Ref().GroundPrefix(),
+			node: rule,
+		})
+	}
+
+	if len(refs) > 1 {
+		// Sort by annotation location; chain must start with annotations declared closest to rule, then going outward
+		sort.SliceStable(refs, func(i, j int) bool {
+			return refs[i].Annotations.Location.Compare(refs[j].Annotations.Location) > 0
+		})
+	}
+
+	docAnnots := as.GetDocumentScope(rule.Ref().GroundPrefix())
+	if docAnnots != nil {
+		refs = append(refs, NewAnnotationsRef(docAnnots))
+	}
+
+	pkg := rule.Module.Package
+	pkgAnnots := as.GetPackageScope(pkg)
+	if pkgAnnots != nil {
+		refs = append(refs, NewAnnotationsRef(pkgAnnots))
+	}
+
+	subPkgAnnots := as.GetSubpackagesScope(pkg.Path)
+	// We need to reverse the order, as subPkgAnnots ordering will start at the root,
+	// whereas we want to end at the root.
+	for i := len(subPkgAnnots) - 1; i >= 0; i-- {
+		refs = append(refs, NewAnnotationsRef(subPkgAnnots[i]))
+	}
+
+	return refs
+}
+
+func (ars FlatAnnotationsRefSet) Insert(ar *AnnotationsRef) FlatAnnotationsRefSet {
+	result := make(FlatAnnotationsRefSet, 0, len(ars)+1)
+
+	// insertion sort, first by path, then location
+	for i, current := range ars {
+		if ar.Compare(current) < 0 {
+			result = append(result, ar)
+			result = append(result, ars[i:]...)
+ break + } + result = append(result, current) + } + + if len(result) < len(ars)+1 { + result = append(result, ar) + } + + return result +} + +func newAnnotationTree() *annotationTreeNode { + return &annotationTreeNode{ + Value: nil, + Children: map[Value]*annotationTreeNode{}, + } +} + +func (t *annotationTreeNode) insert(path Ref, value *Annotations) { + node := t + for _, k := range path { + child, ok := node.Children[k.Value] + if !ok { + child = newAnnotationTree() + node.Children[k.Value] = child + } + node = child + } + node.Value = value +} + +func (t *annotationTreeNode) get(path Ref) *annotationTreeNode { + node := t + for _, k := range path { + if node == nil { + return nil + } + child, ok := node.Children[k.Value] + if !ok { + return nil + } + node = child + } + return node +} + +// ancestors returns a slice of annotations in ascending order, starting with the root of ref; e.g.: 'root', 'root.foo', 'root.foo.bar'. +func (t *annotationTreeNode) ancestors(path Ref) (result []*Annotations) { + node := t + for _, k := range path { + if node == nil { + return result + } + child, ok := node.Children[k.Value] + if !ok { + return result + } + if child.Value != nil { + result = append(result, child.Value) + } + node = child + } + return result +} + +func (t *annotationTreeNode) flatten(refs []*AnnotationsRef) []*AnnotationsRef { + if a := t.Value; a != nil { + refs = append(refs, NewAnnotationsRef(a)) + } + for _, c := range t.Children { + refs = c.flatten(refs) + } + return refs +} + +func (ar *AnnotationsRef) Compare(other *AnnotationsRef) int { + if c := ar.Path.Compare(other.Path); c != 0 { + return c + } + + if c := ar.Annotations.Location.Compare(other.Annotations.Location); c != 0 { + return c + } + + return ar.Annotations.Compare(other.Annotations) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go new file mode 100644 index 000000000..9585620dc --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/builtins.go @@ -0,0 +1,3398 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "strings" + + "github.com/open-policy-agent/opa/v1/types" +) + +// Builtins is the registry of built-in functions supported by OPA. +// Call RegisterBuiltin to add a new built-in. +var Builtins []*Builtin + +// RegisterBuiltin adds a new built-in function to the registry. +func RegisterBuiltin(b *Builtin) { + Builtins = append(Builtins, b) + BuiltinMap[b.Name] = b + if len(b.Infix) > 0 { + BuiltinMap[b.Infix] = b + } +} + +// DefaultBuiltins is the registry of built-in functions supported in OPA +// by default. When adding a new built-in function to OPA, update this +// list. 
+var DefaultBuiltins = [...]*Builtin{ + // Unification/equality ("=") + Equality, + + // Assignment (":=") + Assign, + + // Membership, infix "in": `x in xs` + Member, + MemberWithKey, + + // Comparisons + GreaterThan, + GreaterThanEq, + LessThan, + LessThanEq, + NotEqual, + Equal, + + // Arithmetic + Plus, + Minus, + Multiply, + Divide, + Ceil, + Floor, + Round, + Abs, + Rem, + + // Bitwise Arithmetic + BitsOr, + BitsAnd, + BitsNegate, + BitsXOr, + BitsShiftLeft, + BitsShiftRight, + + // Binary + And, + Or, + + // Aggregates + Count, + Sum, + Product, + Max, + Min, + Any, + All, + + // Arrays + ArrayConcat, + ArraySlice, + ArrayReverse, + + // Conversions + ToNumber, + + // Casts (DEPRECATED) + CastObject, + CastNull, + CastBoolean, + CastString, + CastSet, + CastArray, + + // Regular Expressions + RegexIsValid, + RegexMatch, + RegexMatchDeprecated, + RegexSplit, + GlobsMatch, + RegexTemplateMatch, + RegexFind, + RegexFindAllStringSubmatch, + RegexReplace, + + // Sets + SetDiff, + Intersection, + Union, + + // Strings + AnyPrefixMatch, + AnySuffixMatch, + Concat, + FormatInt, + IndexOf, + IndexOfN, + Substring, + Lower, + Upper, + Contains, + StringCount, + StartsWith, + EndsWith, + Split, + Replace, + ReplaceN, + Trim, + TrimLeft, + TrimPrefix, + TrimRight, + TrimSuffix, + TrimSpace, + Sprintf, + StringReverse, + RenderTemplate, + + // Numbers + NumbersRange, + NumbersRangeStep, + RandIntn, + + // Encoding + JSONMarshal, + JSONMarshalWithOptions, + JSONUnmarshal, + JSONIsValid, + Base64Encode, + Base64Decode, + Base64IsValid, + Base64UrlEncode, + Base64UrlEncodeNoPad, + Base64UrlDecode, + URLQueryDecode, + URLQueryEncode, + URLQueryEncodeObject, + URLQueryDecodeObject, + YAMLMarshal, + YAMLUnmarshal, + YAMLIsValid, + HexEncode, + HexDecode, + + // Object Manipulation + ObjectUnion, + ObjectUnionN, + ObjectRemove, + ObjectFilter, + ObjectGet, + ObjectKeys, + ObjectSubset, + + // JSON Object Manipulation + JSONFilter, + JSONRemove, + JSONPatch, + + // Tokens + JWTDecode, + JWTVerifyRS256, + JWTVerifyRS384, + JWTVerifyRS512, + JWTVerifyPS256, + JWTVerifyPS384, + JWTVerifyPS512, + JWTVerifyES256, + JWTVerifyES384, + JWTVerifyES512, + JWTVerifyHS256, + JWTVerifyHS384, + JWTVerifyHS512, + JWTDecodeVerify, + JWTEncodeSignRaw, + JWTEncodeSign, + + // Time + NowNanos, + ParseNanos, + ParseRFC3339Nanos, + ParseDurationNanos, + Format, + Date, + Clock, + Weekday, + AddDate, + Diff, + + // Crypto + CryptoX509ParseCertificates, + CryptoX509ParseAndVerifyCertificates, + CryptoX509ParseAndVerifyCertificatesWithOptions, + CryptoMd5, + CryptoSha1, + CryptoSha256, + CryptoX509ParseCertificateRequest, + CryptoX509ParseRSAPrivateKey, + CryptoX509ParseKeyPair, + CryptoParsePrivateKeys, + CryptoHmacMd5, + CryptoHmacSha1, + CryptoHmacSha256, + CryptoHmacSha512, + CryptoHmacEqual, + + // Graphs + WalkBuiltin, + ReachableBuiltin, + ReachablePathsBuiltin, + + // Sort + Sort, + + // Types + IsNumber, + IsString, + IsBoolean, + IsArray, + IsSet, + IsObject, + IsNull, + TypeNameBuiltin, + + // HTTP + HTTPSend, + + // GraphQL + GraphQLParse, + GraphQLParseAndVerify, + GraphQLParseQuery, + GraphQLParseSchema, + GraphQLIsValid, + GraphQLSchemaIsValid, + + // JSON Schema + JSONSchemaVerify, + JSONMatchSchema, + + // Cloud Provider Helpers + ProvidersAWSSignReqObj, + + // Rego + RegoParseModule, + RegoMetadataChain, + RegoMetadataRule, + + // OPA + OPARuntime, + + // Tracing + Trace, + + // Networking + NetCIDROverlap, + NetCIDRIntersects, + NetCIDRContains, + NetCIDRContainsMatches, + NetCIDRExpand, + NetCIDRMerge, + 
NetLookupIPAddr, + NetCIDRIsValid, + + // Glob + GlobMatch, + GlobQuoteMeta, + + // Units + UnitsParse, + UnitsParseBytes, + + // UUIDs + UUIDRFC4122, + UUIDParse, + + // SemVers + SemVerIsValid, + SemVerCompare, + + // Printing + Print, + InternalPrint, +} + +// BuiltinMap provides a convenient mapping of built-in names to +// built-in definitions. +var BuiltinMap map[string]*Builtin + +// Deprecated: Builtins can now be directly annotated with the +// Nondeterministic property, and when set to true, will be ignored +// for partial evaluation. +var IgnoreDuringPartialEval = []*Builtin{ + RandIntn, + UUIDRFC4122, + JWTDecodeVerify, + JWTEncodeSignRaw, + JWTEncodeSign, + NowNanos, + HTTPSend, + OPARuntime, + NetLookupIPAddr, +} + +/** + * Unification + */ + +// Equality represents the "=" operator. +var Equality = &Builtin{ + Name: "eq", + Infix: "=", + Decl: types.NewFunction( + types.Args(types.A, types.A), + types.B, + ), +} + +/** + * Assignment + */ + +// Assign represents the assignment (":=") operator. +var Assign = &Builtin{ + Name: "assign", + Infix: ":=", + Decl: types.NewFunction( + types.Args(types.A, types.A), + types.B, + ), +} + +// Member represents the `in` (infix) operator. +var Member = &Builtin{ + Name: "internal.member_2", + Infix: "in", + Decl: types.NewFunction( + types.Args( + types.A, + types.A, + ), + types.B, + ), +} + +// MemberWithKey represents the `in` (infix) operator when used +// with two terms on the lhs, i.e., `k, v in obj`. +var MemberWithKey = &Builtin{ + Name: "internal.member_3", + Infix: "in", + Decl: types.NewFunction( + types.Args( + types.A, + types.A, + types.A, + ), + types.B, + ), +} + +/** + * Comparisons + */ +var comparison = category("comparison") + +var GreaterThan = &Builtin{ + Name: "gt", + Infix: ">", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is greater than `y`; false otherwise"), + ), +} + +var GreaterThanEq = &Builtin{ + Name: "gte", + Infix: ">=", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is greater or equal to `y`; false otherwise"), + ), +} + +// LessThan represents the "<" comparison operator. +var LessThan = &Builtin{ + Name: "lt", + Infix: "<", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is less than `y`; false otherwise"), + ), +} + +var LessThanEq = &Builtin{ + Name: "lte", + Infix: "<=", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is less than or equal to `y`; false otherwise"), + ), +} + +var NotEqual = &Builtin{ + Name: "neq", + Infix: "!=", + Categories: comparison, + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A), + types.Named("y", types.A), + ), + types.Named("result", types.B).Description("true if `x` is not equal to `y`; false otherwise"), + ), +} + +// Equal represents the "==" comparison operator. 
+var Equal = &Builtin{
+	Name: "equal",
+	Infix: "==",
+	Categories: comparison,
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.A),
+			types.Named("y", types.A),
+		),
+		types.Named("result", types.B).Description("true if `x` is equal to `y`; false otherwise"),
+	),
+}
+
+/**
+ * Arithmetic
+ */
+var number = category("numbers")
+
+var Plus = &Builtin{
+	Name: "plus",
+	Infix: "+",
+	Description: "Plus adds two numbers together.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.N),
+			types.Named("y", types.N),
+		),
+		types.Named("z", types.N).Description("the sum of `x` and `y`"),
+	),
+	Categories: number,
+}
+
+var Minus = &Builtin{
+	Name: "minus",
+	Infix: "-",
+	Description: "Minus subtracts the second number from the first number or computes the difference between two sets.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.NewAny(types.N, types.NewSet(types.A))),
+			types.Named("y", types.NewAny(types.N, types.NewSet(types.A))),
+		),
+		types.Named("z", types.NewAny(types.N, types.NewSet(types.A))).Description("the difference of `x` and `y`"),
+	),
+	Categories: category("sets", "numbers"),
+}
+
+var Multiply = &Builtin{
+	Name: "mul",
+	Infix: "*",
+	Description: "Multiplies two numbers.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.N),
+			types.Named("y", types.N),
+		),
+		types.Named("z", types.N).Description("the product of `x` and `y`"),
+	),
+	Categories: number,
+}
+
+var Divide = &Builtin{
+	Name: "div",
+	Infix: "/",
+	Description: "Divides the first number by the second number.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.N).Description("the dividend"),
+			types.Named("y", types.N).Description("the divisor"),
+		),
+		types.Named("z", types.N).Description("the result of `x` divided by `y`"),
+	),
+	Categories: number,
+}
+
+var Round = &Builtin{
+	Name: "round",
+	Description: "Rounds the number to the nearest integer.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.N).Description("the number to round"),
+		),
+		types.Named("y", types.N).Description("the result of rounding `x`"),
+	),
+	Categories: number,
+}
+
+var Ceil = &Builtin{
+	Name: "ceil",
+	Description: "Rounds the number _up_ to the nearest integer.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.N).Description("the number to round"),
+		),
+		types.Named("y", types.N).Description("the result of rounding `x` _up_"),
+	),
+	Categories: number,
+}
+
+var Floor = &Builtin{
+	Name: "floor",
+	Description: "Rounds the number _down_ to the nearest integer.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.N).Description("the number to round"),
+		),
+		types.Named("y", types.N).Description("the result of rounding `x` _down_"),
+	),
+	Categories: number,
+}
+
+var Abs = &Builtin{
+	Name: "abs",
+	Description: "Returns the number without its sign.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.N).Description("the number to take the absolute value of"),
+		),
+		types.Named("y", types.N).Description("the absolute value of `x`"),
+	),
+	Categories: number,
+}
+
+var Rem = &Builtin{
+	Name: "rem",
+	Infix: "%",
+	Description: "Returns the remainder of `x` divided by `y`, for `y != 0`.",
+	Decl: types.NewFunction(
+		types.Args(
+			types.Named("x", types.N),
+			types.Named("y", types.N),
+		),
+		types.Named("z", types.N).Description("the remainder"),
+	),
+	Categories: number,
+}
+
+/**
+ * Bitwise
+ */
+
+var BitsOr = &Builtin{
+	Name: "bits.or",
+	Description:
"Returns the bitwise \"OR\" of two integers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the first integer"), + types.Named("y", types.N).Description("the second integer"), + ), + types.Named("z", types.N).Description("the bitwise OR of `x` and `y`"), + ), +} + +var BitsAnd = &Builtin{ + Name: "bits.and", + Description: "Returns the bitwise \"AND\" of two integers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the first integer"), + types.Named("y", types.N).Description("the second integer"), + ), + types.Named("z", types.N).Description("the bitwise AND of `x` and `y`"), + ), +} + +var BitsNegate = &Builtin{ + Name: "bits.negate", + Description: "Returns the bitwise negation (flip) of an integer.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the integer to negate"), + ), + types.Named("z", types.N).Description("the bitwise negation of `x`"), + ), +} + +var BitsXOr = &Builtin{ + Name: "bits.xor", + Description: "Returns the bitwise \"XOR\" (exclusive-or) of two integers.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the first integer"), + types.Named("y", types.N).Description("the second integer"), + ), + types.Named("z", types.N).Description("the bitwise XOR of `x` and `y`"), + ), +} + +var BitsShiftLeft = &Builtin{ + Name: "bits.lsh", + Description: "Returns a new integer with its bits shifted `s` bits to the left.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the integer to shift"), + types.Named("s", types.N).Description("the number of bits to shift"), + ), + types.Named("z", types.N).Description("the result of shifting `x` `s` bits to the left"), + ), +} + +var BitsShiftRight = &Builtin{ + Name: "bits.rsh", + Description: "Returns a new integer with its bits shifted `s` bits to the right.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.N).Description("the integer to shift"), + types.Named("s", types.N).Description("the number of bits to shift"), + ), + types.Named("z", types.N).Description("the result of shifting `x` `s` bits to the right"), + ), +} + +/** + * Sets + */ + +var sets = category("sets") + +var And = &Builtin{ + Name: "and", + Infix: "&", + Description: "Returns the intersection of two sets.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewSet(types.A)).Description("the first set"), + types.Named("y", types.NewSet(types.A)).Description("the second set"), + ), + types.Named("z", types.NewSet(types.A)).Description("the intersection of `x` and `y`"), + ), + Categories: sets, +} + +// Or performs a union operation on sets. 
+var Or = &Builtin{ + Name: "or", + Infix: "|", + Description: "Returns the union of two sets.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewSet(types.A)), + types.Named("y", types.NewSet(types.A)), + ), + types.Named("z", types.NewSet(types.A)).Description("the union of `x` and `y`"), + ), + Categories: sets, +} + +var Intersection = &Builtin{ + Name: "intersection", + Description: "Returns the intersection of the given input sets.", + Decl: types.NewFunction( + types.Args( + types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to intersect"), + ), + types.Named("y", types.NewSet(types.A)).Description("the intersection of all `xs` sets"), + ), + Categories: sets, +} + +var Union = &Builtin{ + Name: "union", + Description: "Returns the union of the given input sets.", + Decl: types.NewFunction( + types.Args( + types.Named("xs", types.NewSet(types.NewSet(types.A))).Description("set of sets to merge"), + ), + types.Named("y", types.NewSet(types.A)).Description("the union of all `xs` sets"), + ), + Categories: sets, +} + +/** + * Aggregates + */ + +var aggregates = category("aggregates") + +var Count = &Builtin{ + Name: "count", + Description: " Count takes a collection or string and returns the number of elements (or characters) in it.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.NewSet(types.A), + types.NewArray(nil, types.A), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.S, + )).Description("the set/array/object/string to be counted"), + ), + types.Named("n", types.N).Description("the count of elements, key/val pairs, or characters, respectively."), + ), + Categories: aggregates, +} + +var Sum = &Builtin{ + Name: "sum", + Description: "Sums elements of an array or set of numbers.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.NewSet(types.N), + types.NewArray(nil, types.N), + )).Description("the set or array of numbers to sum"), + ), + types.Named("n", types.N).Description("the sum of all elements"), + ), + Categories: aggregates, +} + +var Product = &Builtin{ + Name: "product", + Description: "Multiplies elements of an array or set of numbers", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.NewSet(types.N), + types.NewArray(nil, types.N), + )).Description("the set or array of numbers to multiply"), + ), + types.Named("n", types.N).Description("the product of all elements"), + ), + Categories: aggregates, +} + +var Max = &Builtin{ + Name: "max", + Description: "Returns the maximum value in a collection.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.NewSet(types.A), + types.NewArray(nil, types.A), + )).Description("the set or array to be searched"), + ), + types.Named("n", types.A).Description("the maximum of all elements"), + ), + Categories: aggregates, +} + +var Min = &Builtin{ + Name: "min", + Description: "Returns the minimum value in a collection.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", types.NewAny( + types.NewSet(types.A), + types.NewArray(nil, types.A), + )).Description("the set or array to be searched"), + ), + types.Named("n", types.A).Description("the minimum of all elements"), + ), + Categories: aggregates, +} + +/** + * Sorting + */ + +var Sort = &Builtin{ + Name: "sort", + Description: "Returns a sorted array.", + Decl: types.NewFunction( + types.Args( + types.Named("collection", 
types.NewAny( + types.NewArray(nil, types.A), + types.NewSet(types.A), + )).Description("the array or set to be sorted"), + ), + types.Named("n", types.NewArray(nil, types.A)).Description("the sorted array"), + ), + Categories: aggregates, +} + +/** + * Arrays + */ + +var ArrayConcat = &Builtin{ + Name: "array.concat", + Description: "Concatenates two arrays.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewArray(nil, types.A)).Description("the first array"), + types.Named("y", types.NewArray(nil, types.A)).Description("the second array"), + ), + types.Named("z", types.NewArray(nil, types.A)).Description("the concatenation of `x` and `y`"), + ), +} + +var ArraySlice = &Builtin{ + Name: "array.slice", + Description: "Returns a slice of a given array. If `start` is greater or equal than `stop`, `slice` is `[]`.", + Decl: types.NewFunction( + types.Args( + types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be sliced"), + types.Named("start", types.NewNumber()).Description("the start index of the returned slice; if less than zero, it's clamped to 0"), + types.Named("stop", types.NewNumber()).Description("the stop index of the returned slice; if larger than `count(arr)`, it's clamped to `count(arr)`"), + ), + types.Named("slice", types.NewArray(nil, types.A)).Description("the subslice of `array`, from `start` to `end`, including `arr[start]`, but excluding `arr[end]`"), + ), +} // NOTE(sr): this function really needs examples + +var ArrayReverse = &Builtin{ + Name: "array.reverse", + Description: "Returns the reverse of a given array.", + Decl: types.NewFunction( + types.Args( + types.Named("arr", types.NewArray(nil, types.A)).Description("the array to be reversed"), + ), + types.Named("rev", types.NewArray(nil, types.A)).Description("an array containing the elements of `arr` in reverse order"), + ), +} + +/** + * Conversions + */ +var conversions = category("conversions") + +var ToNumber = &Builtin{ + Name: "to_number", + Description: "Converts a string, bool, or number value to a number: Strings are converted to numbers using `strconv.Atoi`, Boolean `false` is converted to 0 and `true` is converted to 1.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + types.N, + types.S, + types.B, + types.NewNull(), + )).Description("value to convert"), + ), + types.Named("num", types.N).Description("the numeric representation of `x`"), + ), + Categories: conversions, +} + +/** + * Regular Expressions + */ + +var RegexMatch = &Builtin{ + Name: "regex.match", + Description: "Matches a string against a regular expression.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("value to match against `pattern`"), + ), + types.Named("result", types.B).Description("true if `value` matches `pattern`"), + ), +} + +var RegexIsValid = &Builtin{ + Name: "regex.is_valid", + Description: "Checks if a string is a valid regular expression: the detailed syntax for patterns is defined by https://github.com/google/re2/wiki/Syntax.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + ), + types.Named("result", types.B).Description("true if `pattern` is a valid regular expression"), + ), +} + +var RegexFindAllStringSubmatch = &Builtin{ + Name: "regex.find_all_string_submatch_n", + Description: "Returns all successive matches of the expression.", + Decl: types.NewFunction( + types.Args( + 
types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("string to match"), + types.Named("number", types.N).Description("number of matches to return; `-1` means all matches"), + ), + types.Named("output", types.NewArray(nil, types.NewArray(nil, types.S))).Description("array of all matches"), + ), +} + +var RegexTemplateMatch = &Builtin{ + Name: "regex.template_match", + Description: "Matches a string against a pattern, where there pattern may be glob-like", + Decl: types.NewFunction( + types.Args( + types.Named("template", types.S).Description("template expression containing `0..n` regular expressions"), + types.Named("value", types.S).Description("string to match"), + types.Named("delimiter_start", types.S).Description("start delimiter of the regular expression in `template`"), + types.Named("delimiter_end", types.S).Description("end delimiter of the regular expression in `template`"), + ), + types.Named("result", types.B).Description("true if `value` matches the `template`"), + ), +} // TODO(sr): example:`regex.template_match("urn:foo:{.*}", "urn:foo:bar:baz", "{", "}")`` returns ``true``. + +var RegexSplit = &Builtin{ + Name: "regex.split", + Description: "Splits the input string by the occurrences of the given pattern.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("string to match"), + ), + types.Named("output", types.NewArray(nil, types.S)).Description("the parts obtained by splitting `value`"), + ), +} + +// RegexFind takes two strings and a number, the pattern, the value and number of match values to +// return, -1 means all match values. +var RegexFind = &Builtin{ + Name: "regex.find_n", + Description: "Returns the specified number of matches when matching the input against the pattern.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("regular expression"), + types.Named("value", types.S).Description("string to match"), + types.Named("number", types.N).Description("number of matches to return, if `-1`, returns all matches"), + ), + types.Named("output", types.NewArray(nil, types.S)).Description("collected matches"), + ), +} + +// GlobsMatch takes two strings regexp-style strings and evaluates to true if their +// intersection matches a non-empty set of non-empty strings. +// Examples: +// - "a.a." and ".b.b" -> true. +// - "[a-z]*" and [0-9]+" -> not true. +var GlobsMatch = &Builtin{ + Name: "regex.globs_match", + Description: `Checks if the intersection of two glob-style regular expressions matches a non-empty set of non-empty strings. 
+The set of regex symbols is limited for this builtin: only ` + "`.`, `*`, `+`, `[`, `-`, `]` and `\\` are treated as special symbols.", + Decl: types.NewFunction( + types.Args( + types.Named("glob1", types.S).Description("first glob-style regular expression"), + types.Named("glob2", types.S).Description("second glob-style regular expression"), + ), + types.Named("result", types.B).Description("true if the intersection of `glob1` and `glob2` matches a non-empty set of non-empty strings"), + ), +} + +/** + * Strings + */ +var stringsCat = category("strings") + +var AnyPrefixMatch = &Builtin{ + Name: "strings.any_prefix_match", + Description: "Returns true if any of the search strings begins with any of the base strings.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.NewAny( + types.S, + types.NewSet(types.S), + types.NewArray(nil, types.S), + )).Description("search string(s)"), + types.Named("base", types.NewAny( + types.S, + types.NewSet(types.S), + types.NewArray(nil, types.S), + )).Description("base string(s)"), + ), + types.Named("result", types.B).Description("result of the prefix check"), + ), + Categories: stringsCat, +} + +var AnySuffixMatch = &Builtin{ + Name: "strings.any_suffix_match", + Description: "Returns true if any of the search strings ends with any of the base strings.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.NewAny( + types.S, + types.NewSet(types.S), + types.NewArray(nil, types.S), + )).Description("search string(s)"), + types.Named("base", types.NewAny( + types.S, + types.NewSet(types.S), + types.NewArray(nil, types.S), + )).Description("base string(s)"), + ), + types.Named("result", types.B).Description("result of the suffix check"), + ), + Categories: stringsCat, +} + +var Concat = &Builtin{ + Name: "concat", + Description: "Joins a set or array of strings with a delimiter.", + Decl: types.NewFunction( + types.Args( + types.Named("delimiter", types.S).Description("string to use as a delimiter"), + types.Named("collection", types.NewAny( + types.NewSet(types.S), + types.NewArray(nil, types.S), + )).Description("strings to join"), + ), + types.Named("output", types.S).Description("the joined string"), + ), + Categories: stringsCat, +} + +var FormatInt = &Builtin{ + Name: "format_int", + Description: "Returns the string representation of the number in the given base after rounding it down to an integer value.", + Decl: types.NewFunction( + types.Args( + types.Named("number", types.N).Description("number to format"), + types.Named("base", types.N).Description("base of number representation to use"), + ), + types.Named("output", types.S).Description("formatted number"), + ), + Categories: stringsCat, +} + +var IndexOf = &Builtin{ + Name: "indexof", + Description: "Returns the index of a substring contained inside a string.", + Decl: types.NewFunction( + types.Args( + types.Named("haystack", types.S).Description("string to search in"), + types.Named("needle", types.S).Description("substring to look for"), + ), + types.Named("output", types.N).Description("index of first occurrence, `-1` if not found"), + ), + Categories: stringsCat, +} + +var IndexOfN = &Builtin{ + Name: "indexof_n", + Description: "Returns a list of all the indexes of a substring contained inside a string.", + Decl: types.NewFunction( + types.Args( + types.Named("haystack", types.S).Description("string to search in"), + types.Named("needle", types.S).Description("substring to look for"), + ), + types.Named("output", types.NewArray(nil, 
types.N)).Description("all indices at which `needle` occurs in `haystack`, may be empty"), + ), + Categories: stringsCat, +} + +var Substring = &Builtin{ + Name: "substring", + Description: "Returns the portion of a string for a given `offset` and a `length`. If `length < 0`, `output` is the remainder of the string.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to extract substring from"), + types.Named("offset", types.N).Description("offset, must be positive"), + types.Named("length", types.N).Description("length of the substring starting from `offset`"), + ), + types.Named("output", types.S).Description("substring of `value` from `offset`, of length `length`"), + ), + Categories: stringsCat, +} + +var Contains = &Builtin{ + Name: "contains", + Description: "Returns `true` if the search string is included in the base string", + Decl: types.NewFunction( + types.Args( + types.Named("haystack", types.S).Description("string to search in"), + types.Named("needle", types.S).Description("substring to look for"), + ), + types.Named("result", types.B).Description("result of the containment check"), + ), + Categories: stringsCat, +} + +var StringCount = &Builtin{ + Name: "strings.count", + Description: "Returns the number of non-overlapping instances of a substring in a string.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.S).Description("string to search in"), + types.Named("substring", types.S).Description("substring to look for"), + ), + types.Named("output", types.N).Description("count of occurrences, `0` if not found"), + ), + Categories: stringsCat, +} + +var StartsWith = &Builtin{ + Name: "startswith", + Description: "Returns true if the search string begins with the base string.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.S).Description("search string"), + types.Named("base", types.S).Description("base string"), + ), + types.Named("result", types.B).Description("result of the prefix check"), + ), + Categories: stringsCat, +} + +var EndsWith = &Builtin{ + Name: "endswith", + Description: "Returns true if the search string ends with the base string.", + Decl: types.NewFunction( + types.Args( + types.Named("search", types.S).Description("search string"), + types.Named("base", types.S).Description("base string"), + ), + types.Named("result", types.B).Description("result of the suffix check"), + ), + Categories: stringsCat, +} + +var Lower = &Builtin{ + Name: "lower", + Description: "Returns the input string but with all characters in lower-case.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string that is converted to lower-case"), + ), + types.Named("y", types.S).Description("lower-case of x"), + ), + Categories: stringsCat, +} + +var Upper = &Builtin{ + Name: "upper", + Description: "Returns the input string but with all characters in upper-case.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string that is converted to upper-case"), + ), + types.Named("y", types.S).Description("upper-case of x"), + ), + Categories: stringsCat, +} + +var Split = &Builtin{ + Name: "split", + Description: "Split returns an array containing elements of the input string split on a delimiter.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string that is split"), + types.Named("delimiter", types.S).Description("delimiter used for splitting"), + ), + types.Named("ys", types.NewArray(nil, 
types.S)).Description("split parts"), + ), + Categories: stringsCat, +} + +var Replace = &Builtin{ + Name: "replace", + Description: "Replace replaces all instances of a sub-string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string being processed"), + types.Named("old", types.S).Description("substring to replace"), + types.Named("new", types.S).Description("string to replace `old` with"), + ), + types.Named("y", types.S).Description("string with replaced substrings"), + ), + Categories: stringsCat, +} + +var ReplaceN = &Builtin{ + Name: "strings.replace_n", + Description: `Replaces a string from a list of old, new string pairs. +Replacements are performed in the order they appear in the target string, without overlapping matches. +The old string comparisons are done in argument order.`, + Decl: types.NewFunction( + types.Args( + types.Named("patterns", types.NewObject( + nil, + types.NewDynamicProperty( + types.S, + types.S)), + ).Description("replacement pairs"), + types.Named("value", types.S).Description("string to replace substring matches in"), + ), + types.Named("output", types.S).Description("string with replaced substrings"), + ), +} + +var RegexReplace = &Builtin{ + Name: "regex.replace", + Description: `Find and replaces the text using the regular expression pattern.`, + Decl: types.NewFunction( + types.Args( + types.Named("s", types.S).Description("string being processed"), + types.Named("pattern", types.S).Description("regex pattern to be applied"), + types.Named("value", types.S).Description("regex value"), + ), + types.Named("output", types.S).Description("string with replaced substrings"), + ), +} + +var Trim = &Builtin{ + Name: "trim", + Description: "Returns `value` with all leading or trailing instances of the `cutset` characters removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("cutset", types.S).Description("string of characters that are cut off"), + ), + types.Named("output", types.S).Description("string trimmed of `cutset` characters"), + ), + Categories: stringsCat, +} + +var TrimLeft = &Builtin{ + Name: "trim_left", + Description: "Returns `value` with all leading instances of the `cutset` characters removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("cutset", types.S).Description("string of characters that are cut off on the left"), + ), + types.Named("output", types.S).Description("string left-trimmed of `cutset` characters"), + ), + Categories: stringsCat, +} + +var TrimPrefix = &Builtin{ + Name: "trim_prefix", + Description: "Returns `value` without the prefix. 
If `value` doesn't start with `prefix`, it is returned unchanged.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("prefix", types.S).Description("prefix to cut off"), + ), + types.Named("output", types.S).Description("string with `prefix` cut off"), + ), + Categories: stringsCat, +} + +var TrimRight = &Builtin{ + Name: "trim_right", + Description: "Returns `value` with all trailing instances of the `cutset` characters removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("cutset", types.S).Description("string of characters that are cut off on the right"), + ), + types.Named("output", types.S).Description("string right-trimmed of `cutset` characters"), + ), + Categories: stringsCat, +} + +var TrimSuffix = &Builtin{ + Name: "trim_suffix", + Description: "Returns `value` without the suffix. If `value` doesn't end with `suffix`, it is returned unchanged.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + types.Named("suffix", types.S).Description("suffix to cut off"), + ), + types.Named("output", types.S).Description("string with `suffix` cut off"), + ), + Categories: stringsCat, +} + +var TrimSpace = &Builtin{ + Name: "trim_space", + Description: "Return the given string with all leading and trailing white space removed.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("string to trim"), + ), + types.Named("output", types.S).Description("string leading and trailing white space cut off"), + ), + Categories: stringsCat, +} + +var Sprintf = &Builtin{ + Name: "sprintf", + Description: "Returns the given string, formatted.", + Decl: types.NewFunction( + types.Args( + types.Named("format", types.S).Description("string with formatting verbs"), + types.Named("values", types.NewArray(nil, types.A)).Description("arguments to format into formatting verbs"), + ), + types.Named("output", types.S).Description("`format` formatted by the values in `values`"), + ), + Categories: stringsCat, +} + +var StringReverse = &Builtin{ + Name: "strings.reverse", + Description: "Reverses a given string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to reverse"), + ), + types.Named("y", types.S).Description("reversed string"), + ), + Categories: stringsCat, +} + +var RenderTemplate = &Builtin{ + Name: "strings.render_template", + Description: `Renders a templated string with given template variables injected. For a given templated string and key/value mapping, values will be injected into the template where they are referenced by key. + For examples of templating syntax, see https://pkg.go.dev/text/template`, + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("a templated string"), + types.Named("vars", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("a mapping of template variable keys to values"), + ), + types.Named("result", types.S).Description("rendered template with template variables injected"), + ), + Categories: stringsCat, +} + +/** + * Numbers + */ + +// RandIntn returns a random number 0 - n +// Marked non-deterministic because it relies on RNG internally. +var RandIntn = &Builtin{ + Name: "rand.intn", + Description: "Returns a random integer between `0` and `n` (`n` exclusive). If `n` is `0`, then `y` is always `0`. 
For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("str", types.S).Description("seed string for the random number"),
+ types.Named("n", types.N).Description("upper bound of the random number (exclusive)"),
+ ),
+ types.Named("y", types.N).Description("random integer in the range `[0, abs(n))`"),
+ ),
+ Categories: number,
+ Nondeterministic: true,
+}
+
+var NumbersRange = &Builtin{
+ Name: "numbers.range",
+ Description: "Returns an array of numbers in the given (inclusive) range. If `a==b`, then `range == [a]`; if `a > b`, then `range` is in descending order.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("a", types.N).Description("the start of the range"),
+ types.Named("b", types.N).Description("the end of the range (inclusive)"),
+ ),
+ types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b`"),
+ ),
+}
+
+var NumbersRangeStep = &Builtin{
+ Name: "numbers.range_step",
+ Description: `Returns an array of numbers in the given (inclusive) range incremented by a positive step.
+ If "a==b", then "range == [a]"; if "a > b", then "range" is in descending order.
+ If the provided "step" is less than 1, an error will be thrown.
+ If "b" is not in the range of the provided "step", "b" won't be included in the result.
+ `,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("a", types.N).Description("the start of the range"),
+ types.Named("b", types.N).Description("the end of the range (inclusive)"),
+ types.Named("step", types.N).Description("the step between numbers in the range"),
+ ),
+ types.Named("range", types.NewArray(nil, types.N)).Description("the range between `a` and `b` in `step` increments"),
+ ),
+}
+
+/**
+ * Units
+ */
+
+var UnitsParse = &Builtin{
+ Name: "units.parse",
+ Description: `Converts strings like "10G", "5K", "4M", "1500m", and the like into a number.
+This number can be a non-integer, such as 1.5, 0.22, etc. Scientific notation is supported,
+allowing values such as "1e-3K" (1) or "2.5e6M" (2.5 million M).
+
+Supports standard metric decimal and binary SI units (e.g., K, Ki, M, Mi, G, Gi, etc.) where
+m, K, M, G, T, P, and E are treated as decimal units and Ki, Mi, Gi, Ti, Pi, and Ei are treated as
+binary units.
+
+Note that 'm' and 'M' are case-sensitive to allow distinguishing between "milli" and "mega" units
+respectively. Other units are case-insensitive.`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("the unit to parse"),
+ ),
+ types.Named("y", types.N).Description("the parsed number"),
+ ),
+}
+
+var UnitsParseBytes = &Builtin{
+ Name: "units.parse_bytes",
+ Description: `Converts strings like "10GB", "5K", "4mb", or "1e6KB" into an integer number of bytes.
+
+Supports standard byte units (e.g., KB, KiB, etc.) where KB, MB, GB, and TB are treated as decimal
+units, and KiB, MiB, GiB, and TiB are treated as binary units. Scientific notation is supported,
+enabling values like "1.5e3MB" (1500MB) or "2e6GiB" (2 million GiB).
+
+The bytes symbol (b/B) in the unit is optional; omitting it will yield the same result (e.g., "Mi"
+and "MiB" are equivalent).`,
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("the byte unit to parse"),
+ ),
+ types.Named("y", types.N).Description("the parsed number"),
+ ),
+}
+
+//
+/**
+ * Type
+ */
+
+// UUIDRFC4122 returns a version 4 UUID string.
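+// Editor's sketch (not part of the upstream OPA source): the numbers and units
+// builtins declared above can be exercised from Go through the public rego
+// package. The import path is assumed to follow the OPA v1 module layout
+// (github.com/open-policy-agent/opa/v1/rego); adjust to match the vendored
+// version. Illustrative only, not compiled as part of this file.
+//
+//	func evalNumbersAndUnits(ctx context.Context) (rego.ResultSet, error) {
+//		// numbers.range_step(0, 10, 5) should yield [0, 5, 10];
+//		// units.parse("1.5Ki") should yield 1536 (binary SI suffix).
+//		r := rego.New(rego.Query(`x := numbers.range_step(0, 10, 5); y := units.parse("1.5Ki")`))
+//		pq, err := r.PrepareForEval(ctx)
+//		if err != nil {
+//			return nil, err
+//		}
+//		return pq.Eval(ctx)
+//	}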
+// Marked non-deterministic because it relies on RNG internally. +var UUIDRFC4122 = &Builtin{ + Name: "uuid.rfc4122", + Description: "Returns a new UUIDv4.", + Decl: types.NewFunction( + types.Args( + types.Named("k", types.S).Description("seed string"), + ), + types.Named("output", types.S).Description("a version 4 UUID; for any given `k`, the output will be consistent throughout a query evaluation"), + ), + Nondeterministic: true, +} + +var UUIDParse = &Builtin{ + Name: "uuid.parse", + Description: "Parses the string value as an UUID and returns an object with the well-defined fields of the UUID if valid.", + Categories: nil, + Decl: types.NewFunction( + types.Args( + types.Named("uuid", types.S).Description("UUID string to parse"), + ), + types.Named("result", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("Properties of UUID if valid (version, variant, etc). Undefined otherwise."), + ), + Relation: false, +} + +/** + * JSON + */ + +var objectCat = category("object") + +var JSONFilter = &Builtin{ + Name: "json.filter", + Description: "Filters the object. " + + "For example: `json.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"b\": \"x\"}}`). " + + "Paths are not filtered in-order and are deduplicated before being evaluated.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("object to filter"), + types.Named("paths", types.NewAny( + types.NewArray( + nil, + types.NewAny( + types.S, + types.NewArray( + nil, + types.A, + ), + ), + ), + types.NewSet( + types.NewAny( + types.S, + types.NewArray( + nil, + types.A, + ), + ), + ), + )).Description("JSON string paths"), + ), + types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `paths`"), + ), + Categories: objectCat, +} + +var JSONRemove = &Builtin{ + Name: "json.remove", + Description: "Removes paths from an object. " + + "For example: `json.remove({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"c\": \"y\"}}`. " + + "Paths are not removed in-order and are deduplicated before being evaluated.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("object to remove paths from"), + types.Named("paths", types.NewAny( + types.NewArray( + nil, + types.NewAny( + types.S, + types.NewArray( + nil, + types.A, + ), + ), + ), + types.NewSet( + types.NewAny( + types.S, + types.NewArray( + nil, + types.A, + ), + ), + ), + )).Description("JSON string paths"), + ), + types.Named("output", types.A).Description("result of removing all keys specified in `paths`"), + ), + Categories: objectCat, +} + +var JSONPatch = &Builtin{ + Name: "json.patch", + Description: "Patches an object according to RFC6902. " + + "For example: `json.patch({\"a\": {\"foo\": 1}}, [{\"op\": \"add\", \"path\": \"/a/bar\", \"value\": 2}])` results in `{\"a\": {\"foo\": 1, \"bar\": 2}`. " + + "The patches are applied atomically: if any of them fails, the result will be undefined. " + + "Additionally works on sets, where a value contained in the set is considered to be its path.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.A).Description("the object to patch"), // TODO(sr): types.A? 
+ types.Named("patches", types.NewArray( + nil, + types.NewObject( + []*types.StaticProperty{ + {Key: "op", Value: types.S}, + {Key: "path", Value: types.A}, + }, + types.NewDynamicProperty(types.A, types.A), + ), + )).Description("the JSON patches to apply"), + ), + types.Named("output", types.A).Description("result obtained after consecutively applying all patch operations in `patches`"), + ), + Categories: objectCat, +} + +var ObjectSubset = &Builtin{ + Name: "object.subset", + Description: "Determines if an object `sub` is a subset of another object `super`." + + "Object `sub` is a subset of object `super` if and only if every key in `sub` is also in `super`, " + + "**and** for all keys which `sub` and `super` share, they have the same value. " + + "This function works with objects, sets, arrays and a set of array and set." + + "If both arguments are objects, then the operation is recursive, e.g. " + + "`{\"c\": {\"x\": {10, 15, 20}}` is a subset of `{\"a\": \"b\", \"c\": {\"x\": {10, 15, 20, 25}, \"y\": \"z\"}`. " + + "If both arguments are sets, then this function checks if every element of `sub` is a member of `super`, " + + "but does not attempt to recurse. If both arguments are arrays, " + + "then this function checks if `sub` appears contiguously in order within `super`, " + + "and also does not attempt to recurse. If `super` is array and `sub` is set, " + + "then this function checks if `super` contains every element of `sub` with no consideration of ordering, " + + "and also does not attempt to recurse.", + Decl: types.NewFunction( + types.Args( + types.Named("super", types.NewAny(types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + ), types.NewSet(types.A), + types.NewArray(nil, types.A), + )).Description("object to test if sub is a subset of"), + types.Named("sub", types.NewAny(types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + ), types.NewSet(types.A), + types.NewArray(nil, types.A), + )).Description("object to test if super is a superset of"), + ), + types.Named("result", types.A).Description("`true` if `sub` is a subset of `super`"), + ), +} + +var ObjectUnion = &Builtin{ + Name: "object.union", + Description: "Creates a new object of the asymmetric union of two objects. " + + "For example: `object.union({\"a\": 1, \"b\": 2, \"c\": {\"d\": 3}}, {\"a\": 7, \"c\": {\"d\": 4, \"e\": 5}})` will result in `{\"a\": 7, \"b\": 2, \"c\": {\"d\": 4, \"e\": 5}}`.", + Decl: types.NewFunction( + types.Args( + types.Named("a", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("left-hand object"), + types.Named("b", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("right-hand object"), + ), + types.Named("output", types.A).Description("a new object which is the result of an asymmetric recursive union of two objects where conflicts are resolved by choosing the key from the right-hand object `b`"), + ), // TODO(sr): types.A? ^^^^^^^ (also below) +} + +var ObjectUnionN = &Builtin{ + Name: "object.union_n", + Description: "Creates a new object that is the asymmetric union of all objects merged from left to right. 
" + + "For example: `object.union_n([{\"a\": 1}, {\"b\": 2}, {\"a\": 3}])` will result in `{\"b\": 2, \"a\": 3}`.", + Decl: types.NewFunction( + types.Args( + types.Named("objects", types.NewArray( + nil, + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + )).Description("list of objects to merge"), + ), + types.Named("output", types.A).Description("asymmetric recursive union of all objects in `objects`, merged from left to right, where conflicts are resolved by choosing the key from the right-hand object"), + ), +} + +var ObjectRemove = &Builtin{ + Name: "object.remove", + Description: "Removes specified keys from an object.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("object to remove keys from"), + types.Named("keys", types.NewAny( + types.NewArray(nil, types.A), + types.NewSet(types.A), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + )).Description("keys to remove from x"), + ), + types.Named("output", types.A).Description("result of removing the specified `keys` from `object`"), + ), +} + +var ObjectFilter = &Builtin{ + Name: "object.filter", + Description: "Filters the object by keeping only specified keys. " + + "For example: `object.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}, \"d\": \"z\"}, [\"a\"])` will result in `{\"a\": {\"b\": \"x\", \"c\": \"y\"}}`).", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty(types.A, types.A), + )).Description("object to filter keys"), + types.Named("keys", types.NewAny( + types.NewArray(nil, types.A), + types.NewSet(types.A), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + )).Description("keys to keep in `object`"), + ), + types.Named("filtered", types.A).Description("remaining data from `object` with only keys specified in `keys`"), + ), +} + +var ObjectGet = &Builtin{ + Name: "object.get", + Description: "Returns value of an object's key if present, otherwise a default. " + + "If the supplied `key` is an `array`, then `object.get` will search through a nested object or array using each key in turn. " + + "For example: `object.get({\"a\": [{ \"b\": true }]}, [\"a\", 0, \"b\"], false)` results in `true`.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get `key` from"), + types.Named("key", types.A).Description("key to lookup in `object`"), + types.Named("default", types.A).Description("default to use when lookup fails"), + ), + types.Named("value", types.A).Description("`object[key]` if present, otherwise `default`"), + ), +} + +var ObjectKeys = &Builtin{ + Name: "object.keys", + Description: "Returns a set of an object's keys. 
" + + "For example: `object.keys({\"a\": 1, \"b\": true, \"c\": \"d\")` results in `{\"a\", \"b\", \"c\"}`.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("object to get keys from"), + ), + types.Named("value", types.NewSet(types.A)).Description("set of `object`'s keys"), + ), +} + +/* + * Encoding + */ +var encoding = category("encoding") + +var JSONMarshal = &Builtin{ + Name: "json.marshal", + Description: "Serializes the input term to JSON.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("the term to serialize"), + ), + types.Named("y", types.S).Description("the JSON string representation of `x`"), + ), + Categories: encoding, +} + +var JSONMarshalWithOptions = &Builtin{ + Name: "json.marshal_with_options", + Description: "Serializes the input term JSON, with additional formatting options via the `opts` parameter. " + + "`opts` accepts keys `pretty` (enable multi-line/formatted JSON), `prefix` (string to prefix lines with, default empty string) and `indent` (string to indent with, default `\\t`).", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("the term to serialize"), + types.Named("opts", types.NewObject( + []*types.StaticProperty{ + types.NewStaticProperty("pretty", types.B), + types.NewStaticProperty("indent", types.S), + types.NewStaticProperty("prefix", types.S), + }, + types.NewDynamicProperty(types.S, types.A), + )).Description("encoding options"), + ), + types.Named("y", types.S).Description("the JSON string representation of `x`, with configured prefix/indent string(s) as appropriate"), + ), + Categories: encoding, +} + +var JSONUnmarshal = &Builtin{ + Name: "json.unmarshal", + Description: "Deserializes the input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a JSON string"), + ), + types.Named("y", types.A).Description("the term deserialized from `x`"), + ), + Categories: encoding, +} + +var JSONIsValid = &Builtin{ + Name: "json.is_valid", + Description: "Verifies the input string is a valid JSON document.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a JSON string"), + ), + types.Named("result", types.B).Description("`true` if `x` is valid JSON, `false` otherwise"), + ), + Categories: encoding, +} + +var Base64Encode = &Builtin{ + Name: "base64.encode", + Description: "Serializes the input string into base64 encoding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("base64 serialization of `x`"), + ), + Categories: encoding, +} + +var Base64Decode = &Builtin{ + Name: "base64.decode", + Description: "Deserializes the base64 encoded input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to decode"), + ), + types.Named("y", types.S).Description("base64 deserialization of `x`"), + ), + Categories: encoding, +} + +var Base64IsValid = &Builtin{ + Name: "base64.is_valid", + Description: "Verifies the input string is base64 encoded.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to check"), + ), + types.Named("result", types.B).Description("`true` if `x` is valid base64 encoded value, `false` otherwise"), + ), + Categories: encoding, +} + +var Base64UrlEncode = &Builtin{ + Name: "base64url.encode", + Description: "Serializes the input string 
into base64url encoding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("base64url serialization of `x`"), + ), + Categories: encoding, +} + +var Base64UrlEncodeNoPad = &Builtin{ + Name: "base64url.encode_no_pad", + Description: "Serializes the input string into base64url encoding without padding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("base64url serialization of `x`"), + ), + Categories: encoding, +} + +var Base64UrlDecode = &Builtin{ + Name: "base64url.decode", + Description: "Deserializes the base64url encoded input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to decode"), + ), + types.Named("y", types.S).Description("base64url deserialization of `x`"), + ), + Categories: encoding, +} + +var URLQueryDecode = &Builtin{ + Name: "urlquery.decode", + Description: "Decodes a URL-encoded input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the URL-encoded string"), + ), + types.Named("y", types.S).Description("URL-encoding deserialization of `x`"), + ), + Categories: encoding, +} + +var URLQueryEncode = &Builtin{ + Name: "urlquery.encode", + Description: "Encodes the input string into a URL-encoded string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the string to encode"), + ), + types.Named("y", types.S).Description("URL-encoding serialization of `x`"), + ), + Categories: encoding, +} + +var URLQueryEncodeObject = &Builtin{ + Name: "urlquery.encode_object", + Description: "Encodes the given object into a URL encoded query string.", + Decl: types.NewFunction( + types.Args( + types.Named("object", types.NewObject( + nil, + types.NewDynamicProperty( + types.S, + types.NewAny( + types.S, + types.NewArray(nil, types.S), + types.NewSet(types.S)), + ), + ), + ).Description("the object to encode"), + ), + types.Named("y", types.S).Description("the URL-encoded serialization of `object`"), + ), + Categories: encoding, +} + +var URLQueryDecodeObject = &Builtin{ + Name: "urlquery.decode_object", + Description: "Decodes the given URL query string into an object.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("the query string"), + ), + types.Named("object", types.NewObject(nil, types.NewDynamicProperty( + types.S, + types.NewArray(nil, types.S)))).Description("the resulting object"), + ), + Categories: encoding, +} + +var YAMLMarshal = &Builtin{ + Name: "yaml.marshal", + Description: "Serializes the input term to YAML.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("the term to serialize"), + ), + types.Named("y", types.S).Description("the YAML string representation of `x`"), + ), + Categories: encoding, +} + +var YAMLUnmarshal = &Builtin{ + Name: "yaml.unmarshal", + Description: "Deserializes the input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a YAML string"), + ), + types.Named("y", types.A).Description("the term deserialized from `x`"), + ), + Categories: encoding, +} + +// YAMLIsValid verifies the input string is a valid YAML document. 
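+// Editor's sketch (not part of the upstream OPA source): the encoding builtins
+// above compose in a single query; the snippet assumes the v1 rego package
+// import path (github.com/open-policy-agent/opa/v1/rego) and is illustrative
+// only, not compiled as part of this file.
+//
+//	func evalEncoding(ctx context.Context) (rego.ResultSet, error) {
+//		// base64.encode("hello") should yield "aGVsbG8=";
+//		// json.unmarshal("[1, 2, 3]") should yield the array [1, 2, 3].
+//		r := rego.New(rego.Query(`x := base64.encode("hello"); y := json.unmarshal("[1, 2, 3]")`))
+//		pq, err := r.PrepareForEval(ctx)
+//		if err != nil {
+//			return nil, err
+//		}
+//		return pq.Eval(ctx)
+//	}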
+var YAMLIsValid = &Builtin{ + Name: "yaml.is_valid", + Description: "Verifies the input string is a valid YAML document.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a YAML string"), + ), + types.Named("result", types.B).Description("`true` if `x` is valid YAML, `false` otherwise"), + ), + Categories: encoding, +} + +var HexEncode = &Builtin{ + Name: "hex.encode", + Description: "Serializes the input string using hex-encoding.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("string to encode"), + ), + types.Named("y", types.S).Description("serialization of `x` using hex-encoding"), + ), + Categories: encoding, +} + +var HexDecode = &Builtin{ + Name: "hex.decode", + Description: "Deserializes the hex-encoded input string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.S).Description("a hex-encoded string"), + ), + types.Named("y", types.S).Description("deserialized from `x`"), + ), + Categories: encoding, +} + +/** + * Tokens + */ +var tokensCat = category("tokens") + +var JWTDecode = &Builtin{ + Name: "io.jwt.decode", + Description: "Decodes a JSON Web Token and outputs it as an object.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token to decode"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.S, + }, nil)).Description("`[header, payload, sig]`, where `header` and `payload` are objects; `sig` is the hexadecimal representation of the signature on the token."), + ), + Categories: tokensCat, +} + +var JWTVerifyRS256 = &Builtin{ + Name: "io.jwt.verify_rs256", + Description: "Verifies if a RS256 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyRS384 = &Builtin{ + Name: "io.jwt.verify_rs384", + Description: "Verifies if a RS384 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyRS512 = &Builtin{ + Name: "io.jwt.verify_rs512", + Description: "Verifies if a RS512 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyPS256 = &Builtin{ + Name: "io.jwt.verify_ps256", + Description: "Verifies if a PS256 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", 
types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyPS384 = &Builtin{ + Name: "io.jwt.verify_ps384", + Description: "Verifies if a PS384 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyPS512 = &Builtin{ + Name: "io.jwt.verify_ps512", + Description: "Verifies if a PS512 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyES256 = &Builtin{ + Name: "io.jwt.verify_es256", + Description: "Verifies if a ES256 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyES384 = &Builtin{ + Name: "io.jwt.verify_es384", + Description: "Verifies if a ES384 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyES512 = &Builtin{ + Name: "io.jwt.verify_es512", + Description: "Verifies if a ES512 JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("certificate", types.S).Description("PEM encoded certificate, PEM encoded public key, or the JWK key (set) used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyHS256 = &Builtin{ + Name: "io.jwt.verify_hs256", + Description: "Verifies if a HS256 (secret) JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("secret", types.S).Description("plain text secret used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is 
valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyHS384 = &Builtin{ + Name: "io.jwt.verify_hs384", + Description: "Verifies if a HS384 (secret) JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("secret", types.S).Description("plain text secret used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +var JWTVerifyHS512 = &Builtin{ + Name: "io.jwt.verify_hs512", + Description: "Verifies if a HS512 (secret) JWT signature is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified"), + types.Named("secret", types.S).Description("plain text secret used to verify the signature"), + ), + types.Named("result", types.B).Description("`true` if the signature is valid, `false` otherwise"), + ), + Categories: tokensCat, +} + +// Marked non-deterministic because it relies on time internally. +var JWTDecodeVerify = &Builtin{ + Name: "io.jwt.decode_verify", + Description: `Verifies a JWT signature under parameterized constraints and decodes the claims if it is valid. +Supports the following algorithms: HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, ES512, PS256, PS384 and PS512.`, + Decl: types.NewFunction( + types.Args( + types.Named("jwt", types.S).Description("JWT token whose signature is to be verified and whose claims are to be checked"), + types.Named("constraints", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("claim verification constraints"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + }, nil)).Description("`[valid, header, payload]`: if the input token is verified and meets the requirements of `constraints` then `valid` is `true`; `header` and `payload` are objects containing the JOSE header and the JWT claim set; otherwise, `valid` is `false`, `header` and `payload` are `{}`"), + ), + Categories: tokensCat, + Nondeterministic: true, +} + +var tokenSign = category("tokensign") + +// Marked non-deterministic because it relies on RNG internally. +var JWTEncodeSignRaw = &Builtin{ + Name: "io.jwt.encode_sign_raw", + Description: "Encodes and optionally signs a JSON Web Token.", + Decl: types.NewFunction( + types.Args( + types.Named("headers", types.S).Description("JWS Protected Header"), + types.Named("payload", types.S).Description("JWS Payload"), + types.Named("key", types.S).Description("JSON Web Key (RFC7517)"), + ), + types.Named("output", types.S).Description("signed JWT"), + ), + Categories: tokenSign, + Nondeterministic: true, +} + +// Marked non-deterministic because it relies on RNG internally. +var JWTEncodeSign = &Builtin{ + Name: "io.jwt.encode_sign", + Description: "Encodes and optionally signs a JSON Web Token. 
Inputs are taken as objects, not encoded strings (see `io.jwt.encode_sign_raw`).", + Decl: types.NewFunction( + types.Args( + types.Named("headers", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Protected Header"), + types.Named("payload", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWS Payload"), + types.Named("key", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JSON Web Key (RFC7517)"), + ), + types.Named("output", types.S).Description("signed JWT"), + ), + Categories: tokenSign, + Nondeterministic: true, +} + +/** + * Time + */ + +// Marked non-deterministic because it relies on time directly. +var NowNanos = &Builtin{ + Name: "time.now_ns", + Description: "Returns the current time since epoch in nanoseconds.", + Decl: types.NewFunction( + nil, + types.Named("now", types.N).Description("nanoseconds since epoch"), + ), + Nondeterministic: true, +} + +var ParseNanos = &Builtin{ + Name: "time.parse_ns", + Description: "Returns the time in nanoseconds parsed from the string in the given format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", + Decl: types.NewFunction( + types.Args( + types.Named("layout", types.S).Description("format used for parsing, see the [Go `time` package documentation](https://golang.org/pkg/time/#Parse) for more details"), + types.Named("value", types.S).Description("input to parse according to `layout`"), + ), + types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), + ), +} + +var ParseRFC3339Nanos = &Builtin{ + Name: "time.parse_rfc3339_ns", + Description: "Returns the time in nanoseconds parsed from the string in RFC3339 format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", + Decl: types.NewFunction( + types.Args( + types.Named("value", types.S).Description("input string to parse in RFC3339 format"), + ), + types.Named("ns", types.N).Description("`value` in nanoseconds since epoch"), + ), +} + +var ParseDurationNanos = &Builtin{ + Name: "time.parse_duration_ns", + Description: "Returns the duration in nanoseconds represented by a string.", + Decl: types.NewFunction( + types.Args( + types.Named("duration", types.S).Description("a duration like \"3m\"; see the [Go `time` package documentation](https://golang.org/pkg/time/#ParseDuration) for more details"), + ), + types.Named("ns", types.N).Description("the `duration` in nanoseconds"), + ), +} + +var Format = &Builtin{ + Name: "time.format", + Description: "Returns the formatted timestamp for the nanoseconds since epoch.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + types.N, + types.NewArray([]types.Type{types.N, types.S}, nil), + types.NewArray([]types.Type{types.N, types.S, types.S}, nil), + )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string; or a three-element array of ns, timezone string and a layout string or golang defined formatting constant (see golang supported time formats)"), + ), + types.Named("formatted timestamp", types.S).Description("the formatted timestamp represented for the nanoseconds since the epoch in the supplied timezone (or UTC)"), + ), +} + +var Date = &Builtin{ + Name: "time.date", + Description: "Returns the `[year, month, day]` for the nanoseconds since epoch.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.NewAny( + 
types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
+ ),
+ types.Named("date", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).Description("an array of `year`, `month` (1-12), and `day` (1-31)"),
+ ),
+}
+
+var Clock = &Builtin{
+ Name: "time.clock",
+ Description: "Returns the `[hour, minute, second]` of the day for the nanoseconds since epoch.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N}, nil)).
+ Description("the `hour`, `minute` (0-59), and `second` (0-59) representing the time of day for the nanoseconds since epoch in the supplied timezone (or UTC)"),
+ ),
+}
+
+var Weekday = &Builtin{
+ Name: "time.weekday",
+ Description: "Returns the day of the week (Monday, Tuesday, ...) for the nanoseconds since epoch.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ )).Description("a number representing the nanoseconds since the epoch (UTC); or a two-element array of the nanoseconds, and a timezone string"),
+ ),
+ types.Named("day", types.S).Description("the weekday represented by `ns` nanoseconds since the epoch in the supplied timezone (or UTC)"),
+ ),
+}
+
+var AddDate = &Builtin{
+ Name: "time.add_date",
+ Description: "Returns the nanoseconds since epoch after adding years, months and days to nanoseconds. Month & day values outside their usual ranges after the operation will be normalized - for example, October 32 would become November 1. `undefined` if the result would be outside the valid time range that can fit within an `int64`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("ns", types.N).Description("nanoseconds since the epoch"),
+ types.Named("years", types.N).Description("number of years to add"),
+ types.Named("months", types.N).Description("number of months to add"),
+ types.Named("days", types.N).Description("number of days to add"),
+ ),
+ types.Named("output", types.N).Description("nanoseconds since the epoch representing the input time, with years, months and days added"),
+ ),
+}
+
+var Diff = &Builtin{
+ Name: "time.diff",
+ Description: "Returns the difference between two unix timestamps in nanoseconds (with optional timezone strings).",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("ns1", types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ )).Description("nanoseconds since the epoch; or a two-element array of the nanoseconds, and a timezone string"),
+ types.Named("ns2", types.NewAny(
+ types.N,
+ types.NewArray([]types.Type{types.N, types.S}, nil),
+ )).Description("nanoseconds since the epoch; or a two-element array of the nanoseconds, and a timezone string"),
+ ),
+ types.Named("output", types.NewArray([]types.Type{types.N, types.N, types.N, types.N, types.N, types.N}, nil)).Description("difference between `ns1` and `ns2` (in their supplied timezones, if supplied, or UTC) as array of numbers: `[years, months, days, hours, minutes, seconds]`"),
+ ),
+}
+
+/**
+ * Crypto.
+ */ + +var CryptoX509ParseCertificates = &Builtin{ + Name: "crypto.x509.parse_certificates", + Description: `Returns zero or more certificates from the given encoded string containing +DER certificate data. + +If the input is empty, the function will return null. The input string should be a list of one or more +concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, + Decl: types.NewFunction( + types.Args( + types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing one or more certificates or a PEM string of one or more certificates"), + ), + types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed X.509 certificates represented as objects"), + ), +} + +var CryptoX509ParseAndVerifyCertificates = &Builtin{ + Name: "crypto.x509.parse_and_verify_certificates", + Description: `Returns one or more certificates from the given string containing PEM +or base64 encoded DER certificates after verifying the supplied certificates form a complete +certificate chain back to a trusted root. + +The first certificate is treated as the root and the last is treated as the leaf, +with all others being treated as intermediates.`, + Decl: types.NewFunction( + types.Args( + types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), + }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), + ), +} + +var CryptoX509ParseAndVerifyCertificatesWithOptions = &Builtin{ + Name: "crypto.x509.parse_and_verify_certificates_with_options", + Description: `Returns one or more certificates from the given string containing PEM +or base64 encoded DER certificates after verifying the supplied certificates form a complete +certificate chain back to a trusted root. A config option passed as the second argument can +be used to configure the validation options used. + +The first certificate is treated as the root and the last is treated as the leaf, +with all others being treated as intermediates.`, + + Decl: types.NewFunction( + types.Args( + types.Named("certs", types.S).Description("base64 encoded DER or PEM data containing two or more certificates where the first is a root CA, the last is a leaf certificate, and all others are intermediate CAs"), + types.Named("options", types.NewObject( + nil, + types.NewDynamicProperty(types.S, types.A), + )).Description("object containing extra configs to verify the validity of certificates. `options` object supports four fields which maps to same fields in [x509.VerifyOptions struct](https://pkg.go.dev/crypto/x509#VerifyOptions). `DNSName`, `CurrentTime`: Nanoseconds since the Unix Epoch as a number, `MaxConstraintComparisons` and `KeyUsages`. 
`KeyUsages` is list and can have possible values as in: `\"KeyUsageAny\"`, `\"KeyUsageServerAuth\"`, `\"KeyUsageClientAuth\"`, `\"KeyUsageCodeSigning\"`, `\"KeyUsageEmailProtection\"`, `\"KeyUsageIPSECEndSystem\"`, `\"KeyUsageIPSECTunnel\"`, `\"KeyUsageIPSECUser\"`, `\"KeyUsageTimeStamping\"`, `\"KeyUsageOCSPSigning\"`, `\"KeyUsageMicrosoftServerGatedCrypto\"`, `\"KeyUsageNetscapeServerGatedCrypto\"`, `\"KeyUsageMicrosoftCommercialCodeSigning\"`, `\"KeyUsageMicrosoftKernelCodeSigning\"` "), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))), + }, nil)).Description("array of `[valid, certs]`: if the input certificate chain could be verified then `valid` is `true` and `certs` is an array of X.509 certificates represented as objects; if the input certificate chain could not be verified then `valid` is `false` and `certs` is `[]`"), + ), +} + +var CryptoX509ParseCertificateRequest = &Builtin{ + Name: "crypto.x509.parse_certificate_request", + Description: "Returns a PKCS #10 certificate signing request from the given PEM-encoded PKCS#10 certificate signing request.", + Decl: types.NewFunction( + types.Args( + types.Named("csr", types.S).Description("base64 string containing either a PEM encoded or DER CSR or a string containing a PEM CSR"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("X.509 CSR represented as an object"), + ), +} + +var CryptoX509ParseKeyPair = &Builtin{ + Name: "crypto.x509.parse_keypair", + Description: "Returns a valid key pair", + Decl: types.NewFunction( + types.Args( + types.Named("cert", types.S).Description("string containing PEM or base64 encoded DER certificates"), + types.Named("pem", types.S).Description("string containing PEM or base64 encoded DER keys"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("if key pair is valid, returns the tls.certificate(https://pkg.go.dev/crypto/tls#Certificate) as an object. If the key pair is invalid, nil and an error are returned."), + ), +} +var CryptoX509ParseRSAPrivateKey = &Builtin{ + Name: "crypto.x509.parse_rsa_private_key", + Description: "Returns a JWK for signing a JWT from the given PEM-encoded RSA private key.", + Decl: types.NewFunction( + types.Args( + types.Named("pem", types.S).Description("base64 string containing a PEM encoded RSA private key"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))).Description("JWK as an object"), + ), +} + +var CryptoParsePrivateKeys = &Builtin{ + Name: "crypto.parse_private_keys", + Description: `Returns zero or more private keys from the given encoded string containing DER certificate data. + +If the input is empty, the function will return null. The input string should be a list of one or more concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.`, + Decl: types.NewFunction( + types.Args( + types.Named("keys", types.S).Description("PEM encoded data containing one or more private keys as concatenated blocks. 
Optionally Base64 encoded."),
+ ),
+ types.Named("output", types.NewArray(nil, types.NewObject(nil, types.NewDynamicProperty(types.S, types.A)))).Description("parsed private keys represented as objects"),
+ ),
+}
+
+var CryptoMd5 = &Builtin{
+ Name: "crypto.md5",
+ Description: "Returns a string representing the input string hashed with the MD5 function.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ ),
+ types.Named("y", types.S).Description("MD5-hash of `x`"),
+ ),
+}
+
+var CryptoSha1 = &Builtin{
+ Name: "crypto.sha1",
+ Description: "Returns a string representing the input string hashed with the SHA1 function.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ ),
+ types.Named("y", types.S).Description("SHA1-hash of `x`"),
+ ),
+}
+
+var CryptoSha256 = &Builtin{
+ Name: "crypto.sha256",
+ Description: "Returns a string representing the input string hashed with the SHA256 function.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ ),
+ types.Named("y", types.S).Description("SHA256-hash of `x`"),
+ ),
+}
+
+var CryptoHmacMd5 = &Builtin{
+ Name: "crypto.hmac.md5",
+ Description: "Returns a string representing the MD5 HMAC of the input message using the input key.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ types.Named("key", types.S).Description("key to use"),
+ ),
+ types.Named("y", types.S).Description("MD5-HMAC of `x`"),
+ ),
+}
+
+var CryptoHmacSha1 = &Builtin{
+ Name: "crypto.hmac.sha1",
+ Description: "Returns a string representing the SHA1 HMAC of the input message using the input key.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ types.Named("key", types.S).Description("key to use"),
+ ),
+ types.Named("y", types.S).Description("SHA1-HMAC of `x`"),
+ ),
+}
+
+var CryptoHmacSha256 = &Builtin{
+ Name: "crypto.hmac.sha256",
+ Description: "Returns a string representing the SHA256 HMAC of the input message using the input key.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ types.Named("key", types.S).Description("key to use"),
+ ),
+ types.Named("y", types.S).Description("SHA256-HMAC of `x`"),
+ ),
+}
+
+var CryptoHmacSha512 = &Builtin{
+ Name: "crypto.hmac.sha512",
+ Description: "Returns a string representing the SHA512 HMAC of the input message using the input key.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.S).Description("input string"),
+ types.Named("key", types.S).Description("key to use"),
+ ),
+ types.Named("y", types.S).Description("SHA512-HMAC of `x`"),
+ ),
+}
+
+var CryptoHmacEqual = &Builtin{
+ Name: "crypto.hmac.equal",
+ Description: "Returns a boolean representing the result of comparing two MACs for equality without leaking timing information.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("mac1", types.S).Description("mac1 to compare"),
+ types.Named("mac2", types.S).Description("mac2 to compare"),
+ ),
+ types.Named("result", types.B).Description("`true` if the MACs are equal, `false` otherwise"),
+ ),
+}
+
+/**
+ * Graphs.
+ */
+var graphs = category("graph")
+
+var WalkBuiltin = &Builtin{
+ Name: "walk",
+ Relation: true,
+ Description: "Generates `[path, value]` tuples for all nested documents of `x` (recursively). Queries can use `walk` to traverse documents nested under `x`.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("x", types.A).Description("value to walk"),
+ ),
+ types.Named("output", types.NewArray(
+ []types.Type{
+ types.NewArray(nil, types.A),
+ types.A,
+ },
+ nil,
+ )).Description("pairs of `path` and `value`: `path` is an array representing the pointer to `value` in `x`. If `path` is assigned a wildcard (`_`), the `walk` function will skip path creation entirely for faster evaluation."),
+ ),
+ Categories: graphs,
+}
+
+var ReachableBuiltin = &Builtin{
+ Name: "graph.reachable",
+ Description: "Computes the set of reachable nodes in the graph from a set of starting nodes.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("graph", types.NewObject(
+ nil,
+ types.NewDynamicProperty(
+ types.A,
+ types.NewAny(
+ types.NewSet(types.A),
+ types.NewArray(nil, types.A)),
+ )),
+ ).Description("object containing a set or array of neighboring vertices"),
+ types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("set or array of root vertices"),
+ ),
+ types.Named("output", types.NewSet(types.A)).Description("set of vertices reachable from the `initial` vertices in the directed `graph`"),
+ ),
+}
+
+var ReachablePathsBuiltin = &Builtin{
+ Name: "graph.reachable_paths",
+ Description: "Computes the set of reachable paths in the graph from a set of starting nodes.",
+ Decl: types.NewFunction(
+ types.Args(
+ types.Named("graph", types.NewObject(
+ nil,
+ types.NewDynamicProperty(
+ types.A,
+ types.NewAny(
+ types.NewSet(types.A),
+ types.NewArray(nil, types.A)),
+ )),
+ ).Description("object containing a set or array of neighboring vertices"),
+ types.Named("initial", types.NewAny(types.NewSet(types.A), types.NewArray(nil, types.A))).Description("set or array of root vertices to start the paths from"),
+ ), + types.Named("output", types.NewSet(types.NewArray(nil, types.A))).Description("paths reachable from the `initial` vertices in the directed `graph`"), + ), +} + +/** + * Type + */ +var typesCat = category("types") + +var IsNumber = &Builtin{ + Name: "is_number", + Description: "Returns `true` if the input value is a number.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is a number, `false` otherwise."), + ), + Categories: typesCat, +} + +var IsString = &Builtin{ + Name: "is_string", + Description: "Returns `true` if the input value is a string.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is a string, `false` otherwise."), + ), + Categories: typesCat, +} + +var IsBoolean = &Builtin{ + Name: "is_boolean", + Description: "Returns `true` if the input value is a boolean.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is an boolean, `false` otherwise."), + ), + Categories: typesCat, +} + +var IsArray = &Builtin{ + Name: "is_array", + Description: "Returns `true` if the input value is an array.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is an array, `false` otherwise."), + ), + Categories: typesCat, +} + +var IsSet = &Builtin{ + Name: "is_set", + Description: "Returns `true` if the input value is a set.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is a set, `false` otherwise."), + ), + Categories: typesCat, +} + +var IsObject = &Builtin{ + Name: "is_object", + Description: "Returns true if the input value is an object", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is an object, `false` otherwise."), + ), + Categories: typesCat, +} + +var IsNull = &Builtin{ + Name: "is_null", + Description: "Returns `true` if the input value is null.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("result", types.B).Description("`true` if `x` is null, `false` otherwise."), + ), + Categories: typesCat, +} + +/** + * Type Name + */ + +// TypeNameBuiltin returns the type of the input. +var TypeNameBuiltin = &Builtin{ + Name: "type_name", + Description: "Returns the type of its input value.", + Decl: types.NewFunction( + types.Args( + types.Named("x", types.A).Description("input value"), + ), + types.Named("type", types.S).Description(`one of "null", "boolean", "number", "string", "array", "object", "set"`), + ), + Categories: typesCat, +} + +/** + * HTTP Request + */ + +// Marked non-deterministic because HTTP request results can be non-deterministic. +var HTTPSend = &Builtin{ + Name: "http.send", + Description: "Returns a HTTP response to the given HTTP request.", + Decl: types.NewFunction( + types.Args( + types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("the HTTP request object"), + ), + types.Named("response", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))). 
+ Description("the HTTP response object"), + ), + Nondeterministic: true, +} + +/** + * GraphQL + */ + +// GraphQLParse returns a pair of AST objects from parsing/validation. +var GraphQLParse = &Builtin{ + Name: "graphql.parse", + Description: "Returns AST objects for a given GraphQL query and schema after validating the query against the schema. Returns undefined if errors were encountered during parsing or validation. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL query"), + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL schema"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + }, nil)).Description("`output` is of the form `[query_ast, schema_ast]`. If the GraphQL query is valid given the provided schema, then `query_ast` and `schema_ast` are objects describing the ASTs for the query and schema."), + ), +} + +// GraphQLParseAndVerify returns a boolean and a pair of AST object from parsing/validation. +var GraphQLParseAndVerify = &Builtin{ + Name: "graphql.parse_and_verify", + Description: "Returns a boolean indicating success or failure alongside the parsed ASTs for a given GraphQL query and schema after validating the query against the schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL query"), + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL schema"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + }, nil)).Description(" `output` is of the form `[valid, query_ast, schema_ast]`. If the query is valid given the provided schema, then `valid` is `true`, and `query_ast` and `schema_ast` are objects describing the ASTs for the GraphQL query and schema. Otherwise, `valid` is `false` and `query_ast` and `schema_ast` are `{}`."), + ), +} + +// GraphQLParseQuery parses the input GraphQL query and returns a JSON +// representation of its AST. +var GraphQLParseQuery = &Builtin{ + Name: "graphql.parse_query", + Description: "Returns an AST object for a GraphQL query.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.S).Description("GraphQL query string"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL query."), + ), +} + +// GraphQLParseSchema parses the input GraphQL schema and returns a JSON +// representation of its AST. 
+var GraphQLParseSchema = &Builtin{ + Name: "graphql.parse_schema", + Description: "Returns an AST object for a GraphQL schema.", + Decl: types.NewFunction( + types.Args( + types.Named("schema", types.S).Description("GraphQL schema string"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))).Description("AST object for the GraphQL schema."), + ), +} + +// GraphQLIsValid returns true if a GraphQL query is valid with a given +// schema, and returns false for all other inputs. +var GraphQLIsValid = &Builtin{ + Name: "graphql.is_valid", + Description: "Checks that a GraphQL query is valid against a given schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", + Decl: types.NewFunction( + types.Args( + types.Named("query", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL query"), + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the GraphQL schema"), + ), + types.Named("output", types.B).Description("`true` if the query is valid under the given schema. `false` otherwise."), + ), +} + +// GraphQLSchemaIsValid returns true if the input is valid GraphQL schema, +// and returns false for all other inputs. +var GraphQLSchemaIsValid = &Builtin{ + Name: "graphql.schema_is_valid", + Description: "Checks that the input is a valid GraphQL schema. The schema can be either a GraphQL string or an AST object from the other GraphQL builtin functions.", + Decl: types.NewFunction( + types.Args( + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the schema to verify"), + ), + types.Named("output", types.B).Description("`true` if the schema is a valid GraphQL schema. `false` otherwise."), + ), +} + +/** + * JSON Schema + */ + +// JSONSchemaVerify returns empty string if the input is valid JSON schema +// and returns error string for all other inputs. +var JSONSchemaVerify = &Builtin{ + Name: "json.verify_schema", + Description: "Checks that the input is a valid JSON schema object. The schema can be either a JSON string or an JSON object.", + Decl: types.NewFunction( + types.Args( + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("the schema to verify"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewAny(types.S, types.Null{}), + }, nil)). + Description("`output` is of the form `[valid, error]`. If the schema is valid, then `valid` is `true`, and `error` is `null`. Otherwise, `valid` is `false` and `error` is a string describing the error."), + ), + Categories: objectCat, +} + +// JSONMatchSchema returns empty array if the document matches the JSON schema, +// and returns non-empty array with error objects otherwise. +var JSONMatchSchema = &Builtin{ + Name: "json.match_schema", + Description: "Checks that the document matches the JSON schema.", + Decl: types.NewFunction( + types.Args( + types.Named("document", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). + Description("document to verify by schema"), + types.Named("schema", types.NewAny(types.S, types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)))). 
+ Description("schema to verify document by"), + ), + types.Named("output", types.NewArray([]types.Type{ + types.B, + types.NewArray( + nil, types.NewObject( + []*types.StaticProperty{ + {Key: "error", Value: types.S}, + {Key: "type", Value: types.S}, + {Key: "field", Value: types.S}, + {Key: "desc", Value: types.S}, + }, + nil, + ), + ), + }, nil)). + Description("`output` is of the form `[match, errors]`. If the document is valid given the schema, then `match` is `true`, and `errors` is an empty array. Otherwise, `match` is `false` and `errors` is an array of objects describing the error(s)."), + ), + Categories: objectCat, +} + +/** + * Cloud Provider Helper Functions + */ +var providersAWSCat = category("providers.aws") + +var ProvidersAWSSignReqObj = &Builtin{ + Name: "providers.aws.sign_req", + Description: "Signs an HTTP request object for Amazon Web Services. Currently implements [AWS Signature Version 4 request signing](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) by the `Authorization` header method.", + Decl: types.NewFunction( + types.Args( + types.Named("request", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("HTTP request object"), + types.Named("aws_config", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("AWS configuration object"), + types.Named("time_ns", types.N).Description("nanoseconds since the epoch"), + ), + types.Named("signed_request", types.NewObject(nil, types.NewDynamicProperty(types.A, types.A))). + Description("HTTP request object with `Authorization` header"), + ), + Categories: providersAWSCat, +} + +/** + * Rego + */ + +var RegoParseModule = &Builtin{ + Name: "rego.parse_module", + Description: "Parses the input Rego string and returns an object representation of the AST.", + Decl: types.NewFunction( + types.Args( + types.Named("filename", types.S).Description("file name to attach to AST nodes' locations"), + types.Named("rego", types.S).Description("Rego module"), + ), + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("AST object for the Rego module"), + ), +} + +var RegoMetadataChain = &Builtin{ + Name: "rego.metadata.chain", + Description: `Returns the chain of metadata for the active rule. +Ordered starting at the active rule, going outward to the most distant node in its package ancestry. +A chain entry is a JSON document with two members: "path", an array representing the path of the node; and "annotations", a JSON document containing the annotations declared for the node. +The first entry in the chain always points to the active rule, even if it has no declared annotations (in which case the "annotations" member is not present).`, + Decl: types.NewFunction( + types.Args(), + types.Named("chain", types.NewArray(nil, types.A)).Description("each array entry represents a node in the path ancestry (chain) of the active rule that also has declared annotations"), + ), +} + +// RegoMetadataRule returns the metadata for the active rule +var RegoMetadataRule = &Builtin{ + Name: "rego.metadata.rule", + Description: "Returns annotations declared for the active rule and using the _rule_ scope.", + Decl: types.NewFunction( + types.Args(), + types.Named("output", types.A).Description("\"rule\" scope annotations for this rule; empty object if no annotations exist"), + ), +} + +/** + * OPA + */ + +// Marked non-deterministic because of unpredictable config/environment-dependent results. 
+var OPARuntime = &Builtin{ + Name: "opa.runtime", + Description: "Returns an object that describes the runtime environment where OPA is deployed.", + Decl: types.NewFunction( + nil, + types.Named("output", types.NewObject(nil, types.NewDynamicProperty(types.S, types.A))). + Description("includes a `config` key if OPA was started with a configuration file; an `env` key containing the environment variables that the OPA process was started with; includes `version` and `commit` keys containing the version and build commit of OPA."), + ), + Nondeterministic: true, +} + +/** + * Trace + */ +var tracing = category("tracing") + +var Trace = &Builtin{ + Name: "trace", + Description: "Emits `note` as a `Note` event in the query explanation. Query explanations show the exact expressions evaluated by OPA during policy execution. For example, `trace(\"Hello There!\")` includes `Note \"Hello There!\"` in the query explanation. To include variables in the message, use `sprintf`. For example, `person := \"Bob\"; trace(sprintf(\"Hello There! %v\", [person]))` will emit `Note \"Hello There! Bob\"` inside of the explanation.", + Decl: types.NewFunction( + types.Args( + types.Named("note", types.S).Description("the note to include"), + ), + types.Named("result", types.B).Description("always `true`"), + ), + Categories: tracing, +} + +/** + * Glob + */ + +var GlobMatch = &Builtin{ + Name: "glob.match", + Description: "Parses and matches strings against the glob notation. Not to be confused with `regex.globs_match`.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("glob pattern"), + types.Named("delimiters", types.NewAny( + types.NewArray(nil, types.S), + types.NewNull(), + )).Description("glob pattern delimiters, e.g. `[\".\", \":\"]`, defaults to `[\".\"]` if unset. If `delimiters` is `null`, glob match without delimiter."), + types.Named("match", types.S).Description("string to match against `pattern`"), + ), + types.Named("result", types.B).Description("true if `match` can be found in `pattern` which is separated by `delimiters`"), + ), +} + +var GlobQuoteMeta = &Builtin{ + Name: "glob.quote_meta", + Description: "Returns a string which represents a version of the pattern where all asterisks have been escaped.", + Decl: types.NewFunction( + types.Args( + types.Named("pattern", types.S).Description("glob pattern"), + ), + types.Named("output", types.S).Description("the escaped string of `pattern`"), + ), + // TODO(sr): example for this was: Calling ``glob.quote_meta("*.github.com", output)`` returns ``\\*.github.com`` as ``output``. +} + +/** + * Networking + */ + +var NetCIDRIntersects = &Builtin{ + Name: "net.cidr_intersects", + Description: "Checks if a CIDR intersects with another CIDR (e.g. `192.168.0.0/16` overlaps with `192.168.1.0/24`). 
Supports both IPv4 and IPv6 notations.", + Decl: types.NewFunction( + types.Args( + types.Named("cidr1", types.S).Description("first CIDR"), + types.Named("cidr2", types.S).Description("second CIDR"), + ), + types.Named("result", types.B).Description("`true` if `cidr1` intersects with `cidr2`"), + ), +} + +var NetCIDRExpand = &Builtin{ + Name: "net.cidr_expand", + Description: "Expands CIDR to set of hosts (e.g., `net.cidr_expand(\"192.168.0.0/30\")` generates 4 hosts: `{\"192.168.0.0\", \"192.168.0.1\", \"192.168.0.2\", \"192.168.0.3\"}`).", + Decl: types.NewFunction( + types.Args( + types.Named("cidr", types.S).Description("CIDR to expand"), + ), + types.Named("hosts", types.NewSet(types.S)).Description("set of IP addresses the CIDR `cidr` expands to"), + ), +} + +var NetCIDRContains = &Builtin{ + Name: "net.cidr_contains", + Description: "Checks if a CIDR or IP is contained within another CIDR. `output` is `true` if `cidr_or_ip` (e.g. `127.0.0.64/26` or `127.0.0.1`) is contained within `cidr` (e.g. `127.0.0.1/24`) and `false` otherwise. Supports both IPv4 and IPv6 notations.", + Decl: types.NewFunction( + types.Args( + types.Named("cidr", types.S).Description("CIDR to check against"), + types.Named("cidr_or_ip", types.S).Description("CIDR or IP to check"), + ), + types.Named("result", types.B).Description("`true` if `cidr_or_ip` is contained within `cidr`"), + ), +} + +var NetCIDRContainsMatches = &Builtin{ + Name: "net.cidr_contains_matches", + Description: "Checks if collections of cidrs or ips are contained within another collection of cidrs and returns matches. " + + "This function is similar to `net.cidr_contains` except it allows callers to pass collections of CIDRs or IPs as arguments and returns the matches (as opposed to a boolean result indicating a match between two CIDRs/IPs).", + Decl: types.NewFunction( + types.Args( + types.Named("cidrs", netCidrContainsMatchesOperandType).Description("CIDRs to check against"), + types.Named("cidrs_or_ips", netCidrContainsMatchesOperandType).Description("CIDRs or IPs to check"), + ), + types.Named("output", types.NewSet(types.NewArray([]types.Type{types.A, types.A}, nil))).Description("tuples identifying matches where `cidrs_or_ips` are contained within `cidrs`"), + ), +} + +var NetCIDRMerge = &Builtin{ + Name: "net.cidr_merge", + Description: "Merges IP addresses and subnets into the smallest possible list of CIDRs (e.g., `net.cidr_merge([\"192.0.128.0/24\", \"192.0.129.0/24\"])` generates `{\"192.0.128.0/23\"}`." + + `This function merges adjacent subnets where possible, those contained within others and also removes any duplicates. +Supports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g. 
"/128").`, + Decl: types.NewFunction( + types.Args( + types.Named("addrs", types.NewAny( + types.NewArray(nil, types.NewAny(types.S)), + types.NewSet(types.S), + )).Description("CIDRs or IP addresses"), + ), + types.Named("output", types.NewSet(types.S)).Description("smallest possible set of CIDRs obtained after merging the provided list of IP addresses and subnets in `addrs`"), + ), +} + +var NetCIDRIsValid = &Builtin{ + Name: "net.cidr_is_valid", + Description: "Parses an IPv4/IPv6 CIDR and returns a boolean indicating if the provided CIDR is valid.", + Decl: types.NewFunction( + types.Args( + types.Named("cidr", types.S).Description("CIDR to validate"), + ), + types.Named("result", types.B).Description("`true` if `cidr` is a valid CIDR"), + ), +} + +var netCidrContainsMatchesOperandType = types.NewAny( + types.S, + types.NewArray(nil, types.NewAny( + types.S, + types.NewArray(nil, types.A), + )), + types.NewSet(types.NewAny( + types.S, + types.NewArray(nil, types.A), + )), + types.NewObject(nil, types.NewDynamicProperty( + types.S, + types.NewAny( + types.S, + types.NewArray(nil, types.A), + ), + )), +) + +// Marked non-deterministic because DNS resolution results can be non-deterministic. +var NetLookupIPAddr = &Builtin{ + Name: "net.lookup_ip_addr", + Description: "Returns the set of IP addresses (both v4 and v6) that the passed-in `name` resolves to using the standard name resolution mechanisms available.", + Decl: types.NewFunction( + types.Args( + types.Named("name", types.S).Description("domain name to resolve"), + ), + types.Named("addrs", types.NewSet(types.S)).Description("IP addresses (v4 and v6) that `name` resolves to"), + ), + Nondeterministic: true, +} + +/** + * Semantic Versions + */ + +var SemVerIsValid = &Builtin{ + Name: "semver.is_valid", + Description: "Validates that the input is a valid SemVer string.", + Decl: types.NewFunction( + types.Args( + types.Named("vsn", types.A).Description("input to validate"), + ), + types.Named("result", types.B).Description("`true` if `vsn` is a valid SemVer; `false` otherwise"), + ), +} + +var SemVerCompare = &Builtin{ + Name: "semver.compare", + Description: "Compares valid SemVer formatted version strings.", + Decl: types.NewFunction( + types.Args( + types.Named("a", types.S).Description("first version string"), + types.Named("b", types.S).Description("second version string"), + ), + types.Named("result", types.N).Description("`-1` if `a < b`; `1` if `a > b`; `0` if `a == b`"), + ), +} + +/** + * Printing + */ + +// Print is a special built-in function that writes zero or more operands +// to a message buffer. The caller controls how the buffer is displayed. The +// operands may be of any type. Furthermore, unlike other built-in functions, +// undefined operands DO NOT cause the print() function to fail during +// evaluation. +var Print = &Builtin{ + Name: "print", + Decl: types.NewVariadicFunction(nil, types.A, nil), +} + +// InternalPrint represents the internal implementation of the print() function. +// The compiler rewrites print() calls to refer to the internal implementation. +var InternalPrint = &Builtin{ + Name: "internal.print", + Decl: types.NewFunction([]types.Type{types.NewArray(nil, types.NewSet(types.A))}, nil), +} + +/** + * Deprecated built-ins. + */ + +// SetDiff has been replaced by the minus built-in. 
+var SetDiff = &Builtin{ + Name: "set_diff", + Decl: types.NewFunction( + types.Args( + types.NewSet(types.A), + types.NewSet(types.A), + ), + types.NewSet(types.A), + ), + deprecated: true, +} + +// NetCIDROverlap has been replaced by the `net.cidr_contains` built-in. +var NetCIDROverlap = &Builtin{ + Name: "net.cidr_overlap", + Decl: types.NewFunction( + types.Args( + types.S, + types.S, + ), + types.B, + ), + deprecated: true, +} + +// CastArray checks the underlying type of the input. If it is array or set, an array +// containing the values is returned. If it is not an array, an error is thrown. +var CastArray = &Builtin{ + Name: "cast_array", + Decl: types.NewFunction( + types.Args(types.A), + types.NewArray(nil, types.A), + ), + deprecated: true, +} + +// CastSet checks the underlying type of the input. +// If it is a set, the set is returned. +// If it is an array, the array is returned in set form (all duplicates removed) +// If neither, an error is thrown +var CastSet = &Builtin{ + Name: "cast_set", + Decl: types.NewFunction( + types.Args(types.A), + types.NewSet(types.A), + ), + deprecated: true, +} + +// CastString returns input if it is a string; if not returns error. +// For formatting variables, see sprintf +var CastString = &Builtin{ + Name: "cast_string", + Decl: types.NewFunction( + types.Args(types.A), + types.S, + ), + deprecated: true, +} + +// CastBoolean returns input if it is a boolean; if not returns error. +var CastBoolean = &Builtin{ + Name: "cast_boolean", + Decl: types.NewFunction( + types.Args(types.A), + types.B, + ), + deprecated: true, +} + +// CastNull returns null if input is null; if not returns error. +var CastNull = &Builtin{ + Name: "cast_null", + Decl: types.NewFunction( + types.Args(types.A), + types.NewNull(), + ), + deprecated: true, +} + +// CastObject returns the given object if it is null; throws an error otherwise +var CastObject = &Builtin{ + Name: "cast_object", + Decl: types.NewFunction( + types.Args(types.A), + types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), + ), + deprecated: true, +} + +// RegexMatchDeprecated declares `re_match` which has been deprecated. Use `regex.match` instead. +var RegexMatchDeprecated = &Builtin{ + Name: "re_match", + Decl: types.NewFunction( + types.Args( + types.S, + types.S, + ), + types.B, + ), + deprecated: true, +} + +// All takes a list and returns true if all of the items +// are true. A collection of length 0 returns true. +var All = &Builtin{ + Name: "all", + Decl: types.NewFunction( + types.Args( + types.NewAny( + types.NewSet(types.A), + types.NewArray(nil, types.A), + ), + ), + types.B, + ), + deprecated: true, +} + +// Any takes a collection and returns true if any of the items +// is true. A collection of length 0 returns false. +var Any = &Builtin{ + Name: "any", + Decl: types.NewFunction( + types.Args( + types.NewAny( + types.NewSet(types.A), + types.NewArray(nil, types.A), + ), + ), + types.B, + ), + deprecated: true, +} + +// Builtin represents a built-in function supported by OPA. Every built-in +// function is uniquely identified by a name. +type Builtin struct { + Name string `json:"name"` // Unique name of built-in function, e.g., (arg1,arg2,...,argN) + Description string `json:"description,omitempty"` // Description of what the built-in function does. + + // Categories of the built-in function. Omitted for namespaced + // built-ins, i.e. "array.concat" is taken to be of the "array" category. + // "minus" for example, is part of two categories: numbers and sets. 
(NOTE(sr): aspirational) + Categories []string `json:"categories,omitempty"` + + Decl *types.Function `json:"decl"` // Built-in function type declaration. + Infix string `json:"infix,omitempty"` // Unique name of infix operator. Default should be unset. + Relation bool `json:"relation,omitempty"` // Indicates if the built-in acts as a relation. + deprecated bool // Indicates if the built-in has been deprecated. + Nondeterministic bool `json:"nondeterministic,omitempty"` // Indicates if the built-in returns non-deterministic results. +} + +// category is a helper for specifying a Builtin's Categories +func category(cs ...string) []string { + return cs +} + +// Minimal returns a shallow copy of b with the descriptions and categories and +// named arguments stripped out. +func (b *Builtin) Minimal() *Builtin { + cpy := *b + fargs := b.Decl.FuncArgs() + if fargs.Variadic != nil { + cpy.Decl = types.NewVariadicFunction(fargs.Args, fargs.Variadic, b.Decl.Result()) + } else { + cpy.Decl = types.NewFunction(fargs.Args, b.Decl.Result()) + } + cpy.Categories = nil + cpy.Description = "" + return &cpy +} + +// IsDeprecated returns true if the Builtin function is deprecated and will be removed in a future release. +func (b *Builtin) IsDeprecated() bool { + return b.deprecated +} + +// IsNondeterministic returns true if the Builtin function returns non-deterministic results. +func (b *Builtin) IsNondeterministic() bool { + return b.Nondeterministic +} + +// Expr creates a new expression for the built-in with the given operands. +func (b *Builtin) Expr(operands ...*Term) *Expr { + ts := make([]*Term, len(operands)+1) + ts[0] = NewTerm(b.Ref()) + for i := range operands { + ts[i+1] = operands[i] + } + return &Expr{ + Terms: ts, + } +} + +// Call creates a new term for the built-in with the given operands. +func (b *Builtin) Call(operands ...*Term) *Term { + call := make(Call, len(operands)+1) + call[0] = NewTerm(b.Ref()) + for i := range operands { + call[i+1] = operands[i] + } + return NewTerm(call) +} + +// Ref returns a Ref that refers to the built-in function. +func (b *Builtin) Ref() Ref { + parts := strings.Split(b.Name, ".") + ref := make(Ref, len(parts)) + ref[0] = VarTerm(parts[0]) + for i := 1; i < len(parts); i++ { + ref[i] = StringTerm(parts[i]) + } + return ref +} + +// IsTargetPos returns true if a variable in the i-th position will be bound by +// evaluating the call expression. +func (b *Builtin) IsTargetPos(i int) bool { + return b.Decl.Arity() == i +} + +func init() { + BuiltinMap = map[string]*Builtin{} + for _, b := range DefaultBuiltins { + RegisterBuiltin(b) + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go new file mode 100644 index 000000000..e7d561d9e --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/capabilities.go @@ -0,0 +1,267 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file.
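Before the new capabilities.go file begins below, here is a small illustrative sketch (not part of the diff) of how the Builtin helpers defined above are used through the public ast package; the import path is the one this vendor tree already uses.

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/v1/ast"
)

func main() {
    bi := ast.GlobMatch // one of the Builtin values declared above

    // Ref() turns the dotted name into a reference: a Var head plus String terms.
    fmt.Println(bi.Ref())                // glob.match
    fmt.Println(bi.Decl.Arity())         // 3
    fmt.Println(bi.IsNondeterministic()) // false

    // Call() builds a call term from operands, useful when constructing ASTs
    // programmatically; printing it renders the call as Rego text.
    call := bi.Call(ast.StringTerm("*.example.com"), ast.NullTerm(), ast.StringTerm("api.example.com"))
    fmt.Println(call)
}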
+ +package ast + +import ( + "bytes" + _ "embed" + "encoding/json" + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/open-policy-agent/opa/internal/semver" + "github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities" + caps "github.com/open-policy-agent/opa/v1/capabilities" + "github.com/open-policy-agent/opa/v1/util" +) + +// VersionIndex contains an index from built-in function name, language feature, +// and future rego keyword to version number. During the build, this is used to +// create an index of the minimum version required for the built-in/feature/kw. +type VersionIndex struct { + Builtins map[string]semver.Version `json:"builtins"` + Features map[string]semver.Version `json:"features"` + Keywords map[string]semver.Version `json:"keywords"` +} + +// NOTE(tsandall): this file is generated by internal/cmd/genversionindex/main.go +// and run as part of go:generate. We generate the version index as part of the +// build process because it's relatively expensive to build (it takes ~500ms on +// my machine) and never changes. +// +//go:embed version_index.json +var versionIndexBs []byte + +var minVersionIndex = func() VersionIndex { + var vi VersionIndex + err := json.Unmarshal(versionIndexBs, &vi) + if err != nil { + panic(err) + } + return vi +}() + +// In the compiler, we used this to check that we're OK working with ref heads. +// If this isn't present, we'll fail. This is to ensure that older versions of +// OPA can work with policies that we're compiling -- if they don't know ref +// heads, they wouldn't be able to parse them. +const FeatureRefHeadStringPrefixes = "rule_head_ref_string_prefixes" +const FeatureRefHeads = "rule_head_refs" +const FeatureRegoV1 = "rego_v1" +const FeatureRegoV1Import = "rego_v1_import" + +// Capabilities defines a structure containing data that describes the capabilities +// or features supported by a particular version of OPA. +type Capabilities struct { + Builtins []*Builtin `json:"builtins,omitempty"` + FutureKeywords []string `json:"future_keywords,omitempty"` + WasmABIVersions []WasmABIVersion `json:"wasm_abi_versions,omitempty"` + + // Features is a bit of a mixed bag for checking that an older version of OPA + // is able to do what needs to be done. + // TODO(sr): find better words ^^ + Features []string `json:"features,omitempty"` + + // allow_net is an array of hostnames or IP addresses, that an OPA instance is + // allowed to connect to. + // If omitted, ANY host can be connected to. If empty, NO host can be connected to. + // As of now, this only controls fetching remote refs for using JSON Schemas in + // the type checker. + // TODO(sr): support ports to further restrict connection peers + // TODO(sr): support restricting `http.send` using the same mechanism (see https://github.com/open-policy-agent/opa/issues/3665) + AllowNet []string `json:"allow_net,omitempty"` +} + +// WasmABIVersion captures the Wasm ABI version. Its `Minor` version indicates +// backwards-compatible changes.
+type WasmABIVersion struct { + Version int `json:"version"` + Minor int `json:"minor_version"` +} + +type CapabilitiesOptions struct { + regoVersion RegoVersion +} + +func newCapabilitiesOptions(opts []CapabilitiesOption) CapabilitiesOptions { + co := CapabilitiesOptions{} + for _, opt := range opts { + opt(&co) + } + return co +} + +type CapabilitiesOption func(*CapabilitiesOptions) + +func CapabilitiesRegoVersion(regoVersion RegoVersion) CapabilitiesOption { + return func(o *CapabilitiesOptions) { + o.regoVersion = regoVersion + } +} + +// CapabilitiesForThisVersion returns the capabilities of this version of OPA. +func CapabilitiesForThisVersion(opts ...CapabilitiesOption) *Capabilities { + co := newCapabilitiesOptions(opts) + + f := &Capabilities{} + + for _, vers := range capabilities.ABIVersions() { + f.WasmABIVersions = append(f.WasmABIVersions, WasmABIVersion{Version: vers[0], Minor: vers[1]}) + } + + f.Builtins = make([]*Builtin, len(Builtins)) + copy(f.Builtins, Builtins) + sort.Slice(f.Builtins, func(i, j int) bool { + return f.Builtins[i].Name < f.Builtins[j].Name + }) + + if co.regoVersion == RegoV0 || co.regoVersion == RegoV0CompatV1 { + for kw := range allFutureKeywords { + f.FutureKeywords = append(f.FutureKeywords, kw) + } + + f.Features = []string{ + FeatureRefHeadStringPrefixes, + FeatureRefHeads, + FeatureRegoV1Import, + } + } else { + for kw := range futureKeywords { + f.FutureKeywords = append(f.FutureKeywords, kw) + } + + f.Features = []string{ + FeatureRegoV1, + } + } + + sort.Strings(f.FutureKeywords) + sort.Strings(f.Features) + + return f +} + +// LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r. +func LoadCapabilitiesJSON(r io.Reader) (*Capabilities, error) { + d := util.NewJSONDecoder(r) + var c Capabilities + return &c, d.Decode(&c) +} + +// LoadCapabilitiesVersion loads a JSON serialized capabilities structure from the specific version. +func LoadCapabilitiesVersion(version string) (*Capabilities, error) { + cvs, err := LoadCapabilitiesVersions() + if err != nil { + return nil, err + } + + for _, cv := range cvs { + if cv == version { + cont, err := caps.FS.ReadFile(cv + ".json") + if err != nil { + return nil, err + } + + return LoadCapabilitiesJSON(bytes.NewReader(cont)) + } + + } + return nil, fmt.Errorf("no capabilities version found %v", version) +} + +// LoadCapabilitiesFile loads a JSON serialized capabilities structure from a file. +func LoadCapabilitiesFile(file string) (*Capabilities, error) { + fd, err := os.Open(file) + if err != nil { + return nil, err + } + defer fd.Close() + return LoadCapabilitiesJSON(fd) +} + +// LoadCapabilitiesVersions loads all capabilities versions +func LoadCapabilitiesVersions() ([]string, error) { + ents, err := caps.FS.ReadDir(".") + if err != nil { + return nil, err + } + + capabilitiesVersions := make([]string, 0, len(ents)) + for _, ent := range ents { + capabilitiesVersions = append(capabilitiesVersions, strings.Replace(ent.Name(), ".json", "", 1)) + } + return capabilitiesVersions, nil +} + +// MinimumCompatibleVersion returns the minimum compatible OPA version based on +// the built-ins, features, and keywords in c. 
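Everything needed to inspect capabilities is exported, so a consumer can, for example, compare the running version's capabilities with an older bundled release. This is an illustrative sketch; the only assumption beyond the functions defined above is that a capabilities file for the named release ships with OPA.

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/v1/ast"
)

func main() {
    caps := ast.CapabilitiesForThisVersion()
    fmt.Println("builtins:", len(caps.Builtins))
    fmt.Println("has rego_v1 feature:", caps.ContainsFeature(ast.FeatureRegoV1))

    // Oldest OPA release able to evaluate policies that use everything in caps.
    if v, ok := caps.MinimumCompatibleVersion(); ok {
        fmt.Println("minimum compatible OPA version:", v)
    }

    // Capabilities can also be pinned to an older release (assuming its
    // capabilities file is bundled, e.g. "v0.68.0").
    old, err := ast.LoadCapabilitiesVersion("v0.68.0")
    if err != nil {
        panic(err)
    }
    fmt.Println("v0.68.0 builtins:", len(old.Builtins))
}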
+func (c *Capabilities) MinimumCompatibleVersion() (string, bool) { + + var maxVersion semver.Version + + // this is the oldest OPA release that includes capabilities + if err := maxVersion.Set("0.17.0"); err != nil { + panic("unreachable") + } + + for _, bi := range c.Builtins { + v, ok := minVersionIndex.Builtins[bi.Name] + if !ok { + return "", false + } + if v.Compare(maxVersion) > 0 { + maxVersion = v + } + } + + for _, kw := range c.FutureKeywords { + v, ok := minVersionIndex.Keywords[kw] + if !ok { + return "", false + } + if v.Compare(maxVersion) > 0 { + maxVersion = v + } + } + + for _, feat := range c.Features { + v, ok := minVersionIndex.Features[feat] + if !ok { + return "", false + } + if v.Compare(maxVersion) > 0 { + maxVersion = v + } + } + + return maxVersion.String(), true +} + +func (c *Capabilities) ContainsFeature(feature string) bool { + for _, f := range c.Features { + if f == feature { + return true + } + } + return false +} + +// addBuiltinSorted inserts a built-in into c in sorted order. An existing built-in with the same name +// will be overwritten. +func (c *Capabilities) addBuiltinSorted(bi *Builtin) { + i := sort.Search(len(c.Builtins), func(x int) bool { + return c.Builtins[x].Name >= bi.Name + }) + if i < len(c.Builtins) && bi.Name == c.Builtins[i].Name { + c.Builtins[i] = bi + return + } + c.Builtins = append(c.Builtins, nil) + copy(c.Builtins[i+1:], c.Builtins[i:]) + c.Builtins[i] = bi +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/check.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/check.go new file mode 100644 index 000000000..57c2fa5d7 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/check.go @@ -0,0 +1,1329 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "sort" + "strings" + + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +type varRewriter func(Ref) Ref + +// exprChecker defines the interface for executing type checking on a single +// expression. The exprChecker must update the provided TypeEnv with inferred +// types of vars. +type exprChecker func(*TypeEnv, *Expr) *Error + +// typeChecker implements type checking on queries and rules. Errors are +// accumulated on the typeChecker so that a single run can report multiple +// issues. +type typeChecker struct { + builtins map[string]*Builtin + required *Capabilities + errs Errors + exprCheckers map[string]exprChecker + varRewriter varRewriter + ss *SchemaSet + allowNet []string + input types.Type + allowUndefinedFuncs bool + schemaTypes map[string]types.Type +} + +// newTypeChecker returns a new typeChecker object that has no errors. +func newTypeChecker() *typeChecker { + return &typeChecker{ + exprCheckers: map[string]exprChecker{ + "eq": checkExprEq, + }, + } +} + +func (tc *typeChecker) newEnv(exist *TypeEnv) *TypeEnv { + if exist != nil { + return exist.wrap() + } + env := newTypeEnv(tc.copy) + if tc.input != nil { + env.tree.Put(InputRootRef, tc.input) + } + return env +} + +func (tc *typeChecker) copy() *typeChecker { + return newTypeChecker(). + WithVarRewriter(tc.varRewriter). + WithSchemaSet(tc.ss). + WithSchemaTypes(tc.schemaTypes). + WithAllowNet(tc.allowNet). + WithInputType(tc.input). + WithAllowUndefinedFunctionCalls(tc.allowUndefinedFuncs). + WithBuiltins(tc.builtins). 
+ WithRequiredCapabilities(tc.required) +} + +func (tc *typeChecker) WithRequiredCapabilities(c *Capabilities) *typeChecker { + tc.required = c + return tc +} + +func (tc *typeChecker) WithBuiltins(builtins map[string]*Builtin) *typeChecker { + tc.builtins = builtins + return tc +} + +func (tc *typeChecker) WithSchemaSet(ss *SchemaSet) *typeChecker { + tc.ss = ss + return tc +} + +func (tc *typeChecker) WithSchemaTypes(schemaTypes map[string]types.Type) *typeChecker { + tc.schemaTypes = schemaTypes + return tc +} + +func (tc *typeChecker) WithAllowNet(hosts []string) *typeChecker { + tc.allowNet = hosts + return tc +} + +func (tc *typeChecker) WithVarRewriter(f varRewriter) *typeChecker { + tc.varRewriter = f + return tc +} + +func (tc *typeChecker) WithInputType(tpe types.Type) *typeChecker { + tc.input = tpe + return tc +} + +// WithAllowUndefinedFunctionCalls sets the type checker to allow references to undefined functions. +// Additionally, the 'CheckUndefinedFuncs' and 'CheckSafetyRuleBodies' compiler stages are skipped. +func (tc *typeChecker) WithAllowUndefinedFunctionCalls(allow bool) *typeChecker { + tc.allowUndefinedFuncs = allow + return tc +} + +// Env returns a type environment for the specified built-ins with any other +// global types configured on the checker. In practice, this is the default +// environment that other statements will be checked against. +func (tc *typeChecker) Env(builtins map[string]*Builtin) *TypeEnv { + env := tc.newEnv(nil) + for _, bi := range builtins { + env.tree.Put(bi.Ref(), bi.Decl) + } + return env +} + +// CheckBody runs type checking on the body and returns a TypeEnv if no errors +// are found. The resulting TypeEnv wraps the provided one. The resulting +// TypeEnv will be able to resolve types of vars contained in the body. +func (tc *typeChecker) CheckBody(env *TypeEnv, body Body) (*TypeEnv, Errors) { + + errors := []*Error{} + env = tc.newEnv(env) + vis := newRefChecker(env, tc.varRewriter) + + WalkExprs(body, func(expr *Expr) bool { + + closureErrs := tc.checkClosures(env, expr) + for _, err := range closureErrs { + errors = append(errors, err) + } + + hasClosureErrors := len(closureErrs) > 0 + + // reset errors from previous iteration + vis.errs = nil + NewGenericVisitor(vis.Visit).Walk(expr) + for _, err := range vis.errs { + errors = append(errors, err) + } + + hasRefErrors := len(vis.errs) > 0 + + if err := tc.checkExpr(env, expr); err != nil { + // Suppress this error if a more actionable one has occurred. In + // this case, if an error occurred in a ref or closure contained in + // this expression, and the error is due to a nil type, then it's + // likely to be the result of the more specific error. + skip := (hasClosureErrors || hasRefErrors) && causedByNilType(err) + if !skip { + errors = append(errors, err) + } + } + return true + }) + + tc.err(errors) + return env, errors +} + +// CheckTypes runs type checking on the rules returns a TypeEnv if no errors +// are found. The resulting TypeEnv wraps the provided one. The resulting +// TypeEnv will be able to resolve types of refs that refer to rules. 
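Most callers never construct a typeChecker directly; CheckBody and CheckTypes run as part of compilation, and their errors surface on the compiler. A minimal sketch follows (assuming the v1 ast package's default Rego v1 parsing):

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/v1/ast"
)

func main() {
    // count/1 is called with two arguments, so type checking of the rule body
    // reports an arity error ("too many arguments").
    module := ast.MustParseModule(`package example

p if {
    x := count([1, 2, 3], 2)
    x == 3
}
`)

    compiler := ast.NewCompiler()
    compiler.Compile(map[string]*ast.Module{"example.rego": module})
    if compiler.Failed() {
        for _, err := range compiler.Errors {
            fmt.Println(err) // e.g. rego_type_error: count: too many arguments
        }
    }
}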
+func (tc *typeChecker) CheckTypes(env *TypeEnv, sorted []util.T, as *AnnotationSet) (*TypeEnv, Errors) { + env = tc.newEnv(env) + for _, s := range sorted { + tc.checkRule(env, as, s.(*Rule)) + } + tc.errs.Sort() + return env, tc.errs +} + +func (tc *typeChecker) checkClosures(env *TypeEnv, expr *Expr) Errors { + var result Errors + WalkClosures(expr, func(x interface{}) bool { + switch x := x.(type) { + case *ArrayComprehension: + _, errs := tc.copy().CheckBody(env, x.Body) + if len(errs) > 0 { + result = errs + return true + } + case *SetComprehension: + _, errs := tc.copy().CheckBody(env, x.Body) + if len(errs) > 0 { + result = errs + return true + } + case *ObjectComprehension: + _, errs := tc.copy().CheckBody(env, x.Body) + if len(errs) > 0 { + result = errs + return true + } + } + return false + }) + return result +} + +func (tc *typeChecker) getSchemaType(schemaAnnot *SchemaAnnotation, rule *Rule) (types.Type, *Error) { + if tc.schemaTypes == nil { + tc.schemaTypes = make(map[string]types.Type) + } + + if refType, exists := tc.schemaTypes[schemaAnnot.Schema.String()]; exists { + return refType, nil + } + + refType, err := processAnnotation(tc.ss, schemaAnnot, rule, tc.allowNet) + if err != nil { + return nil, err + } + + if refType == nil { + return nil, nil + } + + tc.schemaTypes[schemaAnnot.Schema.String()] = refType + return refType, nil + +} + +func (tc *typeChecker) checkRule(env *TypeEnv, as *AnnotationSet, rule *Rule) { + + env = env.wrap() + + schemaAnnots := getRuleAnnotation(as, rule) + for _, schemaAnnot := range schemaAnnots { + refType, err := tc.getSchemaType(schemaAnnot, rule) + if err != nil { + tc.err([]*Error{err}) + continue + } + + ref := schemaAnnot.Path + // if we do not have a ref or a reftype, we should not evaluate this rule. + if ref == nil || refType == nil { + continue + } + + prefixRef, t := getPrefix(env, ref) + if t == nil || len(prefixRef) == len(ref) { + env.tree.Put(ref, refType) + } else { + newType, err := override(ref[len(prefixRef):], t, refType, rule) + if err != nil { + tc.err([]*Error{err}) + continue + } + env.tree.Put(prefixRef, newType) + } + } + + cpy, err := tc.CheckBody(env, rule.Body) + env = env.next + path := rule.Ref() + + if len(err) > 0 { + // if the rule/function contains an error, add it to the type env so + // that expressions that refer to this rule/function do not encounter + // type errors. + env.tree.Put(path, types.A) + return + } + + var tpe types.Type + + if len(rule.Head.Args) > 0 { + // If args are not referred to in body, infer as any. + WalkVars(rule.Head.Args, func(v Var) bool { + if cpy.Get(v) == nil { + cpy.tree.PutOne(v, types.A) + } + return false + }) + + // Construct function type. + args := make([]types.Type, len(rule.Head.Args)) + for i := 0; i < len(rule.Head.Args); i++ { + args[i] = cpy.Get(rule.Head.Args[i]) + } + + f := types.NewFunction(args, cpy.Get(rule.Head.Value)) + + tpe = f + } else { + switch rule.Head.RuleKind() { + case SingleValue: + typeV := cpy.Get(rule.Head.Value) + if !path.IsGround() { + // e.g. 
store object[string: whatever] at data.p.q.r, not data.p.q.r[x] or data.p.q.r[x].y[z] + objPath := path.DynamicSuffix() + path = path.GroundPrefix() + + var err error + tpe, err = nestedObject(cpy, objPath, typeV) + if err != nil { + tc.err([]*Error{NewError(TypeErr, rule.Head.Location, err.Error())}) //nolint:govet + tpe = nil + } + } else { + if typeV != nil { + tpe = typeV + } + } + case MultiValue: + typeK := cpy.Get(rule.Head.Key) + if typeK != nil { + tpe = types.NewSet(typeK) + } + } + } + + if tpe != nil { + env.tree.Insert(path, tpe, env) + } +} + +// nestedObject creates a nested structure of object types, where each term on path corresponds to a level in the +// nesting. Each term in the path only contributes to the dynamic portion of its corresponding object. +func nestedObject(env *TypeEnv, path Ref, tpe types.Type) (types.Type, error) { + if len(path) == 0 { + return tpe, nil + } + + k := path[0] + typeV, err := nestedObject(env, path[1:], tpe) + if err != nil { + return nil, err + } + if typeV == nil { + return nil, nil + } + + var dynamicProperty *types.DynamicProperty + typeK := env.Get(k) + if typeK == nil { + return nil, nil + } + dynamicProperty = types.NewDynamicProperty(typeK, typeV) + + return types.NewObject(nil, dynamicProperty), nil +} + +func (tc *typeChecker) checkExpr(env *TypeEnv, expr *Expr) *Error { + if err := tc.checkExprWith(env, expr, 0); err != nil { + return err + } + if !expr.IsCall() { + return nil + } + + operator := expr.Operator().String() + + // If the type checker wasn't provided with a required capabilities + // structure then just skip. In some cases, type checking might be run + // without the need to record what builtins are required. + if tc.required != nil && tc.builtins != nil { + if bi, ok := tc.builtins[operator]; ok { + tc.required.addBuiltinSorted(bi) + } + } + + checker := tc.exprCheckers[operator] + if checker != nil { + return checker(env, expr) + } + + return tc.checkExprBuiltin(env, expr) +} + +func (tc *typeChecker) checkExprBuiltin(env *TypeEnv, expr *Expr) *Error { + + args := expr.Operands() + pre := getArgTypes(env, args) + + // NOTE(tsandall): undefined functions will have been caught earlier in the + // compiler. We check for undefined functions before the safety check so + // that references to non-existent functions result in undefined function + // errors as opposed to unsafe var errors. + // + // We cannot run type checking before the safety check because part of the + // type checker relies on reordering (in particular for references to local + // vars). 
+ name := expr.Operator() + tpe := env.Get(name) + + if tpe == nil { + if tc.allowUndefinedFuncs { + return nil + } + return NewError(TypeErr, expr.Location, "undefined function %v", name) + } + + // check if the expression refers to a function that contains an error + _, ok := tpe.(types.Any) + if ok { + return nil + } + + ftpe, ok := tpe.(*types.Function) + if !ok { + return NewError(TypeErr, expr.Location, "undefined function %v", name) + } + + fargs := ftpe.FuncArgs() + namedFargs := ftpe.NamedFuncArgs() + + if ftpe.Result() != nil { + fargs.Args = append(fargs.Args, ftpe.Result()) + namedFargs.Args = append(namedFargs.Args, ftpe.NamedResult()) + } + + if len(args) > len(fargs.Args) && fargs.Variadic == nil { + return newArgError(expr.Location, name, "too many arguments", pre, namedFargs) + } + + if len(args) < len(ftpe.FuncArgs().Args) { + return newArgError(expr.Location, name, "too few arguments", pre, namedFargs) + } + + for i := range args { + if !unify1(env, args[i], fargs.Arg(i), false) { + post := make([]types.Type, len(args)) + for i := range args { + post[i] = env.Get(args[i]) + } + return newArgError(expr.Location, name, "invalid argument(s)", post, namedFargs) + } + } + + return nil +} + +func checkExprEq(env *TypeEnv, expr *Expr) *Error { + + pre := getArgTypes(env, expr.Operands()) + + if len(pre) < Equality.Decl.Arity() { + return newArgError(expr.Location, expr.Operator(), "too few arguments", pre, Equality.Decl.FuncArgs()) + } + + if Equality.Decl.Arity() < len(pre) { + return newArgError(expr.Location, expr.Operator(), "too many arguments", pre, Equality.Decl.FuncArgs()) + } + + a, b := expr.Operand(0), expr.Operand(1) + typeA, typeB := env.Get(a), env.Get(b) + + if !unify2(env, a, typeA, b, typeB) { + err := NewError(TypeErr, expr.Location, "match error") + err.Details = &UnificationErrDetail{ + Left: typeA, + Right: typeB, + } + return err + } + + return nil +} + +func (tc *typeChecker) checkExprWith(env *TypeEnv, expr *Expr, i int) *Error { + if i == len(expr.With) { + return nil + } + + target, value := expr.With[i].Target, expr.With[i].Value + targetType, valueType := env.Get(target), env.Get(value) + + if t, ok := targetType.(*types.Function); ok { // built-in function replacement + switch v := valueType.(type) { + case *types.Function: // ...by function + if !unifies(targetType, valueType) { + return newArgError(expr.With[i].Loc(), target.Value.(Ref), "arity mismatch", v.FuncArgs().Args, t.NamedFuncArgs()) + } + default: // ... 
by value, nothing to check + } + } + + return tc.checkExprWith(env, expr, i+1) +} + +func unify2(env *TypeEnv, a *Term, typeA types.Type, b *Term, typeB types.Type) bool { + + nilA := types.Nil(typeA) + nilB := types.Nil(typeB) + + if nilA && !nilB { + return unify1(env, a, typeB, false) + } else if nilB && !nilA { + return unify1(env, b, typeA, false) + } else if !nilA && !nilB { + return unifies(typeA, typeB) + } + + switch a.Value.(type) { + case *Array: + return unify2Array(env, a, b) + case *object: + return unify2Object(env, a, b) + case Var: + switch b.Value.(type) { + case Var: + return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) + case *Array: + return unify2Array(env, b, a) + case *object: + return unify2Object(env, b, a) + } + } + + return false +} + +func unify2Array(env *TypeEnv, a *Term, b *Term) bool { + arr := a.Value.(*Array) + switch bv := b.Value.(type) { + case *Array: + if arr.Len() == bv.Len() { + for i := 0; i < arr.Len(); i++ { + if !unify2(env, arr.Elem(i), env.Get(arr.Elem(i)), bv.Elem(i), env.Get(bv.Elem(i))) { + return false + } + } + return true + } + case Var: + return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) + } + return false +} + +func unify2Object(env *TypeEnv, a *Term, b *Term) bool { + obj := a.Value.(Object) + switch bv := b.Value.(type) { + case *object: + cv := obj.Intersect(bv) + if obj.Len() == bv.Len() && bv.Len() == len(cv) { + for i := range cv { + if !unify2(env, cv[i][1], env.Get(cv[i][1]), cv[i][2], env.Get(cv[i][2])) { + return false + } + } + return true + } + case Var: + return unify1(env, a, types.A, false) && unify1(env, b, env.Get(a), false) + } + return false +} + +func unify1(env *TypeEnv, term *Term, tpe types.Type, union bool) bool { + switch v := term.Value.(type) { + case *Array: + switch tpe := tpe.(type) { + case *types.Array: + return unify1Array(env, v, tpe, union) + case types.Any: + if types.Compare(tpe, types.A) == 0 { + for i := 0; i < v.Len(); i++ { + unify1(env, v.Elem(i), types.A, true) + } + return true + } + unifies := false + for i := range tpe { + unifies = unify1(env, term, tpe[i], true) || unifies + } + return unifies + } + return false + case *object: + switch tpe := tpe.(type) { + case *types.Object: + return unify1Object(env, v, tpe, union) + case types.Any: + if types.Compare(tpe, types.A) == 0 { + v.Foreach(func(key, value *Term) { + unify1(env, key, types.A, true) + unify1(env, value, types.A, true) + }) + return true + } + unifies := false + for i := range tpe { + unifies = unify1(env, term, tpe[i], true) || unifies + } + return unifies + } + return false + case Set: + switch tpe := tpe.(type) { + case *types.Set: + return unify1Set(env, v, tpe, union) + case types.Any: + if types.Compare(tpe, types.A) == 0 { + v.Foreach(func(elem *Term) { + unify1(env, elem, types.A, true) + }) + return true + } + unifies := false + for i := range tpe { + unifies = unify1(env, term, tpe[i], true) || unifies + } + return unifies + } + return false + case Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension: + return unifies(env.Get(v), tpe) + case Var: + if !union { + if exist := env.Get(v); exist != nil { + return unifies(exist, tpe) + } + env.tree.PutOne(term.Value, tpe) + } else { + env.tree.PutOne(term.Value, types.Or(env.Get(v), tpe)) + } + return true + default: + if !IsConstant(v) { + panic("unreachable") + } + return unifies(env.Get(term), tpe) + } +} + +func unify1Array(env *TypeEnv, val *Array, tpe *types.Array, union bool) bool { + if val.Len() != 
tpe.Len() && tpe.Dynamic() == nil { + return false + } + for i := 0; i < val.Len(); i++ { + if !unify1(env, val.Elem(i), tpe.Select(i), union) { + return false + } + } + return true +} + +func unify1Object(env *TypeEnv, val Object, tpe *types.Object, union bool) bool { + if val.Len() != len(tpe.Keys()) && tpe.DynamicValue() == nil { + return false + } + stop := val.Until(func(k, v *Term) bool { + if IsConstant(k.Value) { + if child := selectConstant(tpe, k); child != nil { + if !unify1(env, v, child, union) { + return true + } + } else { + return true + } + } else { + // Inferring type of value under dynamic key would involve unioning + // with all property values of tpe whose keys unify. For now, type + // these values as Any. We can investigate stricter inference in + // the future. + unify1(env, v, types.A, union) + } + return false + }) + return !stop +} + +func unify1Set(env *TypeEnv, val Set, tpe *types.Set, union bool) bool { + of := types.Values(tpe) + return !val.Until(func(elem *Term) bool { + return !unify1(env, elem, of, union) + }) +} + +func (tc *typeChecker) err(errors []*Error) { + tc.errs = append(tc.errs, errors...) +} + +type refChecker struct { + env *TypeEnv + errs Errors + varRewriter varRewriter +} + +func rewriteVarsNop(node Ref) Ref { + return node +} + +func newRefChecker(env *TypeEnv, f varRewriter) *refChecker { + if f == nil { + f = rewriteVarsNop + } + + return &refChecker{ + env: env, + errs: nil, + varRewriter: f, + } +} + +func (rc *refChecker) Visit(x interface{}) bool { + switch x := x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + return true + case *Expr: + switch terms := x.Terms.(type) { + case []*Term: + for i := 1; i < len(terms); i++ { + NewGenericVisitor(rc.Visit).Walk(terms[i]) + } + return true + case *Term: + NewGenericVisitor(rc.Visit).Walk(terms) + return true + } + case Ref: + if err := rc.checkApply(rc.env, x); err != nil { + rc.errs = append(rc.errs, err) + return true + } + if err := rc.checkRef(rc.env, rc.env.tree, x, 0); err != nil { + rc.errs = append(rc.errs, err) + } + } + return false +} + +func (rc *refChecker) checkApply(curr *TypeEnv, ref Ref) *Error { + switch tpe := curr.Get(ref).(type) { + case *types.Function: // NOTE(sr): We don't support first-class functions, except for `with`. + return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), len(ref)-1, tpe) + } + + return nil +} + +func (rc *refChecker) checkRef(curr *TypeEnv, node *typeTreeNode, ref Ref, idx int) *Error { + + if idx == len(ref) { + return nil + } + + head := ref[idx] + + // NOTE(sr): as long as package statements are required, this isn't possible: + // the shortest possible rule ref is data.a.b (b is idx 2), idx 1 and 2 need to + // be strings or vars. 
+ if idx == 1 || idx == 2 { + switch head.Value.(type) { + case Var, String: // OK + default: + have := rc.env.Get(head.Value) + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, have, types.S, getOneOfForNode(node)) + } + } + + if v, ok := head.Value.(Var); ok && idx != 0 { + tpe := types.Keys(rc.env.getRefRecExtent(node)) + if exist := rc.env.Get(v); exist != nil { + if !unifies(tpe, exist) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, tpe, getOneOfForNode(node)) + } + } else { + rc.env.tree.PutOne(v, tpe) + } + } + + child := node.Child(head.Value) + if child == nil { + // NOTE(sr): idx is reset on purpose: we start over + switch { + case curr.next != nil: + next := curr.next + return rc.checkRef(next, next.tree, ref, 0) + + case RootDocumentNames.Contains(ref[0]): + if idx != 0 { + node.Children().Iter(func(_, child util.T) bool { + _ = rc.checkRef(curr, child.(*typeTreeNode), ref, idx+1) // ignore error + return false + }) + return nil + } + return rc.checkRefLeaf(types.A, ref, 1) + + default: + return rc.checkRefLeaf(types.A, ref, 0) + } + } + + if child.Leaf() { + return rc.checkRefLeaf(child.Value(), ref, idx+1) + } + + return rc.checkRef(curr, child, ref, idx+1) +} + +func (rc *refChecker) checkRefLeaf(tpe types.Type, ref Ref, idx int) *Error { + + if idx == len(ref) { + return nil + } + + head := ref[idx] + + keys := types.Keys(tpe) + if keys == nil { + return newRefErrUnsupported(ref[0].Location, rc.varRewriter(ref), idx-1, tpe) + } + + switch value := head.Value.(type) { + + case Var: + if exist := rc.env.Get(value); exist != nil { + if !unifies(exist, keys) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) + } + } else { + rc.env.tree.PutOne(value, types.Keys(tpe)) + } + + case Ref: + if exist := rc.env.Get(value); exist != nil { + if !unifies(exist, keys) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, exist, keys, getOneOfForType(tpe)) + } + } + + case *Array, Object, Set: + if !unify1(rc.env, head, keys, false) { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, rc.env.Get(head), keys, nil) + } + + default: + child := selectConstant(tpe, head) + if child == nil { + return newRefErrInvalid(ref[0].Location, rc.varRewriter(ref), idx, nil, types.Keys(tpe), getOneOfForType(tpe)) + } + return rc.checkRefLeaf(child, ref, idx+1) + } + + return rc.checkRefLeaf(types.Values(tpe), ref, idx+1) +} + +func unifies(a, b types.Type) bool { + + if a == nil || b == nil { + return false + } + + anyA, ok1 := a.(types.Any) + if ok1 { + if unifiesAny(anyA, b) { + return true + } + } + + anyB, ok2 := b.(types.Any) + if ok2 { + if unifiesAny(anyB, a) { + return true + } + } + + if ok1 || ok2 { + return false + } + + switch a := a.(type) { + case types.Null: + _, ok := b.(types.Null) + return ok + case types.Boolean: + _, ok := b.(types.Boolean) + return ok + case types.Number: + _, ok := b.(types.Number) + return ok + case types.String: + _, ok := b.(types.String) + return ok + case *types.Array: + b, ok := b.(*types.Array) + if !ok { + return false + } + return unifiesArrays(a, b) + case *types.Object: + b, ok := b.(*types.Object) + if !ok { + return false + } + return unifiesObjects(a, b) + case *types.Set: + b, ok := b.(*types.Set) + if !ok { + return false + } + return unifies(types.Values(a), types.Values(b)) + case *types.Function: + // NOTE(sr): variadic functions can only be internal ones, and we've forbidden + // their replacement via `with`; 
so we disregard variadic here + if types.Arity(a) == types.Arity(b) { + b := b.(*types.Function) + for i := range a.FuncArgs().Args { + if !unifies(a.FuncArgs().Arg(i), b.FuncArgs().Arg(i)) { + return false + } + } + return true + } + return false + default: + panic("unreachable") + } +} + +func unifiesAny(a types.Any, b types.Type) bool { + if _, ok := b.(*types.Function); ok { + return false + } + for i := range a { + if unifies(a[i], b) { + return true + } + } + return len(a) == 0 +} + +func unifiesArrays(a, b *types.Array) bool { + + if !unifiesArraysStatic(a, b) { + return false + } + + if !unifiesArraysStatic(b, a) { + return false + } + + return a.Dynamic() == nil || b.Dynamic() == nil || unifies(a.Dynamic(), b.Dynamic()) +} + +func unifiesArraysStatic(a, b *types.Array) bool { + if a.Len() != 0 { + for i := 0; i < a.Len(); i++ { + if !unifies(a.Select(i), b.Select(i)) { + return false + } + } + } + return true +} + +func unifiesObjects(a, b *types.Object) bool { + if !unifiesObjectsStatic(a, b) { + return false + } + + if !unifiesObjectsStatic(b, a) { + return false + } + + return a.DynamicValue() == nil || b.DynamicValue() == nil || unifies(a.DynamicValue(), b.DynamicValue()) +} + +func unifiesObjectsStatic(a, b *types.Object) bool { + for _, k := range a.Keys() { + if !unifies(a.Select(k), b.Select(k)) { + return false + } + } + return true +} + +// typeErrorCause defines an interface to determine the reason for a type +// error. The type error details implement this interface so that type checking +// can report more actionable errors. +type typeErrorCause interface { + nilType() bool +} + +func causedByNilType(err *Error) bool { + cause, ok := err.Details.(typeErrorCause) + if !ok { + return false + } + return cause.nilType() +} + +// ArgErrDetail represents a generic argument error. +type ArgErrDetail struct { + Have []types.Type `json:"have"` + Want types.FuncArgs `json:"want"` +} + +// Lines returns the string representation of the detail. +func (d *ArgErrDetail) Lines() []string { + lines := make([]string, 2) + lines[0] = "have: " + formatArgs(d.Have) + lines[1] = "want: " + fmt.Sprint(d.Want) + return lines +} + +func (d *ArgErrDetail) nilType() bool { + for i := range d.Have { + if types.Nil(d.Have[i]) { + return true + } + } + return false +} + +// UnificationErrDetail describes a type mismatch error when two values are +// unified (e.g., x = [1,2,y]). +type UnificationErrDetail struct { + Left types.Type `json:"a"` + Right types.Type `json:"b"` +} + +func (a *UnificationErrDetail) nilType() bool { + return types.Nil(a.Left) || types.Nil(a.Right) +} + +// Lines returns the string representation of the detail. +func (a *UnificationErrDetail) Lines() []string { + lines := make([]string, 2) + lines[0] = fmt.Sprint("left : ", types.Sprint(a.Left)) + lines[1] = fmt.Sprint("right : ", types.Sprint(a.Right)) + return lines +} + +// RefErrUnsupportedDetail describes an undefined reference error where the +// referenced value does not support dereferencing (e.g., scalars). +type RefErrUnsupportedDetail struct { + Ref Ref `json:"ref"` // invalid ref + Pos int `json:"pos"` // invalid element + Have types.Type `json:"have"` // referenced type +} + +// Lines returns the string representation of the detail. 
+func (r *RefErrUnsupportedDetail) Lines() []string { + lines := []string{ + r.Ref.String(), + strings.Repeat("^", len(r.Ref[:r.Pos+1].String())), + fmt.Sprintf("have: %v", r.Have), + } + return lines +} + +// RefErrInvalidDetail describes an undefined reference error where the referenced +// value does not support the reference operand (e.g., missing object key, +// invalid key type, etc.) +type RefErrInvalidDetail struct { + Ref Ref `json:"ref"` // invalid ref + Pos int `json:"pos"` // invalid element + Have types.Type `json:"have,omitempty"` // type of invalid element (for var/ref elements) + Want types.Type `json:"want"` // allowed type (for non-object values) + OneOf []Value `json:"oneOf"` // allowed values (e.g., for object keys) +} + +// Lines returns the string representation of the detail. +func (r *RefErrInvalidDetail) Lines() []string { + lines := []string{r.Ref.String()} + offset := len(r.Ref[:r.Pos].String()) + 1 + pad := strings.Repeat(" ", offset) + lines = append(lines, fmt.Sprintf("%s^", pad)) + if r.Have != nil { + lines = append(lines, fmt.Sprintf("%shave (type): %v", pad, r.Have)) + } else { + lines = append(lines, fmt.Sprintf("%shave: %v", pad, r.Ref[r.Pos])) + } + if len(r.OneOf) > 0 { + lines = append(lines, fmt.Sprintf("%swant (one of): %v", pad, r.OneOf)) + } else { + lines = append(lines, fmt.Sprintf("%swant (type): %v", pad, r.Want)) + } + return lines +} + +func formatArgs(args []types.Type) string { + buf := make([]string, len(args)) + for i := range args { + buf[i] = types.Sprint(args[i]) + } + return "(" + strings.Join(buf, ", ") + ")" +} + +func newRefErrInvalid(loc *Location, ref Ref, idx int, have, want types.Type, oneOf []Value) *Error { + err := newRefError(loc, ref) + err.Details = &RefErrInvalidDetail{ + Ref: ref, + Pos: idx, + Have: have, + Want: want, + OneOf: oneOf, + } + return err +} + +func newRefErrUnsupported(loc *Location, ref Ref, idx int, have types.Type) *Error { + err := newRefError(loc, ref) + err.Details = &RefErrUnsupportedDetail{ + Ref: ref, + Pos: idx, + Have: have, + } + return err +} + +func newRefError(loc *Location, ref Ref) *Error { + return NewError(TypeErr, loc, "undefined ref: %v", ref) +} + +func newArgError(loc *Location, builtinName Ref, msg string, have []types.Type, want types.FuncArgs) *Error { + err := NewError(TypeErr, loc, "%v: %v", builtinName, msg) + err.Details = &ArgErrDetail{ + Have: have, + Want: want, + } + return err +} + +func getOneOfForNode(node *typeTreeNode) (result []Value) { + node.Children().Iter(func(k, _ util.T) bool { + result = append(result, k.(Value)) + return false + }) + + sortValueSlice(result) + return result +} + +func getOneOfForType(tpe types.Type) (result []Value) { + switch tpe := tpe.(type) { + case *types.Object: + for _, k := range tpe.Keys() { + v, err := InterfaceToValue(k) + if err != nil { + panic(err) + } + result = append(result, v) + } + + case types.Any: + for _, object := range tpe { + objRes := getOneOfForType(object) + result = append(result, objRes...) 
+ } + } + + result = removeDuplicate(result) + sortValueSlice(result) + return result +} + +func sortValueSlice(sl []Value) { + sort.Slice(sl, func(i, j int) bool { + return sl[i].Compare(sl[j]) < 0 + }) +} + +func removeDuplicate(list []Value) []Value { + seen := make(map[Value]bool) + var newResult []Value + for _, item := range list { + if !seen[item] { + newResult = append(newResult, item) + seen[item] = true + } + } + return newResult +} + +func getArgTypes(env *TypeEnv, args []*Term) []types.Type { + pre := make([]types.Type, len(args)) + for i := range args { + pre[i] = env.Get(args[i]) + } + return pre +} + +// getPrefix returns the shortest prefix of ref that exists in env +func getPrefix(env *TypeEnv, ref Ref) (Ref, types.Type) { + if len(ref) == 1 { + t := env.Get(ref) + if t != nil { + return ref, t + } + } + for i := 1; i < len(ref); i++ { + t := env.Get(ref[:i]) + if t != nil { + return ref[:i], t + } + } + return nil, nil +} + +// override takes a type t and returns a type obtained from t where the path represented by ref within it has type o (overriding the original type of that path) +func override(ref Ref, t types.Type, o types.Type, rule *Rule) (types.Type, *Error) { + var newStaticProps []*types.StaticProperty + obj, ok := t.(*types.Object) + if !ok { + newType, err := getObjectType(ref, o, rule, types.NewDynamicProperty(types.A, types.A)) + if err != nil { + return nil, err + } + return newType, nil + } + found := false + if ok { + staticProps := obj.StaticProperties() + for _, prop := range staticProps { + valueCopy := prop.Value + key, err := InterfaceToValue(prop.Key) + if err != nil { + return nil, NewError(TypeErr, rule.Location, "unexpected error in override: %s", err.Error()) + } + if len(ref) > 0 && ref[0].Value.Compare(key) == 0 { + found = true + if len(ref) == 1 { + valueCopy = o + } else { + newVal, err := override(ref[1:], valueCopy, o, rule) + if err != nil { + return nil, err + } + valueCopy = newVal + } + } + newStaticProps = append(newStaticProps, types.NewStaticProperty(prop.Key, valueCopy)) + } + } + + // ref[0] is not a top-level key in staticProps, so it must be added + if !found { + newType, err := getObjectType(ref, o, rule, obj.DynamicProperties()) + if err != nil { + return nil, err + } + newStaticProps = append(newStaticProps, newType.StaticProperties()...) 
+ } + return types.NewObject(newStaticProps, obj.DynamicProperties()), nil +} + +func getKeys(ref Ref, rule *Rule) ([]interface{}, *Error) { + keys := []interface{}{} + for _, refElem := range ref { + key, err := JSON(refElem.Value) + if err != nil { + return nil, NewError(TypeErr, rule.Location, "error getting key from value: %s", err.Error()) + } + keys = append(keys, key) + } + return keys, nil +} + +func getObjectTypeRec(keys []interface{}, o types.Type, d *types.DynamicProperty) *types.Object { + if len(keys) == 1 { + staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], o)} + return types.NewObject(staticProps, d) + } + + staticProps := []*types.StaticProperty{types.NewStaticProperty(keys[0], getObjectTypeRec(keys[1:], o, d))} + return types.NewObject(staticProps, d) +} + +func getObjectType(ref Ref, o types.Type, rule *Rule, d *types.DynamicProperty) (*types.Object, *Error) { + keys, err := getKeys(ref, rule) + if err != nil { + return nil, err + } + return getObjectTypeRec(keys, o, d), nil +} + +func getRuleAnnotation(as *AnnotationSet, rule *Rule) (result []*SchemaAnnotation) { + + for _, x := range as.GetSubpackagesScope(rule.Module.Package.Path) { + result = append(result, x.Schemas...) + } + + if x := as.GetPackageScope(rule.Module.Package); x != nil { + result = append(result, x.Schemas...) + } + + if x := as.GetDocumentScope(rule.Ref().GroundPrefix()); x != nil { + result = append(result, x.Schemas...) + } + + for _, x := range as.GetRuleScope(rule) { + result = append(result, x.Schemas...) + } + + return result +} + +func processAnnotation(ss *SchemaSet, annot *SchemaAnnotation, rule *Rule, allowNet []string) (types.Type, *Error) { + + var schema interface{} + + if annot.Schema != nil { + if ss == nil { + return nil, nil + } + schema = ss.Get(annot.Schema) + if schema == nil { + return nil, NewError(TypeErr, rule.Location, "undefined schema: %v", annot.Schema) + } + } else if annot.Definition != nil { + schema = *annot.Definition + } + + tpe, err := loadSchema(schema, allowNet) + if err != nil { + return nil, NewError(TypeErr, rule.Location, err.Error()) //nolint:govet + } + + return tpe, nil +} + +func errAnnotationRedeclared(a *Annotations, other *Location) *Error { + return NewError(TypeErr, a.Location, "%v annotation redeclared: %v", a.Scope, other) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go new file mode 100644 index 000000000..24e61712e --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compare.go @@ -0,0 +1,440 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "encoding/json" + "fmt" + "math/big" +) + +// Compare returns an integer indicating whether two AST values are less than, +// equal to, or greater than each other. +// +// If a is less than b, the return value is negative. If a is greater than b, +// the return value is positive. If a is equal to b, the return value is zero. +// +// Different types are never equal to each other. For comparison purposes, types +// are sorted as follows: +// +// nil < Null < Boolean < Number < String < Var < Ref < Array < Object < Set < +// ArrayComprehension < ObjectComprehension < SetComprehension < Expr < SomeDecl +// < With < Body < Rule < Import < Package < Module. 
+// +// Arrays and Refs are equal if and only if both a and b have the same length +// and all corresponding elements are equal. If one element is not equal, the +// return value is the same as for the first differing element. If all elements +// are equal but a and b have different lengths, the shorter is considered less +// than the other. +// +// Objects are considered equal if and only if both a and b have the same sorted +// (key, value) pairs and are of the same length. Other comparisons are +// consistent but not defined. +// +// Sets are considered equal if and only if the symmetric difference of a and b +// is empty. +// Other comparisons are consistent but not defined. +func Compare(a, b interface{}) int { + + if t, ok := a.(*Term); ok { + if t == nil { + a = nil + } else { + a = t.Value + } + } + + if t, ok := b.(*Term); ok { + if t == nil { + b = nil + } else { + b = t.Value + } + } + + if a == nil { + if b == nil { + return 0 + } + return -1 + } + if b == nil { + return 1 + } + + sortA := sortOrder(a) + sortB := sortOrder(b) + + if sortA < sortB { + return -1 + } else if sortB < sortA { + return 1 + } + + switch a := a.(type) { + case Null: + return 0 + case Boolean: + b := b.(Boolean) + if a.Equal(b) { + return 0 + } + if !a { + return -1 + } + return 1 + case Number: + if ai, err := json.Number(a).Int64(); err == nil { + if bi, err := json.Number(b.(Number)).Int64(); err == nil { + if ai == bi { + return 0 + } + if ai < bi { + return -1 + } + return 1 + } + } + + // We use big.Rat for comparing big numbers. + // It replaces big.Float due to following reason: + // big.Float comes with a default precision of 64, and setting a + // larger precision results in more memory being allocated + // (regardless of the actual number we are parsing with SetString). + // + // Note: If we're so close to zero that big.Float says we are zero, do + // *not* big.Rat).SetString on the original string it'll potentially + // take very long. 
+ var bigA, bigB *big.Rat + fa, ok := new(big.Float).SetString(string(a)) + if !ok { + panic("illegal value") + } + if fa.IsInt() { + if i, _ := fa.Int64(); i == 0 { + bigA = new(big.Rat).SetInt64(0) + } + } + if bigA == nil { + bigA, ok = new(big.Rat).SetString(string(a)) + if !ok { + panic("illegal value") + } + } + + fb, ok := new(big.Float).SetString(string(b.(Number))) + if !ok { + panic("illegal value") + } + if fb.IsInt() { + if i, _ := fb.Int64(); i == 0 { + bigB = new(big.Rat).SetInt64(0) + } + } + if bigB == nil { + bigB, ok = new(big.Rat).SetString(string(b.(Number))) + if !ok { + panic("illegal value") + } + } + + return bigA.Cmp(bigB) + case String: + b := b.(String) + if a.Equal(b) { + return 0 + } + if a < b { + return -1 + } + return 1 + case Var: + return VarCompare(a, b.(Var)) + case Ref: + b := b.(Ref) + return termSliceCompare(a, b) + case *Array: + b := b.(*Array) + return termSliceCompare(a.elems, b.elems) + case *lazyObj: + return Compare(a.force(), b) + case *object: + if x, ok := b.(*lazyObj); ok { + b = x.force() + } + b := b.(*object) + return a.Compare(b) + case Set: + b := b.(Set) + return a.Compare(b) + case *ArrayComprehension: + b := b.(*ArrayComprehension) + if cmp := Compare(a.Term, b.Term); cmp != 0 { + return cmp + } + return a.Body.Compare(b.Body) + case *ObjectComprehension: + b := b.(*ObjectComprehension) + if cmp := Compare(a.Key, b.Key); cmp != 0 { + return cmp + } + if cmp := Compare(a.Value, b.Value); cmp != 0 { + return cmp + } + return a.Body.Compare(b.Body) + case *SetComprehension: + b := b.(*SetComprehension) + if cmp := Compare(a.Term, b.Term); cmp != 0 { + return cmp + } + return a.Body.Compare(b.Body) + case Call: + b := b.(Call) + return termSliceCompare(a, b) + case *Expr: + b := b.(*Expr) + return a.Compare(b) + case *SomeDecl: + b := b.(*SomeDecl) + return a.Compare(b) + case *Every: + b := b.(*Every) + return a.Compare(b) + case *With: + b := b.(*With) + return a.Compare(b) + case Body: + b := b.(Body) + return a.Compare(b) + case *Head: + b := b.(*Head) + return a.Compare(b) + case *Rule: + b := b.(*Rule) + return a.Compare(b) + case Args: + b := b.(Args) + return termSliceCompare(a, b) + case *Import: + b := b.(*Import) + return a.Compare(b) + case *Package: + b := b.(*Package) + return a.Compare(b) + case *Annotations: + b := b.(*Annotations) + return a.Compare(b) + case *Module: + b := b.(*Module) + return a.Compare(b) + } + panic(fmt.Sprintf("illegal value: %T", a)) +} + +type termSlice []*Term + +func (s termSlice) Less(i, j int) bool { return Compare(s[i].Value, s[j].Value) < 0 } +func (s termSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } +func (s termSlice) Len() int { return len(s) } + +func sortOrder(x interface{}) int { + switch x.(type) { + case Null: + return 0 + case Boolean: + return 1 + case Number: + return 2 + case String: + return 3 + case Var: + return 4 + case Ref: + return 5 + case *Array: + return 6 + case Object: + return 7 + case Set: + return 8 + case *ArrayComprehension: + return 9 + case *ObjectComprehension: + return 10 + case *SetComprehension: + return 11 + case Call: + return 12 + case Args: + return 13 + case *Expr: + return 100 + case *SomeDecl: + return 101 + case *Every: + return 102 + case *With: + return 110 + case *Head: + return 120 + case Body: + return 200 + case *Rule: + return 1000 + case *Import: + return 1001 + case *Package: + return 1002 + case *Annotations: + return 1003 + case *Module: + return 10000 + } + panic(fmt.Sprintf("illegal value: %T", x)) +} + +func 
importsCompare(a, b []*Import) int { + minLen := len(a) + if len(b) < minLen { + minLen = len(b) + } + for i := 0; i < minLen; i++ { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } + if len(b) < len(a) { + return 1 + } + return 0 +} + +func annotationsCompare(a, b []*Annotations) int { + minLen := len(a) + if len(b) < minLen { + minLen = len(b) + } + for i := 0; i < minLen; i++ { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } + if len(b) < len(a) { + return 1 + } + return 0 +} + +func rulesCompare(a, b []*Rule) int { + minLen := len(a) + if len(b) < minLen { + minLen = len(b) + } + for i := 0; i < minLen; i++ { + if cmp := a[i].Compare(b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } + if len(b) < len(a) { + return 1 + } + return 0 +} + +func termSliceCompare(a, b []*Term) int { + minLen := len(a) + if len(b) < minLen { + minLen = len(b) + } + for i := 0; i < minLen; i++ { + if cmp := Compare(a[i], b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } else if len(b) < len(a) { + return 1 + } + return 0 +} + +func withSliceCompare(a, b []*With) int { + minLen := len(a) + if len(b) < minLen { + minLen = len(b) + } + for i := 0; i < minLen; i++ { + if cmp := Compare(a[i], b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } else if len(b) < len(a) { + return 1 + } + return 0 +} + +func VarCompare(a, b Var) int { + if a == b { + return 0 + } + if a < b { + return -1 + } + return 1 +} + +func TermValueCompare(a, b *Term) int { + return a.Value.Compare(b.Value) +} + +func ValueEqual(a, b Value) bool { + // TODO(ae): why doesn't this work the same? + // + // case interface{ Equal(Value) bool }: + // return v.Equal(b) + // + // When put on top, golangci-lint even flags the other cases as unreachable.. + // but TestTopdownVirtualCache will have failing test cases when we replace + // the other cases with the above one.. 🤔 + switch v := a.(type) { + case Null: + return v.Equal(b) + case Boolean: + return v.Equal(b) + case Number: + return v.Equal(b) + case String: + return v.Equal(b) + case Var: + return v.Equal(b) + case Ref: + return v.Equal(b) + case *Array: + return v.Equal(b) + } + + return a.Compare(b) == 0 +} + +func RefCompare(a, b Ref) int { + return termSliceCompare(a, b) +} + +func RefEqual(a, b Ref) bool { + return termSliceEqual(a, b) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go new file mode 100644 index 000000000..9b0302474 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compile.go @@ -0,0 +1,5975 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "errors" + "fmt" + "io" + "maps" + "slices" + "sort" + "strconv" + "strings" + + "github.com/open-policy-agent/opa/internal/debug" + "github.com/open-policy-agent/opa/internal/gojsonschema" + "github.com/open-policy-agent/opa/v1/ast/location" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +// CompileErrorLimitDefault is the default number errors a compiler will allow before +// exiting. 
+const CompileErrorLimitDefault = 10 + +var errLimitReached = NewError(CompileErr, nil, "error limit reached") + +// Compiler contains the state of a compilation process. +type Compiler struct { + + // Errors contains errors that occurred during the compilation process. + // If there are one or more errors, the compilation process is considered + // "failed". + Errors Errors + + // Modules contains the compiled modules. The compiled modules are the + // output of the compilation process. If the compilation process failed, + // there is no guarantee about the state of the modules. + Modules map[string]*Module + + // ModuleTree organizes the modules into a tree where each node is keyed by + // an element in the module's package path. E.g., given modules containing + // the following package directives: "a", "a.b", "a.c", and "a.b", the + // resulting module tree would be: + // + // root + // | + // +--- data (no modules) + // | + // +--- a (1 module) + // | + // +--- b (2 modules) + // | + // +--- c (1 module) + // + ModuleTree *ModuleTreeNode + + // RuleTree organizes rules into a tree where each node is keyed by an + // element in the rule's path. The rule path is the concatenation of the + // containing package and the stringified rule name. E.g., given the + // following module: + // + // package ex + // p[1] { true } + // p[2] { true } + // q = true + // a.b.c = 3 + // + // root + // | + // +--- data (no rules) + // | + // +--- ex (no rules) + // | + // +--- p (2 rules) + // | + // +--- q (1 rule) + // | + // +--- a + // | + // +--- b + // | + // +--- c (1 rule) + // + // Another example with general refs containing vars at arbitrary locations: + // + // package ex + // a.b[x].d { x := "c" } # R1 + // a.b.c[x] { x := "d" } # R2 + // a.b[x][y] { x := "c"; y := "d" } # R3 + // p := true # R4 + // + // root + // | + // +--- data (no rules) + // | + // +--- ex (no rules) + // | + // +--- a + // | | + // | +--- b (R1, R3) + // | | + // | +--- c (R2) + // | + // +--- p (R4) + RuleTree *TreeNode + + // Graph contains dependencies between rules. An edge (u,v) is added to the + // graph if rule 'u' refers to the virtual document defined by 'v'. + Graph *Graph + + // TypeEnv holds type information for values inferred by the compiler. + TypeEnv *TypeEnv + + // RewrittenVars is a mapping of variables that have been rewritten + // with the key being the generated name and value being the original. + RewrittenVars map[Var]Var + + // Capabilities required by the modules that were compiled. 
+ Required *Capabilities + + localvargen *localVarGenerator + moduleLoader ModuleLoader + ruleIndices *util.HashMap + stages []stage + maxErrs int + sorted []string // list of sorted module names + pathExists func([]string) (bool, error) + pathConflictCheckRoots []string + after map[string][]CompilerStageDefinition + metrics metrics.Metrics + capabilities *Capabilities // user-supplied capabilities + imports map[string][]*Import // saved imports from stripping + builtins map[string]*Builtin // universe of built-in functions + customBuiltins map[string]*Builtin // user-supplied custom built-in functions (deprecated: use capabilities) + unsafeBuiltinsMap map[string]struct{} // user-supplied set of unsafe built-ins functions to block (deprecated: use capabilities) + deprecatedBuiltinsMap map[string]struct{} // set of deprecated, but not removed, built-in functions + enablePrintStatements bool // indicates if print statements should be elided (default) + comprehensionIndices map[*Term]*ComprehensionIndex // comprehension key index + initialized bool // indicates if init() has been called + debug debug.Debug // emits debug information produced during compilation + schemaSet *SchemaSet // user-supplied schemas for input and data documents + inputType types.Type // global input type retrieved from schema set + annotationSet *AnnotationSet // hierarchical set of annotations + strict bool // enforce strict compilation checks + keepModules bool // whether to keep the unprocessed, parse modules (below) + parsedModules map[string]*Module // parsed, but otherwise unprocessed modules, kept track of when keepModules is true + useTypeCheckAnnotations bool // whether to provide annotated information (schemas) to the type checker + allowUndefinedFuncCalls bool // don't error on calls to unknown functions. + evalMode CompilerEvalMode // + rewriteTestRulesForTracing bool // rewrite test rules to capture dynamic values for tracing. + defaultRegoVersion RegoVersion +} + +func (c *Compiler) DefaultRegoVersion() RegoVersion { + return c.defaultRegoVersion +} + +// CompilerStage defines the interface for stages in the compiler. +type CompilerStage func(*Compiler) *Error + +// CompilerEvalMode allows toggling certain stages that are only +// needed for certain modes, Concretely, only "topdown" mode will +// have the compiler build comprehension and rule indices. +type CompilerEvalMode int + +const ( + // EvalModeTopdown (default) instructs the compiler to build rule + // and comprehension indices used by topdown evaluation. + EvalModeTopdown CompilerEvalMode = iota + + // EvalModeIR makes the compiler skip the stages for comprehension + // and rule indices. + EvalModeIR +) + +// CompilerStageDefinition defines a compiler stage +type CompilerStageDefinition struct { + Name string + MetricName string + Stage CompilerStage +} + +// RulesOptions defines the options for retrieving rules by Ref from the +// compiler. +type RulesOptions struct { + // IncludeHiddenModules determines if the result contains hidden modules, + // currently only the "system" namespace, i.e. "data.system.*". + IncludeHiddenModules bool +} + +// QueryContext contains contextual information for running an ad-hoc query. +// +// Ad-hoc queries can be run in the context of a package and imports may be +// included to provide concise access to data. +type QueryContext struct { + Package *Package + Imports []*Import +} + +// NewQueryContext returns a new QueryContext object. 
+func NewQueryContext() *QueryContext { + return &QueryContext{} +} + +// WithPackage sets the pkg on qc. +func (qc *QueryContext) WithPackage(pkg *Package) *QueryContext { + if qc == nil { + qc = NewQueryContext() + } + qc.Package = pkg + return qc +} + +// WithImports sets the imports on qc. +func (qc *QueryContext) WithImports(imports []*Import) *QueryContext { + if qc == nil { + qc = NewQueryContext() + } + qc.Imports = imports + return qc +} + +// Copy returns a deep copy of qc. +func (qc *QueryContext) Copy() *QueryContext { + if qc == nil { + return nil + } + cpy := *qc + if cpy.Package != nil { + cpy.Package = qc.Package.Copy() + } + cpy.Imports = make([]*Import, len(qc.Imports)) + for i := range qc.Imports { + cpy.Imports[i] = qc.Imports[i].Copy() + } + return &cpy +} + +// QueryCompiler defines the interface for compiling ad-hoc queries. +type QueryCompiler interface { + + // Compile should be called to compile ad-hoc queries. The return value is + // the compiled version of the query. + Compile(q Body) (Body, error) + + // TypeEnv returns the type environment built after running type checking + // on the query. + TypeEnv() *TypeEnv + + // WithContext sets the QueryContext on the QueryCompiler. Subsequent calls + // to Compile will take the QueryContext into account. + WithContext(qctx *QueryContext) QueryCompiler + + // WithEnablePrintStatements enables print statements in queries compiled + // with the QueryCompiler. + WithEnablePrintStatements(yes bool) QueryCompiler + + // WithUnsafeBuiltins sets the built-in functions to treat as unsafe and not + // allow inside of queries. By default the query compiler inherits the + // compiler's unsafe built-in functions. This function allows callers to + // override that set. If an empty (non-nil) map is provided, all built-ins + // are allowed. + WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler + + // WithStageAfter registers a stage to run during query compilation after + // the named stage. + WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler + + // RewrittenVars maps generated vars in the compiled query to vars from the + // parsed query. For example, given the query "input := 1" the rewritten + // query would be "__local0__ = 1". The mapping would then be {__local0__: input}. + RewrittenVars() map[Var]Var + + // ComprehensionIndex returns an index data structure for the given comprehension + // term. If no index is found, returns nil. + ComprehensionIndex(term *Term) *ComprehensionIndex + + // WithStrict enables strict mode for the query compiler. + WithStrict(strict bool) QueryCompiler +} + +// QueryCompilerStage defines the interface for stages in the query compiler. +type QueryCompilerStage func(QueryCompiler, Body) (Body, error) + +// QueryCompilerStageDefinition defines a QueryCompiler stage +type QueryCompilerStageDefinition struct { + Name string + MetricName string + Stage QueryCompilerStage +} + +type stage struct { + name string + metricName string + f func() +} + +// NewCompiler returns a new empty compiler. 
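+//
+// A minimal usage sketch (illustrative only; the module name and policy are
+// hypothetical, not taken from OPA's own documentation):
+//
+//	c := NewCompiler()
+//	c.Compile(map[string]*Module{
+//		"example.rego": MustParseModule("package example\n\nallow := input.user == \"admin\"\n"),
+//	})
+//	if c.Failed() {
+//		// inspect c.Errors for details
+//	}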
+func NewCompiler() *Compiler { + + c := &Compiler{ + Modules: map[string]*Module{}, + RewrittenVars: map[Var]Var{}, + Required: &Capabilities{}, + ruleIndices: util.NewHashMap(func(a, b util.T) bool { + r1, r2 := a.(Ref), b.(Ref) + return r1.Equal(r2) + }, func(x util.T) int { + return x.(Ref).Hash() + }), + maxErrs: CompileErrorLimitDefault, + after: map[string][]CompilerStageDefinition{}, + unsafeBuiltinsMap: map[string]struct{}{}, + deprecatedBuiltinsMap: map[string]struct{}{}, + comprehensionIndices: map[*Term]*ComprehensionIndex{}, + debug: debug.Discard(), + defaultRegoVersion: DefaultRegoVersion, + } + + c.ModuleTree = NewModuleTree(nil) + c.RuleTree = NewRuleTree(c.ModuleTree) + + c.stages = []stage{ + // Reference resolution should run first as it may be used to lazily + // load additional modules. If any stages run before resolution, they + // need to be re-run after resolution. + {"ResolveRefs", "compile_stage_resolve_refs", c.resolveAllRefs}, + // The local variable generator must be initialized after references are + // resolved and the dynamic module loader has run but before subsequent + // stages that need to generate variables. + {"InitLocalVarGen", "compile_stage_init_local_var_gen", c.initLocalVarGen}, + {"RewriteRuleHeadRefs", "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs}, + {"CheckKeywordOverrides", "compile_stage_check_keyword_overrides", c.checkKeywordOverrides}, + {"CheckDuplicateImports", "compile_stage_check_imports", c.checkImports}, + {"RemoveImports", "compile_stage_remove_imports", c.removeImports}, + {"SetModuleTree", "compile_stage_set_module_tree", c.setModuleTree}, + {"SetRuleTree", "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs + {"RewriteLocalVars", "compile_stage_rewrite_local_vars", c.rewriteLocalVars}, + {"CheckVoidCalls", "compile_stage_check_void_calls", c.checkVoidCalls}, + {"RewritePrintCalls", "compile_stage_rewrite_print_calls", c.rewritePrintCalls}, + {"RewriteExprTerms", "compile_stage_rewrite_expr_terms", c.rewriteExprTerms}, + {"ParseMetadataBlocks", "compile_stage_parse_metadata_blocks", c.parseMetadataBlocks}, + {"SetAnnotationSet", "compile_stage_set_annotationset", c.setAnnotationSet}, + {"RewriteRegoMetadataCalls", "compile_stage_rewrite_rego_metadata_calls", c.rewriteRegoMetadataCalls}, + {"SetGraph", "compile_stage_set_graph", c.setGraph}, + {"RewriteComprehensionTerms", "compile_stage_rewrite_comprehension_terms", c.rewriteComprehensionTerms}, + {"RewriteRefsInHead", "compile_stage_rewrite_refs_in_head", c.rewriteRefsInHead}, + {"RewriteWithValues", "compile_stage_rewrite_with_values", c.rewriteWithModifiers}, + {"CheckRuleConflicts", "compile_stage_check_rule_conflicts", c.checkRuleConflicts}, + {"CheckUndefinedFuncs", "compile_stage_check_undefined_funcs", c.checkUndefinedFuncs}, + {"CheckSafetyRuleHeads", "compile_stage_check_safety_rule_heads", c.checkSafetyRuleHeads}, + {"CheckSafetyRuleBodies", "compile_stage_check_safety_rule_bodies", c.checkSafetyRuleBodies}, + {"RewriteEquals", "compile_stage_rewrite_equals", c.rewriteEquals}, + {"RewriteDynamicTerms", "compile_stage_rewrite_dynamic_terms", c.rewriteDynamicTerms}, + {"RewriteTestRulesForTracing", "compile_stage_rewrite_test_rules_for_tracing", c.rewriteTestRuleEqualities}, // must run after RewriteDynamicTerms + {"CheckRecursion", "compile_stage_check_recursion", c.checkRecursion}, + {"CheckTypes", "compile_stage_check_types", c.checkTypes}, // must be run after CheckRecursion + {"CheckUnsafeBuiltins", 
"compile_state_check_unsafe_builtins", c.checkUnsafeBuiltins}, + {"CheckDeprecatedBuiltins", "compile_state_check_deprecated_builtins", c.checkDeprecatedBuiltins}, + {"BuildRuleIndices", "compile_stage_rebuild_indices", c.buildRuleIndices}, + {"BuildComprehensionIndices", "compile_stage_rebuild_comprehension_indices", c.buildComprehensionIndices}, + {"BuildRequiredCapabilities", "compile_stage_build_required_capabilities", c.buildRequiredCapabilities}, + } + + return c +} + +// SetErrorLimit sets the number of errors the compiler can encounter before it +// quits. Zero or a negative number indicates no limit. +func (c *Compiler) SetErrorLimit(limit int) *Compiler { + c.maxErrs = limit + return c +} + +// WithEnablePrintStatements enables print statements inside of modules compiled +// by the compiler. If print statements are not enabled, calls to print() are +// erased at compile-time. +func (c *Compiler) WithEnablePrintStatements(yes bool) *Compiler { + c.enablePrintStatements = yes + return c +} + +// WithPathConflictsCheck enables base-virtual document conflict +// detection. The compiler will check that rules don't overlap with +// paths that exist as determined by the provided callable. +func (c *Compiler) WithPathConflictsCheck(fn func([]string) (bool, error)) *Compiler { + c.pathExists = fn + return c +} + +// WithPathConflictsCheckRoots enables checking path conflicts from the specified root instead +// of the top root node. Limiting conflict checks to a known set of roots, such as bundle roots, +// improves performance. Each root has the format of a "/"-delimited string, excluding the "data" +// root document. +func (c *Compiler) WithPathConflictsCheckRoots(rootPaths []string) *Compiler { + c.pathConflictCheckRoots = rootPaths + return c +} + +// WithStageAfter registers a stage to run during compilation after +// the named stage. +func (c *Compiler) WithStageAfter(after string, stage CompilerStageDefinition) *Compiler { + c.after[after] = append(c.after[after], stage) + return c +} + +// WithMetrics will set a metrics.Metrics and be used for profiling +// the Compiler instance. +func (c *Compiler) WithMetrics(metrics metrics.Metrics) *Compiler { + c.metrics = metrics + return c +} + +// WithCapabilities sets capabilities to enable during compilation. Capabilities allow the caller +// to specify the set of built-in functions available to the policy. In the future, capabilities +// may be able to restrict access to other language features. Capabilities allow callers to check +// if policies are compatible with a particular version of OPA. If policies are a compiled for a +// specific version of OPA, there is no guarantee that _this_ version of OPA can evaluate them +// successfully. +func (c *Compiler) WithCapabilities(capabilities *Capabilities) *Compiler { + c.capabilities = capabilities + return c +} + +// Capabilities returns the capabilities enabled during compilation. +func (c *Compiler) Capabilities() *Capabilities { + return c.capabilities +} + +// WithDebug sets where debug messages are written to. Passing `nil` has no +// effect. +func (c *Compiler) WithDebug(sink io.Writer) *Compiler { + if sink != nil { + c.debug = debug.New(sink) + } + return c +} + +// WithBuiltins is deprecated. +// Deprecated: Use WithCapabilities instead. +func (c *Compiler) WithBuiltins(builtins map[string]*Builtin) *Compiler { + c.customBuiltins = maps.Clone(builtins) + return c +} + +// WithUnsafeBuiltins is deprecated. +// Deprecated: Use WithCapabilities instead. 
+func (c *Compiler) WithUnsafeBuiltins(unsafeBuiltins map[string]struct{}) *Compiler { + maps.Copy(c.unsafeBuiltinsMap, unsafeBuiltins) + return c +} + +// WithStrict toggles strict mode in the compiler. +func (c *Compiler) WithStrict(strict bool) *Compiler { + c.strict = strict + return c +} + +// WithKeepModules enables retaining unprocessed modules in the compiler. +// Note that the modules aren't copied on the way in or out -- so when +// accessing them via ParsedModules(), mutations will occur in the module +// map that was passed into Compile().` +func (c *Compiler) WithKeepModules(y bool) *Compiler { + c.keepModules = y + return c +} + +// WithUseTypeCheckAnnotations use schema annotations during type checking +func (c *Compiler) WithUseTypeCheckAnnotations(enabled bool) *Compiler { + c.useTypeCheckAnnotations = enabled + return c +} + +func (c *Compiler) WithAllowUndefinedFunctionCalls(allow bool) *Compiler { + c.allowUndefinedFuncCalls = allow + return c +} + +// WithEvalMode allows setting the CompilerEvalMode of the compiler +func (c *Compiler) WithEvalMode(e CompilerEvalMode) *Compiler { + c.evalMode = e + return c +} + +// WithRewriteTestRules enables rewriting test rules to capture dynamic values in local variables, +// so they can be accessed by tracing. +func (c *Compiler) WithRewriteTestRules(rewrite bool) *Compiler { + c.rewriteTestRulesForTracing = rewrite + return c +} + +// ParsedModules returns the parsed, unprocessed modules from the compiler. +// It is `nil` if keeping modules wasn't enabled via `WithKeepModules(true)`. +// The map includes all modules loaded via the ModuleLoader, if one was used. +func (c *Compiler) ParsedModules() map[string]*Module { + return c.parsedModules +} + +func (c *Compiler) QueryCompiler() QueryCompiler { + c.init() + c0 := *c + return newQueryCompiler(&c0) +} + +// Compile runs the compilation process on the input modules. The compiled +// version of the modules and associated data structures are stored on the +// compiler. If the compilation process fails for any reason, the compiler will +// contain a slice of errors. +func (c *Compiler) Compile(modules map[string]*Module) { + + c.init() + + c.Modules = make(map[string]*Module, len(modules)) + c.sorted = make([]string, 0, len(modules)) + + if c.keepModules { + c.parsedModules = make(map[string]*Module, len(modules)) + } else { + c.parsedModules = nil + } + + for k, v := range modules { + c.Modules[k] = v.Copy() + c.sorted = append(c.sorted, k) + if c.parsedModules != nil { + c.parsedModules[k] = v + } + } + + sort.Strings(c.sorted) + + c.compile() +} + +// WithSchemas sets a schemaSet to the compiler +func (c *Compiler) WithSchemas(schemas *SchemaSet) *Compiler { + c.schemaSet = schemas + return c +} + +// Failed returns true if a compilation error has been encountered. +func (c *Compiler) Failed() bool { + return len(c.Errors) > 0 +} + +// ComprehensionIndex returns a data structure specifying how to index comprehension +// results so that callers do not have to recompute the comprehension more than once. +// If no index is found, returns nil. +func (c *Compiler) ComprehensionIndex(term *Term) *ComprehensionIndex { + return c.comprehensionIndices[term] +} + +// GetArity returns the number of args a function referred to by ref takes. If +// ref refers to built-in function, the built-in declaration is consulted, +// otherwise, the ref is used to perform a ruleset lookup. 
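+//
+// Illustrative behaviour (not part of the upstream godoc): the built-in
+// "count" yields 1, a function rule such as "f(x, y) := x + y" yields 2, and
+// a ref that matches neither a built-in nor a function rule yields -1.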
+func (c *Compiler) GetArity(ref Ref) int { + if bi := c.builtins[ref.String()]; bi != nil { + return bi.Decl.Arity() + } + rules := c.GetRulesExact(ref) + if len(rules) == 0 { + return -1 + } + return len(rules[0].Head.Args) +} + +// GetRulesExact returns a slice of rules referred to by the reference. +// +// E.g., given the following module: +// +// package a.b.c +// +// p[k] = v { ... } # rule1 +// p[k1] = v1 { ... } # rule2 +// +// The following calls yield the rules on the right. +// +// GetRulesExact("data.a.b.c.p") => [rule1, rule2] +// GetRulesExact("data.a.b.c.p.x") => nil +// GetRulesExact("data.a.b.c") => nil +func (c *Compiler) GetRulesExact(ref Ref) (rules []*Rule) { + node := c.RuleTree + + for _, x := range ref { + if node = node.Child(x.Value); node == nil { + return nil + } + } + + return extractRules(node.Values) +} + +// GetRulesForVirtualDocument returns a slice of rules that produce the virtual +// document referred to by the reference. +// +// E.g., given the following module: +// +// package a.b.c +// +// p[k] = v { ... } # rule1 +// p[k1] = v1 { ... } # rule2 +// +// The following calls yield the rules on the right. +// +// GetRulesForVirtualDocument("data.a.b.c.p") => [rule1, rule2] +// GetRulesForVirtualDocument("data.a.b.c.p.x") => [rule1, rule2] +// GetRulesForVirtualDocument("data.a.b.c") => nil +func (c *Compiler) GetRulesForVirtualDocument(ref Ref) (rules []*Rule) { + + node := c.RuleTree + + for _, x := range ref { + if node = node.Child(x.Value); node == nil { + return nil + } + if len(node.Values) > 0 { + return extractRules(node.Values) + } + } + + return extractRules(node.Values) +} + +// GetRulesWithPrefix returns a slice of rules that share the prefix ref. +// +// E.g., given the following module: +// +// package a.b.c +// +// p[x] = y { ... } # rule1 +// p[k] = v { ... } # rule2 +// q { ... } # rule3 +// +// The following calls yield the rules on the right. +// +// GetRulesWithPrefix("data.a.b.c.p") => [rule1, rule2] +// GetRulesWithPrefix("data.a.b.c.p.a") => nil +// GetRulesWithPrefix("data.a.b.c") => [rule1, rule2, rule3] +func (c *Compiler) GetRulesWithPrefix(ref Ref) (rules []*Rule) { + + node := c.RuleTree + + for _, x := range ref { + if node = node.Child(x.Value); node == nil { + return nil + } + } + + var acc func(node *TreeNode) + + acc = func(node *TreeNode) { + rules = append(rules, extractRules(node.Values)...) + for _, child := range node.Children { + if child.Hide { + continue + } + acc(child) + } + } + + acc(node) + + return rules +} + +func extractRules(s []any) []*Rule { + rules := make([]*Rule, len(s)) + for i := range s { + rules[i] = s[i].(*Rule) + } + return rules +} + +// GetRules returns a slice of rules that are referred to by ref. +// +// E.g., given the following module: +// +// package a.b.c +// +// p[x] = y { q[x] = y; ... } # rule1 +// q[x] = y { ... } # rule2 +// +// The following calls yield the rules on the right. 
+// +// GetRules("data.a.b.c.p") => [rule1] +// GetRules("data.a.b.c.p.x") => [rule1] +// GetRules("data.a.b.c.q") => [rule2] +// GetRules("data.a.b.c") => [rule1, rule2] +// GetRules("data.a.b.d") => nil +func (c *Compiler) GetRules(ref Ref) (rules []*Rule) { + + set := map[*Rule]struct{}{} + + for _, rule := range c.GetRulesForVirtualDocument(ref) { + set[rule] = struct{}{} + } + + for _, rule := range c.GetRulesWithPrefix(ref) { + set[rule] = struct{}{} + } + + for rule := range set { + rules = append(rules, rule) + } + + return rules +} + +// GetRulesDynamic returns a slice of rules that could be referred to by a ref. +// +// Deprecated: use GetRulesDynamicWithOpts +func (c *Compiler) GetRulesDynamic(ref Ref) []*Rule { + return c.GetRulesDynamicWithOpts(ref, RulesOptions{}) +} + +// GetRulesDynamicWithOpts returns a slice of rules that could be referred to by +// a ref. +// When parts of the ref are statically known, we use that information to narrow +// down which rules the ref could refer to, but in the most general case this +// will be an over-approximation. +// +// E.g., given the following modules: +// +// package a.b.c +// +// r1 = 1 # rule1 +// +// and: +// +// package a.d.c +// +// r2 = 2 # rule2 +// +// The following calls yield the rules on the right. +// +// GetRulesDynamicWithOpts("data.a[x].c[y]", opts) => [rule1, rule2] +// GetRulesDynamicWithOpts("data.a[x].c.r2", opts) => [rule2] +// GetRulesDynamicWithOpts("data.a.b[x][y]", opts) => [rule1] +// +// Using the RulesOptions parameter, the inclusion of hidden modules can be +// controlled: +// +// With +// +// package system.main +// +// r3 = 3 # rule3 +// +// We'd get this result: +// +// GetRulesDynamicWithOpts("data[x]", RulesOptions{IncludeHiddenModules: true}) => [rule1, rule2, rule3] +// +// Without the options, it would be excluded. +func (c *Compiler) GetRulesDynamicWithOpts(ref Ref, opts RulesOptions) []*Rule { + node := c.RuleTree + + set := map[*Rule]struct{}{} + var walk func(node *TreeNode, i int) + walk = func(node *TreeNode, i int) { + switch { + case i >= len(ref): + // We've reached the end of the reference and want to collect everything + // under this "prefix". + node.DepthFirst(func(descendant *TreeNode) bool { + insertRules(set, descendant.Values) + if opts.IncludeHiddenModules { + return false + } + return descendant.Hide + }) + + case i == 0 || IsConstant(ref[i].Value): + // The head of the ref is always grounded. In case another part of the + // ref is also grounded, we can lookup the exact child. If it's not found + // we can immediately return... + if child := node.Child(ref[i].Value); child != nil { + if len(child.Values) > 0 { + // Add any rules at this position + insertRules(set, child.Values) + } + // There might still be "sub-rules" contributing key-value "overrides" for e.g. partial object rules, continue walking + walk(child, i+1) + } else { + return + } + + default: + // This part of the ref is a dynamic term. We can't know what it refers + // to and will just need to try all of the children. + for _, child := range node.Children { + if child.Hide && !opts.IncludeHiddenModules { + continue + } + insertRules(set, child.Values) + walk(child, i+1) + } + } + } + + walk(node, 0) + rules := make([]*Rule, 0, len(set)) + for rule := range set { + rules = append(rules, rule) + } + return rules +} + +// Utility: add all rule values to the set. 
+func insertRules(set map[*Rule]struct{}, rules []any) { + for _, rule := range rules { + set[rule.(*Rule)] = struct{}{} + } +} + +// RuleIndex returns a RuleIndex built for the rule set referred to by path. +// The path must refer to the rule set exactly, i.e., given a rule set at path +// data.a.b.c.p, refs data.a.b.c.p.x and data.a.b.c would not return a +// RuleIndex built for the rule. +func (c *Compiler) RuleIndex(path Ref) RuleIndex { + r, ok := c.ruleIndices.Get(path) + if !ok { + return nil + } + return r.(RuleIndex) +} + +// PassesTypeCheck determines whether the given body passes type checking +func (c *Compiler) PassesTypeCheck(body Body) bool { + checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType) + env := c.TypeEnv + _, errs := checker.CheckBody(env, body) + return len(errs) == 0 +} + +// PassesTypeCheckRules determines whether the given rules passes type checking +func (c *Compiler) PassesTypeCheckRules(rules []*Rule) Errors { + elems := []util.T{} + + for _, rule := range rules { + elems = append(elems, rule) + } + + // Load the global input schema if one was provided. + if c.schemaSet != nil { + if schema := c.schemaSet.Get(SchemaRootRef); schema != nil { + + var allowNet []string + if c.capabilities != nil { + allowNet = c.capabilities.AllowNet + } + + tpe, err := loadSchema(schema, allowNet) + if err != nil { + return Errors{NewError(TypeErr, nil, err.Error())} //nolint:govet + } + c.inputType = tpe + } + } + + var as *AnnotationSet + if c.useTypeCheckAnnotations { + as = c.annotationSet + } + + checker := newTypeChecker().WithSchemaSet(c.schemaSet).WithInputType(c.inputType) + + if c.TypeEnv == nil { + if c.capabilities == nil { + c.capabilities = CapabilitiesForThisVersion() + } + + c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins)) + + for _, bi := range c.capabilities.Builtins { + c.builtins[bi.Name] = bi + } + + for name, bi := range c.customBuiltins { + c.builtins[name] = bi + } + + c.TypeEnv = checker.Env(c.builtins) + } + + _, errs := checker.CheckTypes(c.TypeEnv, elems, as) + return errs +} + +// ModuleLoader defines the interface that callers can implement to enable lazy +// loading of modules during compilation. +type ModuleLoader func(resolved map[string]*Module) (parsed map[string]*Module, err error) + +// WithModuleLoader sets f as the ModuleLoader on the compiler. +// +// The compiler will invoke the ModuleLoader after resolving all references in +// the current set of input modules. The ModuleLoader can return a new +// collection of parsed modules that are to be included in the compilation +// process. This process will repeat until the ModuleLoader returns an empty +// collection or an error. If an error is returned, compilation will stop +// immediately. +func (c *Compiler) WithModuleLoader(f ModuleLoader) *Compiler { + c.moduleLoader = f + return c +} + +// WithDefaultRegoVersion sets the default Rego version to use when a module doesn't specify one; +// such as when it's hand-crafted instead of parsed. 
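+//
+// Hypothetical call site: compiler.WithDefaultRegoVersion(RegoV0) would treat
+// hand-crafted modules that carry no explicit version as Rego v0.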
+func (c *Compiler) WithDefaultRegoVersion(regoVersion RegoVersion) *Compiler { + c.defaultRegoVersion = regoVersion + return c +} + +func (c *Compiler) counterAdd(name string, n uint64) { + if c.metrics == nil { + return + } + c.metrics.Counter(name).Add(n) +} + +func (c *Compiler) buildRuleIndices() { + + c.RuleTree.DepthFirst(func(node *TreeNode) bool { + if len(node.Values) == 0 { + return false + } + rules := extractRules(node.Values) + hasNonGroundRef := false + for _, r := range rules { + hasNonGroundRef = !r.Head.Ref().IsGround() + } + if hasNonGroundRef { + // Collect children to ensure that all rules within the extent of a rule with a general ref + // are found on the same index. E.g. the following rules should be indexed under data.a.b.c: + // + // package a + // b.c[x].e := 1 { x := input.x } + // b.c.d := 2 + // b.c.d2.e[x] := 3 { x := input.x } + for _, child := range node.Children { + child.DepthFirst(func(c *TreeNode) bool { + rules = append(rules, extractRules(c.Values)...) + return false + }) + } + } + + index := newBaseDocEqIndex(func(ref Ref) bool { + return isVirtual(c.RuleTree, ref.GroundPrefix()) + }) + if index.Build(rules) { + c.ruleIndices.Put(rules[0].Ref().GroundPrefix(), index) + } + return hasNonGroundRef // currently, we don't allow those branches to go deeper + }) + +} + +func (c *Compiler) buildComprehensionIndices() { + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(r *Rule) bool { + candidates := r.Head.Args.Vars() + candidates.Update(ReservedVars) + n := buildComprehensionIndices(c.debug, c.GetArity, candidates, c.RewrittenVars, r.Body, c.comprehensionIndices) + c.counterAdd(compileStageComprehensionIndexBuild, n) + return false + }) + } +} + +var ( + keywordsTerm = StringTerm("keywords") + pathTerm = StringTerm("path") + annotationsTerm = StringTerm("annotations") + futureKeywordsPrefix = Ref{FutureRootDocument, keywordsTerm} +) + +// buildRequiredCapabilities updates the required capabilities on the compiler +// to include any keyword and feature dependencies present in the modules. The +// built-in function dependencies will have already been added by the type +// checker. 
+func (c *Compiler) buildRequiredCapabilities() { + + features := map[string]struct{}{} + + // extract required keywords from modules + + keywords := map[string]struct{}{} + + for _, name := range c.sorted { + for _, imp := range c.imports[name] { + mod := c.Modules[name] + path := imp.Path.Value.(Ref) + switch { + case path.Equal(RegoV1CompatibleRef): + if !c.moduleIsRegoV1(mod) { + features[FeatureRegoV1Import] = struct{}{} + } + case path.HasPrefix(futureKeywordsPrefix): + if len(path) == 2 { + if c.moduleIsRegoV1(mod) { + for kw := range futureKeywords { + keywords[kw] = struct{}{} + } + } else { + for kw := range allFutureKeywords { + keywords[kw] = struct{}{} + } + } + } else { + kw := string(path[2].Value.(String)) + if c.moduleIsRegoV1(mod) { + for allowedKw := range futureKeywords { + if kw == allowedKw { + keywords[kw] = struct{}{} + break + } + } + } else { + for allowedKw := range allFutureKeywords { + if kw == allowedKw { + keywords[kw] = struct{}{} + break + } + } + } + } + } + } + } + + c.Required.FutureKeywords = util.KeysSorted(keywords) + + // extract required features from modules + + for _, name := range c.sorted { + mod := c.Modules[name] + + if c.moduleIsRegoV1(mod) { + features[FeatureRegoV1] = struct{}{} + } else { + for _, rule := range mod.Rules { + refLen := len(rule.Head.Reference) + if refLen >= 3 { + if refLen > len(rule.Head.Reference.ConstantPrefix()) { + features[FeatureRefHeads] = struct{}{} + } else { + features[FeatureRefHeadStringPrefixes] = struct{}{} + } + } + } + } + } + + c.Required.Features = util.KeysSorted(features) + + for i, bi := range c.Required.Builtins { + c.Required.Builtins[i] = bi.Minimal() + } +} + +// checkRecursion ensures that there are no recursive definitions, i.e., there are +// no cycles in the Graph. +func (c *Compiler) checkRecursion() { + eq := func(a, b util.T) bool { + return a.(*Rule) == b.(*Rule) + } + + c.RuleTree.DepthFirst(func(node *TreeNode) bool { + for _, rule := range node.Values { + for node := rule.(*Rule); node != nil; node = node.Else { + c.checkSelfPath(node.Loc(), eq, node, node) + } + } + return false + }) +} + +func (c *Compiler) checkSelfPath(loc *Location, eq func(a, b util.T) bool, a, b util.T) { + tr := NewGraphTraversal(c.Graph) + if p := util.DFSPath(tr, eq, a, b); len(p) > 0 { + n := make([]string, 0, len(p)) + for _, x := range p { + n = append(n, astNodeToString(x)) + } + c.err(NewError(RecursionErr, loc, "rule %v is recursive: %v", astNodeToString(a), strings.Join(n, " -> "))) + } +} + +func astNodeToString(x interface{}) string { + return x.(*Rule).Ref().String() +} + +// checkRuleConflicts ensures that rules definitions are not in conflict. +func (c *Compiler) checkRuleConflicts() { + rw := rewriteVarsInRef(c.RewrittenVars) + + c.RuleTree.DepthFirst(func(node *TreeNode) bool { + if len(node.Values) == 0 { + return false // go deeper + } + + kinds := make(map[RuleKind]struct{}, len(node.Values)) + completeRules := 0 + partialRules := 0 + arities := make(map[int]struct{}, len(node.Values)) + name := "" + var conflicts []Ref + defaultRules := make([]*Rule, 0) + + for _, rule := range node.Values { + r := rule.(*Rule) + ref := r.Ref() + name = rw(ref.Copy()).String() // varRewriter operates in-place + kinds[r.Head.RuleKind()] = struct{}{} + arities[len(r.Head.Args)] = struct{}{} + if r.Default { + defaultRules = append(defaultRules, r) + } + + // Single-value rules may not have any other rules in their extent. + // Rules with vars in their ref are allowed to have rules inside their extent. 
+ // Only the ground portion (terms before the first var term) of a rule's ref is considered when determining + // whether it's inside the extent of another (c.RuleTree is organized this way already). + // These pairs are invalid: + // + // data.p.q.r { true } # data.p.q is { "r": true } + // data.p.q.r.s { true } + // + // data.p.q.r { true } + // data.p.q.r[s].t { s = input.key } + // + // But this is allowed: + // + // data.p.q.r { true } + // data.p.q[r].s.t { r = input.key } + // + // data.p[r] := x { r = input.key; x = input.bar } + // data.p.q[r] := x { r = input.key; x = input.bar } + // + // data.p.q[r] { r := input.r } + // data.p.q.r.s { true } + // + // data.p.q[r] = 1 { r := "r" } + // data.p.q.s = 2 + // + // data.p[q][r] { q := input.q; r := input.r } + // data.p.q.r { true } + // + // data.p.q[r] { r := input.r } + // data.p[q].r { q := input.q } + // + // data.p.q[r][s] { r := input.r; s := input.s } + // data.p[q].r.s { q := input.q } + + if r.Ref().IsGround() && len(node.Children) > 0 { + conflicts = node.flattenChildren() + } + + if r.Head.RuleKind() == SingleValue && r.Head.Ref().IsGround() { + completeRules++ + } else { + partialRules++ + } + } + + switch { + case conflicts != nil: + c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "rule %v conflicts with %v", name, conflicts)) + + case len(kinds) > 1 || len(arities) > 1 || (completeRules >= 1 && partialRules >= 1): + c.err(NewError(TypeErr, node.Values[0].(*Rule).Loc(), "conflicting rules %v found", name)) + + case len(defaultRules) > 1: + + defaultRuleLocations := strings.Builder{} + defaultRuleLocations.WriteString(defaultRules[0].Loc().String()) + for i := 1; i < len(defaultRules); i++ { + defaultRuleLocations.WriteString(", ") + defaultRuleLocations.WriteString(defaultRules[i].Loc().String()) + } + + c.err(NewError( + TypeErr, + defaultRules[0].Module.Package.Loc(), + "multiple default rules %s found at %s", + name, defaultRuleLocations.String()), + ) + } + + return false + }) + + if c.pathExists != nil { + for _, err := range CheckPathConflicts(c, c.pathExists) { + c.err(err) + } + } + + // NOTE(sr): depthfirst might better use sorted for stable errs? + c.ModuleTree.DepthFirst(func(node *ModuleTreeNode) bool { + for _, mod := range node.Modules { + for _, rule := range mod.Rules { + ref := rule.Head.Ref().GroundPrefix() + // Rules with a dynamic portion in their ref are exempted, as a conflict within the dynamic portion + // can only be detected at eval-time. + if len(ref) < len(rule.Head.Ref()) { + continue + } + + childNode, tail := node.find(ref) + if childNode != nil && len(tail) == 0 { + for _, childMod := range childNode.Modules { + // Avoid recursively checking a module for equality unless we know it's a possible self-match. 
+ if childMod.Equal(mod) { + continue // don't self-conflict + } + msg := fmt.Sprintf("%v conflicts with rule %v defined at %v", childMod.Package, rule.Head.Ref(), rule.Loc()) + c.err(NewError(TypeErr, mod.Package.Loc(), msg)) //nolint:govet + } + } + } + } + return false + }) +} + +func (c *Compiler) checkUndefinedFuncs() { + for _, name := range c.sorted { + m := c.Modules[name] + for _, err := range checkUndefinedFuncs(c.TypeEnv, m, c.GetArity, c.RewrittenVars) { + c.err(err) + } + } +} + +func checkUndefinedFuncs(env *TypeEnv, x interface{}, arity func(Ref) int, rwVars map[Var]Var) Errors { + + var errs Errors + + WalkExprs(x, func(expr *Expr) bool { + if !expr.IsCall() { + return false + } + ref := expr.Operator() + if arity := arity(ref); arity >= 0 { + operands := len(expr.Operands()) + if expr.Generated { // an output var was added + if !expr.IsEquality() && operands != arity+1 { + ref = rewriteVarsInRef(rwVars)(ref) + errs = append(errs, arityMismatchError(env, ref, expr, arity, operands-1)) + return true + } + } else { // either output var or not + if operands != arity && operands != arity+1 { + ref = rewriteVarsInRef(rwVars)(ref) + errs = append(errs, arityMismatchError(env, ref, expr, arity, operands)) + return true + } + } + return false + } + ref = rewriteVarsInRef(rwVars)(ref) + errs = append(errs, NewError(TypeErr, expr.Loc(), "undefined function %v", ref)) + return true + }) + + return errs +} + +func arityMismatchError(env *TypeEnv, f Ref, expr *Expr, exp, act int) *Error { + if want, ok := env.Get(f).(*types.Function); ok { // generate richer error for built-in functions + have := make([]types.Type, len(expr.Operands())) + for i, op := range expr.Operands() { + have[i] = env.Get(op) + } + return newArgError(expr.Loc(), f, "arity mismatch", have, want.NamedFuncArgs()) + } + if act != 1 { + return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d arguments", f, exp, act) + } + return NewError(TypeErr, expr.Loc(), "function %v has arity %d, got %d argument", f, exp, act) +} + +// checkSafetyRuleBodies ensures that variables appearing in negated expressions or non-target +// positions of built-in expressions will be bound when evaluating the rule from left +// to right, re-ordering as necessary. +func (c *Compiler) checkSafetyRuleBodies() { + for _, name := range c.sorted { + m := c.Modules[name] + WalkRules(m, func(r *Rule) bool { + safe := ReservedVars.Copy() + safe.Update(r.Head.Args.Vars()) + r.Body = c.checkBodySafety(safe, r.Body) + return false + }) + } +} + +func (c *Compiler) checkBodySafety(safe VarSet, b Body) Body { + reordered, unsafe := reorderBodyForSafety(c.builtins, c.GetArity, safe, b) + if errs := safetyErrorSlice(unsafe, c.RewrittenVars); len(errs) > 0 { + for _, err := range errs { + c.err(err) + } + return b + } + return reordered +} + +// SafetyCheckVisitorParams defines the AST visitor parameters to use for collecting +// variables during the safety check. This has to be exported because it's relied on +// by the copy propagation implementation in topdown. +var SafetyCheckVisitorParams = VarVisitorParams{ + SkipRefCallHead: true, + SkipClosures: true, +} + +// checkSafetyRuleHeads ensures that variables appearing in the head of a +// rule also appear in the body. 
+func (c *Compiler) checkSafetyRuleHeads() { + + for _, name := range c.sorted { + m := c.Modules[name] + WalkRules(m, func(r *Rule) bool { + safe := r.Body.Vars(SafetyCheckVisitorParams) + safe.Update(r.Head.Args.Vars()) + unsafe := r.Head.Vars().Diff(safe) + for v := range unsafe { + if w, ok := c.RewrittenVars[v]; ok { + v = w + } + if !v.IsGenerated() { + c.err(NewError(UnsafeVarErr, r.Loc(), "var %v is unsafe", v)) + } + } + return false + }) + } +} + +func compileSchema(goSchema interface{}, allowNet []string) (*gojsonschema.Schema, error) { + gojsonschema.SetAllowNet(allowNet) + + var refLoader gojsonschema.JSONLoader + sl := gojsonschema.NewSchemaLoader() + + if goSchema != nil { + refLoader = gojsonschema.NewGoLoader(goSchema) + } else { + return nil, fmt.Errorf("no schema as input to compile") + } + schemasCompiled, err := sl.Compile(refLoader) + if err != nil { + return nil, fmt.Errorf("unable to compile the schema: %w", err) + } + return schemasCompiled, nil +} + +func mergeSchemas(schemas ...*gojsonschema.SubSchema) (*gojsonschema.SubSchema, error) { + if len(schemas) == 0 { + return nil, nil + } + var result = schemas[0] + + for i := range schemas { + if len(schemas[i].PropertiesChildren) > 0 { + if !schemas[i].Types.Contains("object") { + if err := schemas[i].Types.Add("object"); err != nil { + return nil, fmt.Errorf("unable to set the type in schemas") + } + } + } else if len(schemas[i].ItemsChildren) > 0 { + if !schemas[i].Types.Contains("array") { + if err := schemas[i].Types.Add("array"); err != nil { + return nil, fmt.Errorf("unable to set the type in schemas") + } + } + } + } + + for i := 1; i < len(schemas); i++ { + if result.Types.String() != schemas[i].Types.String() { + return nil, fmt.Errorf("unable to merge these schemas: type mismatch: %v and %v", result.Types.String(), schemas[i].Types.String()) + } else if result.Types.Contains("object") && len(result.PropertiesChildren) > 0 && schemas[i].Types.Contains("object") && len(schemas[i].PropertiesChildren) > 0 { + result.PropertiesChildren = append(result.PropertiesChildren, schemas[i].PropertiesChildren...) 
+ } else if result.Types.Contains("array") && len(result.ItemsChildren) > 0 && schemas[i].Types.Contains("array") && len(schemas[i].ItemsChildren) > 0 { + for j := 0; j < len(schemas[i].ItemsChildren); j++ { + if len(result.ItemsChildren)-1 < j && !(len(schemas[i].ItemsChildren)-1 < j) { + result.ItemsChildren = append(result.ItemsChildren, schemas[i].ItemsChildren[j]) + } + if result.ItemsChildren[j].Types.String() != schemas[i].ItemsChildren[j].Types.String() { + return nil, fmt.Errorf("unable to merge these schemas") + } + } + } + } + return result, nil +} + +type schemaParser struct { + definitionCache map[string]*cachedDef +} + +type cachedDef struct { + properties []*types.StaticProperty +} + +func newSchemaParser() *schemaParser { + return &schemaParser{ + definitionCache: map[string]*cachedDef{}, + } +} + +func (parser *schemaParser) parseSchema(schema interface{}) (types.Type, error) { + return parser.parseSchemaWithPropertyKey(schema, "") +} + +func (parser *schemaParser) parseSchemaWithPropertyKey(schema interface{}, propertyKey string) (types.Type, error) { + subSchema, ok := schema.(*gojsonschema.SubSchema) + if !ok { + return nil, fmt.Errorf("unexpected schema type %v", subSchema) + } + + // Handle referenced schemas, returns directly when a $ref is found + if subSchema.RefSchema != nil { + if existing, ok := parser.definitionCache[subSchema.Ref.String()]; ok { + return types.NewObject(existing.properties, nil), nil + } + return parser.parseSchemaWithPropertyKey(subSchema.RefSchema, subSchema.Ref.String()) + } + + // Handle anyOf + if subSchema.AnyOf != nil { + var orType types.Type + + // If there is a core schema, find its type first + if subSchema.Types.IsTyped() { + copySchema := *subSchema + copySchemaRef := ©Schema + copySchemaRef.AnyOf = nil + coreType, err := parser.parseSchema(copySchemaRef) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v: %w", subSchema, err) + } + + // Only add Object type with static props to orType + if objType, ok := coreType.(*types.Object); ok { + if objType.StaticProperties() != nil && objType.DynamicProperties() == nil { + orType = types.Or(orType, coreType) + } + } + } + + // Iterate through every property of AnyOf and add it to orType + for _, pSchema := range subSchema.AnyOf { + newtype, err := parser.parseSchema(pSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err) + } + orType = types.Or(newtype, orType) + } + + return orType, nil + } + + if subSchema.AllOf != nil { + subSchemaArray := subSchema.AllOf + allOfResult, err := mergeSchemas(subSchemaArray...) 
+ if err != nil { + return nil, err + } + + if subSchema.Types.IsTyped() { + if (subSchema.Types.Contains("object") && allOfResult.Types.Contains("object")) || (subSchema.Types.Contains("array") && allOfResult.Types.Contains("array")) { + objectOrArrayResult, err := mergeSchemas(allOfResult, subSchema) + if err != nil { + return nil, err + } + return parser.parseSchema(objectOrArrayResult) + } else if subSchema.Types.String() != allOfResult.Types.String() { + return nil, fmt.Errorf("unable to merge these schemas") + } + } + return parser.parseSchema(allOfResult) + } + + if subSchema.Types.IsTyped() { + if subSchema.Types.Contains("boolean") { + return types.B, nil + + } else if subSchema.Types.Contains("string") { + return types.S, nil + + } else if subSchema.Types.Contains("integer") || subSchema.Types.Contains("number") { + return types.N, nil + + } else if subSchema.Types.Contains("object") { + if len(subSchema.PropertiesChildren) > 0 { + def := &cachedDef{ + properties: make([]*types.StaticProperty, 0, len(subSchema.PropertiesChildren)), + } + for _, pSchema := range subSchema.PropertiesChildren { + def.properties = append(def.properties, types.NewStaticProperty(pSchema.Property, nil)) + } + if propertyKey != "" { + parser.definitionCache[propertyKey] = def + } + for _, pSchema := range subSchema.PropertiesChildren { + newtype, err := parser.parseSchema(pSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v: %w", pSchema, err) + } + for i, prop := range def.properties { + if prop.Key == pSchema.Property { + def.properties[i].Value = newtype + break + } + } + } + return types.NewObject(def.properties, nil), nil + } + return types.NewObject(nil, types.NewDynamicProperty(types.A, types.A)), nil + + } else if subSchema.Types.Contains("array") { + if len(subSchema.ItemsChildren) > 0 { + if subSchema.ItemsChildrenIsSingleSchema { + iSchema := subSchema.ItemsChildren[0] + newtype, err := parser.parseSchema(iSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v", iSchema) + } + return types.NewArray(nil, newtype), nil + } + newTypes := make([]types.Type, 0, len(subSchema.ItemsChildren)) + for i := 0; i != len(subSchema.ItemsChildren); i++ { + iSchema := subSchema.ItemsChildren[i] + newtype, err := parser.parseSchema(iSchema) + if err != nil { + return nil, fmt.Errorf("unexpected schema type %v", iSchema) + } + newTypes = append(newTypes, newtype) + } + return types.NewArray(newTypes, nil), nil + } + return types.NewArray(nil, types.A), nil + } + } + + // Assume types if not specified in schema + if len(subSchema.PropertiesChildren) > 0 { + if err := subSchema.Types.Add("object"); err == nil { + return parser.parseSchema(subSchema) + } + } else if len(subSchema.ItemsChildren) > 0 { + if err := subSchema.Types.Add("array"); err == nil { + return parser.parseSchema(subSchema) + } + } + + return types.A, nil +} + +func (c *Compiler) setAnnotationSet() { + // Sorting modules by name for stable error reporting + sorted := make([]*Module, 0, len(c.Modules)) + for _, mName := range c.sorted { + sorted = append(sorted, c.Modules[mName]) + } + + as, errs := BuildAnnotationSet(sorted) + for _, err := range errs { + c.err(err) + } + c.annotationSet = as +} + +// checkTypes runs the type checker on all rules. The type checker builds a +// TypeEnv that is stored on the compiler. +func (c *Compiler) checkTypes() { + // Recursion is caught in earlier step, so this cannot fail. + sorted, _ := c.Graph.Sort() + checker := newTypeChecker(). 
+ WithAllowNet(c.capabilities.AllowNet). + WithSchemaSet(c.schemaSet). + WithInputType(c.inputType). + WithBuiltins(c.builtins). + WithRequiredCapabilities(c.Required). + WithVarRewriter(rewriteVarsInRef(c.RewrittenVars)). + WithAllowUndefinedFunctionCalls(c.allowUndefinedFuncCalls) + var as *AnnotationSet + if c.useTypeCheckAnnotations { + as = c.annotationSet + } + env, errs := checker.CheckTypes(c.TypeEnv, sorted, as) + for _, err := range errs { + c.err(err) + } + c.TypeEnv = env +} + +func (c *Compiler) checkUnsafeBuiltins() { + if len(c.unsafeBuiltinsMap) == 0 { + return + } + + for _, name := range c.sorted { + errs := checkUnsafeBuiltins(c.unsafeBuiltinsMap, c.Modules[name]) + for _, err := range errs { + c.err(err) + } + } +} + +func (c *Compiler) checkDeprecatedBuiltins() { + checkNeeded := false + for _, b := range c.Required.Builtins { + if _, found := c.deprecatedBuiltinsMap[b.Name]; found { + checkNeeded = true + break + } + } + if !checkNeeded { + return + } + + for _, name := range c.sorted { + mod := c.Modules[name] + if c.strict || mod.regoV1Compatible() { + errs := checkDeprecatedBuiltins(c.deprecatedBuiltinsMap, mod) + for _, err := range errs { + c.err(err) + } + } + } +} + +func (c *Compiler) runStage(metricName string, f func()) { + if c.metrics != nil { + c.metrics.Timer(metricName).Start() + defer c.metrics.Timer(metricName).Stop() + } + f() +} + +func (c *Compiler) runStageAfter(metricName string, s CompilerStage) *Error { + if c.metrics != nil { + c.metrics.Timer(metricName).Start() + defer c.metrics.Timer(metricName).Stop() + } + return s(c) +} + +func (c *Compiler) compile() { + + defer func() { + if r := recover(); r != nil && r != errLimitReached { + panic(r) + } + }() + + for _, s := range c.stages { + if c.evalMode == EvalModeIR { + switch s.name { + case "BuildRuleIndices", "BuildComprehensionIndices": + continue // skip these stages + } + } + + if c.allowUndefinedFuncCalls && (s.name == "CheckUndefinedFuncs" || s.name == "CheckSafetyRuleBodies") { + continue + } + + c.runStage(s.metricName, s.f) + if c.Failed() { + return + } + for _, a := range c.after[s.name] { + if err := c.runStageAfter(a.MetricName, a.Stage); err != nil { + c.err(err) + return + } + } + } +} + +func (c *Compiler) init() { + + if c.initialized { + return + } + + if c.capabilities == nil { + c.capabilities = CapabilitiesForThisVersion() + } + + c.builtins = make(map[string]*Builtin, len(c.capabilities.Builtins)+len(c.customBuiltins)) + + for _, bi := range c.capabilities.Builtins { + c.builtins[bi.Name] = bi + if bi.IsDeprecated() { + c.deprecatedBuiltinsMap[bi.Name] = struct{}{} + } + } + + for name, bi := range c.customBuiltins { + c.builtins[name] = bi + } + + // Load the global input schema if one was provided. + if c.schemaSet != nil { + if schema := c.schemaSet.Get(SchemaRootRef); schema != nil { + tpe, err := loadSchema(schema, c.capabilities.AllowNet) + if err != nil { + c.err(NewError(TypeErr, nil, err.Error())) //nolint:govet + } else { + c.inputType = tpe + } + } + } + + c.TypeEnv = newTypeChecker(). + WithSchemaSet(c.schemaSet). + WithInputType(c.inputType). 
+ Env(c.builtins) + + c.initialized = true +} + +func (c *Compiler) err(err *Error) { + if c.maxErrs > 0 && len(c.Errors) >= c.maxErrs { + c.Errors = append(c.Errors, errLimitReached) + panic(errLimitReached) + } + c.Errors = append(c.Errors, err) +} + +func (c *Compiler) getExports() *util.HashMap { + + rules := util.NewHashMap(func(a, b util.T) bool { + return a.(Ref).Equal(b.(Ref)) + }, func(v util.T) int { + return v.(Ref).Hash() + }) + + for _, name := range c.sorted { + mod := c.Modules[name] + + for _, rule := range mod.Rules { + hashMapAdd(rules, mod.Package.Path, rule.Head.Ref().GroundPrefix()) + } + } + + return rules +} + +func hashMapAdd(rules *util.HashMap, pkg, rule Ref) { + prev, ok := rules.Get(pkg) + if !ok { + rules.Put(pkg, []Ref{rule}) + return + } + for _, p := range prev.([]Ref) { + if p.Equal(rule) { + return + } + } + rules.Put(pkg, append(prev.([]Ref), rule)) +} + +func (c *Compiler) GetAnnotationSet() *AnnotationSet { + return c.annotationSet +} + +func (c *Compiler) checkImports() { + modules := make([]*Module, 0, len(c.Modules)) + + supportsRegoV1Import := c.capabilities.ContainsFeature(FeatureRegoV1Import) || + c.capabilities.ContainsFeature(FeatureRegoV1) + + for _, name := range c.sorted { + mod := c.Modules[name] + + for _, imp := range mod.Imports { + if !supportsRegoV1Import && RegoV1CompatibleRef.Equal(imp.Path.Value) { + c.err(NewError(CompileErr, imp.Loc(), "rego.v1 import is not supported")) + } + } + + if c.strict || c.moduleIsRegoV1Compatible(mod) { + modules = append(modules, mod) + } + } + + errs := checkDuplicateImports(modules) + for _, err := range errs { + c.err(err) + } +} + +func (c *Compiler) checkKeywordOverrides() { + for _, name := range c.sorted { + mod := c.Modules[name] + if c.strict || c.moduleIsRegoV1Compatible(mod) { + errs := checkRootDocumentOverrides(mod) + for _, err := range errs { + c.err(err) + } + } + } +} + +func (c *Compiler) moduleIsRegoV1(mod *Module) bool { + if mod.regoVersion == RegoUndefined { + switch c.defaultRegoVersion { + case RegoUndefined: + c.err(NewError(CompileErr, mod.Package.Loc(), "cannot determine rego version for module")) + return false + case RegoV1: + return true + } + return false + } + return mod.regoVersion == RegoV1 +} + +func (c *Compiler) moduleIsRegoV1Compatible(mod *Module) bool { + if mod.regoVersion == RegoUndefined { + switch c.defaultRegoVersion { + case RegoUndefined: + c.err(NewError(CompileErr, mod.Package.Loc(), "cannot determine rego version for module")) + return false + case RegoV1, RegoV0CompatV1: + return true + } + return false + } + return mod.regoV1Compatible() +} + +// resolveAllRefs resolves references in expressions to their fully qualified values. +// +// For instance, given the following module: +// +// package a.b +// import data.foo.bar +// p[x] { bar[_] = x } +// +// The reference "bar[_]" would be resolved to "data.foo.bar[_]". +// +// Ref rules are resolved, too: +// +// package a.b +// q { c.d.e == 1 } +// c.d[e] := 1 if e := "e" +// +// The reference "c.d.e" would be resolved to "data.a.b.c.d.e". 
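+// +// Usage sketch (illustrative only; the module name "example.rego" and the rule are +// hypothetical): after Compile, the resolved references are visible on the compiled +// modules. +// +//	c := NewCompiler() +//	c.Compile(map[string]*Module{"example.rego": MustParseModule( +//		"package a.b\n\nimport data.foo.bar\n\np contains x if { bar[_] = x }")}) +//	if !c.Failed() { +//		// The body of p now refers to data.foo.bar[_] rather than bar[_]. +//		fmt.Println(c.Modules["example.rego"].Rules[0].Body) +//	}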
+func (c *Compiler) resolveAllRefs() { + + rules := c.getExports() + + for _, name := range c.sorted { + mod := c.Modules[name] + + var ruleExports []Ref + if x, ok := rules.Get(mod.Package.Path); ok { + ruleExports = x.([]Ref) + } + + globals := getGlobals(mod.Package, ruleExports, mod.Imports) + + WalkRules(mod, func(rule *Rule) bool { + err := resolveRefsInRule(globals, rule) + if err != nil { + c.err(NewError(CompileErr, rule.Location, err.Error())) //nolint:govet + } + return false + }) + + if c.strict { // check for unused imports + for _, imp := range mod.Imports { + path := imp.Path.Value.(Ref) + if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) { + continue // ignore future and rego imports + } + + for v, u := range globals { + if v.Equal(imp.Name()) && !u.used { + c.err(NewError(CompileErr, imp.Location, "%s unused", imp.String())) + } + } + } + } + } + + if c.moduleLoader != nil { + + parsed, err := c.moduleLoader(c.Modules) + if err != nil { + c.err(NewError(CompileErr, nil, err.Error())) //nolint:govet + return + } + + if len(parsed) == 0 { + return + } + + for id, module := range parsed { + c.Modules[id] = module.Copy() + c.sorted = append(c.sorted, id) + if c.parsedModules != nil { + c.parsedModules[id] = module + } + } + + sort.Strings(c.sorted) + c.resolveAllRefs() + } +} + +func (c *Compiler) removeImports() { + c.imports = make(map[string][]*Import, len(c.Modules)) + for name := range c.Modules { + c.imports[name] = c.Modules[name].Imports + c.Modules[name].Imports = nil + } +} + +func (c *Compiler) initLocalVarGen() { + c.localvargen = newLocalVarGeneratorForModuleSet(c.sorted, c.Modules) +} + +func (c *Compiler) rewriteComprehensionTerms() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + mod := c.Modules[name] + _, _ = rewriteComprehensionTerms(f, mod) // ignore error + } +} + +func (c *Compiler) rewriteExprTerms() { + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(rule *Rule) bool { + rewriteExprTermsInHead(c.localvargen, rule) + rule.Body = rewriteExprTermsInBody(c.localvargen, rule.Body) + return false + }) + } +} + +func (c *Compiler) rewriteRuleHeadRefs() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + WalkRules(c.Modules[name], func(rule *Rule) bool { + + ref := rule.Head.Ref() + // NOTE(sr): We're backfilling Refs here -- all parser code paths would have them, but + // it's possible to construct Module{} instances from Golang code, so we need + // to accommodate for that, too. 
+ if len(rule.Head.Reference) == 0 { + rule.Head.Reference = ref + } + + cannotSpeakStringPrefixRefs := true + cannotSpeakGeneralRefs := true + for _, f := range c.capabilities.Features { + switch f { + case FeatureRefHeadStringPrefixes: + cannotSpeakStringPrefixRefs = false + case FeatureRefHeads: + cannotSpeakGeneralRefs = false + case FeatureRegoV1: + cannotSpeakStringPrefixRefs = false + cannotSpeakGeneralRefs = false + } + } + + if cannotSpeakStringPrefixRefs && cannotSpeakGeneralRefs && rule.Head.Name == "" { + c.err(NewError(CompileErr, rule.Loc(), "rule heads with refs are not supported: %v", rule.Head.Reference)) + return true + } + + for i := 1; i < len(ref); i++ { + if cannotSpeakGeneralRefs && (rule.Head.RuleKind() == MultiValue || i != len(ref)-1) { // last + if _, ok := ref[i].Value.(String); !ok { + c.err(NewError(TypeErr, rule.Loc(), "rule heads with general refs (containing variables) are not supported: %v", rule.Head.Reference)) + continue + } + } + + // Rewrite so that any non-scalar elements in the rule's ref are vars: + // p.q.r[y.z] { ... } => p.q.r[__local0__] { __local0__ = y.z } + // p.q[a.b][c.d] { ... } => p.q[__local0__] { __local0__ = a.b; __local1__ = c.d } + // because that's what the RuleTree knows how to deal with. + if _, ok := ref[i].Value.(Var); !ok && !IsScalar(ref[i].Value) { + expr := f.Generate(ref[i]) + if i == len(ref)-1 && rule.Head.Key.Equal(ref[i]) { + rule.Head.Key = expr.Operand(0) + } + rule.Head.Reference[i] = expr.Operand(0) + rule.Body.Append(expr) + } + } + + return true + }) + } +} + +func (c *Compiler) checkVoidCalls() { + for _, name := range c.sorted { + mod := c.Modules[name] + for _, err := range checkVoidCalls(c.TypeEnv, mod) { + c.err(err) + } + } +} + +func (c *Compiler) rewritePrintCalls() { + var modified bool + if !c.enablePrintStatements { + for _, name := range c.sorted { + if erasePrintCalls(c.Modules[name]) { + modified = true + } + } + } else { + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(r *Rule) bool { + safe := r.Head.Args.Vars() + safe.Update(ReservedVars) + vis := func(b Body) bool { + modrec, errs := rewritePrintCalls(c.localvargen, c.GetArity, safe, b) + if modrec { + modified = true + } + for _, err := range errs { + c.err(err) + } + return false + } + WalkBodies(r.Head, vis) + WalkBodies(r.Body, vis) + return false + }) + } + } + if modified { + c.Required.addBuiltinSorted(Print) + } +} + +// checkVoidCalls returns errors for any expressions that treat void function +// calls as values. The only void functions in Rego are specific built-ins like +// print(). +func checkVoidCalls(env *TypeEnv, x interface{}) Errors { + var errs Errors + WalkTerms(x, func(x *Term) bool { + if call, ok := x.Value.(Call); ok { + if tpe, ok := env.Get(call[0]).(*types.Function); ok && tpe.Result() == nil { + errs = append(errs, NewError(TypeErr, x.Loc(), "%v used as value", call)) + } + } + return false + }) + return errs +} + +// rewritePrintCalls will rewrite the body so that print operands are captured +// in local variables and their evaluation occurs within a comprehension. +// Wrapping the terms inside of a comprehension ensures that undefined values do +// not short-circuit evaluation. 
+// +// For example, given the following print statement: +// +// print("the value of x is:", input.x) +// +// The expression would be rewritten to: +// +// print({__local0__ | __local0__ = "the value of x is:"}, {__local1__ | __local1__ = input.x}) +func rewritePrintCalls(gen *localVarGenerator, getArity func(Ref) int, globals VarSet, body Body) (bool, Errors) { + + var errs Errors + var modified bool + + // Visit comprehension bodies recursively to ensure print statements inside + // those bodies only close over variables that are safe. + for i := range body { + if ContainsClosures(body[i]) { + safe := outputVarsForBody(body[:i], getArity, globals) + safe.Update(globals) + WalkClosures(body[i], func(x interface{}) bool { + var modrec bool + var errsrec Errors + switch x := x.(type) { + case *SetComprehension: + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + case *ArrayComprehension: + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + case *ObjectComprehension: + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + case *Every: + safe.Update(x.KeyValueVars()) + modrec, errsrec = rewritePrintCalls(gen, getArity, safe, x.Body) + } + if modrec { + modified = true + } + errs = append(errs, errsrec...) + return true + }) + if len(errs) > 0 { + return false, errs + } + } + } + + for i := range body { + + if !isPrintCall(body[i]) { + continue + } + + modified = true + + var errs Errors + safe := outputVarsForBody(body[:i], getArity, globals) + safe.Update(globals) + args := body[i].Operands() + + for j := range args { + vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) + vis.Walk(args[j]) + unsafe := vis.Vars().Diff(safe) + for _, v := range unsafe.Sorted() { + errs = append(errs, NewError(CompileErr, args[j].Loc(), "var %v is undeclared", v)) + } + } + + if len(errs) > 0 { + return false, errs + } + + arr := NewArray() + + for j := range args { + x := NewTerm(gen.Generate()).SetLocation(args[j].Loc()) + capture := Equality.Expr(x, args[j]).SetLocation(args[j].Loc()) + arr = arr.Append(SetComprehensionTerm(x, NewBody(capture)).SetLocation(args[j].Loc())) + } + + body.Set(NewExpr([]*Term{ + NewTerm(InternalPrint.Ref()).SetLocation(body[i].Loc()), + NewTerm(arr).SetLocation(body[i].Loc()), + }).SetLocation(body[i].Loc()), i) + } + + return modified, nil +} + +func erasePrintCalls(node interface{}) bool { + var modified bool + NewGenericVisitor(func(x interface{}) bool { + var modrec bool + switch x := x.(type) { + case *Rule: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *ArrayComprehension: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *SetComprehension: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *ObjectComprehension: + modrec, x.Body = erasePrintCallsInBody(x.Body) + case *Every: + modrec, x.Body = erasePrintCallsInBody(x.Body) + } + if modrec { + modified = true + } + return false + }).Walk(node) + return modified +} + +func erasePrintCallsInBody(x Body) (bool, Body) { + + if !containsPrintCall(x) { + return false, x + } + + var cpy Body + + for i := range x { + + // Recursively visit any comprehensions contained in this expression. 
+ erasePrintCalls(x[i]) + + if !isPrintCall(x[i]) { + cpy.Append(x[i]) + } + } + + if len(cpy) == 0 { + term := BooleanTerm(true).SetLocation(x.Loc()) + expr := NewExpr(term).SetLocation(x.Loc()) + cpy.Append(expr) + } + + return true, cpy +} + +func containsPrintCall(x interface{}) bool { + var found bool + WalkExprs(x, func(expr *Expr) bool { + if !found { + if isPrintCall(expr) { + found = true + } + } + return found + }) + return found +} + +var printRef = Print.Ref() + +func isPrintCall(x *Expr) bool { + return x.IsCall() && x.Operator().Equal(printRef) +} + +// rewriteRefsInHead will rewrite rules so that the head does not contain any +// terms that require evaluation (e.g., refs or comprehensions). If the key or +// value contains one or more of these terms, the key or value will be moved +// into the body and assigned to a new variable. The new variable will replace +// the key or value in the head. +// +// For instance, given the following rule: +// +// p[{"foo": data.foo[i]}] { i < 100 } +// +// The rule would be re-written as: +// +// p[__local0__] { i < 100; __local0__ = {"foo": data.foo[i]} } +func (c *Compiler) rewriteRefsInHead() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(rule *Rule) bool { + if requiresEval(rule.Head.Key) { + expr := f.Generate(rule.Head.Key) + rule.Head.Key = expr.Operand(0) + rule.Body.Append(expr) + } + if requiresEval(rule.Head.Value) { + expr := f.Generate(rule.Head.Value) + rule.Head.Value = expr.Operand(0) + rule.Body.Append(expr) + } + for i := 0; i < len(rule.Head.Args); i++ { + if requiresEval(rule.Head.Args[i]) { + expr := f.Generate(rule.Head.Args[i]) + rule.Head.Args[i] = expr.Operand(0) + rule.Body.Append(expr) + } + } + return false + }) + } +} + +func (c *Compiler) rewriteEquals() { + modified := false + for _, name := range c.sorted { + mod := c.Modules[name] + modified = rewriteEquals(mod) || modified + } + if modified { + c.Required.addBuiltinSorted(Equal) + } +} + +func (c *Compiler) rewriteDynamicTerms() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(rule *Rule) bool { + rule.Body = rewriteDynamics(f, rule.Body) + return false + }) + } +} + +// rewriteTestRuleEqualities rewrites equality expressions in test rule bodies to create local vars for statements that would otherwise +// not have their values captured through tracing, such as refs and comprehensions not unified/assigned to a local var. +// For example, given the following module: +// +// package test +// +// p.q contains v if { +// some v in numbers.range(1, 3) +// } +// +// p.r := "foo" +// +// test_rule { +// p == { +// "q": {4, 5, 6} +// } +// } +// +// `p` in `test_rule` resolves to `data.test.p`, which won't be an entry in the virtual-cache and must therefore be calculated after-the-fact. +// If `p` isn't captured in a local var, there is no trivial way to retrieve its value for test reporting. 
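+// +// Illustratively (the generated variable name below is hypothetical and not guaranteed), +// the body of test_rule above is rewritten into roughly: +// +//	__local0__ = data.test.p +//	__local0__ == {"q": {4, 5, 6}} +// +// so that the value of `p` is bound to a local variable whose value tracing can report.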
+func (c *Compiler) rewriteTestRuleEqualities() { + if !c.rewriteTestRulesForTracing { + return + } + + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + mod := c.Modules[name] + WalkRules(mod, func(rule *Rule) bool { + if strings.HasPrefix(string(rule.Head.Name), "test_") { + rule.Body = rewriteTestEqualities(f, rule.Body) + } + return false + }) + } +} + +func (c *Compiler) parseMetadataBlocks() { + // Only parse annotations if rego.metadata built-ins are called + regoMetadataCalled := false + for _, name := range c.sorted { + mod := c.Modules[name] + WalkExprs(mod, func(expr *Expr) bool { + if isRegoMetadataChainCall(expr) || isRegoMetadataRuleCall(expr) { + regoMetadataCalled = true + } + return regoMetadataCalled + }) + + if regoMetadataCalled { + break + } + } + + if regoMetadataCalled { + // NOTE: Possible optimization: only parse annotations for modules on the path of rego.metadata-calling module + for _, name := range c.sorted { + mod := c.Modules[name] + + if len(mod.Annotations) == 0 { + var errs Errors + mod.Annotations, errs = parseAnnotations(mod.Comments) + errs = append(errs, attachAnnotationsNodes(mod)...) + for _, err := range errs { + c.err(err) + } + + attachRuleAnnotations(mod) + } + } + } +} + +func (c *Compiler) rewriteRegoMetadataCalls() { + eqFactory := newEqualityFactory(c.localvargen) + + _, chainFuncAllowed := c.builtins[RegoMetadataChain.Name] + _, ruleFuncAllowed := c.builtins[RegoMetadataRule.Name] + + for _, name := range c.sorted { + mod := c.Modules[name] + + WalkRules(mod, func(rule *Rule) bool { + var firstChainCall *Expr + var firstRuleCall *Expr + + WalkExprs(rule, func(expr *Expr) bool { + if chainFuncAllowed && firstChainCall == nil && isRegoMetadataChainCall(expr) { + firstChainCall = expr + } else if ruleFuncAllowed && firstRuleCall == nil && isRegoMetadataRuleCall(expr) { + firstRuleCall = expr + } + return firstChainCall != nil && firstRuleCall != nil + }) + + chainCalled := firstChainCall != nil + ruleCalled := firstRuleCall != nil + + if chainCalled || ruleCalled { + body := make(Body, 0, len(rule.Body)+2) + + var metadataChainVar Var + if chainCalled { + // Create and inject metadata chain for rule + + chain, err := createMetadataChain(c.annotationSet.Chain(rule)) + if err != nil { + c.err(err) + return false + } + + chain.Location = firstChainCall.Location + eq := eqFactory.Generate(chain) + metadataChainVar = eq.Operands()[0].Value.(Var) + body.Append(eq) + } + + var metadataRuleVar Var + if ruleCalled { + // Create and inject metadata for rule + + var metadataRuleTerm *Term + + a := getPrimaryRuleAnnotations(c.annotationSet, rule) + if a != nil { + annotObj, err := a.toObject() + if err != nil { + c.err(err) + return false + } + metadataRuleTerm = NewTerm(*annotObj) + } else { + // If rule has no annotations, assign an empty object + metadataRuleTerm = ObjectTerm() + } + + metadataRuleTerm.Location = firstRuleCall.Location + eq := eqFactory.Generate(metadataRuleTerm) + metadataRuleVar = eq.Operands()[0].Value.(Var) + body.Append(eq) + } + + for _, expr := range rule.Body { + body.Append(expr) + } + rule.Body = body + + vis := func(b Body) bool { + for _, err := range rewriteRegoMetadataCalls(&metadataChainVar, &metadataRuleVar, b, &c.RewrittenVars) { + c.err(err) + } + return false + } + WalkBodies(rule.Head, vis) + WalkBodies(rule.Body, vis) + } + + return false + }) + } +} + +func getPrimaryRuleAnnotations(as *AnnotationSet, rule *Rule) *Annotations { + annots := as.GetRuleScope(rule) + + if len(annots) == 0 { + 
return nil + } + + // Sort by annotation location; chain must start with annotations declared closest to rule, then going outward + slices.SortStableFunc(annots, func(a, b *Annotations) int { + return -a.Location.Compare(b.Location) + }) + + return annots[0] +} + +func rewriteRegoMetadataCalls(metadataChainVar *Var, metadataRuleVar *Var, body Body, rewrittenVars *map[Var]Var) Errors { + var errs Errors + + WalkClosures(body, func(x interface{}) bool { + switch x := x.(type) { + case *ArrayComprehension: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + case *SetComprehension: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + case *ObjectComprehension: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + case *Every: + errs = rewriteRegoMetadataCalls(metadataChainVar, metadataRuleVar, x.Body, rewrittenVars) + } + return true + }) + + for i := range body { + expr := body[i] + var metadataVar Var + + if metadataChainVar != nil && isRegoMetadataChainCall(expr) { + metadataVar = *metadataChainVar + } else if metadataRuleVar != nil && isRegoMetadataRuleCall(expr) { + metadataVar = *metadataRuleVar + } else { + continue + } + + // NOTE(johanfylling): An alternative strategy would be to walk the body and replace all operands[0] + // usages with *metadataChainVar + operands := expr.Operands() + var newExpr *Expr + if len(operands) > 0 { // There is an output var to rewrite + rewrittenVar := operands[0] + newExpr = Equality.Expr(rewrittenVar, NewTerm(metadataVar)) + } else { // No output var, just rewrite expr to metadataVar + newExpr = NewExpr(NewTerm(metadataVar)) + } + + newExpr.Generated = true + newExpr.Location = expr.Location + body.Set(newExpr, i) + } + + return errs +} + +var regoMetadataChainRef = RegoMetadataChain.Ref() +var regoMetadataRuleRef = RegoMetadataRule.Ref() + +func isRegoMetadataChainCall(x *Expr) bool { + return x.IsCall() && x.Operator().Equal(regoMetadataChainRef) +} + +func isRegoMetadataRuleCall(x *Expr) bool { + return x.IsCall() && x.Operator().Equal(regoMetadataRuleRef) +} + +func createMetadataChain(chain []*AnnotationsRef) (*Term, *Error) { + + metaArray := NewArray() + for _, link := range chain { + p := link.Path.toArray(). + Slice(1, -1) // Dropping leading 'data' element of path + obj := NewObject( + Item(pathTerm, NewTerm(p)), + ) + if link.Annotations != nil { + annotObj, err := link.Annotations.toObject() + if err != nil { + return nil, err + } + obj.Insert(annotationsTerm, NewTerm(*annotObj)) + } + metaArray = metaArray.Append(NewTerm(obj)) + } + + return NewTerm(metaArray), nil +} + +func (c *Compiler) rewriteLocalVars() { + + var assignment bool + + for _, name := range c.sorted { + mod := c.Modules[name] + gen := c.localvargen + + WalkRules(mod, func(rule *Rule) bool { + argsStack := newLocalDeclaredVars() + + args := NewVarVisitor() + if c.strict { + args.Walk(rule.Head.Args) + } + unusedArgs := args.Vars() + + c.rewriteLocalArgVars(gen, argsStack, rule) + + // Rewrite local vars in each else-branch of the rule. + // Note: this is done instead of a walk so that we can capture any unused function arguments + // across else-branches. 
+ for rule := rule; rule != nil; rule = rule.Else { + stack, errs := c.rewriteLocalVarsInRule(rule, unusedArgs, argsStack, gen) + if stack.assignment { + assignment = true + } + + for arg := range unusedArgs { + if stack.Count(arg) > 1 { + delete(unusedArgs, arg) + } + } + + for _, err := range errs { + c.err(err) + } + } + + if c.strict { + // Report an error for each unused function argument + for arg := range unusedArgs { + if !arg.IsWildcard() { + c.err(NewError(CompileErr, rule.Head.Location, "unused argument %v. (hint: use _ (wildcard variable) instead)", arg)) + } + } + } + + return true + }) + } + + if assignment { + c.Required.addBuiltinSorted(Assign) + } +} + +func (c *Compiler) rewriteLocalVarsInRule(rule *Rule, unusedArgs VarSet, argsStack *localDeclaredVars, gen *localVarGenerator) (*localDeclaredVars, Errors) { + // Rewrite assignments contained in head of rule. Assignments can + // occur in rule head if they're inside a comprehension. Note, + // assigned vars in comprehensions in the head will be rewritten + // first to preserve scoping rules. For example: + // + // p = [x | x := 1] { x := 2 } becomes p = [__local0__ | __local0__ = 1] { __local1__ = 2 } + // + // This behaviour is consistent scoping inside the body. For example: + // + // p = xs { x := 2; xs = [x | x := 1] } becomes p = xs { __local0__ = 2; xs = [__local1__ | __local1__ = 1] } + nestedXform := &rewriteNestedHeadVarLocalTransform{ + gen: gen, + RewrittenVars: c.RewrittenVars, + strict: c.strict, + } + + NewGenericVisitor(nestedXform.Visit).Walk(rule.Head) + + for _, err := range nestedXform.errs { + c.err(err) + } + + // Rewrite assignments in body. + used := NewVarSet() + + for _, t := range rule.Head.Ref()[1:] { + used.Update(t.Vars()) + } + + if rule.Head.Key != nil { + used.Update(rule.Head.Key.Vars()) + } + + if rule.Head.Value != nil { + valueVars := rule.Head.Value.Vars() + used.Update(valueVars) + for arg := range unusedArgs { + if valueVars.Contains(arg) { + delete(unusedArgs, arg) + } + } + } + + stack := argsStack.Copy() + + body, declared, errs := rewriteLocalVars(gen, stack, used, rule.Body, c.strict) + + // For rewritten vars use the collection of all variables that + // were in the stack at some point in time. + maps.Copy(c.RewrittenVars, stack.rewritten) + + rule.Body = body + + // Rewrite vars in head that refer to locally declared vars in the body. 
+ localXform := rewriteHeadVarLocalTransform{declared: declared} + + for i := range rule.Head.Args { + rule.Head.Args[i], _ = transformTerm(localXform, rule.Head.Args[i]) + } + + for i := 1; i < len(rule.Head.Ref()); i++ { + rule.Head.Reference[i], _ = transformTerm(localXform, rule.Head.Ref()[i]) + } + if rule.Head.Key != nil { + rule.Head.Key, _ = transformTerm(localXform, rule.Head.Key) + } + + if rule.Head.Value != nil { + rule.Head.Value, _ = transformTerm(localXform, rule.Head.Value) + } + return stack, errs +} + +type rewriteNestedHeadVarLocalTransform struct { + gen *localVarGenerator + errs Errors + RewrittenVars map[Var]Var + strict bool +} + +func (xform *rewriteNestedHeadVarLocalTransform) Visit(x interface{}) bool { + + if term, ok := x.(*Term); ok { + + stop := false + stack := newLocalDeclaredVars() + + switch x := term.Value.(type) { + case *object: + cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { + kcpy := k.Copy() + NewGenericVisitor(xform.Visit).Walk(kcpy) + vcpy := v.Copy() + NewGenericVisitor(xform.Visit).Walk(vcpy) + return kcpy, vcpy, nil + }) + term.Value = cpy + stop = true + case *set: + cpy, _ := x.Map(func(v *Term) (*Term, error) { + vcpy := v.Copy() + NewGenericVisitor(xform.Visit).Walk(vcpy) + return vcpy, nil + }) + term.Value = cpy + stop = true + case *ArrayComprehension: + xform.errs = rewriteDeclaredVarsInArrayComprehension(xform.gen, stack, x, xform.errs, xform.strict) + stop = true + case *SetComprehension: + xform.errs = rewriteDeclaredVarsInSetComprehension(xform.gen, stack, x, xform.errs, xform.strict) + stop = true + case *ObjectComprehension: + xform.errs = rewriteDeclaredVarsInObjectComprehension(xform.gen, stack, x, xform.errs, xform.strict) + stop = true + } + + maps.Copy(xform.RewrittenVars, stack.rewritten) + + return stop + } + + return false +} + +type rewriteHeadVarLocalTransform struct { + declared map[Var]Var +} + +func (xform rewriteHeadVarLocalTransform) Transform(x interface{}) (interface{}, error) { + if v, ok := x.(Var); ok { + if gv, ok := xform.declared[v]; ok { + return gv, nil + } + } + return x, nil +} + +func (c *Compiler) rewriteLocalArgVars(gen *localVarGenerator, stack *localDeclaredVars, rule *Rule) { + + vis := &ruleArgLocalRewriter{ + stack: stack, + gen: gen, + } + + for i := range rule.Head.Args { + Walk(vis, rule.Head.Args[i]) + } + + for i := range vis.errs { + c.err(vis.errs[i]) + } +} + +type ruleArgLocalRewriter struct { + stack *localDeclaredVars + gen *localVarGenerator + errs []*Error +} + +func (vis *ruleArgLocalRewriter) Visit(x interface{}) Visitor { + + t, ok := x.(*Term) + if !ok { + return vis + } + + switch v := t.Value.(type) { + case Var: + gv, ok := vis.stack.Declared(v) + if ok { + vis.stack.Seen(v) + } else { + gv = vis.gen.Generate() + vis.stack.Insert(v, gv, argVar) + } + t.Value = gv + return nil + case *object: + if cpy, err := v.Map(func(k, v *Term) (*Term, *Term, error) { + vcpy := v.Copy() + Walk(vis, vcpy) + return k, vcpy, nil + }); err != nil { + vis.errs = append(vis.errs, NewError(CompileErr, t.Location, err.Error())) //nolint:govet + } else { + t.Value = cpy + } + return nil + case Null, Boolean, Number, String, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Set: + // Scalars are no-ops. Comprehensions are handled above. Sets must not + // contain variables. + return nil + case Call: + vis.errs = append(vis.errs, NewError(CompileErr, t.Location, "rule arguments cannot contain calls")) + return nil + default: + // Recurse on refs and arrays. 
Any embedded + // variables can be rewritten. + return vis + } +} + +func (c *Compiler) rewriteWithModifiers() { + f := newEqualityFactory(c.localvargen) + for _, name := range c.sorted { + mod := c.Modules[name] + t := NewGenericTransformer(func(x interface{}) (interface{}, error) { + body, ok := x.(Body) + if !ok { + return x, nil + } + body, err := rewriteWithModifiersInBody(c, c.unsafeBuiltinsMap, f, body) + if err != nil { + c.err(err) + } + + return body, nil + }) + _, _ = Transform(t, mod) // ignore error + } +} + +func (c *Compiler) setModuleTree() { + c.ModuleTree = NewModuleTree(c.Modules) +} + +func (c *Compiler) setRuleTree() { + c.RuleTree = NewRuleTree(c.ModuleTree) +} + +func (c *Compiler) setGraph() { + list := func(r Ref) []*Rule { + return c.GetRulesDynamicWithOpts(r, RulesOptions{IncludeHiddenModules: true}) + } + c.Graph = NewGraph(c.Modules, list) +} + +type queryCompiler struct { + compiler *Compiler + qctx *QueryContext + typeEnv *TypeEnv + rewritten map[Var]Var + after map[string][]QueryCompilerStageDefinition + unsafeBuiltins map[string]struct{} + comprehensionIndices map[*Term]*ComprehensionIndex + enablePrintStatements bool +} + +func newQueryCompiler(compiler *Compiler) QueryCompiler { + qc := &queryCompiler{ + compiler: compiler, + qctx: nil, + after: map[string][]QueryCompilerStageDefinition{}, + comprehensionIndices: map[*Term]*ComprehensionIndex{}, + } + return qc +} + +func (qc *queryCompiler) WithStrict(strict bool) QueryCompiler { + qc.compiler.WithStrict(strict) + return qc +} + +func (qc *queryCompiler) WithEnablePrintStatements(yes bool) QueryCompiler { + qc.enablePrintStatements = yes + return qc +} + +func (qc *queryCompiler) WithContext(qctx *QueryContext) QueryCompiler { + qc.qctx = qctx + return qc +} + +func (qc *queryCompiler) WithStageAfter(after string, stage QueryCompilerStageDefinition) QueryCompiler { + qc.after[after] = append(qc.after[after], stage) + return qc +} + +func (qc *queryCompiler) WithUnsafeBuiltins(unsafe map[string]struct{}) QueryCompiler { + qc.unsafeBuiltins = unsafe + return qc +} + +func (qc *queryCompiler) RewrittenVars() map[Var]Var { + return qc.rewritten +} + +func (qc *queryCompiler) ComprehensionIndex(term *Term) *ComprehensionIndex { + if result, ok := qc.comprehensionIndices[term]; ok { + return result + } else if result, ok := qc.compiler.comprehensionIndices[term]; ok { + return result + } + return nil +} + +func (qc *queryCompiler) runStage(metricName string, qctx *QueryContext, query Body, s func(*QueryContext, Body) (Body, error)) (Body, error) { + if qc.compiler.metrics != nil { + qc.compiler.metrics.Timer(metricName).Start() + defer qc.compiler.metrics.Timer(metricName).Stop() + } + return s(qctx, query) +} + +func (qc *queryCompiler) runStageAfter(metricName string, query Body, s QueryCompilerStage) (Body, error) { + if qc.compiler.metrics != nil { + qc.compiler.metrics.Timer(metricName).Start() + defer qc.compiler.metrics.Timer(metricName).Stop() + } + return s(qc, query) +} + +type queryStage = struct { + name string + metricName string + f func(*QueryContext, Body) (Body, error) +} + +func (qc *queryCompiler) Compile(query Body) (Body, error) { + if len(query) == 0 { + return nil, Errors{NewError(CompileErr, nil, "empty query cannot be compiled")} + } + + query = query.Copy() + + stages := []queryStage{ + {"CheckKeywordOverrides", "query_compile_stage_check_keyword_overrides", qc.checkKeywordOverrides}, + {"ResolveRefs", "query_compile_stage_resolve_refs", qc.resolveRefs}, + {"RewriteLocalVars", 
"query_compile_stage_rewrite_local_vars", qc.rewriteLocalVars}, + {"CheckVoidCalls", "query_compile_stage_check_void_calls", qc.checkVoidCalls}, + {"RewritePrintCalls", "query_compile_stage_rewrite_print_calls", qc.rewritePrintCalls}, + {"RewriteExprTerms", "query_compile_stage_rewrite_expr_terms", qc.rewriteExprTerms}, + {"RewriteComprehensionTerms", "query_compile_stage_rewrite_comprehension_terms", qc.rewriteComprehensionTerms}, + {"RewriteWithValues", "query_compile_stage_rewrite_with_values", qc.rewriteWithModifiers}, + {"CheckUndefinedFuncs", "query_compile_stage_check_undefined_funcs", qc.checkUndefinedFuncs}, + {"CheckSafety", "query_compile_stage_check_safety", qc.checkSafety}, + {"RewriteDynamicTerms", "query_compile_stage_rewrite_dynamic_terms", qc.rewriteDynamicTerms}, + {"CheckTypes", "query_compile_stage_check_types", qc.checkTypes}, + {"CheckUnsafeBuiltins", "query_compile_stage_check_unsafe_builtins", qc.checkUnsafeBuiltins}, + {"CheckDeprecatedBuiltins", "query_compile_stage_check_deprecated_builtins", qc.checkDeprecatedBuiltins}, + } + if qc.compiler.evalMode == EvalModeTopdown { + stages = append(stages, queryStage{"BuildComprehensionIndex", "query_compile_stage_build_comprehension_index", qc.buildComprehensionIndices}) + } + + qctx := qc.qctx.Copy() + + for _, s := range stages { + var err error + query, err = qc.runStage(s.metricName, qctx, query, s.f) + if err != nil { + return nil, qc.applyErrorLimit(err) + } + for _, s := range qc.after[s.name] { + query, err = qc.runStageAfter(s.MetricName, query, s.Stage) + if err != nil { + return nil, qc.applyErrorLimit(err) + } + } + } + + return query, nil +} + +func (qc *queryCompiler) TypeEnv() *TypeEnv { + return qc.typeEnv +} + +func (qc *queryCompiler) applyErrorLimit(err error) error { + var errs Errors + if errors.As(err, &errs) { + if qc.compiler.maxErrs > 0 && len(errs) > qc.compiler.maxErrs { + err = append(errs[:qc.compiler.maxErrs], errLimitReached) + } + } + return err +} + +func (qc *queryCompiler) checkKeywordOverrides(_ *QueryContext, body Body) (Body, error) { + if qc.compiler.strict { + if errs := checkRootDocumentOverrides(body); len(errs) > 0 { + return nil, errs + } + } + return body, nil +} + +func (qc *queryCompiler) resolveRefs(qctx *QueryContext, body Body) (Body, error) { + + var globals map[Var]*usedRef + + if qctx != nil { + pkg := qctx.Package + // Query compiler ought to generate a package if one was not provided and one or more imports were provided. 
+ // The generated package name could even be an empty string to avoid conflicts (it doesn't have to be valid syntactically) + if pkg == nil && len(qctx.Imports) > 0 { + pkg = &Package{Path: RefTerm(VarTerm("")).Value.(Ref)} + } + if pkg != nil { + var ruleExports []Ref + rules := qc.compiler.getExports() + if exist, ok := rules.Get(pkg.Path); ok { + ruleExports = exist.([]Ref) + } + + globals = getGlobals(qctx.Package, ruleExports, qctx.Imports) + qctx.Imports = nil + } + } + + ignore := &declaredVarStack{declaredVars(body)} + + return resolveRefsInBody(globals, ignore, body), nil +} + +func (qc *queryCompiler) rewriteComprehensionTerms(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + f := newEqualityFactory(gen) + node, err := rewriteComprehensionTerms(f, body) + if err != nil { + return nil, err + } + return node.(Body), nil +} + +func (qc *queryCompiler) rewriteDynamicTerms(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + f := newEqualityFactory(gen) + return rewriteDynamics(f, body), nil +} + +func (qc *queryCompiler) rewriteExprTerms(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + return rewriteExprTermsInBody(gen, body), nil +} + +func (qc *queryCompiler) rewriteLocalVars(_ *QueryContext, body Body) (Body, error) { + gen := newLocalVarGenerator("q", body) + stack := newLocalDeclaredVars() + body, _, err := rewriteLocalVars(gen, stack, nil, body, qc.compiler.strict) + if len(err) != 0 { + return nil, err + } + + // The vars returned during the rewrite will include all seen vars, + // even if they're not declared with an assignment operation. We don't + // want to include these inside the rewritten set though. + qc.rewritten = maps.Clone(stack.rewritten) + + return body, nil +} + +func (qc *queryCompiler) rewritePrintCalls(_ *QueryContext, body Body) (Body, error) { + if !qc.enablePrintStatements { + _, cpy := erasePrintCallsInBody(body) + return cpy, nil + } + gen := newLocalVarGenerator("q", body) + if _, errs := rewritePrintCalls(gen, qc.compiler.GetArity, ReservedVars, body); len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) checkVoidCalls(_ *QueryContext, body Body) (Body, error) { + if errs := checkVoidCalls(qc.compiler.TypeEnv, body); len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) checkUndefinedFuncs(_ *QueryContext, body Body) (Body, error) { + if errs := checkUndefinedFuncs(qc.compiler.TypeEnv, body, qc.compiler.GetArity, qc.rewritten); len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) checkSafety(_ *QueryContext, body Body) (Body, error) { + safe := ReservedVars.Copy() + reordered, unsafe := reorderBodyForSafety(qc.compiler.builtins, qc.compiler.GetArity, safe, body) + if errs := safetyErrorSlice(unsafe, qc.RewrittenVars()); len(errs) > 0 { + return nil, errs + } + return reordered, nil +} + +func (qc *queryCompiler) checkTypes(_ *QueryContext, body Body) (Body, error) { + var errs Errors + checker := newTypeChecker(). + WithSchemaSet(qc.compiler.schemaSet). + WithInputType(qc.compiler.inputType). 
+ WithVarRewriter(rewriteVarsInRef(qc.rewritten, qc.compiler.RewrittenVars)) + qc.typeEnv, errs = checker.CheckBody(qc.compiler.TypeEnv, body) + if len(errs) > 0 { + return nil, errs + } + + return body, nil +} + +func (qc *queryCompiler) checkUnsafeBuiltins(_ *QueryContext, body Body) (Body, error) { + errs := checkUnsafeBuiltins(qc.unsafeBuiltinsMap(), body) + if len(errs) > 0 { + return nil, errs + } + return body, nil +} + +func (qc *queryCompiler) unsafeBuiltinsMap() map[string]struct{} { + if qc.unsafeBuiltins != nil { + return qc.unsafeBuiltins + } + return qc.compiler.unsafeBuiltinsMap +} + +func (qc *queryCompiler) checkDeprecatedBuiltins(_ *QueryContext, body Body) (Body, error) { + if qc.compiler.strict { + errs := checkDeprecatedBuiltins(qc.compiler.deprecatedBuiltinsMap, body) + if len(errs) > 0 { + return nil, errs + } + } + return body, nil +} + +func (qc *queryCompiler) rewriteWithModifiers(_ *QueryContext, body Body) (Body, error) { + f := newEqualityFactory(newLocalVarGenerator("q", body)) + body, err := rewriteWithModifiersInBody(qc.compiler, qc.unsafeBuiltinsMap(), f, body) + if err != nil { + return nil, Errors{err} + } + return body, nil +} + +func (qc *queryCompiler) buildComprehensionIndices(_ *QueryContext, body Body) (Body, error) { + // NOTE(tsandall): The query compiler does not have a metrics object so we + // cannot record index metrics currently. + _ = buildComprehensionIndices(qc.compiler.debug, qc.compiler.GetArity, ReservedVars, qc.RewrittenVars(), body, qc.comprehensionIndices) + return body, nil +} + +// ComprehensionIndex specifies how the comprehension term can be indexed. The keys +// tell the evaluator what variables to use for indexing. In the future, the index +// could be expanded with more information that would allow the evaluator to index +// a larger fragment of comprehensions (e.g., by closing over variables in the outer +// query.) +type ComprehensionIndex struct { + Term *Term + Keys []*Term +} + +func (ci *ComprehensionIndex) String() string { + if ci == nil { + return "<comprehension index: undefined>" + } + return fmt.Sprintf("<comprehension index: keys: %v>", NewArray(ci.Keys...)) +} + +func buildComprehensionIndices(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, node interface{}, result map[*Term]*ComprehensionIndex) uint64 { + var n uint64 + cpy := candidates.Copy() + WalkBodies(node, func(b Body) bool { + for _, expr := range b { + index := getComprehensionIndex(dbg, arity, cpy, rwVars, expr) + if index != nil { + result[index.Term] = index + n++ + } + // Any variables appearing in the expressions leading up to the comprehension + // are fair-game to be used as index keys. + cpy.Update(expr.Vars(VarVisitorParams{SkipClosures: true, SkipRefCallHead: true})) + } + return false + }) + return n +} + +func getComprehensionIndex(dbg debug.Debug, arity func(Ref) int, candidates VarSet, rwVars map[Var]Var, expr *Expr) *ComprehensionIndex { + + // Ignore everything except <var> = <comprehension> expressions. Extract + // the comprehension term from the expression. + if !expr.IsEquality() || expr.Negated || len(expr.With) > 0 { + // No debug message, these are assumed to be known hindrances + // to comprehension indexing.
+ return nil + } + + var term *Term + + lhs, rhs := expr.Operand(0), expr.Operand(1) + + if _, ok := lhs.Value.(Var); ok && IsComprehension(rhs.Value) { + term = rhs + } else if _, ok := rhs.Value.(Var); ok && IsComprehension(lhs.Value) { + term = lhs + } + + if term == nil { + // no debug for this, it's the ordinary "nothing to do here" case + return nil + } + + // Ignore comprehensions that contain expressions that close over variables + // in the outer body if those variables are not also output variables in the + // comprehension body. In other words, ignore comprehensions that we cannot + // safely evaluate without bindings from the outer body. For example: + // + // x = [1] + // [true | data.y[z] = x] # safe to evaluate w/o outer body + // [true | data.y[z] = x[0]] # NOT safe to evaluate because 'x' would be unsafe. + // + // By identifying output variables in the body we also know what to index on by + // intersecting with candidate variables from the outer query. + // + // For example: + // + // x = data.foo[_] + // _ = [y | data.bar[y] = x] # index on 'x' + // + // This query goes from O(data.foo*data.bar) to O(data.foo+data.bar). + var body Body + + switch x := term.Value.(type) { + case *ArrayComprehension: + body = x.Body + case *SetComprehension: + body = x.Body + case *ObjectComprehension: + body = x.Body + } + + outputs := outputVarsForBody(body, arity, ReservedVars) + unsafe := body.Vars(SafetyCheckVisitorParams).Diff(outputs).Diff(ReservedVars) + + if len(unsafe) > 0 { + dbg.Printf("%s: comprehension index: unsafe vars: %v", expr.Location, unsafe) + return nil + } + + // Similarly, ignore comprehensions that contain references with output variables + // that intersect with the candidates. Indexing these comprehensions could worsen + // performance. + regressionVis := newComprehensionIndexRegressionCheckVisitor(candidates) + regressionVis.Walk(body) + if regressionVis.worse { + dbg.Printf("%s: comprehension index: output vars intersect candidates", expr.Location) + return nil + } + + // Check if any nested comprehensions close over candidates. If any intersection is found + // the comprehension cannot be cached because it would require closing over the candidates + // which the evaluator does not support today. + nestedVis := newComprehensionIndexNestedCandidateVisitor(candidates) + nestedVis.Walk(body) + if nestedVis.found { + dbg.Printf("%s: comprehension index: nested comprehensions close over candidates", expr.Location) + return nil + } + + // Make a sorted set of variable names that will serve as the index key set. + // Sort to ensure deterministic indexing. In future this could be relaxed + // if we can decide that one ordering is better than another. If the set is + // empty, there is no indexing to do. 
+ indexVars := candidates.Intersect(outputs) + if len(indexVars) == 0 { + dbg.Printf("%s: comprehension index: no index vars", expr.Location) + return nil + } + + result := make([]*Term, 0, len(indexVars)) + + for v := range indexVars { + result = append(result, NewTerm(v)) + } + + slices.SortFunc(result, TermValueCompare) + + debugRes := make([]*Term, len(result)) + for i, r := range result { + if o, ok := rwVars[r.Value.(Var)]; ok { + debugRes[i] = NewTerm(o) + } else { + debugRes[i] = r + } + } + dbg.Printf("%s: comprehension index: built with keys: %v", expr.Location, debugRes) + return &ComprehensionIndex{Term: term, Keys: result} +} + +type comprehensionIndexRegressionCheckVisitor struct { + candidates VarSet + seen VarSet + worse bool +} + +// TODO(tsandall): Improve this so that users can either supply this list explicitly +// or the information is maintained on the built-in function declaration. What we really +// need to know is whether the built-in function allows callers to push down output +// values or not. It's unlikely that anything outside of OPA does this today so this +// solution is fine for now. +var comprehensionIndexBlacklist = map[string]int{ + WalkBuiltin.Name: len(WalkBuiltin.Decl.FuncArgs().Args), +} + +func newComprehensionIndexRegressionCheckVisitor(candidates VarSet) *comprehensionIndexRegressionCheckVisitor { + return &comprehensionIndexRegressionCheckVisitor{ + candidates: candidates, + seen: NewVarSet(), + } +} + +func (vis *comprehensionIndexRegressionCheckVisitor) Walk(x interface{}) { + NewGenericVisitor(vis.visit).Walk(x) +} + +func (vis *comprehensionIndexRegressionCheckVisitor) visit(x interface{}) bool { + if !vis.worse { + switch x := x.(type) { + case *Expr: + operands := x.Operands() + if pos := comprehensionIndexBlacklist[x.Operator().String()]; pos > 0 && pos < len(operands) { + vis.assertEmptyIntersection(operands[pos].Vars()) + } + case Ref: + vis.assertEmptyIntersection(x.OutputVars()) + case Var: + vis.seen.Add(x) + // Always skip comprehensions. We do not have to visit their bodies here. + case *ArrayComprehension, *SetComprehension, *ObjectComprehension: + return true + } + } + return vis.worse +} + +func (vis *comprehensionIndexRegressionCheckVisitor) assertEmptyIntersection(vs VarSet) { + for v := range vs { + if vis.candidates.Contains(v) && !vis.seen.Contains(v) { + vis.worse = true + return + } + } +} + +type comprehensionIndexNestedCandidateVisitor struct { + candidates VarSet + found bool +} + +func newComprehensionIndexNestedCandidateVisitor(candidates VarSet) *comprehensionIndexNestedCandidateVisitor { + return &comprehensionIndexNestedCandidateVisitor{ + candidates: candidates, + } +} + +func (vis *comprehensionIndexNestedCandidateVisitor) Walk(x interface{}) { + NewGenericVisitor(vis.visit).Walk(x) +} + +func (vis *comprehensionIndexNestedCandidateVisitor) visit(x interface{}) bool { + + if vis.found { + return true + } + + if v, ok := x.(Value); ok && IsComprehension(v) { + varVis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) + varVis.Walk(v) + vis.found = len(varVis.Vars().Intersect(vis.candidates)) > 0 + return true + } + + return false +} + +// ModuleTreeNode represents a node in the module tree. The module +// tree is keyed by the package path. 
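+// +// For example (an illustrative sketch; the module name and contents are hypothetical), +// a module declaring `package a.b` ends up under the children data -> "a" -> "b": +// +//	tree := NewModuleTree(map[string]*Module{"m.rego": MustParseModule("package a.b\n\np := 1")}) +//	node := tree.Children[Var("data")].Children[String("a")].Children[String("b")] +//	_ = node.Modules // contains the parsed module for m.rego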
+type ModuleTreeNode struct { + Key Value + Modules []*Module + Children map[Value]*ModuleTreeNode + Hide bool +} + +func (n *ModuleTreeNode) String() string { + var rules []string + for _, m := range n.Modules { + for _, r := range m.Rules { + rules = append(rules, r.Head.String()) + } + } + return fmt.Sprintf("<ModuleTreeNode key:%v children:%v rules:%v hide:%v>", n.Key, n.Children, rules, n.Hide) +} + +// NewModuleTree returns a new ModuleTreeNode that represents the root +// of the module tree populated with the given modules. +func NewModuleTree(mods map[string]*Module) *ModuleTreeNode { + root := &ModuleTreeNode{ + Children: map[Value]*ModuleTreeNode{}, + } + for _, name := range util.KeysSorted(mods) { + m := mods[name] + node := root + for i, x := range m.Package.Path { + c, ok := node.Children[x.Value] + if !ok { + var hide bool + if i == 1 && x.Value.Compare(SystemDocumentKey) == 0 { + hide = true + } + c = &ModuleTreeNode{ + Key: x.Value, + Children: map[Value]*ModuleTreeNode{}, + Hide: hide, + } + node.Children[x.Value] = c + } + node = c + } + node.Modules = append(node.Modules, m) + } + return root +} + +// Size returns the number of modules in the tree. +func (n *ModuleTreeNode) Size() int { + s := len(n.Modules) + for _, c := range n.Children { + s += c.Size() + } + return s +} + +// Child returns n's child with key k. +func (n *ModuleTreeNode) child(k Value) *ModuleTreeNode { + switch k.(type) { + case String, Var: + return n.Children[k] + } + return nil +} + +// Find dereferences ref along the tree. ref[0] is converted to a String +// for convenience. +func (n *ModuleTreeNode) find(ref Ref) (*ModuleTreeNode, Ref) { + if v, ok := ref[0].Value.(Var); ok { + ref = Ref{StringTerm(string(v))}.Concat(ref[1:]) + } + node := n + for i, r := range ref { + next := node.child(r.Value) + if next == nil { + tail := make(Ref, len(ref)-i) + tail[0] = VarTerm(string(ref[i].Value.(String))) + copy(tail[1:], ref[i+1:]) + return node, tail + } + node = next + } + return node, nil +} + +// DepthFirst performs a depth-first traversal of the module tree rooted at n. +// If f returns true, traversal will not continue to the children of n. +func (n *ModuleTreeNode) DepthFirst(f func(*ModuleTreeNode) bool) { + if f(n) { + return + } + for _, node := range n.Children { + node.DepthFirst(f) + } +} + +// TreeNode represents a node in the rule tree. The rule tree is keyed by +// rule path. +type TreeNode struct { + Key Value + Values []any + Children map[Value]*TreeNode + Sorted []Value + Hide bool +} + +func (n *TreeNode) String() string { + return fmt.Sprintf("<TreeNode key:%v values:%v sorted:%v hide:%v>", n.Key, n.Values, n.Sorted, n.Hide) +} + +// NewRuleTree returns a new TreeNode that represents the root +// of the rule tree populated with the given rules.
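+// +// Rules are inserted under the ground prefix of their refs. A usage sketch +// (module contents and the looked-up ref are illustrative): +// +//	mods := map[string]*Module{"m.rego": MustParseModule("package a\n\np.q := 1")} +//	rt := NewRuleTree(NewModuleTree(mods)) +//	node := rt.Find(MustParseRef("data.a.p.q")) +//	_ = node.Values // holds the rule p.q := 1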
+func NewRuleTree(mtree *ModuleTreeNode) *TreeNode { + root := TreeNode{ + Key: mtree.Key, + } + + mtree.DepthFirst(func(m *ModuleTreeNode) bool { + for _, mod := range m.Modules { + if len(mod.Rules) == 0 { + root.add(mod.Package.Path, nil) + } + for _, rule := range mod.Rules { + root.add(rule.Ref().GroundPrefix(), rule) + } + } + return false + }) + + // ensure that data.system's TreeNode is hidden + node, tail := root.find(DefaultRootRef.Append(NewTerm(SystemDocumentKey))) + if len(tail) == 0 { // found + node.Hide = true + } + + root.DepthFirst(func(x *TreeNode) bool { + x.sort() + return false + }) + + return &root +} + +func (n *TreeNode) add(path Ref, rule *Rule) { + node, tail := n.find(path) + if len(tail) > 0 { + sub := treeNodeFromRef(tail, rule) + if node.Children == nil { + node.Children = make(map[Value]*TreeNode, 1) + } + node.Children[sub.Key] = sub + node.Sorted = append(node.Sorted, sub.Key) + } else { + if rule != nil { + node.Values = append(node.Values, rule) + } + } +} + +// Size returns the number of rules in the tree. +func (n *TreeNode) Size() int { + s := len(n.Values) + for _, c := range n.Children { + s += c.Size() + } + return s +} + +// Child returns n's child with key k. +func (n *TreeNode) Child(k Value) *TreeNode { + switch k.(type) { + case Ref, Call: + return nil + default: + return n.Children[k] + } +} + +// Find dereferences ref along the tree +func (n *TreeNode) Find(ref Ref) *TreeNode { + node := n + for _, r := range ref { + node = node.Child(r.Value) + if node == nil { + return nil + } + } + return node +} + +// Iteratively dereferences ref along the node's subtree. +// - If matching fails immediately, the tail will contain the full ref. +// - Partial matching will result in a tail of non-zero length. +// - A complete match will result in a 0 length tail. +func (n *TreeNode) find(ref Ref) (*TreeNode, Ref) { + node := n + for i := range ref { + next := node.Child(ref[i].Value) + if next == nil { + tail := make(Ref, len(ref)-i) + copy(tail, ref[i:]) + return node, tail + } + node = next + } + return node, nil +} + +// DepthFirst performs a depth-first traversal of the rule tree rooted at n. If +// f returns true, traversal will not continue to the children of n. +func (n *TreeNode) DepthFirst(f func(*TreeNode) bool) { + if f(n) { + return + } + for _, node := range n.Children { + node.DepthFirst(f) + } +} + +func (n *TreeNode) sort() { + slices.SortFunc(n.Sorted, Value.Compare) +} + +func treeNodeFromRef(ref Ref, rule *Rule) *TreeNode { + depth := len(ref) - 1 + key := ref[depth].Value + node := &TreeNode{ + Key: key, + Children: nil, + } + if rule != nil { + node.Values = []any{rule} + } + + for i := len(ref) - 2; i >= 0; i-- { + key := ref[i].Value + node = &TreeNode{ + Key: key, + Children: map[Value]*TreeNode{ref[i+1].Value: node}, + Sorted: []Value{ref[i+1].Value}, + } + } + return node +} + +// flattenChildren flattens all children's rule refs into a sorted array. +func (n *TreeNode) flattenChildren() []Ref { + ret := newRefSet() + for _, sub := range n.Children { // we only want the children, so don't use n.DepthFirst() right away + sub.DepthFirst(func(x *TreeNode) bool { + for _, r := range x.Values { + rule := r.(*Rule) + ret.AddPrefix(rule.Ref()) + } + return false + }) + } + + slices.SortFunc(ret.s, RefCompare) + return ret.s +} + +// Graph represents the graph of dependencies between rules. 
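+// +// After a successful Compile the graph is available on the Compiler. A usage sketch +// (the rule variable is hypothetical): +// +//	for dep := range c.Graph.Dependencies(rule) { +//		fmt.Println(dep) // rules that `rule` refers to directly +//	} +//	sorted, ok := c.Graph.Sort() // dependency order; ok is false if a cycle exists +//	_, _ = sorted, ok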
+type Graph struct { + adj map[util.T]map[util.T]struct{} + radj map[util.T]map[util.T]struct{} + nodes map[util.T]struct{} + sorted []util.T +} + +// NewGraph returns a new Graph based on modules. The list function must return +// the rules referred to directly by the ref. +func NewGraph(modules map[string]*Module, list func(Ref) []*Rule) *Graph { + + graph := &Graph{ + adj: map[util.T]map[util.T]struct{}{}, + radj: map[util.T]map[util.T]struct{}{}, + nodes: map[util.T]struct{}{}, + sorted: nil, + } + + // Create visitor to walk a rule AST and add edges to the rule graph for + // each dependency. + vis := func(a *Rule) *GenericVisitor { + stop := false + return NewGenericVisitor(func(x interface{}) bool { + switch x := x.(type) { + case Ref: + for _, b := range list(x) { + for node := b; node != nil; node = node.Else { + graph.addDependency(a, node) + } + } + case *Rule: + if stop { + // Do not recurse into else clauses (which will be handled + // by the outer visitor.) + return true + } + stop = true + } + return false + }) + } + + // Walk over all rules, add them to graph, and build adjacency lists. + for _, module := range modules { + WalkRules(module, func(a *Rule) bool { + graph.addNode(a) + vis(a).Walk(a) + return false + }) + } + + return graph +} + +// Dependencies returns the set of rules that x depends on. +func (g *Graph) Dependencies(x util.T) map[util.T]struct{} { + return g.adj[x] +} + +// Dependents returns the set of rules that depend on x. +func (g *Graph) Dependents(x util.T) map[util.T]struct{} { + return g.radj[x] +} + +// Sort returns a slice of rules sorted by dependencies. If a cycle is found, +// ok is set to false. +func (g *Graph) Sort() (sorted []util.T, ok bool) { + if g.sorted != nil { + return g.sorted, true + } + + sorter := &graphSort{ + sorted: make([]util.T, 0, len(g.nodes)), + deps: g.Dependencies, + marked: map[util.T]struct{}{}, + temp: map[util.T]struct{}{}, + } + + for node := range g.nodes { + if !sorter.Visit(node) { + return nil, false + } + } + + g.sorted = sorter.sorted + return g.sorted, true +} + +func (g *Graph) addDependency(u util.T, v util.T) { + + if _, ok := g.nodes[u]; !ok { + g.addNode(u) + } + + if _, ok := g.nodes[v]; !ok { + g.addNode(v) + } + + edges, ok := g.adj[u] + if !ok { + edges = map[util.T]struct{}{} + g.adj[u] = edges + } + + edges[v] = struct{}{} + + edges, ok = g.radj[v] + if !ok { + edges = map[util.T]struct{}{} + g.radj[v] = edges + } + + edges[u] = struct{}{} +} + +func (g *Graph) addNode(n util.T) { + g.nodes[n] = struct{}{} +} + +type graphSort struct { + sorted []util.T + deps func(util.T) map[util.T]struct{} + marked map[util.T]struct{} + temp map[util.T]struct{} +} + +func (sort *graphSort) Marked(node util.T) bool { + _, marked := sort.marked[node] + return marked +} + +func (sort *graphSort) Visit(node util.T) (ok bool) { + if _, ok := sort.temp[node]; ok { + return false + } + if sort.Marked(node) { + return true + } + sort.temp[node] = struct{}{} + for other := range sort.deps(node) { + if !sort.Visit(other) { + return false + } + } + sort.marked[node] = struct{}{} + delete(sort.temp, node) + sort.sorted = append(sort.sorted, node) + return true +} + +// GraphTraversal is a Traversal that understands the dependency graph +type GraphTraversal struct { + graph *Graph + visited map[util.T]struct{} +} + +// NewGraphTraversal returns a Traversal for the dependency graph +func NewGraphTraversal(graph *Graph) *GraphTraversal { + return &GraphTraversal{ + graph: graph, + visited: map[util.T]struct{}{}, + } +} + +// 
Edges lists all dependency connections for a given node +func (g *GraphTraversal) Edges(x util.T) []util.T { + r := []util.T{} + for v := range g.graph.Dependencies(x) { + r = append(r, v) + } + return r +} + +// Visited returns whether a node has been visited, setting a node to visited if not +func (g *GraphTraversal) Visited(u util.T) bool { + _, ok := g.visited[u] + g.visited[u] = struct{}{} + return ok +} + +type unsafePair struct { + Expr *Expr + Vars VarSet +} + +type unsafeVarLoc struct { + Var Var + Loc *Location +} + +type unsafeVars map[*Expr]VarSet + +func (vs unsafeVars) Add(e *Expr, v Var) { + if u, ok := vs[e]; ok { + u[v] = struct{}{} + } else { + vs[e] = VarSet{v: struct{}{}} + } +} + +func (vs unsafeVars) Set(e *Expr, s VarSet) { + vs[e] = s +} + +func (vs unsafeVars) Update(o unsafeVars) { + for k, v := range o { + if _, ok := vs[k]; !ok { + vs[k] = VarSet{} + } + vs[k].Update(v) + } +} + +func (vs unsafeVars) Vars() (result []unsafeVarLoc) { + + locs := map[Var]*Location{} + + // If var appears in multiple sets then pick first by location. + for expr, vars := range vs { + for v := range vars { + if locs[v].Compare(expr.Location) > 0 { + locs[v] = expr.Location + } + } + } + + for v, loc := range locs { + result = append(result, unsafeVarLoc{ + Var: v, + Loc: loc, + }) + } + + slices.SortFunc(result, func(a, b unsafeVarLoc) int { + return a.Loc.Compare(b.Loc) + }) + + return result +} + +func (vs unsafeVars) Slice() (result []unsafePair) { + for expr, vs := range vs { + result = append(result, unsafePair{ + Expr: expr, + Vars: vs, + }) + } + return +} + +// reorderBodyForSafety returns a copy of the body ordered such that +// left to right evaluation of the body will not encounter unbound variables +// in input positions or negated expressions. +// +// Expressions are added to the re-ordered body as soon as they are considered +// safe. If multiple expressions become safe in the same pass, they are added +// in their original order. This results in minimal re-ordering of the body. +// +// If the body cannot be reordered to ensure safety, the second return value +// contains a mapping of expressions to unsafe variables in those expressions. +func reorderBodyForSafety(builtins map[string]*Builtin, arity func(Ref) int, globals VarSet, body Body) (Body, unsafeVars) { + + bodyVars := body.Vars(SafetyCheckVisitorParams) + reordered := make(Body, 0, len(body)) + safe := VarSet{} + unsafe := unsafeVars{} + + for _, e := range body { + for v := range e.Vars(SafetyCheckVisitorParams) { + if globals.Contains(v) { + safe.Add(v) + } else { + unsafe.Add(e, v) + } + } + } + + for { + n := len(reordered) + + for _, e := range body { + if reordered.Contains(e) { + continue + } + + ovs := outputVarsForExpr(e, arity, safe) + + // check closures: is this expression closing over variables that + // haven't been made safe by what's already included in `reordered`? 
+ vs := unsafeVarsInClosures(e) + cv := vs.Intersect(bodyVars).Diff(globals) + uv := cv.Diff(outputVarsForBody(reordered, arity, safe)) + + if len(uv) > 0 { + if uv.Equal(ovs) { // special case "closure-self" + continue + } + unsafe.Set(e, uv) + } + + for v := range unsafe[e] { + if ovs.Contains(v) || safe.Contains(v) { + delete(unsafe[e], v) + } + } + + if len(unsafe[e]) == 0 { + delete(unsafe, e) + reordered.Append(e) + safe.Update(ovs) // this expression's outputs are safe + } + } + + if len(reordered) == n { // fixed point, could not add any expr of body + break + } + } + + // Recursively visit closures and perform the safety checks on them. + // Update the globals at each expression to include the variables that could + // be closed over. + g := globals.Copy() + for i, e := range reordered { + if i > 0 { + g.Update(reordered[i-1].Vars(SafetyCheckVisitorParams)) + } + xform := &bodySafetyTransformer{ + builtins: builtins, + arity: arity, + current: e, + globals: g, + unsafe: unsafe, + } + NewGenericVisitor(xform.Visit).Walk(e) + } + + return reordered, unsafe +} + +type bodySafetyTransformer struct { + builtins map[string]*Builtin + arity func(Ref) int + current *Expr + globals VarSet + unsafe unsafeVars +} + +func (xform *bodySafetyTransformer) Visit(x interface{}) bool { + switch term := x.(type) { + case *Term: + switch x := term.Value.(type) { + case *object: + cpy, _ := x.Map(func(k, v *Term) (*Term, *Term, error) { + kcpy := k.Copy() + NewGenericVisitor(xform.Visit).Walk(kcpy) + vcpy := v.Copy() + NewGenericVisitor(xform.Visit).Walk(vcpy) + return kcpy, vcpy, nil + }) + term.Value = cpy + return true + case *set: + cpy, _ := x.Map(func(v *Term) (*Term, error) { + vcpy := v.Copy() + NewGenericVisitor(xform.Visit).Walk(vcpy) + return vcpy, nil + }) + term.Value = cpy + return true + case *ArrayComprehension: + xform.reorderArrayComprehensionSafety(x) + return true + case *ObjectComprehension: + xform.reorderObjectComprehensionSafety(x) + return true + case *SetComprehension: + xform.reorderSetComprehensionSafety(x) + return true + } + case *Expr: + if ev, ok := term.Terms.(*Every); ok { + xform.globals.Update(ev.KeyValueVars()) + ev.Body = xform.reorderComprehensionSafety(NewVarSet(), ev.Body) + return true + } + } + return false +} + +func (xform *bodySafetyTransformer) reorderComprehensionSafety(tv VarSet, body Body) Body { + bv := body.Vars(SafetyCheckVisitorParams) + bv.Update(xform.globals) + uv := tv.Diff(bv) + for v := range uv { + xform.unsafe.Add(xform.current, v) + } + + r, u := reorderBodyForSafety(xform.builtins, xform.arity, xform.globals, body) + if len(u) == 0 { + return r + } + + xform.unsafe.Update(u) + return body +} + +func (xform *bodySafetyTransformer) reorderArrayComprehensionSafety(ac *ArrayComprehension) { + ac.Body = xform.reorderComprehensionSafety(ac.Term.Vars(), ac.Body) +} + +func (xform *bodySafetyTransformer) reorderObjectComprehensionSafety(oc *ObjectComprehension) { + tv := oc.Key.Vars() + tv.Update(oc.Value.Vars()) + oc.Body = xform.reorderComprehensionSafety(tv, oc.Body) +} + +func (xform *bodySafetyTransformer) reorderSetComprehensionSafety(sc *SetComprehension) { + sc.Body = xform.reorderComprehensionSafety(sc.Term.Vars(), sc.Body) +} + +// unsafeVarsInClosures collects vars that are contained in closures within +// this expression. 
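+//
+// For example (illustrative): in count([y | y = input[x]]) > 0 both x and y
+// occur inside the comprehension closure and are collected here; the caller
+// then narrows the set down to vars such as x that must be made safe by the
+// surrounding body.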
+func unsafeVarsInClosures(e *Expr) VarSet { + vs := VarSet{} + WalkClosures(e, func(x interface{}) bool { + vis := &VarVisitor{vars: vs} + if ev, ok := x.(*Every); ok { + vis.Walk(ev.Body) + return true + } + vis.Walk(x) + return true + }) + return vs +} + +// OutputVarsFromBody returns all variables which are the "output" for +// the given body. For safety checks this means that they would be +// made safe by the body. +func OutputVarsFromBody(c *Compiler, body Body, safe VarSet) VarSet { + return outputVarsForBody(body, c.GetArity, safe) +} + +func outputVarsForBody(body Body, arity func(Ref) int, safe VarSet) VarSet { + o := safe.Copy() + for _, e := range body { + o.Update(outputVarsForExpr(e, arity, o)) + } + return o.Diff(safe) +} + +// OutputVarsFromExpr returns all variables which are the "output" for +// the given expression. For safety checks this means that they would be +// made safe by the expr. +func OutputVarsFromExpr(c *Compiler, expr *Expr, safe VarSet) VarSet { + return outputVarsForExpr(expr, c.GetArity, safe) +} + +func outputVarsForExpr(expr *Expr, arity func(Ref) int, safe VarSet) VarSet { + + // Negated expressions must be safe. + if expr.Negated { + return VarSet{} + } + + // With modifier inputs must be safe. + for _, with := range expr.With { + vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) + vis.Walk(with) + vars := vis.Vars() + unsafe := vars.Diff(safe) + if len(unsafe) > 0 { + return VarSet{} + } + } + + switch terms := expr.Terms.(type) { + case *Term: + return outputVarsForTerms(expr, safe) + case []*Term: + if expr.IsEquality() { + return outputVarsForExprEq(expr, safe) + } + + operator, ok := terms[0].Value.(Ref) + if !ok { + return VarSet{} + } + + ar := arity(operator) + if ar < 0 { + return VarSet{} + } + + return outputVarsForExprCall(expr, ar, safe, terms) + case *Every: + return outputVarsForTerms(terms.Domain, safe) + default: + panic("illegal expression") + } +} + +func outputVarsForExprEq(expr *Expr, safe VarSet) VarSet { + + if !validEqAssignArgCount(expr) { + return safe + } + + output := outputVarsForTerms(expr, safe) + output.Update(safe) + output.Update(Unify(output, expr.Operand(0), expr.Operand(1))) + + return output.Diff(safe) +} + +func outputVarsForExprCall(expr *Expr, arity int, safe VarSet, terms []*Term) VarSet { + + output := outputVarsForTerms(expr, safe) + + numInputTerms := arity + 1 + if numInputTerms >= len(terms) { + return output + } + + params := VarVisitorParams{ + SkipClosures: true, + SkipSets: true, + SkipObjectKeys: true, + SkipRefHead: true, + } + vis := NewVarVisitor().WithParams(params) + vis.Walk(Args(terms[:numInputTerms])) + unsafe := vis.Vars().Diff(output).Diff(safe) + + if len(unsafe) > 0 { + return VarSet{} + } + + vis = NewVarVisitor().WithParams(params) + vis.Walk(Args(terms[numInputTerms:])) + output.Update(vis.vars) + return output +} + +func outputVarsForTerms(expr interface{}, safe VarSet) VarSet { + output := VarSet{} + WalkTerms(expr, func(x *Term) bool { + switch r := x.Value.(type) { + case *SetComprehension, *ArrayComprehension, *ObjectComprehension: + return true + case Ref: + if !isRefSafe(r, safe) { + return true + } + output.Update(r.OutputVars()) + return false + } + return false + }) + return output +} + +type equalityFactory struct { + gen *localVarGenerator +} + +func newEqualityFactory(gen *localVarGenerator) *equalityFactory { + return &equalityFactory{gen} +} + +func (f *equalityFactory) Generate(other *Term) *Expr { + term := 
NewTerm(f.gen.Generate()).SetLocation(other.Location) + expr := Equality.Expr(term, other) + expr.Generated = true + expr.Location = other.Location + return expr +} + +type localVarGenerator struct { + exclude VarSet + suffix string + next int +} + +func newLocalVarGeneratorForModuleSet(sorted []string, modules map[string]*Module) *localVarGenerator { + exclude := NewVarSet() + vis := &VarVisitor{vars: exclude} + for _, key := range sorted { + vis.Walk(modules[key]) + } + return &localVarGenerator{exclude: exclude, next: 0} +} + +func newLocalVarGenerator(suffix string, node interface{}) *localVarGenerator { + exclude := NewVarSet() + vis := &VarVisitor{vars: exclude} + vis.Walk(node) + return &localVarGenerator{exclude: exclude, suffix: suffix, next: 0} +} + +func (l *localVarGenerator) Generate() Var { + for { + result := Var("__local" + l.suffix + strconv.Itoa(l.next) + "__") + l.next++ + if !l.exclude.Contains(result) { + return result + } + } +} + +func getGlobals(pkg *Package, rules []Ref, imports []*Import) map[Var]*usedRef { + + globals := make(map[Var]*usedRef, len(rules)) // NB: might grow bigger with imports + + // Populate globals with exports within the package. + for _, ref := range rules { + v := ref[0].Value.(Var) + globals[v] = &usedRef{ref: pkg.Path.Append(StringTerm(string(v)))} + } + + // Populate globals with imports. + for _, imp := range imports { + path := imp.Path.Value.(Ref) + if FutureRootDocument.Equal(path[0]) || RegoRootDocument.Equal(path[0]) { + continue // ignore future and rego imports + } + globals[imp.Name()] = &usedRef{ref: path} + } + + return globals +} + +func requiresEval(x *Term) bool { + if x == nil { + return false + } + return ContainsRefs(x) || ContainsComprehensions(x) +} + +func resolveRef(globals map[Var]*usedRef, ignore *declaredVarStack, ref Ref) Ref { + + r := Ref{} + for i, x := range ref { + switch v := x.Value.(type) { + case Var: + if g, ok := globals[v]; ok && !ignore.Contains(v) { + cpy := g.ref.Copy() + for i := range cpy { + cpy[i].SetLocation(x.Location) + } + if i == 0 { + r = cpy + } else { + r = append(r, NewTerm(cpy).SetLocation(x.Location)) + } + g.used = true + } else { + r = append(r, x) + } + case Ref, *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: + r = append(r, resolveRefsInTerm(globals, ignore, x)) + default: + r = append(r, x) + } + } + + return r +} + +type usedRef struct { + ref Ref + used bool +} + +func resolveRefsInRule(globals map[Var]*usedRef, rule *Rule) error { + ignore := &declaredVarStack{} + + vars := NewVarSet() + var vis *GenericVisitor + var err error + + // Walk args to collect vars and transform body so that callers can shadow + // root documents. + vis = NewGenericVisitor(func(x interface{}) bool { + if err != nil { + return true + } + switch x := x.(type) { + case Var: + vars.Add(x) + + // Object keys cannot be pattern matched so only walk values. + case *object: + x.Foreach(func(_, v *Term) { + vis.Walk(v) + }) + + // Skip terms that could contain vars that cannot be pattern matched. + case Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: + return true + + case *Term: + if _, ok := x.Value.(Ref); ok { + if RootDocumentRefs.Contains(x) { + // We could support args named input, data, etc. however + // this would require rewriting terms in the head and body. + // Preventing root document shadowing is simpler, and + // arguably, will prevent confusing names from being used. 
+ // NOTE: this check is also performed as part of strict-mode in + // checkRootDocumentOverrides. + err = fmt.Errorf("args must not shadow %v (use a different variable name)", x) + return true + } + } + } + return false + }) + + vis.Walk(rule.Head.Args) + + if err != nil { + return err + } + + ignore.Push(vars) + ignore.Push(declaredVars(rule.Body)) + + ref := rule.Head.Ref() + for i := 1; i < len(ref); i++ { + ref[i] = resolveRefsInTerm(globals, ignore, ref[i]) + } + if rule.Head.Key != nil { + rule.Head.Key = resolveRefsInTerm(globals, ignore, rule.Head.Key) + } + + if rule.Head.Value != nil { + rule.Head.Value = resolveRefsInTerm(globals, ignore, rule.Head.Value) + } + + rule.Body = resolveRefsInBody(globals, ignore, rule.Body) + return nil +} + +func resolveRefsInBody(globals map[Var]*usedRef, ignore *declaredVarStack, body Body) Body { + r := make([]*Expr, 0, len(body)) + for _, expr := range body { + r = append(r, resolveRefsInExpr(globals, ignore, expr)) + } + return r +} + +func resolveRefsInExpr(globals map[Var]*usedRef, ignore *declaredVarStack, expr *Expr) *Expr { + cpy := *expr + switch ts := expr.Terms.(type) { + case *Term: + cpy.Terms = resolveRefsInTerm(globals, ignore, ts) + case []*Term: + buf := make([]*Term, len(ts)) + for i := 0; i < len(ts); i++ { + buf[i] = resolveRefsInTerm(globals, ignore, ts[i]) + } + cpy.Terms = buf + case *SomeDecl: + if val, ok := ts.Symbols[0].Value.(Call); ok { + cpy.Terms = &SomeDecl{Symbols: []*Term{CallTerm(resolveRefsInTermSlice(globals, ignore, val)...)}} + } + case *Every: + locals := NewVarSet() + if ts.Key != nil { + locals.Update(ts.Key.Vars()) + } + locals.Update(ts.Value.Vars()) + ignore.Push(locals) + cpy.Terms = &Every{ + Key: ts.Key.Copy(), // TODO(sr): do more? + Value: ts.Value.Copy(), // TODO(sr): do more? + Domain: resolveRefsInTerm(globals, ignore, ts.Domain), + Body: resolveRefsInBody(globals, ignore, ts.Body), + } + ignore.Pop() + } + for _, w := range cpy.With { + w.Target = resolveRefsInTerm(globals, ignore, w.Target) + w.Value = resolveRefsInTerm(globals, ignore, w.Value) + } + return &cpy +} + +func resolveRefsInTerm(globals map[Var]*usedRef, ignore *declaredVarStack, term *Term) *Term { + switch v := term.Value.(type) { + case Var: + if g, ok := globals[v]; ok && !ignore.Contains(v) { + cpy := g.ref.Copy() + for i := range cpy { + cpy[i].SetLocation(term.Location) + } + g.used = true + return NewTerm(cpy).SetLocation(term.Location) + } + return term + case Ref: + fqn := resolveRef(globals, ignore, v) + cpy := *term + cpy.Value = fqn + return &cpy + case *object: + cpy := *term + cpy.Value, _ = v.Map(func(k, v *Term) (*Term, *Term, error) { + k = resolveRefsInTerm(globals, ignore, k) + v = resolveRefsInTerm(globals, ignore, v) + return k, v, nil + }) + return &cpy + case *Array: + cpy := *term + cpy.Value = NewArray(resolveRefsInTermArray(globals, ignore, v)...) 
+ return &cpy + case Call: + cpy := *term + cpy.Value = Call(resolveRefsInTermSlice(globals, ignore, v)) + return &cpy + case Set: + s, _ := v.Map(func(e *Term) (*Term, error) { + return resolveRefsInTerm(globals, ignore, e), nil + }) + cpy := *term + cpy.Value = s + return &cpy + case *ArrayComprehension: + ac := &ArrayComprehension{} + ignore.Push(declaredVars(v.Body)) + ac.Term = resolveRefsInTerm(globals, ignore, v.Term) + ac.Body = resolveRefsInBody(globals, ignore, v.Body) + cpy := *term + cpy.Value = ac + ignore.Pop() + return &cpy + case *ObjectComprehension: + oc := &ObjectComprehension{} + ignore.Push(declaredVars(v.Body)) + oc.Key = resolveRefsInTerm(globals, ignore, v.Key) + oc.Value = resolveRefsInTerm(globals, ignore, v.Value) + oc.Body = resolveRefsInBody(globals, ignore, v.Body) + cpy := *term + cpy.Value = oc + ignore.Pop() + return &cpy + case *SetComprehension: + sc := &SetComprehension{} + ignore.Push(declaredVars(v.Body)) + sc.Term = resolveRefsInTerm(globals, ignore, v.Term) + sc.Body = resolveRefsInBody(globals, ignore, v.Body) + cpy := *term + cpy.Value = sc + ignore.Pop() + return &cpy + default: + return term + } +} + +func resolveRefsInTermArray(globals map[Var]*usedRef, ignore *declaredVarStack, terms *Array) []*Term { + cpy := make([]*Term, terms.Len()) + for i := 0; i < terms.Len(); i++ { + cpy[i] = resolveRefsInTerm(globals, ignore, terms.Elem(i)) + } + return cpy +} + +func resolveRefsInTermSlice(globals map[Var]*usedRef, ignore *declaredVarStack, terms []*Term) []*Term { + cpy := make([]*Term, len(terms)) + for i := 0; i < len(terms); i++ { + cpy[i] = resolveRefsInTerm(globals, ignore, terms[i]) + } + return cpy +} + +type declaredVarStack []VarSet + +func (s declaredVarStack) Contains(v Var) bool { + for i := len(s) - 1; i >= 0; i-- { + if _, ok := s[i][v]; ok { + return ok + } + } + return false +} + +func (s declaredVarStack) Add(v Var) { + s[len(s)-1].Add(v) +} + +func (s *declaredVarStack) Push(vs VarSet) { + *s = append(*s, vs) +} + +func (s *declaredVarStack) Pop() { + curr := *s + *s = curr[:len(curr)-1] +} + +func declaredVars(x interface{}) VarSet { + vars := NewVarSet() + vis := NewGenericVisitor(func(x interface{}) bool { + switch x := x.(type) { + case *Expr: + if x.IsAssignment() && validEqAssignArgCount(x) { + WalkVars(x.Operand(0), func(v Var) bool { + vars.Add(v) + return false + }) + } else if decl, ok := x.Terms.(*SomeDecl); ok { + for i := range decl.Symbols { + switch val := decl.Symbols[i].Value.(type) { + case Var: + vars.Add(val) + case Call: + args := val[1:] + if len(args) == 3 { // some x, y in xs + WalkVars(args[1], func(v Var) bool { + vars.Add(v) + return false + }) + } + // some x in xs + WalkVars(args[0], func(v Var) bool { + vars.Add(v) + return false + }) + } + } + } + case *ArrayComprehension, *SetComprehension, *ObjectComprehension: + return true + } + return false + }) + vis.Walk(x) + return vars +} + +// rewriteComprehensionTerms will rewrite comprehensions so that the term part +// is bound to a variable in the body. This allows any type of term to be used +// in the term part (even if the term requires evaluation.) 
+// +// For instance, given the following comprehension: +// +// [x[0] | x = y[_]; y = [1,2,3]] +// +// The comprehension would be rewritten as: +// +// [__local0__ | x = y[_]; y = [1,2,3]; __local0__ = x[0]] +func rewriteComprehensionTerms(f *equalityFactory, node interface{}) (interface{}, error) { + return TransformComprehensions(node, func(x interface{}) (Value, error) { + switch x := x.(type) { + case *ArrayComprehension: + if requiresEval(x.Term) { + expr := f.Generate(x.Term) + x.Term = expr.Operand(0) + x.Body.Append(expr) + } + return x, nil + case *SetComprehension: + if requiresEval(x.Term) { + expr := f.Generate(x.Term) + x.Term = expr.Operand(0) + x.Body.Append(expr) + } + return x, nil + case *ObjectComprehension: + if requiresEval(x.Key) { + expr := f.Generate(x.Key) + x.Key = expr.Operand(0) + x.Body.Append(expr) + } + if requiresEval(x.Value) { + expr := f.Generate(x.Value) + x.Value = expr.Operand(0) + x.Body.Append(expr) + } + return x, nil + } + panic("illegal type") + }) +} + +// rewriteEquals will rewrite exprs under x as unification calls instead of == +// calls. For example: +// +// data.foo == data.bar is rewritten as data.foo = data.bar +// +// This stage should only run the safety check (since == is a built-in with no +// outputs, so the inputs must not be marked as safe.) +// +// This stage is not executed by the query compiler by default because when +// callers specify == instead of = they expect to receive a true/false/undefined +// result back whereas with = the result is only ever true/undefined. For +// partial evaluation cases we do want to rewrite == to = to simplify the +// result. +func rewriteEquals(x interface{}) (modified bool) { + doubleEq := Equal.Ref() + unifyOp := Equality.Ref() + t := NewGenericTransformer(func(x interface{}) (interface{}, error) { + if x, ok := x.(*Expr); ok && x.IsCall() { + operator := x.Operator() + if operator.Equal(doubleEq) && len(x.Operands()) == 2 { + modified = true + x.SetOperator(NewTerm(unifyOp)) + } + } + return x, nil + }) + _, _ = Transform(t, x) // ignore error + return modified +} + +func rewriteTestEqualities(f *equalityFactory, body Body) Body { + result := make(Body, 0, len(body)) + for _, expr := range body { + // We can't rewrite negated expressions; if the extracted term is undefined, evaluation would fail before + // reaching the negation check. + if !expr.Negated && !expr.Generated { + switch { + case expr.IsEquality(): + terms := expr.Terms.([]*Term) + result, terms[1] = rewriteDynamicsShallow(expr, f, terms[1], result) + result, terms[2] = rewriteDynamicsShallow(expr, f, terms[2], result) + case expr.IsEvery(): + // We rewrite equalities inside of every-bodies as a fail here will be the cause of the test-rule fail. + // Failures inside other expressions with closures, such as comprehensions, won't cause the test-rule to fail, so we skip those. 
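+				// Illustrative sketch (the test and var names are assumed): in
+				//
+				//	test_p if { every x in input.xs { x.y = 1 } }
+				//
+				// the ref x.y inside the every body is extracted into a
+				// generated local, just as for a top-level test equality.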
+ every := expr.Terms.(*Every) + every.Body = rewriteTestEqualities(f, every.Body) + } + } + result = appendExpr(result, expr) + } + return result +} + +func rewriteDynamicsShallow(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { + switch term.Value.(type) { + case Ref, *ArrayComprehension, *SetComprehension, *ObjectComprehension: + generated := f.Generate(term) + generated.With = original.With + result.Append(generated) + connectGeneratedExprs(original, generated) + return result, result[len(result)-1].Operand(0) + } + return result, term +} + +// rewriteDynamics will rewrite the body so that dynamic terms (i.e., refs and +// comprehensions) are bound to vars earlier in the query. This translation +// results in eager evaluation. +// +// For instance, given the following query: +// +// foo(data.bar) = 1 +// +// The rewritten version will be: +// +// __local0__ = data.bar; foo(__local0__) = 1 +func rewriteDynamics(f *equalityFactory, body Body) Body { + result := make(Body, 0, len(body)) + for _, expr := range body { + switch { + case expr.IsEquality(): + result = rewriteDynamicsEqExpr(f, expr, result) + case expr.IsCall(): + result = rewriteDynamicsCallExpr(f, expr, result) + case expr.IsEvery(): + result = rewriteDynamicsEveryExpr(f, expr, result) + default: + result = rewriteDynamicsTermExpr(f, expr, result) + } + } + return result +} + +func appendExpr(body Body, expr *Expr) Body { + body.Append(expr) + return body +} + +func rewriteDynamicsEqExpr(f *equalityFactory, expr *Expr, result Body) Body { + if !validEqAssignArgCount(expr) { + return appendExpr(result, expr) + } + terms := expr.Terms.([]*Term) + result, terms[1] = rewriteDynamicsInTerm(expr, f, terms[1], result) + result, terms[2] = rewriteDynamicsInTerm(expr, f, terms[2], result) + return appendExpr(result, expr) +} + +func rewriteDynamicsCallExpr(f *equalityFactory, expr *Expr, result Body) Body { + terms := expr.Terms.([]*Term) + for i := 1; i < len(terms); i++ { + result, terms[i] = rewriteDynamicsOne(expr, f, terms[i], result) + } + return appendExpr(result, expr) +} + +func rewriteDynamicsEveryExpr(f *equalityFactory, expr *Expr, result Body) Body { + ev := expr.Terms.(*Every) + result, ev.Domain = rewriteDynamicsOne(expr, f, ev.Domain, result) + ev.Body = rewriteDynamics(f, ev.Body) + return appendExpr(result, expr) +} + +func rewriteDynamicsTermExpr(f *equalityFactory, expr *Expr, result Body) Body { + term := expr.Terms.(*Term) + result, expr.Terms = rewriteDynamicsInTerm(expr, f, term, result) + return appendExpr(result, expr) +} + +func rewriteDynamicsInTerm(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { + switch v := term.Value.(type) { + case Ref: + for i := 1; i < len(v); i++ { + result, v[i] = rewriteDynamicsOne(original, f, v[i], result) + } + case *ArrayComprehension: + v.Body = rewriteDynamics(f, v.Body) + case *SetComprehension: + v.Body = rewriteDynamics(f, v.Body) + case *ObjectComprehension: + v.Body = rewriteDynamics(f, v.Body) + default: + result, term = rewriteDynamicsOne(original, f, term, result) + } + return result, term +} + +func rewriteDynamicsOne(original *Expr, f *equalityFactory, term *Term, result Body) (Body, *Term) { + switch v := term.Value.(type) { + case Ref: + for i := 1; i < len(v); i++ { + result, v[i] = rewriteDynamicsOne(original, f, v[i], result) + } + generated := f.Generate(term) + generated.With = original.With + result.Append(generated) + connectGeneratedExprs(original, generated) + return result, 
result[len(result)-1].Operand(0) + case *Array: + for i := 0; i < v.Len(); i++ { + var t *Term + result, t = rewriteDynamicsOne(original, f, v.Elem(i), result) + v.set(i, t) + } + return result, term + case *object: + cpy := NewObject() + v.Foreach(func(key, value *Term) { + result, key = rewriteDynamicsOne(original, f, key, result) + result, value = rewriteDynamicsOne(original, f, value, result) + cpy.Insert(key, value) + }) + return result, NewTerm(cpy).SetLocation(term.Location) + case Set: + cpy := NewSet() + for _, term := range v.Slice() { + var rw *Term + result, rw = rewriteDynamicsOne(original, f, term, result) + cpy.Add(rw) + } + return result, NewTerm(cpy).SetLocation(term.Location) + case *ArrayComprehension: + var extra *Expr + v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) + result.Append(extra) + connectGeneratedExprs(original, extra) + return result, result[len(result)-1].Operand(0) + case *SetComprehension: + var extra *Expr + v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) + result.Append(extra) + connectGeneratedExprs(original, extra) + return result, result[len(result)-1].Operand(0) + case *ObjectComprehension: + var extra *Expr + v.Body, extra = rewriteDynamicsComprehensionBody(original, f, v.Body, term) + result.Append(extra) + connectGeneratedExprs(original, extra) + return result, result[len(result)-1].Operand(0) + } + return result, term +} + +func rewriteDynamicsComprehensionBody(original *Expr, f *equalityFactory, body Body, term *Term) (Body, *Expr) { + body = rewriteDynamics(f, body) + generated := f.Generate(term) + generated.With = original.With + return body, generated +} + +func rewriteExprTermsInHead(gen *localVarGenerator, rule *Rule) { + for i := range rule.Head.Args { + support, output := expandExprTerm(gen, rule.Head.Args[i]) + for j := range support { + rule.Body.Append(support[j]) + } + rule.Head.Args[i] = output + } + if rule.Head.Key != nil { + support, output := expandExprTerm(gen, rule.Head.Key) + for i := range support { + rule.Body.Append(support[i]) + } + rule.Head.Key = output + } + if rule.Head.Value != nil { + support, output := expandExprTerm(gen, rule.Head.Value) + for i := range support { + rule.Body.Append(support[i]) + } + rule.Head.Value = output + } +} + +func rewriteExprTermsInBody(gen *localVarGenerator, body Body) Body { + cpy := make(Body, 0, len(body)) + for i := 0; i < len(body); i++ { + for _, expr := range expandExpr(gen, body[i]) { + cpy.Append(expr) + } + } + return cpy +} + +func expandExpr(gen *localVarGenerator, expr *Expr) (result []*Expr) { + for i := range expr.With { + extras, value := expandExprTerm(gen, expr.With[i].Value) + expr.With[i].Value = value + result = append(result, extras...) + } + switch terms := expr.Terms.(type) { + case *Term: + extras, term := expandExprTerm(gen, terms) + if len(expr.With) > 0 { + for i := range extras { + extras[i].With = expr.With + } + } + result = append(result, extras...) + expr.Terms = term + result = append(result, expr) + case []*Term: + for i := 1; i < len(terms); i++ { + var extras []*Expr + extras, terms[i] = expandExprTerm(gen, terms[i]) + connectGeneratedExprs(expr, extras...) + if len(expr.With) > 0 { + for i := range extras { + extras[i].With = expr.With + } + } + result = append(result, extras...) 
+ } + result = append(result, expr) + case *Every: + var extras []*Expr + + term := NewTerm(gen.Generate()).SetLocation(terms.Domain.Location) + eq := Equality.Expr(term, terms.Domain).SetLocation(terms.Domain.Location) + eq.Generated = true + eq.With = expr.With + extras = expandExpr(gen, eq) + terms.Domain = term + + terms.Body = rewriteExprTermsInBody(gen, terms.Body) + result = append(result, extras...) + result = append(result, expr) + } + return +} + +func connectGeneratedExprs(parent *Expr, children ...*Expr) { + for _, child := range children { + child.generatedFrom = parent + parent.generates = append(parent.generates, child) + } +} + +func expandExprTerm(gen *localVarGenerator, term *Term) (support []*Expr, output *Term) { + output = term + switch v := term.Value.(type) { + case Call: + for i := 1; i < len(v); i++ { + var extras []*Expr + extras, v[i] = expandExprTerm(gen, v[i]) + support = append(support, extras...) + } + output = NewTerm(gen.Generate()).SetLocation(term.Location) + expr := v.MakeExpr(output).SetLocation(term.Location) + expr.Generated = true + support = append(support, expr) + case Ref: + support = expandExprRef(gen, v) + case *Array: + support = expandExprTermArray(gen, v) + case *object: + cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { + extras1, expandedKey := expandExprTerm(gen, k) + extras2, expandedValue := expandExprTerm(gen, v) + support = append(support, extras1...) + support = append(support, extras2...) + return expandedKey, expandedValue, nil + }) + output = NewTerm(cpy).SetLocation(term.Location) + case Set: + cpy, _ := v.Map(func(x *Term) (*Term, error) { + extras, expanded := expandExprTerm(gen, x) + support = append(support, extras...) + return expanded, nil + }) + output = NewTerm(cpy).SetLocation(term.Location) + case *ArrayComprehension: + support, term := expandExprTerm(gen, v.Term) + for i := range support { + v.Body.Append(support[i]) + } + v.Term = term + v.Body = rewriteExprTermsInBody(gen, v.Body) + case *SetComprehension: + support, term := expandExprTerm(gen, v.Term) + for i := range support { + v.Body.Append(support[i]) + } + v.Term = term + v.Body = rewriteExprTermsInBody(gen, v.Body) + case *ObjectComprehension: + support, key := expandExprTerm(gen, v.Key) + for i := range support { + v.Body.Append(support[i]) + } + v.Key = key + support, value := expandExprTerm(gen, v.Value) + for i := range support { + v.Body.Append(support[i]) + } + v.Value = value + v.Body = rewriteExprTermsInBody(gen, v.Body) + } + return +} + +func expandExprRef(gen *localVarGenerator, v []*Term) (support []*Expr) { + // Start by calling a normal expandExprTerm on all terms. + support = expandExprTermSlice(gen, v) + + // Rewrite references in order to support indirect references. We rewrite + // e.g. + // + // [1, 2, 3][i] + // + // to + // + // __local_var = [1, 2, 3] + // __local_var[i] + // + // to support these. This only impacts the reference subject, i.e. the + // first item in the slice. + var subject = v[0] + switch subject.Value.(type) { + case *Array, Object, Set, *ArrayComprehension, *SetComprehension, *ObjectComprehension, Call: + f := newEqualityFactory(gen) + assignToLocal := f.Generate(subject) + support = append(support, assignToLocal) + v[0] = assignToLocal.Operand(0) + } + return +} + +func expandExprTermArray(gen *localVarGenerator, arr *Array) (support []*Expr) { + for i := 0; i < arr.Len(); i++ { + extras, v := expandExprTerm(gen, arr.Elem(i)) + arr.set(i, v) + support = append(support, extras...) 
+ } + return +} + +func expandExprTermSlice(gen *localVarGenerator, v []*Term) (support []*Expr) { + for i := 0; i < len(v); i++ { + var extras []*Expr + extras, v[i] = expandExprTerm(gen, v[i]) + support = append(support, extras...) + } + return +} + +type localDeclaredVars struct { + vars []*declaredVarSet + + // rewritten contains a mapping of *all* user-defined variables + // that have been rewritten whereas vars contains the state + // from the current query (not any nested queries, and all vars + // seen). + rewritten map[Var]Var + + // indicates if an assignment (:= operator) has been seen *ever* + assignment bool +} + +type varOccurrence int + +const ( + newVar varOccurrence = iota + argVar + seenVar + assignedVar + declaredVar +) + +type declaredVarSet struct { + vs map[Var]Var + reverse map[Var]Var + occurrence map[Var]varOccurrence + count map[Var]int +} + +func newDeclaredVarSet() *declaredVarSet { + return &declaredVarSet{ + vs: map[Var]Var{}, + reverse: map[Var]Var{}, + occurrence: map[Var]varOccurrence{}, + count: map[Var]int{}, + } +} + +func newLocalDeclaredVars() *localDeclaredVars { + return &localDeclaredVars{ + vars: []*declaredVarSet{newDeclaredVarSet()}, + rewritten: map[Var]Var{}, + } +} + +func (s *localDeclaredVars) Copy() *localDeclaredVars { + stack := &localDeclaredVars{ + vars: []*declaredVarSet{}, + rewritten: map[Var]Var{}, + } + + for i := range s.vars { + stack.vars = append(stack.vars, newDeclaredVarSet()) + maps.Copy(stack.vars[0].vs, s.vars[i].vs) + maps.Copy(stack.vars[0].reverse, s.vars[i].reverse) + maps.Copy(stack.vars[0].occurrence, s.vars[i].occurrence) + maps.Copy(stack.vars[0].count, s.vars[i].count) + } + + maps.Copy(stack.rewritten, s.rewritten) + + return stack +} + +func (s *localDeclaredVars) Push() { + s.vars = append(s.vars, newDeclaredVarSet()) +} + +func (s *localDeclaredVars) Pop() *declaredVarSet { + sl := s.vars + curr := sl[len(sl)-1] + s.vars = sl[:len(sl)-1] + return curr +} + +func (s localDeclaredVars) Peek() *declaredVarSet { + return s.vars[len(s.vars)-1] +} + +func (s localDeclaredVars) Insert(x, y Var, occurrence varOccurrence) { + elem := s.vars[len(s.vars)-1] + elem.vs[x] = y + elem.reverse[y] = x + elem.occurrence[x] = occurrence + + elem.count[x] = 1 + + // If the variable has been rewritten (where x != y, with y being + // the generated value), store it in the map of rewritten vars. + // Assume that the generated values are unique for the compilation. + if !x.Equal(y) { + s.rewritten[y] = x + } +} + +func (s localDeclaredVars) Declared(x Var) (y Var, ok bool) { + for i := len(s.vars) - 1; i >= 0; i-- { + if y, ok = s.vars[i].vs[x]; ok { + return + } + } + return +} + +// Occurrence returns a flag that indicates whether x has occurred in the +// current scope. +func (s localDeclaredVars) Occurrence(x Var) varOccurrence { + return s.vars[len(s.vars)-1].occurrence[x] +} + +// GlobalOccurrence returns a flag that indicates whether x has occurred in the +// global scope. 
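+//
+// For example (illustrative): a var assigned in the enclosing rule body is
+// not recorded in the innermost scope of a nested comprehension, so
+// Occurrence reports newVar for it there while GlobalOccurrence still
+// reports assignedVar.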
+func (s localDeclaredVars) GlobalOccurrence(x Var) (varOccurrence, bool) { + for i := len(s.vars) - 1; i >= 0; i-- { + if occ, ok := s.vars[i].occurrence[x]; ok { + return occ, true + } + } + return newVar, false +} + +// Seen marks x as seen by incrementing its counter +func (s localDeclaredVars) Seen(x Var) { + for i := len(s.vars) - 1; i >= 0; i-- { + dvs := s.vars[i] + if c, ok := dvs.count[x]; ok { + dvs.count[x] = c + 1 + return + } + } + + s.vars[len(s.vars)-1].count[x] = 1 +} + +// Count returns how many times x has been seen +func (s localDeclaredVars) Count(x Var) int { + for i := len(s.vars) - 1; i >= 0; i-- { + if c, ok := s.vars[i].count[x]; ok { + return c + } + } + + return 0 +} + +// rewriteLocalVars rewrites bodies to remove assignment/declaration +// expressions. For example: +// +// a := 1; p[a] +// +// Is rewritten to: +// +// __local0__ = 1; p[__local0__] +// +// During rewriting, assignees are validated to prevent use before declaration. +func rewriteLocalVars(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, strict bool) (Body, map[Var]Var, Errors) { + var errs Errors + body, errs = rewriteDeclaredVarsInBody(g, stack, used, body, errs, strict) + return body, stack.Peek().vs, errs +} + +func rewriteDeclaredVarsInBody(g *localVarGenerator, stack *localDeclaredVars, used VarSet, body Body, errs Errors, strict bool) (Body, Errors) { + + var cpy Body + + for i := range body { + var expr *Expr + switch { + case body[i].IsAssignment(): + stack.assignment = true + expr, errs = rewriteDeclaredAssignment(g, stack, body[i], errs, strict) + case body[i].IsSome(): + expr, errs = rewriteSomeDeclStatement(g, stack, body[i], errs, strict) + case body[i].IsEvery(): + expr, errs = rewriteEveryStatement(g, stack, body[i], errs, strict) + default: + expr, errs = rewriteDeclaredVarsInExpr(g, stack, body[i], errs, strict) + } + if expr != nil { + cpy.Append(expr) + } + } + + // If the body only contained a var statement it will be empty at this + // point. Append true to the body to ensure that it's non-empty (zero length + // bodies are not supported.) + if len(cpy) == 0 { + cpy.Append(NewExpr(BooleanTerm(true))) + } + + errs = checkUnusedAssignedVars(body, stack, used, errs, strict) + return cpy, checkUnusedDeclaredVars(body, stack, used, cpy, errs) +} + +func checkUnusedAssignedVars(body Body, stack *localDeclaredVars, used VarSet, errs Errors, strict bool) Errors { + + if !strict || len(errs) > 0 { + return errs + } + + dvs := stack.Peek() + unused := NewVarSet() + + for v, occ := range dvs.occurrence { + // A var that was assigned in this scope must have been seen (used) more than once (the time of assignment) in + // the same, or nested, scope to be counted as used. 
+ if !v.IsWildcard() && stack.Count(v) <= 1 && occ == assignedVar { + unused.Add(dvs.vs[v]) + } + } + + rewrittenUsed := NewVarSet() + for v := range used { + if gv, ok := stack.Declared(v); ok { + rewrittenUsed.Add(gv) + } else { + rewrittenUsed.Add(v) + } + } + + unused = unused.Diff(rewrittenUsed) + + for _, gv := range unused.Sorted() { + found := false + for i := range body { + if body[i].Vars(VarVisitorParams{}).Contains(gv) { + errs = append(errs, NewError(CompileErr, body[i].Loc(), "assigned var %v unused", dvs.reverse[gv])) + found = true + break + } + } + if !found { + errs = append(errs, NewError(CompileErr, body[0].Loc(), "assigned var %v unused", dvs.reverse[gv])) + } + } + + return errs +} + +func checkUnusedDeclaredVars(body Body, stack *localDeclaredVars, used VarSet, cpy Body, errs Errors) Errors { + + // NOTE(tsandall): Do not generate more errors if there are existing + // declaration errors. + if len(errs) > 0 { + return errs + } + + dvs := stack.Peek() + declared := NewVarSet() + + for v, occ := range dvs.occurrence { + if occ == declaredVar { + declared.Add(dvs.vs[v]) + } + } + + bodyvars := cpy.Vars(VarVisitorParams{}) + + for v := range used { + if gv, ok := stack.Declared(v); ok { + bodyvars.Add(gv) + } else { + bodyvars.Add(v) + } + } + + unused := declared.Diff(bodyvars).Diff(used) + + for _, gv := range unused.Sorted() { + rv := dvs.reverse[gv] + if !rv.IsGenerated() { + // Scan through body exprs, looking for a match between the + // bad var's original name, and each expr's declared vars. + foundUnusedVarByName := false + for i := range body { + varsDeclaredInExpr := declaredVars(body[i]) + if varsDeclaredInExpr.Contains(dvs.reverse[gv]) { + // TODO(philipc): Clean up the offset logic here when the parser + // reports more accurate locations. + errs = append(errs, NewError(CompileErr, body[i].Loc(), "declared var %v unused", dvs.reverse[gv])) + foundUnusedVarByName = true + break + } + } + // Default error location returned. 
+ if !foundUnusedVarByName { + errs = append(errs, NewError(CompileErr, body[0].Loc(), "declared var %v unused", dvs.reverse[gv])) + } + } + } + + return errs +} + +func rewriteEveryStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + e := expr.Copy() + every := e.Terms.(*Every) + + errs = rewriteDeclaredVarsInTermRecursive(g, stack, every.Domain, errs, strict) + + stack.Push() + defer stack.Pop() + + // if the key exists, rewrite + if every.Key != nil { + if v := every.Key.Value.(Var); !v.IsWildcard() { + gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) + if err != nil { + return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error())) //nolint:govet + } + every.Key.Value = gv + } + } else { // if the key doesn't exist, add dummy local + every.Key = NewTerm(g.Generate()) + } + + // value is always present + if v := every.Value.Value.(Var); !v.IsWildcard() { + gv, err := rewriteDeclaredVar(g, stack, v, declaredVar) + if err != nil { + return nil, append(errs, NewError(CompileErr, every.Loc(), err.Error())) //nolint:govet + } + every.Value.Value = gv + } + + used := NewVarSet() + every.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, every.Body, errs, strict) + + return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) +} + +func rewriteSomeDeclStatement(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + e := expr.Copy() + decl := e.Terms.(*SomeDecl) + for i := range decl.Symbols { + switch v := decl.Symbols[i].Value.(type) { + case Var: + if _, err := rewriteDeclaredVar(g, stack, v, declaredVar); err != nil { + return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error())) //nolint:govet + } + case Call: + var key, val, container *Term + switch len(v) { + case 4: // member3 + key = v[1] + val = v[2] + container = v[3] + case 3: // member + key = NewTerm(g.Generate()) + val = v[1] + container = v[2] + } + + var rhs *Term + switch c := container.Value.(type) { + case Ref: + rhs = RefTerm(append(c, key)...) + default: + rhs = RefTerm(container, key) + } + e.Terms = []*Term{ + RefTerm(VarTerm(Equality.Name)), val, rhs, + } + + for _, v0 := range outputVarsForExprEq(e, container.Vars()).Sorted() { + if _, err := rewriteDeclaredVar(g, stack, v0, declaredVar); err != nil { + return nil, append(errs, NewError(CompileErr, decl.Loc(), err.Error())) //nolint:govet + } + } + return rewriteDeclaredVarsInExpr(g, stack, e, errs, strict) + } + } + return nil, errs +} + +func rewriteDeclaredVarsInExpr(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + vis := NewGenericVisitor(func(x interface{}) bool { + var stop bool + switch x := x.(type) { + case *Term: + stop, errs = rewriteDeclaredVarsInTerm(g, stack, x, errs, strict) + case *With: + stop, errs = true, rewriteDeclaredVarsInWithRecursive(g, stack, x, errs, strict) + } + return stop + }) + vis.Walk(expr) + return expr, errs +} + +func rewriteDeclaredAssignment(g *localVarGenerator, stack *localDeclaredVars, expr *Expr, errs Errors, strict bool) (*Expr, Errors) { + + if expr.Negated { + errs = append(errs, NewError(CompileErr, expr.Location, "cannot assign vars inside negated expression")) + return expr, errs + } + + numErrsBefore := len(errs) + + if !validEqAssignArgCount(expr) { + return expr, errs + } + + // Rewrite terms on right hand side capture seen vars and recursively + // process comprehensions before left hand side is processed. 
Also + // rewrite with modifier. + errs = rewriteDeclaredVarsInTermRecursive(g, stack, expr.Operand(1), errs, strict) + + for _, w := range expr.With { + errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) + } + + // Rewrite vars on left hand side with unique names. Catch redeclaration + // and invalid term types here. + var vis func(t *Term) bool + + vis = func(t *Term) bool { + switch v := t.Value.(type) { + case Var: + if gv, err := rewriteDeclaredVar(g, stack, v, assignedVar); err != nil { + errs = append(errs, NewError(CompileErr, t.Location, err.Error())) //nolint:govet + } else { + t.Value = gv + } + return true + case *Array: + return false + case *object: + v.Foreach(func(_, v *Term) { + WalkTerms(v, vis) + }) + return true + case Ref: + if RootDocumentRefs.Contains(t) { + if gv, err := rewriteDeclaredVar(g, stack, v[0].Value.(Var), assignedVar); err != nil { + errs = append(errs, NewError(CompileErr, t.Location, err.Error())) //nolint:govet + } else { + t.Value = gv + } + return true + } + } + errs = append(errs, NewError(CompileErr, t.Location, "cannot assign to %v", ValueName(t.Value))) + return true + } + + WalkTerms(expr.Operand(0), vis) + + if len(errs) == numErrsBefore { + loc := expr.Operator()[0].Location + expr.SetOperator(RefTerm(VarTerm(Equality.Name).SetLocation(loc)).SetLocation(loc)) + } + + return expr, errs +} + +func rewriteDeclaredVarsInTerm(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) (bool, Errors) { + switch v := term.Value.(type) { + case Var: + if gv, ok := stack.Declared(v); ok { + term.Value = gv + stack.Seen(v) + } else if stack.Occurrence(v) == newVar { + stack.Insert(v, v, seenVar) + } + case Ref: + if RootDocumentRefs.Contains(term) { + x := v[0].Value.(Var) + if occ, ok := stack.GlobalOccurrence(x); ok && occ != seenVar { + gv, _ := stack.Declared(x) + term.Value = gv + } + + return true, errs + } + return false, errs + case Call: + ref := v[0] + WalkVars(ref, func(v Var) bool { + if gv, ok := stack.Declared(v); ok && !gv.Equal(v) { + // We will rewrite the ref of a function call, which is never ok since we don't have first-class functions. 
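+				// Illustrative example (the names are assumed): in
+				//
+				//	p if { f := 1; f(2) }
+				//
+				// f in the call ref was assigned above and would be rewritten
+				// to a generated local, so the call is rejected here.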
+ errs = append(errs, NewError(CompileErr, term.Location, "called function %s shadowed", ref)) + return true + } + return false + }) + return false, errs + case *object: + cpy, _ := v.Map(func(k, v *Term) (*Term, *Term, error) { + kcpy := k.Copy() + errs = rewriteDeclaredVarsInTermRecursive(g, stack, kcpy, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v, errs, strict) + return kcpy, v, nil + }) + term.Value = cpy + case Set: + cpy, _ := v.Map(func(elem *Term) (*Term, error) { + elemcpy := elem.Copy() + errs = rewriteDeclaredVarsInTermRecursive(g, stack, elemcpy, errs, strict) + return elemcpy, nil + }) + term.Value = cpy + case *ArrayComprehension: + errs = rewriteDeclaredVarsInArrayComprehension(g, stack, v, errs, strict) + case *SetComprehension: + errs = rewriteDeclaredVarsInSetComprehension(g, stack, v, errs, strict) + case *ObjectComprehension: + errs = rewriteDeclaredVarsInObjectComprehension(g, stack, v, errs, strict) + default: + return false, errs + } + return true, errs +} + +func rewriteDeclaredVarsInTermRecursive(g *localVarGenerator, stack *localDeclaredVars, term *Term, errs Errors, strict bool) Errors { + WalkTerms(term, func(t *Term) bool { + var stop bool + stop, errs = rewriteDeclaredVarsInTerm(g, stack, t, errs, strict) + return stop + }) + return errs +} + +func rewriteDeclaredVarsInWithRecursive(g *localVarGenerator, stack *localDeclaredVars, w *With, errs Errors, strict bool) Errors { + // NOTE(sr): `with input as` and `with input.a.b.c as` are deliberately skipped here: `input` could + // have been shadowed by a local variable/argument but should NOT be replaced in the `with` target. + // + // We cannot drop `input` from the stack since it's conceivable to do `with input[input] as` where + // the second input is meant to be the local var. It's a terrible idea, but when you're shadowing + // `input` those might be your thing. + errs = rewriteDeclaredVarsInTermRecursive(g, stack, w.Target, errs, strict) + if sdwInput, ok := stack.Declared(InputRootDocument.Value.(Var)); ok { // Was "input" shadowed... + switch value := w.Target.Value.(type) { + case Var: + if sdwInput.Equal(value) { // ...and replaced? 
If so, fix it + w.Target.Value = InputRootRef + } + case Ref: + if sdwInput.Equal(value[0].Value.(Var)) { + w.Target.Value.(Ref)[0].Value = InputRootDocument.Value + } + } + } + // No special handling of the `with` value + return rewriteDeclaredVarsInTermRecursive(g, stack, w.Value, errs, strict) +} + +func rewriteDeclaredVarsInArrayComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ArrayComprehension, errs Errors, strict bool) Errors { + used := NewVarSet() + used.Update(v.Term.Vars()) + + stack.Push() + v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) + stack.Pop() + return errs +} + +func rewriteDeclaredVarsInSetComprehension(g *localVarGenerator, stack *localDeclaredVars, v *SetComprehension, errs Errors, strict bool) Errors { + used := NewVarSet() + used.Update(v.Term.Vars()) + + stack.Push() + v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Term, errs, strict) + stack.Pop() + return errs +} + +func rewriteDeclaredVarsInObjectComprehension(g *localVarGenerator, stack *localDeclaredVars, v *ObjectComprehension, errs Errors, strict bool) Errors { + used := NewVarSet() + used.Update(v.Key.Vars()) + used.Update(v.Value.Vars()) + + stack.Push() + v.Body, errs = rewriteDeclaredVarsInBody(g, stack, used, v.Body, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Key, errs, strict) + errs = rewriteDeclaredVarsInTermRecursive(g, stack, v.Value, errs, strict) + stack.Pop() + return errs +} + +func rewriteDeclaredVar(g *localVarGenerator, stack *localDeclaredVars, v Var, occ varOccurrence) (gv Var, err error) { + switch stack.Occurrence(v) { + case seenVar: + return gv, fmt.Errorf("var %v referenced above", v) + case assignedVar: + return gv, fmt.Errorf("var %v assigned above", v) + case declaredVar: + return gv, fmt.Errorf("var %v declared above", v) + case argVar: + return gv, fmt.Errorf("arg %v redeclared", v) + } + gv = g.Generate() + stack.Insert(v, gv, occ) + return +} + +// rewriteWithModifiersInBody will rewrite the body so that with modifiers do +// not contain terms that require evaluation as values. If this function +// encounters an invalid with modifier target then it will raise an error. 
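+//
+// For instance (a sketch; the generated variable name is assumed):
+//
+//	allow with input.user as data.users[i]
+//
+// is rewritten so that the replacement value is bound first:
+//
+//	__local0__ = data.users[i]; allow with input.user as __local0__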
+func rewriteWithModifiersInBody(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, body Body) (Body, *Error) { + var result Body + for i := range body { + exprs, err := rewriteWithModifier(c, unsafeBuiltinsMap, f, body[i]) + if err != nil { + return nil, err + } + if len(exprs) > 0 { + for _, expr := range exprs { + result.Append(expr) + } + } else { + result.Append(body[i]) + } + } + return result, nil +} + +func rewriteWithModifier(c *Compiler, unsafeBuiltinsMap map[string]struct{}, f *equalityFactory, expr *Expr) ([]*Expr, *Error) { + + var result []*Expr + for i := range expr.With { + eval, err := validateWith(c, unsafeBuiltinsMap, expr, i) + if err != nil { + return nil, err + } + + if eval { + eq := f.Generate(expr.With[i].Value) + result = append(result, eq) + expr.With[i].Value = eq.Operand(0) + } + } + + return append(result, expr), nil +} + +func validateWith(c *Compiler, unsafeBuiltinsMap map[string]struct{}, expr *Expr, i int) (bool, *Error) { + target, value := expr.With[i].Target, expr.With[i].Value + + // Ensure that values that are built-ins are rewritten to Ref (not Var) + if v, ok := value.Value.(Var); ok { + if _, ok := c.builtins[v.String()]; ok { + value.Value = Ref([]*Term{NewTerm(v)}) + } + } + isBuiltinRefOrVar, err := isBuiltinRefOrVar(c.builtins, unsafeBuiltinsMap, target) + if err != nil { + return false, err + } + + isAllowedUnknownFuncCall := false + if c.allowUndefinedFuncCalls { + switch target.Value.(type) { + case Ref, Var: + isAllowedUnknownFuncCall = true + } + } + + switch { + case isDataRef(target): + ref := target.Value.(Ref) + targetNode := c.RuleTree + for i := 0; i < len(ref)-1; i++ { + child := targetNode.Child(ref[i].Value) + if child == nil { + break + } else if len(child.Values) > 0 { + return false, NewError(CompileErr, target.Loc(), "with keyword cannot partially replace virtual document(s)") + } + targetNode = child + } + + if targetNode != nil { + // NOTE(sr): at this point in the compiler stages, we don't have a fully-populated + // TypeEnv yet -- so we have to make do with this check to see if the replacement + // target is a function. It's probably wrong for arity-0 functions, but those are + // and edge case anyways. + if child := targetNode.Child(ref[len(ref)-1].Value); child != nil { + for _, v := range child.Values { + if len(v.(*Rule).Head.Args) > 0 { + if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { + return false, err // err may be nil + } + } + } + } + } + + // If the with-value is a ref to a function, but not a call, we can't rewrite it + if r, ok := value.Value.(Ref); ok { + // TODO: check that target ref doesn't exist? 
+ if valueNode := c.RuleTree.Find(r); valueNode != nil { + for _, v := range valueNode.Values { + if len(v.(*Rule).Head.Args) > 0 { + return false, nil + } + } + } + } + case isInputRef(target): // ok, valid + case isBuiltinRefOrVar: + + // NOTE(sr): first we ensure that parsed Var builtins (`count`, `concat`, etc) + // are rewritten to their proper Ref convention + if v, ok := target.Value.(Var); ok { + target.Value = Ref([]*Term{NewTerm(v)}) + } + + targetRef := target.Value.(Ref) + bi := c.builtins[targetRef.String()] // safe because isBuiltinRefOrVar checked this + if err := validateWithBuiltinTarget(bi, targetRef, target.Loc()); err != nil { + return false, err + } + + if ok, err := validateWithFunctionValue(c.builtins, unsafeBuiltinsMap, c.RuleTree, value); err != nil || ok { + return false, err // err may be nil + } + case isAllowedUnknownFuncCall: + // The target isn't a ref to the input doc, data doc, or a known built-in, but it might be a ref to an unknown built-in. + return false, nil + default: + return false, NewError(TypeErr, target.Location, "with keyword target must reference existing %v, %v, or a function", InputRootDocument, DefaultRootDocument) + } + return requiresEval(value), nil +} + +func validateWithBuiltinTarget(bi *Builtin, target Ref, loc *location.Location) *Error { + switch bi.Name { + case Equality.Name, + RegoMetadataChain.Name, + RegoMetadataRule.Name: + return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of %q invalid", bi.Name) + } + + switch { + case target.HasPrefix(Ref([]*Term{VarTerm("internal")})): + return NewError(CompileErr, loc, "with keyword replacing built-in function: replacement of internal function %q invalid", target) + + case bi.Relation: + return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a relation") + + case bi.Decl.Result() == nil: + return NewError(CompileErr, loc, "with keyword replacing built-in function: target must not be a void function") + } + return nil +} + +func validateWithFunctionValue(bs map[string]*Builtin, unsafeMap map[string]struct{}, ruleTree *TreeNode, value *Term) (bool, *Error) { + if v, ok := value.Value.(Ref); ok { + if ruleTree.Find(v) != nil { // ref exists in rule tree + return true, nil + } + } + return isBuiltinRefOrVar(bs, unsafeMap, value) +} + +func isInputRef(term *Term) bool { + if ref, ok := term.Value.(Ref); ok { + if ref.HasPrefix(InputRootRef) { + return true + } + } + return false +} + +func isDataRef(term *Term) bool { + if ref, ok := term.Value.(Ref); ok { + if ref.HasPrefix(DefaultRootRef) { + return true + } + } + return false +} + +func isBuiltinRefOrVar(bs map[string]*Builtin, unsafeBuiltinsMap map[string]struct{}, term *Term) (bool, *Error) { + switch v := term.Value.(type) { + case Ref, Var: + if _, ok := unsafeBuiltinsMap[v.String()]; ok { + return false, NewError(CompileErr, term.Location, "with keyword replacing built-in function: target must not be unsafe: %q", v) + } + _, ok := bs[v.String()] + return ok, nil + } + return false, nil +} + +func isVirtual(node *TreeNode, ref Ref) bool { + for i := range ref { + child := node.Child(ref[i].Value) + if child == nil { + return false + } else if len(child.Values) > 0 { + return true + } + node = child + } + return true +} + +func safetyErrorSlice(unsafe unsafeVars, rewritten map[Var]Var) (result Errors) { + + if len(unsafe) == 0 { + return + } + + for _, pair := range unsafe.Vars() { + v := pair.Var + if w, ok := rewritten[v]; ok { + v = w + } + if 
!v.IsGenerated() { + if _, ok := allFutureKeywords[string(v)]; ok { + result = append(result, NewError(UnsafeVarErr, pair.Loc, + "var %[1]v is unsafe (hint: `import future.keywords.%[1]v` to import a future keyword)", v)) + continue + } + result = append(result, NewError(UnsafeVarErr, pair.Loc, "var %v is unsafe", v)) + } + } + + if len(result) > 0 { + return + } + + // If the expression contains unsafe generated variables, report which + // expressions are unsafe instead of the variables that are unsafe (since + // the latter are not meaningful to the user.) + pairs := unsafe.Slice() + + slices.SortFunc(pairs, func(a, b unsafePair) int { + return a.Expr.Location.Compare(b.Expr.Location) + }) + + // Report at most one error per generated variable. + seen := NewVarSet() + + for _, expr := range pairs { + before := len(seen) + for v := range expr.Vars { + if v.IsGenerated() { + seen.Add(v) + } + } + if len(seen) > before { + result = append(result, NewError(UnsafeVarErr, expr.Expr.Location, "expression is unsafe")) + } + } + + return +} + +func checkUnsafeBuiltins(unsafeBuiltinsMap map[string]struct{}, node interface{}) Errors { + errs := make(Errors, 0) + WalkExprs(node, func(x *Expr) bool { + if x.IsCall() { + operator := x.Operator().String() + if _, ok := unsafeBuiltinsMap[operator]; ok { + errs = append(errs, NewError(TypeErr, x.Loc(), "unsafe built-in function calls in expression: %v", operator)) + } + } + return false + }) + return errs +} + +func rewriteVarsInRef(vars ...map[Var]Var) varRewriter { + return func(node Ref) Ref { + i, _ := TransformVars(node, func(v Var) (Value, error) { + for _, m := range vars { + if u, ok := m[v]; ok { + return u, nil + } + } + return v, nil + }) + return i.(Ref) + } +} + +// NOTE(sr): This is duplicated with compile/compile.go; but moving it into another location +// would cause a circular dependency -- the refSet definition needs ast.Ref. If we make it +// public in the ast package, the compile package could take it from there, but it would also +// increase our public interface. Let's reconsider if we need it in a third place. +type refSet struct { + s []Ref +} + +func newRefSet(x ...Ref) *refSet { + result := &refSet{} + for i := range x { + result.AddPrefix(x[i]) + } + return result +} + +// ContainsPrefix returns true if r is prefixed by any of the existing refs in the set. +func (rs *refSet) ContainsPrefix(r Ref) bool { + return slices.ContainsFunc(rs.s, r.HasPrefix) +} + +// AddPrefix inserts r into the set if r is not prefixed by any existing +// refs in the set. If any existing refs are prefixed by r, those existing +// refs are removed. +func (rs *refSet) AddPrefix(r Ref) { + if rs.ContainsPrefix(r) { + return + } + cpy := []Ref{r} + for i := range rs.s { + if !rs.s[i].HasPrefix(r) { + cpy = append(cpy, rs.s[i]) + } + } + rs.s = cpy +} + +// Sorted returns a sorted slice of terms for refs in the set. +func (rs *refSet) Sorted() []*Term { + terms := make([]*Term, len(rs.s)) + for i := range rs.s { + terms[i] = NewTerm(rs.s[i]) + } + slices.SortFunc(terms, TermValueCompare) + return terms +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go new file mode 100644 index 000000000..7d81d45e6 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compilehelper.go @@ -0,0 +1,62 @@ +// Copyright 2016 The OPA Authors. All rights reserved. 
+// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +// CompileModules takes a set of Rego modules represented as strings and +// compiles them for evaluation. The keys of the map are used as filenames. +func CompileModules(modules map[string]string) (*Compiler, error) { + return CompileModulesWithOpt(modules, CompileOpts{}) +} + +// CompileOpts defines a set of options for the compiler. +type CompileOpts struct { + EnablePrintStatements bool + ParserOptions ParserOptions +} + +// CompileModulesWithOpt takes a set of Rego modules represented as strings and +// compiles them for evaluation. The keys of the map are used as filenames. +func CompileModulesWithOpt(modules map[string]string, opts CompileOpts) (*Compiler, error) { + + parsed := make(map[string]*Module, len(modules)) + + for f, module := range modules { + var pm *Module + var err error + if pm, err = ParseModuleWithOpts(f, module, opts.ParserOptions); err != nil { + return nil, err + } + parsed[f] = pm + } + + compiler := NewCompiler(). + WithDefaultRegoVersion(opts.ParserOptions.RegoVersion). + WithEnablePrintStatements(opts.EnablePrintStatements) + compiler.Compile(parsed) + + if compiler.Failed() { + return nil, compiler.Errors + } + + return compiler, nil +} + +// MustCompileModules compiles a set of Rego modules represented as strings. If +// the compilation process fails, this function panics. +func MustCompileModules(modules map[string]string) *Compiler { + return MustCompileModulesWithOpts(modules, CompileOpts{}) +} + +// MustCompileModulesWithOpts compiles a set of Rego modules represented as strings. If +// the compilation process fails, this function panics. +func MustCompileModulesWithOpts(modules map[string]string, opts CompileOpts) *Compiler { + + compiler, err := CompileModulesWithOpt(modules, opts) + if err != nil { + panic(err) + } + + return compiler +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compilemetrics.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/ast/compilemetrics.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/ast/compilemetrics.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go new file mode 100644 index 000000000..685cc6b69 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/conflicts.go @@ -0,0 +1,79 @@ +// Copyright 2019 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "slices" + "strings" +) + +// CheckPathConflicts returns a set of errors indicating paths that +// are in conflict with the result of the provided callable. +func CheckPathConflicts(c *Compiler, exists func([]string) (bool, error)) Errors { + var errs Errors + + root := c.RuleTree.Child(DefaultRootDocument.Value) + if root == nil { + return nil + } + + if len(c.pathConflictCheckRoots) == 0 || slices.Contains(c.pathConflictCheckRoots, "") { + for _, child := range root.Children { + errs = append(errs, checkDocumentConflicts(child, exists, nil)...) 
+		}
+		return errs
+	}
+
+	for _, rootPath := range c.pathConflictCheckRoots {
+		// traverse AST from `path` to go to the new root
+		paths := strings.Split(rootPath, "/")
+		node := root
+		for _, key := range paths {
+			node = node.Child(String(key))
+			if node == nil {
+				break
+			}
+		}
+
+		if node == nil {
+			// could not find the node from the AST (e.g. `path` is from a data file)
+			// then no conflict is possible
+			continue
+		}
+
+		for _, child := range node.Children {
+			errs = append(errs, checkDocumentConflicts(child, exists, paths)...)
+		}
+	}
+
+	return errs
+}
+
+func checkDocumentConflicts(node *TreeNode, exists func([]string) (bool, error), path []string) Errors {
+
+	switch key := node.Key.(type) {
+	case String:
+		path = append(path, string(key))
+	default: // other key types cannot conflict with data
+		return nil
+	}
+
+	if len(node.Values) > 0 {
+		s := strings.Join(path, "/")
+		if ok, err := exists(path); err != nil {
+			return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflict check for data path %v: %v", s, err.Error())}
+		} else if ok {
+			return Errors{NewError(CompileErr, node.Values[0].(*Rule).Loc(), "conflicting rule for data path %v found", s)}
+		}
+	}
+
+	var errs Errors
+
+	for _, child := range node.Children {
+		errs = append(errs, checkDocumentConflicts(child, exists, path)...)
+	}
+
+	return errs
+}
diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go
new file mode 100644
index 000000000..62b04e301
--- /dev/null
+++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/doc.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+// Package ast declares Rego syntax tree types and also includes a parser and compiler for preparing policies for execution in the policy engine.
+//
+// Rego policies are defined using a relatively small set of types: modules, package and import declarations, rules, expressions, and terms. At their core, policies consist of rules that are defined by one or more expressions over documents available to the policy engine. The expressions are defined by intrinsic values (terms) such as strings, objects, variables, etc.
+//
+// Rego policies are typically defined in text files and then parsed and compiled by the policy engine at runtime. The parsing stage takes the text or string representation of the policy and converts it into an abstract syntax tree (AST) that consists of the types mentioned above. The AST is organized as follows:
+//
+//	Module
+//	 |
+//	 +--- Package (Reference)
+//	 |
+//	 +--- Imports
+//	 |     |
+//	 |     +--- Import (Term)
+//	 |
+//	 +--- Rules
+//	      |
+//	      +--- Rule
+//	            |
+//	            +--- Head
+//	            |     |
+//	            |     +--- Name (Variable)
+//	            |     |
+//	            |     +--- Key (Term)
+//	            |     |
+//	            |     +--- Value (Term)
+//	            |
+//	            +--- Body
+//	                  |
+//	                  +--- Expression (Term | Terms | Variable Declaration)
+//
+// At query time, the policy engine expects policies to have been compiled. The compilation stage takes one or more modules and compiles them into a format that the policy engine supports.
+package ast
diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/env.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/env.go
new file mode 100644
index 000000000..fb374b173
--- /dev/null
+++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/env.go
@@ -0,0 +1,526 @@
+// Copyright 2017 The OPA Authors.
All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "strings" + + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +// TypeEnv contains type info for static analysis such as type checking. +type TypeEnv struct { + tree *typeTreeNode + next *TypeEnv + newChecker func() *typeChecker +} + +// newTypeEnv returns an empty TypeEnv. The constructor is not exported because +// type environments should only be created by the type checker. +func newTypeEnv(f func() *typeChecker) *TypeEnv { + return &TypeEnv{ + tree: newTypeTree(), + newChecker: f, + } +} + +// Get returns the type of x. +func (env *TypeEnv) Get(x interface{}) types.Type { + + if term, ok := x.(*Term); ok { + x = term.Value + } + + switch x := x.(type) { + + // Scalars. + case Null: + return types.NewNull() + case Boolean: + return types.NewBoolean() + case Number: + return types.NewNumber() + case String: + return types.NewString() + + // Composites. + case *Array: + static := make([]types.Type, x.Len()) + for i := range static { + tpe := env.Get(x.Elem(i).Value) + static[i] = tpe + } + + var dynamic types.Type + if len(static) == 0 { + dynamic = types.A + } + + return types.NewArray(static, dynamic) + + case *lazyObj: + return env.Get(x.force()) + case *object: + static := []*types.StaticProperty{} + var dynamic *types.DynamicProperty + + x.Foreach(func(k, v *Term) { + if IsConstant(k.Value) { + kjson, err := JSON(k.Value) + if err == nil { + tpe := env.Get(v) + static = append(static, types.NewStaticProperty(kjson, tpe)) + return + } + } + // Can't handle it as a static property, fallback to dynamic + typeK := env.Get(k.Value) + typeV := env.Get(v.Value) + dynamic = types.NewDynamicProperty(typeK, typeV) + }) + + if len(static) == 0 && dynamic == nil { + dynamic = types.NewDynamicProperty(types.A, types.A) + } + + return types.NewObject(static, dynamic) + + case Set: + var tpe types.Type + x.Foreach(func(elem *Term) { + other := env.Get(elem.Value) + tpe = types.Or(tpe, other) + }) + if tpe == nil { + tpe = types.A + } + return types.NewSet(tpe) + + // Comprehensions. + case *ArrayComprehension: + cpy, errs := env.newChecker().CheckBody(env, x.Body) + if len(errs) == 0 { + return types.NewArray(nil, cpy.Get(x.Term)) + } + return nil + case *ObjectComprehension: + cpy, errs := env.newChecker().CheckBody(env, x.Body) + if len(errs) == 0 { + return types.NewObject(nil, types.NewDynamicProperty(cpy.Get(x.Key), cpy.Get(x.Value))) + } + return nil + case *SetComprehension: + cpy, errs := env.newChecker().CheckBody(env, x.Body) + if len(errs) == 0 { + return types.NewSet(cpy.Get(x.Term)) + } + return nil + + // Refs. + case Ref: + return env.getRef(x) + + // Vars. + case Var: + if node := env.tree.Child(x); node != nil { + return node.Value() + } + if env.next != nil { + return env.next.Get(x) + } + return nil + + // Calls. 
+ case Call: + return nil + + default: + panic("unreachable") + } +} + +func (env *TypeEnv) getRef(ref Ref) types.Type { + + node := env.tree.Child(ref[0].Value) + if node == nil { + return env.getRefFallback(ref) + } + + return env.getRefRec(node, ref, ref[1:]) +} + +func (env *TypeEnv) getRefFallback(ref Ref) types.Type { + + if env.next != nil { + return env.next.Get(ref) + } + + if RootDocumentNames.Contains(ref[0]) { + return types.A + } + + return nil +} + +func (env *TypeEnv) getRefRec(node *typeTreeNode, ref, tail Ref) types.Type { + if len(tail) == 0 { + return env.getRefRecExtent(node) + } + + if node.Leaf() { + if node.children.Len() > 0 { + if child := node.Child(tail[0].Value); child != nil { + return env.getRefRec(child, ref, tail[1:]) + } + } + return selectRef(node.Value(), tail) + } + + if !IsConstant(tail[0].Value) { + return selectRef(env.getRefRecExtent(node), tail) + } + + child := node.Child(tail[0].Value) + if child == nil { + return env.getRefFallback(ref) + } + + return env.getRefRec(child, ref, tail[1:]) +} + +func (env *TypeEnv) getRefRecExtent(node *typeTreeNode) types.Type { + + if node.Leaf() { + return node.Value() + } + + children := []*types.StaticProperty{} + + node.Children().Iter(func(k, v util.T) bool { + key := k.(Value) + child := v.(*typeTreeNode) + + tpe := env.getRefRecExtent(child) + + // NOTE(sr): Converting to Golang-native types here is an extension of what we did + // before -- only supporting strings. But since we cannot differentiate sets and arrays + // that way, we could reconsider. + switch key.(type) { + case String, Number, Boolean: // skip anything else + propKey, err := JSON(key) + if err != nil { + panic(fmt.Errorf("unreachable, ValueToInterface: %w", err)) + } + children = append(children, types.NewStaticProperty(propKey, tpe)) + } + return false + }) + + // TODO(tsandall): for now, these objects can have any dynamic properties + // because we don't have schema for base docs. Once schemas are supported + // we can improve this. + return types.NewObject(children, types.NewDynamicProperty(types.S, types.A)) +} + +func (env *TypeEnv) wrap() *TypeEnv { + cpy := *env + cpy.next = env + cpy.tree = newTypeTree() + return &cpy +} + +// typeTreeNode is used to store type information in a tree. 
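+// Each node is keyed by an ast.Value, children are held in a util.HashMap
+// built with valueEq/valueHash, and a non-nil value marks a leaf carrying the
+// inferred types.Type for the path that ends at that node.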
+type typeTreeNode struct { + key Value + value types.Type + children *util.HashMap +} + +func newTypeTree() *typeTreeNode { + return &typeTreeNode{ + key: nil, + value: nil, + children: util.NewHashMap(valueEq, valueHash), + } +} + +func (n *typeTreeNode) Child(key Value) *typeTreeNode { + value, ok := n.children.Get(key) + if !ok { + return nil + } + return value.(*typeTreeNode) +} + +func (n *typeTreeNode) Children() *util.HashMap { + return n.children +} + +func (n *typeTreeNode) Get(path Ref) types.Type { + curr := n + for _, term := range path { + child, ok := curr.children.Get(term.Value) + if !ok { + return nil + } + curr = child.(*typeTreeNode) + } + return curr.Value() +} + +func (n *typeTreeNode) Leaf() bool { + return n.value != nil +} + +func (n *typeTreeNode) PutOne(key Value, tpe types.Type) { + c, ok := n.children.Get(key) + + var child *typeTreeNode + if !ok { + child = newTypeTree() + child.key = key + n.children.Put(key, child) + } else { + child = c.(*typeTreeNode) + } + + child.value = tpe +} + +func (n *typeTreeNode) Put(path Ref, tpe types.Type) { + curr := n + for _, term := range path { + c, ok := curr.children.Get(term.Value) + + var child *typeTreeNode + if !ok { + child = newTypeTree() + child.key = term.Value + curr.children.Put(child.key, child) + } else { + child = c.(*typeTreeNode) + } + + curr = child + } + curr.value = tpe +} + +// Insert inserts tpe at path in the tree, but also merges the value into any types.Object present along that path. +// If a types.Object is inserted, any leafs already present further down the tree are merged into the inserted object. +// path must be ground. +func (n *typeTreeNode) Insert(path Ref, tpe types.Type, env *TypeEnv) { + curr := n + for i, term := range path { + c, ok := curr.children.Get(term.Value) + + var child *typeTreeNode + if !ok { + child = newTypeTree() + child.key = term.Value + curr.children.Put(child.key, child) + } else { + child = c.(*typeTreeNode) + + if child.value != nil && i+1 < len(path) { + // If child has an object value, merge the new value into it. + if o, ok := child.value.(*types.Object); ok { + var err error + child.value, err = insertIntoObject(o, path[i+1:], tpe, env) + if err != nil { + panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) + } + } + } + } + + curr = child + } + + curr.value = mergeTypes(curr.value, tpe) + + if _, ok := tpe.(*types.Object); ok && curr.children.Len() > 0 { + // merge all leafs into the inserted object + leafs := curr.Leafs() + for p, t := range leafs { + var err error + curr.value, err = insertIntoObject(curr.value.(*types.Object), *p, t, env) + if err != nil { + panic(fmt.Errorf("unreachable, insertIntoObject: %w", err)) + } + } + } +} + +// mergeTypes merges the types of 'a' and 'b'. If both are sets, their 'of' types are joined with an types.Or. +// If both are objects, the key types of their dynamic properties are joined with types.Or:s, and their value types +// are recursively merged (using mergeTypes). +// If 'a' and 'b' are both objects, and at least one of them have static properties, they are joined +// with an types.Or, instead of being merged. +// If 'a' is an Any containing an Object, and 'b' is an Object (or vice versa); AND both objects have no +// static properties, they are merged. +// If 'a' and 'b' are different types, they are joined with an types.Or. 
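+//
+// For example, merging set[string] with set[number] produces a set whose
+// element type is the union of string and number, whereas merging two
+// otherwise unrelated types (say, a string with a number) simply yields
+// their union.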
+func mergeTypes(a, b types.Type) types.Type { + if a == nil { + return b + } + + if b == nil { + return a + } + + switch a := a.(type) { + case *types.Object: + if bObj, ok := b.(*types.Object); ok && len(a.StaticProperties()) == 0 && len(bObj.StaticProperties()) == 0 { + if len(a.StaticProperties()) > 0 || len(bObj.StaticProperties()) > 0 { + return types.Or(a, bObj) + } + + aDynProps := a.DynamicProperties() + bDynProps := bObj.DynamicProperties() + dynProps := types.NewDynamicProperty( + types.Or(aDynProps.Key, bDynProps.Key), + mergeTypes(aDynProps.Value, bDynProps.Value)) + return types.NewObject(nil, dynProps) + } else if bAny, ok := b.(types.Any); ok && len(a.StaticProperties()) == 0 { + // If a is an object type with no static components ... + for _, t := range bAny { + if tObj, ok := t.(*types.Object); ok && len(tObj.StaticProperties()) == 0 { + // ... and b is a types.Any containing an object with no static components, we merge them. + aDynProps := a.DynamicProperties() + tDynProps := tObj.DynamicProperties() + tDynProps.Key = types.Or(tDynProps.Key, aDynProps.Key) + tDynProps.Value = types.Or(tDynProps.Value, aDynProps.Value) + return bAny + } + } + } + case *types.Set: + if bSet, ok := b.(*types.Set); ok { + return types.NewSet(types.Or(a.Of(), bSet.Of())) + } + case types.Any: + if _, ok := b.(types.Any); !ok { + return mergeTypes(b, a) + } + } + + return types.Or(a, b) +} + +func (n *typeTreeNode) String() string { + b := strings.Builder{} + + if k := n.key; k != nil { + b.WriteString(k.String()) + } else { + b.WriteString("-") + } + + if v := n.value; v != nil { + b.WriteString(": ") + b.WriteString(v.String()) + } + + n.children.Iter(func(_, v util.T) bool { + if child, ok := v.(*typeTreeNode); ok { + b.WriteString("\n\t+ ") + s := child.String() + s = strings.ReplaceAll(s, "\n", "\n\t") + b.WriteString(s) + } + return false + }) + + return b.String() +} + +func insertIntoObject(o *types.Object, path Ref, tpe types.Type, env *TypeEnv) (*types.Object, error) { + if len(path) == 0 { + return o, nil + } + + key := env.Get(path[0].Value) + + if len(path) == 1 { + var dynamicProps *types.DynamicProperty + if dp := o.DynamicProperties(); dp != nil { + dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, tpe)) + } else { + dynamicProps = types.NewDynamicProperty(key, tpe) + } + return types.NewObject(o.StaticProperties(), dynamicProps), nil + } + + child, err := insertIntoObject(types.NewObject(nil, nil), path[1:], tpe, env) + if err != nil { + return nil, err + } + + var dynamicProps *types.DynamicProperty + if dp := o.DynamicProperties(); dp != nil { + dynamicProps = types.NewDynamicProperty(types.Or(o.DynamicProperties().Key, key), types.Or(o.DynamicProperties().Value, child)) + } else { + dynamicProps = types.NewDynamicProperty(key, child) + } + return types.NewObject(o.StaticProperties(), dynamicProps), nil +} + +func (n *typeTreeNode) Leafs() map[*Ref]types.Type { + leafs := map[*Ref]types.Type{} + n.children.Iter(func(_, v util.T) bool { + collectLeafs(v.(*typeTreeNode), nil, leafs) + return false + }) + return leafs +} + +func collectLeafs(n *typeTreeNode, path Ref, leafs map[*Ref]types.Type) { + nPath := append(path, NewTerm(n.key)) + if n.Leaf() { + leafs[&nPath] = n.Value() + return + } + n.children.Iter(func(_, v util.T) bool { + collectLeafs(v.(*typeTreeNode), nPath, leafs) + return false + }) +} + +func (n *typeTreeNode) Value() types.Type { + return n.value +} + +// selectConstant returns the 
attribute of the type referred to by the term. If +// the attribute type cannot be determined, nil is returned. +func selectConstant(tpe types.Type, term *Term) types.Type { + x, err := JSON(term.Value) + if err == nil { + return types.Select(tpe, x) + } + return nil +} + +// selectRef returns the type of the nested attribute referred to by ref. If +// the attribute type cannot be determined, nil is returned. If the ref +// contains vars or refs, then the returned type will be a union of the +// possible types. +func selectRef(tpe types.Type, ref Ref) types.Type { + + if tpe == nil || len(ref) == 0 { + return tpe + } + + head, tail := ref[0], ref[1:] + + switch head.Value.(type) { + case Var, Ref, *Array, Object, Set: + return selectRef(types.Values(tpe), tail) + default: + return selectRef(selectConstant(tpe, head), tail) + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go new file mode 100644 index 000000000..066dfcdd6 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/errors.go @@ -0,0 +1,123 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "sort" + "strings" +) + +// Errors represents a series of errors encountered during parsing, compiling, +// etc. +type Errors []*Error + +func (e Errors) Error() string { + + if len(e) == 0 { + return "no error(s)" + } + + if len(e) == 1 { + return fmt.Sprintf("1 error occurred: %v", e[0].Error()) + } + + s := make([]string, len(e)) + for i, err := range e { + s[i] = err.Error() + } + + return fmt.Sprintf("%d errors occurred:\n%s", len(e), strings.Join(s, "\n")) +} + +// Sort sorts the error slice by location. If the locations are equal then the +// error message is compared. +func (e Errors) Sort() { + sort.Slice(e, func(i, j int) bool { + a := e[i] + b := e[j] + + if cmp := a.Location.Compare(b.Location); cmp != 0 { + return cmp < 0 + } + + return a.Error() < b.Error() + }) +} + +const ( + // ParseErr indicates an unclassified parse error occurred. + ParseErr = "rego_parse_error" + + // CompileErr indicates an unclassified compile error occurred. + CompileErr = "rego_compile_error" + + // TypeErr indicates a type error was caught. + TypeErr = "rego_type_error" + + // UnsafeVarErr indicates an unsafe variable was found during compilation. + UnsafeVarErr = "rego_unsafe_var_error" + + // RecursionErr indicates recursion was found during compilation. + RecursionErr = "rego_recursion_error" +) + +// IsError returns true if err is an AST error with code. +func IsError(code string, err error) bool { + if err, ok := err.(*Error); ok { + return err.Code == code + } + return false +} + +// ErrorDetails defines the interface for detailed error messages. +type ErrorDetails interface { + Lines() []string +} + +// Error represents a single error caught during parsing, compiling, etc. 
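+// Rendered via Error(), messages take the form "<file>:<row>: <code>: <message>",
+// e.g. "policy.rego:3: rego_unsafe_var_error: var x is unsafe"; the location
+// prefix is dropped when no Location is attached.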
+type Error struct { + Code string `json:"code"` + Message string `json:"message"` + Location *Location `json:"location,omitempty"` + Details ErrorDetails `json:"details,omitempty"` +} + +func (e *Error) Error() string { + + var prefix string + + if e.Location != nil { + + if len(e.Location.File) > 0 { + prefix += e.Location.File + ":" + fmt.Sprint(e.Location.Row) + } else { + prefix += fmt.Sprint(e.Location.Row) + ":" + fmt.Sprint(e.Location.Col) + } + } + + msg := fmt.Sprintf("%v: %v", e.Code, e.Message) + + if len(prefix) > 0 { + msg = prefix + ": " + msg + } + + if e.Details != nil { + for _, line := range e.Details.Lines() { + msg += "\n\t" + line + } + } + + return msg +} + +// NewError returns a new Error object. +func NewError(code string, loc *Location, f string, a ...interface{}) *Error { + return &Error{ + Code: code, + Location: loc, + Message: fmt.Sprintf(f, a...), + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/index.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/index.go new file mode 100644 index 000000000..63cd480d1 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/index.go @@ -0,0 +1,932 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "sort" + "strings" + "sync" + + "github.com/open-policy-agent/opa/v1/util" +) + +// RuleIndex defines the interface for rule indices. +type RuleIndex interface { + + // Build tries to construct an index for the given rules. If the index was + // constructed, it returns true, otherwise false. + Build(rules []*Rule) bool + + // Lookup searches the index for rules that will match the provided + // resolver. If the resolver returns an error, it is returned via err. + Lookup(resolver ValueResolver) (*IndexResult, error) + + // AllRules traverses the index and returns all rules that will match + // the provided resolver without any optimizations (effectively with + // indexing disabled). If the resolver returns an error, it is returned + // via err. + AllRules(resolver ValueResolver) (*IndexResult, error) +} + +// IndexResult contains the result of an index lookup. +type IndexResult struct { + Kind RuleKind + Rules []*Rule + Else map[*Rule][]*Rule + Default *Rule + EarlyExit bool + OnlyGroundRefs bool +} + +// NewIndexResult returns a new IndexResult object. +func NewIndexResult(kind RuleKind) *IndexResult { + return &IndexResult{ + Kind: kind, + Else: map[*Rule][]*Rule{}, + } +} + +// Empty returns true if there are no rules to evaluate. +func (ir *IndexResult) Empty() bool { + return len(ir.Rules) == 0 && ir.Default == nil +} + +type baseDocEqIndex struct { + skipIndexing Set + isVirtual func(Ref) bool + root *trieNode + defaultRule *Rule + kind RuleKind + onlyGroundRefs bool +} + +var ( + equalityRef = Equality.Ref() + equalRef = Equal.Ref() + globMatchRef = GlobMatch.Ref() + internalPrintRef = InternalPrint.Ref() +) + +func newBaseDocEqIndex(isVirtual func(Ref) bool) *baseDocEqIndex { + return &baseDocEqIndex{ + skipIndexing: NewSet(NewTerm(internalPrintRef)), + isVirtual: isVirtual, + root: newTrieNodeImpl(), + onlyGroundRefs: true, + } +} + +func (i *baseDocEqIndex) Build(rules []*Rule) bool { + if len(rules) == 0 { + return false + } + + i.kind = rules[0].Head.RuleKind() + indices := newrefindices(i.isVirtual) + + // build indices for each rule. 
+ for idx := range rules { + WalkRules(rules[idx], func(rule *Rule) bool { + if rule.Default { + i.defaultRule = rule + return false + } + if i.onlyGroundRefs { + i.onlyGroundRefs = rule.Head.Reference.IsGround() + } + var skip bool + for _, expr := range rule.Body { + if op := expr.OperatorTerm(); op != nil && i.skipIndexing.Contains(op) { + skip = true + break + } + } + if !skip { + for _, expr := range rule.Body { + indices.Update(rule, expr) + } + } + return false + }) + } + + // build trie out of indices. + for idx := range rules { + var prio int + WalkRules(rules[idx], func(rule *Rule) bool { + if rule.Default { + return false + } + node := i.root + if indices.Indexed(rule) { + for _, ref := range indices.Sorted() { + node = node.Insert(ref, indices.Value(rule, ref), indices.Mapper(rule, ref)) + } + } + // Insert rule into trie with (insertion order, priority order) + // tuple. Retaining the insertion order allows us to return rules + // in the order they were passed to this function. + node.append([...]int{idx, prio}, rule) + prio++ + return false + }) + } + return true +} + +func (i *baseDocEqIndex) Lookup(resolver ValueResolver) (*IndexResult, error) { + tr := ttrPool.Get().(*trieTraversalResult) + + defer func() { + clear(tr.unordered) + tr.ordering = tr.ordering[:0] + tr.values.clear() + + ttrPool.Put(tr) + }() + + err := i.root.Traverse(resolver, tr) + if err != nil { + return nil, err + } + + result := NewIndexResult(i.kind) + result.Default = i.defaultRule + result.OnlyGroundRefs = i.onlyGroundRefs + result.Rules = make([]*Rule, 0, len(tr.ordering)) + + for _, pos := range tr.ordering { + sort.Slice(tr.unordered[pos], func(i, j int) bool { + return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1] + }) + nodes := tr.unordered[pos] + root := nodes[0].rule + + result.Rules = append(result.Rules, root) + if len(nodes) > 1 { + result.Else[root] = make([]*Rule, len(nodes)-1) + for i := 1; i < len(nodes); i++ { + result.Else[root][i-1] = nodes[i].rule + } + } + } + + result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround() + + return result, nil +} + +func (i *baseDocEqIndex) AllRules(_ ValueResolver) (*IndexResult, error) { + tr := newTrieTraversalResult() + + // Walk over the rule trie and accumulate _all_ rules + rw := &ruleWalker{result: tr} + i.root.Do(rw) + + result := NewIndexResult(i.kind) + result.Default = i.defaultRule + result.OnlyGroundRefs = i.onlyGroundRefs + result.Rules = make([]*Rule, 0, len(tr.ordering)) + + for _, pos := range tr.ordering { + sort.Slice(tr.unordered[pos], func(i, j int) bool { + return tr.unordered[pos][i].prio[1] < tr.unordered[pos][j].prio[1] + }) + nodes := tr.unordered[pos] + root := nodes[0].rule + result.Rules = append(result.Rules, root) + if len(nodes) > 1 { + result.Else[root] = make([]*Rule, len(nodes)-1) + for i := 1; i < len(nodes); i++ { + result.Else[root][i-1] = nodes[i].rule + } + } + } + + result.EarlyExit = tr.values.Len() == 1 && tr.values.Slice()[0].IsGround() + + return result, nil +} + +type ruleWalker struct { + result *trieTraversalResult +} + +func (r *ruleWalker) Do(x interface{}) trieWalker { + tn := x.(*trieNode) + r.result.Add(tn) + return r +} + +type valueMapper struct { + Key string + MapValue func(Value) Value +} + +type refindex struct { + Ref Ref + Value Value + Mapper *valueMapper +} + +type refindices struct { + isVirtual func(Ref) bool + rules map[*Rule][]*refindex + frequency *util.HashMap + sorted []Ref +} + +func newrefindices(isVirtual func(Ref) bool) *refindices { + return 
&refindices{ + isVirtual: isVirtual, + rules: map[*Rule][]*refindex{}, + frequency: util.NewHashMap(func(a, b util.T) bool { + r1, r2 := a.(Ref), b.(Ref) + return r1.Equal(r2) + }, func(x util.T) int { + return x.(Ref).Hash() + }), + } +} + +// Update attempts to update the refindices for the given expression in the +// given rule. If the expression cannot be indexed the update does not affect +// the indices. +func (i *refindices) Update(rule *Rule, expr *Expr) { + + if expr.Negated { + return + } + + if len(expr.With) > 0 { + // NOTE(tsandall): In the future, we may need to consider expressions + // that have with statements applied to them. + return + } + + op := expr.Operator() + + switch { + case op.Equal(equalityRef): + i.updateEq(rule, expr) + + case op.Equal(equalRef) && len(expr.Operands()) == 2: + // NOTE(tsandall): if equal() is called with more than two arguments the + // output value is being captured in which case the indexer cannot + // exclude the rule if the equal() call would return false (because the + // false value must still be produced.) + i.updateEq(rule, expr) + + case op.Equal(globMatchRef) && len(expr.Operands()) == 3: + // NOTE(sr): Same as with equal() above -- 4 operands means the output + // of `glob.match` is captured and the rule can thus not be excluded. + i.updateGlobMatch(rule, expr) + } +} + +// Sorted returns a sorted list of references that the indices were built from. +// References that appear more frequently in the indexed rules are ordered +// before less frequently appearing references. +func (i *refindices) Sorted() []Ref { + + if i.sorted == nil { + counts := make([]int, 0, i.frequency.Len()) + i.sorted = make([]Ref, 0, i.frequency.Len()) + + i.frequency.Iter(func(k, v util.T) bool { + counts = append(counts, v.(int)) + i.sorted = append(i.sorted, k.(Ref)) + return false + }) + + sort.Slice(i.sorted, func(a, b int) bool { + if counts[a] > counts[b] { + return true + } else if counts[b] > counts[a] { + return false + } + return i.sorted[a][0].Loc().Compare(i.sorted[b][0].Loc()) < 0 + }) + } + + return i.sorted +} + +func (i *refindices) Indexed(rule *Rule) bool { + return len(i.rules[rule]) > 0 +} + +func (i *refindices) Value(rule *Rule, ref Ref) Value { + if index := i.index(rule, ref); index != nil { + return index.Value + } + return nil +} + +func (i *refindices) Mapper(rule *Rule, ref Ref) *valueMapper { + if index := i.index(rule, ref); index != nil { + return index.Mapper + } + return nil +} + +func (i *refindices) updateEq(rule *Rule, expr *Expr) { + a, b := expr.Operand(0), expr.Operand(1) + args := rule.Head.Args + if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, a, b); ok { + i.insert(rule, idx) + return + } + if idx, ok := eqOperandsToRefAndValue(i.isVirtual, args, b, a); ok { + i.insert(rule, idx) + return + } +} + +func (i *refindices) updateGlobMatch(rule *Rule, expr *Expr) { + args := rule.Head.Args + + delim, ok := globDelimiterToString(expr.Operand(1)) + if !ok { + return + } + + if arr := globPatternToArray(expr.Operand(0), delim); arr != nil { + // The 3rd operand of glob.match is the value to match. We assume the + // 3rd operand was a reference that has been rewritten and bound to a + // variable earlier in the query OR a function argument variable. 
+ match := expr.Operand(2) + if _, ok := match.Value.(Var); ok { + var ref Ref + for _, other := range i.rules[rule] { + if _, ok := other.Value.(Var); ok && other.Value.Compare(match.Value) == 0 { + ref = other.Ref + } + } + if ref == nil { + for j, arg := range args { + if arg.Equal(match) { + ref = Ref{FunctionArgRootDocument, InternedIntNumberTerm(j)} + } + } + } + if ref != nil { + i.insert(rule, &refindex{ + Ref: ref, + Value: arr.Value, + Mapper: &valueMapper{ + Key: delim, + MapValue: func(v Value) Value { + if s, ok := v.(String); ok { + return stringSliceToArray(splitStringEscaped(string(s), delim)) + } + return v + }, + }, + }) + } + } + } +} + +func (i *refindices) insert(rule *Rule, index *refindex) { + + count, ok := i.frequency.Get(index.Ref) + if !ok { + count = 0 + } + + i.frequency.Put(index.Ref, count.(int)+1) + + for pos, other := range i.rules[rule] { + if other.Ref.Equal(index.Ref) { + i.rules[rule][pos] = index + return + } + } + + i.rules[rule] = append(i.rules[rule], index) +} + +func (i *refindices) index(rule *Rule, ref Ref) *refindex { + for _, index := range i.rules[rule] { + if index.Ref.Equal(ref) { + return index + } + } + return nil +} + +type trieWalker interface { + Do(x interface{}) trieWalker +} + +type trieTraversalResult struct { + unordered map[int][]*ruleNode + ordering []int + values *set +} + +var ttrPool = sync.Pool{ + New: func() any { + return newTrieTraversalResult() + }, +} + +func newTrieTraversalResult() *trieTraversalResult { + return &trieTraversalResult{ + unordered: map[int][]*ruleNode{}, + // Number 3 is arbitrary, but seemed to be the most common number of values + // stored when benchmarking the trie traversal against a large policy library + // (Regal). + values: newset(3), + } +} + +func (tr *trieTraversalResult) Add(t *trieNode) { + for _, node := range t.rules { + root := node.prio[0] + nodes, ok := tr.unordered[root] + if !ok { + tr.ordering = append(tr.ordering, root) + } + tr.unordered[root] = append(nodes, node) + } + if t.values != nil { + t.values.Foreach(tr.values.insertNoGuard) + } +} + +type trieNode struct { + ref Ref + values Set + mappers []*valueMapper + next *trieNode + any *trieNode + undefined *trieNode + scalars *util.HashMap + array *trieNode + rules []*ruleNode +} + +func (node *trieNode) String() string { + var flags []string + flags = append(flags, fmt.Sprintf("self:%p", node)) + if len(node.ref) > 0 { + flags = append(flags, node.ref.String()) + } + if node.next != nil { + flags = append(flags, fmt.Sprintf("next:%p", node.next)) + } + if node.any != nil { + flags = append(flags, fmt.Sprintf("any:%p", node.any)) + } + if node.undefined != nil { + flags = append(flags, fmt.Sprintf("undefined:%p", node.undefined)) + } + if node.array != nil { + flags = append(flags, fmt.Sprintf("array:%p", node.array)) + } + if node.scalars.Len() > 0 { + buf := make([]string, 0, node.scalars.Len()) + node.scalars.Iter(func(k, v util.T) bool { + key := k.(Value) + val := v.(*trieNode) + buf = append(buf, fmt.Sprintf("scalar(%v):%p", key, val)) + return false + }) + sort.Strings(buf) + flags = append(flags, strings.Join(buf, " ")) + } + if len(node.rules) > 0 { + flags = append(flags, fmt.Sprintf("%d rule(s)", len(node.rules))) + } + if len(node.mappers) > 0 { + flags = append(flags, fmt.Sprintf("%d mapper(s)", len(node.mappers))) + } + if node.values != nil { + if l := node.values.Len(); l > 0 { + flags = append(flags, fmt.Sprintf("%d value(s)", l)) + } + } + return strings.Join(flags, " ") +} + +func (node *trieNode) 
append(prio [2]int, rule *Rule) { + node.rules = append(node.rules, &ruleNode{prio, rule}) + + if node.values != nil && rule.Head.Value != nil { + node.values.Add(rule.Head.Value) + return + } + + if node.values == nil && rule.Head.DocKind() == CompleteDoc { + node.values = NewSet(rule.Head.Value) + } +} + +type ruleNode struct { + prio [2]int + rule *Rule +} + +func newTrieNodeImpl() *trieNode { + return &trieNode{ + scalars: util.NewHashMap(valueEq, valueHash), + } +} + +func (node *trieNode) Do(walker trieWalker) { + next := walker.Do(node) + if next == nil { + return + } + if node.any != nil { + node.any.Do(next) + } + if node.undefined != nil { + node.undefined.Do(next) + } + + node.scalars.Iter(func(_, v util.T) bool { + child := v.(*trieNode) + child.Do(next) + return false + }) + + if node.array != nil { + node.array.Do(next) + } + if node.next != nil { + node.next.Do(next) + } +} + +func (node *trieNode) Insert(ref Ref, value Value, mapper *valueMapper) *trieNode { + + if node.next == nil { + node.next = newTrieNodeImpl() + node.next.ref = ref + } + + if mapper != nil { + node.next.addMapper(mapper) + } + + return node.next.insertValue(value) +} + +func (node *trieNode) Traverse(resolver ValueResolver, tr *trieTraversalResult) error { + + if node == nil { + return nil + } + + tr.Add(node) + + return node.next.traverse(resolver, tr) +} + +func (node *trieNode) addMapper(mapper *valueMapper) { + for i := range node.mappers { + if node.mappers[i].Key == mapper.Key { + return + } + } + node.mappers = append(node.mappers, mapper) +} + +func (node *trieNode) insertValue(value Value) *trieNode { + + switch value := value.(type) { + case nil: + if node.undefined == nil { + node.undefined = newTrieNodeImpl() + } + return node.undefined + case Var: + if node.any == nil { + node.any = newTrieNodeImpl() + } + return node.any + case Null, Boolean, Number, String: + child, ok := node.scalars.Get(value) + if !ok { + child = newTrieNodeImpl() + node.scalars.Put(value, child) + } + return child.(*trieNode) + case *Array: + if node.array == nil { + node.array = newTrieNodeImpl() + } + return node.array.insertArray(value) + } + + panic("illegal value") +} + +func (node *trieNode) insertArray(arr *Array) *trieNode { + + if arr.Len() == 0 { + return node + } + + switch head := arr.Elem(0).Value.(type) { + case Var: + if node.any == nil { + node.any = newTrieNodeImpl() + } + return node.any.insertArray(arr.Slice(1, -1)) + case Null, Boolean, Number, String: + child, ok := node.scalars.Get(head) + if !ok { + child = newTrieNodeImpl() + node.scalars.Put(head, child) + } + return child.(*trieNode).insertArray(arr.Slice(1, -1)) + } + + panic("illegal value") +} + +func (node *trieNode) traverse(resolver ValueResolver, tr *trieTraversalResult) error { + + if node == nil { + return nil + } + + v, err := resolver.Resolve(node.ref) + if err != nil { + if IsUnknownValueErr(err) { + return node.traverseUnknown(resolver, tr) + } + return err + } + + if node.undefined != nil { + err = node.undefined.Traverse(resolver, tr) + if err != nil { + return err + } + } + + if v == nil { + return nil + } + + if node.any != nil { + err = node.any.Traverse(resolver, tr) + if err != nil { + return err + } + } + + if err := node.traverseValue(resolver, tr, v); err != nil { + return err + } + + for i := range node.mappers { + if err := node.traverseValue(resolver, tr, node.mappers[i].MapValue(v)); err != nil { + return err + } + } + + return nil +} + +func (node *trieNode) traverseValue(resolver ValueResolver, tr 
*trieTraversalResult, value Value) error { + + switch value := value.(type) { + case *Array: + if node.array == nil { + return nil + } + return node.array.traverseArray(resolver, tr, value) + + case Null, Boolean, Number, String: + child, ok := node.scalars.Get(value) + if !ok { + return nil + } + return child.(*trieNode).Traverse(resolver, tr) + } + + return nil +} + +func (node *trieNode) traverseArray(resolver ValueResolver, tr *trieTraversalResult, arr *Array) error { + + if arr.Len() == 0 { + return node.Traverse(resolver, tr) + } + + if node.any != nil { + err := node.any.traverseArray(resolver, tr, arr.Slice(1, -1)) + if err != nil { + return err + } + } + + head := arr.Elem(0).Value + + if !IsScalar(head) { + return nil + } + + child, ok := node.scalars.Get(head) + if !ok { + return nil + } + return child.(*trieNode).traverseArray(resolver, tr, arr.Slice(1, -1)) +} + +func (node *trieNode) traverseUnknown(resolver ValueResolver, tr *trieTraversalResult) error { + + if node == nil { + return nil + } + + if err := node.Traverse(resolver, tr); err != nil { + return err + } + + if err := node.undefined.traverseUnknown(resolver, tr); err != nil { + return err + } + + if err := node.any.traverseUnknown(resolver, tr); err != nil { + return err + } + + if err := node.array.traverseUnknown(resolver, tr); err != nil { + return err + } + + var iterErr error + node.scalars.Iter(func(_, v util.T) bool { + child := v.(*trieNode) + if iterErr = child.traverseUnknown(resolver, tr); iterErr != nil { + return true + } + return false + }) + + return iterErr +} + +// If term `a` is one of the function's operands, we store a Ref: `args[0]` +// for the argument number. So for `f(x, y) { x = 10; y = 12 }`, we'll +// bind `args[0]` and `args[1]` to this rule when called for (x=10) and +// (y=12) respectively. +func eqOperandsToRefAndValue(isVirtual func(Ref) bool, args []*Term, a, b *Term) (*refindex, bool) { + switch v := a.Value.(type) { + case Var: + for i, arg := range args { + if arg.Value.Compare(v) == 0 { + if bval, ok := indexValue(b); ok { + return &refindex{Ref: Ref{FunctionArgRootDocument, InternedIntNumberTerm(i)}, Value: bval}, true + } + } + } + case Ref: + if !RootDocumentNames.Contains(v[0]) { + return nil, false + } + if isVirtual(v) { + return nil, false + } + if v.IsNested() || !v.IsGround() { + return nil, false + } + if bval, ok := indexValue(b); ok { + return &refindex{Ref: v, Value: bval}, true + } + } + return nil, false +} + +func indexValue(b *Term) (Value, bool) { + switch b := b.Value.(type) { + case Null, Boolean, Number, String, Var: + return b, true + case *Array: + stop := false + first := true + vis := NewGenericVisitor(func(x interface{}) bool { + if first { + first = false + return false + } + switch x.(type) { + // No nested structures or values that require evaluation (other than var). + case *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Ref: + stop = true + } + return stop + }) + vis.Walk(b) + if !stop { + return b, true + } + } + + return nil, false +} + +func globDelimiterToString(delim *Term) (string, bool) { + + arr, ok := delim.Value.(*Array) + if !ok { + return "", false + } + + var result string + + if arr.Len() == 0 { + result = "." 
+ } else { + for i := 0; i < arr.Len(); i++ { + term := arr.Elem(i) + s, ok := term.Value.(String) + if !ok { + return "", false + } + result += string(s) + } + } + + return result, true +} + +func globPatternToArray(pattern *Term, delim string) *Term { + + s, ok := pattern.Value.(String) + if !ok { + return nil + } + + parts := splitStringEscaped(string(s), delim) + arr := make([]*Term, len(parts)) + + for i := range parts { + if parts[i] == "*" { + arr[i] = VarTerm("$globwildcard") + } else { + var escaped bool + for _, c := range parts[i] { + if c == '\\' { + escaped = !escaped + continue + } + if !escaped { + switch c { + case '[', '?', '{', '*': + // TODO(tsandall): super glob and character pattern + // matching not supported yet. + return nil + } + } + escaped = false + } + arr[i] = StringTerm(parts[i]) + } + } + + return NewTerm(NewArray(arr...)) +} + +// splits s on characters in delim except if delim characters have been escaped +// with reverse solidus. +func splitStringEscaped(s string, delim string) []string { + + var last, curr int + var escaped bool + var result []string + + for ; curr < len(s); curr++ { + if s[curr] == '\\' || escaped { + escaped = !escaped + continue + } + if strings.ContainsRune(delim, rune(s[curr])) { + result = append(result, s[last:curr]) + last = curr + 1 + } + } + + result = append(result, s[last:]) + + return result +} + +func stringSliceToArray(s []string) *Array { + arr := make([]*Term, len(s)) + for i, v := range s { + arr[i] = StringTerm(v) + } + return NewArray(arr...) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go similarity index 95% rename from constraint/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go index a0200ac18..4558f9141 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/internal/scanner/scanner.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/internal/scanner/scanner.go @@ -9,8 +9,9 @@ import ( "io" "unicode" "unicode/utf8" + "unsafe" - "github.com/open-policy-agent/opa/ast/internal/tokens" + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" ) const bom = 0xFEFF @@ -18,31 +19,31 @@ const bom = 0xFEFF // Scanner is used to tokenize an input stream of // Rego source code. type Scanner struct { + keywords map[string]tokens.Token + bs []byte + errors []Error + tabs []int offset int row int col int - bs []byte - curr rune width int - errors []Error - keywords map[string]tokens.Token - tabs []int + curr rune regoV1Compatible bool } // Error represents a scanner error. type Error struct { - Pos Position Message string + Pos Position } // Position represents a point in the scanned source code. 
type Position struct { + Tabs []int // positions of any tabs preceding Col Offset int // start offset in bytes End int // end offset in bytes Row int // line number computed in bytes Col int // column number computed in bytes - Tabs []int // positions of any tabs preceding Col } // New returns an initialized scanner that will scan @@ -270,7 +271,8 @@ func (s *Scanner) scanIdentifier() string { for isLetter(s.curr) || isDigit(s.curr) { s.next() } - return string(s.bs[start : s.offset-1]) + + return byteSliceToString(s.bs[start : s.offset-1]) } func (s *Scanner) scanNumber() string { @@ -321,7 +323,7 @@ func (s *Scanner) scanNumber() string { } } - return string(s.bs[start : s.offset-1]) + return byteSliceToString(s.bs[start : s.offset-1]) } func (s *Scanner) scanString() string { @@ -355,7 +357,7 @@ func (s *Scanner) scanString() string { } } - return string(s.bs[start : s.offset-1]) + return byteSliceToString(s.bs[start : s.offset-1]) } func (s *Scanner) scanRawString() string { @@ -370,7 +372,8 @@ func (s *Scanner) scanRawString() string { break } } - return string(s.bs[start : s.offset-1]) + + return byteSliceToString(s.bs[start : s.offset-1]) } func (s *Scanner) scanComment() string { @@ -383,7 +386,8 @@ func (s *Scanner) scanComment() string { if s.offset > 1 && s.bs[s.offset-2] == '\r' { end = end - 1 } - return string(s.bs[start:end]) + + return byteSliceToString(s.bs[start:end]) } func (s *Scanner) next() { @@ -413,7 +417,7 @@ func (s *Scanner) next() { if s.curr == '\n' { s.row++ s.col = 0 - s.tabs = []int{} + s.tabs = s.tabs[:0] } else { s.col++ if s.curr == '\t' { @@ -453,3 +457,7 @@ func (s *Scanner) error(reason string) { Col: s.col, }, Message: reason}) } + +func byteSliceToString(bs []byte) string { + return unsafe.String(unsafe.SliceData(bs), len(bs)) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/ast/internal/tokens/tokens.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/ast/internal/tokens/tokens.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go new file mode 100644 index 000000000..17b10231b --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/interning.go @@ -0,0 +1,1098 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import "strconv" + +// NOTE! Great care must be taken **not** to modify the terms returned +// from these functions, as they are shared across all callers. + +var ( + booleanTrueTerm = &Term{Value: Boolean(true)} + booleanFalseTerm = &Term{Value: Boolean(false)} + + // since this is by far the most common negative number + minusOneTerm = &Term{Value: Number("-1")} + + InternedNullTerm = &Term{Value: Null{}} +) + +// InternedBooleanTerm returns an interned term with the given boolean value. +func InternedBooleanTerm(b bool) *Term { + if b { + return booleanTrueTerm + } + + return booleanFalseTerm +} + +// InternedIntNumberTerm returns a term with the given integer value. The term is +// cached between -1 to 512, and for values outside of that range, this function +// is equivalent to ast.IntNumberTerm. 
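+//
+// For example, InternedIntNumberTerm(42) returns the same cached *Term on
+// every call, so the result must be treated as read-only, while
+// InternedIntNumberTerm(1000) falls outside the cached range and allocates a
+// fresh term.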
+func InternedIntNumberTerm(i int) *Term { + if i >= 0 && i < len(intNumberTerms) { + return intNumberTerms[i] + } + + if i == -1 { + return minusOneTerm + } + + return &Term{Value: Number(strconv.Itoa(i))} +} + +// InternedIntFromString returns a term with the given integer value if the string +// maps to an interned term. If the string does not map to an interned term, nil is +// returned. +func InternedIntNumberTermFromString(s string) *Term { + if term, ok := stringToIntNumberTermMap[s]; ok { + return term + } + + return nil +} + +// HasInternedIntNumberTerm returns true if the given integer value maps to an interned +// term, otherwise false. +func HasInternedIntNumberTerm(i int) bool { + return i >= -1 && i < len(intNumberTerms) +} + +var stringToIntNumberTermMap = map[string]*Term{ + "-1": minusOneTerm, + "0": intNumberTerms[0], + "1": intNumberTerms[1], + "2": intNumberTerms[2], + "3": intNumberTerms[3], + "4": intNumberTerms[4], + "5": intNumberTerms[5], + "6": intNumberTerms[6], + "7": intNumberTerms[7], + "8": intNumberTerms[8], + "9": intNumberTerms[9], + "10": intNumberTerms[10], + "11": intNumberTerms[11], + "12": intNumberTerms[12], + "13": intNumberTerms[13], + "14": intNumberTerms[14], + "15": intNumberTerms[15], + "16": intNumberTerms[16], + "17": intNumberTerms[17], + "18": intNumberTerms[18], + "19": intNumberTerms[19], + "20": intNumberTerms[20], + "21": intNumberTerms[21], + "22": intNumberTerms[22], + "23": intNumberTerms[23], + "24": intNumberTerms[24], + "25": intNumberTerms[25], + "26": intNumberTerms[26], + "27": intNumberTerms[27], + "28": intNumberTerms[28], + "29": intNumberTerms[29], + "30": intNumberTerms[30], + "31": intNumberTerms[31], + "32": intNumberTerms[32], + "33": intNumberTerms[33], + "34": intNumberTerms[34], + "35": intNumberTerms[35], + "36": intNumberTerms[36], + "37": intNumberTerms[37], + "38": intNumberTerms[38], + "39": intNumberTerms[39], + "40": intNumberTerms[40], + "41": intNumberTerms[41], + "42": intNumberTerms[42], + "43": intNumberTerms[43], + "44": intNumberTerms[44], + "45": intNumberTerms[45], + "46": intNumberTerms[46], + "47": intNumberTerms[47], + "48": intNumberTerms[48], + "49": intNumberTerms[49], + "50": intNumberTerms[50], + "51": intNumberTerms[51], + "52": intNumberTerms[52], + "53": intNumberTerms[53], + "54": intNumberTerms[54], + "55": intNumberTerms[55], + "56": intNumberTerms[56], + "57": intNumberTerms[57], + "58": intNumberTerms[58], + "59": intNumberTerms[59], + "60": intNumberTerms[60], + "61": intNumberTerms[61], + "62": intNumberTerms[62], + "63": intNumberTerms[63], + "64": intNumberTerms[64], + "65": intNumberTerms[65], + "66": intNumberTerms[66], + "67": intNumberTerms[67], + "68": intNumberTerms[68], + "69": intNumberTerms[69], + "70": intNumberTerms[70], + "71": intNumberTerms[71], + "72": intNumberTerms[72], + "73": intNumberTerms[73], + "74": intNumberTerms[74], + "75": intNumberTerms[75], + "76": intNumberTerms[76], + "77": intNumberTerms[77], + "78": intNumberTerms[78], + "79": intNumberTerms[79], + "80": intNumberTerms[80], + "81": intNumberTerms[81], + "82": intNumberTerms[82], + "83": intNumberTerms[83], + "84": intNumberTerms[84], + "85": intNumberTerms[85], + "86": intNumberTerms[86], + "87": intNumberTerms[87], + "88": intNumberTerms[88], + "89": intNumberTerms[89], + "90": intNumberTerms[90], + "91": intNumberTerms[91], + "92": intNumberTerms[92], + "93": intNumberTerms[93], + "94": intNumberTerms[94], + "95": intNumberTerms[95], + "96": intNumberTerms[96], + "97": intNumberTerms[97], + "98": 
intNumberTerms[98], + "99": intNumberTerms[99], + "100": intNumberTerms[100], + "101": intNumberTerms[101], + "102": intNumberTerms[102], + "103": intNumberTerms[103], + "104": intNumberTerms[104], + "105": intNumberTerms[105], + "106": intNumberTerms[106], + "107": intNumberTerms[107], + "108": intNumberTerms[108], + "109": intNumberTerms[109], + "110": intNumberTerms[110], + "111": intNumberTerms[111], + "112": intNumberTerms[112], + "113": intNumberTerms[113], + "114": intNumberTerms[114], + "115": intNumberTerms[115], + "116": intNumberTerms[116], + "117": intNumberTerms[117], + "118": intNumberTerms[118], + "119": intNumberTerms[119], + "120": intNumberTerms[120], + "121": intNumberTerms[121], + "122": intNumberTerms[122], + "123": intNumberTerms[123], + "124": intNumberTerms[124], + "125": intNumberTerms[125], + "126": intNumberTerms[126], + "127": intNumberTerms[127], + "128": intNumberTerms[128], + "129": intNumberTerms[129], + "130": intNumberTerms[130], + "131": intNumberTerms[131], + "132": intNumberTerms[132], + "133": intNumberTerms[133], + "134": intNumberTerms[134], + "135": intNumberTerms[135], + "136": intNumberTerms[136], + "137": intNumberTerms[137], + "138": intNumberTerms[138], + "139": intNumberTerms[139], + "140": intNumberTerms[140], + "141": intNumberTerms[141], + "142": intNumberTerms[142], + "143": intNumberTerms[143], + "144": intNumberTerms[144], + "145": intNumberTerms[145], + "146": intNumberTerms[146], + "147": intNumberTerms[147], + "148": intNumberTerms[148], + "149": intNumberTerms[149], + "150": intNumberTerms[150], + "151": intNumberTerms[151], + "152": intNumberTerms[152], + "153": intNumberTerms[153], + "154": intNumberTerms[154], + "155": intNumberTerms[155], + "156": intNumberTerms[156], + "157": intNumberTerms[157], + "158": intNumberTerms[158], + "159": intNumberTerms[159], + "160": intNumberTerms[160], + "161": intNumberTerms[161], + "162": intNumberTerms[162], + "163": intNumberTerms[163], + "164": intNumberTerms[164], + "165": intNumberTerms[165], + "166": intNumberTerms[166], + "167": intNumberTerms[167], + "168": intNumberTerms[168], + "169": intNumberTerms[169], + "170": intNumberTerms[170], + "171": intNumberTerms[171], + "172": intNumberTerms[172], + "173": intNumberTerms[173], + "174": intNumberTerms[174], + "175": intNumberTerms[175], + "176": intNumberTerms[176], + "177": intNumberTerms[177], + "178": intNumberTerms[178], + "179": intNumberTerms[179], + "180": intNumberTerms[180], + "181": intNumberTerms[181], + "182": intNumberTerms[182], + "183": intNumberTerms[183], + "184": intNumberTerms[184], + "185": intNumberTerms[185], + "186": intNumberTerms[186], + "187": intNumberTerms[187], + "188": intNumberTerms[188], + "189": intNumberTerms[189], + "190": intNumberTerms[190], + "191": intNumberTerms[191], + "192": intNumberTerms[192], + "193": intNumberTerms[193], + "194": intNumberTerms[194], + "195": intNumberTerms[195], + "196": intNumberTerms[196], + "197": intNumberTerms[197], + "198": intNumberTerms[198], + "199": intNumberTerms[199], + "200": intNumberTerms[200], + "201": intNumberTerms[201], + "202": intNumberTerms[202], + "203": intNumberTerms[203], + "204": intNumberTerms[204], + "205": intNumberTerms[205], + "206": intNumberTerms[206], + "207": intNumberTerms[207], + "208": intNumberTerms[208], + "209": intNumberTerms[209], + "210": intNumberTerms[210], + "211": intNumberTerms[211], + "212": intNumberTerms[212], + "213": intNumberTerms[213], + "214": intNumberTerms[214], + "215": intNumberTerms[215], + "216": 
intNumberTerms[216], + "217": intNumberTerms[217], + "218": intNumberTerms[218], + "219": intNumberTerms[219], + "220": intNumberTerms[220], + "221": intNumberTerms[221], + "222": intNumberTerms[222], + "223": intNumberTerms[223], + "224": intNumberTerms[224], + "225": intNumberTerms[225], + "226": intNumberTerms[226], + "227": intNumberTerms[227], + "228": intNumberTerms[228], + "229": intNumberTerms[229], + "230": intNumberTerms[230], + "231": intNumberTerms[231], + "232": intNumberTerms[232], + "233": intNumberTerms[233], + "234": intNumberTerms[234], + "235": intNumberTerms[235], + "236": intNumberTerms[236], + "237": intNumberTerms[237], + "238": intNumberTerms[238], + "239": intNumberTerms[239], + "240": intNumberTerms[240], + "241": intNumberTerms[241], + "242": intNumberTerms[242], + "243": intNumberTerms[243], + "244": intNumberTerms[244], + "245": intNumberTerms[245], + "246": intNumberTerms[246], + "247": intNumberTerms[247], + "248": intNumberTerms[248], + "249": intNumberTerms[249], + "250": intNumberTerms[250], + "251": intNumberTerms[251], + "252": intNumberTerms[252], + "253": intNumberTerms[253], + "254": intNumberTerms[254], + "255": intNumberTerms[255], + "256": intNumberTerms[256], + "257": intNumberTerms[257], + "258": intNumberTerms[258], + "259": intNumberTerms[259], + "260": intNumberTerms[260], + "261": intNumberTerms[261], + "262": intNumberTerms[262], + "263": intNumberTerms[263], + "264": intNumberTerms[264], + "265": intNumberTerms[265], + "266": intNumberTerms[266], + "267": intNumberTerms[267], + "268": intNumberTerms[268], + "269": intNumberTerms[269], + "270": intNumberTerms[270], + "271": intNumberTerms[271], + "272": intNumberTerms[272], + "273": intNumberTerms[273], + "274": intNumberTerms[274], + "275": intNumberTerms[275], + "276": intNumberTerms[276], + "277": intNumberTerms[277], + "278": intNumberTerms[278], + "279": intNumberTerms[279], + "280": intNumberTerms[280], + "281": intNumberTerms[281], + "282": intNumberTerms[282], + "283": intNumberTerms[283], + "284": intNumberTerms[284], + "285": intNumberTerms[285], + "286": intNumberTerms[286], + "287": intNumberTerms[287], + "288": intNumberTerms[288], + "289": intNumberTerms[289], + "290": intNumberTerms[290], + "291": intNumberTerms[291], + "292": intNumberTerms[292], + "293": intNumberTerms[293], + "294": intNumberTerms[294], + "295": intNumberTerms[295], + "296": intNumberTerms[296], + "297": intNumberTerms[297], + "298": intNumberTerms[298], + "299": intNumberTerms[299], + "300": intNumberTerms[300], + "301": intNumberTerms[301], + "302": intNumberTerms[302], + "303": intNumberTerms[303], + "304": intNumberTerms[304], + "305": intNumberTerms[305], + "306": intNumberTerms[306], + "307": intNumberTerms[307], + "308": intNumberTerms[308], + "309": intNumberTerms[309], + "310": intNumberTerms[310], + "311": intNumberTerms[311], + "312": intNumberTerms[312], + "313": intNumberTerms[313], + "314": intNumberTerms[314], + "315": intNumberTerms[315], + "316": intNumberTerms[316], + "317": intNumberTerms[317], + "318": intNumberTerms[318], + "319": intNumberTerms[319], + "320": intNumberTerms[320], + "321": intNumberTerms[321], + "322": intNumberTerms[322], + "323": intNumberTerms[323], + "324": intNumberTerms[324], + "325": intNumberTerms[325], + "326": intNumberTerms[326], + "327": intNumberTerms[327], + "328": intNumberTerms[328], + "329": intNumberTerms[329], + "330": intNumberTerms[330], + "331": intNumberTerms[331], + "332": intNumberTerms[332], + "333": intNumberTerms[333], + "334": 
intNumberTerms[334], + "335": intNumberTerms[335], + "336": intNumberTerms[336], + "337": intNumberTerms[337], + "338": intNumberTerms[338], + "339": intNumberTerms[339], + "340": intNumberTerms[340], + "341": intNumberTerms[341], + "342": intNumberTerms[342], + "343": intNumberTerms[343], + "344": intNumberTerms[344], + "345": intNumberTerms[345], + "346": intNumberTerms[346], + "347": intNumberTerms[347], + "348": intNumberTerms[348], + "349": intNumberTerms[349], + "350": intNumberTerms[350], + "351": intNumberTerms[351], + "352": intNumberTerms[352], + "353": intNumberTerms[353], + "354": intNumberTerms[354], + "355": intNumberTerms[355], + "356": intNumberTerms[356], + "357": intNumberTerms[357], + "358": intNumberTerms[358], + "359": intNumberTerms[359], + "360": intNumberTerms[360], + "361": intNumberTerms[361], + "362": intNumberTerms[362], + "363": intNumberTerms[363], + "364": intNumberTerms[364], + "365": intNumberTerms[365], + "366": intNumberTerms[366], + "367": intNumberTerms[367], + "368": intNumberTerms[368], + "369": intNumberTerms[369], + "370": intNumberTerms[370], + "371": intNumberTerms[371], + "372": intNumberTerms[372], + "373": intNumberTerms[373], + "374": intNumberTerms[374], + "375": intNumberTerms[375], + "376": intNumberTerms[376], + "377": intNumberTerms[377], + "378": intNumberTerms[378], + "379": intNumberTerms[379], + "380": intNumberTerms[380], + "381": intNumberTerms[381], + "382": intNumberTerms[382], + "383": intNumberTerms[383], + "384": intNumberTerms[384], + "385": intNumberTerms[385], + "386": intNumberTerms[386], + "387": intNumberTerms[387], + "388": intNumberTerms[388], + "389": intNumberTerms[389], + "390": intNumberTerms[390], + "391": intNumberTerms[391], + "392": intNumberTerms[392], + "393": intNumberTerms[393], + "394": intNumberTerms[394], + "395": intNumberTerms[395], + "396": intNumberTerms[396], + "397": intNumberTerms[397], + "398": intNumberTerms[398], + "399": intNumberTerms[399], + "400": intNumberTerms[400], + "401": intNumberTerms[401], + "402": intNumberTerms[402], + "403": intNumberTerms[403], + "404": intNumberTerms[404], + "405": intNumberTerms[405], + "406": intNumberTerms[406], + "407": intNumberTerms[407], + "408": intNumberTerms[408], + "409": intNumberTerms[409], + "410": intNumberTerms[410], + "411": intNumberTerms[411], + "412": intNumberTerms[412], + "413": intNumberTerms[413], + "414": intNumberTerms[414], + "415": intNumberTerms[415], + "416": intNumberTerms[416], + "417": intNumberTerms[417], + "418": intNumberTerms[418], + "419": intNumberTerms[419], + "420": intNumberTerms[420], + "421": intNumberTerms[421], + "422": intNumberTerms[422], + "423": intNumberTerms[423], + "424": intNumberTerms[424], + "425": intNumberTerms[425], + "426": intNumberTerms[426], + "427": intNumberTerms[427], + "428": intNumberTerms[428], + "429": intNumberTerms[429], + "430": intNumberTerms[430], + "431": intNumberTerms[431], + "432": intNumberTerms[432], + "433": intNumberTerms[433], + "434": intNumberTerms[434], + "435": intNumberTerms[435], + "436": intNumberTerms[436], + "437": intNumberTerms[437], + "438": intNumberTerms[438], + "439": intNumberTerms[439], + "440": intNumberTerms[440], + "441": intNumberTerms[441], + "442": intNumberTerms[442], + "443": intNumberTerms[443], + "444": intNumberTerms[444], + "445": intNumberTerms[445], + "446": intNumberTerms[446], + "447": intNumberTerms[447], + "448": intNumberTerms[448], + "449": intNumberTerms[449], + "450": intNumberTerms[450], + "451": intNumberTerms[451], + "452": 
intNumberTerms[452], + "453": intNumberTerms[453], + "454": intNumberTerms[454], + "455": intNumberTerms[455], + "456": intNumberTerms[456], + "457": intNumberTerms[457], + "458": intNumberTerms[458], + "459": intNumberTerms[459], + "460": intNumberTerms[460], + "461": intNumberTerms[461], + "462": intNumberTerms[462], + "463": intNumberTerms[463], + "464": intNumberTerms[464], + "465": intNumberTerms[465], + "466": intNumberTerms[466], + "467": intNumberTerms[467], + "468": intNumberTerms[468], + "469": intNumberTerms[469], + "470": intNumberTerms[470], + "471": intNumberTerms[471], + "472": intNumberTerms[472], + "473": intNumberTerms[473], + "474": intNumberTerms[474], + "475": intNumberTerms[475], + "476": intNumberTerms[476], + "477": intNumberTerms[477], + "478": intNumberTerms[478], + "479": intNumberTerms[479], + "480": intNumberTerms[480], + "481": intNumberTerms[481], + "482": intNumberTerms[482], + "483": intNumberTerms[483], + "484": intNumberTerms[484], + "485": intNumberTerms[485], + "486": intNumberTerms[486], + "487": intNumberTerms[487], + "488": intNumberTerms[488], + "489": intNumberTerms[489], + "490": intNumberTerms[490], + "491": intNumberTerms[491], + "492": intNumberTerms[492], + "493": intNumberTerms[493], + "494": intNumberTerms[494], + "495": intNumberTerms[495], + "496": intNumberTerms[496], + "497": intNumberTerms[497], + "498": intNumberTerms[498], + "499": intNumberTerms[499], + "500": intNumberTerms[500], + "501": intNumberTerms[501], + "502": intNumberTerms[502], + "503": intNumberTerms[503], + "504": intNumberTerms[504], + "505": intNumberTerms[505], + "506": intNumberTerms[506], + "507": intNumberTerms[507], + "508": intNumberTerms[508], + "509": intNumberTerms[509], + "510": intNumberTerms[510], + "511": intNumberTerms[511], + "512": intNumberTerms[512], +} + +var intNumberTerms = [...]*Term{ + {Value: Number("0")}, + {Value: Number("1")}, + {Value: Number("2")}, + {Value: Number("3")}, + {Value: Number("4")}, + {Value: Number("5")}, + {Value: Number("6")}, + {Value: Number("7")}, + {Value: Number("8")}, + {Value: Number("9")}, + {Value: Number("10")}, + {Value: Number("11")}, + {Value: Number("12")}, + {Value: Number("13")}, + {Value: Number("14")}, + {Value: Number("15")}, + {Value: Number("16")}, + {Value: Number("17")}, + {Value: Number("18")}, + {Value: Number("19")}, + {Value: Number("20")}, + {Value: Number("21")}, + {Value: Number("22")}, + {Value: Number("23")}, + {Value: Number("24")}, + {Value: Number("25")}, + {Value: Number("26")}, + {Value: Number("27")}, + {Value: Number("28")}, + {Value: Number("29")}, + {Value: Number("30")}, + {Value: Number("31")}, + {Value: Number("32")}, + {Value: Number("33")}, + {Value: Number("34")}, + {Value: Number("35")}, + {Value: Number("36")}, + {Value: Number("37")}, + {Value: Number("38")}, + {Value: Number("39")}, + {Value: Number("40")}, + {Value: Number("41")}, + {Value: Number("42")}, + {Value: Number("43")}, + {Value: Number("44")}, + {Value: Number("45")}, + {Value: Number("46")}, + {Value: Number("47")}, + {Value: Number("48")}, + {Value: Number("49")}, + {Value: Number("50")}, + {Value: Number("51")}, + {Value: Number("52")}, + {Value: Number("53")}, + {Value: Number("54")}, + {Value: Number("55")}, + {Value: Number("56")}, + {Value: Number("57")}, + {Value: Number("58")}, + {Value: Number("59")}, + {Value: Number("60")}, + {Value: Number("61")}, + {Value: Number("62")}, + {Value: Number("63")}, + {Value: Number("64")}, + {Value: Number("65")}, + {Value: Number("66")}, + {Value: Number("67")}, + 
{Value: Number("68")}, + {Value: Number("69")}, + {Value: Number("70")}, + {Value: Number("71")}, + {Value: Number("72")}, + {Value: Number("73")}, + {Value: Number("74")}, + {Value: Number("75")}, + {Value: Number("76")}, + {Value: Number("77")}, + {Value: Number("78")}, + {Value: Number("79")}, + {Value: Number("80")}, + {Value: Number("81")}, + {Value: Number("82")}, + {Value: Number("83")}, + {Value: Number("84")}, + {Value: Number("85")}, + {Value: Number("86")}, + {Value: Number("87")}, + {Value: Number("88")}, + {Value: Number("89")}, + {Value: Number("90")}, + {Value: Number("91")}, + {Value: Number("92")}, + {Value: Number("93")}, + {Value: Number("94")}, + {Value: Number("95")}, + {Value: Number("96")}, + {Value: Number("97")}, + {Value: Number("98")}, + {Value: Number("99")}, + {Value: Number("100")}, + {Value: Number("101")}, + {Value: Number("102")}, + {Value: Number("103")}, + {Value: Number("104")}, + {Value: Number("105")}, + {Value: Number("106")}, + {Value: Number("107")}, + {Value: Number("108")}, + {Value: Number("109")}, + {Value: Number("110")}, + {Value: Number("111")}, + {Value: Number("112")}, + {Value: Number("113")}, + {Value: Number("114")}, + {Value: Number("115")}, + {Value: Number("116")}, + {Value: Number("117")}, + {Value: Number("118")}, + {Value: Number("119")}, + {Value: Number("120")}, + {Value: Number("121")}, + {Value: Number("122")}, + {Value: Number("123")}, + {Value: Number("124")}, + {Value: Number("125")}, + {Value: Number("126")}, + {Value: Number("127")}, + {Value: Number("128")}, + {Value: Number("129")}, + {Value: Number("130")}, + {Value: Number("131")}, + {Value: Number("132")}, + {Value: Number("133")}, + {Value: Number("134")}, + {Value: Number("135")}, + {Value: Number("136")}, + {Value: Number("137")}, + {Value: Number("138")}, + {Value: Number("139")}, + {Value: Number("140")}, + {Value: Number("141")}, + {Value: Number("142")}, + {Value: Number("143")}, + {Value: Number("144")}, + {Value: Number("145")}, + {Value: Number("146")}, + {Value: Number("147")}, + {Value: Number("148")}, + {Value: Number("149")}, + {Value: Number("150")}, + {Value: Number("151")}, + {Value: Number("152")}, + {Value: Number("153")}, + {Value: Number("154")}, + {Value: Number("155")}, + {Value: Number("156")}, + {Value: Number("157")}, + {Value: Number("158")}, + {Value: Number("159")}, + {Value: Number("160")}, + {Value: Number("161")}, + {Value: Number("162")}, + {Value: Number("163")}, + {Value: Number("164")}, + {Value: Number("165")}, + {Value: Number("166")}, + {Value: Number("167")}, + {Value: Number("168")}, + {Value: Number("169")}, + {Value: Number("170")}, + {Value: Number("171")}, + {Value: Number("172")}, + {Value: Number("173")}, + {Value: Number("174")}, + {Value: Number("175")}, + {Value: Number("176")}, + {Value: Number("177")}, + {Value: Number("178")}, + {Value: Number("179")}, + {Value: Number("180")}, + {Value: Number("181")}, + {Value: Number("182")}, + {Value: Number("183")}, + {Value: Number("184")}, + {Value: Number("185")}, + {Value: Number("186")}, + {Value: Number("187")}, + {Value: Number("188")}, + {Value: Number("189")}, + {Value: Number("190")}, + {Value: Number("191")}, + {Value: Number("192")}, + {Value: Number("193")}, + {Value: Number("194")}, + {Value: Number("195")}, + {Value: Number("196")}, + {Value: Number("197")}, + {Value: Number("198")}, + {Value: Number("199")}, + {Value: Number("200")}, + {Value: Number("201")}, + {Value: Number("202")}, + {Value: Number("203")}, + {Value: Number("204")}, + {Value: Number("205")}, 
+ {Value: Number("206")}, + {Value: Number("207")}, + {Value: Number("208")}, + {Value: Number("209")}, + {Value: Number("210")}, + {Value: Number("211")}, + {Value: Number("212")}, + {Value: Number("213")}, + {Value: Number("214")}, + {Value: Number("215")}, + {Value: Number("216")}, + {Value: Number("217")}, + {Value: Number("218")}, + {Value: Number("219")}, + {Value: Number("220")}, + {Value: Number("221")}, + {Value: Number("222")}, + {Value: Number("223")}, + {Value: Number("224")}, + {Value: Number("225")}, + {Value: Number("226")}, + {Value: Number("227")}, + {Value: Number("228")}, + {Value: Number("229")}, + {Value: Number("230")}, + {Value: Number("231")}, + {Value: Number("232")}, + {Value: Number("233")}, + {Value: Number("234")}, + {Value: Number("235")}, + {Value: Number("236")}, + {Value: Number("237")}, + {Value: Number("238")}, + {Value: Number("239")}, + {Value: Number("240")}, + {Value: Number("241")}, + {Value: Number("242")}, + {Value: Number("243")}, + {Value: Number("244")}, + {Value: Number("245")}, + {Value: Number("246")}, + {Value: Number("247")}, + {Value: Number("248")}, + {Value: Number("249")}, + {Value: Number("250")}, + {Value: Number("251")}, + {Value: Number("252")}, + {Value: Number("253")}, + {Value: Number("254")}, + {Value: Number("255")}, + {Value: Number("256")}, + {Value: Number("257")}, + {Value: Number("258")}, + {Value: Number("259")}, + {Value: Number("260")}, + {Value: Number("261")}, + {Value: Number("262")}, + {Value: Number("263")}, + {Value: Number("264")}, + {Value: Number("265")}, + {Value: Number("266")}, + {Value: Number("267")}, + {Value: Number("268")}, + {Value: Number("269")}, + {Value: Number("270")}, + {Value: Number("271")}, + {Value: Number("272")}, + {Value: Number("273")}, + {Value: Number("274")}, + {Value: Number("275")}, + {Value: Number("276")}, + {Value: Number("277")}, + {Value: Number("278")}, + {Value: Number("279")}, + {Value: Number("280")}, + {Value: Number("281")}, + {Value: Number("282")}, + {Value: Number("283")}, + {Value: Number("284")}, + {Value: Number("285")}, + {Value: Number("286")}, + {Value: Number("287")}, + {Value: Number("288")}, + {Value: Number("289")}, + {Value: Number("290")}, + {Value: Number("291")}, + {Value: Number("292")}, + {Value: Number("293")}, + {Value: Number("294")}, + {Value: Number("295")}, + {Value: Number("296")}, + {Value: Number("297")}, + {Value: Number("298")}, + {Value: Number("299")}, + {Value: Number("300")}, + {Value: Number("301")}, + {Value: Number("302")}, + {Value: Number("303")}, + {Value: Number("304")}, + {Value: Number("305")}, + {Value: Number("306")}, + {Value: Number("307")}, + {Value: Number("308")}, + {Value: Number("309")}, + {Value: Number("310")}, + {Value: Number("311")}, + {Value: Number("312")}, + {Value: Number("313")}, + {Value: Number("314")}, + {Value: Number("315")}, + {Value: Number("316")}, + {Value: Number("317")}, + {Value: Number("318")}, + {Value: Number("319")}, + {Value: Number("320")}, + {Value: Number("321")}, + {Value: Number("322")}, + {Value: Number("323")}, + {Value: Number("324")}, + {Value: Number("325")}, + {Value: Number("326")}, + {Value: Number("327")}, + {Value: Number("328")}, + {Value: Number("329")}, + {Value: Number("330")}, + {Value: Number("331")}, + {Value: Number("332")}, + {Value: Number("333")}, + {Value: Number("334")}, + {Value: Number("335")}, + {Value: Number("336")}, + {Value: Number("337")}, + {Value: Number("338")}, + {Value: Number("339")}, + {Value: Number("340")}, + {Value: Number("341")}, + {Value: 
Number("342")}, + {Value: Number("343")}, + {Value: Number("344")}, + {Value: Number("345")}, + {Value: Number("346")}, + {Value: Number("347")}, + {Value: Number("348")}, + {Value: Number("349")}, + {Value: Number("350")}, + {Value: Number("351")}, + {Value: Number("352")}, + {Value: Number("353")}, + {Value: Number("354")}, + {Value: Number("355")}, + {Value: Number("356")}, + {Value: Number("357")}, + {Value: Number("358")}, + {Value: Number("359")}, + {Value: Number("360")}, + {Value: Number("361")}, + {Value: Number("362")}, + {Value: Number("363")}, + {Value: Number("364")}, + {Value: Number("365")}, + {Value: Number("366")}, + {Value: Number("367")}, + {Value: Number("368")}, + {Value: Number("369")}, + {Value: Number("370")}, + {Value: Number("371")}, + {Value: Number("372")}, + {Value: Number("373")}, + {Value: Number("374")}, + {Value: Number("375")}, + {Value: Number("376")}, + {Value: Number("377")}, + {Value: Number("378")}, + {Value: Number("379")}, + {Value: Number("380")}, + {Value: Number("381")}, + {Value: Number("382")}, + {Value: Number("383")}, + {Value: Number("384")}, + {Value: Number("385")}, + {Value: Number("386")}, + {Value: Number("387")}, + {Value: Number("388")}, + {Value: Number("389")}, + {Value: Number("390")}, + {Value: Number("391")}, + {Value: Number("392")}, + {Value: Number("393")}, + {Value: Number("394")}, + {Value: Number("395")}, + {Value: Number("396")}, + {Value: Number("397")}, + {Value: Number("398")}, + {Value: Number("399")}, + {Value: Number("400")}, + {Value: Number("401")}, + {Value: Number("402")}, + {Value: Number("403")}, + {Value: Number("404")}, + {Value: Number("405")}, + {Value: Number("406")}, + {Value: Number("407")}, + {Value: Number("408")}, + {Value: Number("409")}, + {Value: Number("410")}, + {Value: Number("411")}, + {Value: Number("412")}, + {Value: Number("413")}, + {Value: Number("414")}, + {Value: Number("415")}, + {Value: Number("416")}, + {Value: Number("417")}, + {Value: Number("418")}, + {Value: Number("419")}, + {Value: Number("420")}, + {Value: Number("421")}, + {Value: Number("422")}, + {Value: Number("423")}, + {Value: Number("424")}, + {Value: Number("425")}, + {Value: Number("426")}, + {Value: Number("427")}, + {Value: Number("428")}, + {Value: Number("429")}, + {Value: Number("430")}, + {Value: Number("431")}, + {Value: Number("432")}, + {Value: Number("433")}, + {Value: Number("434")}, + {Value: Number("435")}, + {Value: Number("436")}, + {Value: Number("437")}, + {Value: Number("438")}, + {Value: Number("439")}, + {Value: Number("440")}, + {Value: Number("441")}, + {Value: Number("442")}, + {Value: Number("443")}, + {Value: Number("444")}, + {Value: Number("445")}, + {Value: Number("446")}, + {Value: Number("447")}, + {Value: Number("448")}, + {Value: Number("449")}, + {Value: Number("450")}, + {Value: Number("451")}, + {Value: Number("452")}, + {Value: Number("453")}, + {Value: Number("454")}, + {Value: Number("455")}, + {Value: Number("456")}, + {Value: Number("457")}, + {Value: Number("458")}, + {Value: Number("459")}, + {Value: Number("460")}, + {Value: Number("461")}, + {Value: Number("462")}, + {Value: Number("463")}, + {Value: Number("464")}, + {Value: Number("465")}, + {Value: Number("466")}, + {Value: Number("467")}, + {Value: Number("468")}, + {Value: Number("469")}, + {Value: Number("470")}, + {Value: Number("471")}, + {Value: Number("472")}, + {Value: Number("473")}, + {Value: Number("474")}, + {Value: Number("475")}, + {Value: Number("476")}, + {Value: Number("477")}, + {Value: Number("478")}, + 
{Value: Number("479")}, + {Value: Number("480")}, + {Value: Number("481")}, + {Value: Number("482")}, + {Value: Number("483")}, + {Value: Number("484")}, + {Value: Number("485")}, + {Value: Number("486")}, + {Value: Number("487")}, + {Value: Number("488")}, + {Value: Number("489")}, + {Value: Number("490")}, + {Value: Number("491")}, + {Value: Number("492")}, + {Value: Number("493")}, + {Value: Number("494")}, + {Value: Number("495")}, + {Value: Number("496")}, + {Value: Number("497")}, + {Value: Number("498")}, + {Value: Number("499")}, + {Value: Number("500")}, + {Value: Number("501")}, + {Value: Number("502")}, + {Value: Number("503")}, + {Value: Number("504")}, + {Value: Number("505")}, + {Value: Number("506")}, + {Value: Number("507")}, + {Value: Number("508")}, + {Value: Number("509")}, + {Value: Number("510")}, + {Value: Number("511")}, + {Value: Number("512")}, +} + +var InternedEmptyString = StringTerm("") + +var InternedEmptyObject = ObjectTerm() diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go new file mode 100644 index 000000000..9081fe703 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/json/json.go @@ -0,0 +1,106 @@ +// Copyright 2023 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// This package provides options for JSON marshalling of AST nodes, and location +// data in particular. Since location data occupies a significant portion of the +// AST when included, it is excluded by default. The options provided here allow +// changing that behavior — either for all nodes or for specific types. Since +// JSONMarshaller implementations have access only to the node being marshaled, +// our options are to either attach these settings to *all* nodes in the AST, or +// to provide them via global state. The former is perhaps a little more elegant, +// and is what we went with initially. The cost of attaching these settings to +// every node however turned out to be non-negligible, and given that the number +// of users who have an interest in AST serialization are likely to be few, we +// have since switched to using global state, as provided here. Note that this +// is mostly to provide an equivalent feature to what we had before, should +// anyone depend on that. Users who need fine-grained control over AST +// serialization are recommended to use external libraries for that purpose, +// such as `github.com/json-iterator/go`. +package json + +import "sync" + +// Options defines the options for JSON operations, +// currently only marshaling can be configured +type Options struct { + MarshalOptions MarshalOptions +} + +// MarshalOptions defines the options for JSON marshaling, +// currently only toggling the marshaling of location information is supported +type MarshalOptions struct { + // IncludeLocation toggles the marshaling of location information + IncludeLocation NodeToggle + // IncludeLocationText additionally/optionally includes the text of the location + IncludeLocationText bool + // ExcludeLocationFile additionally/optionally excludes the file of the location + // Note that this is inverted (i.e. 
not "include" as the default needs to remain false) + ExcludeLocationFile bool +} + +// NodeToggle is a generic struct to allow the toggling of +// settings for different ast node types +type NodeToggle struct { + Term bool + Package bool + Comment bool + Import bool + Rule bool + Head bool + Expr bool + SomeDecl bool + Every bool + With bool + Annotations bool + AnnotationsRef bool +} + +// configuredJSONOptions synchronizes access to the global JSON options +type configuredJSONOptions struct { + options Options + lock sync.RWMutex +} + +var options = &configuredJSONOptions{ + options: Defaults(), +} + +// SetOptions sets the global options for marshalling AST nodes to JSON +func SetOptions(opts Options) { + options.lock.Lock() + defer options.lock.Unlock() + options.options = opts +} + +// GetOptions returns (a copy of) the global options for marshalling AST nodes to JSON +func GetOptions() Options { + options.lock.RLock() + defer options.lock.RUnlock() + return options.options +} + +// Defaults returns the default JSON options, which is to exclude location +// information in serialized JSON AST. +func Defaults() Options { + return Options{ + MarshalOptions: MarshalOptions{ + IncludeLocation: NodeToggle{ + Term: false, + Package: false, + Comment: false, + Import: false, + Rule: false, + Head: false, + Expr: false, + SomeDecl: false, + Every: false, + With: false, + Annotations: false, + AnnotationsRef: false, + }, + IncludeLocationText: false, + ExcludeLocationFile: false, + }, + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/location/location.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go similarity index 91% rename from constraint/vendor/github.com/open-policy-agent/opa/ast/location/location.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go index 92226df3f..716aad693 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/location/location.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/location/location.go @@ -7,7 +7,7 @@ import ( "errors" "fmt" - astJSON "github.com/open-policy-agent/opa/ast/json" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" ) // Location records a position in source code @@ -18,9 +18,6 @@ type Location struct { Col int `json:"col"` // The column in the row. Offset int `json:"-"` // The byte offset for the location in the source. - // JSONOptions specifies options for marshaling and unmarshalling of locations - JSONOptions astJSON.Options - Tabs []int `json:"-"` // The column offsets of tabs in the source. 
} @@ -98,7 +95,8 @@ func (loc *Location) Compare(other *Location) int { func (loc *Location) MarshalJSON() ([]byte, error) { // structs are used here to preserve the field ordering of the original Location struct - if loc.JSONOptions.MarshalOptions.ExcludeLocationFile { + jsonOptions := astJSON.GetOptions().MarshalOptions + if jsonOptions.ExcludeLocationFile { data := struct { Row int `json:"row"` Col int `json:"col"` @@ -108,7 +106,7 @@ func (loc *Location) MarshalJSON() ([]byte, error) { Col: loc.Col, } - if loc.JSONOptions.MarshalOptions.IncludeLocationText { + if jsonOptions.IncludeLocationText { data.Text = loc.Text } @@ -126,7 +124,7 @@ func (loc *Location) MarshalJSON() ([]byte, error) { File: loc.File, } - if loc.JSONOptions.MarshalOptions.IncludeLocationText { + if jsonOptions.IncludeLocationText { data.Text = loc.Text } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/map.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/map.go new file mode 100644 index 000000000..5a64f3250 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/map.go @@ -0,0 +1,133 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "encoding/json" + + "github.com/open-policy-agent/opa/v1/util" +) + +// ValueMap represents a key/value map between AST term values. Any type of term +// can be used as a key in the map. +type ValueMap struct { + hashMap *util.HashMap +} + +// NewValueMap returns a new ValueMap. +func NewValueMap() *ValueMap { + vs := &ValueMap{ + hashMap: util.NewHashMap(valueEq, valueHash), + } + return vs +} + +// MarshalJSON provides a custom marshaller for the ValueMap which +// will include the key, value, and value type. +func (vs *ValueMap) MarshalJSON() ([]byte, error) { + var tmp []map[string]interface{} + vs.Iter(func(k Value, v Value) bool { + tmp = append(tmp, map[string]interface{}{ + "name": k.String(), + "type": ValueName(v), + "value": v, + }) + return false + }) + return json.Marshal(tmp) +} + +// Copy returns a shallow copy of the ValueMap. +func (vs *ValueMap) Copy() *ValueMap { + if vs == nil { + return nil + } + cpy := NewValueMap() + cpy.hashMap = vs.hashMap.Copy() + return cpy +} + +// Equal returns true if this ValueMap equals the other. +func (vs *ValueMap) Equal(other *ValueMap) bool { + if vs == nil { + return other == nil || other.Len() == 0 + } + if other == nil { + return vs.Len() == 0 + } + return vs.hashMap.Equal(other.hashMap) +} + +// Len returns the number of elements in the map. +func (vs *ValueMap) Len() int { + if vs == nil { + return 0 + } + return vs.hashMap.Len() +} + +// Get returns the value in the map for k. +func (vs *ValueMap) Get(k Value) Value { + if vs != nil { + if v, ok := vs.hashMap.Get(k); ok { + return v.(Value) + } + } + return nil +} + +// Hash returns a hash code for this ValueMap. +func (vs *ValueMap) Hash() int { + if vs == nil { + return 0 + } + return vs.hashMap.Hash() +} + +// Iter calls the iter function for each key/value pair in the map. If the iter +// function returns true, iteration stops. +func (vs *ValueMap) Iter(iter func(Value, Value) bool) bool { + if vs == nil { + return false + } + return vs.hashMap.Iter(func(kt, vt util.T) bool { + k := kt.(Value) + v := vt.(Value) + return iter(k, v) + }) +} + +// Put inserts a key k into the map with value v. 
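A minimal usage sketch (not part of the vendored diff) tying together the ValueMap API from map.go above with the interned integer terms added to term.go earlier in this diff. The import path and identifiers come from the vendored code; the example program itself is an assumption for illustration.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	vm := ast.NewValueMap()

	// Interned terms avoid allocating a fresh Number for small integers
	// (-1 through 512); values outside that range fall back to allocation.
	for i, name := range []string{"low", "medium", "high"} {
		vm.Put(ast.String(name), ast.InternedIntNumberTerm(i).Value)
	}

	fmt.Println(vm.Len())                                  // 3
	fmt.Println(vm.Get(ast.String("medium")))              // 1
	fmt.Println(ast.HasInternedIntNumberTerm(600))         // false
	fmt.Println(ast.InternedIntNumberTermFromString("42")) // 42

	// Iter visits each key/value pair and stops early if the callback returns true.
	vm.Iter(func(k, v ast.Value) bool {
		fmt.Println(k, "=>", v)
		return false
	})
}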
+func (vs *ValueMap) Put(k, v Value) { + if vs == nil { + panic("put on nil value map") + } + vs.hashMap.Put(k, v) +} + +// Delete removes a key k from the map. +func (vs *ValueMap) Delete(k Value) { + if vs == nil { + return + } + vs.hashMap.Delete(k) +} + +func (vs *ValueMap) String() string { + if vs == nil { + return "{}" + } + return vs.hashMap.String() +} + +func valueHash(v util.T) int { + return v.(Value).Hash() +} + +func valueEq(a, b util.T) bool { + av := a.(Value) + bv := b.(Value) + return av.Compare(bv) == 0 +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go new file mode 100644 index 000000000..2054141d3 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/parser.go @@ -0,0 +1,2766 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "math/big" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "unicode/utf8" + + "gopkg.in/yaml.v3" + + "github.com/open-policy-agent/opa/v1/ast/internal/scanner" + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/ast/location" +) + +var RegoV1CompatibleRef = Ref{VarTerm("rego"), StringTerm("v1")} + +// RegoVersion defines the Rego syntax requirements for a module. +type RegoVersion int + +const DefaultRegoVersion = RegoV1 + +const ( + RegoUndefined RegoVersion = iota + // RegoV0 is the default, original Rego syntax. + RegoV0 + // RegoV0CompatV1 requires modules to comply with both the RegoV0 and RegoV1 syntax (as when 'rego.v1' is imported in a module). + // Shortly, RegoV1 compatibility is required, but 'rego.v1' or 'future.keywords' must also be imported. + RegoV0CompatV1 + // RegoV1 is the Rego syntax enforced by OPA 1.0; e.g.: + // future.keywords part of default keyword set, and don't require imports; + // 'if' and 'contains' required in rule heads; + // (some) strict checks on by default. + RegoV1 +) + +func (v RegoVersion) Int() int { + if v == RegoV1 { + return 1 + } + return 0 +} + +func (v RegoVersion) String() string { + switch v { + case RegoV0: + return "v0" + case RegoV1: + return "v1" + case RegoV0CompatV1: + return "v0v1" + default: + return "unknown" + } +} + +func RegoVersionFromInt(i int) RegoVersion { + if i == 1 { + return RegoV1 + } + return RegoV0 +} + +// Note: This state is kept isolated from the parser so that we +// can do efficient shallow copies of these values when doing a +// save() and restore(). +type state struct { + s *scanner.Scanner + lastEnd int + skippedNL bool + tok tokens.Token + tokEnd int + lit string + loc Location + errors Errors + hints []string + comments []*Comment + wildcard int +} + +func (s *state) String() string { + return fmt.Sprintf("", s.s, s.tok, s.lit, s.loc, len(s.errors), len(s.comments)) +} + +func (s *state) Loc() *location.Location { + cpy := s.loc + return &cpy +} + +func (s *state) Text(offset, end int) []byte { + bs := s.s.Bytes() + if offset >= 0 && offset < len(bs) { + if end >= offset && end <= len(bs) { + return bs[offset:end] + } + } + return nil +} + +// Parser is used to parse Rego statements. 
+type Parser struct { + r io.Reader + s *state + po ParserOptions + cache parsedTermCache +} + +type parsedTermCacheItem struct { + t *Term + post *state // post is the post-state that's restored on a cache-hit + offset int + next *parsedTermCacheItem +} + +type parsedTermCache struct { + m *parsedTermCacheItem +} + +func (c parsedTermCache) String() string { + s := strings.Builder{} + s.WriteRune('{') + var e *parsedTermCacheItem + for e = c.m; e != nil; e = e.next { + s.WriteString(fmt.Sprintf("%v", e)) + } + s.WriteRune('}') + return s.String() +} + +func (e *parsedTermCacheItem) String() string { + return fmt.Sprintf("<%d:%v>", e.offset, e.t) +} + +// ParserOptions defines the options for parsing Rego statements. +type ParserOptions struct { + Capabilities *Capabilities + ProcessAnnotation bool + AllFutureKeywords bool + FutureKeywords []string + SkipRules bool + // RegoVersion is the version of Rego to parse for. + RegoVersion RegoVersion + unreleasedKeywords bool // TODO(sr): cleanup +} + +// EffectiveRegoVersion returns the effective RegoVersion to use for parsing. +func (po *ParserOptions) EffectiveRegoVersion() RegoVersion { + if po.RegoVersion == RegoUndefined { + return DefaultRegoVersion + } + return po.RegoVersion +} + +// NewParser creates and initializes a Parser. +func NewParser() *Parser { + p := &Parser{ + s: &state{}, + po: ParserOptions{}, + } + return p +} + +// WithFilename provides the filename for Location details +// on parsed statements. +func (p *Parser) WithFilename(filename string) *Parser { + p.s.loc.File = filename + return p +} + +// WithReader provides the io.Reader that the parser will +// use as its source. +func (p *Parser) WithReader(r io.Reader) *Parser { + p.r = r + return p +} + +// WithProcessAnnotation enables or disables the processing of +// annotations by the Parser +func (p *Parser) WithProcessAnnotation(processAnnotation bool) *Parser { + p.po.ProcessAnnotation = processAnnotation + return p +} + +// WithFutureKeywords enables "future" keywords, i.e., keywords that can +// be imported via +// +// import future.keywords.kw +// import future.keywords.other +// +// but in a more direct way. The equivalent of this import would be +// +// WithFutureKeywords("kw", "other") +func (p *Parser) WithFutureKeywords(kws ...string) *Parser { + p.po.FutureKeywords = kws + return p +} + +// WithAllFutureKeywords enables all "future" keywords, i.e., the +// ParserOption equivalent of +// +// import future.keywords +func (p *Parser) WithAllFutureKeywords(yes bool) *Parser { + p.po.AllFutureKeywords = yes + return p +} + +// withUnreleasedKeywords allows using keywords that haven't surfaced +// as future keywords (see above) yet, but have tests that require +// them to be parsed +func (p *Parser) withUnreleasedKeywords(yes bool) *Parser { + p.po.unreleasedKeywords = yes + return p +} + +// WithCapabilities sets the capabilities structure on the parser. +func (p *Parser) WithCapabilities(c *Capabilities) *Parser { + p.po.Capabilities = c + return p +} + +// WithSkipRules instructs the parser not to attempt to parse Rule statements. +func (p *Parser) WithSkipRules(skip bool) *Parser { + p.po.SkipRules = skip + return p +} + +// WithJSONOptions sets the JSON options on the parser (now a no-op). +// +// Deprecated: Use SetOptions in the json package instead, where a longer description +// of why this is deprecated also can be found. 
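Since Location no longer carries a per-node JSONOptions field and WithJSONOptions is now a no-op, the process-wide options in the ast/json package are the way to influence serialization. A hedged sketch of that replacement path, using only identifiers shown in this diff; the file name and snippet contents are illustrative assumptions.

package main

import (
	"encoding/json"
	"fmt"

	astJSON "github.com/open-policy-agent/opa/v1/ast/json"
	"github.com/open-policy-agent/opa/v1/ast/location"
)

func main() {
	// Start from the defaults and opt in to including source text when
	// locations are marshalled; the options are process-wide, not per node.
	opts := astJSON.Defaults()
	opts.MarshalOptions.IncludeLocationText = true
	astJSON.SetOptions(opts)

	loc := &location.Location{
		Text: []byte("allow if input.admin"),
		File: "example.rego",
		Row:  3,
		Col:  1,
	}

	// Location.MarshalJSON now reads astJSON.GetOptions() instead of a field
	// on the Location itself, so this picks up the settings above.
	bs, err := json.Marshal(loc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bs))
}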
+func (p *Parser) WithJSONOptions(_ *astJSON.Options) *Parser { + return p +} + +func (p *Parser) WithRegoVersion(version RegoVersion) *Parser { + p.po.RegoVersion = version + return p +} + +func (p *Parser) parsedTermCacheLookup() (*Term, *state) { + l := p.s.loc.Offset + // stop comparing once the cached offsets are lower than l + for h := p.cache.m; h != nil && h.offset >= l; h = h.next { + if h.offset == l { + return h.t, h.post + } + } + return nil, nil +} + +func (p *Parser) parsedTermCachePush(t *Term, s0 *state) { + s1 := p.save() + o0 := s0.loc.Offset + entry := parsedTermCacheItem{t: t, post: s1, offset: o0} + + // find the first one whose offset is smaller than ours + var e *parsedTermCacheItem + for e = p.cache.m; e != nil; e = e.next { + if e.offset < o0 { + break + } + } + entry.next = e + p.cache.m = &entry +} + +// futureParser returns a shallow copy of `p` with an empty +// cache, and a scanner that knows all future keywords. +// It's used to present hints in errors, when statements would +// only parse successfully if some future keyword is enabled. +func (p *Parser) futureParser() *Parser { + q := *p + q.s = p.save() + q.s.s = p.s.s.WithKeywords(allFutureKeywords) + q.cache = parsedTermCache{} + return &q +} + +// presentParser returns a shallow copy of `p` with an empty +// cache, and a scanner that knows none of the future keywords. +// It is used to successfully parse keyword imports, like +// +// import future.keywords.in +// +// even when the parser has already been informed about the +// future keyword "in". This parser won't error out because +// "in" is an identifier. +func (p *Parser) presentParser() (*Parser, map[string]tokens.Token) { + var cpy map[string]tokens.Token + q := *p + q.s = p.save() + q.s.s, cpy = p.s.s.WithoutKeywords(allFutureKeywords) + q.cache = parsedTermCache{} + return &q, cpy +} + +// Parse will read the Rego source and parse statements and +// comments as they are found. Any errors encountered while +// parsing will be accumulated and returned as a list of Errors. +func (p *Parser) Parse() ([]Statement, []*Comment, Errors) { + + if p.po.Capabilities == nil { + p.po.Capabilities = CapabilitiesForThisVersion(CapabilitiesRegoVersion(p.po.RegoVersion)) + } + + allowedFutureKeywords := map[string]tokens.Token{} + + if p.po.EffectiveRegoVersion() == RegoV1 { + if !p.po.Capabilities.ContainsFeature(FeatureRegoV1) { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: "illegal capabilities: rego_v1 feature required for parsing v1 Rego", + Location: nil, + }, + } + } + + // rego-v1 includes all v0 future keywords in the default language definition + for k, v := range futureKeywordsV0 { + allowedFutureKeywords[k] = v + } + + for _, kw := range p.po.Capabilities.FutureKeywords { + if tok, ok := futureKeywords[kw]; ok { + allowedFutureKeywords[kw] = tok + } else { + // For sake of error reporting, we still need to check that keywords in capabilities are known in v0 + if _, ok := futureKeywordsV0[kw]; !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), + Location: nil, + }, + } + } + } + } + + // Check that explicitly requested future keywords are known. 
+ for _, kw := range p.po.FutureKeywords { + if _, ok := allowedFutureKeywords[kw]; !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("unknown future keyword: %v", kw), + Location: nil, + }, + } + } + } + } else { + for _, kw := range p.po.Capabilities.FutureKeywords { + var ok bool + allowedFutureKeywords[kw], ok = allFutureKeywords[kw] + if !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw), + Location: nil, + }, + } + } + } + + if p.po.Capabilities.ContainsFeature(FeatureRegoV1) { + // rego-v1 includes all v0 future keywords in the default language definition + for k, v := range futureKeywordsV0 { + allowedFutureKeywords[k] = v + } + } + } + + var err error + p.s.s, err = scanner.New(p.r) + if err != nil { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: err.Error(), + Location: nil, + }, + } + } + + selected := map[string]tokens.Token{} + if p.po.AllFutureKeywords || p.po.EffectiveRegoVersion() == RegoV1 { + for kw, tok := range allowedFutureKeywords { + selected[kw] = tok + } + } else { + for _, kw := range p.po.FutureKeywords { + tok, ok := allowedFutureKeywords[kw] + if !ok { + return nil, nil, Errors{ + &Error{ + Code: ParseErr, + Message: fmt.Sprintf("unknown future keyword: %v", kw), + Location: nil, + }, + } + } + selected[kw] = tok + } + } + p.s.s = p.s.s.WithKeywords(selected) + + if p.po.EffectiveRegoVersion() == RegoV1 { + for kw, tok := range allowedFutureKeywords { + p.s.s.AddKeyword(kw, tok) + } + } + + // read the first token to initialize the parser + p.scan() + + var stmts []Statement + + // Read from the scanner until the last token is reached or no statements + // can be parsed. Attempt to parse package statements, import statements, + // rule statements, and then body/query statements (in that order). If a + // statement cannot be parsed, restore the parser state before trying the + // next type of statement. If a statement can be parsed, continue from that + // point trying to parse packages, imports, etc. in the same order. 
+ for p.s.tok != tokens.EOF { + + s := p.save() + + if pkg := p.parsePackage(); pkg != nil { + stmts = append(stmts, pkg) + continue + } else if len(p.s.errors) > 0 { + break + } + + p.restore(s) + s = p.save() + + if imp := p.parseImport(); imp != nil { + if RegoRootDocument.Equal(imp.Path.Value.(Ref)[0]) { + p.regoV1Import(imp) + } + + if FutureRootDocument.Equal(imp.Path.Value.(Ref)[0]) { + p.futureImport(imp, allowedFutureKeywords) + } + + stmts = append(stmts, imp) + continue + } else if len(p.s.errors) > 0 { + break + } + + p.restore(s) + + if !p.po.SkipRules { + s = p.save() + + if rules := p.parseRules(); rules != nil { + for i := range rules { + stmts = append(stmts, rules[i]) + } + continue + } else if len(p.s.errors) > 0 { + break + } + + p.restore(s) + } + + if body := p.parseQuery(true, tokens.EOF); body != nil { + stmts = append(stmts, body) + continue + } + + break + } + + if p.po.ProcessAnnotation { + stmts = p.parseAnnotations(stmts) + } + + return stmts, p.s.comments, p.s.errors +} + +func (p *Parser) parseAnnotations(stmts []Statement) []Statement { + + annotStmts, errs := parseAnnotations(p.s.comments) + for _, err := range errs { + p.error(err.Location, err.Message) + } + + for _, annotStmt := range annotStmts { + stmts = append(stmts, annotStmt) + } + + return stmts +} + +func parseAnnotations(comments []*Comment) ([]*Annotations, Errors) { + + var hint = []byte("METADATA") + var curr *metadataParser + var blocks []*metadataParser + + for i := 0; i < len(comments); i++ { + if curr != nil { + if comments[i].Location.Row == comments[i-1].Location.Row+1 && comments[i].Location.Col == 1 { + curr.Append(comments[i]) + continue + } + curr = nil + } + if bytes.HasPrefix(bytes.TrimSpace(comments[i].Text), hint) { + curr = newMetadataParser(comments[i].Location) + blocks = append(blocks, curr) + } + } + + var stmts []*Annotations + var errs Errors + for _, b := range blocks { + a, err := b.Parse() + if err != nil { + errs = append(errs, &Error{ + Code: ParseErr, + Message: err.Error(), + Location: b.loc, + }) + } else { + stmts = append(stmts, a) + } + } + + return stmts, errs +} + +func (p *Parser) parsePackage() *Package { + + var pkg Package + pkg.SetLoc(p.s.Loc()) + + if p.s.tok != tokens.Package { + return nil + } + + p.scan() + if p.s.tok != tokens.Ident { + p.illegalToken() + return nil + } + + term := p.parseTerm() + + if term != nil { + switch v := term.Value.(type) { + case Var: + pkg.Path = Ref{ + DefaultRootDocument.Copy().SetLocation(term.Location), + StringTerm(string(v)).SetLocation(term.Location), + } + case Ref: + pkg.Path = make(Ref, len(v)+1) + pkg.Path[0] = DefaultRootDocument.Copy().SetLocation(v[0].Location) + first, ok := v[0].Value.(Var) + if !ok { + p.errorf(v[0].Location, "unexpected %v token: expecting var", ValueName(v[0].Value)) + return nil + } + pkg.Path[1] = StringTerm(string(first)).SetLocation(v[0].Location) + for i := 2; i < len(pkg.Path); i++ { + switch v[i-1].Value.(type) { + case String: + pkg.Path[i] = v[i-1] + default: + p.errorf(v[i-1].Location, "unexpected %v token: expecting string", ValueName(v[i-1].Value)) + return nil + } + } + default: + p.illegalToken() + return nil + } + } + + if pkg.Path == nil { + if len(p.s.errors) == 0 { + p.error(p.s.Loc(), "expected path") + } + return nil + } + + return &pkg +} + +func (p *Parser) parseImport() *Import { + + var imp Import + imp.SetLoc(p.s.Loc()) + + if p.s.tok != tokens.Import { + return nil + } + + p.scan() + if p.s.tok != tokens.Ident { + p.error(p.s.Loc(), "expected ident") + return 
nil + } + q, prev := p.presentParser() + term := q.parseTerm() + if term != nil { + switch v := term.Value.(type) { + case Var: + imp.Path = RefTerm(term).SetLocation(term.Location) + case Ref: + for i := 1; i < len(v); i++ { + if _, ok := v[i].Value.(String); !ok { + p.errorf(v[i].Location, "unexpected %v token: expecting string", ValueName(v[i].Value)) + return nil + } + } + imp.Path = term + } + } + // keep advanced parser state, reset known keywords + p.s = q.s + p.s.s = q.s.s.WithKeywords(prev) + + if imp.Path == nil { + p.error(p.s.Loc(), "expected path") + return nil + } + + path := imp.Path.Value.(Ref) + + switch { + case RootDocumentNames.Contains(path[0]): + case FutureRootDocument.Equal(path[0]): + case RegoRootDocument.Equal(path[0]): + default: + p.hint("if this is unexpected, try updating OPA") + p.errorf(imp.Path.Location, "unexpected import path, must begin with one of: %v, got: %v", + RootDocumentNames.Union(NewSet(FutureRootDocument, RegoRootDocument)), + path[0]) + return nil + } + + if p.s.tok == tokens.As { + p.scan() + + if p.s.tok != tokens.Ident { + p.illegal("expected var") + return nil + } + + if alias := p.parseTerm(); alias != nil { + v, ok := alias.Value.(Var) + if ok { + imp.Alias = v + return &imp + } + } + p.illegal("expected var") + return nil + } + + return &imp +} + +func (p *Parser) parseRules() []*Rule { + + var rule Rule + rule.SetLoc(p.s.Loc()) + + if p.s.tok == tokens.Default { + p.scan() + rule.Default = true + } + + if p.s.tok != tokens.Ident { + return nil + } + + usesContains := false + if rule.Head, usesContains = p.parseHead(rule.Default); rule.Head == nil { + return nil + } + + if usesContains { + rule.Head.keywords = append(rule.Head.keywords, tokens.Contains) + } + + if rule.Default { + if !p.validateDefaultRuleValue(&rule) { + return nil + } + + if len(rule.Head.Args) > 0 { + if !p.validateDefaultRuleArgs(&rule) { + return nil + } + } + + rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) + return []*Rule{&rule} + } + + // back-compat with `p[x] { ... }`` + hasIf := p.s.tok == tokens.If + + // p[x] if ... becomes a single-value rule p[x] + if hasIf && !usesContains && len(rule.Head.Ref()) == 2 { + if !rule.Head.Ref()[1].IsGround() && len(rule.Head.Args) == 0 { + rule.Head.Key = rule.Head.Ref()[1] + } + + if rule.Head.Value == nil { + rule.Head.generatedValue = true + rule.Head.Value = BooleanTerm(true).SetLocation(rule.Head.Location) + } else { + // p[x] = y if becomes a single-value rule p[x] with value y, but needs name for compat + v, ok := rule.Head.Ref()[0].Value.(Var) + if !ok { + return nil + } + rule.Head.Name = v + } + } + + // p[x] becomes a multi-value rule p + if !hasIf && !usesContains && + len(rule.Head.Args) == 0 && // not a function + len(rule.Head.Ref()) == 2 { // ref like 'p[x]' + v, ok := rule.Head.Ref()[0].Value.(Var) + if !ok { + return nil + } + rule.Head.Name = v + rule.Head.Key = rule.Head.Ref()[1] + if rule.Head.Value == nil { + rule.Head.SetRef(rule.Head.Ref()[:len(rule.Head.Ref())-1]) + } + } + + switch { + case hasIf: + rule.Head.keywords = append(rule.Head.keywords, tokens.If) + p.scan() + s := p.save() + if expr := p.parseLiteral(); expr != nil { + // NOTE(sr): set literals are never false or undefined, so parsing this as + // p if { true } + // ^^^^^^^^ set of one element, `true` + // isn't valid. 
+ isSetLiteral := false + if t, ok := expr.Terms.(*Term); ok { + _, isSetLiteral = t.Value.(Set) + } + // expr.Term is []*Term or Every + if !isSetLiteral { + rule.Body.Append(expr) + break + } + } + + // parsing as literal didn't work out, expect '{ BODY }' + p.restore(s) + fallthrough + + case p.s.tok == tokens.LBrace: + p.scan() + if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { + return nil + } + p.scan() + + case usesContains: + rule.Body = NewBody(NewExpr(BooleanTerm(true).SetLocation(rule.Location)).SetLocation(rule.Location)) + rule.generatedBody = true + rule.Location = rule.Head.Location + + return []*Rule{&rule} + + default: + return nil + } + + if p.s.tok == tokens.Else { + if r := rule.Head.Ref(); len(r) > 1 && !r.IsGround() { + p.error(p.s.Loc(), "else keyword cannot be used on rules with variables in head") + return nil + } + if rule.Head.Key != nil { + p.error(p.s.Loc(), "else keyword cannot be used on multi-value rules") + return nil + } + + if rule.Else = p.parseElse(rule.Head); rule.Else == nil { + return nil + } + } + + rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) + + rules := []*Rule{&rule} + + for p.s.tok == tokens.LBrace { + + if rule.Else != nil { + p.error(p.s.Loc(), "expected else keyword") + return nil + } + + loc := p.s.Loc() + + p.scan() + var next Rule + + if next.Body = p.parseBody(tokens.RBrace); next.Body == nil { + return nil + } + p.scan() + + loc.Text = p.s.Text(loc.Offset, p.s.lastEnd) + next.SetLoc(loc) + + // Chained rule head's keep the original + // rule's head AST but have their location + // set to the rule body. + next.Head = rule.Head.Copy() + next.Head.keywords = rule.Head.keywords + for i := range next.Head.Args { + if v, ok := next.Head.Args[i].Value.(Var); ok && v.IsWildcard() { + next.Head.Args[i].Value = Var(p.genwildcard()) + } + } + setLocRecursive(next.Head, loc) + + rules = append(rules, &next) + } + + return rules +} + +func (p *Parser) parseElse(head *Head) *Rule { + + var rule Rule + rule.SetLoc(p.s.Loc()) + + rule.Head = head.Copy() + rule.Head.generatedValue = false + for i := range rule.Head.Args { + if v, ok := rule.Head.Args[i].Value.(Var); ok && v.IsWildcard() { + rule.Head.Args[i].Value = Var(p.genwildcard()) + } + } + rule.Head.SetLoc(p.s.Loc()) + + defer func() { + rule.Location.Text = p.s.Text(rule.Location.Offset, p.s.lastEnd) + }() + + p.scan() + + switch p.s.tok { + case tokens.LBrace, tokens.If: // no value, but a body follows directly + rule.Head.generatedValue = true + rule.Head.Value = BooleanTerm(true) + case tokens.Assign, tokens.Unify: + rule.Head.Assign = tokens.Assign == p.s.tok + p.scan() + rule.Head.Value = p.parseTermInfixCall() + if rule.Head.Value == nil { + return nil + } + rule.Head.Location.Text = p.s.Text(rule.Head.Location.Offset, p.s.lastEnd) + default: + p.illegal("expected else value term or rule body") + return nil + } + + hasIf := p.s.tok == tokens.If + hasLBrace := p.s.tok == tokens.LBrace + + if !hasIf && !hasLBrace { + rule.Body = NewBody(NewExpr(BooleanTerm(true))) + rule.generatedBody = true + setLocRecursive(rule.Body, rule.Location) + return &rule + } + + if hasIf { + rule.Head.keywords = append(rule.Head.keywords, tokens.If) + p.scan() + } + + if p.s.tok == tokens.LBrace { + p.scan() + if rule.Body = p.parseBody(tokens.RBrace); rule.Body == nil { + return nil + } + p.scan() + } else if p.s.tok != tokens.EOF { + expr := p.parseLiteral() + if expr == nil { + return nil + } + rule.Body.Append(expr) + setLocRecursive(rule.Body, rule.Location) + } else { + 
p.illegal("rule body expected") + return nil + } + + if p.s.tok == tokens.Else { + if rule.Else = p.parseElse(head); rule.Else == nil { + return nil + } + } + return &rule +} + +func (p *Parser) parseHead(defaultRule bool) (*Head, bool) { + head := &Head{} + loc := p.s.Loc() + defer func() { + if head != nil { + head.SetLoc(loc) + head.Location.Text = p.s.Text(head.Location.Offset, p.s.lastEnd) + } + }() + + term := p.parseVar() + if term == nil { + return nil, false + } + + ref := p.parseTermFinish(term, true) + if ref == nil { + p.illegal("expected rule head name") + return nil, false + } + + switch x := ref.Value.(type) { + case Var: + // TODO + head = VarHead(x, ref.Location, nil) + case Ref: + head = RefHead(x) + case Call: + op, args := x[0], x[1:] + var ref Ref + switch y := op.Value.(type) { + case Var: + ref = Ref{op} + case Ref: + if _, ok := y[0].Value.(Var); !ok { + p.illegal("rule head ref %v invalid", y) + return nil, false + } + ref = y + } + head = RefHead(ref) + head.Args = append([]*Term{}, args...) + + default: + return nil, false + } + + name := head.Ref().String() + + switch p.s.tok { + case tokens.Contains: // NOTE: no Value for `contains` heads, we return here + // Catch error case of using 'contains' with a function definition rule head. + if head.Args != nil { + p.illegal("the contains keyword can only be used with multi-value rule definitions (e.g., %s contains { ... })", name) + } + p.scan() + head.Key = p.parseTermInfixCall() + if head.Key == nil { + p.illegal("expected rule key term (e.g., %s contains { ... })", name) + } + return head, true + + case tokens.Unify: + p.scan() + head.Value = p.parseTermInfixCall() + if head.Value == nil { + // FIX HEAD.String() + p.illegal("expected rule value term (e.g., %s[%s] = { ... })", name, head.Key) + } + case tokens.Assign: + p.scan() + head.Assign = true + head.Value = p.parseTermInfixCall() + if head.Value == nil { + switch { + case len(head.Args) > 0: + p.illegal("expected function value term (e.g., %s(...) := { ... })", name) + case head.Key != nil: + p.illegal("expected partial rule value term (e.g., %s[...] := { ... })", name) + case defaultRule: + p.illegal("expected default rule value term (e.g., default %s := )", name) + default: + p.illegal("expected rule value term (e.g., %s := { ... 
})", name) + } + } + } + + if head.Value == nil && head.Key == nil { + if len(head.Ref()) != 2 || len(head.Args) > 0 { + head.generatedValue = true + head.Value = BooleanTerm(true).SetLocation(head.Location) + } + } + return head, false +} + +func (p *Parser) parseBody(end tokens.Token) Body { + return p.parseQuery(false, end) +} + +func (p *Parser) parseQuery(requireSemi bool, end tokens.Token) Body { + body := Body{} + + if p.s.tok == end { + p.error(p.s.Loc(), "found empty body") + return nil + } + + for { + expr := p.parseLiteral() + if expr == nil { + return nil + } + + body.Append(expr) + + if p.s.tok == tokens.Semicolon { + p.scan() + continue + } + + if p.s.tok == end || requireSemi { + return body + } + + if !p.s.skippedNL { + // If there was already an error then don't pile this one on + if len(p.s.errors) == 0 { + p.illegal(`expected \n or %s or %s`, tokens.Semicolon, end) + } + return nil + } + } +} + +func (p *Parser) parseLiteral() (expr *Expr) { + + offset := p.s.loc.Offset + loc := p.s.Loc() + + defer func() { + if expr != nil { + loc.Text = p.s.Text(offset, p.s.lastEnd) + expr.SetLoc(loc) + } + }() + + var negated bool + if p.s.tok == tokens.Not { + p.scan() + negated = true + } + + switch p.s.tok { + case tokens.Some: + if negated { + p.illegal("illegal negation of 'some'") + return nil + } + return p.parseSome() + case tokens.Every: + if negated { + p.illegal("illegal negation of 'every'") + return nil + } + return p.parseEvery() + default: + s := p.save() + expr := p.parseExpr() + if expr != nil { + expr.Negated = negated + if p.s.tok == tokens.With { + if expr.With = p.parseWith(); expr.With == nil { + return nil + } + } + // If we find a plain `every` identifier, attempt to parse an every expression, + // add hint if it succeeds. + if term, ok := expr.Terms.(*Term); ok && Var("every").Equal(term.Value) { + var hint bool + t := p.save() + p.restore(s) + if expr := p.futureParser().parseEvery(); expr != nil { + _, hint = expr.Terms.(*Every) + } + p.restore(t) + if hint { + p.hint("`import future.keywords.every` for `every x in xs { ... 
}` expressions") + } + } + return expr + } + return nil + } +} + +func (p *Parser) parseWith() []*With { + + withs := []*With{} + + for { + + with := With{ + Location: p.s.Loc(), + } + p.scan() + + if p.s.tok != tokens.Ident { + p.illegal("expected ident") + return nil + } + + with.Target = p.parseTerm() + if with.Target == nil { + return nil + } + + switch with.Target.Value.(type) { + case Ref, Var: + break + default: + p.illegal("expected with target path") + } + + if p.s.tok != tokens.As { + p.illegal("expected as keyword") + return nil + } + + p.scan() + + if with.Value = p.parseTermInfixCall(); with.Value == nil { + return nil + } + + with.Location.Text = p.s.Text(with.Location.Offset, p.s.lastEnd) + + withs = append(withs, &with) + + if p.s.tok != tokens.With { + break + } + } + + return withs +} + +func (p *Parser) parseSome() *Expr { + + decl := &SomeDecl{} + decl.SetLoc(p.s.Loc()) + + // Attempt to parse "some x in xs", which will end up in + // SomeDecl{Symbols: ["member(x, xs)"]} + s := p.save() + p.scan() + if term := p.parseTermInfixCall(); term != nil { + if call, ok := term.Value.(Call); ok { + switch call[0].String() { + case Member.Name: + if len(call) != 3 { + p.illegal("illegal domain") + return nil + } + case MemberWithKey.Name: + if len(call) != 4 { + p.illegal("illegal domain") + return nil + } + default: + p.illegal("expected `x in xs` or `x, y in xs` expression") + return nil + } + + decl.Symbols = []*Term{term} + expr := NewExpr(decl).SetLocation(decl.Location) + if p.s.tok == tokens.With { + if expr.With = p.parseWith(); expr.With == nil { + return nil + } + } + return expr + } + } + + p.restore(s) + s = p.save() // new copy for later + var hint bool + p.scan() + if term := p.futureParser().parseTermInfixCall(); term != nil { + if call, ok := term.Value.(Call); ok { + switch call[0].String() { + case Member.Name, MemberWithKey.Name: + hint = true + } + } + } + + // go on as before, it's `some x[...]` or illegal + p.restore(s) + if hint { + p.hint("`import future.keywords.in` for `some x in xs` expressions") + } + + for { // collecting var args + + p.scan() + + if p.s.tok != tokens.Ident { + p.illegal("expected var") + return nil + } + + decl.Symbols = append(decl.Symbols, p.parseVar()) + + p.scan() + + if p.s.tok != tokens.Comma { + break + } + } + + return NewExpr(decl).SetLocation(decl.Location) +} + +func (p *Parser) parseEvery() *Expr { + qb := &Every{} + qb.SetLoc(p.s.Loc()) + + // TODO(sr): We'd get more accurate error messages if we didn't rely on + // parseTermInfixCall here, but parsed "var [, var] in term" manually. + p.scan() + term := p.parseTermInfixCall() + if term == nil { + return nil + } + call, ok := term.Value.(Call) + if !ok { + p.illegal("expected `x[, y] in xs { ... }` expression") + return nil + } + switch call[0].String() { + case Member.Name: // x in xs + if len(call) != 3 { + p.illegal("illegal domain") + return nil + } + qb.Value = call[1] + qb.Domain = call[2] + case MemberWithKey.Name: // k, v in xs + if len(call) != 4 { + p.illegal("illegal domain") + return nil + } + qb.Key = call[1] + qb.Value = call[2] + qb.Domain = call[3] + if _, ok := qb.Key.Value.(Var); !ok { + p.illegal("expected key to be a variable") + return nil + } + default: + p.illegal("expected `x[, y] in xs { ... }` expression") + return nil + } + if _, ok := qb.Value.Value.(Var); !ok { + p.illegal("expected value to be a variable") + return nil + } + if p.s.tok == tokens.LBrace { // every x in xs { ... 
} + p.scan() + body := p.parseBody(tokens.RBrace) + if body == nil { + return nil + } + p.scan() + qb.Body = body + expr := NewExpr(qb).SetLocation(qb.Location) + + if p.s.tok == tokens.With { + if expr.With = p.parseWith(); expr.With == nil { + return nil + } + } + return expr + } + + p.illegal("missing body") + return nil +} + +func (p *Parser) parseExpr() *Expr { + + lhs := p.parseTermInfixCall() + if lhs == nil { + return nil + } + + if op := p.parseTermOp(tokens.Assign, tokens.Unify); op != nil { + if rhs := p.parseTermInfixCall(); rhs != nil { + return NewExpr([]*Term{op, lhs, rhs}) + } + return nil + } + + // NOTE(tsandall): the top-level call term is converted to an expr because + // the evaluator does not support the call term type (nested calls are + // rewritten by the compiler.) + if call, ok := lhs.Value.(Call); ok { + return NewExpr([]*Term(call)) + } + + return NewExpr(lhs) +} + +// parseTermInfixCall consumes the next term from the input and returns it. If a +// term cannot be parsed the return value is nil and error will be recorded. The +// scanner will be advanced to the next token before returning. +// By starting out with infix relations (==, !=, <, etc) and further calling the +// other binary operators (|, &, arithmetics), it constitutes the binding +// precedence. +func (p *Parser) parseTermInfixCall() *Term { + return p.parseTermIn(nil, true, p.s.loc.Offset) +} + +func (p *Parser) parseTermInfixCallInList() *Term { + return p.parseTermIn(nil, false, p.s.loc.Offset) +} + +// use static references to avoid allocations, and +// copy them to the call term only when needed +var memberWithKeyRef = MemberWithKey.Ref() +var memberRef = Member.Ref() + +func (p *Parser) parseTermIn(lhs *Term, keyVal bool, offset int) *Term { + // NOTE(sr): `in` is a bit special: besides `lhs in rhs`, it also + // supports `key, val in rhs`, so it can have an optional second lhs. + // `keyVal` triggers if we attempt to parse a second lhs argument (`mhs`). 
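+ // For example, `x in xs` is parsed into a single Call term whose operator
+ // is the Member ref and whose operands are (x, xs), while `k, v in xs`
+ // becomes a MemberWithKey call with operands (k, v, xs). A chain such as
+ // `a in b in c` associates to the left, i.e. `(a in b) in c`, because the
+ // freshly built call is fed back in below as the next lhs.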
+ if lhs == nil { + lhs = p.parseTermRelation(nil, offset) + } + if lhs != nil { + if keyVal && p.s.tok == tokens.Comma { // second "lhs", or "middle hand side" + s := p.save() + p.scan() + if mhs := p.parseTermRelation(nil, offset); mhs != nil { + + if op := p.parseTermOpName(memberWithKeyRef, tokens.In); op != nil { + if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, mhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.In: + return p.parseTermIn(call, keyVal, offset) + default: + return call + } + } + } + } + p.restore(s) + } + if op := p.parseTermOpName(memberRef, tokens.In); op != nil { + if rhs := p.parseTermRelation(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.In: + return p.parseTermIn(call, keyVal, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTermRelation(lhs *Term, offset int) *Term { + if lhs == nil { + lhs = p.parseTermOr(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte); op != nil { + if rhs := p.parseTermOr(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Equal, tokens.Neq, tokens.Lt, tokens.Gt, tokens.Lte, tokens.Gte: + return p.parseTermRelation(call, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTermOr(lhs *Term, offset int) *Term { + if lhs == nil { + lhs = p.parseTermAnd(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.Or); op != nil { + if rhs := p.parseTermAnd(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Or: + return p.parseTermOr(call, offset) + default: + return call + } + } + } + return lhs + } + return nil +} + +func (p *Parser) parseTermAnd(lhs *Term, offset int) *Term { + if lhs == nil { + lhs = p.parseTermArith(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.And); op != nil { + if rhs := p.parseTermArith(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.And: + return p.parseTermAnd(call, offset) + default: + return call + } + } + } + return lhs + } + return nil +} + +func (p *Parser) parseTermArith(lhs *Term, offset int) *Term { + if lhs == nil { + lhs = p.parseTermFactor(nil, offset) + } + if lhs != nil { + if op := p.parseTermOp(tokens.Add, tokens.Sub); op != nil { + if rhs := p.parseTermFactor(nil, p.s.loc.Offset); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Add, tokens.Sub: + return p.parseTermArith(call, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTermFactor(lhs *Term, offset int) *Term { + if lhs == nil { + lhs = p.parseTerm() + } + if lhs != nil { + if op := p.parseTermOp(tokens.Mul, tokens.Quo, tokens.Rem); op != nil { + if rhs := p.parseTerm(); rhs != nil { + call := p.setLoc(CallTerm(op, lhs, rhs), lhs.Location, offset, p.s.lastEnd) + switch p.s.tok { + case tokens.Mul, tokens.Quo, tokens.Rem: + return p.parseTermFactor(call, offset) + default: + return call + } + } + } + } + return lhs +} + +func (p *Parser) parseTerm() *Term { + if term, s := 
p.parsedTermCacheLookup(); s != nil { + p.restore(s) + return term + } + s0 := p.save() + + var term *Term + switch p.s.tok { + case tokens.Null: + term = NullTerm().SetLocation(p.s.Loc()) + case tokens.True: + term = BooleanTerm(true).SetLocation(p.s.Loc()) + case tokens.False: + term = BooleanTerm(false).SetLocation(p.s.Loc()) + case tokens.Sub, tokens.Dot, tokens.Number: + term = p.parseNumber() + case tokens.String: + term = p.parseString() + case tokens.Ident, tokens.Contains: // NOTE(sr): contains anywhere BUT in rule heads gets no special treatment + term = p.parseVar() + case tokens.LBrack: + term = p.parseArray() + case tokens.LBrace: + term = p.parseSetOrObject() + case tokens.LParen: + offset := p.s.loc.Offset + p.scan() + if r := p.parseTermInfixCall(); r != nil { + if p.s.tok == tokens.RParen { + r.Location.Text = p.s.Text(offset, p.s.tokEnd) + term = r + } else { + p.error(p.s.Loc(), "non-terminated expression") + } + } + default: + p.illegalToken() + } + + term = p.parseTermFinish(term, false) + p.parsedTermCachePush(term, s0) + return term +} + +func (p *Parser) parseTermFinish(head *Term, skipws bool) *Term { + if head == nil { + return nil + } + offset := p.s.loc.Offset + p.doScan(skipws) + + switch p.s.tok { + case tokens.LParen, tokens.Dot, tokens.LBrack: + return p.parseRef(head, offset) + case tokens.Whitespace: + p.scan() + fallthrough + default: + if _, ok := head.Value.(Var); ok && RootDocumentNames.Contains(head) { + return RefTerm(head).SetLocation(head.Location) + } + return head + } +} + +func (p *Parser) parseNumber() *Term { + var prefix string + loc := p.s.Loc() + if p.s.tok == tokens.Sub { + prefix = "-" + p.scan() + switch p.s.tok { + case tokens.Number, tokens.Dot: + break + default: + p.illegal("expected number") + return nil + } + } + if p.s.tok == tokens.Dot { + prefix += "." + p.scan() + if p.s.tok != tokens.Number { + p.illegal("expected number") + return nil + } + } + + // Check for multiple leading 0's, parsed by math/big.Float.Parse as decimal 0: + // https://golang.org/pkg/math/big/#Float.Parse + if ((len(prefix) != 0 && prefix[0] == '-') || len(prefix) == 0) && + len(p.s.lit) > 1 && p.s.lit[0] == '0' && p.s.lit[1] == '0' { + p.illegal("expected number") + return nil + } + + // Ensure that the number is valid + s := prefix + p.s.lit + f, ok := new(big.Float).SetString(s) + if !ok { + p.illegal("invalid float") + return nil + } + + // Put limit on size of exponent to prevent non-linear cost of String() + // function on big.Float from causing denial of service: https://github.com/golang/go/issues/11068 + // + // n == sign * mantissa * 2^exp + // 0.5 <= mantissa < 1.0 + // + // The limit is arbitrary. + exp := f.MantExp(nil) + if exp > 1e5 || exp < -1e5 || f.IsInf() { // +/- inf, exp is 0 + p.error(p.s.Loc(), "number too big") + return nil + } + + // Note: Use the original string, do *not* round trip from + // the big.Float as it can cause precision loss. 
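+ // (For instance, a literal such as 0.1 has no exact binary representation,
+ // so formatting the big.Float back to decimal could yield different digits
+ // than the ones written in the source.)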
+ return NumberTerm(json.Number(s)).SetLocation(loc) +} + +func (p *Parser) parseString() *Term { + if p.s.lit[0] == '"' { + var s string + err := json.Unmarshal([]byte(p.s.lit), &s) + if err != nil { + p.errorf(p.s.Loc(), "illegal string literal: %s", p.s.lit) + return nil + } + term := StringTerm(s).SetLocation(p.s.Loc()) + return term + } + return p.parseRawString() +} + +func (p *Parser) parseRawString() *Term { + if len(p.s.lit) < 2 { + return nil + } + term := StringTerm(p.s.lit[1 : len(p.s.lit)-1]).SetLocation(p.s.Loc()) + return term +} + +// this is the name to use for instantiating an empty set, e.g., `set()`. +var setConstructor = RefTerm(VarTerm("set")) + +func (p *Parser) parseCall(operator *Term, offset int) (term *Term) { + + loc := operator.Location + var end int + + defer func() { + p.setLoc(term, loc, offset, end) + }() + + p.scan() // steps over '(' + + if p.s.tok == tokens.RParen { // no args, i.e. set() or any.func() + end = p.s.tokEnd + p.scanWS() + if operator.Equal(setConstructor) { + return SetTerm() + } + return CallTerm(operator) + } + + if r := p.parseTermList(tokens.RParen, []*Term{operator}); r != nil { + end = p.s.tokEnd + p.scanWS() + return CallTerm(r...) + } + + return nil +} + +func (p *Parser) parseRef(head *Term, offset int) (term *Term) { + + loc := head.Location + var end int + + defer func() { + p.setLoc(term, loc, offset, end) + }() + + switch h := head.Value.(type) { + case Var, *Array, Object, Set, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: + // ok + default: + p.errorf(loc, "illegal ref (head cannot be %v)", ValueName(h)) + } + + ref := []*Term{head} + + for { + switch p.s.tok { + case tokens.Dot: + p.scanWS() + if p.s.tok != tokens.Ident { + p.illegal("expected %v", tokens.Ident) + return nil + } + ref = append(ref, StringTerm(p.s.lit).SetLocation(p.s.Loc())) + p.scanWS() + case tokens.LParen: + term = p.parseCall(p.setLoc(RefTerm(ref...), loc, offset, p.s.loc.Offset), offset) + if term != nil { + switch p.s.tok { + case tokens.Whitespace: + p.scan() + end = p.s.lastEnd + return term + case tokens.Dot, tokens.LBrack: + term = p.parseRef(term, offset) + } + } + end = p.s.tokEnd + return term + case tokens.LBrack: + p.scan() + if term := p.parseTermInfixCall(); term != nil { + if p.s.tok != tokens.RBrack { + p.illegal("expected %v", tokens.LBrack) + return nil + } + ref = append(ref, term) + p.scanWS() + } else { + return nil + } + case tokens.Whitespace: + end = p.s.lastEnd + p.scan() + return RefTerm(ref...) + default: + end = p.s.lastEnd + return RefTerm(ref...) + } + } +} + +func (p *Parser) parseArray() (term *Term) { + + loc := p.s.Loc() + offset := p.s.loc.Offset + + defer func() { + p.setLoc(term, loc, offset, p.s.tokEnd) + }() + + p.scan() + + if p.s.tok == tokens.RBrack { + return ArrayTerm() + } + + potentialComprehension := true + + // Skip leading commas, eg [, x, y] + // Supported for backwards compatibility. In the future + // we should make this a parse error. + if p.s.tok == tokens.Comma { + potentialComprehension = false + p.scan() + } + + s := p.save() + + // NOTE(tsandall): The parser cannot attempt a relational term here because + // of ambiguity around comprehensions. For example, given: + // + // {1 | 1} + // + // Does this represent a set comprehension or a set containing binary OR + // call? We resolve the ambiguity by prioritizing comprehensions. 
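+ //
+ // Concretely, `[x | x := 1]` is taken to be an array comprehension rather
+ // than an array containing a `|` call; the latter reading requires
+ // parentheses, e.g. `[(a | b)]`.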
+ head := p.parseTerm() + + if head == nil { + return nil + } + + switch p.s.tok { + case tokens.RBrack: + return ArrayTerm(head) + case tokens.Comma: + p.scan() + if terms := p.parseTermList(tokens.RBrack, []*Term{head}); terms != nil { + return NewTerm(NewArray(terms...)) + } + return nil + case tokens.Or: + if potentialComprehension { + // Try to parse as if it is an array comprehension + p.scan() + if body := p.parseBody(tokens.RBrack); body != nil { + return ArrayComprehensionTerm(head, body) + } + if p.s.tok != tokens.Comma { + return nil + } + } + // fall back to parsing as a normal array definition + } + + p.restore(s) + + if terms := p.parseTermList(tokens.RBrack, nil); terms != nil { + return NewTerm(NewArray(terms...)) + } + return nil +} + +func (p *Parser) parseSetOrObject() (term *Term) { + loc := p.s.Loc() + offset := p.s.loc.Offset + + defer func() { + p.setLoc(term, loc, offset, p.s.tokEnd) + }() + + p.scan() + + if p.s.tok == tokens.RBrace { + return ObjectTerm() + } + + potentialComprehension := true + + // Skip leading commas, eg {, x, y} + // Supported for backwards compatibility. In the future + // we should make this a parse error. + if p.s.tok == tokens.Comma { + potentialComprehension = false + p.scan() + } + + s := p.save() + + // Try parsing just a single term first to give comprehensions higher + // priority to "or" calls in ambiguous situations. Eg: { a | b } + // will be a set comprehension. + // + // Note: We don't know yet if it is a set or object being defined. + head := p.parseTerm() + if head == nil { + return nil + } + + switch p.s.tok { + case tokens.Or: + if potentialComprehension { + return p.parseSet(s, head, potentialComprehension) + } + case tokens.RBrace, tokens.Comma: + return p.parseSet(s, head, potentialComprehension) + case tokens.Colon: + return p.parseObject(head, potentialComprehension) + } + + p.restore(s) + + head = p.parseTermInfixCallInList() + if head == nil { + return nil + } + + switch p.s.tok { + case tokens.RBrace, tokens.Comma: + return p.parseSet(s, head, false) + case tokens.Colon: + // It still might be an object comprehension, eg { a+1: b | ... } + return p.parseObject(head, potentialComprehension) + } + + p.illegal("non-terminated set") + return nil +} + +func (p *Parser) parseSet(s *state, head *Term, potentialComprehension bool) *Term { + switch p.s.tok { + case tokens.RBrace: + return SetTerm(head) + case tokens.Comma: + p.scan() + if terms := p.parseTermList(tokens.RBrace, []*Term{head}); terms != nil { + return SetTerm(terms...) + } + case tokens.Or: + if potentialComprehension { + // Try to parse as if it is a set comprehension + p.scan() + if body := p.parseBody(tokens.RBrace); body != nil { + return SetComprehensionTerm(head, body) + } + if p.s.tok != tokens.Comma { + return nil + } + } + // Fall back to parsing as normal set definition + p.restore(s) + if terms := p.parseTermList(tokens.RBrace, nil); terms != nil { + return SetTerm(terms...) + } + } + return nil +} + +func (p *Parser) parseObject(k *Term, potentialComprehension bool) *Term { + // NOTE(tsandall): Assumption: this function is called after parsing the key + // of the head element and then receiving a colon token from the scanner. + // Advance beyond the colon and attempt to parse an object. + if p.s.tok != tokens.Colon { + panic("expected colon") + } + p.scan() + + s := p.save() + + // NOTE(sr): We first try to parse the value as a term (`v`), and see + // if we can parse `{ x: v | ...}` as a comprehension. 
+ // However, if we encounter either a Comma or an RBace, it cannot be + // parsed as a comprehension -- so we save double work further down + // where `parseObjectFinish(k, v, false)` would only exercise the + // same code paths once more. + v := p.parseTerm() + if v == nil { + return nil + } + + potentialRelation := true + if potentialComprehension { + switch p.s.tok { + case tokens.RBrace, tokens.Comma: + potentialRelation = false + fallthrough + case tokens.Or: + if term := p.parseObjectFinish(k, v, true); term != nil { + return term + } + } + } + + p.restore(s) + + if potentialRelation { + v := p.parseTermInfixCallInList() + if v == nil { + return nil + } + + switch p.s.tok { + case tokens.RBrace, tokens.Comma: + return p.parseObjectFinish(k, v, false) + } + } + + p.illegal("non-terminated object") + return nil +} + +func (p *Parser) parseObjectFinish(key, val *Term, potentialComprehension bool) *Term { + switch p.s.tok { + case tokens.RBrace: + return ObjectTerm([2]*Term{key, val}) + case tokens.Or: + if potentialComprehension { + p.scan() + if body := p.parseBody(tokens.RBrace); body != nil { + return ObjectComprehensionTerm(key, val, body) + } + } else { + p.illegal("non-terminated object") + } + case tokens.Comma: + p.scan() + if r := p.parseTermPairList(tokens.RBrace, [][2]*Term{{key, val}}); r != nil { + return ObjectTerm(r...) + } + } + return nil +} + +func (p *Parser) parseTermList(end tokens.Token, r []*Term) []*Term { + if p.s.tok == end { + return r + } + for { + term := p.parseTermInfixCallInList() + if term != nil { + r = append(r, term) + switch p.s.tok { + case end: + return r + case tokens.Comma: + p.scan() + if p.s.tok == end { + return r + } + continue + default: + p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end)) + return nil + } + } + return nil + } +} + +func (p *Parser) parseTermPairList(end tokens.Token, r [][2]*Term) [][2]*Term { + if p.s.tok == end { + return r + } + for { + key := p.parseTermInfixCallInList() + if key != nil { + switch p.s.tok { + case tokens.Colon: + p.scan() + if val := p.parseTermInfixCallInList(); val != nil { + r = append(r, [2]*Term{key, val}) + switch p.s.tok { + case end: + return r + case tokens.Comma: + p.scan() + if p.s.tok == end { + return r + } + continue + default: + p.illegal(fmt.Sprintf("expected %q or %q", tokens.Comma, end)) + return nil + } + } + default: + p.illegal(fmt.Sprintf("expected %q", tokens.Colon)) + return nil + } + } + return nil + } +} + +func (p *Parser) parseTermOp(values ...tokens.Token) *Term { + for i := range values { + if p.s.tok == values[i] { + r := RefTerm(VarTerm(fmt.Sprint(p.s.tok)).SetLocation(p.s.Loc())).SetLocation(p.s.Loc()) + p.scan() + return r + } + } + return nil +} + +func (p *Parser) parseTermOpName(ref Ref, values ...tokens.Token) *Term { + for i := range values { + if p.s.tok == values[i] { + cp := ref.Copy() + for _, r := range cp { + r.SetLocation(p.s.Loc()) + } + t := RefTerm(cp...) 
+ t.SetLocation(p.s.Loc()) + p.scan() + return t + } + } + return nil +} + +func (p *Parser) parseVar() *Term { + + s := p.s.lit + + term := VarTerm(s).SetLocation(p.s.Loc()) + + // Update wildcard values with unique identifiers + if term.Equal(Wildcard) { + term.Value = Var(p.genwildcard()) + } + + return term +} + +func (p *Parser) genwildcard() string { + c := p.s.wildcard + p.s.wildcard++ + return fmt.Sprintf("%v%d", WildcardPrefix, c) +} + +func (p *Parser) error(loc *location.Location, reason string) { + p.errorf(loc, reason) //nolint:govet +} + +func (p *Parser) errorf(loc *location.Location, f string, a ...interface{}) { + msg := strings.Builder{} + msg.WriteString(fmt.Sprintf(f, a...)) + + switch len(p.s.hints) { + case 0: // nothing to do + case 1: + msg.WriteString(" (hint: ") + msg.WriteString(p.s.hints[0]) + msg.WriteRune(')') + default: + msg.WriteString(" (hints: ") + for i, h := range p.s.hints { + if i > 0 { + msg.WriteString(", ") + } + msg.WriteString(h) + } + msg.WriteRune(')') + } + + p.s.errors = append(p.s.errors, &Error{ + Code: ParseErr, + Message: msg.String(), + Location: loc, + Details: newParserErrorDetail(p.s.s.Bytes(), loc.Offset), + }) + p.s.hints = nil +} + +func (p *Parser) hint(f string, a ...interface{}) { + p.s.hints = append(p.s.hints, fmt.Sprintf(f, a...)) +} + +func (p *Parser) illegal(note string, a ...interface{}) { + tok := p.s.tok.String() + + if p.s.tok == tokens.Illegal { + p.errorf(p.s.Loc(), "illegal token") + return + } + + tokType := "token" + if tokens.IsKeyword(p.s.tok) { + tokType = "keyword" + } else if _, ok := allFutureKeywords[p.s.tok.String()]; ok { + tokType = "keyword" + } + + note = fmt.Sprintf(note, a...) + if len(note) > 0 { + p.errorf(p.s.Loc(), "unexpected %s %s: %s", tok, tokType, note) + } else { + p.errorf(p.s.Loc(), "unexpected %s %s", tok, tokType) + } +} + +func (p *Parser) illegalToken() { + p.illegal("") +} + +func (p *Parser) scan() { + p.doScan(true) +} + +func (p *Parser) scanWS() { + p.doScan(false) +} + +func (p *Parser) doScan(skipws bool) { + + // NOTE(tsandall): the last position is used to compute the "text" field for + // complex AST nodes. Whitespace never affects the last position of an AST + // node so do not update it when scanning. + if p.s.tok != tokens.Whitespace { + p.s.lastEnd = p.s.tokEnd + p.s.skippedNL = false + } + + var errs []scanner.Error + for { + var pos scanner.Position + p.s.tok, pos, p.s.lit, errs = p.s.s.Scan() + + p.s.tokEnd = pos.End + p.s.loc.Row = pos.Row + p.s.loc.Col = pos.Col + p.s.loc.Offset = pos.Offset + p.s.loc.Text = p.s.Text(pos.Offset, pos.End) + p.s.loc.Tabs = pos.Tabs + + for _, err := range errs { + p.error(p.s.Loc(), err.Message) + } + + if len(errs) > 0 { + p.s.tok = tokens.Illegal + } + + if p.s.tok == tokens.Whitespace { + if p.s.lit == "\n" { + p.s.skippedNL = true + } + if skipws { + continue + } + } + + if p.s.tok != tokens.Comment { + break + } + + // For backwards compatibility leave a nil + // Text value if there is no text rather than + // an empty string. 
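+ // (So a bare `#` keeps Text == nil, while `#x` yields Text "x".)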
+ var commentText []byte + if len(p.s.lit) > 1 { + commentText = []byte(p.s.lit[1:]) + } + comment := NewComment(commentText) + comment.SetLoc(p.s.Loc()) + p.s.comments = append(p.s.comments, comment) + } +} + +func (p *Parser) save() *state { + cpy := *p.s + s := *cpy.s + cpy.s = &s + return &cpy +} + +func (p *Parser) restore(s *state) { + p.s = s +} + +func setLocRecursive(x interface{}, loc *location.Location) { + NewGenericVisitor(func(x interface{}) bool { + if node, ok := x.(Node); ok { + node.SetLoc(loc) + } + return false + }).Walk(x) +} + +func (p *Parser) setLoc(term *Term, loc *location.Location, offset, end int) *Term { + if term != nil { + cpy := *loc + term.Location = &cpy + term.Location.Text = p.s.Text(offset, end) + } + return term +} + +func (p *Parser) validateDefaultRuleValue(rule *Rule) bool { + if rule.Head.Value == nil { + p.error(rule.Loc(), "illegal default rule (must have a value)") + return false + } + + valid := true + vis := NewGenericVisitor(func(x interface{}) bool { + switch x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: // skip closures + return true + case Ref, Var, Call: + p.error(rule.Loc(), fmt.Sprintf("illegal default rule (value cannot contain %v)", TypeName(x))) + valid = false + return true + } + return false + }) + + vis.Walk(rule.Head.Value.Value) + return valid +} + +func (p *Parser) validateDefaultRuleArgs(rule *Rule) bool { + + valid := true + vars := NewVarSet() + + vis := NewGenericVisitor(func(x interface{}) bool { + switch x := x.(type) { + case Var: + if vars.Contains(x) { + p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot be repeated %v)", x)) + valid = false + return true + } + vars.Add(x) + + case *Term: + switch v := x.Value.(type) { + case Var: // do nothing + default: + p.error(rule.Loc(), fmt.Sprintf("illegal default rule (arguments cannot contain %v)", ValueName(v))) + valid = false + return true + } + } + + return false + }) + + vis.Walk(rule.Head.Args) + return valid +} + +// We explicitly use yaml unmarshalling, to accommodate for the '_' in 'related_resources', +// which isn't handled properly by json for some reason. +type rawAnnotation struct { + Scope string `yaml:"scope"` + Title string `yaml:"title"` + Entrypoint bool `yaml:"entrypoint"` + Description string `yaml:"description"` + Organizations []string `yaml:"organizations"` + RelatedResources []interface{} `yaml:"related_resources"` + Authors []interface{} `yaml:"authors"` + Schemas []map[string]any `yaml:"schemas"` + Custom map[string]interface{} `yaml:"custom"` +} + +type metadataParser struct { + buf *bytes.Buffer + comments []*Comment + loc *location.Location +} + +func newMetadataParser(loc *Location) *metadataParser { + return &metadataParser{loc: loc, buf: bytes.NewBuffer(nil)} +} + +func (b *metadataParser) Append(c *Comment) { + b.buf.Write(bytes.TrimPrefix(c.Text, []byte(" "))) + b.buf.WriteByte('\n') + b.comments = append(b.comments, c) +} + +var yamlLineErrRegex = regexp.MustCompile(`^yaml:(?: unmarshal errors:[\n\s]*)? 
line ([[:digit:]]+):`) + +func (b *metadataParser) Parse() (*Annotations, error) { + + var raw rawAnnotation + + if len(bytes.TrimSpace(b.buf.Bytes())) == 0 { + return nil, fmt.Errorf("expected METADATA block, found whitespace") + } + + if err := yaml.Unmarshal(b.buf.Bytes(), &raw); err != nil { + var comment *Comment + match := yamlLineErrRegex.FindStringSubmatch(err.Error()) + if len(match) == 2 { + index, err2 := strconv.Atoi(match[1]) + if err2 == nil { + if index >= len(b.comments) { + comment = b.comments[len(b.comments)-1] + } else { + comment = b.comments[index] + } + b.loc = comment.Location + } + } + + if match == nil && len(b.comments) > 0 { + b.loc = b.comments[0].Location + } + + return nil, augmentYamlError(err, b.comments) + } + + var result Annotations + result.comments = b.comments + result.Scope = raw.Scope + result.Entrypoint = raw.Entrypoint + result.Title = raw.Title + result.Description = raw.Description + result.Organizations = raw.Organizations + + for _, v := range raw.RelatedResources { + rr, err := parseRelatedResource(v) + if err != nil { + return nil, fmt.Errorf("invalid related-resource definition %s: %w", v, err) + } + result.RelatedResources = append(result.RelatedResources, rr) + } + + for _, pair := range raw.Schemas { + k, v := unwrapPair(pair) + + var a SchemaAnnotation + var err error + + a.Path, err = ParseRef(k) + if err != nil { + return nil, fmt.Errorf("invalid document reference") + } + + switch v := v.(type) { + case string: + a.Schema, err = parseSchemaRef(v) + if err != nil { + return nil, err + } + case map[string]any: + w, err := convertYAMLMapKeyTypes(v, nil) + if err != nil { + return nil, fmt.Errorf("invalid schema definition: %w", err) + } + a.Definition = &w + default: + return nil, fmt.Errorf("invalid schema declaration for path %q", k) + } + + result.Schemas = append(result.Schemas, &a) + } + + for _, v := range raw.Authors { + author, err := parseAuthor(v) + if err != nil { + return nil, fmt.Errorf("invalid author definition %s: %w", v, err) + } + result.Authors = append(result.Authors, author) + } + + result.Custom = make(map[string]interface{}) + for k, v := range raw.Custom { + val, err := convertYAMLMapKeyTypes(v, nil) + if err != nil { + return nil, err + } + result.Custom[k] = val + } + + result.Location = b.loc + + // recreate original text of entire metadata block for location text attribute + sb := strings.Builder{} + sb.WriteString("# METADATA\n") + + lines := bytes.Split(b.buf.Bytes(), []byte{'\n'}) + + for _, line := range lines[:len(lines)-1] { + sb.WriteString("# ") + sb.Write(line) + sb.WriteByte('\n') + } + + result.Location.Text = []byte(strings.TrimSuffix(sb.String(), "\n")) + + return &result, nil +} + +// augmentYamlError augments a YAML error with hints intended to help the user figure out the cause of an otherwise +// cryptic error. These are hints, instead of proper errors, because they are educated guesses, and aren't guaranteed +// to be correct. 
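+//
+// For example, a metadata line like `description:missing space` is read by
+// YAML as a plain scalar rather than a key/value pair; the resulting
+// unmarshal error is annotated with a hint flagging the character that
+// follows the ':' in place of a space.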
+func augmentYamlError(err error, comments []*Comment) error { + // Adding hints for when key/value ':' separator isn't suffixed with a legal YAML space symbol + for _, comment := range comments { + txt := string(comment.Text) + parts := strings.Split(txt, ":") + if len(parts) > 1 { + parts = parts[1:] + var invalidSpaces []string + for partIndex, part := range parts { + if len(part) == 0 && partIndex == len(parts)-1 { + invalidSpaces = []string{} + break + } + + r, _ := utf8.DecodeRuneInString(part) + if r == ' ' || r == '\t' { + invalidSpaces = []string{} + break + } + + invalidSpaces = append(invalidSpaces, fmt.Sprintf("%+q", r)) + } + if len(invalidSpaces) > 0 { + err = fmt.Errorf( + "%s\n Hint: on line %d, symbol(s) %v immediately following a key/value separator ':' is not a legal yaml space character", + err.Error(), comment.Location.Row, invalidSpaces) + } + } + } + return err +} + +func unwrapPair(pair map[string]interface{}) (string, interface{}) { + for k, v := range pair { + return k, v + } + return "", nil +} + +var errInvalidSchemaRef = fmt.Errorf("invalid schema reference") + +// NOTE(tsandall): 'schema' is not registered as a root because it's not +// supported by the compiler or evaluator today. Once we fix that, we can remove +// this function. +func parseSchemaRef(s string) (Ref, error) { + + term, err := ParseTerm(s) + if err == nil { + switch v := term.Value.(type) { + case Var: + if term.Equal(SchemaRootDocument) { + return SchemaRootRef.Copy(), nil + } + case Ref: + if v.HasPrefix(SchemaRootRef) { + return v, nil + } + } + } + + return nil, errInvalidSchemaRef +} + +func parseRelatedResource(rr interface{}) (*RelatedResourceAnnotation, error) { + rr, err := convertYAMLMapKeyTypes(rr, nil) + if err != nil { + return nil, err + } + + switch rr := rr.(type) { + case string: + if len(rr) > 0 { + u, err := url.Parse(rr) + if err != nil { + return nil, err + } + return &RelatedResourceAnnotation{Ref: *u}, nil + } + return nil, fmt.Errorf("ref URL may not be empty string") + case map[string]interface{}: + description := strings.TrimSpace(getSafeString(rr, "description")) + ref := strings.TrimSpace(getSafeString(rr, "ref")) + if len(ref) > 0 { + u, err := url.Parse(ref) + if err != nil { + return nil, err + } + return &RelatedResourceAnnotation{Description: description, Ref: *u}, nil + } + return nil, fmt.Errorf("'ref' value required in object") + } + + return nil, fmt.Errorf("invalid value type, must be string or map") +} + +func parseAuthor(a interface{}) (*AuthorAnnotation, error) { + a, err := convertYAMLMapKeyTypes(a, nil) + if err != nil { + return nil, err + } + + switch a := a.(type) { + case string: + return parseAuthorString(a) + case map[string]interface{}: + name := strings.TrimSpace(getSafeString(a, "name")) + email := strings.TrimSpace(getSafeString(a, "email")) + if len(name) > 0 || len(email) > 0 { + return &AuthorAnnotation{name, email}, nil + } + return nil, fmt.Errorf("'name' and/or 'email' values required in object") + } + + return nil, fmt.Errorf("invalid value type, must be string or map") +} + +func getSafeString(m map[string]interface{}, k string) string { + if v, found := m[k]; found { + if s, ok := v.(string); ok { + return s + } + } + return "" +} + +const emailPrefix = "<" +const emailSuffix = ">" + +// parseAuthor parses a string into an AuthorAnnotation. If the last word of the input string is enclosed within <>, +// it is extracted as the author's email. The email may not contain whitelines, as it then will be interpreted as +// multiple words. 
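+//
+// For example, "John Doe <john@example.com>" yields Name "John Doe" and
+// Email "john@example.com", while a plain "John Doe" yields only the name.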
+func parseAuthorString(s string) (*AuthorAnnotation, error) { + parts := strings.Fields(s) + + if len(parts) == 0 { + return nil, fmt.Errorf("author is an empty string") + } + + namePartCount := len(parts) + trailing := parts[namePartCount-1] + var email string + if len(trailing) >= len(emailPrefix)+len(emailSuffix) && strings.HasPrefix(trailing, emailPrefix) && + strings.HasSuffix(trailing, emailSuffix) { + email = trailing[len(emailPrefix):] + email = email[0 : len(email)-len(emailSuffix)] + namePartCount = namePartCount - 1 + } + + name := strings.Join(parts[0:namePartCount], " ") + + return &AuthorAnnotation{Name: name, Email: email}, nil +} + +func convertYAMLMapKeyTypes(x any, path []string) (any, error) { + var err error + switch x := x.(type) { + case map[any]any: + result := make(map[string]any, len(x)) + for k, v := range x { + str, ok := k.(string) + if !ok { + return nil, fmt.Errorf("invalid map key type(s): %v", strings.Join(path, "/")) + } + result[str], err = convertYAMLMapKeyTypes(v, append(path, str)) + if err != nil { + return nil, err + } + } + return result, nil + case []any: + for i := range x { + x[i], err = convertYAMLMapKeyTypes(x[i], append(path, fmt.Sprintf("%d", i))) + if err != nil { + return nil, err + } + } + return x, nil + default: + return x, nil + } +} + +// futureKeywords is the source of truth for future keywords that will +// eventually become standard keywords inside of Rego. +var futureKeywords = map[string]tokens.Token{} + +// futureKeywordsV0 is the source of truth for future keywords that were +// not yet a standard part of Rego in v0, and required importing. +var futureKeywordsV0 = map[string]tokens.Token{ + "in": tokens.In, + "every": tokens.Every, + "contains": tokens.Contains, + "if": tokens.If, +} + +var allFutureKeywords map[string]tokens.Token + +func IsFutureKeyword(s string) bool { + return IsFutureKeywordForRegoVersion(s, RegoV1) +} + +func IsFutureKeywordForRegoVersion(s string, v RegoVersion) bool { + var yes bool + + switch v { + case RegoV0, RegoV0CompatV1: + _, yes = futureKeywordsV0[s] + case RegoV1: + _, yes = futureKeywords[s] + } + + return yes +} + +func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) { + path := imp.Path.Value.(Ref) + + if len(path) == 1 || !path[1].Equal(StringTerm("keywords")) { + p.errorf(imp.Path.Location, "invalid import, must be `future.keywords`") + return + } + + if imp.Alias != "" { + p.errorf(imp.Path.Location, "`future` imports cannot be aliased") + return + } + + kwds := make([]string, 0, len(allowedFutureKeywords)) + for k := range allowedFutureKeywords { + kwds = append(kwds, k) + } + + switch len(path) { + case 2: // all keywords imported, nothing to do + case 3: // one keyword imported + kw, ok := path[2].Value.(String) + if !ok { + p.errorf(imp.Path.Location, "invalid import, must be `future.keywords.x`, e.g. 
`import future.keywords.in`") + return + } + keyword := string(kw) + _, ok = allowedFutureKeywords[keyword] + if !ok { + sort.Strings(kwds) // so the error message is stable + p.errorf(imp.Path.Location, "unexpected keyword, must be one of %v", kwds) + return + } + + kwds = []string{keyword} // overwrite + } + for _, kw := range kwds { + p.s.s.AddKeyword(kw, allowedFutureKeywords[kw]) + } +} + +func (p *Parser) regoV1Import(imp *Import) { + if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) && !p.po.Capabilities.ContainsFeature(FeatureRegoV1) { + p.errorf(imp.Path.Location, "invalid import, `%s` is not supported by current capabilities", RegoV1CompatibleRef) + return + } + + path := imp.Path.Value.(Ref) + + // v1 is only valid option + if len(path) == 1 || !path[1].Equal(RegoV1CompatibleRef[1]) || len(path) > 2 { + p.errorf(imp.Path.Location, "invalid import `%s`, must be `%s`", path, RegoV1CompatibleRef) + return + } + + if p.po.EffectiveRegoVersion() == RegoV1 { + // We're parsing for Rego v1, where the 'rego.v1' import is a no-op. + return + } + + if imp.Alias != "" { + p.errorf(imp.Path.Location, "`rego` imports cannot be aliased") + return + } + + // import all future keywords with the rego.v1 import + kwds := make([]string, 0, len(futureKeywordsV0)) + for k := range futureKeywordsV0 { + kwds = append(kwds, k) + } + + p.s.s.SetRegoV1Compatible() + for _, kw := range kwds { + p.s.s.AddKeyword(kw, futureKeywordsV0[kw]) + } +} + +func init() { + allFutureKeywords = map[string]tokens.Token{} + for k, v := range futureKeywords { + allFutureKeywords[k] = v + } + for k, v := range futureKeywordsV0 { + allFutureKeywords[k] = v + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go new file mode 100644 index 000000000..9712cb611 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/parser_ext.go @@ -0,0 +1,821 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// This file contains extra functions for parsing Rego. +// Most of the parsing is handled by the code in parser.go, +// however, there are additional utilities that are +// helpful for dealing with Rego source inputs (e.g., REPL +// statements, source files, etc.) + +package ast + +import ( + "bytes" + "errors" + "fmt" + "strings" + "unicode" + + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" +) + +// MustParseBody returns a parsed body. +// If an error occurs during parsing, panic. +func MustParseBody(input string) Body { + return MustParseBodyWithOpts(input, ParserOptions{}) +} + +// MustParseBodyWithOpts returns a parsed body. +// If an error occurs during parsing, panic. +func MustParseBodyWithOpts(input string, opts ParserOptions) Body { + parsed, err := ParseBodyWithOpts(input, opts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseExpr returns a parsed expression. +// If an error occurs during parsing, panic. +func MustParseExpr(input string) *Expr { + parsed, err := ParseExpr(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseImports returns a slice of imports. +// If an error occurs during parsing, panic. +func MustParseImports(input string) []*Import { + parsed, err := ParseImports(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseModule returns a parsed module. 
+// If an error occurs during parsing, panic. +func MustParseModule(input string) *Module { + return MustParseModuleWithOpts(input, ParserOptions{}) +} + +// MustParseModuleWithOpts returns a parsed module. +// If an error occurs during parsing, panic. +func MustParseModuleWithOpts(input string, opts ParserOptions) *Module { + parsed, err := ParseModuleWithOpts("", input, opts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParsePackage returns a Package. +// If an error occurs during parsing, panic. +func MustParsePackage(input string) *Package { + parsed, err := ParsePackage(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseStatements returns a slice of parsed statements. +// If an error occurs during parsing, panic. +func MustParseStatements(input string) []Statement { + parsed, _, err := ParseStatements("", input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseStatement returns exactly one statement. +// If an error occurs during parsing, panic. +func MustParseStatement(input string) Statement { + parsed, err := ParseStatement(input) + if err != nil { + panic(err) + } + return parsed +} + +func MustParseStatementWithOpts(input string, popts ParserOptions) Statement { + parsed, err := ParseStatementWithOpts(input, popts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseRef returns a parsed reference. +// If an error occurs during parsing, panic. +func MustParseRef(input string) Ref { + parsed, err := ParseRef(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseRule returns a parsed rule. +// If an error occurs during parsing, panic. +func MustParseRule(input string) *Rule { + parsed, err := ParseRule(input) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseRuleWithOpts returns a parsed rule. +// If an error occurs during parsing, panic. +func MustParseRuleWithOpts(input string, opts ParserOptions) *Rule { + parsed, err := ParseRuleWithOpts(input, opts) + if err != nil { + panic(err) + } + return parsed +} + +// MustParseTerm returns a parsed term. +// If an error occurs during parsing, panic. +func MustParseTerm(input string) *Term { + parsed, err := ParseTerm(input) + if err != nil { + panic(err) + } + return parsed +} + +// ParseRuleFromBody returns a rule if the body can be interpreted as a rule +// definition. Otherwise, an error is returned. +func ParseRuleFromBody(module *Module, body Body) (*Rule, error) { + + if len(body) != 1 { + return nil, fmt.Errorf("multiple expressions cannot be used for rule head") + } + + return ParseRuleFromExpr(module, body[0]) +} + +// ParseRuleFromExpr returns a rule if the expression can be interpreted as a +// rule definition. 
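+// For example, the expression `p = 1` is interpreted as the complete rule
+// `p = 1 { true }`, and a bare reference such as `p[x]` as a partial set
+// rule definition.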
+func ParseRuleFromExpr(module *Module, expr *Expr) (*Rule, error) { + + if len(expr.With) > 0 { + return nil, fmt.Errorf("expressions using with keyword cannot be used for rule head") + } + + if expr.Negated { + return nil, fmt.Errorf("negated expressions cannot be used for rule head") + } + + if _, ok := expr.Terms.(*SomeDecl); ok { + return nil, errors.New("'some' declarations cannot be used for rule head") + } + + if term, ok := expr.Terms.(*Term); ok { + switch v := term.Value.(type) { + case Ref: + if len(v) > 2 { // 2+ dots + return ParseCompleteDocRuleWithDotsFromTerm(module, term) + } + return ParsePartialSetDocRuleFromTerm(module, term) + default: + return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(v)) + } + } + + if _, ok := expr.Terms.([]*Term); !ok { + // This is a defensive check in case other kinds of expression terms are + // introduced in the future. + return nil, errors.New("expression cannot be used for rule head") + } + + if expr.IsEquality() { + return parseCompleteRuleFromEq(module, expr) + } else if expr.IsAssignment() { + rule, err := parseCompleteRuleFromEq(module, expr) + if err != nil { + return nil, err + } + rule.Head.Assign = true + return rule, nil + } + + if _, ok := BuiltinMap[expr.Operator().String()]; ok { + return nil, fmt.Errorf("rule name conflicts with built-in function") + } + + return ParseRuleFromCallExpr(module, expr.Terms.([]*Term)) +} + +func parseCompleteRuleFromEq(module *Module, expr *Expr) (rule *Rule, err error) { + + // ensure the rule location is set to the expr location + // the helper functions called below try to set the location based + // on the terms they've been provided but that is not as accurate. + defer func() { + if rule != nil { + rule.Location = expr.Location + rule.Head.Location = expr.Location + } + }() + + lhs, rhs := expr.Operand(0), expr.Operand(1) + if lhs == nil || rhs == nil { + return nil, errors.New("assignment requires two operands") + } + + rule, err = ParseRuleFromCallEqExpr(module, lhs, rhs) + if err == nil { + return rule, nil + } + + rule, err = ParsePartialObjectDocRuleFromEqExpr(module, lhs, rhs) + if err == nil { + return rule, nil + } + + return ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) +} + +// ParseCompleteDocRuleFromAssignmentExpr returns a rule if the expression can +// be interpreted as a complete document definition declared with the assignment +// operator. +func ParseCompleteDocRuleFromAssignmentExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + + rule, err := ParseCompleteDocRuleFromEqExpr(module, lhs, rhs) + if err != nil { + return nil, err + } + + rule.Head.Assign = true + + return rule, nil +} + +// ParseCompleteDocRuleFromEqExpr returns a rule if the expression can be +// interpreted as a complete document definition. +func ParseCompleteDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + var head *Head + + if v, ok := lhs.Value.(Var); ok { + // Modify the code to add the location to the head ref + head = VarHead(v, lhs.Location, nil) + } else if r, ok := lhs.Value.(Ref); ok { // groundness ? 
+ if _, ok := r[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", r) + } + head = RefHead(r) + if len(r) > 1 && !r[len(r)-1].IsGround() { + return nil, fmt.Errorf("ref not ground") + } + } else { + return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(lhs.Value)) + } + head.Value = rhs + head.Location = lhs.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + + return &Rule{ + Location: lhs.Location, + Head: head, + Body: body, + Module: module, + generatedBody: true, + }, nil +} + +func ParseCompleteDocRuleWithDotsFromTerm(module *Module, term *Term) (*Rule, error) { + ref, ok := term.Value.(Ref) + if !ok { + return nil, fmt.Errorf("%v cannot be used for rule name", ValueName(term.Value)) + } + + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + head := RefHead(ref, BooleanTerm(true).SetLocation(term.Location)) + head.generatedValue = true + head.Location = term.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) + + return &Rule{ + Location: term.Location, + Head: head, + Body: body, + Module: module, + }, nil +} + +// ParsePartialObjectDocRuleFromEqExpr returns a rule if the expression can be +// interpreted as a partial object document definition. +func ParsePartialObjectDocRuleFromEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + ref, ok := lhs.Value.(Ref) + if !ok { + return nil, fmt.Errorf("%v cannot be used as rule name", ValueName(lhs.Value)) + } + + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + + head := RefHead(ref, rhs) + if len(ref) == 2 { // backcompat for naked `foo.bar = "baz"` statements + head.Name = ref[0].Value.(Var) + head.Key = ref[1] + } + head.Location = rhs.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + + rule := &Rule{ + Location: rhs.Location, + Head: head, + Body: body, + Module: module, + } + + return rule, nil +} + +// ParsePartialSetDocRuleFromTerm returns a rule if the term can be interpreted +// as a partial set document definition. +func ParsePartialSetDocRuleFromTerm(module *Module, term *Term) (*Rule, error) { + + ref, ok := term.Value.(Ref) + if !ok || len(ref) == 1 { + return nil, fmt.Errorf("%vs cannot be used for rule head", ValueName(term.Value)) + } + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + + head := RefHead(ref) + if len(ref) == 2 { + v, ok := ref[0].Value.(Var) + if !ok { + return nil, fmt.Errorf("%vs cannot be used for rule head", ValueName(term.Value)) + } + // Modify the code to add the location to the head ref + head = VarHead(v, ref[0].Location, nil) + head.Key = ref[1] + } + head.Location = term.Location + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(term.Location)).SetLocation(term.Location)) + + rule := &Rule{ + Location: term.Location, + Head: head, + Body: body, + Module: module, + } + + return rule, nil +} + +// ParseRuleFromCallEqExpr returns a rule if the term can be interpreted as a +// function definition (e.g., f(x) = y => f(x) = y { true }). 
+func ParseRuleFromCallEqExpr(module *Module, lhs, rhs *Term) (*Rule, error) { + + call, ok := lhs.Value.(Call) + if !ok { + return nil, fmt.Errorf("must be call") + } + + ref, ok := call[0].Value.(Ref) + if !ok { + return nil, fmt.Errorf("%vs cannot be used in function signature", ValueName(call[0].Value)) + } + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + + head := RefHead(ref, rhs) + head.Location = lhs.Location + head.Args = Args(call[1:]) + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(rhs.Location)).SetLocation(rhs.Location)) + + rule := &Rule{ + Location: lhs.Location, + Head: head, + Body: body, + Module: module, + } + + return rule, nil +} + +// ParseRuleFromCallExpr returns a rule if the terms can be interpreted as a +// function returning true or some value (e.g., f(x) => f(x) = true { true }). +func ParseRuleFromCallExpr(module *Module, terms []*Term) (*Rule, error) { + + if len(terms) <= 1 { + return nil, fmt.Errorf("rule argument list must take at least one argument") + } + + loc := terms[0].Location + ref := terms[0].Value.(Ref) + if _, ok := ref[0].Value.(Var); !ok { + return nil, fmt.Errorf("invalid rule head: %v", ref) + } + head := RefHead(ref, BooleanTerm(true).SetLocation(loc)) + head.Location = loc + head.Args = terms[1:] + + body := NewBody(NewExpr(BooleanTerm(true).SetLocation(loc)).SetLocation(loc)) + + rule := &Rule{ + Location: loc, + Head: head, + Module: module, + Body: body, + } + return rule, nil +} + +// ParseImports returns a slice of Import objects. +func ParseImports(input string) ([]*Import, error) { + stmts, _, err := ParseStatements("", input) + if err != nil { + return nil, err + } + result := []*Import{} + for _, stmt := range stmts { + if imp, ok := stmt.(*Import); ok { + result = append(result, imp) + } else { + return nil, fmt.Errorf("expected import but got %T", stmt) + } + } + return result, nil +} + +// ParseModule returns a parsed Module object. +// For details on Module objects and their fields, see policy.go. +// Empty input will return nil, nil. +func ParseModule(filename, input string) (*Module, error) { + return ParseModuleWithOpts(filename, input, ParserOptions{}) +} + +// ParseModuleWithOpts returns a parsed Module object, and has an additional input ParserOptions +// For details on Module objects and their fields, see policy.go. +// Empty input will return nil, nil. +func ParseModuleWithOpts(filename, input string, popts ParserOptions) (*Module, error) { + stmts, comments, err := ParseStatementsWithOpts(filename, input, popts) + if err != nil { + return nil, err + } + return parseModule(filename, stmts, comments, popts.RegoVersion) +} + +// ParseBody returns exactly one body. +// If multiple bodies are parsed, an error is returned. +func ParseBody(input string) (Body, error) { + return ParseBodyWithOpts(input, ParserOptions{SkipRules: true}) +} + +// ParseBodyWithOpts returns exactly one body. It does _not_ set SkipRules: true on its own, +// but respects whatever ParserOptions it's been given. 
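+//
+// A minimal usage sketch, assuming the default RegoVersion treats `in` as a
+// regular keyword (as Rego v1 does):
+//
+//	body, err := ParseBodyWithOpts("x := 1; x in {1, 2}", ParserOptions{SkipRules: true})
+//	if err != nil {
+//		// handle the parse error
+//	}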
+func ParseBodyWithOpts(input string, popts ParserOptions) (Body, error) { + + stmts, _, err := ParseStatementsWithOpts("", input, popts) + if err != nil { + return nil, err + } + + result := Body{} + + for _, stmt := range stmts { + switch stmt := stmt.(type) { + case Body: + for i := range stmt { + result.Append(stmt[i]) + } + case *Comment: + // skip + default: + return nil, fmt.Errorf("expected body but got %T", stmt) + } + } + + return result, nil +} + +// ParseExpr returns exactly one expression. +// If multiple expressions are parsed, an error is returned. +func ParseExpr(input string) (*Expr, error) { + body, err := ParseBody(input) + if err != nil { + return nil, fmt.Errorf("failed to parse expression: %w", err) + } + if len(body) != 1 { + return nil, fmt.Errorf("expected exactly one expression but got: %v", body) + } + return body[0], nil +} + +// ParsePackage returns exactly one Package. +// If multiple statements are parsed, an error is returned. +func ParsePackage(input string) (*Package, error) { + stmt, err := ParseStatement(input) + if err != nil { + return nil, err + } + pkg, ok := stmt.(*Package) + if !ok { + return nil, fmt.Errorf("expected package but got %T", stmt) + } + return pkg, nil +} + +// ParseTerm returns exactly one term. +// If multiple terms are parsed, an error is returned. +func ParseTerm(input string) (*Term, error) { + body, err := ParseBody(input) + if err != nil { + return nil, fmt.Errorf("failed to parse term: %w", err) + } + if len(body) != 1 { + return nil, fmt.Errorf("expected exactly one term but got: %v", body) + } + term, ok := body[0].Terms.(*Term) + if !ok { + return nil, fmt.Errorf("expected term but got %v", body[0].Terms) + } + return term, nil +} + +// ParseRef returns exactly one reference. +func ParseRef(input string) (Ref, error) { + term, err := ParseTerm(input) + if err != nil { + return nil, fmt.Errorf("failed to parse ref: %w", err) + } + ref, ok := term.Value.(Ref) + if !ok { + return nil, fmt.Errorf("expected ref but got %v", term) + } + return ref, nil +} + +// ParseRuleWithOpts returns exactly one rule. +// If multiple rules are parsed, an error is returned. +func ParseRuleWithOpts(input string, opts ParserOptions) (*Rule, error) { + stmts, _, err := ParseStatementsWithOpts("", input, opts) + if err != nil { + return nil, err + } + if len(stmts) != 1 { + return nil, fmt.Errorf("expected exactly one statement (rule), got %v = %T, %T", stmts, stmts[0], stmts[1]) + } + rule, ok := stmts[0].(*Rule) + if !ok { + return nil, fmt.Errorf("expected rule but got %T", stmts[0]) + } + return rule, nil +} + +// ParseRule returns exactly one rule. +// If multiple rules are parsed, an error is returned. +func ParseRule(input string) (*Rule, error) { + return ParseRuleWithOpts(input, ParserOptions{}) +} + +// ParseStatement returns exactly one statement. +// A statement might be a term, expression, rule, etc. Regardless, +// this function expects *exactly* one statement. If multiple +// statements are parsed, an error is returned. 
+func ParseStatement(input string) (Statement, error) { + stmts, _, err := ParseStatements("", input) + if err != nil { + return nil, err + } + if len(stmts) != 1 { + return nil, fmt.Errorf("expected exactly one statement") + } + return stmts[0], nil +} + +func ParseStatementWithOpts(input string, popts ParserOptions) (Statement, error) { + stmts, _, err := ParseStatementsWithOpts("", input, popts) + if err != nil { + return nil, err + } + if len(stmts) != 1 { + return nil, fmt.Errorf("expected exactly one statement") + } + return stmts[0], nil +} + +// ParseStatements is deprecated. Use ParseStatementWithOpts instead. +func ParseStatements(filename, input string) ([]Statement, []*Comment, error) { + return ParseStatementsWithOpts(filename, input, ParserOptions{}) +} + +// ParseStatementsWithOpts returns a slice of parsed statements. This is the +// default return value from the parser. +func ParseStatementsWithOpts(filename, input string, popts ParserOptions) ([]Statement, []*Comment, error) { + + parser := NewParser(). + WithFilename(filename). + WithReader(bytes.NewBufferString(input)). + WithProcessAnnotation(popts.ProcessAnnotation). + WithFutureKeywords(popts.FutureKeywords...). + WithAllFutureKeywords(popts.AllFutureKeywords). + WithCapabilities(popts.Capabilities). + WithSkipRules(popts.SkipRules). + WithRegoVersion(popts.RegoVersion). + withUnreleasedKeywords(popts.unreleasedKeywords) + + stmts, comments, errs := parser.Parse() + + if len(errs) > 0 { + return nil, nil, errs + } + + return stmts, comments, nil +} + +func parseModule(filename string, stmts []Statement, comments []*Comment, regoCompatibilityMode RegoVersion) (*Module, error) { + + if len(stmts) == 0 { + return nil, NewError(ParseErr, &Location{File: filename}, "empty module") + } + + var errs Errors + + pkg, ok := stmts[0].(*Package) + if !ok { + loc := stmts[0].Loc() + errs = append(errs, NewError(ParseErr, loc, "package expected")) + } + + mod := &Module{ + Package: pkg, + stmts: stmts, + } + + // The comments slice only holds comments that were not their own statements. + mod.Comments = append(mod.Comments, comments...) + + if regoCompatibilityMode == RegoUndefined { + mod.regoVersion = DefaultRegoVersion + } else { + mod.regoVersion = regoCompatibilityMode + } + + for i, stmt := range stmts[1:] { + switch stmt := stmt.(type) { + case *Import: + mod.Imports = append(mod.Imports, stmt) + if mod.regoVersion == RegoV0 && Compare(stmt.Path.Value, RegoV1CompatibleRef) == 0 { + mod.regoVersion = RegoV0CompatV1 + } + case *Rule: + setRuleModule(stmt, mod) + mod.Rules = append(mod.Rules, stmt) + case Body: + rule, err := ParseRuleFromBody(mod, stmt) + if err != nil { + errs = append(errs, NewError(ParseErr, stmt[0].Location, err.Error())) //nolint:govet + continue + } + rule.generatedBody = true + mod.Rules = append(mod.Rules, rule) + + // NOTE(tsandall): the statement should now be interpreted as a + // rule so update the statement list. This is important for the + // logic below that associates annotations with statements. + stmts[i+1] = rule + case *Package: + errs = append(errs, NewError(ParseErr, stmt.Loc(), "unexpected package")) + case *Annotations: + mod.Annotations = append(mod.Annotations, stmt) + case *Comment: + // Ignore comments, they're handled above. + default: + panic("illegal value") // Indicates grammar is out-of-sync with code. 
+ } + } + + if mod.regoVersion == RegoV0CompatV1 || mod.regoVersion == RegoV1 { + for _, rule := range mod.Rules { + for r := rule; r != nil; r = r.Else { + errs = append(errs, CheckRegoV1(r)...) + } + } + } + + if len(errs) > 0 { + return nil, errs + } + + errs = append(errs, attachAnnotationsNodes(mod)...) + + if len(errs) > 0 { + return nil, errs + } + + attachRuleAnnotations(mod) + + return mod, nil +} + +func ruleDeclarationHasKeyword(rule *Rule, keyword tokens.Token) bool { + for _, kw := range rule.Head.keywords { + if kw == keyword { + return true + } + } + return false +} + +func newScopeAttachmentErr(a *Annotations, want string) *Error { + var have string + if a.node != nil { + have = fmt.Sprintf(" (have %v)", TypeName(a.node)) + } + return NewError(ParseErr, a.Loc(), "annotation scope '%v' must be applied to %v%v", a.Scope, want, have) +} + +func setRuleModule(rule *Rule, module *Module) { + rule.Module = module + if rule.Else != nil { + setRuleModule(rule.Else, module) + } +} + +// ParserErrorDetail holds additional details for parser errors. +type ParserErrorDetail struct { + Line string `json:"line"` + Idx int `json:"idx"` +} + +func newParserErrorDetail(bs []byte, offset int) *ParserErrorDetail { + + // Find first non-space character at or before offset position. + if offset >= len(bs) { + offset = len(bs) - 1 + } else if offset < 0 { + offset = 0 + } + + for offset > 0 && unicode.IsSpace(rune(bs[offset])) { + offset-- + } + + // Find beginning of line containing offset. + begin := offset + + for begin > 0 && !isNewLineChar(bs[begin]) { + begin-- + } + + if isNewLineChar(bs[begin]) { + begin++ + } + + // Find end of line containing offset. + end := offset + + for end < len(bs) && !isNewLineChar(bs[end]) { + end++ + } + + if begin > end { + begin = end + } + + // Extract line and compute index of offset byte in line. + line := bs[begin:end] + index := offset - begin + + return &ParserErrorDetail{ + Line: string(line), + Idx: index, + } +} + +// Lines returns the pretty formatted line output for the error details. +func (d ParserErrorDetail) Lines() []string { + line := strings.TrimLeft(d.Line, "\t") // remove leading tabs + tabCount := len(d.Line) - len(line) + indent := d.Idx - tabCount + if indent < 0 { + indent = 0 + } + return []string{line, strings.Repeat(" ", indent) + "^"} +} + +func isNewLineChar(b byte) bool { + return b == '\r' || b == '\n' +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go new file mode 100644 index 000000000..94dc25244 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/policy.go @@ -0,0 +1,2027 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "bytes" + "encoding/json" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/util" +) + +// Initialize seed for term hashing. This is intentionally placed before the +// root document sets are constructed to ensure they use the same hash seed as +// subsequent lookups. If the hash seeds are out of sync, lookups will fail. 
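ParserErrorDetail above is what backs the caret-style context printed alongside parse errors. A small sketch of Lines() in isolation, using a hand-constructed detail rather than a real parser error:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	// Idx is the byte offset of the offending character within Line.
	detail := ast.ParserErrorDetail{
		Line: "allow if input.x ==",
		Idx:  9,
	}

	// Lines returns the source line followed by a second line with a
	// caret positioned under the byte at Idx.
	for _, line := range detail.Lines() {
		fmt.Println(line)
	}
}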
+var hashSeed = rand.New(rand.NewSource(time.Now().UnixNano())) +var hashSeed0 = (uint64(hashSeed.Uint32()) << 32) | uint64(hashSeed.Uint32()) + +// DefaultRootDocument is the default root document. +// +// All package directives inside source files are implicitly prefixed with the +// DefaultRootDocument value. +var DefaultRootDocument = VarTerm("data") + +// InputRootDocument names the document containing query arguments. +var InputRootDocument = VarTerm("input") + +// SchemaRootDocument names the document containing external data schemas. +var SchemaRootDocument = VarTerm("schema") + +// FunctionArgRootDocument names the document containing function arguments. +// It's only for internal usage, for referencing function arguments between +// the index and topdown. +var FunctionArgRootDocument = VarTerm("args") + +// FutureRootDocument names the document containing new, to-become-default, +// features. +var FutureRootDocument = VarTerm("future") + +// RegoRootDocument names the document containing new, to-become-default, +// features in a future versioned release. +var RegoRootDocument = VarTerm("rego") + +// RootDocumentNames contains the names of top-level documents that can be +// referred to in modules and queries. +// +// Note, the schema document is not currently implemented in the evaluator so it +// is not registered as a root document name (yet). +var RootDocumentNames = NewSet( + DefaultRootDocument, + InputRootDocument, +) + +// DefaultRootRef is a reference to the root of the default document. +// +// All refs to data in the policy engine's storage layer are prefixed with this ref. +var DefaultRootRef = Ref{DefaultRootDocument} + +// InputRootRef is a reference to the root of the input document. +// +// All refs to query arguments are prefixed with this ref. +var InputRootRef = Ref{InputRootDocument} + +// SchemaRootRef is a reference to the root of the schema document. +// +// All refs to schema documents are prefixed with this ref. Note, the schema +// document is not currently implemented in the evaluator so it is not +// registered as a root document ref (yet). +var SchemaRootRef = Ref{SchemaRootDocument} + +// RootDocumentRefs contains the prefixes of top-level documents that all +// non-local references start with. +var RootDocumentRefs = NewSet( + NewTerm(DefaultRootRef), + NewTerm(InputRootRef), +) + +// SystemDocumentKey is the name of the top-level key that identifies the system +// document. +const SystemDocumentKey = String("system") + +// ReservedVars is the set of names that refer to implicitly ground vars. +var ReservedVars = NewVarSet( + DefaultRootDocument.Value.(Var), + InputRootDocument.Value.(Var), +) + +// Wildcard represents the wildcard variable as defined in the language. +var Wildcard = &Term{Value: Var("_")} + +// WildcardPrefix is the special character that all wildcard variables are +// prefixed with when the statement they are contained in is parsed. +const WildcardPrefix = "$" + +// Keywords contains strings that map to language keywords. 
+var Keywords = KeywordsForRegoVersion(DefaultRegoVersion) + +var KeywordsV0 = [...]string{ + "not", + "package", + "import", + "as", + "default", + "else", + "with", + "null", + "true", + "false", + "some", +} + +var KeywordsV1 = [...]string{ + "not", + "package", + "import", + "as", + "default", + "else", + "with", + "null", + "true", + "false", + "some", + "if", + "contains", + "in", + "every", +} + +func KeywordsForRegoVersion(v RegoVersion) []string { + switch v { + case RegoV0: + return KeywordsV0[:] + case RegoV1, RegoV0CompatV1: + return KeywordsV1[:] + } + return nil +} + +// IsKeyword returns true if s is a language keyword. +func IsKeyword(s string) bool { + return IsInKeywords(s, Keywords) +} + +func IsInKeywords(s string, keywords []string) bool { + for _, x := range keywords { + if x == s { + return true + } + } + return false +} + +// IsKeywordInRegoVersion returns true if s is a language keyword. +func IsKeywordInRegoVersion(s string, regoVersion RegoVersion) bool { + switch regoVersion { + case RegoV0: + for _, x := range KeywordsV0 { + if x == s { + return true + } + } + case RegoV1, RegoV0CompatV1: + for _, x := range KeywordsV1 { + if x == s { + return true + } + } + } + + return false +} + +type ( + // Node represents a node in an AST. Nodes may be statements in a policy module + // or elements of an ad-hoc query, expression, etc. + Node interface { + fmt.Stringer + Loc() *Location + SetLoc(*Location) + } + + // Statement represents a single statement in a policy module. + Statement interface { + Node + } +) + +type ( + + // Module represents a collection of policies (defined by rules) + // within a namespace (defined by the package) and optional + // dependencies on external documents (defined by imports). + Module struct { + Package *Package `json:"package"` + Imports []*Import `json:"imports,omitempty"` + Annotations []*Annotations `json:"annotations,omitempty"` + Rules []*Rule `json:"rules,omitempty"` + Comments []*Comment `json:"comments,omitempty"` + stmts []Statement + regoVersion RegoVersion + } + + // Comment contains the raw text from the comment in the definition. + Comment struct { + // TODO: these fields have inconsistent JSON keys with other structs in this package. + Text []byte + Location *Location + } + + // Package represents the namespace of the documents produced + // by rules inside the module. + Package struct { + Path Ref `json:"path"` + Location *Location `json:"location,omitempty"` + } + + // Import represents a dependency on a document outside of the policy + // namespace. Imports are optional. + Import struct { + Path *Term `json:"path"` + Alias Var `json:"alias,omitempty"` + Location *Location `json:"location,omitempty"` + } + + // Rule represents a rule as defined in the language. Rules define the + // content of documents that represent policy decisions. + Rule struct { + Default bool `json:"default,omitempty"` + Head *Head `json:"head"` + Body Body `json:"body"` + Else *Rule `json:"else,omitempty"` + Location *Location `json:"location,omitempty"` + Annotations []*Annotations `json:"annotations,omitempty"` + + // Module is a pointer to the module containing this rule. If the rule + // was NOT created while parsing/constructing a module, this should be + // left unset. The pointer is not included in any standard operations + // on the rule (e.g., printing, comparison, visiting, etc.) + Module *Module `json:"-"` + + generatedBody bool + } + + // Head represents the head of a rule. 
+ Head struct { + Name Var `json:"name,omitempty"` + Reference Ref `json:"ref,omitempty"` + Args Args `json:"args,omitempty"` + Key *Term `json:"key,omitempty"` + Value *Term `json:"value,omitempty"` + Assign bool `json:"assign,omitempty"` + Location *Location `json:"location,omitempty"` + + keywords []tokens.Token + generatedValue bool + } + + // Args represents zero or more arguments to a rule. + Args []*Term + + // Body represents one or more expressions contained inside a rule or user + // function. + Body []*Expr + + // Expr represents a single expression contained inside the body of a rule. + Expr struct { + With []*With `json:"with,omitempty"` + Terms interface{} `json:"terms"` + Index int `json:"index"` + Generated bool `json:"generated,omitempty"` + Negated bool `json:"negated,omitempty"` + Location *Location `json:"location,omitempty"` + + generatedFrom *Expr + generates []*Expr + } + + // SomeDecl represents a variable declaration statement. The symbols are variables. + SomeDecl struct { + Symbols []*Term `json:"symbols"` + Location *Location `json:"location,omitempty"` + } + + Every struct { + Key *Term `json:"key"` + Value *Term `json:"value"` + Domain *Term `json:"domain"` + Body Body `json:"body"` + Location *Location `json:"location,omitempty"` + } + + // With represents a modifier on an expression. + With struct { + Target *Term `json:"target"` + Value *Term `json:"value"` + Location *Location `json:"location,omitempty"` + } +) + +// SetModuleRegoVersion sets the RegoVersion for the Module. +func SetModuleRegoVersion(mod *Module, v RegoVersion) { + mod.regoVersion = v +} + +// Compare returns an integer indicating whether mod is less than, equal to, +// or greater than other. +func (mod *Module) Compare(other *Module) int { + if mod == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if cmp := mod.Package.Compare(other.Package); cmp != 0 { + return cmp + } + if cmp := importsCompare(mod.Imports, other.Imports); cmp != 0 { + return cmp + } + if cmp := annotationsCompare(mod.Annotations, other.Annotations); cmp != 0 { + return cmp + } + return rulesCompare(mod.Rules, other.Rules) +} + +// Copy returns a deep copy of mod. +func (mod *Module) Copy() *Module { + cpy := *mod + cpy.Rules = make([]*Rule, len(mod.Rules)) + + nodes := make(map[Node]Node, len(mod.Rules)+len(mod.Imports)+1 /* package */) + + for i := range mod.Rules { + cpy.Rules[i] = mod.Rules[i].Copy() + cpy.Rules[i].Module = &cpy + nodes[mod.Rules[i]] = cpy.Rules[i] + } + + cpy.Imports = make([]*Import, len(mod.Imports)) + for i := range mod.Imports { + cpy.Imports[i] = mod.Imports[i].Copy() + nodes[mod.Imports[i]] = cpy.Imports[i] + } + + cpy.Package = mod.Package.Copy() + nodes[mod.Package] = cpy.Package + + cpy.Annotations = make([]*Annotations, len(mod.Annotations)) + for i, a := range mod.Annotations { + cpy.Annotations[i] = a.Copy(nodes[a.node]) + } + + cpy.Comments = make([]*Comment, len(mod.Comments)) + for i := range mod.Comments { + cpy.Comments[i] = mod.Comments[i].Copy() + } + + cpy.stmts = make([]Statement, len(mod.stmts)) + for i := range mod.stmts { + cpy.stmts[i] = nodes[mod.stmts[i]] + } + + return &cpy +} + +// Equal returns true if mod equals other. 
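Module.Copy performs a deep copy and rewires the copied rules' Module back-pointers, which matters when a compiler or rewriter mutates one tree while keeping the original intact. A sketch with a made-up module, again assuming OPA 1.x parse defaults:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	mod, err := ast.ParseModule("example.rego", `package example

import data.lib.helpers as h

allow if h.admin(input.user)
`)
	if err != nil {
		panic(err)
	}

	cpy := mod.Copy()
	fmt.Println(mod.Equal(cpy))             // true: deep copies compare equal
	fmt.Println(cpy.Rules[0].Module == cpy) // true: back-pointers follow the copy
}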
+func (mod *Module) Equal(other *Module) bool { + return mod.Compare(other) == 0 +} + +func (mod *Module) String() string { + byNode := map[Node][]*Annotations{} + for _, a := range mod.Annotations { + byNode[a.node] = append(byNode[a.node], a) + } + + appendAnnotationStrings := func(buf []string, node Node) []string { + if as, ok := byNode[node]; ok { + for i := range as { + buf = append(buf, "# METADATA") + buf = append(buf, "# "+as[i].String()) + } + } + return buf + } + + buf := []string{} + buf = appendAnnotationStrings(buf, mod.Package) + buf = append(buf, mod.Package.String()) + + if len(mod.Imports) > 0 { + buf = append(buf, "") + for _, imp := range mod.Imports { + buf = appendAnnotationStrings(buf, imp) + buf = append(buf, imp.String()) + } + } + if len(mod.Rules) > 0 { + buf = append(buf, "") + for _, rule := range mod.Rules { + buf = appendAnnotationStrings(buf, rule) + buf = append(buf, rule.stringWithOpts(toStringOpts{regoVersion: mod.regoVersion})) + } + } + return strings.Join(buf, "\n") +} + +// RuleSet returns a RuleSet containing named rules in the mod. +func (mod *Module) RuleSet(name Var) RuleSet { + rs := NewRuleSet() + for _, rule := range mod.Rules { + if rule.Head.Name.Equal(name) { + rs.Add(rule) + } + } + return rs +} + +// UnmarshalJSON parses bs and stores the result in mod. The rules in the module +// will have their module pointer set to mod. +func (mod *Module) UnmarshalJSON(bs []byte) error { + + // Declare a new type and use a type conversion to avoid recursively calling + // Module#UnmarshalJSON. + type module Module + + if err := util.UnmarshalJSON(bs, (*module)(mod)); err != nil { + return err + } + + WalkRules(mod, func(rule *Rule) bool { + rule.Module = mod + return false + }) + + return nil +} + +func (mod *Module) regoV1Compatible() bool { + return mod.regoVersion == RegoV1 || mod.regoVersion == RegoV0CompatV1 +} + +func (mod *Module) RegoVersion() RegoVersion { + return mod.regoVersion +} + +// SetRegoVersion sets the RegoVersion for the module. +// Note: Setting a rego-version that does not match the module's rego-version might have unintended consequences. +func (mod *Module) SetRegoVersion(v RegoVersion) { + mod.regoVersion = v +} + +// NewComment returns a new Comment object. +func NewComment(text []byte) *Comment { + return &Comment{ + Text: text, + } +} + +// Loc returns the location of the comment in the definition. +func (c *Comment) Loc() *Location { + if c == nil { + return nil + } + return c.Location +} + +// SetLoc sets the location on c. +func (c *Comment) SetLoc(loc *Location) { + c.Location = loc +} + +func (c *Comment) String() string { + return "#" + string(c.Text) +} + +// Copy returns a deep copy of c. +func (c *Comment) Copy() *Comment { + cpy := *c + cpy.Text = make([]byte, len(c.Text)) + copy(cpy.Text, c.Text) + return &cpy +} + +// Equal returns true if this comment equals the other comment. +// Unlike other equality checks on AST nodes, comment equality +// depends on location. +func (c *Comment) Equal(other *Comment) bool { + return c.Location.Equal(other.Location) && bytes.Equal(c.Text, other.Text) +} + +// Compare returns an integer indicating whether pkg is less than, equal to, +// or greater than other. +func (pkg *Package) Compare(other *Package) int { + return Compare(pkg.Path, other.Path) +} + +// Copy returns a deep copy of pkg. +func (pkg *Package) Copy() *Package { + cpy := *pkg + cpy.Path = pkg.Path.Copy() + return &cpy +} + +// Equal returns true if pkg is equal to other. 
+func (pkg *Package) Equal(other *Package) bool {
+	return pkg.Compare(other) == 0
+}
+
+// Loc returns the location of the Package in the definition.
+func (pkg *Package) Loc() *Location {
+	if pkg == nil {
+		return nil
+	}
+	return pkg.Location
+}
+
+// SetLoc sets the location on pkg.
+func (pkg *Package) SetLoc(loc *Location) {
+	pkg.Location = loc
+}
+
+func (pkg *Package) String() string {
+	if pkg == nil {
+		return "<illegal nil package>"
+	} else if len(pkg.Path) <= 1 {
+		return fmt.Sprintf("package <illegal path %q>", pkg.Path)
+	}
+	// Omit head as all packages have the DefaultRootDocument prepended at parse time.
+	path := make(Ref, len(pkg.Path)-1)
+	path[0] = VarTerm(string(pkg.Path[1].Value.(String)))
+	copy(path[1:], pkg.Path[2:])
+	return fmt.Sprintf("package %v", path)
+}
+
+func (pkg *Package) MarshalJSON() ([]byte, error) {
+	data := map[string]interface{}{
+		"path": pkg.Path,
+	}
+
+	if astJSON.GetOptions().MarshalOptions.IncludeLocation.Package {
+		if pkg.Location != nil {
+			data["location"] = pkg.Location
+		}
+	}
+
+	return json.Marshal(data)
+}
+
+// IsValidImportPath returns an error indicating if the import path is invalid.
+// If the import path is valid, err is nil.
+func IsValidImportPath(v Value) (err error) {
+	switch v := v.(type) {
+	case Var:
+		if !v.Equal(DefaultRootDocument.Value) && !v.Equal(InputRootDocument.Value) {
+			return fmt.Errorf("invalid path %v: path must begin with input or data", v)
+		}
+	case Ref:
+		if err := IsValidImportPath(v[0].Value); err != nil {
+			return fmt.Errorf("invalid path %v: path must begin with input or data", v)
+		}
+		for _, e := range v[1:] {
+			if _, ok := e.Value.(String); !ok {
+				return fmt.Errorf("invalid path %v: path elements must be strings", v)
+			}
+		}
+	default:
+		return fmt.Errorf("invalid path %v: path must be ref or var", v)
+	}
+	return nil
+}
+
+// Compare returns an integer indicating whether imp is less than, equal to,
+// or greater than other.
+func (imp *Import) Compare(other *Import) int {
+	if imp == nil {
+		if other == nil {
+			return 0
+		}
+		return -1
+	} else if other == nil {
+		return 1
+	}
+	if cmp := Compare(imp.Path, other.Path); cmp != 0 {
+		return cmp
+	}
+	return Compare(imp.Alias, other.Alias)
+}
+
+// Copy returns a deep copy of imp.
+func (imp *Import) Copy() *Import {
+	cpy := *imp
+	cpy.Path = imp.Path.Copy()
+	return &cpy
+}
+
+// Equal returns true if imp is equal to other.
+func (imp *Import) Equal(other *Import) bool {
+	return imp.Compare(other) == 0
+}
+
+// Loc returns the location of the Import in the definition.
+func (imp *Import) Loc() *Location {
+	if imp == nil {
+		return nil
+	}
+	return imp.Location
+}
+
+// SetLoc sets the location on imp.
+func (imp *Import) SetLoc(loc *Location) {
+	imp.Location = loc
+}
+
+// Name returns the variable that is used to refer to the imported virtual
+// document. This is the alias if defined otherwise the last element in the
+// path.
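IsValidImportPath above is the validation applied to import declarations: paths must be rooted at data or input and every trailing element must be a string. A quick sketch with made-up paths:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	fmt.Println(ast.IsValidImportPath(ast.MustParseRef("data.lib.helpers"))) // <nil>
	fmt.Println(ast.IsValidImportPath(ast.MustParseRef("input.request")))    // <nil>
	fmt.Println(ast.IsValidImportPath(ast.MustParseRef("foo.bar")))          // error: path must begin with input or data
}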
+func (imp *Import) Name() Var { + if len(imp.Alias) != 0 { + return imp.Alias + } + switch v := imp.Path.Value.(type) { + case Var: + return v + case Ref: + if len(v) == 1 { + return v[0].Value.(Var) + } + return Var(v[len(v)-1].Value.(String)) + } + panic("illegal import") +} + +func (imp *Import) String() string { + buf := []string{"import", imp.Path.String()} + if len(imp.Alias) > 0 { + buf = append(buf, "as "+imp.Alias.String()) + } + return strings.Join(buf, " ") +} + +func (imp *Import) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "path": imp.Path, + } + + if len(imp.Alias) != 0 { + data["alias"] = imp.Alias + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Import { + if imp.Location != nil { + data["location"] = imp.Location + } + } + + return json.Marshal(data) +} + +// Compare returns an integer indicating whether rule is less than, equal to, +// or greater than other. +func (rule *Rule) Compare(other *Rule) int { + if rule == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if cmp := rule.Head.Compare(other.Head); cmp != 0 { + return cmp + } + if cmp := util.Compare(rule.Default, other.Default); cmp != 0 { + return cmp + } + if cmp := rule.Body.Compare(other.Body); cmp != 0 { + return cmp + } + + if cmp := annotationsCompare(rule.Annotations, other.Annotations); cmp != 0 { + return cmp + } + + return rule.Else.Compare(other.Else) +} + +// Copy returns a deep copy of rule. +func (rule *Rule) Copy() *Rule { + cpy := *rule + cpy.Head = rule.Head.Copy() + cpy.Body = rule.Body.Copy() + + cpy.Annotations = make([]*Annotations, len(rule.Annotations)) + for i, a := range rule.Annotations { + cpy.Annotations[i] = a.Copy(&cpy) + } + + if cpy.Else != nil { + cpy.Else = rule.Else.Copy() + } + return &cpy +} + +// Equal returns true if rule is equal to other. +func (rule *Rule) Equal(other *Rule) bool { + return rule.Compare(other) == 0 +} + +// Loc returns the location of the Rule in the definition. +func (rule *Rule) Loc() *Location { + if rule == nil { + return nil + } + return rule.Location +} + +// SetLoc sets the location on rule. +func (rule *Rule) SetLoc(loc *Location) { + rule.Location = loc +} + +// Path returns a ref referring to the document produced by this rule. If rule +// is not contained in a module, this function panics. +// Deprecated: Poor handling of ref rules. Use `(*Rule).Ref()` instead. +func (rule *Rule) Path() Ref { + if rule.Module == nil { + panic("assertion failed") + } + return rule.Module.Package.Path.Extend(rule.Head.Ref().GroundPrefix()) +} + +// Ref returns a ref referring to the document produced by this rule. If rule +// is not contained in a module, this function panics. The returned ref may +// contain variables in the last position. 
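Import.Name, defined above, resolves to the alias when one is present and otherwise to the last path element; this is the variable other rules use to refer to the import. A minimal sketch with a made-up module:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	mod := ast.MustParseModule(`package example

import data.lib.helpers as h
import data.lib.tokens
`)

	fmt.Println(mod.Imports[0].Name()) // h
	fmt.Println(mod.Imports[1].Name()) // tokens
}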
+func (rule *Rule) Ref() Ref { + if rule.Module == nil { + panic("assertion failed") + } + return rule.Module.Package.Path.Extend(rule.Head.Ref()) +} + +func (rule *Rule) String() string { + regoVersion := DefaultRegoVersion + if rule.Module != nil { + regoVersion = rule.Module.RegoVersion() + } + return rule.stringWithOpts(toStringOpts{regoVersion: regoVersion}) +} + +type toStringOpts struct { + regoVersion RegoVersion +} + +func (o toStringOpts) RegoVersion() RegoVersion { + if o.regoVersion == RegoUndefined { + return DefaultRegoVersion + } + return o.regoVersion +} + +func (rule *Rule) stringWithOpts(opts toStringOpts) string { + buf := []string{} + if rule.Default { + buf = append(buf, "default") + } + buf = append(buf, rule.Head.stringWithOpts(opts)) + if !rule.Default { + switch opts.RegoVersion() { + case RegoV1, RegoV0CompatV1: + buf = append(buf, "if") + } + buf = append(buf, "{") + buf = append(buf, rule.Body.String()) + buf = append(buf, "}") + } + if rule.Else != nil { + buf = append(buf, rule.Else.elseString(opts)) + } + return strings.Join(buf, " ") +} + +func (rule *Rule) isFunction() bool { + return len(rule.Head.Args) > 0 +} + +func (rule *Rule) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "head": rule.Head, + "body": rule.Body, + } + + if rule.Default { + data["default"] = true + } + + if rule.Else != nil { + data["else"] = rule.Else + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Rule { + if rule.Location != nil { + data["location"] = rule.Location + } + } + + if len(rule.Annotations) != 0 { + data["annotations"] = rule.Annotations + } + + return json.Marshal(data) +} + +func (rule *Rule) elseString(opts toStringOpts) string { + var buf []string + + buf = append(buf, "else") + + value := rule.Head.Value + if value != nil { + buf = append(buf, "=") + buf = append(buf, value.String()) + } + + switch opts.RegoVersion() { + case RegoV1, RegoV0CompatV1: + buf = append(buf, "if") + } + + buf = append(buf, "{") + buf = append(buf, rule.Body.String()) + buf = append(buf, "}") + + if rule.Else != nil { + buf = append(buf, rule.Else.elseString(opts)) + } + + return strings.Join(buf, " ") +} + +// NewHead returns a new Head object. If args are provided, the first will be +// used for the key and the second will be used for the value. +func NewHead(name Var, args ...*Term) *Head { + head := &Head{ + Name: name, // backcompat + Reference: []*Term{NewTerm(name)}, + } + if len(args) == 0 { + return head + } + head.Key = args[0] + if len(args) == 1 { + return head + } + head.Value = args[1] + if head.Key != nil && head.Value != nil { + head.Reference = head.Reference.Append(args[0]) + } + return head +} + +// VarHead creates a head object, initializes its Name and Location and returns the new head. +// NOTE: The JSON options argument is no longer used, and kept only for backwards compatibility. +func VarHead(name Var, location *Location, _ *astJSON.Options) *Head { + h := NewHead(name) + h.Reference[0].Location = location + return h +} + +// RefHead returns a new Head object with the passed Ref. If args are provided, +// the first will be used for the value. +func RefHead(ref Ref, args ...*Term) *Head { + head := &Head{} + head.SetRef(ref) + if len(ref) < 2 { + head.Name = ref[0].Value.(Var) + } + if len(args) >= 1 { + head.Value = args[0] + } + return head +} + +// DocKind represents the collection of document types that can be produced by rules. 
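NewHead and RefHead support building rules programmatically instead of parsing them from text. The sketch below assembles a complete rule by hand; the rule shape and body are invented for illustration, and the printed form assumes the default (v1) Rego version since the rule is not attached to a module.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	// A single-element ref head; RefHead also fills in the legacy Name field.
	head := ast.RefHead(ast.Ref{ast.VarTerm("allow")}, ast.BooleanTerm(true))

	rule := &ast.Rule{
		Head: head,
		Body: ast.MustParseBody(`input.user = "admin"`),
	}

	fmt.Println(rule) // allow = true if { input.user = "admin" }
}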
+type DocKind int + +const ( + // CompleteDoc represents a document that is completely defined by the rule. + CompleteDoc = iota + + // PartialSetDoc represents a set document that is partially defined by the rule. + PartialSetDoc + + // PartialObjectDoc represents an object document that is partially defined by the rule. + PartialObjectDoc +) // TODO(sr): Deprecate? + +// DocKind returns the type of document produced by this rule. +func (head *Head) DocKind() DocKind { + if head.Key != nil { + if head.Value != nil { + return PartialObjectDoc + } + return PartialSetDoc + } + return CompleteDoc +} + +type RuleKind int + +const ( + SingleValue = iota + MultiValue +) + +// RuleKind returns the type of rule this is +func (head *Head) RuleKind() RuleKind { + // NOTE(sr): This is bit verbose, since the key is irrelevant for single vs + // multi value, but as good a spot as to assert the invariant. + switch { + case head.Value != nil: + return SingleValue + case head.Key != nil: + return MultiValue + default: + panic("unreachable") + } +} + +// Ref returns the Ref of the rule. If it doesn't have one, it's filled in +// via the Head's Name. +func (head *Head) Ref() Ref { + if len(head.Reference) > 0 { + return head.Reference + } + return Ref{&Term{Value: head.Name}} +} + +// SetRef can be used to set a rule head's Reference +func (head *Head) SetRef(r Ref) { + head.Reference = r +} + +// Compare returns an integer indicating whether head is less than, equal to, +// or greater than other. +func (head *Head) Compare(other *Head) int { + if head == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if head.Assign && !other.Assign { + return -1 + } else if !head.Assign && other.Assign { + return 1 + } + if cmp := Compare(head.Args, other.Args); cmp != 0 { + return cmp + } + if cmp := Compare(head.Reference, other.Reference); cmp != 0 { + return cmp + } + if cmp := Compare(head.Name, other.Name); cmp != 0 { + return cmp + } + if cmp := Compare(head.Key, other.Key); cmp != 0 { + return cmp + } + return Compare(head.Value, other.Value) +} + +// Copy returns a deep copy of head. +func (head *Head) Copy() *Head { + cpy := *head + cpy.Reference = head.Reference.Copy() + cpy.Args = head.Args.Copy() + cpy.Key = head.Key.Copy() + cpy.Value = head.Value.Copy() + cpy.keywords = nil + return &cpy +} + +// Equal returns true if this head equals other. 
+func (head *Head) Equal(other *Head) bool { + return head.Compare(other) == 0 +} + +func (head *Head) String() string { + return head.stringWithOpts(toStringOpts{}) +} + +func (head *Head) stringWithOpts(opts toStringOpts) string { + buf := strings.Builder{} + buf.WriteString(head.Ref().String()) + containsAdded := false + + switch { + case len(head.Args) != 0: + buf.WriteString(head.Args.String()) + case len(head.Reference) == 1 && head.Key != nil: + switch opts.RegoVersion() { + case RegoV0: + buf.WriteRune('[') + buf.WriteString(head.Key.String()) + buf.WriteRune(']') + default: + containsAdded = true + buf.WriteString(" contains ") + buf.WriteString(head.Key.String()) + } + } + if head.Value != nil { + if head.Assign { + buf.WriteString(" := ") + } else { + buf.WriteString(" = ") + } + buf.WriteString(head.Value.String()) + } else if !containsAdded && head.Name == "" && head.Key != nil { + buf.WriteString(" contains ") + buf.WriteString(head.Key.String()) + } + return buf.String() +} + +func (head *Head) MarshalJSON() ([]byte, error) { + var loc *Location + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Head && head.Location != nil { + loc = head.Location + } + + // NOTE(sr): we do this to override the rendering of `head.Reference`. + // It's still what'll be used via the default means of encoding/json + // for unmarshaling a json object into a Head struct! + type h Head + return json.Marshal(struct { + h + Ref Ref `json:"ref"` + Location *Location `json:"location,omitempty"` + }{ + h: h(*head), + Ref: head.Ref(), + Location: loc, + }) +} + +// Vars returns a set of vars found in the head. +func (head *Head) Vars() VarSet { + vis := &VarVisitor{vars: VarSet{}} + // TODO: improve test coverage for this. + if head.Args != nil { + vis.Walk(head.Args) + } + if head.Key != nil { + vis.Walk(head.Key) + } + if head.Value != nil { + vis.Walk(head.Value) + } + if len(head.Reference) > 0 { + vis.Walk(head.Reference[1:]) + } + return vis.vars +} + +// Loc returns the Location of head. +func (head *Head) Loc() *Location { + if head == nil { + return nil + } + return head.Location +} + +// SetLoc sets the location on head. +func (head *Head) SetLoc(loc *Location) { + head.Location = loc +} + +func (head *Head) HasDynamicRef() bool { + pos := head.Reference.Dynamic() + // Ref is dynamic if it has one non-constant term that isn't the first or last term or if it's a partial set rule. + return pos > 0 && (pos < len(head.Reference)-1 || head.RuleKind() == MultiValue) +} + +// Copy returns a deep copy of a. +func (a Args) Copy() Args { + cpy := Args{} + for _, t := range a { + cpy = append(cpy, t.Copy()) + } + return cpy +} + +func (a Args) String() string { + buf := make([]string, 0, len(a)) + for _, t := range a { + buf = append(buf, t.String()) + } + return "(" + strings.Join(buf, ", ") + ")" +} + +// Loc returns the Location of a. +func (a Args) Loc() *Location { + if len(a) == 0 { + return nil + } + return a[0].Location +} + +// SetLoc sets the location on a. +func (a Args) SetLoc(loc *Location) { + if len(a) != 0 { + a[0].SetLocation(loc) + } +} + +// Vars returns a set of vars that appear in a. +func (a Args) Vars() VarSet { + vis := &VarVisitor{vars: VarSet{}} + vis.Walk(a) + return vis.vars +} + +// NewBody returns a new Body containing the given expressions. The indices of +// the immediate expressions will be reset. 
+func NewBody(exprs ...*Expr) Body { + for i, expr := range exprs { + expr.Index = i + } + return Body(exprs) +} + +// MarshalJSON returns JSON encoded bytes representing body. +func (body Body) MarshalJSON() ([]byte, error) { + // Serialize empty Body to empty array. This handles both the empty case and the + // nil case (whereas by default the result would be null if body was nil.) + if len(body) == 0 { + return []byte(`[]`), nil + } + ret, err := json.Marshal([]*Expr(body)) + return ret, err +} + +// Append adds the expr to the body and updates the expr's index accordingly. +func (body *Body) Append(expr *Expr) { + n := len(*body) + expr.Index = n + *body = append(*body, expr) +} + +// Set sets the expr in the body at the specified position and updates the +// expr's index accordingly. +func (body Body) Set(expr *Expr, pos int) { + body[pos] = expr + expr.Index = pos +} + +// Compare returns an integer indicating whether body is less than, equal to, +// or greater than other. +// +// If body is a subset of other, it is considered less than (and vice versa). +func (body Body) Compare(other Body) int { + minLen := len(body) + if len(other) < minLen { + minLen = len(other) + } + for i := 0; i < minLen; i++ { + if cmp := body[i].Compare(other[i]); cmp != 0 { + return cmp + } + } + if len(body) < len(other) { + return -1 + } + if len(other) < len(body) { + return 1 + } + return 0 +} + +// Copy returns a deep copy of body. +func (body Body) Copy() Body { + cpy := make(Body, len(body)) + for i := range body { + cpy[i] = body[i].Copy() + } + return cpy +} + +// Contains returns true if this body contains the given expression. +func (body Body) Contains(x *Expr) bool { + for _, e := range body { + if e.Equal(x) { + return true + } + } + return false +} + +// Equal returns true if this Body is equal to the other Body. +func (body Body) Equal(other Body) bool { + return body.Compare(other) == 0 +} + +// Hash returns the hash code for the Body. +func (body Body) Hash() int { + s := 0 + for _, e := range body { + s += e.Hash() + } + return s +} + +// IsGround returns true if all of the expressions in the Body are ground. +func (body Body) IsGround() bool { + for _, e := range body { + if !e.IsGround() { + return false + } + } + return true +} + +// Loc returns the location of the Body in the definition. +func (body Body) Loc() *Location { + if len(body) == 0 { + return nil + } + return body[0].Location +} + +// SetLoc sets the location on body. +func (body Body) SetLoc(loc *Location) { + if len(body) != 0 { + body[0].SetLocation(loc) + } +} + +func (body Body) String() string { + buf := make([]string, 0, len(body)) + for _, v := range body { + buf = append(buf, v.String()) + } + return strings.Join(buf, "; ") +} + +// Vars returns a VarSet containing variables in body. The params can be set to +// control which vars are included. +func (body Body) Vars(params VarVisitorParams) VarSet { + vis := NewVarVisitor().WithParams(params) + vis.Walk(body) + return vis.Vars() +} + +// NewExpr returns a new Expr object. +func NewExpr(terms interface{}) *Expr { + switch terms.(type) { + case *SomeDecl, *Every, *Term, []*Term: // ok + default: + panic("unreachable") + } + return &Expr{ + Negated: false, + Terms: terms, + Index: 0, + With: nil, + } +} + +// Complement returns a copy of this expression with the negation flag flipped. 
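Body maintains each expression's Index, so expressions should be added through Append (or NewBody) rather than plain slice appends. A small sketch:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	body := ast.NewBody(ast.MustParseExpr(`input.user == "admin"`))

	extra := ast.MustParseExpr(`input.active`)
	body.Append(extra) // sets extra.Index to its position in the body

	fmt.Println(len(body), body.Contains(extra)) // 2 true
	fmt.Println(extra.Index)                     // 1
}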
+func (expr *Expr) Complement() *Expr { + cpy := *expr + cpy.Negated = !cpy.Negated + return &cpy +} + +// ComplementNoWith returns a copy of this expression with the negation flag flipped +// and the with modifier removed. This is the same as calling .Complement().NoWith() +// but without making an intermediate copy. +func (expr *Expr) ComplementNoWith() *Expr { + cpy := *expr + cpy.Negated = !cpy.Negated + cpy.With = nil + return &cpy +} + +// Equal returns true if this Expr equals the other Expr. +func (expr *Expr) Equal(other *Expr) bool { + return expr.Compare(other) == 0 +} + +// Compare returns an integer indicating whether expr is less than, equal to, +// or greater than other. +// +// Expressions are compared as follows: +// +// 1. Declarations are always less than other expressions. +// 2. Preceding expression (by Index) is always less than the other expression. +// 3. Non-negated expressions are always less than negated expressions. +// 4. Single term expressions are always less than built-in expressions. +// +// Otherwise, the expression terms are compared normally. If both expressions +// have the same terms, the modifiers are compared. +func (expr *Expr) Compare(other *Expr) int { + + if expr == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + + o1 := expr.sortOrder() + o2 := other.sortOrder() + if o1 < o2 { + return -1 + } else if o2 < o1 { + return 1 + } + + switch { + case expr.Index < other.Index: + return -1 + case expr.Index > other.Index: + return 1 + } + + switch { + case expr.Negated && !other.Negated: + return 1 + case !expr.Negated && other.Negated: + return -1 + } + + switch t := expr.Terms.(type) { + case *Term: + if cmp := Compare(t.Value, other.Terms.(*Term).Value); cmp != 0 { + return cmp + } + case []*Term: + if cmp := termSliceCompare(t, other.Terms.([]*Term)); cmp != 0 { + return cmp + } + case *SomeDecl: + if cmp := Compare(t, other.Terms.(*SomeDecl)); cmp != 0 { + return cmp + } + case *Every: + if cmp := Compare(t, other.Terms.(*Every)); cmp != 0 { + return cmp + } + } + + return withSliceCompare(expr.With, other.With) +} + +func (expr *Expr) sortOrder() int { + switch expr.Terms.(type) { + case *SomeDecl: + return 0 + case *Term: + return 1 + case []*Term: + return 2 + case *Every: + return 3 + } + return -1 +} + +// CopyWithoutTerms returns a deep copy of expr without its Terms +func (expr *Expr) CopyWithoutTerms() *Expr { + cpy := *expr + + if expr.With != nil { + cpy.With = make([]*With, len(expr.With)) + for i := range expr.With { + cpy.With[i] = expr.With[i].Copy() + } + } + + return &cpy +} + +// Copy returns a deep copy of expr. +func (expr *Expr) Copy() *Expr { + + cpy := expr.CopyWithoutTerms() + + switch ts := expr.Terms.(type) { + case *SomeDecl: + cpy.Terms = ts.Copy() + case []*Term: + cpyTs := make([]*Term, len(ts)) + for i := range ts { + cpyTs[i] = ts[i].Copy() + } + cpy.Terms = cpyTs + case *Term: + cpy.Terms = ts.Copy() + case *Every: + cpy.Terms = ts.Copy() + } + + return cpy +} + +// Hash returns the hash code of the Expr. +func (expr *Expr) Hash() int { + s := expr.Index + switch ts := expr.Terms.(type) { + case *SomeDecl: + s += ts.Hash() + case []*Term: + for _, t := range ts { + s += t.Value.Hash() + } + case *Term: + s += ts.Value.Hash() + } + if expr.Negated { + s++ + } + for _, w := range expr.With { + s += w.Hash() + } + return s +} + +// IncludeWith returns a copy of expr with the with modifier appended. 
+func (expr *Expr) IncludeWith(target *Term, value *Term) *Expr { + cpy := *expr + cpy.With = append(cpy.With, &With{Target: target, Value: value}) + return &cpy +} + +// NoWith returns a copy of expr where the with modifier has been removed. +func (expr *Expr) NoWith() *Expr { + cpy := *expr + cpy.With = nil + return &cpy +} + +// IsEquality returns true if this is an equality expression. +func (expr *Expr) IsEquality() bool { + return isGlobalBuiltin(expr, Var(Equality.Name)) +} + +// IsAssignment returns true if this an assignment expression. +func (expr *Expr) IsAssignment() bool { + return isGlobalBuiltin(expr, Var(Assign.Name)) +} + +// IsCall returns true if this expression calls a function. +func (expr *Expr) IsCall() bool { + _, ok := expr.Terms.([]*Term) + return ok +} + +// IsEvery returns true if this expression is an 'every' expression. +func (expr *Expr) IsEvery() bool { + _, ok := expr.Terms.(*Every) + return ok +} + +// IsSome returns true if this expression is a 'some' expression. +func (expr *Expr) IsSome() bool { + _, ok := expr.Terms.(*SomeDecl) + return ok +} + +// Operator returns the name of the function or built-in this expression refers +// to. If this expression is not a function call, returns nil. +func (expr *Expr) Operator() Ref { + op := expr.OperatorTerm() + if op == nil { + return nil + } + return op.Value.(Ref) +} + +// OperatorTerm returns the name of the function or built-in this expression +// refers to. If this expression is not a function call, returns nil. +func (expr *Expr) OperatorTerm() *Term { + terms, ok := expr.Terms.([]*Term) + if !ok || len(terms) == 0 { + return nil + } + return terms[0] +} + +// Operand returns the term at the zero-based pos. If the expr does not include +// at least pos+1 terms, this function returns nil. +func (expr *Expr) Operand(pos int) *Term { + terms, ok := expr.Terms.([]*Term) + if !ok { + return nil + } + idx := pos + 1 + if idx < len(terms) { + return terms[idx] + } + return nil +} + +// Operands returns the built-in function operands. +func (expr *Expr) Operands() []*Term { + terms, ok := expr.Terms.([]*Term) + if !ok { + return nil + } + return terms[1:] +} + +// IsGround returns true if all of the expression terms are ground. +func (expr *Expr) IsGround() bool { + switch ts := expr.Terms.(type) { + case []*Term: + for _, t := range ts[1:] { + if !t.IsGround() { + return false + } + } + case *Term: + return ts.IsGround() + } + return true +} + +// SetOperator sets the expr's operator and returns the expr itself. If expr is +// not a call expr, this function will panic. +func (expr *Expr) SetOperator(term *Term) *Expr { + expr.Terms.([]*Term)[0] = term + return expr +} + +// SetLocation sets the expr's location and returns the expr itself. +func (expr *Expr) SetLocation(loc *Location) *Expr { + expr.Location = loc + return expr +} + +// Loc returns the Location of expr. +func (expr *Expr) Loc() *Location { + if expr == nil { + return nil + } + return expr.Location +} + +// SetLoc sets the location on expr. 
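Operator, Operand and Operands give positional access to call expressions; Operand is zero-based and skips the operator term. A quick sketch with a made-up expression:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	expr := ast.MustParseExpr(`count(input.items) > 3`)

	fmt.Println(expr.IsCall())        // true
	fmt.Println(expr.Operator())      // gt
	fmt.Println(expr.Operand(0))      // count(input.items)
	fmt.Println(len(expr.Operands())) // 2
}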
+func (expr *Expr) SetLoc(loc *Location) { + expr.SetLocation(loc) +} + +func (expr *Expr) String() string { + buf := make([]string, 0, 2+len(expr.With)) + if expr.Negated { + buf = append(buf, "not") + } + switch t := expr.Terms.(type) { + case []*Term: + if expr.IsEquality() && validEqAssignArgCount(expr) { + buf = append(buf, fmt.Sprintf("%v %v %v", t[1], Equality.Infix, t[2])) + } else { + buf = append(buf, Call(t).String()) + } + case fmt.Stringer: + buf = append(buf, t.String()) + } + + for i := range expr.With { + buf = append(buf, expr.With[i].String()) + } + + return strings.Join(buf, " ") +} + +func (expr *Expr) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "terms": expr.Terms, + "index": expr.Index, + } + + if len(expr.With) > 0 { + data["with"] = expr.With + } + + if expr.Generated { + data["generated"] = true + } + + if expr.Negated { + data["negated"] = true + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Expr { + if expr.Location != nil { + data["location"] = expr.Location + } + } + + return json.Marshal(data) +} + +// UnmarshalJSON parses the byte array and stores the result in expr. +func (expr *Expr) UnmarshalJSON(bs []byte) error { + v := map[string]interface{}{} + if err := util.UnmarshalJSON(bs, &v); err != nil { + return err + } + return unmarshalExpr(expr, v) +} + +// Vars returns a VarSet containing variables in expr. The params can be set to +// control which vars are included. +func (expr *Expr) Vars(params VarVisitorParams) VarSet { + vis := NewVarVisitor().WithParams(params) + vis.Walk(expr) + return vis.Vars() +} + +// NewBuiltinExpr creates a new Expr object with the supplied terms. +// The builtin operator must be the first term. +func NewBuiltinExpr(terms ...*Term) *Expr { + return &Expr{Terms: terms} +} + +func (expr *Expr) CogeneratedExprs() []*Expr { + visited := map[*Expr]struct{}{} + visitCogeneratedExprs(expr, func(e *Expr) bool { + if expr.Equal(e) { + return true + } + if _, ok := visited[e]; ok { + return true + } + visited[e] = struct{}{} + return false + }) + + result := make([]*Expr, 0, len(visited)) + for e := range visited { + result = append(result, e) + } + return result +} + +func (expr *Expr) BaseCogeneratedExpr() *Expr { + if expr.generatedFrom == nil { + return expr + } + return expr.generatedFrom.BaseCogeneratedExpr() +} + +func visitCogeneratedExprs(expr *Expr, f func(*Expr) bool) { + if parent := expr.generatedFrom; parent != nil { + if stop := f(parent); !stop { + visitCogeneratedExprs(parent, f) + } + } + for _, child := range expr.generates { + if stop := f(child); !stop { + visitCogeneratedExprs(child, f) + } + } +} + +func (d *SomeDecl) String() string { + if call, ok := d.Symbols[0].Value.(Call); ok { + if len(call) == 4 { + return "some " + call[1].String() + ", " + call[2].String() + " in " + call[3].String() + } + return "some " + call[1].String() + " in " + call[2].String() + } + buf := make([]string, len(d.Symbols)) + for i := range buf { + buf[i] = d.Symbols[i].String() + } + return "some " + strings.Join(buf, ", ") +} + +// SetLoc sets the Location on d. +func (d *SomeDecl) SetLoc(loc *Location) { + d.Location = loc +} + +// Loc returns the Location of d. +func (d *SomeDecl) Loc() *Location { + return d.Location +} + +// Copy returns a deep copy of d. +func (d *SomeDecl) Copy() *SomeDecl { + cpy := *d + cpy.Symbols = termSliceCopy(d.Symbols) + return &cpy +} + +// Compare returns an integer indicating whether d is less than, equal to, or +// greater than other. 
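IncludeWith, from the Expr helpers above, copies an expression and appends a with modifier while leaving the original untouched; Expr.String then renders the modifier in "with ... as ..." form. A sketch with an invented target and value:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	base := ast.MustParseExpr(`data.policy.allow`)

	mocked := base.IncludeWith(ast.MustParseTerm("input.user"), ast.StringTerm("admin"))

	fmt.Println(mocked) // data.policy.allow with input.user as "admin"
	fmt.Println(base)   // data.policy.allow (unchanged)
}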
+func (d *SomeDecl) Compare(other *SomeDecl) int { + return termSliceCompare(d.Symbols, other.Symbols) +} + +// Hash returns a hash code of d. +func (d *SomeDecl) Hash() int { + return termSliceHash(d.Symbols) +} + +func (d *SomeDecl) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "symbols": d.Symbols, + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.SomeDecl { + if d.Location != nil { + data["location"] = d.Location + } + } + + return json.Marshal(data) +} + +func (q *Every) String() string { + if q.Key != nil { + return fmt.Sprintf("every %s, %s in %s { %s }", + q.Key, + q.Value, + q.Domain, + q.Body) + } + return fmt.Sprintf("every %s in %s { %s }", + q.Value, + q.Domain, + q.Body) +} + +func (q *Every) Loc() *Location { + return q.Location +} + +func (q *Every) SetLoc(l *Location) { + q.Location = l +} + +// Copy returns a deep copy of d. +func (q *Every) Copy() *Every { + cpy := *q + cpy.Key = q.Key.Copy() + cpy.Value = q.Value.Copy() + cpy.Domain = q.Domain.Copy() + cpy.Body = q.Body.Copy() + return &cpy +} + +func (q *Every) Compare(other *Every) int { + for _, terms := range [][2]*Term{ + {q.Key, other.Key}, + {q.Value, other.Value}, + {q.Domain, other.Domain}, + } { + if d := Compare(terms[0], terms[1]); d != 0 { + return d + } + } + return q.Body.Compare(other.Body) +} + +// KeyValueVars returns the key and val arguments of an `every` +// expression, if they are non-nil and not wildcards. +func (q *Every) KeyValueVars() VarSet { + vis := &VarVisitor{vars: VarSet{}} + if q.Key != nil { + vis.Walk(q.Key) + } + vis.Walk(q.Value) + return vis.vars +} + +func (q *Every) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "key": q.Key, + "value": q.Value, + "domain": q.Domain, + "body": q.Body, + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.Every { + if q.Location != nil { + data["location"] = q.Location + } + } + + return json.Marshal(data) +} + +func (w *With) String() string { + return "with " + w.Target.String() + " as " + w.Value.String() +} + +// Equal returns true if this With is equals the other With. +func (w *With) Equal(other *With) bool { + return Compare(w, other) == 0 +} + +// Compare returns an integer indicating whether w is less than, equal to, or +// greater than other. +func (w *With) Compare(other *With) int { + if w == nil { + if other == nil { + return 0 + } + return -1 + } else if other == nil { + return 1 + } + if cmp := Compare(w.Target, other.Target); cmp != 0 { + return cmp + } + return Compare(w.Value, other.Value) +} + +// Copy returns a deep copy of w. +func (w *With) Copy() *With { + cpy := *w + cpy.Value = w.Value.Copy() + cpy.Target = w.Target.Copy() + return &cpy +} + +// Hash returns the hash code of the With. +func (w With) Hash() int { + return w.Target.Hash() + w.Value.Hash() +} + +// SetLocation sets the location on w. +func (w *With) SetLocation(loc *Location) *With { + w.Location = loc + return w +} + +// Loc returns the Location of w. +func (w *With) Loc() *Location { + if w == nil { + return nil + } + return w.Location +} + +// SetLoc sets the location on w. +func (w *With) SetLoc(loc *Location) { + w.Location = loc +} + +func (w *With) MarshalJSON() ([]byte, error) { + data := map[string]interface{}{ + "target": w.Target, + "value": w.Value, + } + + if astJSON.GetOptions().MarshalOptions.IncludeLocation.With { + if w.Location != nil { + data["location"] = w.Location + } + } + + return json.Marshal(data) +} + +// Copy returns a deep copy of the AST node x. 
If x is not an AST node, x is returned unmodified. +func Copy(x interface{}) interface{} { + switch x := x.(type) { + case *Module: + return x.Copy() + case *Package: + return x.Copy() + case *Import: + return x.Copy() + case *Rule: + return x.Copy() + case *Head: + return x.Copy() + case Args: + return x.Copy() + case Body: + return x.Copy() + case *Expr: + return x.Copy() + case *With: + return x.Copy() + case *SomeDecl: + return x.Copy() + case *Every: + return x.Copy() + case *Term: + return x.Copy() + case *ArrayComprehension: + return x.Copy() + case *SetComprehension: + return x.Copy() + case *ObjectComprehension: + return x.Copy() + case Set: + return x.Copy() + case *object: + return x.Copy() + case *Array: + return x.Copy() + case Ref: + return x.Copy() + case Call: + return x.Copy() + case *Comment: + return x.Copy() + } + return x +} + +// RuleSet represents a collection of rules that produce a virtual document. +type RuleSet []*Rule + +// NewRuleSet returns a new RuleSet containing the given rules. +func NewRuleSet(rules ...*Rule) RuleSet { + rs := make(RuleSet, 0, len(rules)) + for _, rule := range rules { + rs.Add(rule) + } + return rs +} + +// Add inserts the rule into rs. +func (rs *RuleSet) Add(rule *Rule) { + for _, exist := range *rs { + if exist.Equal(rule) { + return + } + } + *rs = append(*rs, rule) +} + +// Contains returns true if rs contains rule. +func (rs RuleSet) Contains(rule *Rule) bool { + for i := range rs { + if rs[i].Equal(rule) { + return true + } + } + return false +} + +// Diff returns a new RuleSet containing rules in rs that are not in other. +func (rs RuleSet) Diff(other RuleSet) RuleSet { + result := NewRuleSet() + for i := range rs { + if !other.Contains(rs[i]) { + result.Add(rs[i]) + } + } + return result +} + +// Equal returns true if rs equals other. +func (rs RuleSet) Equal(other RuleSet) bool { + return len(rs.Diff(other)) == 0 && len(other.Diff(rs)) == 0 +} + +// Merge returns a ruleset containing the union of rules from rs an other. +func (rs RuleSet) Merge(other RuleSet) RuleSet { + result := NewRuleSet() + for i := range rs { + result.Add(rs[i]) + } + for i := range other { + result.Add(other[i]) + } + return result +} + +func (rs RuleSet) String() string { + buf := make([]string, 0, len(rs)) + for _, rule := range rs { + buf = append(buf, rule.String()) + } + return "{" + strings.Join(buf, ", ") + "}" +} + +// Returns true if the equality or assignment expression referred to by expr +// has a valid number of arguments. +func validEqAssignArgCount(expr *Expr) bool { + return len(expr.Operands()) == 2 +} + +// this function checks if the expr refers to a non-namespaced (global) built-in +// function like eq, gt, plus, etc. +func isGlobalBuiltin(expr *Expr, name Var) bool { + terms, ok := expr.Terms.([]*Term) + if !ok { + return false + } + + // NOTE(tsandall): do not use Term#Equal or Value#Compare to avoid + // allocation here. + ref, ok := terms[0].Value.(Ref) + if !ok || len(ref) != 1 { + return false + } + if head, ok := ref[0].Value.(Var); ok { + return head.Equal(name) + } + return false +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go new file mode 100644 index 000000000..b4f05ad50 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/pretty.go @@ -0,0 +1,82 @@ +// Copyright 2018 The OPA Authors. All rights reserved. 
+// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "io" + "strings" +) + +// Pretty writes a pretty representation of the AST rooted at x to w. +// +// This is function is intended for debug purposes when inspecting ASTs. +func Pretty(w io.Writer, x interface{}) { + pp := &prettyPrinter{ + depth: -1, + w: w, + } + NewBeforeAfterVisitor(pp.Before, pp.After).Walk(x) +} + +type prettyPrinter struct { + depth int + w io.Writer +} + +func (pp *prettyPrinter) Before(x interface{}) bool { + switch x.(type) { + case *Term: + default: + pp.depth++ + } + + switch x := x.(type) { + case *Term: + return false + case Args: + if len(x) == 0 { + return false + } + pp.writeType(x) + case *Expr: + extras := []string{} + if x.Negated { + extras = append(extras, "negated") + } + extras = append(extras, fmt.Sprintf("index=%d", x.Index)) + pp.writeIndent("%v %v", TypeName(x), strings.Join(extras, " ")) + case Null, Boolean, Number, String, Var: + pp.writeValue(x) + default: + pp.writeType(x) + } + return false +} + +func (pp *prettyPrinter) After(x interface{}) { + switch x.(type) { + case *Term: + default: + pp.depth-- + } +} + +func (pp *prettyPrinter) writeValue(x interface{}) { + pp.writeIndent(fmt.Sprint(x)) +} + +func (pp *prettyPrinter) writeType(x interface{}) { + pp.writeIndent(TypeName(x)) +} + +func (pp *prettyPrinter) writeIndent(f string, a ...interface{}) { + pad := strings.Repeat(" ", pp.depth) + pp.write(pad+f, a...) +} + +func (pp *prettyPrinter) write(f string, a ...interface{}) { + fmt.Fprintf(pp.w, f+"\n", a...) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go similarity index 90% rename from constraint/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go index 9fa1c6f9b..8b757ecc3 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/rego_v1.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/rego_v1.go @@ -3,7 +3,7 @@ package ast import ( "fmt" - "github.com/open-policy-agent/opa/ast/internal/tokens" + "github.com/open-policy-agent/opa/v1/ast/internal/tokens" ) func checkDuplicateImports(modules []*Module) (errors Errors) { @@ -50,9 +50,12 @@ func checkRootDocumentOverrides(node interface{}) Errors { WalkExprs(node, func(expr *Expr) bool { if expr.IsAssignment() { - name := expr.Operand(0).String() - if RootDocumentRefs.Contains(RefTerm(VarTerm(name))) { - errors = append(errors, NewError(CompileErr, expr.Location, "variables must not shadow %v (use a different variable name)", name)) + // assign() can be called directly, so we need to assert its given first operand exists before checking its name. 
+ if nameOp := expr.Operand(0); nameOp != nil { + name := nameOp.String() + if RootDocumentRefs.Contains(RefTerm(VarTerm(name))) { + errors = append(errors, NewError(CompileErr, expr.Location, "variables must not shadow %v (use a different variable name)", name)) + } } } return false @@ -189,7 +192,7 @@ func checkRegoV1Rule(rule *Rule, opts RegoCheckOptions) Errors { var errs Errors if opts.NoKeywordsAsRuleNames && IsKeywordInRegoVersion(rule.Head.Name.String(), RegoV1) { - errs = append(errs, NewError(ParseErr, rule.Location, fmt.Sprintf("%s keyword cannot be used for rule name", rule.Head.Name.String()))) + errs = append(errs, NewError(ParseErr, rule.Location, "%s keyword cannot be used for rule name", rule.Head.Name.String())) } if opts.RequireRuleBodyOrValue && rule.generatedBody && rule.Head.generatedValue { errs = append(errs, NewError(ParseErr, rule.Location, "%s must have value assignment and/or body declaration", t)) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go new file mode 100644 index 000000000..e84a147a4 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/schema.go @@ -0,0 +1,63 @@ +// Copyright 2021 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +// SchemaSet holds a map from a path to a schema. +type SchemaSet struct { + m *util.HashMap +} + +// NewSchemaSet returns an empty SchemaSet. +func NewSchemaSet() *SchemaSet { + + eqFunc := func(a, b util.T) bool { + return a.(Ref).Equal(b.(Ref)) + } + + hashFunc := func(x util.T) int { return x.(Ref).Hash() } + + return &SchemaSet{ + m: util.NewHashMap(eqFunc, hashFunc), + } +} + +// Put inserts a raw schema into the set. +func (ss *SchemaSet) Put(path Ref, raw interface{}) { + ss.m.Put(path, raw) +} + +// Get returns the raw schema identified by the path. +func (ss *SchemaSet) Get(path Ref) interface{} { + if ss == nil { + return nil + } + x, ok := ss.m.Get(path) + if !ok { + return nil + } + return x +} + +func loadSchema(raw interface{}, allowNet []string) (types.Type, error) { + + jsonSchema, err := compileSchema(raw, allowNet) + if err != nil { + return nil, err + } + + tpe, err := newSchemaParser().parseSchema(jsonSchema.RootSchema) + if err != nil { + return nil, fmt.Errorf("type checking: %w", err) + } + + return tpe, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go new file mode 100644 index 000000000..40d66753f --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/strings.go @@ -0,0 +1,54 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "reflect" + "strings" +) + +// TypeName returns a human readable name for the AST element type. +func TypeName(x interface{}) string { + if _, ok := x.(*lazyObj); ok { + return "object" + } + return strings.ToLower(reflect.Indirect(reflect.ValueOf(x)).Type().Name()) +} + +// ValueName returns a human readable name for the AST Value type. 
+// This is preferrable over calling TypeName when the argument is known to be +// a Value, as this doesn't require reflection (= heap allocations). +func ValueName(x Value) string { + switch x.(type) { + case String: + return "string" + case Boolean: + return "boolean" + case Number: + return "number" + case Null: + return "null" + case Var: + return "var" + case Object: + return "object" + case Set: + return "set" + case Ref: + return "ref" + case Call: + return "call" + case *Array: + return "array" + case *ArrayComprehension: + return "arraycomprehension" + case *ObjectComprehension: + return "objectcomprehension" + case *SetComprehension: + return "setcomprehension" + } + + return TypeName(x) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/term.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/term.go new file mode 100644 index 000000000..9abc29346 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/term.go @@ -0,0 +1,3403 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// nolint: deadcode // Public API. +package ast + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "math" + "math/big" + "net/url" + "regexp" + "slices" + "strconv" + "strings" + "sync" + + "github.com/OneOfOne/xxhash" + + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/ast/location" + "github.com/open-policy-agent/opa/v1/util" +) + +var errFindNotFound = fmt.Errorf("find: not found") + +// Location records a position in source code. +type Location = location.Location + +// NewLocation returns a new Location object. +func NewLocation(text []byte, file string, row int, col int) *Location { + return location.NewLocation(text, file, row, col) +} + +// Value declares the common interface for all Term values. Every kind of Term value +// in the language is represented as a type that implements this interface: +// +// - Null, Boolean, Number, String +// - Object, Array, Set +// - Variables, References +// - Array, Set, and Object Comprehensions +// - Calls +type Value interface { + Compare(other Value) int // Compare returns <0, 0, or >0 if this Value is less than, equal to, or greater than other, respectively. + Find(path Ref) (Value, error) // Find returns value referred to by path or an error if path is not found. + Hash() int // Returns hash code of the value. + IsGround() bool // IsGround returns true if this value is not a variable or contains no variables. + String() string // String returns a human readable string representation of the value. +} + +// InterfaceToValue converts a native Go value x to a Value. 
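Not part of the vendored patch, but a minimal usage sketch of the InterfaceToValue conversion declared above, assuming the github.com/open-policy-agent/opa/v1/ast import path this update vendors (the function name is illustrative):

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleInterfaceToValue() (ast.Value, error) {
	// Maps become Objects, slices become Arrays, and ints/floats become Numbers.
	return ast.InterfaceToValue(map[string]interface{}{
		"users": []any{"alice", "bob"},
		"count": 2,
	})
}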
+func InterfaceToValue(x interface{}) (Value, error) { + switch x := x.(type) { + case nil: + return NullValue, nil + case bool: + if x { + return InternedBooleanTerm(true).Value, nil + } + return InternedBooleanTerm(false).Value, nil + case json.Number: + if interned := InternedIntNumberTermFromString(string(x)); interned != nil { + return interned.Value, nil + } + return Number(x), nil + case int64: + return int64Number(x), nil + case uint64: + return uint64Number(x), nil + case float64: + return floatNumber(x), nil + case int: + return intNumber(x), nil + case string: + return String(x), nil + case []any: + r := util.NewPtrSlice[Term](len(x)) + for i, e := range x { + e, err := InterfaceToValue(e) + if err != nil { + return nil, err + } + r[i].Value = e + } + return NewArray(r...), nil + case map[string]any: + kvs := util.NewPtrSlice[Term](len(x) * 2) + idx := 0 + for k, v := range x { + kvs[idx].Value = String(k) + v, err := InterfaceToValue(v) + if err != nil { + return nil, err + } + kvs[idx+1].Value = v + idx += 2 + } + tuples := make([][2]*Term, len(kvs)/2) + for i := 0; i < len(kvs); i += 2 { + tuples[i/2] = *(*[2]*Term)(kvs[i : i+2]) + } + return NewObject(tuples...), nil + case map[string]string: + r := newobject(len(x)) + for k, v := range x { + r.Insert(StringTerm(k), StringTerm(v)) + } + return r, nil + default: + ptr := util.Reference(x) + if err := util.RoundTrip(ptr); err != nil { + return nil, fmt.Errorf("ast: interface conversion: %w", err) + } + return InterfaceToValue(*ptr) + } +} + +// ValueFromReader returns an AST value from a JSON serialized value in the reader. +func ValueFromReader(r io.Reader) (Value, error) { + var x interface{} + if err := util.NewJSONDecoder(r).Decode(&x); err != nil { + return nil, err + } + return InterfaceToValue(x) +} + +// As converts v into a Go native type referred to by x. +func As(v Value, x interface{}) error { + return util.NewJSONDecoder(strings.NewReader(v.String())).Decode(x) +} + +// Resolver defines the interface for resolving references to native Go values. +type Resolver interface { + Resolve(Ref) (interface{}, error) +} + +// ValueResolver defines the interface for resolving references to AST values. +type ValueResolver interface { + Resolve(Ref) (Value, error) +} + +// UnknownValueErr indicates a ValueResolver was unable to resolve a reference +// because the reference refers to an unknown value. +type UnknownValueErr struct{} + +func (UnknownValueErr) Error() string { + return "unknown value" +} + +// IsUnknownValueErr returns true if the err is an UnknownValueErr. +func IsUnknownValueErr(err error) bool { + _, ok := err.(UnknownValueErr) + return ok +} + +type illegalResolver struct{} + +func (illegalResolver) Resolve(ref Ref) (interface{}, error) { + return nil, fmt.Errorf("illegal value: %v", ref) +} + +// ValueToInterface returns the Go representation of an AST value. The AST +// value should not contain any values that require evaluation (e.g., vars, +// comprehensions, etc.) 
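As an editorial aside (assuming the vendored v1 ast package), the JSON and JSONWithOpt helpers defined below go the other way, from AST values back to native Go values, and fail on anything that still needs evaluation:

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleASTToNative() {
	v := ast.MustInterfaceToValue(map[string]interface{}{"a": 1})
	native, err := ast.JSON(v) // error if v contains vars, refs or comprehensions
	_, _ = native, err

	// Sets serialize as arrays; SortSets makes the order deterministic at extra cost.
	s, _ := ast.JSONWithOpt(ast.NewSet(ast.IntNumberTerm(2), ast.IntNumberTerm(1)), ast.JSONOpt{SortSets: true})
	_ = s
}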
+func ValueToInterface(v Value, resolver Resolver) (interface{}, error) { + return valueToInterface(v, resolver, JSONOpt{}) +} + +func valueToInterface(v Value, resolver Resolver, opt JSONOpt) (interface{}, error) { + switch v := v.(type) { + case Null: + return nil, nil + case Boolean: + return bool(v), nil + case Number: + return json.Number(v), nil + case String: + return string(v), nil + case *Array: + buf := []interface{}{} + for i := 0; i < v.Len(); i++ { + x1, err := valueToInterface(v.Elem(i).Value, resolver, opt) + if err != nil { + return nil, err + } + buf = append(buf, x1) + } + return buf, nil + case *object: + buf := make(map[string]interface{}, v.Len()) + err := v.Iter(func(k, v *Term) error { + ki, err := valueToInterface(k.Value, resolver, opt) + if err != nil { + return err + } + var str string + var ok bool + if str, ok = ki.(string); !ok { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(ki); err != nil { + return err + } + str = strings.TrimSpace(buf.String()) + } + vi, err := valueToInterface(v.Value, resolver, opt) + if err != nil { + return err + } + buf[str] = vi + return nil + }) + if err != nil { + return nil, err + } + return buf, nil + case *lazyObj: + if opt.CopyMaps { + return valueToInterface(v.force(), resolver, opt) + } + return v.native, nil + case Set: + buf := []interface{}{} + iter := func(x *Term) error { + x1, err := valueToInterface(x.Value, resolver, opt) + if err != nil { + return err + } + buf = append(buf, x1) + return nil + } + var err error + if opt.SortSets { + err = v.Sorted().Iter(iter) + } else { + err = v.Iter(iter) + } + if err != nil { + return nil, err + } + return buf, nil + case Ref: + return resolver.Resolve(v) + default: + return nil, fmt.Errorf("%v requires evaluation", TypeName(v)) + } +} + +// JSON returns the JSON representation of v. The value must not contain any +// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) +func JSON(v Value) (interface{}, error) { + return JSONWithOpt(v, JSONOpt{}) +} + +// JSONOpt defines parameters for AST to JSON conversion. +type JSONOpt struct { + SortSets bool // sort sets before serializing (this makes conversion more expensive) + CopyMaps bool // enforces copying of map[string]interface{} read from the store +} + +// JSONWithOpt returns the JSON representation of v. The value must not contain any +// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) +func JSONWithOpt(v Value, opt JSONOpt) (interface{}, error) { + return valueToInterface(v, illegalResolver{}, opt) +} + +// MustJSON returns the JSON representation of v. The value must not contain any +// refs or terms that require evaluation (e.g., vars, comprehensions, etc.) If +// the conversion fails, this function will panic. This function is mostly for +// test purposes. +func MustJSON(v Value) interface{} { + r, err := JSON(v) + if err != nil { + panic(err) + } + return r +} + +// MustInterfaceToValue converts a native Go value x to a Value. If the +// conversion fails, this function will panic. This function is mostly for test +// purposes. +func MustInterfaceToValue(x interface{}) Value { + v, err := InterfaceToValue(x) + if err != nil { + panic(err) + } + return v +} + +// Term is an argument to a function. +type Term struct { + Value Value `json:"value"` // the value of the Term as represented in Go + Location *Location `json:"location,omitempty"` // the location of the Term in the source +} + +// NewTerm returns a new Term object. 
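A short illustrative sketch (not from the patch) of working with Terms: Equal compares only the wrapped Values, so location metadata does not affect equality.

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleTermEquality() bool {
	a := ast.StringTerm("x").SetLocation(ast.NewLocation([]byte("x"), "policy.rego", 1, 1))
	b := ast.StringTerm("x")
	return a.Equal(b) // true: Locations are ignored, only the Values are compared
}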
+func NewTerm(v Value) *Term { + return &Term{ + Value: v, + } +} + +// SetLocation updates the term's Location and returns the term itself. +func (term *Term) SetLocation(loc *Location) *Term { + term.Location = loc + return term +} + +// Loc returns the Location of term. +func (term *Term) Loc() *Location { + if term == nil { + return nil + } + return term.Location +} + +// SetLoc sets the location on term. +func (term *Term) SetLoc(loc *Location) { + term.SetLocation(loc) +} + +// Copy returns a deep copy of term. +func (term *Term) Copy() *Term { + if term == nil { + return nil + } + + cpy := *term + + switch v := term.Value.(type) { + case Null, Boolean, Number, String, Var: + cpy.Value = v + case Ref: + cpy.Value = v.Copy() + case *Array: + cpy.Value = v.Copy() + case Set: + cpy.Value = v.Copy() + case *object: + cpy.Value = v.Copy() + case *ArrayComprehension: + cpy.Value = v.Copy() + case *ObjectComprehension: + cpy.Value = v.Copy() + case *SetComprehension: + cpy.Value = v.Copy() + case Call: + cpy.Value = v.Copy() + } + + return &cpy +} + +// Equal returns true if this term equals the other term. Equality is +// defined for each kind of term, and does not compare the Location. +func (term *Term) Equal(other *Term) bool { + if term == nil && other != nil { + return false + } + if term != nil && other == nil { + return false + } + if term == other { + return true + } + + return ValueEqual(term.Value, other.Value) +} + +// Get returns a value referred to by name from the term. +func (term *Term) Get(name *Term) *Term { + switch v := term.Value.(type) { + case *object: + return v.Get(name) + case *Array: + return v.Get(name) + case interface { + Get(*Term) *Term + }: + return v.Get(name) + case Set: + if v.Contains(name) { + return name + } + } + return nil +} + +// Hash returns the hash code of the Term's Value. Its Location +// is ignored. +func (term *Term) Hash() int { + return term.Value.Hash() +} + +// IsGround returns true if this term's Value is ground. +func (term *Term) IsGround() bool { + return term.Value.IsGround() +} + +// MarshalJSON returns the JSON encoding of the term. +// +// Specialized marshalling logic is required to include a type hint for Value. +func (term *Term) MarshalJSON() ([]byte, error) { + d := map[string]interface{}{ + "type": ValueName(term.Value), + "value": term.Value, + } + jsonOptions := astJSON.GetOptions().MarshalOptions + if jsonOptions.IncludeLocation.Term { + if term.Location != nil { + d["location"] = term.Location + } + } + return json.Marshal(d) +} + +func (term *Term) String() string { + return term.Value.String() +} + +// UnmarshalJSON parses the byte array and stores the result in term. +// Specialized unmarshalling is required to handle Value and Location. +func (term *Term) UnmarshalJSON(bs []byte) error { + v := map[string]interface{}{} + if err := util.UnmarshalJSON(bs, &v); err != nil { + return err + } + val, err := unmarshalValue(v) + if err != nil { + return err + } + term.Value = val + + if loc, ok := v["location"].(map[string]interface{}); ok { + term.Location = &Location{} + err := unmarshalLocation(term.Location, loc) + if err != nil { + return err + } + } + return nil +} + +// Vars returns a VarSet with variables contained in this term. +func (term *Term) Vars() VarSet { + vis := &VarVisitor{vars: VarSet{}} + vis.Walk(term) + return vis.vars +} + +// IsConstant returns true if the AST value is constant. 
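Hypothetical example, not part of the diff: IsConstant (declared below) reports whether a value is free of vars, refs, comprehensions and calls anywhere in its structure.

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleIsConstant() (bool, bool) {
	ground := ast.ArrayTerm(ast.IntNumberTerm(1), ast.StringTerm("a"))
	withVar := ast.ArrayTerm(ast.VarTerm("x"))
	return ast.IsConstant(ground.Value), ast.IsConstant(withVar.Value) // true, false
}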
+func IsConstant(v Value) bool { + found := false + vis := GenericVisitor{ + func(x interface{}) bool { + switch x.(type) { + case Var, Ref, *ArrayComprehension, *ObjectComprehension, *SetComprehension, Call: + found = true + return true + } + return false + }, + } + vis.Walk(v) + return !found +} + +// IsComprehension returns true if the supplied value is a comprehension. +func IsComprehension(x Value) bool { + switch x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + return true + } + return false +} + +// ContainsRefs returns true if the Value v contains refs. +func ContainsRefs(v interface{}) bool { + found := false + WalkRefs(v, func(Ref) bool { + found = true + return found + }) + return found +} + +// ContainsComprehensions returns true if the Value v contains comprehensions. +func ContainsComprehensions(v interface{}) bool { + found := false + WalkClosures(v, func(x interface{}) bool { + switch x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + found = true + return found + } + return found + }) + return found +} + +// ContainsClosures returns true if the Value v contains closures. +func ContainsClosures(v interface{}) bool { + found := false + WalkClosures(v, func(x interface{}) bool { + switch x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: + found = true + return found + } + return found + }) + return found +} + +// IsScalar returns true if the AST value is a scalar. +func IsScalar(v Value) bool { + switch v.(type) { + case String, Number, Boolean, Null: + return true + } + return false +} + +// Null represents the null value defined by JSON. +type Null struct{} + +var NullValue Value = Null{} + +// NullTerm creates a new Term with a Null value. +func NullTerm() *Term { + return &Term{Value: NullValue} +} + +// Equal returns true if the other term Value is also Null. +func (null Null) Equal(other Value) bool { + switch other.(type) { + case Null: + return true + default: + return false + } +} + +// Compare compares null to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (null Null) Compare(other Value) int { + if _, ok := other.(Null); ok { + return 0 + } + return -1 +} + +// Find returns the current value or a not found error. +func (null Null) Find(path Ref) (Value, error) { + if len(path) == 0 { + return NullValue, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (null Null) Hash() int { + return 0 +} + +// IsGround always returns true. +func (Null) IsGround() bool { + return true +} + +func (null Null) String() string { + return "null" +} + +// Boolean represents a boolean value defined by JSON. +type Boolean bool + +// BooleanTerm creates a new Term with a Boolean value. +func BooleanTerm(b bool) *Term { + if b { + return &Term{Value: InternedBooleanTerm(true).Value} + } + return &Term{Value: InternedBooleanTerm(false).Value} +} + +// Equal returns true if the other Value is a Boolean and is equal. +func (bol Boolean) Equal(other Value) bool { + switch other := other.(type) { + case Boolean: + return bol == other + default: + return false + } +} + +// Compare compares bol to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. 
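Editorial sketch (assuming the vendored v1 ast package): the Compare methods define a total order across value types, with Null sorting before every other type.

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleCompare() {
	_ = ast.Null{}.Compare(ast.Boolean(false))        // -1: Null sorts first
	_ = ast.Boolean(true).Compare(ast.Boolean(false)) // 1: false < true
	_ = ast.Compare(ast.String("a"), ast.String("b")) // -1: strings compare lexically
}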
+func (bol Boolean) Compare(other Value) int { + switch other := other.(type) { + case Boolean: + if bol == other { + return 0 + } + if !bol { + return -1 + } + return 1 + case Null: + return 1 + } + + return -1 +} + +// Find returns the current value or a not found error. +func (bol Boolean) Find(path Ref) (Value, error) { + if len(path) == 0 { + if bol { + return InternedBooleanTerm(true).Value, nil + } + return InternedBooleanTerm(false).Value, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (bol Boolean) Hash() int { + if bol { + return 1 + } + return 0 +} + +// IsGround always returns true. +func (Boolean) IsGround() bool { + return true +} + +func (bol Boolean) String() string { + return strconv.FormatBool(bool(bol)) +} + +// Number represents a numeric value as defined by JSON. +type Number json.Number + +// NumberTerm creates a new Term with a Number value. +func NumberTerm(n json.Number) *Term { + return &Term{Value: Number(n)} +} + +// IntNumberTerm creates a new Term with an integer Number value. +func IntNumberTerm(i int) *Term { + return &Term{Value: Number(strconv.Itoa(i))} +} + +// UIntNumberTerm creates a new Term with an unsigned integer Number value. +func UIntNumberTerm(u uint64) *Term { + return &Term{Value: uint64Number(u)} +} + +// FloatNumberTerm creates a new Term with a floating point Number value. +func FloatNumberTerm(f float64) *Term { + s := strconv.FormatFloat(f, 'g', -1, 64) + return &Term{Value: Number(s)} +} + +// Equal returns true if the other Value is a Number and is equal. +func (num Number) Equal(other Value) bool { + switch other := other.(type) { + case Number: + if n1, ok1 := num.Int64(); ok1 { + n2, ok2 := other.Int64() + if ok1 && ok2 && n1 == n2 { + return true + } + } + + return num.Compare(other) == 0 + default: + return false + } +} + +// Compare compares num to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (num Number) Compare(other Value) int { + // Optimize for the common case, as calling Compare allocates on heap. + if otherNum, yes := other.(Number); yes { + if ai, ok := num.Int64(); ok { + if bi, ok := otherNum.Int64(); ok { + if ai == bi { + return 0 + } + if ai < bi { + return -1 + } + return 1 + } + } + } + + return Compare(num, other) +} + +// Find returns the current value or a not found error. +func (num Number) Find(path Ref) (Value, error) { + if len(path) == 0 { + return num, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (num Number) Hash() int { + f, err := json.Number(num).Float64() + if err != nil { + bs := []byte(num) + h := xxhash.Checksum64(bs) + return int(h) + } + return int(f) +} + +// Int returns the int representation of num if possible. +func (num Number) Int() (int, bool) { + i64, ok := num.Int64() + return int(i64), ok +} + +// Int64 returns the int64 representation of num if possible. +func (num Number) Int64() (int64, bool) { + i, err := json.Number(num).Int64() + if err != nil { + return 0, false + } + return i, true +} + +// Float64 returns the float64 representation of num if possible. +func (num Number) Float64() (float64, bool) { + f, err := json.Number(num).Float64() + if err != nil { + return 0, false + } + return f, true +} + +// IsGround always returns true. +func (Number) IsGround() bool { + return true +} + +// MarshalJSON returns JSON encoded bytes representing num. 
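Illustrative only: Number wraps json.Number, so values keep their textual form rather than being forced through float64; Int64 and Float64 report whether a lossless conversion exists.

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleNumber() {
	n := ast.Number("12345678901234567890")
	if _, ok := n.Int64(); !ok {
		// too large for int64; comparisons still use exact arithmetic
	}
	_ = ast.Compare(ast.FloatNumberTerm(1.5).Value, ast.IntNumberTerm(3).Value) // -1
}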
+func (num Number) MarshalJSON() ([]byte, error) { + return json.Marshal(json.Number(num)) +} + +func (num Number) String() string { + return string(num) +} + +func intNumber(i int) Number { + return Number(strconv.Itoa(i)) +} + +func int64Number(i int64) Number { + return Number(strconv.FormatInt(i, 10)) +} + +func uint64Number(u uint64) Number { + return Number(strconv.FormatUint(u, 10)) +} + +func floatNumber(f float64) Number { + return Number(strconv.FormatFloat(f, 'g', -1, 64)) +} + +// String represents a string value as defined by JSON. +type String string + +// StringTerm creates a new Term with a String value. +func StringTerm(s string) *Term { + return &Term{Value: String(s)} +} + +// Equal returns true if the other Value is a String and is equal. +func (str String) Equal(other Value) bool { + switch other := other.(type) { + case String: + return str == other + default: + return false + } +} + +// Compare compares str to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (str String) Compare(other Value) int { + // Optimize for the common case of one string being compared to another by + // using a direct comparison of values. This avoids the allocation performed + // when calling Compare and its interface{} argument conversion. + if otherStr, ok := other.(String); ok { + if str == otherStr { + return 0 + } + if str < otherStr { + return -1 + } + return 1 + } + + return Compare(str, other) +} + +// Find returns the current value or a not found error. +func (str String) Find(path Ref) (Value, error) { + if len(path) == 0 { + return str, nil + } + return nil, errFindNotFound +} + +// IsGround always returns true. +func (String) IsGround() bool { + return true +} + +func (str String) String() string { + return strconv.Quote(string(str)) +} + +// Hash returns the hash code for the Value. +func (str String) Hash() int { + h := xxhash.ChecksumString64S(string(str), hashSeed0) + return int(h) +} + +// Var represents a variable as defined by the language. +type Var string + +// VarTerm creates a new Term with a Variable value. +func VarTerm(v string) *Term { + return &Term{Value: Var(v)} +} + +// Equal returns true if the other Value is a Variable and has the same value +// (name). +func (v Var) Equal(other Value) bool { + switch other := other.(type) { + case Var: + return v == other + default: + return false + } +} + +// Compare compares v to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (v Var) Compare(other Value) int { + if otherVar, ok := other.(Var); ok { + return strings.Compare(string(v), string(otherVar)) + } + return Compare(v, other) +} + +// Find returns the current value or a not found error. +func (v Var) Find(path Ref) (Value, error) { + if len(path) == 0 { + return v, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (v Var) Hash() int { + h := xxhash.ChecksumString64S(string(v), hashSeed0) + return int(h) +} + +// IsGround always returns false. +func (Var) IsGround() bool { + return false +} + +// IsWildcard returns true if this is a wildcard variable. +func (v Var) IsWildcard() bool { + return strings.HasPrefix(string(v), WildcardPrefix) +} + +// IsGenerated returns true if this variable was generated during compilation. +func (v Var) IsGenerated() bool { + return strings.HasPrefix(string(v), "__local") +} + +func (v Var) String() string { + // Special case for wildcard so that string representation is parseable. 
The + // parser mangles wildcard variables to make their names unique and uses an + // illegal variable name character (WildcardPrefix) to avoid conflicts. When + // we serialize the variable here, we need to make sure it's parseable. + if v.IsWildcard() { + return Wildcard.String() + } + return string(v) +} + +// Ref represents a reference as defined by the language. +type Ref []*Term + +// EmptyRef returns a new, empty reference. +func EmptyRef() Ref { + return Ref([]*Term{}) +} + +// PtrRef returns a new reference against the head for the pointer +// s. Path components in the pointer are unescaped. +func PtrRef(head *Term, s string) (Ref, error) { + s = strings.Trim(s, "/") + if s == "" { + return Ref{head}, nil + } + parts := strings.Split(s, "/") + if maxLen := math.MaxInt32; len(parts) >= maxLen { + return nil, fmt.Errorf("path too long: %s, %d > %d (max)", s, len(parts), maxLen) + } + ref := make(Ref, uint(len(parts))+1) + ref[0] = head + for i := 0; i < len(parts); i++ { + var err error + parts[i], err = url.PathUnescape(parts[i]) + if err != nil { + return nil, err + } + ref[i+1] = StringTerm(parts[i]) + } + return ref, nil +} + +// RefTerm creates a new Term with a Ref value. +func RefTerm(r ...*Term) *Term { + return &Term{Value: Ref(r)} +} + +// Append returns a copy of ref with the term appended to the end. +func (ref Ref) Append(term *Term) Ref { + n := len(ref) + dst := make(Ref, n+1) + copy(dst, ref) + dst[n] = term + return dst +} + +// Insert returns a copy of the ref with x inserted at pos. If pos < len(ref), +// existing elements are shifted to the right. If pos > len(ref)+1 this +// function panics. +func (ref Ref) Insert(x *Term, pos int) Ref { + switch { + case pos == len(ref): + return ref.Append(x) + case pos > len(ref)+1: + panic("illegal index") + } + cpy := make(Ref, len(ref)+1) + copy(cpy, ref[:pos]) + cpy[pos] = x + copy(cpy[pos+1:], ref[pos:]) + return cpy +} + +// Extend returns a copy of ref with the terms from other appended. The head of +// other will be converted to a string. +func (ref Ref) Extend(other Ref) Ref { + dst := make(Ref, len(ref)+len(other)) + copy(dst, ref) + + head := other[0].Copy() + head.Value = String(head.Value.(Var)) + offset := len(ref) + dst[offset] = head + + copy(dst[offset+1:], other[1:]) + return dst +} + +// Concat returns a ref with the terms appended. +func (ref Ref) Concat(terms []*Term) Ref { + if len(terms) == 0 { + return ref + } + cpy := make(Ref, len(ref)+len(terms)) + copy(cpy, ref) + copy(cpy[len(ref):], terms) + return cpy +} + +// Dynamic returns the offset of the first non-constant operand of ref. +func (ref Ref) Dynamic() int { + switch ref[0].Value.(type) { + case Call: + return 0 + } + for i := 1; i < len(ref); i++ { + if !IsConstant(ref[i].Value) { + return i + } + } + return -1 +} + +// Copy returns a deep copy of ref. +func (ref Ref) Copy() Ref { + return termSliceCopy(ref) +} + +// Equal returns true if ref is equal to other. +func (ref Ref) Equal(other Value) bool { + switch o := other.(type) { + case Ref: + if len(ref) == len(o) { + for i := range ref { + if !ref[i].Equal(o[i]) { + return false + } + } + + return true + } + } + + return false +} + +// Compare compares ref to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (ref Ref) Compare(other Value) int { + if o, ok := other.(Ref); ok { + return termSliceCompare(ref, o) + } + + return Compare(ref, other) +} + +// Find returns the current value or a "not found" error. 
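A hypothetical construction sketch (not part of the patch) showing how Refs are built and rendered, using only the helpers defined in this file:

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleRef() {
	ref := ast.RefTerm(ast.VarTerm("data"), ast.StringTerm("servers")).Value.(ast.Ref)
	ref = ref.Append(ast.IntNumberTerm(0)).Append(ast.StringTerm("name"))
	_ = ref.String()   // "data.servers[0].name"
	_ = ref.IsGround() // true: nothing after the head is a variable
	_ = ref.Dynamic()  // -1: every operand is constant
}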
+func (ref Ref) Find(path Ref) (Value, error) { + if len(path) == 0 { + return ref, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (ref Ref) Hash() int { + return termSliceHash(ref) +} + +// HasPrefix returns true if the other ref is a prefix of this ref. +func (ref Ref) HasPrefix(other Ref) bool { + if len(other) > len(ref) { + return false + } + for i := range other { + if !ref[i].Equal(other[i]) { + return false + } + } + return true +} + +// ConstantPrefix returns the constant portion of the ref starting from the head. +func (ref Ref) ConstantPrefix() Ref { + i := ref.Dynamic() + if i < 0 { + return ref.Copy() + } + return ref[:i].Copy() +} + +func (ref Ref) StringPrefix() Ref { + for i := 1; i < len(ref); i++ { + switch ref[i].Value.(type) { + case String: // pass + default: // cut off + return ref[:i].Copy() + } + } + + return ref.Copy() +} + +// GroundPrefix returns the ground portion of the ref starting from the head. By +// definition, the head of the reference is always ground. +func (ref Ref) GroundPrefix() Ref { + if ref.IsGround() { + return ref + } + + prefix := make(Ref, 0, len(ref)) + + for i, x := range ref { + if i > 0 && !x.IsGround() { + break + } + prefix = append(prefix, x) + } + + return prefix +} + +func (ref Ref) DynamicSuffix() Ref { + i := ref.Dynamic() + if i < 0 { + return nil + } + return ref[i:] +} + +// IsGround returns true if all of the parts of the Ref are ground. +func (ref Ref) IsGround() bool { + if len(ref) == 0 { + return true + } + return termSliceIsGround(ref[1:]) +} + +// IsNested returns true if this ref contains other Refs. +func (ref Ref) IsNested() bool { + for _, x := range ref { + if _, ok := x.Value.(Ref); ok { + return true + } + } + return false +} + +// Ptr returns a slash-separated path string for this ref. If the ref +// contains non-string terms this function returns an error. Path +// components are escaped. +func (ref Ref) Ptr() (string, error) { + parts := make([]string, 0, len(ref)-1) + for _, term := range ref[1:] { + if str, ok := term.Value.(String); ok { + parts = append(parts, url.PathEscape(string(str))) + } else { + return "", fmt.Errorf("invalid path value type") + } + } + return strings.Join(parts, "/"), nil +} + +var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") + +func IsVarCompatibleString(s string) bool { + return varRegexp.MatchString(s) +} + +var sbPool = sync.Pool{ + New: func() any { + return &strings.Builder{} + }, +} + +func (ref Ref) String() string { + if len(ref) == 0 { + return "" + } + + sb := sbPool.Get().(*strings.Builder) + sb.Reset() + + defer sbPool.Put(sb) + + sb.Grow(10 * len(ref)) + + sb.WriteString(ref[0].Value.String()) + + for _, p := range ref[1:] { + switch p := p.Value.(type) { + case String: + str := string(p) + if varRegexp.MatchString(str) && !IsKeyword(str) { + sb.WriteByte('.') + sb.WriteString(str) + } else { + sb.WriteString(`["`) + sb.WriteString(str) + sb.WriteString(`"]`) + } + default: + sb.WriteByte('[') + sb.WriteString(p.String()) + sb.WriteByte(']') + } + } + + return sb.String() +} + +// OutputVars returns a VarSet containing variables that would be bound by evaluating +// this expression in isolation. 
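Another editorial sketch: Ptr renders a ref as an escaped, slash-separated pointer (head excluded), while String brackets and quotes components that are not valid identifiers.

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleRefPtr() {
	ref := ast.RefTerm(ast.VarTerm("data"), ast.StringTerm("a/b"), ast.StringTerm("my key")).Value.(ast.Ref)
	p, _ := ref.Ptr() // "a%2Fb/my%20key": components are path-escaped
	_ = p
	_ = ref.String() // data["a/b"]["my key"]
}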
+func (ref Ref) OutputVars() VarSet { + vis := NewVarVisitor().WithParams(VarVisitorParams{SkipRefHead: true}) + vis.Walk(ref) + return vis.Vars() +} + +func (ref Ref) toArray() *Array { + a := NewArray() + for _, term := range ref { + if _, ok := term.Value.(String); ok { + a = a.Append(term) + } else { + a = a.Append(StringTerm(term.Value.String())) + } + } + return a +} + +// QueryIterator defines the interface for querying AST documents with references. +type QueryIterator func(map[Var]Value, Value) error + +// ArrayTerm creates a new Term with an Array value. +func ArrayTerm(a ...*Term) *Term { + return NewTerm(NewArray(a...)) +} + +// NewArray creates an Array with the terms provided. The array will +// use the provided term slice. +func NewArray(a ...*Term) *Array { + hs := make([]int, len(a)) + for i, e := range a { + hs[i] = e.Value.Hash() + } + arr := &Array{elems: a, hashs: hs, ground: termSliceIsGround(a)} + arr.rehash() + return arr +} + +// Array represents an array as defined by the language. Arrays are similar to the +// same types as defined by JSON with the exception that they can contain Vars +// and References. +type Array struct { + elems []*Term + hashs []int // element hashes + hash int + ground bool +} + +// Copy returns a deep copy of arr. +func (arr *Array) Copy() *Array { + cpy := make([]int, len(arr.elems)) + copy(cpy, arr.hashs) + return &Array{ + elems: termSliceCopy(arr.elems), + hashs: cpy, + hash: arr.hash, + ground: arr.IsGround()} +} + +// Equal returns true if arr is equal to other. +func (arr *Array) Equal(other Value) bool { + if arr == other { + return true + } + + if other, ok := other.(*Array); ok && len(arr.elems) == len(other.elems) { + for i := range arr.elems { + if !arr.elems[i].Equal(other.elems[i]) { + return false + } + } + return true + } + + return false +} + +// Compare compares arr to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (arr *Array) Compare(other Value) int { + if b, ok := other.(*Array); ok { + return termSliceCompare(arr.elems, b.elems) + } + + sortA := sortOrder(arr) + sortB := sortOrder(other) + + if sortA < sortB { + return -1 + } else if sortB < sortA { + return 1 + } + + return Compare(arr, other) +} + +// Find returns the value at the index or an out-of-range error. +func (arr *Array) Find(path Ref) (Value, error) { + if len(path) == 0 { + return arr, nil + } + num, ok := path[0].Value.(Number) + if !ok { + return nil, errFindNotFound + } + i, ok := num.Int() + if !ok { + return nil, errFindNotFound + } + if i < 0 || i >= arr.Len() { + return nil, errFindNotFound + } + return arr.Elem(i).Value.Find(path[1:]) +} + +// Get returns the element at pos or nil if not possible. +func (arr *Array) Get(pos *Term) *Term { + num, ok := pos.Value.(Number) + if !ok { + return nil + } + + i, ok := num.Int() + if !ok { + return nil + } + + if i >= 0 && i < len(arr.elems) { + return arr.elems[i] + } + + return nil +} + +// Sorted returns a new Array that contains the sorted elements of arr. +func (arr *Array) Sorted() *Array { + cpy := make([]*Term, len(arr.elems)) + for i := range cpy { + cpy[i] = arr.elems[i] + } + + slices.SortFunc(cpy, TermValueCompare) + + a := NewArray(cpy...) + a.hashs = arr.hashs + return a +} + +// Hash returns the hash code for the Value. +func (arr *Array) Hash() int { + return arr.hash +} + +// IsGround returns true if all of the Array elements are ground. 
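Not from the patch, a small usage sketch of the Array API defined here; note that Slice shares backing storage with the original array.

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleArray() {
	arr := ast.NewArray(ast.IntNumberTerm(3), ast.IntNumberTerm(1), ast.IntNumberTerm(2))
	arr = arr.Append(ast.IntNumberTerm(0)) // Append copies the header; the receiver is unchanged
	_ = arr.Sorted()                       // new array [0, 1, 2, 3]
	_ = arr.Slice(0, 2).Len()              // 2; the slice is a view, not a copy
}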
+func (arr *Array) IsGround() bool { + return arr.ground +} + +// MarshalJSON returns JSON encoded bytes representing arr. +func (arr *Array) MarshalJSON() ([]byte, error) { + if len(arr.elems) == 0 { + return []byte(`[]`), nil + } + return json.Marshal(arr.elems) +} + +func (arr *Array) String() string { + sb := sbPool.Get().(*strings.Builder) + sb.Reset() + sb.Grow(len(arr.elems) * 16) + + defer sbPool.Put(sb) + + sb.WriteRune('[') + for i, e := range arr.elems { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(e.String()) + } + sb.WriteRune(']') + + return sb.String() +} + +// Len returns the number of elements in the array. +func (arr *Array) Len() int { + return len(arr.elems) +} + +// Elem returns the element i of arr. +func (arr *Array) Elem(i int) *Term { + return arr.elems[i] +} + +// Set sets the element i of arr. +func (arr *Array) Set(i int, v *Term) { + arr.set(i, v) +} + +// rehash updates the cached hash of arr. +func (arr *Array) rehash() { + arr.hash = 0 + for _, h := range arr.hashs { + arr.hash += h + } +} + +// set sets the element i of arr. +func (arr *Array) set(i int, v *Term) { + arr.ground = arr.ground && v.IsGround() + arr.elems[i] = v + arr.hashs[i] = v.Value.Hash() + arr.rehash() +} + +// Slice returns a slice of arr starting from i index to j. -1 +// indicates the end of the array. The returned value array is not a +// copy and any modifications to either of arrays may be reflected to +// the other. +func (arr *Array) Slice(i, j int) *Array { + var elems []*Term + var hashs []int + if j == -1 { + elems = arr.elems[i:] + hashs = arr.hashs[i:] + } else { + elems = arr.elems[i:j] + hashs = arr.hashs[i:j] + } + // If arr is ground, the slice is, too. + // If it's not, the slice could still be. + gr := arr.ground || termSliceIsGround(elems) + + s := &Array{elems: elems, hashs: hashs, ground: gr} + s.rehash() + return s +} + +// Iter calls f on each element in arr. If f returns an error, +// iteration stops and the return value is the error. +func (arr *Array) Iter(f func(*Term) error) error { + for i := range arr.elems { + if err := f(arr.elems[i]); err != nil { + return err + } + } + return nil +} + +// Until calls f on each element in arr. If f returns true, iteration stops. +func (arr *Array) Until(f func(*Term) bool) bool { + for _, term := range arr.elems { + if f(term) { + return true + } + } + return false +} + +// Foreach calls f on each element in arr. +func (arr *Array) Foreach(f func(*Term)) { + for _, term := range arr.elems { + f(term) + } +} + +// Append appends a term to arr, returning the appended array. +func (arr *Array) Append(v *Term) *Array { + cpy := *arr + cpy.elems = append(arr.elems, v) + cpy.hashs = append(arr.hashs, v.Value.Hash()) + cpy.hash = arr.hash + v.Value.Hash() + cpy.ground = arr.ground && v.IsGround() + return &cpy +} + +// Set represents a set as defined by the language. +type Set interface { + Value + Len() int + Copy() Set + Diff(Set) Set + Intersect(Set) Set + Union(Set) Set + Add(*Term) + Iter(func(*Term) error) error + Until(func(*Term) bool) bool + Foreach(func(*Term)) + Contains(*Term) bool + Map(func(*Term) (*Term, error)) (Set, error) + Reduce(*Term, func(*Term, *Term) (*Term, error)) (*Term, error) + Sorted() *Array + Slice() []*Term +} + +// NewSet returns a new Set containing t. 
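Editorial example of the numeric equality used by the set implementation below: numerically equal Numbers are deduplicated via exact big.Rat comparison, while values of different types never collide.

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleSetDedup() int {
	s := ast.NewSet(
		ast.NewTerm(ast.Number("1")),
		ast.NewTerm(ast.Number("1.0")), // numerically equal to 1, so not inserted again
		ast.StringTerm("1"),            // a String, kept as a distinct element
	)
	return s.Len() // expected to be 2
}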
+func NewSet(t ...*Term) Set { + s := newset(len(t)) + for _, term := range t { + s.insert(term, false) + } + return s +} + +func newset(n int) *set { + var keys []*Term + if n > 0 { + keys = make([]*Term, 0, n) + } + return &set{ + elems: make(map[int]*Term, n), + keys: keys, + hash: 0, + ground: true, + sortGuard: sync.Once{}, + } +} + +// SetTerm returns a new Term representing a set containing terms t. +func SetTerm(t ...*Term) *Term { + set := NewSet(t...) + return &Term{ + Value: set, + } +} + +type set struct { + elems map[int]*Term + keys []*Term + hash int + ground bool + // Prevents race condition around sorting. + // We can avoid (the allocation cost of) using a pointer here as all + // methods of `set` use a pointer receiver, and the `sync.Once` value + // is never copied. + sortGuard sync.Once +} + +// Copy returns a deep copy of s. +func (s *set) Copy() Set { + terms := make([]*Term, len(s.keys)) + for i := range s.keys { + terms[i] = s.keys[i].Copy() + } + cpy := NewSet(terms...).(*set) + cpy.hash = s.hash + cpy.ground = s.ground + return cpy +} + +// IsGround returns true if all terms in s are ground. +func (s *set) IsGround() bool { + return s.ground +} + +// Hash returns a hash code for s. +func (s *set) Hash() int { + return s.hash +} + +func (s *set) String() string { + if s.Len() == 0 { + return "set()" + } + + sb := sbPool.Get().(*strings.Builder) + sb.Reset() + sb.Grow(s.Len() * 16) + + defer sbPool.Put(sb) + + sb.WriteRune('{') + for i := range s.sortedKeys() { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(s.keys[i].Value.String()) + } + sb.WriteRune('}') + + return sb.String() +} + +func (s *set) sortedKeys() []*Term { + s.sortGuard.Do(func() { + slices.SortFunc(s.keys, TermValueCompare) + }) + return s.keys +} + +// Compare compares s to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (s *set) Compare(other Value) int { + o1 := sortOrder(s) + o2 := sortOrder(other) + if o1 < o2 { + return -1 + } else if o1 > o2 { + return 1 + } + t := other.(*set) + return termSliceCompare(s.sortedKeys(), t.sortedKeys()) +} + +// Find returns the set or dereferences the element itself. +func (s *set) Find(path Ref) (Value, error) { + if len(path) == 0 { + return s, nil + } + if !s.Contains(path[0]) { + return nil, errFindNotFound + } + return path[0].Value.Find(path[1:]) +} + +// Diff returns elements in s that are not in other. +func (s *set) Diff(other Set) Set { + terms := make([]*Term, 0, len(s.keys)) + for _, term := range s.sortedKeys() { + if !other.Contains(term) { + terms = append(terms, term) + } + } + + return NewSet(terms...) +} + +// Intersect returns the set containing elements in both s and other. +func (s *set) Intersect(other Set) Set { + o := other.(*set) + n, m := s.Len(), o.Len() + ss := s + so := o + if m < n { + ss = o + so = s + n = m + } + + terms := make([]*Term, 0, n) + for _, term := range ss.sortedKeys() { + if so.Contains(term) { + terms = append(terms, term) + } + } + + return NewSet(terms...) +} + +// Union returns the set containing all elements of s and other. +func (s *set) Union(other Set) Set { + r := NewSet() + s.Foreach(r.Add) + other.Foreach(r.Add) + return r +} + +// Add updates s to include t. +func (s *set) Add(t *Term) { + s.insert(t, true) +} + +// Iter calls f on each element in s. If f returns an error, iteration stops +// and the return value is the error. 
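Illustrative usage (not part of the diff) of the set operations defined in this block:

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleSetOps() {
	a := ast.NewSet(ast.StringTerm("x"), ast.StringTerm("y"))
	b := ast.NewSet(ast.StringTerm("y"), ast.StringTerm("z"))
	_ = a.Union(b)                      // {"x", "y", "z"}
	_ = a.Intersect(b)                  // {"y"}
	_ = a.Diff(b)                       // {"x"}
	_ = a.Contains(ast.StringTerm("x")) // true
	_ = a.Sorted()                      // deterministic *Array view for stable output
}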
+func (s *set) Iter(f func(*Term) error) error { + for _, term := range s.sortedKeys() { + if err := f(term); err != nil { + return err + } + } + return nil +} + +// Until calls f on each element in s. If f returns true, iteration stops. +func (s *set) Until(f func(*Term) bool) bool { + for _, term := range s.sortedKeys() { + if f(term) { + return true + } + } + return false +} + +// Foreach calls f on each element in s. +func (s *set) Foreach(f func(*Term)) { + for _, term := range s.sortedKeys() { + f(term) + } +} + +// Map returns a new Set obtained by applying f to each value in s. +func (s *set) Map(f func(*Term) (*Term, error)) (Set, error) { + mapped := make([]*Term, 0, len(s.keys)) + for _, x := range s.sortedKeys() { + term, err := f(x) + if err != nil { + return nil, err + } + mapped = append(mapped, term) + } + return NewSet(mapped...), nil +} + +// Reduce returns a Term produced by applying f to each value in s. The first +// argument to f is the reduced value (starting with i) and the second argument +// to f is the element in s. +func (s *set) Reduce(i *Term, f func(*Term, *Term) (*Term, error)) (*Term, error) { + err := s.Iter(func(x *Term) error { + var err error + i, err = f(i, x) + if err != nil { + return err + } + return nil + }) + return i, err +} + +// Contains returns true if t is in s. +func (s *set) Contains(t *Term) bool { + return s.get(t) != nil +} + +// Len returns the number of elements in the set. +func (s *set) Len() int { + return len(s.keys) +} + +// MarshalJSON returns JSON encoded bytes representing s. +func (s *set) MarshalJSON() ([]byte, error) { + if s.keys == nil { + return []byte(`[]`), nil + } + return json.Marshal(s.sortedKeys()) +} + +// Sorted returns an Array that contains the sorted elements of s. +func (s *set) Sorted() *Array { + cpy := make([]*Term, len(s.keys)) + copy(cpy, s.sortedKeys()) + return NewArray(cpy...) +} + +// Slice returns a slice of terms contained in the set. +func (s *set) Slice() []*Term { + return s.sortedKeys() +} + +// Internal method to use for cases where a set may be reused in favor +// of creating a new one (with the associated allocations). +func (s *set) clear() { + clear(s.elems) + s.keys = s.keys[:0] + s.hash = 0 + s.ground = true + s.sortGuard = sync.Once{} +} + +func (s *set) insertNoGuard(x *Term) { + s.insert(x, false) +} + +// NOTE(philipc): We assume a many-readers, single-writer model here. +// This method should NOT be used concurrently, or else we risk data races. +func (s *set) insert(x *Term, resetSortGuard bool) { + hash := x.Hash() + insertHash := hash + // This `equal` utility is duplicated and manually inlined a number of + // time in this file. Inlining it avoids heap allocations, so it makes + // a big performance difference: some operations like lookup become twice + // as slow without it. + var equal func(v Value) bool + + switch x := x.Value.(type) { + case Null, Boolean, String, Var: + equal = func(y Value) bool { return x == y } + case Number: + if xi, err := json.Number(x).Int64(); err == nil { + equal = func(y Value) bool { + if y, ok := y.(Number); ok { + if yi, err := json.Number(y).Int64(); err == nil { + return xi == yi + } + } + + return false + } + break + } + + // We use big.Rat for comparing big numbers. + // It replaces big.Float due to following reason: + // big.Float comes with a default precision of 64, and setting a + // larger precision results in more memory being allocated + // (regardless of the actual number we are parsing with SetString). 
+ // + // Note: If we're so close to zero that big.Float says we are zero, do + // *not* big.Rat).SetString on the original string it'll potentially + // take very long. + var a *big.Rat + fa, ok := new(big.Float).SetString(string(x)) + if !ok { + panic("illegal value") + } + if fa.IsInt() { + if i, _ := fa.Int64(); i == 0 { + a = new(big.Rat).SetInt64(0) + } + } + if a == nil { + a, ok = new(big.Rat).SetString(string(x)) + if !ok { + panic("illegal value") + } + } + + equal = func(b Value) bool { + if bNum, ok := b.(Number); ok { + var b *big.Rat + fb, ok := new(big.Float).SetString(string(bNum)) + if !ok { + panic("illegal value") + } + if fb.IsInt() { + if i, _ := fb.Int64(); i == 0 { + b = new(big.Rat).SetInt64(0) + } + } + if b == nil { + b, ok = new(big.Rat).SetString(string(bNum)) + if !ok { + panic("illegal value") + } + } + + return a.Cmp(b) == 0 + } + + return false + } + default: + equal = func(y Value) bool { return Compare(x, y) == 0 } + } + + for curr, ok := s.elems[insertHash]; ok; { + if equal(curr.Value) { + return + } + + insertHash++ + curr, ok = s.elems[insertHash] + } + + s.elems[insertHash] = x + // O(1) insertion, but we'll have to re-sort the keys later. + s.keys = append(s.keys, x) + + if resetSortGuard { + // Reset the sync.Once instance. + // See https://github.com/golang/go/issues/25955 for why we do it this way. + // Note that this will always be the case when external code calls insert via + // Add, or otherwise. Internal code may however benefit from not having to + // re-create this pointer when it's known not to be needed. + s.sortGuard = sync.Once{} + } + + s.hash += hash + s.ground = s.ground && x.IsGround() +} + +func (s *set) get(x *Term) *Term { + hash := x.Hash() + // This `equal` utility is duplicated and manually inlined a number of + // time in this file. Inlining it avoids heap allocations, so it makes + // a big performance difference: some operations like lookup become twice + // as slow without it. + var equal func(v Value) bool + + switch x := x.Value.(type) { + case Null, Boolean, String, Var: + equal = func(y Value) bool { return x == y } + case Number: + if xi, err := json.Number(x).Int64(); err == nil { + equal = func(y Value) bool { + if y, ok := y.(Number); ok { + if yi, err := json.Number(y).Int64(); err == nil { + return xi == yi + } + } + + return false + } + break + } + + // We use big.Rat for comparing big numbers. + // It replaces big.Float due to following reason: + // big.Float comes with a default precision of 64, and setting a + // larger precision results in more memory being allocated + // (regardless of the actual number we are parsing with SetString). + // + // Note: If we're so close to zero that big.Float says we are zero, do + // *not* big.Rat).SetString on the original string it'll potentially + // take very long. 
+ var a *big.Rat + fa, ok := new(big.Float).SetString(string(x)) + if !ok { + panic("illegal value") + } + if fa.IsInt() { + if i, _ := fa.Int64(); i == 0 { + a = new(big.Rat).SetInt64(0) + } + } + if a == nil { + a, ok = new(big.Rat).SetString(string(x)) + if !ok { + panic("illegal value") + } + } + + equal = func(b Value) bool { + if bNum, ok := b.(Number); ok { + var b *big.Rat + fb, ok := new(big.Float).SetString(string(bNum)) + if !ok { + panic("illegal value") + } + if fb.IsInt() { + if i, _ := fb.Int64(); i == 0 { + b = new(big.Rat).SetInt64(0) + } + } + if b == nil { + b, ok = new(big.Rat).SetString(string(bNum)) + if !ok { + panic("illegal value") + } + } + + return a.Cmp(b) == 0 + } + return false + + } + + default: + equal = func(y Value) bool { return Compare(x, y) == 0 } + } + + for curr, ok := s.elems[hash]; ok; { + if equal(curr.Value) { + return curr + } + + hash++ + curr, ok = s.elems[hash] + } + return nil +} + +// Object represents an object as defined by the language. +type Object interface { + Value + Len() int + Get(*Term) *Term + Copy() Object + Insert(*Term, *Term) + Iter(func(*Term, *Term) error) error + Until(func(*Term, *Term) bool) bool + Foreach(func(*Term, *Term)) + Map(func(*Term, *Term) (*Term, *Term, error)) (Object, error) + Diff(other Object) Object + Intersect(other Object) [][3]*Term + Merge(other Object) (Object, bool) + MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) + Filter(filter Object) (Object, error) + Keys() []*Term + KeysIterator() ObjectKeysIterator + get(k *Term) *objectElem // To prevent external implementations +} + +// NewObject creates a new Object with t. +func NewObject(t ...[2]*Term) Object { + obj := newobject(len(t)) + for i := range t { + obj.insert(t[i][0], t[i][1], false) + } + return obj +} + +// ObjectTerm creates a new Term with an Object value. +func ObjectTerm(o ...[2]*Term) *Term { + return &Term{Value: NewObject(o...)} +} + +func LazyObject(blob map[string]interface{}) Object { + return &lazyObj{native: blob, cache: map[string]Value{}} +} + +type lazyObj struct { + strict Object + cache map[string]Value + native map[string]interface{} +} + +func (l *lazyObj) force() Object { + if l.strict == nil { + l.strict = MustInterfaceToValue(l.native).(Object) + // NOTE(jf): a possible performance improvement here would be to check how many + // entries have been realized to AST in the cache, and if some threshold compared to the + // total number of keys is exceeded, realize the remaining entries and set l.strict to l.cache. + l.cache = map[string]Value{} // We don't need the cache anymore; drop it to free up memory. + } + return l.strict +} + +func (l *lazyObj) Compare(other Value) int { + o1 := sortOrder(l) + o2 := sortOrder(other) + if o1 < o2 { + return -1 + } else if o2 < o1 { + return 1 + } + return l.force().Compare(other) +} + +func (l *lazyObj) Copy() Object { + return l +} + +func (l *lazyObj) Diff(other Object) Object { + return l.force().Diff(other) +} + +func (l *lazyObj) Intersect(other Object) [][3]*Term { + return l.force().Intersect(other) +} + +func (l *lazyObj) Iter(f func(*Term, *Term) error) error { + return l.force().Iter(f) +} + +func (l *lazyObj) Until(f func(*Term, *Term) bool) bool { + // NOTE(sr): there could be benefits in not forcing here -- if we abort because + // `f` returns true, we could save us from converting the rest of the object. 
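Editorial aside on the lazy object above: entries are converted from the native map only when first accessed and then cached, while whole-object operations force a full conversion. A hypothetical sketch:

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleLazyObject() *ast.Term {
	obj := ast.LazyObject(map[string]interface{}{
		"metadata": map[string]interface{}{"name": "demo"},
	})
	// Only the "metadata" branch is converted here; sibling keys would stay native.
	return obj.Get(ast.StringTerm("metadata")).Get(ast.StringTerm("name")) // "demo"
}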
+ return l.force().Until(f) +} + +func (l *lazyObj) Foreach(f func(*Term, *Term)) { + l.force().Foreach(f) +} + +func (l *lazyObj) Filter(filter Object) (Object, error) { + return l.force().Filter(filter) +} + +func (l *lazyObj) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { + return l.force().Map(f) +} + +func (l *lazyObj) MarshalJSON() ([]byte, error) { + return l.force().(*object).MarshalJSON() +} + +func (l *lazyObj) Merge(other Object) (Object, bool) { + return l.force().Merge(other) +} + +func (l *lazyObj) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { + return l.force().MergeWith(other, conflictResolver) +} + +func (l *lazyObj) Len() int { + return len(l.native) +} + +func (l *lazyObj) String() string { + return l.force().String() +} + +// get is merely there to implement the Object interface -- `get` there serves the +// purpose of prohibiting external implementations. It's never called for lazyObj. +func (*lazyObj) get(*Term) *objectElem { + return nil +} + +func (l *lazyObj) Get(k *Term) *Term { + if l.strict != nil { + return l.strict.Get(k) + } + if s, ok := k.Value.(String); ok { + if v, ok := l.cache[string(s)]; ok { + return NewTerm(v) + } + + if val, ok := l.native[string(s)]; ok { + var converted Value + switch val := val.(type) { + case map[string]interface{}: + converted = LazyObject(val) + default: + converted = MustInterfaceToValue(val) + } + l.cache[string(s)] = converted + return NewTerm(converted) + } + } + return nil +} + +func (l *lazyObj) Insert(k, v *Term) { + l.force().Insert(k, v) +} + +func (*lazyObj) IsGround() bool { + return true +} + +func (l *lazyObj) Hash() int { + return l.force().Hash() +} + +func (l *lazyObj) Keys() []*Term { + if l.strict != nil { + return l.strict.Keys() + } + ret := make([]*Term, 0, len(l.native)) + for k := range l.native { + ret = append(ret, StringTerm(k)) + } + slices.SortFunc(ret, TermValueCompare) + + return ret +} + +func (l *lazyObj) KeysIterator() ObjectKeysIterator { + return &lazyObjKeysIterator{keys: l.Keys()} +} + +type lazyObjKeysIterator struct { + current int + keys []*Term +} + +func (ki *lazyObjKeysIterator) Next() (*Term, bool) { + if ki.current == len(ki.keys) { + return nil, false + } + ki.current++ + return ki.keys[ki.current-1], true +} + +func (l *lazyObj) Find(path Ref) (Value, error) { + if l.strict != nil { + return l.strict.Find(path) + } + if len(path) == 0 { + return l, nil + } + if p0, ok := path[0].Value.(String); ok { + if v, ok := l.cache[string(p0)]; ok { + return v.Find(path[1:]) + } + + if v, ok := l.native[string(p0)]; ok { + var converted Value + switch v := v.(type) { + case map[string]interface{}: + converted = LazyObject(v) + default: + converted = MustInterfaceToValue(v) + } + l.cache[string(p0)] = converted + return converted.Find(path[1:]) + } + } + return nil, errFindNotFound +} + +type object struct { + elems map[int]*objectElem + keys objectElemSlice + ground int // number of key and value grounds. Counting is + // required to support insert's key-value replace. + hash int + sortGuard sync.Once // Prevents race condition around sorting. 
+} + +func newobject(n int) *object { + var keys objectElemSlice + if n > 0 { + keys = make(objectElemSlice, 0, n) + } + return &object{ + elems: make(map[int]*objectElem, n), + keys: keys, + ground: 0, + hash: 0, + sortGuard: sync.Once{}, + } +} + +type objectElem struct { + key *Term + value *Term + next *objectElem +} + +type objectElemSlice []*objectElem + +func (s objectElemSlice) Less(i, j int) bool { return Compare(s[i].key.Value, s[j].key.Value) < 0 } +func (s objectElemSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } +func (s objectElemSlice) Len() int { return len(s) } + +// Item is a helper for constructing an tuple containing two Terms +// representing a key/value pair in an Object. +func Item(key, value *Term) [2]*Term { + return [2]*Term{key, value} +} + +func (obj *object) sortedKeys() objectElemSlice { + obj.sortGuard.Do(func() { + slices.SortFunc(obj.keys, func(a, b *objectElem) int { + return a.key.Value.Compare(b.key.Value) + }) + }) + return obj.keys +} + +// Compare compares obj to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (obj *object) Compare(other Value) int { + if x, ok := other.(*lazyObj); ok { + other = x.force() + } + o1 := sortOrder(obj) + o2 := sortOrder(other) + if o1 < o2 { + return -1 + } else if o2 < o1 { + return 1 + } + a := obj + b := other.(*object) + // Ensure that keys are in canonical sorted order before use! + akeys := a.sortedKeys() + bkeys := b.sortedKeys() + minLen := len(akeys) + if len(b.keys) < len(akeys) { + minLen = len(bkeys) + } + for i := 0; i < minLen; i++ { + keysCmp := Compare(akeys[i].key, bkeys[i].key) + if keysCmp < 0 { + return -1 + } + if keysCmp > 0 { + return 1 + } + valA := akeys[i].value + valB := bkeys[i].value + valCmp := Compare(valA, valB) + if valCmp != 0 { + return valCmp + } + } + if len(akeys) < len(bkeys) { + return -1 + } + if len(bkeys) < len(akeys) { + return 1 + } + return 0 +} + +// Find returns the value at the key or undefined. +func (obj *object) Find(path Ref) (Value, error) { + if len(path) == 0 { + return obj, nil + } + value := obj.Get(path[0]) + if value == nil { + return nil, errFindNotFound + } + return value.Value.Find(path[1:]) +} + +func (obj *object) Insert(k, v *Term) { + obj.insert(k, v, true) +} + +// Get returns the value of k in obj if k exists, otherwise nil. +func (obj *object) Get(k *Term) *Term { + if elem := obj.get(k); elem != nil { + return elem.value + } + return nil +} + +// Hash returns the hash code for the Value. +func (obj *object) Hash() int { + return obj.hash +} + +// IsGround returns true if all of the Object key/value pairs are ground. +func (obj *object) IsGround() bool { + return obj.ground == 2*len(obj.keys) +} + +// Copy returns a deep copy of obj. +func (obj *object) Copy() Object { + cpy, _ := obj.Map(func(k, v *Term) (*Term, *Term, error) { + return k.Copy(), v.Copy(), nil + }) + cpy.(*object).hash = obj.hash + return cpy +} + +// Diff returns a new Object that contains only the key/value pairs that exist in obj. +func (obj *object) Diff(other Object) Object { + r := newobject(obj.Len()) + for _, node := range obj.sortedKeys() { + if other.Get(node.key) == nil { + r.insert(node.key, node.value, false) + } + } + return r +} + +// Intersect returns a slice of term triplets that represent the intersection of keys +// between obj and other. For each intersecting key, the values from obj and other are included +// as the last two terms in the triplet (respectively). 
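A small usage sketch (not part of the vendored change) of the object methods in this block; object keys are unique, so Insert replaces the value of an existing key.

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleObject() {
	obj := ast.NewObject(
		ast.Item(ast.StringTerm("a"), ast.IntNumberTerm(1)),
		ast.Item(ast.StringTerm("b"), ast.IntNumberTerm(2)),
	)
	obj.Insert(ast.StringTerm("a"), ast.IntNumberTerm(10)) // replaces the value under "a"
	other := ast.NewObject(ast.Item(ast.StringTerm("b"), ast.BooleanTerm(true)))
	_ = obj.Diff(other)      // {"a": 10}: keys of obj missing from other
	_ = obj.Intersect(other) // one triplet: key "b" with the values 2 and true
}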
+func (obj *object) Intersect(other Object) [][3]*Term { + r := [][3]*Term{} + obj.Foreach(func(k, v *Term) { + if v2 := other.Get(k); v2 != nil { + r = append(r, [3]*Term{k, v, v2}) + } + }) + return r +} + +// Iter calls the function f for each key-value pair in the object. If f +// returns an error, iteration stops and the error is returned. +func (obj *object) Iter(f func(*Term, *Term) error) error { + for _, node := range obj.sortedKeys() { + if err := f(node.key, node.value); err != nil { + return err + } + } + return nil +} + +// Until calls f for each key-value pair in the object. If f returns +// true, iteration stops and Until returns true. Otherwise, return +// false. +func (obj *object) Until(f func(*Term, *Term) bool) bool { + for _, node := range obj.sortedKeys() { + if f(node.key, node.value) { + return true + } + } + return false +} + +// Foreach calls f for each key-value pair in the object. +func (obj *object) Foreach(f func(*Term, *Term)) { + for _, node := range obj.sortedKeys() { + f(node.key, node.value) + } +} + +// Map returns a new Object constructed by mapping each element in the object +// using the function f. +func (obj *object) Map(f func(*Term, *Term) (*Term, *Term, error)) (Object, error) { + cpy := newobject(obj.Len()) + for _, node := range obj.sortedKeys() { + k, v, err := f(node.key, node.value) + if err != nil { + return nil, err + } + cpy.insert(k, v, false) + } + return cpy, nil +} + +// Keys returns the keys of obj. +func (obj *object) Keys() []*Term { + keys := make([]*Term, len(obj.keys)) + + for i, elem := range obj.sortedKeys() { + keys[i] = elem.key + } + + return keys +} + +// Returns an iterator over the obj's keys. +func (obj *object) KeysIterator() ObjectKeysIterator { + return newobjectKeysIterator(obj) +} + +// MarshalJSON returns JSON encoded bytes representing obj. +func (obj *object) MarshalJSON() ([]byte, error) { + sl := make([][2]*Term, obj.Len()) + for i, node := range obj.sortedKeys() { + sl[i] = Item(node.key, node.value) + } + return json.Marshal(sl) +} + +// Merge returns a new Object containing the non-overlapping keys of obj and other. If there are +// overlapping keys between obj and other, the values of associated with the keys are merged. Only +// objects can be merged with other objects. If the values cannot be merged, the second turn value +// will be false. +func (obj *object) Merge(other Object) (Object, bool) { + return obj.MergeWith(other, func(v1, v2 *Term) (*Term, bool) { + obj1, ok1 := v1.Value.(Object) + obj2, ok2 := v2.Value.(Object) + if !ok1 || !ok2 { + return nil, true + } + obj3, ok := obj1.Merge(obj2) + if !ok { + return nil, true + } + return NewTerm(obj3), false + }) +} + +// MergeWith returns a new Object containing the merged keys of obj and other. +// If there are overlapping keys between obj and other, the conflictResolver +// is called. The conflictResolver can return a merged value and a boolean +// indicating if the merge has failed and should stop. 
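Hypothetical example of Merge and MergeWith as declared here: nested objects merge recursively, and MergeWith lets the caller pick a conflict policy such as "the other side wins".

// import "github.com/open-policy-agent/opa/v1/ast"
func exampleMerge() {
	base := ast.NewObject(ast.Item(ast.StringTerm("labels"),
		ast.ObjectTerm(ast.Item(ast.StringTerm("app"), ast.StringTerm("demo")))))
	overlay := ast.NewObject(ast.Item(ast.StringTerm("labels"),
		ast.ObjectTerm(ast.Item(ast.StringTerm("env"), ast.StringTerm("prod")))))

	merged, ok := base.Merge(overlay) // {"labels": {"app": "demo", "env": "prod"}}, ok == true
	_, _ = merged, ok

	// Overlay wins for any overlapping key; returning false keeps the merge going.
	forced, _ := base.MergeWith(overlay, func(_, v2 *ast.Term) (*ast.Term, bool) { return v2, false })
	_ = forced
}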
+func (obj *object) MergeWith(other Object, conflictResolver func(v1, v2 *Term) (*Term, bool)) (Object, bool) { + result := NewObject() + stop := obj.Until(func(k, v *Term) bool { + v2 := other.Get(k) + // The key didn't exist in other, keep the original value + if v2 == nil { + result.Insert(k, v) + return false + } + + // The key exists in both, resolve the conflict if possible + merged, stop := conflictResolver(v, v2) + if !stop { + result.Insert(k, merged) + } + return stop + }) + + if stop { + return nil, false + } + + // Copy in any values from other for keys that don't exist in obj + other.Foreach(func(k, v *Term) { + if v2 := obj.Get(k); v2 == nil { + result.Insert(k, v) + } + }) + return result, true +} + +// Filter returns a new object from values in obj where the keys are +// found in filter. Array indices for values can be specified as +// number strings. +func (obj *object) Filter(filter Object) (Object, error) { + filtered, err := filterObject(obj, filter) + if err != nil { + return nil, err + } + return filtered.(Object), nil +} + +// Len returns the number of elements in the object. +func (obj *object) Len() int { + return len(obj.keys) +} + +func (obj *object) String() string { + sb := sbPool.Get().(*strings.Builder) + sb.Reset() + sb.Grow(obj.Len() * 32) + + defer sbPool.Put(sb) + + sb.WriteRune('{') + + for i, elem := range obj.sortedKeys() { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(elem.key.String()) + sb.WriteString(": ") + sb.WriteString(elem.value.String()) + } + sb.WriteRune('}') + + return sb.String() +} + +func (obj *object) get(k *Term) *objectElem { + hash := k.Hash() + + // This `equal` utility is duplicated and manually inlined a number of + // time in this file. Inlining it avoids heap allocations, so it makes + // a big performance difference: some operations like lookup become twice + // as slow without it. + var equal func(v Value) bool + + switch x := k.Value.(type) { + case Null, Boolean, String, Var: + equal = func(y Value) bool { return x == y } + case Number: + if xi, ok := x.Int64(); ok { + equal = func(y Value) bool { + if y, ok := y.(Number); ok { + if yi, ok := y.Int64(); ok { + return xi == yi + } + } + + return false + } + break + } + + // We use big.Rat for comparing big numbers. + // It replaces big.Float due to following reason: + // big.Float comes with a default precision of 64, and setting a + // larger precision results in more memory being allocated + // (regardless of the actual number we are parsing with SetString). + // + // Note: If we're so close to zero that big.Float says we are zero, do + // *not* big.Rat).SetString on the original string it'll potentially + // take very long. 
+ var a *big.Rat + fa, ok := new(big.Float).SetString(string(x)) + if !ok { + panic("illegal value") + } + if fa.IsInt() { + if i, _ := fa.Int64(); i == 0 { + a = new(big.Rat).SetInt64(0) + } + } + if a == nil { + a, ok = new(big.Rat).SetString(string(x)) + if !ok { + panic("illegal value") + } + } + + equal = func(b Value) bool { + if bNum, ok := b.(Number); ok { + var b *big.Rat + fb, ok := new(big.Float).SetString(string(bNum)) + if !ok { + panic("illegal value") + } + if fb.IsInt() { + if i, _ := fb.Int64(); i == 0 { + b = new(big.Rat).SetInt64(0) + } + } + if b == nil { + b, ok = new(big.Rat).SetString(string(bNum)) + if !ok { + panic("illegal value") + } + } + + return a.Cmp(b) == 0 + } + + return false + } + default: + equal = func(y Value) bool { return Compare(x, y) == 0 } + } + + for curr := obj.elems[hash]; curr != nil; curr = curr.next { + if equal(curr.key.Value) { + return curr + } + } + return nil +} + +// NOTE(philipc): We assume a many-readers, single-writer model here. +// This method should NOT be used concurrently, or else we risk data races. +func (obj *object) insert(k, v *Term, resetSortGuard bool) { + hash := k.Hash() + head := obj.elems[hash] + // This `equal` utility is duplicated and manually inlined a number of + // time in this file. Inlining it avoids heap allocations, so it makes + // a big performance difference: some operations like lookup become twice + // as slow without it. + var equal func(v Value) bool + + switch x := k.Value.(type) { + case Null, Boolean, String, Var: + equal = func(y Value) bool { return x == y } + case Number: + if xi, err := json.Number(x).Int64(); err == nil { + equal = func(y Value) bool { + if y, ok := y.(Number); ok { + if yi, err := json.Number(y).Int64(); err == nil { + return xi == yi + } + } + + return false + } + break + } + + // We use big.Rat for comparing big numbers. + // It replaces big.Float due to following reason: + // big.Float comes with a default precision of 64, and setting a + // larger precision results in more memory being allocated + // (regardless of the actual number we are parsing with SetString). + // + // Note: If we're so close to zero that big.Float says we are zero, do + // *not* big.Rat).SetString on the original string it'll potentially + // take very long. + var a *big.Rat + fa, ok := new(big.Float).SetString(string(x)) + if !ok { + panic("illegal value") + } + if fa.IsInt() { + if i, _ := fa.Int64(); i == 0 { + a = new(big.Rat).SetInt64(0) + } + } + if a == nil { + a, ok = new(big.Rat).SetString(string(x)) + if !ok { + panic("illegal value") + } + } + + equal = func(b Value) bool { + if bNum, ok := b.(Number); ok { + var b *big.Rat + fb, ok := new(big.Float).SetString(string(bNum)) + if !ok { + panic("illegal value") + } + if fb.IsInt() { + if i, _ := fb.Int64(); i == 0 { + b = new(big.Rat).SetInt64(0) + } + } + if b == nil { + b, ok = new(big.Rat).SetString(string(bNum)) + if !ok { + panic("illegal value") + } + } + + return a.Cmp(b) == 0 + } + + return false + } + default: + equal = func(y Value) bool { return Compare(x, y) == 0 } + } + + for curr := head; curr != nil; curr = curr.next { + if equal(curr.key.Value) { + // The ground bit of the value may change in + // replace, hence adjust the counter per old + // and new value. 
+ + if curr.value.IsGround() { + obj.ground-- + } + if v.IsGround() { + obj.ground++ + } + + curr.value = v + + obj.rehash() + return + } + } + elem := &objectElem{ + key: k, + value: v, + next: head, + } + obj.elems[hash] = elem + // O(1) insertion, but we'll have to re-sort the keys later. + obj.keys = append(obj.keys, elem) + + if resetSortGuard { + // Reset the sync.Once instance. + // See https://github.com/golang/go/issues/25955 for why we do it this way. + // Note that this will always be the case when external code calls insert via + // Add, or otherwise. Internal code may however benefit from not having to + // re-create this when it's known not to be needed. + obj.sortGuard = sync.Once{} + } + + obj.hash += hash + v.Hash() + + if k.IsGround() { + obj.ground++ + } + if v.IsGround() { + obj.ground++ + } +} + +func (obj *object) rehash() { + // obj.keys is considered truth, from which obj.hash and obj.elems are recalculated. + + obj.hash = 0 + obj.elems = make(map[int]*objectElem, len(obj.keys)) + + for _, elem := range obj.keys { + hash := elem.key.Hash() + obj.hash += hash + elem.value.Hash() + obj.elems[hash] = elem + } +} + +func filterObject(o Value, filter Value) (Value, error) { + if (Null{}).Equal(filter) { + return o, nil + } + + filteredObj, ok := filter.(*object) + if !ok { + return nil, fmt.Errorf("invalid filter value %q, expected an object", filter) + } + + switch v := o.(type) { + case String, Number, Boolean, Null: + return o, nil + case *Array: + values := NewArray() + for i := 0; i < v.Len(); i++ { + subFilter := filteredObj.Get(StringTerm(strconv.Itoa(i))) + if subFilter != nil { + filteredValue, err := filterObject(v.Elem(i).Value, subFilter.Value) + if err != nil { + return nil, err + } + values = values.Append(NewTerm(filteredValue)) + } + } + return values, nil + case Set: + terms := make([]*Term, 0, v.Len()) + for _, t := range v.Slice() { + if filteredObj.Get(t) != nil { + filteredValue, err := filterObject(t.Value, filteredObj.Get(t).Value) + if err != nil { + return nil, err + } + terms = append(terms, NewTerm(filteredValue)) + } + } + return NewSet(terms...), nil + case *object: + values := NewObject() + + iterObj := v + other := filteredObj + if v.Len() < filteredObj.Len() { + iterObj = filteredObj + other = v + } + + err := iterObj.Iter(func(key *Term, _ *Term) error { + if other.Get(key) != nil { + filteredValue, err := filterObject(v.Get(key).Value, filteredObj.Get(key).Value) + if err != nil { + return err + } + values.Insert(key, NewTerm(filteredValue)) + } + return nil + }) + return values, err + default: + return nil, fmt.Errorf("invalid object value type %q", v) + } +} + +// NOTE(philipc): The only way to get an ObjectKeyIterator should be +// from an Object. This ensures that the iterator can have implementation- +// specific details internally, with no contracts except to the very +// limited interface. +type ObjectKeysIterator interface { + Next() (*Term, bool) +} + +type objectKeysIterator struct { + obj *object + numKeys int + index int +} + +func newobjectKeysIterator(o *object) ObjectKeysIterator { + return &objectKeysIterator{ + obj: o, + numKeys: o.Len(), + index: 0, + } +} + +func (oki *objectKeysIterator) Next() (*Term, bool) { + if oki.index == oki.numKeys || oki.numKeys == 0 { + return nil, false + } + oki.index++ + return oki.obj.sortedKeys()[oki.index-1].key, true +} + +// ArrayComprehension represents an array comprehension as defined in the language. 
+type ArrayComprehension struct { + Term *Term `json:"term"` + Body Body `json:"body"` +} + +// ArrayComprehensionTerm creates a new Term with an ArrayComprehension value. +func ArrayComprehensionTerm(term *Term, body Body) *Term { + return &Term{ + Value: &ArrayComprehension{ + Term: term, + Body: body, + }, + } +} + +// Copy returns a deep copy of ac. +func (ac *ArrayComprehension) Copy() *ArrayComprehension { + cpy := *ac + cpy.Body = ac.Body.Copy() + cpy.Term = ac.Term.Copy() + return &cpy +} + +// Equal returns true if ac is equal to other. +func (ac *ArrayComprehension) Equal(other Value) bool { + return Compare(ac, other) == 0 +} + +// Compare compares ac to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (ac *ArrayComprehension) Compare(other Value) int { + return Compare(ac, other) +} + +// Find returns the current value or a not found error. +func (ac *ArrayComprehension) Find(path Ref) (Value, error) { + if len(path) == 0 { + return ac, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code of the Value. +func (ac *ArrayComprehension) Hash() int { + return ac.Term.Hash() + ac.Body.Hash() +} + +// IsGround returns true if the Term and Body are ground. +func (ac *ArrayComprehension) IsGround() bool { + return ac.Term.IsGround() && ac.Body.IsGround() +} + +func (ac *ArrayComprehension) String() string { + return "[" + ac.Term.String() + " | " + ac.Body.String() + "]" +} + +// ObjectComprehension represents an object comprehension as defined in the language. +type ObjectComprehension struct { + Key *Term `json:"key"` + Value *Term `json:"value"` + Body Body `json:"body"` +} + +// ObjectComprehensionTerm creates a new Term with an ObjectComprehension value. +func ObjectComprehensionTerm(key, value *Term, body Body) *Term { + return &Term{ + Value: &ObjectComprehension{ + Key: key, + Value: value, + Body: body, + }, + } +} + +// Copy returns a deep copy of oc. +func (oc *ObjectComprehension) Copy() *ObjectComprehension { + cpy := *oc + cpy.Body = oc.Body.Copy() + cpy.Key = oc.Key.Copy() + cpy.Value = oc.Value.Copy() + return &cpy +} + +// Equal returns true if oc is equal to other. +func (oc *ObjectComprehension) Equal(other Value) bool { + return Compare(oc, other) == 0 +} + +// Compare compares oc to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (oc *ObjectComprehension) Compare(other Value) int { + return Compare(oc, other) +} + +// Find returns the current value or a not found error. +func (oc *ObjectComprehension) Find(path Ref) (Value, error) { + if len(path) == 0 { + return oc, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code of the Value. +func (oc *ObjectComprehension) Hash() int { + return oc.Key.Hash() + oc.Value.Hash() + oc.Body.Hash() +} + +// IsGround returns true if the Key, Value and Body are ground. +func (oc *ObjectComprehension) IsGround() bool { + return oc.Key.IsGround() && oc.Value.IsGround() && oc.Body.IsGround() +} + +func (oc *ObjectComprehension) String() string { + return "{" + oc.Key.String() + ": " + oc.Value.String() + " | " + oc.Body.String() + "}" +} + +// SetComprehension represents a set comprehension as defined in the language. +type SetComprehension struct { + Term *Term `json:"term"` + Body Body `json:"body"` +} + +// SetComprehensionTerm creates a new Term with an SetComprehension value. 
+func SetComprehensionTerm(term *Term, body Body) *Term { + return &Term{ + Value: &SetComprehension{ + Term: term, + Body: body, + }, + } +} + +// Copy returns a deep copy of sc. +func (sc *SetComprehension) Copy() *SetComprehension { + cpy := *sc + cpy.Body = sc.Body.Copy() + cpy.Term = sc.Term.Copy() + return &cpy +} + +// Equal returns true if sc is equal to other. +func (sc *SetComprehension) Equal(other Value) bool { + return Compare(sc, other) == 0 +} + +// Compare compares sc to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (sc *SetComprehension) Compare(other Value) int { + return Compare(sc, other) +} + +// Find returns the current value or a not found error. +func (sc *SetComprehension) Find(path Ref) (Value, error) { + if len(path) == 0 { + return sc, nil + } + return nil, errFindNotFound +} + +// Hash returns the hash code of the Value. +func (sc *SetComprehension) Hash() int { + return sc.Term.Hash() + sc.Body.Hash() +} + +// IsGround returns true if the Term and Body are ground. +func (sc *SetComprehension) IsGround() bool { + return sc.Term.IsGround() && sc.Body.IsGround() +} + +func (sc *SetComprehension) String() string { + return "{" + sc.Term.String() + " | " + sc.Body.String() + "}" +} + +// Call represents as function call in the language. +type Call []*Term + +// CallTerm returns a new Term with a Call value defined by terms. The first +// term is the operator and the rest are operands. +func CallTerm(terms ...*Term) *Term { + return NewTerm(Call(terms)) +} + +// Copy returns a deep copy of c. +func (c Call) Copy() Call { + return termSliceCopy(c) +} + +// Compare compares c to other, return <0, 0, or >0 if it is less than, equal to, +// or greater than other. +func (c Call) Compare(other Value) int { + return Compare(c, other) +} + +// Find returns the current value or a not found error. +func (c Call) Find(Ref) (Value, error) { + return nil, errFindNotFound +} + +// Hash returns the hash code for the Value. +func (c Call) Hash() int { + return termSliceHash(c) +} + +// IsGround returns true if the Value is ground. +func (c Call) IsGround() bool { + return termSliceIsGround(c) +} + +// MakeExpr returns an ew Expr from this call. +func (c Call) MakeExpr(output *Term) *Expr { + terms := []*Term(c) + return NewExpr(append(terms, output)) +} + +func (c Call) String() string { + args := make([]string, len(c)-1) + for i := 1; i < len(c); i++ { + args[i-1] = c[i].String() + } + return fmt.Sprintf("%v(%v)", c[0], strings.Join(args, ", ")) +} + +func termSliceCopy(a []*Term) []*Term { + cpy := make([]*Term, len(a)) + termSliceCopyTo(a, cpy) + return cpy +} + +func termSliceCopyTo(src, dst []*Term) { + for i := range src { + dst[i] = src[i].Copy() + } +} + +func termSliceEqual(a, b []*Term) bool { + if len(a) == len(b) { + for i := range a { + if !a[i].Equal(b[i]) { + return false + } + } + return true + } + return false +} + +func termSliceHash(a []*Term) int { + var hash int + for _, v := range a { + hash += v.Value.Hash() + } + return hash +} + +func termSliceIsGround(a []*Term) bool { + for _, v := range a { + if !v.IsGround() { + return false + } + } + return true +} + +// NOTE(tsandall): The unmarshalling errors in these functions are not +// helpful for callers because they do not identify the source of the +// unmarshalling error. Because OPA doesn't accept JSON describing ASTs +// from callers, this is acceptable (for now). If that changes in the future, +// the error messages should be revisited. 
The current approach focuses +// on the happy path and treats all errors the same. If better error +// reporting is needed, the error paths will need to be fleshed out. + +func unmarshalBody(b []interface{}) (Body, error) { + buf := Body{} + for _, e := range b { + if m, ok := e.(map[string]interface{}); ok { + expr := &Expr{} + if err := unmarshalExpr(expr, m); err == nil { + buf = append(buf, expr) + continue + } + } + goto unmarshal_error + } + return buf, nil +unmarshal_error: + return nil, fmt.Errorf("ast: unable to unmarshal body") +} + +func unmarshalExpr(expr *Expr, v map[string]interface{}) error { + if x, ok := v["negated"]; ok { + if b, ok := x.(bool); ok { + expr.Negated = b + } else { + return fmt.Errorf("ast: unable to unmarshal negated field with type: %T (expected true or false)", v["negated"]) + } + } + if generatedRaw, ok := v["generated"]; ok { + if b, ok := generatedRaw.(bool); ok { + expr.Generated = b + } else { + return fmt.Errorf("ast: unable to unmarshal generated field with type: %T (expected true or false)", v["generated"]) + } + } + + if err := unmarshalExprIndex(expr, v); err != nil { + return err + } + switch ts := v["terms"].(type) { + case map[string]interface{}: + t, err := unmarshalTerm(ts) + if err != nil { + return err + } + expr.Terms = t + case []interface{}: + terms, err := unmarshalTermSlice(ts) + if err != nil { + return err + } + expr.Terms = terms + default: + return fmt.Errorf(`ast: unable to unmarshal terms field with type: %T (expected {"value": ..., "type": ...} or [{"value": ..., "type": ...}, ...])`, v["terms"]) + } + if x, ok := v["with"]; ok { + if sl, ok := x.([]interface{}); ok { + ws := make([]*With, len(sl)) + for i := range sl { + var err error + ws[i], err = unmarshalWith(sl[i]) + if err != nil { + return err + } + } + expr.With = ws + } + } + if loc, ok := v["location"].(map[string]interface{}); ok { + expr.Location = &Location{} + if err := unmarshalLocation(expr.Location, loc); err != nil { + return err + } + } + return nil +} + +func unmarshalLocation(loc *Location, v map[string]interface{}) error { + if x, ok := v["file"]; ok { + if s, ok := x.(string); ok { + loc.File = s + } else { + return fmt.Errorf("ast: unable to unmarshal file field with type: %T (expected string)", v["file"]) + } + } + if x, ok := v["row"]; ok { + if n, ok := x.(json.Number); ok { + i64, err := n.Int64() + if err != nil { + return err + } + loc.Row = int(i64) + } else { + return fmt.Errorf("ast: unable to unmarshal row field with type: %T (expected number)", v["row"]) + } + } + if x, ok := v["col"]; ok { + if n, ok := x.(json.Number); ok { + i64, err := n.Int64() + if err != nil { + return err + } + loc.Col = int(i64) + } else { + return fmt.Errorf("ast: unable to unmarshal col field with type: %T (expected number)", v["col"]) + } + } + + return nil +} + +func unmarshalExprIndex(expr *Expr, v map[string]interface{}) error { + if x, ok := v["index"]; ok { + if n, ok := x.(json.Number); ok { + i, err := n.Int64() + if err == nil { + expr.Index = int(i) + return nil + } + } + } + return fmt.Errorf("ast: unable to unmarshal index field with type: %T (expected integer)", v["index"]) +} + +func unmarshalTerm(m map[string]interface{}) (*Term, error) { + var term Term + + v, err := unmarshalValue(m) + if err != nil { + return nil, err + } + term.Value = v + + if loc, ok := m["location"].(map[string]interface{}); ok { + term.Location = &Location{} + if err := unmarshalLocation(term.Location, loc); err != nil { + return nil, err + } + } + + return &term, nil +} + 
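
These unmarshal helpers consume the same {"type": ..., "value": ...} encoding that the exported AST types produce via their MarshalJSON methods, so a round trip through encoding/json is the quickest way to see the expected shape. A hedged sketch; the exact output formatting and the MustParseTerm helper are assumptions, as both live outside this hunk:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	// A scalar term marshals as a {"type": ..., "value": ...} document.
	bs, _ := json.Marshal(ast.StringTerm("hello"))
	fmt.Println(string(bs)) // roughly: {"type":"string","value":"hello"}

	// Objects marshal their sorted key/value pairs as an array of two-element
	// arrays, matching (*object).MarshalJSON above.
	bs, _ = json.Marshal(ast.MustParseTerm(`{"b": 2, "a": 1}`))
	fmt.Println(string(bs))
}
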
+func unmarshalTermSlice(s []interface{}) ([]*Term, error) { + buf := []*Term{} + for _, x := range s { + if m, ok := x.(map[string]interface{}); ok { + t, err := unmarshalTerm(m) + if err == nil { + buf = append(buf, t) + continue + } + return nil, err + } + return nil, fmt.Errorf("ast: unable to unmarshal term") + } + return buf, nil +} + +func unmarshalTermSliceValue(d map[string]interface{}) ([]*Term, error) { + if s, ok := d["value"].([]interface{}); ok { + return unmarshalTermSlice(s) + } + return nil, fmt.Errorf(`ast: unable to unmarshal term (expected {"value": [...], "type": ...} where type is one of: ref, array, or set)`) +} + +func unmarshalWith(i interface{}) (*With, error) { + if m, ok := i.(map[string]interface{}); ok { + tgt, _ := m["target"].(map[string]interface{}) + target, err := unmarshalTerm(tgt) + if err == nil { + val, _ := m["value"].(map[string]interface{}) + value, err := unmarshalTerm(val) + if err == nil { + return &With{ + Target: target, + Value: value, + }, nil + } + return nil, err + } + return nil, err + } + return nil, fmt.Errorf(`ast: unable to unmarshal with modifier (expected {"target": {...}, "value": {...}})`) +} + +func unmarshalValue(d map[string]interface{}) (Value, error) { + v := d["value"] + switch d["type"] { + case "null": + return NullValue, nil + case "boolean": + if b, ok := v.(bool); ok { + return Boolean(b), nil + } + case "number": + if n, ok := v.(json.Number); ok { + return Number(n), nil + } + case "string": + if s, ok := v.(string); ok { + return String(s), nil + } + case "var": + if s, ok := v.(string); ok { + return Var(s), nil + } + case "ref": + if s, err := unmarshalTermSliceValue(d); err == nil { + return Ref(s), nil + } + case "array": + if s, err := unmarshalTermSliceValue(d); err == nil { + return NewArray(s...), nil + } + case "set": + if s, err := unmarshalTermSliceValue(d); err == nil { + return NewSet(s...), nil + } + case "object": + if s, ok := v.([]interface{}); ok { + buf := NewObject() + for _, x := range s { + if i, ok := x.([]interface{}); ok && len(i) == 2 { + p, err := unmarshalTermSlice(i) + if err == nil { + buf.Insert(p[0], p[1]) + continue + } + } + goto unmarshal_error + } + return buf, nil + } + case "arraycomprehension", "setcomprehension": + if m, ok := v.(map[string]interface{}); ok { + t, ok := m["term"].(map[string]interface{}) + if !ok { + goto unmarshal_error + } + + term, err := unmarshalTerm(t) + if err != nil { + goto unmarshal_error + } + + b, ok := m["body"].([]interface{}) + if !ok { + goto unmarshal_error + } + + body, err := unmarshalBody(b) + if err != nil { + goto unmarshal_error + } + + if d["type"] == "arraycomprehension" { + return &ArrayComprehension{Term: term, Body: body}, nil + } + return &SetComprehension{Term: term, Body: body}, nil + } + case "objectcomprehension": + if m, ok := v.(map[string]interface{}); ok { + k, ok := m["key"].(map[string]interface{}) + if !ok { + goto unmarshal_error + } + + key, err := unmarshalTerm(k) + if err != nil { + goto unmarshal_error + } + + v, ok := m["value"].(map[string]interface{}) + if !ok { + goto unmarshal_error + } + + value, err := unmarshalTerm(v) + if err != nil { + goto unmarshal_error + } + + b, ok := m["body"].([]interface{}) + if !ok { + goto unmarshal_error + } + + body, err := unmarshalBody(b) + if err != nil { + goto unmarshal_error + } + + return &ObjectComprehension{Key: key, Value: value, Body: body}, nil + } + case "call": + if s, err := unmarshalTermSliceValue(d); err == nil { + return Call(s), nil + } + } +unmarshal_error: + 
return nil, fmt.Errorf("ast: unable to unmarshal term") +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go new file mode 100644 index 000000000..391a16486 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/transform.go @@ -0,0 +1,431 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" +) + +// Transformer defines the interface for transforming AST elements. If the +// transformer returns nil and does not indicate an error, the AST element will +// be set to nil and no transformations will be applied to children of the +// element. +type Transformer interface { + Transform(interface{}) (interface{}, error) +} + +// Transform iterates the AST and calls the Transform function on the +// Transformer t for x before recursing. +func Transform(t Transformer, x interface{}) (interface{}, error) { + + if term, ok := x.(*Term); ok { + return Transform(t, term.Value) + } + + y, err := t.Transform(x) + if err != nil { + return x, err + } + + if y == nil { + return nil, nil + } + + var ok bool + switch y := y.(type) { + case *Module: + p, err := Transform(t, y.Package) + if err != nil { + return nil, err + } + if y.Package, ok = p.(*Package); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Package, p) + } + for i := range y.Imports { + imp, err := Transform(t, y.Imports[i]) + if err != nil { + return nil, err + } + if y.Imports[i], ok = imp.(*Import); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Imports[i], imp) + } + } + for i := range y.Rules { + rule, err := Transform(t, y.Rules[i]) + if err != nil { + return nil, err + } + if y.Rules[i], ok = rule.(*Rule); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Rules[i], rule) + } + } + for i := range y.Annotations { + a, err := Transform(t, y.Annotations[i]) + if err != nil { + return nil, err + } + if y.Annotations[i], ok = a.(*Annotations); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Annotations[i], a) + } + } + for i := range y.Comments { + comment, err := Transform(t, y.Comments[i]) + if err != nil { + return nil, err + } + if y.Comments[i], ok = comment.(*Comment); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Comments[i], comment) + } + } + return y, nil + case *Package: + ref, err := Transform(t, y.Path) + if err != nil { + return nil, err + } + if y.Path, ok = ref.(Ref); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Path, ref) + } + return y, nil + case *Import: + y.Path, err = transformTerm(t, y.Path) + if err != nil { + return nil, err + } + if y.Alias, err = transformVar(t, y.Alias); err != nil { + return nil, err + } + return y, nil + case *Rule: + if y.Head, err = transformHead(t, y.Head); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + if y.Else != nil { + rule, err := Transform(t, y.Else) + if err != nil { + return nil, err + } + if y.Else, ok = rule.(*Rule); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.Else, rule) + } + } + return y, nil + case *Head: + if y.Reference, err = transformRef(t, y.Reference); err != nil { + return nil, err + } + if y.Name, err = transformVar(t, y.Name); err != nil { + return nil, err + } + if y.Args, err = transformArgs(t, y.Args); err != 
nil { + return nil, err + } + if y.Key != nil { + if y.Key, err = transformTerm(t, y.Key); err != nil { + return nil, err + } + } + if y.Value != nil { + if y.Value, err = transformTerm(t, y.Value); err != nil { + return nil, err + } + } + return y, nil + case Args: + for i := range y { + if y[i], err = transformTerm(t, y[i]); err != nil { + return nil, err + } + } + return y, nil + case Body: + for i, e := range y { + e, err := Transform(t, e) + if err != nil { + return nil, err + } + if y[i], ok = e.(*Expr); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y[i], e) + } + } + return y, nil + case *Expr: + switch ts := y.Terms.(type) { + case *SomeDecl: + decl, err := Transform(t, ts) + if err != nil { + return nil, err + } + if y.Terms, ok = decl.(*SomeDecl); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y, decl) + } + return y, nil + case []*Term: + for i := range ts { + if ts[i], err = transformTerm(t, ts[i]); err != nil { + return nil, err + } + } + case *Term: + if y.Terms, err = transformTerm(t, ts); err != nil { + return nil, err + } + case *Every: + if ts.Key != nil { + ts.Key, err = transformTerm(t, ts.Key) + if err != nil { + return nil, err + } + } + ts.Value, err = transformTerm(t, ts.Value) + if err != nil { + return nil, err + } + ts.Domain, err = transformTerm(t, ts.Domain) + if err != nil { + return nil, err + } + ts.Body, err = transformBody(t, ts.Body) + if err != nil { + return nil, err + } + y.Terms = ts + } + for i, w := range y.With { + w, err := Transform(t, w) + if err != nil { + return nil, err + } + if y.With[i], ok = w.(*With); !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", y.With[i], w) + } + } + return y, nil + case *With: + if y.Target, err = transformTerm(t, y.Target); err != nil { + return nil, err + } + if y.Value, err = transformTerm(t, y.Value); err != nil { + return nil, err + } + return y, nil + case Ref: + for i, term := range y { + if y[i], err = transformTerm(t, term); err != nil { + return nil, err + } + } + return y, nil + case *object: + return y.Map(func(k, v *Term) (*Term, *Term, error) { + k, err := transformTerm(t, k) + if err != nil { + return nil, nil, err + } + v, err = transformTerm(t, v) + if err != nil { + return nil, nil, err + } + return k, v, nil + }) + case *Array: + for i := 0; i < y.Len(); i++ { + v, err := transformTerm(t, y.Elem(i)) + if err != nil { + return nil, err + } + y.set(i, v) + } + return y, nil + case Set: + y, err = y.Map(func(term *Term) (*Term, error) { + return transformTerm(t, term) + }) + if err != nil { + return nil, err + } + return y, nil + case *ArrayComprehension: + if y.Term, err = transformTerm(t, y.Term); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + return y, nil + case *ObjectComprehension: + if y.Key, err = transformTerm(t, y.Key); err != nil { + return nil, err + } + if y.Value, err = transformTerm(t, y.Value); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + return y, nil + case *SetComprehension: + if y.Term, err = transformTerm(t, y.Term); err != nil { + return nil, err + } + if y.Body, err = transformBody(t, y.Body); err != nil { + return nil, err + } + return y, nil + case Call: + for i := range y { + if y[i], err = transformTerm(t, y[i]); err != nil { + return nil, err + } + } + return y, nil + default: + return y, nil + } +} + +// TransformRefs calls the function f on all references under x. 
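
TransformRefs (declared just below) together with GenericTransformer is the usual entry point for rewriting references. A sketch that re-roots input references under a made-up data.stub document; the helpers MustParseBody, MustParseRef, InputRootRef and Ref.Concat come from elsewhere in the package:

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	body := ast.MustParseBody(`x := input.a; y := data.b[x]`)

	out, err := ast.TransformRefs(body, func(r ast.Ref) (ast.Value, error) {
		// Rewrite refs rooted at "input" to be rooted at "data.stub";
		// everything else is returned unchanged.
		if r.HasPrefix(ast.InputRootRef) {
			return ast.MustParseRef("data.stub").Concat(r[1:]), nil
		}
		return r, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // the input-rooted ref now reads data.stub.a
}
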
+func TransformRefs(x interface{}, f func(Ref) (Value, error)) (interface{}, error) { + t := &GenericTransformer{func(x interface{}) (interface{}, error) { + if r, ok := x.(Ref); ok { + return f(r) + } + return x, nil + }} + return Transform(t, x) +} + +// TransformVars calls the function f on all vars under x. +func TransformVars(x interface{}, f func(Var) (Value, error)) (interface{}, error) { + t := &GenericTransformer{func(x interface{}) (interface{}, error) { + if v, ok := x.(Var); ok { + return f(v) + } + return x, nil + }} + return Transform(t, x) +} + +// TransformComprehensions calls the functio nf on all comprehensions under x. +func TransformComprehensions(x interface{}, f func(interface{}) (Value, error)) (interface{}, error) { + t := &GenericTransformer{func(x interface{}) (interface{}, error) { + switch x := x.(type) { + case *ArrayComprehension: + return f(x) + case *SetComprehension: + return f(x) + case *ObjectComprehension: + return f(x) + } + return x, nil + }} + return Transform(t, x) +} + +// GenericTransformer implements the Transformer interface to provide a utility +// to transform AST nodes using a closure. +type GenericTransformer struct { + f func(interface{}) (interface{}, error) +} + +// NewGenericTransformer returns a new GenericTransformer that will transform +// AST nodes using the function f. +func NewGenericTransformer(f func(x interface{}) (interface{}, error)) *GenericTransformer { + return &GenericTransformer{ + f: f, + } +} + +// Transform calls the function f on the GenericTransformer. +func (t *GenericTransformer) Transform(x interface{}) (interface{}, error) { + return t.f(x) +} + +func transformHead(t Transformer, head *Head) (*Head, error) { + y, err := Transform(t, head) + if err != nil { + return nil, err + } + h, ok := y.(*Head) + if !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", head, y) + } + return h, nil +} + +func transformArgs(t Transformer, args Args) (Args, error) { + y, err := Transform(t, args) + if err != nil { + return nil, err + } + a, ok := y.(Args) + if !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", args, y) + } + return a, nil +} + +func transformBody(t Transformer, body Body) (Body, error) { + y, err := Transform(t, body) + if err != nil { + return nil, err + } + r, ok := y.(Body) + if !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", body, y) + } + return r, nil +} + +func transformTerm(t Transformer, term *Term) (*Term, error) { + v, err := transformValue(t, term.Value) + if err != nil { + return nil, err + } + r := &Term{ + Value: v, + Location: term.Location, + } + return r, nil +} + +func transformValue(t Transformer, v Value) (Value, error) { + v1, err := Transform(t, v) + if err != nil { + return nil, err + } + r, ok := v1.(Value) + if !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", v, v1) + } + return r, nil +} + +func transformVar(t Transformer, v Var) (Var, error) { + v1, err := Transform(t, v) + if err != nil { + return "", err + } + r, ok := v1.(Var) + if !ok { + return "", fmt.Errorf("illegal transform: %T != %T", v, v1) + } + return r, nil +} + +func transformRef(t Transformer, r Ref) (Ref, error) { + r1, err := Transform(t, r) + if err != nil { + return nil, err + } + r2, ok := r1.(Ref) + if !ok { + return nil, fmt.Errorf("illegal transform: %T != %T", r, r2) + } + return r2, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go new file mode 100644 index 
000000000..60244974a --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/unify.go @@ -0,0 +1,235 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +func isRefSafe(ref Ref, safe VarSet) bool { + switch head := ref[0].Value.(type) { + case Var: + return safe.Contains(head) + case Call: + return isCallSafe(head, safe) + default: + for v := range ref[0].Vars() { + if !safe.Contains(v) { + return false + } + } + return true + } +} + +func isCallSafe(call Call, safe VarSet) bool { + vis := NewVarVisitor().WithParams(SafetyCheckVisitorParams) + vis.Walk(call) + unsafe := vis.Vars().Diff(safe) + return len(unsafe) == 0 +} + +// Unify returns a set of variables that will be unified when the equality expression defined by +// terms a and b is evaluated. The unifier assumes that variables in the VarSet safe are already +// unified. +func Unify(safe VarSet, a *Term, b *Term) VarSet { + u := &unifier{ + safe: safe, + unified: VarSet{}, + unknown: map[Var]VarSet{}, + } + u.unify(a, b) + return u.unified +} + +type unifier struct { + safe VarSet + unified VarSet + unknown map[Var]VarSet +} + +func (u *unifier) isSafe(x Var) bool { + return u.safe.Contains(x) || u.unified.Contains(x) +} + +func (u *unifier) unify(a *Term, b *Term) { + + switch a := a.Value.(type) { + + case Var: + switch b := b.Value.(type) { + case Var: + if u.isSafe(b) { + u.markSafe(a) + } else if u.isSafe(a) { + u.markSafe(b) + } else { + u.markUnknown(a, b) + u.markUnknown(b, a) + } + case *Array, Object: + u.unifyAll(a, b) + case Ref: + if isRefSafe(b, u.safe) { + u.markSafe(a) + } + case Call: + if isCallSafe(b, u.safe) { + u.markSafe(a) + } + default: + u.markSafe(a) + } + + case Ref: + if isRefSafe(a, u.safe) { + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *Array, Object: + u.markAllSafe(b) + } + } + + case Call: + if isCallSafe(a, u.safe) { + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *Array, Object: + u.markAllSafe(b) + } + } + + case *ArrayComprehension: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *Array: + u.markAllSafe(b) + } + case *ObjectComprehension: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + case *object: + u.markAllSafe(b) + } + case *SetComprehension: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + } + + case *Array: + switch b := b.Value.(type) { + case Var: + u.unifyAll(b, a) + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + u.markAllSafe(a) + case Ref: + if isRefSafe(b, u.safe) { + u.markAllSafe(a) + } + case Call: + if isCallSafe(b, u.safe) { + u.markAllSafe(a) + } + case *Array: + if a.Len() == b.Len() { + for i := 0; i < a.Len(); i++ { + u.unify(a.Elem(i), b.Elem(i)) + } + } + } + + case *object: + switch b := b.Value.(type) { + case Var: + u.unifyAll(b, a) + case Ref: + if isRefSafe(b, u.safe) { + u.markAllSafe(a) + } + case Call: + if isCallSafe(b, u.safe) { + u.markAllSafe(a) + } + case *object: + if a.Len() == b.Len() { + _ = a.Iter(func(k, v *Term) error { + if v2 := b.Get(k); v2 != nil { + u.unify(v, v2) + } + return nil + }) // impossible to return error + } + } + + default: + switch b := b.Value.(type) { + case Var: + u.markSafe(b) + } + } +} + +func (u *unifier) markAllSafe(x Value) { + vis := u.varVisitor() + vis.Walk(x) + for v := range vis.Vars() { + u.markSafe(v) + } +} + +func (u *unifier) markSafe(x Var) { + u.unified.Add(x) + + 
// Add dependencies of 'x' to safe set + vs := u.unknown[x] + delete(u.unknown, x) + for v := range vs { + u.markSafe(v) + } + + // Add dependants of 'x' to safe set if they have no more + // dependencies. + for v, deps := range u.unknown { + if deps.Contains(x) { + delete(deps, x) + if len(deps) == 0 { + u.markSafe(v) + } + } + } +} + +func (u *unifier) markUnknown(a, b Var) { + if _, ok := u.unknown[a]; !ok { + u.unknown[a] = NewVarSet() + } + u.unknown[a].Add(b) +} + +func (u *unifier) unifyAll(a Var, b Value) { + if u.isSafe(a) { + u.markAllSafe(b) + } else { + vis := u.varVisitor() + vis.Walk(b) + unsafe := vis.Vars().Diff(u.safe).Diff(u.unified) + if len(unsafe) == 0 { + u.markSafe(a) + } else { + for v := range unsafe { + u.markUnknown(a, v) + } + } + } +} + +func (u *unifier) varVisitor() *VarVisitor { + return NewVarVisitor().WithParams(VarVisitorParams{ + SkipRefHead: true, + SkipObjectKeys: true, + SkipClosures: true, + }) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go new file mode 100644 index 000000000..d51abbdae --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/varset.go @@ -0,0 +1,95 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +import ( + "fmt" + "slices" + + "github.com/open-policy-agent/opa/v1/util" +) + +// VarSet represents a set of variables. +type VarSet map[Var]struct{} + +// NewVarSet returns a new VarSet containing the specified variables. +func NewVarSet(vs ...Var) VarSet { + s := VarSet{} + for _, v := range vs { + s.Add(v) + } + return s +} + +// Add updates the set to include the variable "v". +func (s VarSet) Add(v Var) { + s[v] = struct{}{} +} + +// Contains returns true if the set contains the variable "v". +func (s VarSet) Contains(v Var) bool { + _, ok := s[v] + return ok +} + +// Copy returns a shallow copy of the VarSet. +func (s VarSet) Copy() VarSet { + cpy := VarSet{} + for v := range s { + cpy.Add(v) + } + return cpy +} + +// Diff returns a VarSet containing variables in s that are not in vs. +func (s VarSet) Diff(vs VarSet) VarSet { + r := VarSet{} + for v := range s { + if !vs.Contains(v) { + r.Add(v) + } + } + return r +} + +// Equal returns true if s contains exactly the same elements as vs. +func (s VarSet) Equal(vs VarSet) bool { + if len(s.Diff(vs)) > 0 { + return false + } + return len(vs.Diff(s)) == 0 +} + +// Intersect returns a VarSet containing variables in s that are in vs. +func (s VarSet) Intersect(vs VarSet) VarSet { + r := VarSet{} + for v := range s { + if vs.Contains(v) { + r.Add(v) + } + } + return r +} + +// Sorted returns a sorted slice of vars from s. +func (s VarSet) Sorted() []Var { + sorted := make([]Var, 0, len(s)) + for v := range s { + sorted = append(sorted, v) + } + slices.SortFunc(sorted, VarCompare) + return sorted +} + +// Update merges the other VarSet into this VarSet. 
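
VarSet is the small building block behind the safety analysis; together with Unify from unify.go above it answers which variables become bound by an equality. A short sketch (VarTerm is a constructor from elsewhere in the package; the variable names are illustrative):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	safe := ast.NewVarSet(ast.Var("y"))
	other := ast.NewVarSet(ast.Var("x"), ast.Var("y"))

	fmt.Println(other.Diff(safe).Sorted())      // [x]
	fmt.Println(other.Intersect(safe).Sorted()) // [y]

	// Unifying x = y when y is already safe makes x safe as well.
	unified := ast.Unify(safe, ast.VarTerm("x"), ast.VarTerm("y"))
	fmt.Println(unified.Sorted()) // [x]
}
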
+func (s VarSet) Update(vs VarSet) { + for v := range vs { + s.Add(v) + } +} + +func (s VarSet) String() string { + return fmt.Sprintf("%v", util.KeysSorted(s)) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ast/version_index.json b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json similarity index 99% rename from constraint/vendor/github.com/open-policy-agent/opa/ast/version_index.json rename to constraint/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json index 718df220f..b888b3e02 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ast/version_index.json +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/version_index.json @@ -1395,6 +1395,13 @@ } }, "features": { + "rego_v1": { + "Major": 1, + "Minor": 0, + "Patch": 0, + "PreRelease": "", + "Metadata": "" + }, "rego_v1_import": { "Major": 0, "Minor": 59, diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go new file mode 100644 index 000000000..91cfa208e --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ast/visit.go @@ -0,0 +1,783 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package ast + +// Visitor defines the interface for iterating AST elements. The Visit function +// can return a Visitor w which will be used to visit the children of the AST +// element v. If the Visit function returns nil, the children will not be +// visited. +// Deprecated: use GenericVisitor or another visitor implementation +type Visitor interface { + Visit(v interface{}) (w Visitor) +} + +// BeforeAndAfterVisitor wraps Visitor to provide hooks for being called before +// and after the AST has been visited. +// Deprecated: use GenericVisitor or another visitor implementation +type BeforeAndAfterVisitor interface { + Visitor + Before(x interface{}) + After(x interface{}) +} + +// Walk iterates the AST by calling the Visit function on the Visitor +// v for x before recursing. +// Deprecated: use GenericVisitor.Walk +func Walk(v Visitor, x interface{}) { + if bav, ok := v.(BeforeAndAfterVisitor); !ok { + walk(v, x) + } else { + bav.Before(x) + defer bav.After(x) + walk(bav, x) + } +} + +// WalkBeforeAndAfter iterates the AST by calling the Visit function on the +// Visitor v for x before recursing. 
+// Deprecated: use GenericVisitor.Walk +func WalkBeforeAndAfter(v BeforeAndAfterVisitor, x interface{}) { + Walk(v, x) +} + +func walk(v Visitor, x interface{}) { + w := v.Visit(x) + if w == nil { + return + } + switch x := x.(type) { + case *Module: + Walk(w, x.Package) + for i := range x.Imports { + Walk(w, x.Imports[i]) + } + for i := range x.Rules { + Walk(w, x.Rules[i]) + } + for i := range x.Annotations { + Walk(w, x.Annotations[i]) + } + for i := range x.Comments { + Walk(w, x.Comments[i]) + } + case *Package: + Walk(w, x.Path) + case *Import: + Walk(w, x.Path) + Walk(w, x.Alias) + case *Rule: + Walk(w, x.Head) + Walk(w, x.Body) + if x.Else != nil { + Walk(w, x.Else) + } + case *Head: + Walk(w, x.Name) + Walk(w, x.Args) + if x.Key != nil { + Walk(w, x.Key) + } + if x.Value != nil { + Walk(w, x.Value) + } + case Body: + for i := range x { + Walk(w, x[i]) + } + case Args: + for i := range x { + Walk(w, x[i]) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + Walk(w, ts) + case []*Term: + for i := range ts { + Walk(w, ts[i]) + } + } + for i := range x.With { + Walk(w, x.With[i]) + } + case *With: + Walk(w, x.Target) + Walk(w, x.Value) + case *Term: + Walk(w, x.Value) + case Ref: + for i := range x { + Walk(w, x[i]) + } + case *object: + x.Foreach(func(k, vv *Term) { + Walk(w, k) + Walk(w, vv) + }) + case *Array: + x.Foreach(func(t *Term) { + Walk(w, t) + }) + case Set: + x.Foreach(func(t *Term) { + Walk(w, t) + }) + case *ArrayComprehension: + Walk(w, x.Term) + Walk(w, x.Body) + case *ObjectComprehension: + Walk(w, x.Key) + Walk(w, x.Value) + Walk(w, x.Body) + case *SetComprehension: + Walk(w, x.Term) + Walk(w, x.Body) + case Call: + for i := range x { + Walk(w, x[i]) + } + case *Every: + if x.Key != nil { + Walk(w, x.Key) + } + Walk(w, x.Value) + Walk(w, x.Domain) + Walk(w, x.Body) + case *SomeDecl: + for i := range x.Symbols { + Walk(w, x.Symbols[i]) + } + } +} + +// WalkVars calls the function f on all vars under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkVars(x interface{}, f func(Var) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if v, ok := x.(Var); ok { + return f(v) + } + return false + }} + vis.Walk(x) +} + +// WalkClosures calls the function f on all closures under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkClosures(x interface{}, f func(interface{}) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + switch x := x.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension, *Every: + return f(x) + } + return false + }} + vis.Walk(x) +} + +// WalkRefs calls the function f on all references under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkRefs(x interface{}, f func(Ref) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if r, ok := x.(Ref); ok { + return f(r) + } + return false + }} + vis.Walk(x) +} + +// WalkTerms calls the function f on all terms under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkTerms(x interface{}, f func(*Term) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if term, ok := x.(*Term); ok { + return f(term) + } + return false + }} + vis.Walk(x) +} + +// WalkWiths calls the function f on all with modifiers under x. If the function f +// returns true, AST nodes under the last node will not be visited. 
+func WalkWiths(x interface{}, f func(*With) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if w, ok := x.(*With); ok { + return f(w) + } + return false + }} + vis.Walk(x) +} + +// WalkExprs calls the function f on all expressions under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkExprs(x interface{}, f func(*Expr) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if r, ok := x.(*Expr); ok { + return f(r) + } + return false + }} + vis.Walk(x) +} + +// WalkBodies calls the function f on all bodies under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkBodies(x interface{}, f func(Body) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if b, ok := x.(Body); ok { + return f(b) + } + return false + }} + vis.Walk(x) +} + +// WalkRules calls the function f on all rules under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkRules(x interface{}, f func(*Rule) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if r, ok := x.(*Rule); ok { + stop := f(r) + // NOTE(tsandall): since rules cannot be embedded inside of queries + // we can stop early if there is no else block. + if stop || r.Else == nil { + return true + } + } + return false + }} + vis.Walk(x) +} + +// WalkNodes calls the function f on all nodes under x. If the function f +// returns true, AST nodes under the last node will not be visited. +func WalkNodes(x interface{}, f func(Node) bool) { + vis := &GenericVisitor{func(x interface{}) bool { + if n, ok := x.(Node); ok { + return f(n) + } + return false + }} + vis.Walk(x) +} + +// GenericVisitor provides a utility to walk over AST nodes using a +// closure. If the closure returns true, the visitor will not walk +// over AST nodes under x. +type GenericVisitor struct { + f func(x interface{}) bool +} + +// NewGenericVisitor returns a new GenericVisitor that will invoke the function +// f on AST nodes. +func NewGenericVisitor(f func(x interface{}) bool) *GenericVisitor { + return &GenericVisitor{f} +} + +// Walk iterates the AST by calling the function f on the +// GenericVisitor before recursing. Contrary to the generic Walk, this +// does not require allocating the visitor from heap. 
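
The Walk* helpers above are thin wrappers over GenericVisitor; a sketch that collects every reference in a module (the policy source is illustrative and MustParseModule is defined outside this hunk):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	module := ast.MustParseModule(`package example

allow if {
	input.user == data.admins[_]
}`)

	// Returning false keeps walking into the children of each reference;
	// returning true would prune that subtree.
	var refs []ast.Ref
	ast.WalkRefs(module, func(r ast.Ref) bool {
		refs = append(refs, r)
		return false
	})
	for _, r := range refs {
		fmt.Println(r)
	}
}
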
+func (vis *GenericVisitor) Walk(x interface{}) { + if vis.f(x) { + return + } + + switch x := x.(type) { + case *Module: + vis.Walk(x.Package) + for i := range x.Imports { + vis.Walk(x.Imports[i]) + } + for i := range x.Rules { + vis.Walk(x.Rules[i]) + } + for i := range x.Annotations { + vis.Walk(x.Annotations[i]) + } + for i := range x.Comments { + vis.Walk(x.Comments[i]) + } + case *Package: + vis.Walk(x.Path) + case *Import: + vis.Walk(x.Path) + vis.Walk(x.Alias) + case *Rule: + vis.Walk(x.Head) + vis.Walk(x.Body) + if x.Else != nil { + vis.Walk(x.Else) + } + case *Head: + vis.Walk(x.Name) + vis.Walk(x.Args) + if x.Key != nil { + vis.Walk(x.Key) + } + if x.Value != nil { + vis.Walk(x.Value) + } + case Body: + for i := range x { + vis.Walk(x[i]) + } + case Args: + for i := range x { + vis.Walk(x[i]) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + vis.Walk(ts) + case []*Term: + for i := range ts { + vis.Walk(ts[i]) + } + } + for i := range x.With { + vis.Walk(x.With[i]) + } + case *With: + vis.Walk(x.Target) + vis.Walk(x.Value) + case *Term: + vis.Walk(x.Value) + case Ref: + for i := range x { + vis.Walk(x[i]) + } + case *object: + x.Foreach(func(k, _ *Term) { + vis.Walk(k) + vis.Walk(x.Get(k)) + }) + case Object: + for _, k := range x.Keys() { + vis.Walk(k) + vis.Walk(x.Get(k)) + } + case *Array: + for i := 0; i < x.Len(); i++ { + vis.Walk(x.Elem(i)) + } + case Set: + xSlice := x.Slice() + for i := range xSlice { + vis.Walk(xSlice[i]) + } + case *ArrayComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case *ObjectComprehension: + vis.Walk(x.Key) + vis.Walk(x.Value) + vis.Walk(x.Body) + case *SetComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case Call: + for i := range x { + vis.Walk(x[i]) + } + case *Every: + if x.Key != nil { + vis.Walk(x.Key) + } + vis.Walk(x.Value) + vis.Walk(x.Domain) + vis.Walk(x.Body) + case *SomeDecl: + for i := range x.Symbols { + vis.Walk(x.Symbols[i]) + } + } +} + +// BeforeAfterVisitor provides a utility to walk over AST nodes using +// closures. If the before closure returns true, the visitor will not +// walk over AST nodes under x. The after closure is invoked always +// after visiting a node. +type BeforeAfterVisitor struct { + before func(x interface{}) bool + after func(x interface{}) +} + +// NewBeforeAfterVisitor returns a new BeforeAndAfterVisitor that +// will invoke the functions before and after AST nodes. +func NewBeforeAfterVisitor(before func(x interface{}) bool, after func(x interface{})) *BeforeAfterVisitor { + return &BeforeAfterVisitor{before, after} +} + +// Walk iterates the AST by calling the functions on the +// BeforeAndAfterVisitor before and after recursing. Contrary to the +// generic Walk, this does not require allocating the visitor from +// heap. 
+func (vis *BeforeAfterVisitor) Walk(x interface{}) { + defer vis.after(x) + if vis.before(x) { + return + } + + switch x := x.(type) { + case *Module: + vis.Walk(x.Package) + for i := range x.Imports { + vis.Walk(x.Imports[i]) + } + for i := range x.Rules { + vis.Walk(x.Rules[i]) + } + for i := range x.Annotations { + vis.Walk(x.Annotations[i]) + } + for i := range x.Comments { + vis.Walk(x.Comments[i]) + } + case *Package: + vis.Walk(x.Path) + case *Import: + vis.Walk(x.Path) + vis.Walk(x.Alias) + case *Rule: + vis.Walk(x.Head) + vis.Walk(x.Body) + if x.Else != nil { + vis.Walk(x.Else) + } + case *Head: + if len(x.Reference) > 0 { + vis.Walk(x.Reference) + } else { + vis.Walk(x.Name) + if x.Key != nil { + vis.Walk(x.Key) + } + } + vis.Walk(x.Args) + if x.Value != nil { + vis.Walk(x.Value) + } + case Body: + for i := range x { + vis.Walk(x[i]) + } + case Args: + for i := range x { + vis.Walk(x[i]) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + vis.Walk(ts) + case []*Term: + for i := range ts { + vis.Walk(ts[i]) + } + } + for i := range x.With { + vis.Walk(x.With[i]) + } + case *With: + vis.Walk(x.Target) + vis.Walk(x.Value) + case *Term: + vis.Walk(x.Value) + case Ref: + for i := range x { + vis.Walk(x[i]) + } + case *object: + x.Foreach(func(k, _ *Term) { + vis.Walk(k) + vis.Walk(x.Get(k)) + }) + case Object: + x.Foreach(func(k, _ *Term) { + vis.Walk(k) + vis.Walk(x.Get(k)) + }) + case *Array: + x.Foreach(func(t *Term) { + vis.Walk(t) + }) + case Set: + xSlice := x.Slice() + for i := range xSlice { + vis.Walk(xSlice[i]) + } + case *ArrayComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case *ObjectComprehension: + vis.Walk(x.Key) + vis.Walk(x.Value) + vis.Walk(x.Body) + case *SetComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case Call: + for i := range x { + vis.Walk(x[i]) + } + case *Every: + if x.Key != nil { + vis.Walk(x.Key) + } + vis.Walk(x.Value) + vis.Walk(x.Domain) + vis.Walk(x.Body) + case *SomeDecl: + for i := range x.Symbols { + vis.Walk(x.Symbols[i]) + } + } +} + +// VarVisitor walks AST nodes under a given node and collects all encountered +// variables. The collected variables can be controlled by specifying +// VarVisitorParams when creating the visitor. +type VarVisitor struct { + params VarVisitorParams + vars VarSet +} + +// VarVisitorParams contains settings for a VarVisitor. +type VarVisitorParams struct { + SkipRefHead bool + SkipRefCallHead bool + SkipObjectKeys bool + SkipClosures bool + SkipWithTarget bool + SkipSets bool +} + +// NewVarVisitor returns a new VarVisitor object. +func NewVarVisitor() *VarVisitor { + return &VarVisitor{ + vars: NewVarSet(), + } +} + +// WithParams sets the parameters in params on vis. +func (vis *VarVisitor) WithParams(params VarVisitorParams) *VarVisitor { + vis.params = params + return vis +} + +// Vars returns a VarSet that contains collected vars. 
+func (vis *VarVisitor) Vars() VarSet { + return vis.vars +} + +// visit determines if the VarVisitor will recurse into x: if it returns `true`, +// the visitor will _skip_ that branch of the AST +func (vis *VarVisitor) visit(v interface{}) bool { + if vis.params.SkipObjectKeys { + if o, ok := v.(Object); ok { + o.Foreach(func(_, v *Term) { + vis.Walk(v) + }) + return true + } + } + if vis.params.SkipRefHead { + if r, ok := v.(Ref); ok { + rSlice := r[1:] + for i := range rSlice { + vis.Walk(rSlice[i]) + } + return true + } + } + if vis.params.SkipClosures { + switch v := v.(type) { + case *ArrayComprehension, *ObjectComprehension, *SetComprehension: + return true + case *Expr: + if ev, ok := v.Terms.(*Every); ok { + vis.Walk(ev.Domain) + // We're _not_ walking ev.Body -- that's the closure here + return true + } + } + } + if vis.params.SkipWithTarget { + if v, ok := v.(*With); ok { + vis.Walk(v.Value) + return true + } + } + if vis.params.SkipSets { + if _, ok := v.(Set); ok { + return true + } + } + if vis.params.SkipRefCallHead { + switch v := v.(type) { + case *Expr: + if terms, ok := v.Terms.([]*Term); ok { + termSlice := terms[0].Value.(Ref)[1:] + for i := range termSlice { + vis.Walk(termSlice[i]) + } + for i := 1; i < len(terms); i++ { + vis.Walk(terms[i]) + } + for i := range v.With { + vis.Walk(v.With[i]) + } + return true + } + case Call: + operator := v[0].Value.(Ref) + for i := 1; i < len(operator); i++ { + vis.Walk(operator[i]) + } + for i := 1; i < len(v); i++ { + vis.Walk(v[i]) + } + return true + case *With: + if ref, ok := v.Target.Value.(Ref); ok { + refSlice := ref[1:] + for i := range refSlice { + vis.Walk(refSlice[i]) + } + } + if ref, ok := v.Value.Value.(Ref); ok { + refSlice := ref[1:] + for i := range refSlice { + vis.Walk(refSlice[i]) + } + } else { + vis.Walk(v.Value) + } + return true + } + } + if v, ok := v.(Var); ok { + vis.vars.Add(v) + } + return false +} + +// Walk iterates the AST by calling the function f on the +// GenericVisitor before recursing. Contrary to the generic Walk, this +// does not require allocating the visitor from heap. 
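
How the VarVisitorParams fields change the collected set is easiest to see side by side; this mirrors the way unify.go above builds its safety-check visitor. A sketch with an illustrative body (MustParseBody lives outside this hunk):

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
)

func main() {
	body := ast.MustParseBody(`data.users[i].roles[_] == role`)

	// Default params: ref heads such as "data" and the operator ref are
	// collected along with i, role and the generated wildcard.
	all := ast.NewVarVisitor()
	all.Walk(body)
	fmt.Println(all.Vars().Sorted())

	// SkipRefHead/SkipRefCallHead drop those heads, leaving only the vars
	// that actually need to be bound.
	bound := ast.NewVarVisitor().WithParams(ast.VarVisitorParams{
		SkipRefHead:     true,
		SkipRefCallHead: true,
	})
	bound.Walk(body)
	fmt.Println(bound.Vars().Sorted())
}
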
+func (vis *VarVisitor) Walk(x interface{}) { + if vis.visit(x) { + return + } + + switch x := x.(type) { + case *Module: + vis.Walk(x.Package) + for i := range x.Imports { + vis.Walk(x.Imports[i]) + } + for i := range x.Rules { + vis.Walk(x.Rules[i]) + } + for i := range x.Comments { + vis.Walk(x.Comments[i]) + } + case *Package: + vis.Walk(x.Path) + case *Import: + vis.Walk(x.Path) + vis.Walk(x.Alias) + case *Rule: + vis.Walk(x.Head) + vis.Walk(x.Body) + if x.Else != nil { + vis.Walk(x.Else) + } + case *Head: + if len(x.Reference) > 0 { + vis.Walk(x.Reference) + } else { + vis.Walk(x.Name) + if x.Key != nil { + vis.Walk(x.Key) + } + } + vis.Walk(x.Args) + + if x.Value != nil { + vis.Walk(x.Value) + } + case Body: + for i := range x { + vis.Walk(x[i]) + } + case Args: + for i := range x { + vis.Walk(x[i]) + } + case *Expr: + switch ts := x.Terms.(type) { + case *Term, *SomeDecl, *Every: + vis.Walk(ts) + case []*Term: + for i := range ts { + vis.Walk(ts[i]) + } + } + for i := range x.With { + vis.Walk(x.With[i]) + } + case *With: + vis.Walk(x.Target) + vis.Walk(x.Value) + case *Term: + vis.Walk(x.Value) + case Ref: + for i := range x { + vis.Walk(x[i]) + } + case *object: + x.Foreach(func(k, _ *Term) { + vis.Walk(k) + vis.Walk(x.Get(k)) + }) + case *Array: + x.Foreach(func(t *Term) { + vis.Walk(t) + }) + case Set: + xSlice := x.Slice() + for i := range xSlice { + vis.Walk(xSlice[i]) + } + case *ArrayComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case *ObjectComprehension: + vis.Walk(x.Key) + vis.Walk(x.Value) + vis.Walk(x.Body) + case *SetComprehension: + vis.Walk(x.Term) + vis.Walk(x.Body) + case Call: + for i := range x { + vis.Walk(x[i]) + } + case *Every: + if x.Key != nil { + vis.Walk(x.Key) + } + vis.Walk(x.Value) + vis.Walk(x.Domain) + vis.Walk(x.Body) + case *SomeDecl: + for i := range x.Symbols { + vis.Walk(x.Symbols[i]) + } + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go new file mode 100644 index 000000000..12f8bfb32 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/bundle.go @@ -0,0 +1,1774 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package bundle implements bundle loading. +package bundle + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "strings" + + "github.com/gobwas/glob" + "github.com/open-policy-agent/opa/internal/file/archive" + "github.com/open-policy-agent/opa/internal/merge" + "github.com/open-policy-agent/opa/v1/ast" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/format" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/util" +) + +// Common file extensions and file names. +const ( + RegoExt = ".rego" + WasmFile = "policy.wasm" + PlanFile = "plan.json" + ManifestExt = ".manifest" + SignaturesFile = "signatures.json" + patchFile = "patch.json" + dataFile = "data.json" + yamlDataFile = "data.yaml" + ymlDataFile = "data.yml" + defaultHashingAlg = "SHA-256" + DefaultSizeLimitBytes = (1024 * 1024 * 1024) // limit bundle reads to 1GB to protect against gzip bombs + DeltaBundleType = "delta" + SnapshotBundleType = "snapshot" +) + +// Bundle represents a loaded bundle. 
The bundle can contain data and policies. +type Bundle struct { + Signatures SignaturesConfig + Manifest Manifest + Data map[string]interface{} + Modules []ModuleFile + Wasm []byte // Deprecated. Use WasmModules instead + WasmModules []WasmModuleFile + PlanModules []PlanModuleFile + Patch Patch + Etag string + Raw []Raw + + lazyLoadingMode bool + sizeLimitBytes int64 +} + +// Raw contains raw bytes representing the bundle's content +type Raw struct { + Path string + Value []byte + module *ModuleFile +} + +// Patch contains an array of objects wherein each object represents the patch operation to be +// applied to the bundle data. +type Patch struct { + Data []PatchOperation `json:"data,omitempty"` +} + +// PatchOperation models a single patch operation against a document. +type PatchOperation struct { + Op string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value"` +} + +// SignaturesConfig represents an array of JWTs that encapsulate the signatures for the bundle. +type SignaturesConfig struct { + Signatures []string `json:"signatures,omitempty"` + Plugin string `json:"plugin,omitempty"` +} + +// isEmpty returns if the SignaturesConfig is empty. +func (s SignaturesConfig) isEmpty() bool { + return reflect.DeepEqual(s, SignaturesConfig{}) +} + +// DecodedSignature represents the decoded JWT payload. +type DecodedSignature struct { + Files []FileInfo `json:"files"` + KeyID string `json:"keyid"` // Deprecated, use kid in the JWT header instead. + Scope string `json:"scope"` + IssuedAt int64 `json:"iat"` + Issuer string `json:"iss"` +} + +// FileInfo contains the hashing algorithm used, resulting digest etc. +type FileInfo struct { + Name string `json:"name"` + Hash string `json:"hash"` + Algorithm string `json:"algorithm"` +} + +// NewFile returns a new FileInfo. +func NewFile(name, hash, alg string) FileInfo { + return FileInfo{ + Name: name, + Hash: hash, + Algorithm: alg, + } +} + +// Manifest represents the manifest from a bundle. The manifest may contain +// metadata such as the bundle revision. +type Manifest struct { + Revision string `json:"revision"` + Roots *[]string `json:"roots,omitempty"` + WasmResolvers []WasmResolver `json:"wasm,omitempty"` + // RegoVersion is the global Rego version for the bundle described by this Manifest. + // The Rego version of individual files can be overridden in FileRegoVersions. + // We don't use ast.RegoVersion here, as this iota type's order isn't guaranteed to be stable over time. + // We use a pointer so that we can support hand-made bundles that don't have an explicit version appropriately. + // E.g. in OPA 0.x if --v1-compatible is used when consuming the bundle, and there is no specified version, + // we should default to v1; if --v1-compatible isn't used, we should default to v0. In OPA 1.0, no --x-compatible + // flag and no explicit bundle version should default to v1. + RegoVersion *int `json:"rego_version,omitempty"` + // FileRegoVersions is a map from file paths to Rego versions. + // This allows individual files to override the global Rego version specified by RegoVersion. + FileRegoVersions map[string]int `json:"file_rego_versions,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + + compiledFileRegoVersions []fileRegoVersion +} + +type fileRegoVersion struct { + path glob.Glob + version int +} + +// WasmResolver maps a wasm module to an entrypoint ref. 
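To make the Manifest fields documented above concrete, here is a hand-written sketch (revision and root names invented) that relies only on the exported helpers defined in this file plus the standard library.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/open-policy-agent/opa/v1/ast"
    "github.com/open-policy-agent/opa/v1/bundle"
)

func main() {
    roots := []string{"policies/authz"}
    m := bundle.Manifest{Revision: "v42", Roots: &roots}

    m.AddRoot("data/exceptions") // appended: not contained by the existing root
    m.SetRegoVersion(ast.RegoV1) // serialized as "rego_version": 1

    // Roots must not overlap; this is what bundle validation enforces.
    fmt.Println(bundle.RootPathsOverlap("policies", "policies/authz")) // true

    bs, _ := json.MarshalIndent(m, "", "  ")
    fmt.Println(string(bs))
}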
+type WasmResolver struct {
+ Entrypoint string `json:"entrypoint,omitempty"`
+ Module string `json:"module,omitempty"`
+ Annotations []*ast.Annotations `json:"annotations,omitempty"`
+}
+
+// Init initializes the manifest. If you instantiate a manifest
+// manually, call Init to ensure that the roots are set properly.
+func (m *Manifest) Init() {
+ if m.Roots == nil {
+ defaultRoots := []string{""}
+ m.Roots = &defaultRoots
+ }
+}
+
+// AddRoot adds r to the roots of m. This function is idempotent.
+func (m *Manifest) AddRoot(r string) {
+ m.Init()
+ if !RootPathsContain(*m.Roots, r) {
+ *m.Roots = append(*m.Roots, r)
+ }
+}
+
+func (m *Manifest) SetRegoVersion(v ast.RegoVersion) {
+ m.Init()
+ regoVersion := 0
+ if v == ast.RegoV1 {
+ regoVersion = 1
+ }
+ m.RegoVersion = &regoVersion
+}
+
+// Equal returns true if m is semantically equivalent to other.
+func (m Manifest) Equal(other Manifest) bool {
+
+ // This is safe since both are passed by value.
+ m.Init()
+ other.Init()
+
+ if m.Revision != other.Revision {
+ return false
+ }
+
+ if m.RegoVersion == nil && other.RegoVersion != nil {
+ return false
+ }
+ if m.RegoVersion != nil && other.RegoVersion == nil {
+ return false
+ }
+ if m.RegoVersion != nil && other.RegoVersion != nil && *m.RegoVersion != *other.RegoVersion {
+ return false
+ }
+
+ // If both are nil, or both are empty, we consider them equal.
+ if !(len(m.FileRegoVersions) == 0 && len(other.FileRegoVersions) == 0) &&
+ !reflect.DeepEqual(m.FileRegoVersions, other.FileRegoVersions) {
+ return false
+ }
+
+ if !reflect.DeepEqual(m.Metadata, other.Metadata) {
+ return false
+ }
+
+ return m.equalWasmResolversAndRoots(other)
+}
+
+func (m Manifest) Empty() bool {
+ return m.Equal(Manifest{})
+}
+
+// Copy returns a deep copy of the manifest.
+func (m Manifest) Copy() Manifest {
+ m.Init()
+ roots := make([]string, len(*m.Roots))
+ copy(roots, *m.Roots)
+ m.Roots = &roots
+
+ wasmModules := make([]WasmResolver, len(m.WasmResolvers))
+ copy(wasmModules, m.WasmResolvers)
+ m.WasmResolvers = wasmModules
+
+ metadata := m.Metadata
+
+ if metadata != nil {
+ m.Metadata = make(map[string]interface{})
+ for k, v := range metadata {
+ m.Metadata[k] = v
+ }
+ }
+
+ return m
+}
+
+func (m Manifest) String() string {
+ m.Init()
+ if m.RegoVersion != nil {
+ return fmt.Sprintf("<revision: %q, rego_version: %d, roots: %v, wasm: %+v, metadata: %+v>",
+ m.Revision, *m.RegoVersion, *m.Roots, m.WasmResolvers, m.Metadata)
+ }
+ return fmt.Sprintf("<revision: %q, roots: %v, wasm: %+v, metadata: %+v>",
+ m.Revision, *m.Roots, m.WasmResolvers, m.Metadata)
+}
+
+func (m Manifest) rootSet() stringSet {
+ rs := map[string]struct{}{}
+
+ for _, r := range *m.Roots {
+ rs[r] = struct{}{}
+ }
+
+ return stringSet(rs)
+}
+
+func (m Manifest) equalWasmResolversAndRoots(other Manifest) bool {
+ if len(m.WasmResolvers) != len(other.WasmResolvers) {
+ return false
+ }
+
+ for i := 0; i < len(m.WasmResolvers); i++ {
+ if !m.WasmResolvers[i].Equal(&other.WasmResolvers[i]) {
+ return false
+ }
+ }
+
+ return m.rootSet().Equal(other.rootSet())
+}
+
+func (wr *WasmResolver) Equal(other *WasmResolver) bool {
+ if wr == nil && other == nil {
+ return true
+ }
+
+ if wr == nil || other == nil {
+ return false
+ }
+
+ if wr.Module != other.Module {
+ return false
+ }
+
+ if wr.Entrypoint != other.Entrypoint {
+ return false
+ }
+
+ annotLen := len(wr.Annotations)
+ if annotLen != len(other.Annotations) {
+ return false
+ }
+
+ for i := 0; i < annotLen; i++ {
+ if wr.Annotations[i].Compare(other.Annotations[i]) != 0 {
+ return false
+ }
+ }
+
+ return true
+}
+
+type stringSet map[string]struct{}
+
+func (ss stringSet) Equal(other stringSet) bool {
+ if len(ss) != len(other) {
+ return false
+ }
+ for k := range other {
+ if _, ok := ss[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *Manifest) validateAndInjectDefaults(b Bundle) error {
+
+ m.Init()
+
+ // Validate roots in bundle.
+ roots := *m.Roots
+
+ // Standardize the roots (no starting or trailing slash)
+ for i := range roots {
+ roots[i] = strings.Trim(roots[i], "/")
+ }
+
+ for i := 0; i < len(roots)-1; i++ {
+ for j := i + 1; j < len(roots); j++ {
+ if RootPathsOverlap(roots[i], roots[j]) {
+ return fmt.Errorf("manifest has overlapped roots: '%v' and '%v'", roots[i], roots[j])
+ }
+ }
+ }
+
+ // Validate modules in bundle.
+ for _, module := range b.Modules { + found := false + if path, err := module.Parsed.Package.Path.Ptr(); err == nil { + found = RootPathsContain(roots, path) + } + if !found { + return fmt.Errorf("manifest roots %v do not permit '%v' in module '%v'", roots, module.Parsed.Package, module.Path) + } + } + + // Build a set of wasm module entrypoints to validate + wasmModuleToEps := map[string]string{} + seenEps := map[string]struct{}{} + for _, wm := range b.WasmModules { + wasmModuleToEps[wm.Path] = "" + } + + for _, wmConfig := range b.Manifest.WasmResolvers { + _, ok := wasmModuleToEps[wmConfig.Module] + if !ok { + return fmt.Errorf("manifest references wasm module '%s' but the module file does not exist", wmConfig.Module) + } + + // Ensure wasm module entrypoint in within bundle roots + if !RootPathsContain(roots, wmConfig.Entrypoint) { + return fmt.Errorf("manifest roots %v do not permit '%v' entrypoint for wasm module '%v'", roots, wmConfig.Entrypoint, wmConfig.Module) + } + + if _, ok := seenEps[wmConfig.Entrypoint]; ok { + return fmt.Errorf("entrypoint '%s' cannot be used by more than one wasm module", wmConfig.Entrypoint) + } + seenEps[wmConfig.Entrypoint] = struct{}{} + + wasmModuleToEps[wmConfig.Module] = wmConfig.Entrypoint + } + + // Validate data patches in bundle. + for _, patch := range b.Patch.Data { + path := strings.Trim(patch.Path, "/") + if !RootPathsContain(roots, path) { + return fmt.Errorf("manifest roots %v do not permit data patch at path '%s'", roots, path) + } + } + + if b.lazyLoadingMode { + return nil + } + + // Validate data in bundle. + return dfs(b.Data, "", func(path string, node interface{}) (bool, error) { + path = strings.Trim(path, "/") + if RootPathsContain(roots, path) { + return true, nil + } + + if _, ok := node.(map[string]interface{}); ok { + for i := range roots { + if RootPathsContain(strings.Split(path, "/"), roots[i]) { + return false, nil + } + } + } + return false, fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, path) + }) +} + +// ModuleFile represents a single module contained in a bundle. +type ModuleFile struct { + URL string + Path string + RelativePath string + Raw []byte + Parsed *ast.Module +} + +// WasmModuleFile represents a single wasm module contained in a bundle. +type WasmModuleFile struct { + URL string + Path string + Entrypoints []ast.Ref + Raw []byte +} + +// PlanModuleFile represents a single plan module contained in a bundle. +// +// NOTE(tsandall): currently the plans are just opaque binary blobs. In the +// future we could inject the entrypoints so that the plans could be executed +// inside of OPA proper like we do for Wasm modules. +type PlanModuleFile struct { + URL string + Path string + Raw []byte +} + +// Reader contains the reader to load the bundle from. +type Reader struct { + loader DirectoryLoader + includeManifestInData bool + metrics metrics.Metrics + baseDir string + verificationConfig *VerificationConfig + skipVerify bool + processAnnotations bool + capabilities *ast.Capabilities + files map[string]FileInfo // files in the bundle signature payload + sizeLimitBytes int64 + etag string + lazyLoadingMode bool + name string + persist bool + regoVersion ast.RegoVersion + followSymlinks bool +} + +// NewReader is deprecated. Use NewCustomReader instead. +func NewReader(r io.Reader) *Reader { + return NewCustomReader(NewTarballLoader(r)) +} + +// NewCustomReader returns a new Reader configured to use the +// specified DirectoryLoader. 
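The wasm resolver checks above are easier to picture with an invented bundle shape that satisfies them: every referenced module file exists, every entrypoint sits under a declared root, and no entrypoint is reused. A sketch only; the validation itself runs when the bundle is read or merged.

package main

import (
    "fmt"

    "github.com/open-policy-agent/opa/v1/bundle"
)

func main() {
    roots := []string{"example"}

    b := bundle.Bundle{
        Data: map[string]interface{}{},
        Manifest: bundle.Manifest{
            Roots: &roots,
            WasmResolvers: []bundle.WasmResolver{
                // Module matches the WasmModuleFile path below; the
                // entrypoint falls under the "example" root and is unique.
                {Module: "/policy.wasm", Entrypoint: "example/allow"},
            },
        },
        WasmModules: []bundle.WasmModuleFile{
            {URL: "/policy.wasm", Path: "/policy.wasm", Raw: []byte{}},
        },
    }

    fmt.Println(len(b.WasmModules), len(b.Manifest.WasmResolvers))
}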
+func NewCustomReader(loader DirectoryLoader) *Reader { + nr := Reader{ + loader: loader, + metrics: metrics.New(), + files: make(map[string]FileInfo), + sizeLimitBytes: DefaultSizeLimitBytes + 1, + } + return &nr +} + +// IncludeManifestInData sets whether the manifest metadata should be +// included in the bundle's data. +func (r *Reader) IncludeManifestInData(includeManifestInData bool) *Reader { + r.includeManifestInData = includeManifestInData + return r +} + +// WithMetrics sets the metrics object to be used while loading bundles +func (r *Reader) WithMetrics(m metrics.Metrics) *Reader { + r.metrics = m + return r +} + +// WithBaseDir sets a base directory for file paths of loaded Rego +// modules. This will *NOT* affect the loaded path of data files. +func (r *Reader) WithBaseDir(dir string) *Reader { + r.baseDir = dir + return r +} + +// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle +func (r *Reader) WithBundleVerificationConfig(config *VerificationConfig) *Reader { + r.verificationConfig = config + return r +} + +// WithSkipBundleVerification skips verification of a signed bundle +func (r *Reader) WithSkipBundleVerification(skipVerify bool) *Reader { + r.skipVerify = skipVerify + return r +} + +// WithProcessAnnotations enables annotation processing during .rego file parsing. +func (r *Reader) WithProcessAnnotations(yes bool) *Reader { + r.processAnnotations = yes + return r +} + +// WithCapabilities sets the supported capabilities when loading the files +func (r *Reader) WithCapabilities(caps *ast.Capabilities) *Reader { + r.capabilities = caps + return r +} + +// WithJSONOptions sets the JSON options on the parser (now a no-op). +// +// Deprecated: Use SetOptions in the json package instead, where a longer description +// of why this is deprecated also can be found. +func (r *Reader) WithJSONOptions(*astJSON.Options) *Reader { + return r +} + +// WithSizeLimitBytes sets the size limit to apply to files in the bundle. If files are larger +// than this, an error will be returned by the reader. +func (r *Reader) WithSizeLimitBytes(n int64) *Reader { + r.sizeLimitBytes = n + 1 + return r +} + +// WithBundleEtag sets the given etag value on the bundle +func (r *Reader) WithBundleEtag(etag string) *Reader { + r.etag = etag + return r +} + +// WithBundleName specifies the bundle name +func (r *Reader) WithBundleName(name string) *Reader { + r.name = name + return r +} + +func (r *Reader) WithFollowSymlinks(yes bool) *Reader { + r.followSymlinks = yes + return r +} + +// WithLazyLoadingMode sets the bundle loading mode. If true, +// bundles will be read in lazy mode. In this mode, data files in the bundle will not be +// deserialized and the check to validate that the bundle data does not contain paths +// outside the bundle's roots will not be performed while reading the bundle. +func (r *Reader) WithLazyLoadingMode(yes bool) *Reader { + r.lazyLoadingMode = yes + return r +} + +// WithBundlePersistence specifies if the downloaded bundle will eventually be persisted to disk. +func (r *Reader) WithBundlePersistence(persist bool) *Reader { + r.persist = persist + return r +} + +func (r *Reader) WithRegoVersion(version ast.RegoVersion) *Reader { + r.regoVersion = version + return r +} + +func (r *Reader) ParserOptions() ast.ParserOptions { + return ast.ParserOptions{ + ProcessAnnotation: r.processAnnotations, + Capabilities: r.capabilities, + RegoVersion: r.regoVersion, + } +} + +// Read returns a new Bundle loaded from the reader. 
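A sketch of the reader builder pattern documented above, assuming a gzipped bundle tarball on disk; the file name and option values are invented.

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/open-policy-agent/opa/v1/ast"
    "github.com/open-policy-agent/opa/v1/bundle"
)

func main() {
    f, err := os.Open("bundle.tar.gz") // hypothetical path
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // Parse annotations, cap individual file sizes, and fall back to Rego v1
    // when the manifest does not pin a version.
    b, err := bundle.NewCustomReader(bundle.NewTarballLoaderWithBaseURL(f, "")).
        WithProcessAnnotations(true).
        WithSizeLimitBytes(512 * 1024 * 1024).
        WithRegoVersion(ast.RegoV1).
        Read()
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println("revision:", b.Manifest.Revision, "modules:", len(b.Modules))
}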
+func (r *Reader) Read() (Bundle, error) { + + var bundle Bundle + var descriptors []*Descriptor + var err error + var raw []Raw + + bundle.Signatures, bundle.Patch, descriptors, err = preProcessBundle(r.loader, r.skipVerify, r.sizeLimitBytes) + if err != nil { + return bundle, err + } + + bundle.lazyLoadingMode = r.lazyLoadingMode + bundle.sizeLimitBytes = r.sizeLimitBytes + + if bundle.Type() == SnapshotBundleType { + err = r.checkSignaturesAndDescriptors(bundle.Signatures) + if err != nil { + return bundle, err + } + + bundle.Data = map[string]interface{}{} + } + + var modules []ModuleFile + for _, f := range descriptors { + buf, err := readFile(f, r.sizeLimitBytes) + if err != nil { + return bundle, err + } + + // verify the file content + if bundle.Type() == SnapshotBundleType && !bundle.Signatures.isEmpty() { + path := f.Path() + if r.baseDir != "" { + path = f.URL() + } + path = strings.TrimPrefix(path, "/") + + // check if the file is to be excluded from bundle verification + if r.isFileExcluded(path) { + delete(r.files, path) + } else { + if err = r.verifyBundleFile(path, buf); err != nil { + return bundle, err + } + } + } + + // Normalize the paths to use `/` separators + path := filepath.ToSlash(f.Path()) + + if strings.HasSuffix(path, RegoExt) { + fullPath := r.fullPath(path) + bs := buf.Bytes() + + // Modules are parsed after we've had a chance to read the manifest + mf := ModuleFile{ + URL: f.URL(), + Path: fullPath, + RelativePath: path, + Raw: bs, + } + modules = append(modules, mf) + + if r.lazyLoadingMode { + p := fullPath + if r.name != "" { + p = modulePathWithPrefix(r.name, fullPath) + } + + raw = append(raw, Raw{Path: p, Value: bs, module: &mf}) + } + } else if filepath.Base(path) == WasmFile { + bundle.WasmModules = append(bundle.WasmModules, WasmModuleFile{ + URL: f.URL(), + Path: r.fullPath(path), + Raw: buf.Bytes(), + }) + } else if filepath.Base(path) == PlanFile { + bundle.PlanModules = append(bundle.PlanModules, PlanModuleFile{ + URL: f.URL(), + Path: r.fullPath(path), + Raw: buf.Bytes(), + }) + } else if filepath.Base(path) == dataFile { + if r.lazyLoadingMode { + raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) + continue + } + + var value interface{} + + r.metrics.Timer(metrics.RegoDataParse).Start() + err := util.UnmarshalJSON(buf.Bytes(), &value) + r.metrics.Timer(metrics.RegoDataParse).Stop() + + if err != nil { + return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) + } + + if err := insertValue(&bundle, path, value); err != nil { + return bundle, err + } + + } else if filepath.Base(path) == yamlDataFile || filepath.Base(path) == ymlDataFile { + if r.lazyLoadingMode { + raw = append(raw, Raw{Path: path, Value: buf.Bytes()}) + continue + } + + var value interface{} + + r.metrics.Timer(metrics.RegoDataParse).Start() + err := util.Unmarshal(buf.Bytes(), &value) + r.metrics.Timer(metrics.RegoDataParse).Stop() + + if err != nil { + return bundle, fmt.Errorf("bundle load failed on %v: %w", r.fullPath(path), err) + } + + if err := insertValue(&bundle, path, value); err != nil { + return bundle, err + } + + } else if strings.HasSuffix(path, ManifestExt) { + if err := util.NewJSONDecoder(&buf).Decode(&bundle.Manifest); err != nil { + return bundle, fmt.Errorf("bundle load failed on manifest decode: %w", err) + } + } + } + + // Parse modules + popts := r.ParserOptions() + popts.RegoVersion = bundle.RegoVersion(popts.EffectiveRegoVersion()) + for _, mf := range modules { + modulePopts := popts + if modulePopts.RegoVersion, err = 
bundle.RegoVersionForFile(mf.RelativePath, popts.EffectiveRegoVersion()); err != nil { + return bundle, err + } + r.metrics.Timer(metrics.RegoModuleParse).Start() + mf.Parsed, err = ast.ParseModuleWithOpts(mf.Path, string(mf.Raw), modulePopts) + r.metrics.Timer(metrics.RegoModuleParse).Stop() + if err != nil { + return bundle, err + } + bundle.Modules = append(bundle.Modules, mf) + } + + if bundle.Type() == DeltaBundleType { + if len(bundle.Data) != 0 { + return bundle, fmt.Errorf("delta bundle expected to contain only patch file but data files found") + } + + if len(bundle.Modules) != 0 { + return bundle, fmt.Errorf("delta bundle expected to contain only patch file but policy files found") + } + + if len(bundle.WasmModules) != 0 { + return bundle, fmt.Errorf("delta bundle expected to contain only patch file but wasm files found") + } + + if r.persist { + return bundle, fmt.Errorf("'persist' property is true in config. persisting delta bundle to disk is not supported") + } + } + + // check if the bundle signatures specify any files that weren't found in the bundle + if bundle.Type() == SnapshotBundleType && len(r.files) != 0 { + extra := []string{} + for k := range r.files { + extra = append(extra, k) + } + return bundle, fmt.Errorf("file(s) %v specified in bundle signatures but not found in the target bundle", extra) + } + + if err := bundle.Manifest.validateAndInjectDefaults(bundle); err != nil { + return bundle, err + } + + // Inject the wasm module entrypoint refs into the WasmModuleFile structs + epMap := map[string][]string{} + for _, r := range bundle.Manifest.WasmResolvers { + epMap[r.Module] = append(epMap[r.Module], r.Entrypoint) + } + for i := 0; i < len(bundle.WasmModules); i++ { + entrypoints := epMap[bundle.WasmModules[i].Path] + for _, entrypoint := range entrypoints { + ref, err := ast.PtrRef(ast.DefaultRootDocument, entrypoint) + if err != nil { + return bundle, fmt.Errorf("failed to parse wasm module entrypoint '%s': %s", entrypoint, err) + } + bundle.WasmModules[i].Entrypoints = append(bundle.WasmModules[i].Entrypoints, ref) + } + } + + if r.includeManifestInData { + var metadata map[string]interface{} + + b, err := json.Marshal(&bundle.Manifest) + if err != nil { + return bundle, fmt.Errorf("bundle load failed on manifest marshal: %w", err) + } + + err = util.UnmarshalJSON(b, &metadata) + if err != nil { + return bundle, fmt.Errorf("bundle load failed on manifest unmarshal: %w", err) + } + + // For backwards compatibility always write to the old unnamed manifest path + // This will *not* be correct if >1 bundle is in use... 
+ if err := bundle.insertData(legacyManifestStoragePath, metadata); err != nil { + return bundle, fmt.Errorf("bundle load failed on %v: %w", legacyRevisionStoragePath, err) + } + } + + bundle.Etag = r.etag + bundle.Raw = raw + + return bundle, nil +} + +func (r *Reader) isFileExcluded(path string) bool { + for _, e := range r.verificationConfig.Exclude { + match, _ := filepath.Match(e, path) + if match { + return true + } + } + return false +} + +func (r *Reader) checkSignaturesAndDescriptors(signatures SignaturesConfig) error { + if r.skipVerify { + return nil + } + + if signatures.isEmpty() && r.verificationConfig != nil && r.verificationConfig.KeyID != "" { + return fmt.Errorf("bundle missing .signatures.json file") + } + + if !signatures.isEmpty() { + if r.verificationConfig == nil { + return fmt.Errorf("verification key not provided") + } + + // verify the JWT signatures included in the `.signatures.json` file + if err := r.verifyBundleSignature(signatures); err != nil { + return err + } + } + return nil +} + +func (r *Reader) verifyBundleSignature(sc SignaturesConfig) error { + var err error + r.files, err = VerifyBundleSignature(sc, r.verificationConfig) + return err +} + +func (r *Reader) verifyBundleFile(path string, data bytes.Buffer) error { + return VerifyBundleFile(path, data, r.files) +} + +func (r *Reader) fullPath(path string) string { + if r.baseDir != "" { + path = filepath.Join(r.baseDir, path) + } + return path +} + +// Write is deprecated. Use NewWriter instead. +func Write(w io.Writer, bundle Bundle) error { + return NewWriter(w). + UseModulePath(true). + DisableFormat(true). + Write(bundle) +} + +// Writer implements bundle serialization. +type Writer struct { + usePath bool + disableFormat bool + w io.Writer +} + +// NewWriter returns a bundle writer that writes to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + } +} + +// UseModulePath configures the writer to use the module file path instead of the +// module file URL during serialization. This is for backwards compatibility. +func (w *Writer) UseModulePath(yes bool) *Writer { + w.usePath = yes + return w +} + +// DisableFormat configures the writer to just write out raw bytes instead +// of formatting modules before serialization. +func (w *Writer) DisableFormat(yes bool) *Writer { + w.disableFormat = yes + return w +} + +// Write writes the bundle to the writer's output stream. 
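A small in-memory round trip through the Writer described above and back through a Reader; the module source and paths are invented, and the closing equality check is only expected to hold for a bundle this simple.

package main

import (
    "bytes"
    "fmt"
    "log"

    "github.com/open-policy-agent/opa/v1/ast"
    "github.com/open-policy-agent/opa/v1/bundle"
)

func main() {
    module := "package example\n\nallow := true\n"

    b := bundle.Bundle{
        Data: map[string]interface{}{"example": map[string]interface{}{"enabled": true}},
        Modules: []bundle.ModuleFile{{
            URL:    "/example/policy.rego",
            Path:   "/example/policy.rego",
            Raw:    []byte(module),
            Parsed: ast.MustParseModule(module),
        }},
    }
    b.Manifest.Init()

    var buf bytes.Buffer
    if err := bundle.NewWriter(&buf).UseModulePath(true).Write(b); err != nil {
        log.Fatal(err)
    }

    // Read the tarball straight back to confirm it is well formed.
    out, err := bundle.NewCustomReader(bundle.NewTarballLoaderWithBaseURL(&buf, "")).Read()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out.Equal(b)) // expected to print true
}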
+func (w *Writer) Write(bundle Bundle) error { + gw := gzip.NewWriter(w.w) + tw := tar.NewWriter(gw) + + bundleType := bundle.Type() + + if bundleType == SnapshotBundleType { + var buf bytes.Buffer + + if err := json.NewEncoder(&buf).Encode(bundle.Data); err != nil { + return err + } + + if err := archive.WriteFile(tw, "data.json", buf.Bytes()); err != nil { + return err + } + + for _, module := range bundle.Modules { + path := module.URL + if w.usePath { + path = module.Path + } + + if err := archive.WriteFile(tw, path, module.Raw); err != nil { + return err + } + } + + if err := w.writeWasm(tw, bundle); err != nil { + return err + } + + if err := writeSignatures(tw, bundle); err != nil { + return err + } + + if err := w.writePlan(tw, bundle); err != nil { + return err + } + } else if bundleType == DeltaBundleType { + if err := writePatch(tw, bundle); err != nil { + return err + } + } + + if err := writeManifest(tw, bundle); err != nil { + return err + } + + if err := tw.Close(); err != nil { + return err + } + + return gw.Close() +} + +func (w *Writer) writeWasm(tw *tar.Writer, bundle Bundle) error { + for _, wm := range bundle.WasmModules { + path := wm.URL + if w.usePath { + path = wm.Path + } + + err := archive.WriteFile(tw, path, wm.Raw) + if err != nil { + return err + } + } + + if len(bundle.Wasm) > 0 { + err := archive.WriteFile(tw, "/"+WasmFile, bundle.Wasm) + if err != nil { + return err + } + } + + return nil +} + +func (w *Writer) writePlan(tw *tar.Writer, bundle Bundle) error { + for _, wm := range bundle.PlanModules { + path := wm.URL + if w.usePath { + path = wm.Path + } + + err := archive.WriteFile(tw, path, wm.Raw) + if err != nil { + return err + } + } + + return nil +} + +func writeManifest(tw *tar.Writer, bundle Bundle) error { + + if bundle.Manifest.Empty() { + return nil + } + + var buf bytes.Buffer + + if err := json.NewEncoder(&buf).Encode(bundle.Manifest); err != nil { + return err + } + + return archive.WriteFile(tw, ManifestExt, buf.Bytes()) +} + +func writePatch(tw *tar.Writer, bundle Bundle) error { + + var buf bytes.Buffer + + if err := json.NewEncoder(&buf).Encode(bundle.Patch); err != nil { + return err + } + + return archive.WriteFile(tw, patchFile, buf.Bytes()) +} + +func writeSignatures(tw *tar.Writer, bundle Bundle) error { + + if bundle.Signatures.isEmpty() { + return nil + } + + bs, err := json.MarshalIndent(bundle.Signatures, "", " ") + if err != nil { + return err + } + + return archive.WriteFile(tw, fmt.Sprintf(".%v", SignaturesFile), bs) +} + +func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) { + + files := []FileInfo{} + + bs, err := hash.HashFile(b.Data) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix("data.json", "/"), hex.EncodeToString(bs), defaultHashingAlg)) + + if len(b.Wasm) != 0 { + bs, err := hash.HashFile(b.Wasm) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix(WasmFile, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + + for _, wasmModule := range b.WasmModules { + bs, err := hash.HashFile(wasmModule.Raw) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix(wasmModule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + + for _, planmodule := range b.PlanModules { + bs, err := hash.HashFile(planmodule.Raw) + if err != nil { + return files, err + } + files = append(files, NewFile(strings.TrimPrefix(planmodule.Path, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + 
+ // If the manifest is essentially empty, don't add it to the signatures since it + // won't be written to the bundle. Otherwise: + // parse the manifest into a JSON structure; + // then recursively order the fields of all objects alphabetically and then apply + // the hash function to result to compute the hash. + if !b.Manifest.Empty() { + mbs, err := json.Marshal(b.Manifest) + if err != nil { + return files, err + } + + var result map[string]interface{} + if err := util.Unmarshal(mbs, &result); err != nil { + return files, err + } + + bs, err = hash.HashFile(result) + if err != nil { + return files, err + } + + files = append(files, NewFile(strings.TrimPrefix(ManifestExt, "/"), hex.EncodeToString(bs), defaultHashingAlg)) + } + + return files, err +} + +// FormatModules formats Rego modules +// Modules will be formatted to comply with [ast.DefaultRegoVersion], but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported). +func (b *Bundle) FormatModules(useModulePath bool) error { + return b.FormatModulesForRegoVersion(ast.DefaultRegoVersion, true, useModulePath) +} + +// FormatModulesForRegoVersion formats Rego modules to comply with a given Rego version +func (b *Bundle) FormatModulesForRegoVersion(version ast.RegoVersion, preserveModuleRegoVersion bool, useModulePath bool) error { + var err error + + for i, module := range b.Modules { + opts := format.Opts{} + if preserveModuleRegoVersion { + opts.RegoVersion = module.Parsed.RegoVersion() + opts.ParserOptions = &ast.ParserOptions{ + RegoVersion: opts.RegoVersion, + } + } else { + opts.RegoVersion = version + } + + if module.Raw == nil { + module.Raw, err = format.AstWithOpts(module.Parsed, opts) + if err != nil { + return err + } + } else { + path := module.URL + if useModulePath { + path = module.Path + } + + module.Raw, err = format.SourceWithOpts(path, module.Raw, opts) + if err != nil { + return err + } + } + b.Modules[i].Raw = module.Raw + } + return nil +} + +// GenerateSignature generates the signature for the given bundle. +func (b *Bundle) GenerateSignature(signingConfig *SigningConfig, keyID string, useModulePath bool) error { + + hash, err := NewSignatureHasher(HashingAlgorithm(defaultHashingAlg)) + if err != nil { + return err + } + + files := []FileInfo{} + + for _, module := range b.Modules { + bytes, err := hash.HashFile(module.Raw) + if err != nil { + return err + } + + path := module.URL + if useModulePath { + path = module.Path + } + files = append(files, NewFile(strings.TrimPrefix(path, "/"), hex.EncodeToString(bytes), defaultHashingAlg)) + } + + result, err := hashBundleFiles(hash, b) + if err != nil { + return err + } + files = append(files, result...) + + // generate signed token + token, err := GenerateSignedToken(files, signingConfig, keyID) + if err != nil { + return err + } + + if b.Signatures.isEmpty() { + b.Signatures = SignaturesConfig{} + } + + if signingConfig.Plugin != "" { + b.Signatures.Plugin = signingConfig.Plugin + } + + b.Signatures.Signatures = []string{token} + + return nil +} + +// ParsedModules returns a map of parsed modules with names that are +// unique and human readable for the given a bundle name. 
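One plausible way to drive the GenerateSignature flow shown above; NewSigningConfig is assumed to be the constructor from this same package, and the HMAC secret, algorithm and key ID are placeholders rather than recommendations.

package main

import (
    "log"

    "github.com/open-policy-agent/opa/v1/bundle"
)

func main() {
    b := &bundle.Bundle{Data: map[string]interface{}{}}
    b.Manifest.Init()

    // Hash data, modules, wasm/plan files and the manifest, then store a
    // single signed token under b.Signatures.
    sc := bundle.NewSigningConfig("mysecret", "HS256", "")
    if err := b.GenerateSignature(sc, "default-key", false); err != nil {
        log.Fatal(err)
    }

    log.Println("signatures:", len(b.Signatures.Signatures))
}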
+func (b *Bundle) ParsedModules(bundleName string) map[string]*ast.Module { + + mods := make(map[string]*ast.Module, len(b.Modules)) + + for _, mf := range b.Modules { + mods[modulePathWithPrefix(bundleName, mf.Path)] = mf.Parsed + } + + return mods +} + +func (b *Bundle) RegoVersion(def ast.RegoVersion) ast.RegoVersion { + if v := b.Manifest.RegoVersion; v != nil { + if *v == 0 { + return ast.RegoV0 + } else if *v == 1 { + return ast.RegoV1 + } + } + return def +} + +func (b *Bundle) SetRegoVersion(v ast.RegoVersion) { + b.Manifest.SetRegoVersion(v) +} + +// RegoVersionForFile returns the rego-version for the specified file path. +// If there is no defined version for the given path, the default version def is returned. +// If the version does not correspond to ast.RegoV0 or ast.RegoV1, an error is returned. +func (b *Bundle) RegoVersionForFile(path string, def ast.RegoVersion) (ast.RegoVersion, error) { + if def == ast.RegoUndefined { + def = ast.DefaultRegoVersion + } + + version, err := b.Manifest.numericRegoVersionForFile(path) + if err != nil { + return def, err + } else if version == nil { + return def, nil + } else if *version == 0 { + return ast.RegoV0, nil + } else if *version == 1 { + return ast.RegoV1, nil + } + return def, fmt.Errorf("unknown bundle rego-version %d for file '%s'", *version, path) +} + +func (m *Manifest) RegoVersionForFile(path string) (ast.RegoVersion, error) { + v, err := m.numericRegoVersionForFile(path) + if err != nil { + return ast.RegoUndefined, err + } + + if v == nil { + return ast.RegoUndefined, nil + } + + return ast.RegoVersionFromInt(*v), nil +} + +func (m *Manifest) numericRegoVersionForFile(path string) (*int, error) { + var version *int + + if len(m.FileRegoVersions) != len(m.compiledFileRegoVersions) { + m.compiledFileRegoVersions = make([]fileRegoVersion, 0, len(m.FileRegoVersions)) + for pattern, v := range m.FileRegoVersions { + compiled, err := glob.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("failed to compile glob pattern %s: %s", pattern, err) + } + m.compiledFileRegoVersions = append(m.compiledFileRegoVersions, fileRegoVersion{compiled, v}) + } + } + + for _, fv := range m.compiledFileRegoVersions { + if fv.path.Match(path) { + version = &fv.version + break + } + } + + if version == nil { + version = m.RegoVersion + } + return version, nil +} + +// Equal returns true if this bundle's contents equal the other bundle's +// contents. +func (b Bundle) Equal(other Bundle) bool { + if !reflect.DeepEqual(b.Data, other.Data) { + return false + } + + if len(b.Modules) != len(other.Modules) { + return false + } + for i := range b.Modules { + // To support bundles built from rootless filesystems we ignore a "/" prefix + // for URLs and Paths, such that "/file" and "file" are equivalent + if strings.TrimPrefix(b.Modules[i].URL, string(filepath.Separator)) != + strings.TrimPrefix(other.Modules[i].URL, string(filepath.Separator)) { + return false + } + if strings.TrimPrefix(b.Modules[i].Path, string(filepath.Separator)) != + strings.TrimPrefix(other.Modules[i].Path, string(filepath.Separator)) { + return false + } + if !b.Modules[i].Parsed.Equal(other.Modules[i].Parsed) { + return false + } + if !bytes.Equal(b.Modules[i].Raw, other.Modules[i].Raw) { + return false + } + } + if (b.Wasm == nil && other.Wasm != nil) || (b.Wasm != nil && other.Wasm == nil) { + return false + } + + return bytes.Equal(b.Wasm, other.Wasm) +} + +// Copy returns a deep copy of the bundle. +func (b Bundle) Copy() Bundle { + + // Copy data. 
+ var x interface{} = b.Data + + if err := util.RoundTrip(&x); err != nil { + panic(err) + } + + if x != nil { + b.Data = x.(map[string]interface{}) + } + + // Copy modules. + for i := range b.Modules { + bs := make([]byte, len(b.Modules[i].Raw)) + copy(bs, b.Modules[i].Raw) + b.Modules[i].Raw = bs + b.Modules[i].Parsed = b.Modules[i].Parsed.Copy() + } + + // Copy manifest. + b.Manifest = b.Manifest.Copy() + + return b +} + +func (b *Bundle) insertData(key []string, value interface{}) error { + // Build an object with the full structure for the value + obj, err := mktree(key, value) + if err != nil { + return err + } + + // Merge the new data in with the current bundle data object + merged, ok := merge.InterfaceMaps(b.Data, obj) + if !ok { + return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...)) + } + + b.Data = merged + + return nil +} + +func (b *Bundle) readData(key []string) *interface{} { + + if len(key) == 0 { + if len(b.Data) == 0 { + return nil + } + var result interface{} = b.Data + return &result + } + + node := b.Data + + for i := 0; i < len(key)-1; i++ { + + child, ok := node[key[i]] + if !ok { + return nil + } + + childObj, ok := child.(map[string]interface{}) + if !ok { + return nil + } + + node = childObj + } + + child, ok := node[key[len(key)-1]] + if !ok { + return nil + } + + return &child +} + +// Type returns the type of the bundle. +func (b *Bundle) Type() string { + if len(b.Patch.Data) != 0 { + return DeltaBundleType + } + return SnapshotBundleType +} + +func mktree(path []string, value interface{}) (map[string]interface{}, error) { + if len(path) == 0 { + // For 0 length path the value is the full tree. + obj, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("root value must be object") + } + return obj, nil + } + + dir := map[string]interface{}{} + for i := len(path) - 1; i > 0; i-- { + dir[path[i]] = value + value = dir + dir = map[string]interface{}{} + } + dir[path[0]] = value + + return dir, nil +} + +// Merge accepts a set of bundles and merges them into a single result bundle. If there are +// any conflicts during the merge (e.g., with roots) an error is returned. The result bundle +// will have an empty revision except in the special case where a single bundle is provided +// (and in that case the bundle is just returned unmodified.) +func Merge(bundles []*Bundle) (*Bundle, error) { + return MergeWithRegoVersion(bundles, ast.DefaultRegoVersion, false) +} + +// MergeWithRegoVersion creates a merged bundle from the provided bundles, similar to Merge. +// If more than one bundle is provided, the rego version of the result bundle is set to the provided regoVersion. +// Any Rego files in a bundle of conflicting rego version will be marked in the result's manifest with the rego version +// of its original bundle. If the Rego file already had an overriding rego version, it will be preserved. +// If a single bundle is provided, it will retain any rego version information it already had. If it has none, the +// provided regoVersion will be applied to it. +// If usePath is true, per-file rego-versions will be calculated using the file's ModuleFile.Path; otherwise, the file's +// ModuleFile.URL will be used. 
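A sketch of the merge semantics spelled out in the comment above, using two invented bundles with disjoint roots and different source Rego versions.

package main

import (
    "fmt"
    "log"

    "github.com/open-policy-agent/opa/v1/ast"
    "github.com/open-policy-agent/opa/v1/bundle"
)

func newBundle(root string, v ast.RegoVersion) *bundle.Bundle {
    b := &bundle.Bundle{Data: map[string]interface{}{}}
    roots := []string{root}
    b.Manifest.Roots = &roots
    b.SetRegoVersion(v)
    return b
}

func main() {
    a := newBundle("teams/alpha", ast.RegoV0)
    c := newBundle("teams/beta", ast.RegoV1)

    merged, err := bundle.MergeWithRegoVersion([]*bundle.Bundle{a, c}, ast.RegoV1, false)
    if err != nil {
        log.Fatal(err)
    }

    // The merged manifest carries the target version; per-file overrides are
    // only recorded for modules, and these toy bundles have none.
    fmt.Println(*merged.Manifest.RegoVersion, *merged.Manifest.Roots)
}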
+func MergeWithRegoVersion(bundles []*Bundle, regoVersion ast.RegoVersion, usePath bool) (*Bundle, error) { + + if len(bundles) == 0 { + return nil, errors.New("expected at least one bundle") + } + + if regoVersion == ast.RegoUndefined { + regoVersion = ast.DefaultRegoVersion + } + + if len(bundles) == 1 { + result := bundles[0] + // We respect the bundle rego-version, defaulting to the provided rego version if not set. + result.SetRegoVersion(result.RegoVersion(regoVersion)) + fileRegoVersions, err := bundleRegoVersions(result, result.RegoVersion(regoVersion), usePath) + if err != nil { + return nil, err + } + result.Manifest.FileRegoVersions = fileRegoVersions + return result, nil + } + + var roots []string + var result Bundle + + for _, b := range bundles { + + if b.Manifest.Roots == nil { + return nil, errors.New("bundle manifest not initialized") + } + + roots = append(roots, *b.Manifest.Roots...) + + result.Modules = append(result.Modules, b.Modules...) + + for _, root := range *b.Manifest.Roots { + key := strings.Split(root, "/") + if val := b.readData(key); val != nil { + if err := result.insertData(key, *val); err != nil { + return nil, err + } + } + } + + result.Manifest.WasmResolvers = append(result.Manifest.WasmResolvers, b.Manifest.WasmResolvers...) + result.WasmModules = append(result.WasmModules, b.WasmModules...) + result.PlanModules = append(result.PlanModules, b.PlanModules...) + + if b.Manifest.RegoVersion != nil || len(b.Manifest.FileRegoVersions) > 0 { + if result.Manifest.FileRegoVersions == nil { + result.Manifest.FileRegoVersions = map[string]int{} + } + + fileRegoVersions, err := bundleRegoVersions(b, regoVersion, usePath) + if err != nil { + return nil, err + } + for k, v := range fileRegoVersions { + result.Manifest.FileRegoVersions[k] = v + } + } + } + + // We respect the bundle rego-version, defaulting to the provided rego version if not set. + result.SetRegoVersion(result.RegoVersion(regoVersion)) + + if result.Data == nil { + result.Data = map[string]interface{}{} + } + + result.Manifest.Roots = &roots + + if err := result.Manifest.validateAndInjectDefaults(result); err != nil { + return nil, err + } + + return &result, nil +} + +func bundleRegoVersions(bundle *Bundle, regoVersion ast.RegoVersion, usePath bool) (map[string]int, error) { + fileRegoVersions := map[string]int{} + + // we drop the bundle-global rego versions and record individual rego versions for each module. + for _, m := range bundle.Modules { + // We fetch rego-version by the path relative to the bundle root, as the complete path of the module might + // contain the path between OPA working directory and the bundle root. + v, err := bundle.RegoVersionForFile(bundleRelativePath(m, usePath), bundle.RegoVersion(regoVersion)) + if err != nil { + return nil, err + } + // only record the rego version if it's different from one applied globally to the result bundle + if v != regoVersion { + // We store the rego version by the absolute path to the bundle root, as this will be the - possibly new - path + // to the module inside the merged bundle. 
+ fileRegoVersions[bundleAbsolutePath(m, usePath)] = v.Int() + } + } + + return fileRegoVersions, nil +} + +func bundleRelativePath(m ModuleFile, usePath bool) string { + p := m.RelativePath + if p == "" { + if usePath { + p = m.Path + } else { + p = m.URL + } + } + return p +} + +func bundleAbsolutePath(m ModuleFile, usePath bool) string { + var p string + if usePath { + p = m.Path + } else { + p = m.URL + } + if !path.IsAbs(p) { + p = "/" + p + } + return path.Clean(p) +} + +// RootPathsOverlap takes in two bundle root paths and returns true if they overlap. +func RootPathsOverlap(pathA string, pathB string) bool { + a := rootPathSegments(pathA) + b := rootPathSegments(pathB) + return rootContains(a, b) || rootContains(b, a) +} + +// RootPathsContain takes a set of bundle root paths and returns true if the path is contained. +func RootPathsContain(roots []string, path string) bool { + segments := rootPathSegments(path) + for i := range roots { + if rootContains(rootPathSegments(roots[i]), segments) { + return true + } + } + return false +} + +func rootPathSegments(path string) []string { + return strings.Split(path, "/") +} + +func rootContains(root []string, other []string) bool { + + // A single segment, empty string root always contains the other. + if len(root) == 1 && root[0] == "" { + return true + } + + if len(root) > len(other) { + return false + } + + for j := range root { + if root[j] != other[j] { + return false + } + } + + return true +} + +func insertValue(b *Bundle, path string, value interface{}) error { + if err := b.insertData(getNormalizedPath(path), value); err != nil { + return fmt.Errorf("bundle load failed on %v: %w", path, err) + } + return nil +} + +func getNormalizedPath(path string) []string { + // Remove leading / and . characters from the directory path. If the bundle + // was written with OPA then the paths will contain a leading slash. On the + // other hand, if the path is empty, filepath.Dir will return '.'. + // Note: filepath.Dir can return paths with '\' separators, always use + // filepath.ToSlash to keep them normalized. + dirpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.") + var key []string + if dirpath != "" { + key = strings.Split(dirpath, "/") + } + return key +} + +func dfs(value interface{}, path string, fn func(string, interface{}) (bool, error)) error { + if stop, err := fn(path, value); err != nil { + return err + } else if stop { + return nil + } + obj, ok := value.(map[string]interface{}) + if !ok { + return nil + } + for key := range obj { + if err := dfs(obj[key], path+"/"+key, fn); err != nil { + return err + } + } + return nil +} + +func modulePathWithPrefix(bundleName string, modulePath string) string { + // Default prefix is just the bundle name + prefix := bundleName + + // Bundle names are sometimes just file paths, some of which + // are full urls (file:///foo/). Parse these and only use the path. + parsed, err := url.Parse(bundleName) + if err == nil { + prefix = filepath.Join(parsed.Host, parsed.Path) + } + + // Note: filepath.Join can return paths with '\' separators, always use + // filepath.ToSlash to keep them normalized. + return normalizePath(filepath.Join(prefix, modulePath)) +} + +// IsStructuredDoc checks if the file name equals a structured file extension ex. 
".json" +func IsStructuredDoc(name string) bool { + return filepath.Base(name) == dataFile || filepath.Base(name) == yamlDataFile || + filepath.Base(name) == SignaturesFile || filepath.Base(name) == ManifestExt +} + +func preProcessBundle(loader DirectoryLoader, skipVerify bool, sizeLimitBytes int64) (SignaturesConfig, Patch, []*Descriptor, error) { + descriptors := []*Descriptor{} + var signatures SignaturesConfig + var patch Patch + + for { + f, err := loader.NextFile() + if err == io.EOF { + break + } + + if err != nil { + return signatures, patch, nil, fmt.Errorf("bundle read failed: %w", err) + } + + // check for the signatures file + if !skipVerify && strings.HasSuffix(f.Path(), SignaturesFile) { + buf, err := readFile(f, sizeLimitBytes) + if err != nil { + return signatures, patch, nil, err + } + + if err := util.NewJSONDecoder(&buf).Decode(&signatures); err != nil { + return signatures, patch, nil, fmt.Errorf("bundle load failed on signatures decode: %w", err) + } + } else if !strings.HasSuffix(f.Path(), SignaturesFile) { + descriptors = append(descriptors, f) + + if filepath.Base(f.Path()) == patchFile { + + var b bytes.Buffer + tee := io.TeeReader(f.reader, &b) + f.reader = tee + + buf, err := readFile(f, sizeLimitBytes) + if err != nil { + return signatures, patch, nil, err + } + + if err := util.NewJSONDecoder(&buf).Decode(&patch); err != nil { + return signatures, patch, nil, fmt.Errorf("bundle load failed on patch decode: %w", err) + } + + f.reader = &b + } + } + } + return signatures, patch, descriptors, nil +} + +func readFile(f *Descriptor, sizeLimitBytes int64) (bytes.Buffer, error) { + // Case for pre-loaded byte buffers, like those from the tarballLoader. + if bb, ok := f.reader.(*bytes.Buffer); ok { + _ = f.Close() // always close, even on error + + if int64(bb.Len()) >= sizeLimitBytes { + return *bb, fmt.Errorf("bundle file '%v' size (%d bytes) exceeded max size (%v bytes)", + strings.TrimPrefix(f.Path(), "/"), bb.Len(), sizeLimitBytes-1) + } + + return *bb, nil + } + + // Case for *lazyFile readers: + if lf, ok := f.reader.(*lazyFile); ok { + var buf bytes.Buffer + if lf.file == nil { + var err error + if lf.file, err = os.Open(lf.path); err != nil { + return buf, fmt.Errorf("failed to open file %s: %w", f.path, err) + } + } + // Bail out if we can't read the whole file-- there's nothing useful we can do at that point! + fileSize, _ := fstatFileSize(lf.file) + if fileSize > sizeLimitBytes { + return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), fileSize, sizeLimitBytes-1) + } + // Prealloc the buffer for the file read. + buffer := make([]byte, fileSize) + _, err := io.ReadFull(lf.file, buffer) + if err != nil { + return buf, err + } + _ = lf.file.Close() // always close, even on error + + // Note(philipc): Replace the lazyFile reader in the *Descriptor with a + // pointer to the wrapping bytes.Buffer, so that we don't re-read the + // file on disk again by accident. + buf = *bytes.NewBuffer(buffer) + f.reader = &buf + return buf, nil + } + + // Fallback case: + var buf bytes.Buffer + n, err := f.Read(&buf, sizeLimitBytes) + _ = f.Close() // always close, even on error + + if err != nil && err != io.EOF { + return buf, err + } else if err == nil && n >= sizeLimitBytes { + return buf, fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(f.Path(), "/"), n, sizeLimitBytes-1) + } + + return buf, nil +} + +// Takes an already open file handle and invokes the os.Stat system call on it +// to determine the file's size. 
Passes any errors from *File.Stat on up to the +// caller. +func fstatFileSize(f *os.File) (int64, error) { + fileInfo, err := f.Stat() + if err != nil { + return 0, err + } + return fileInfo.Size(), nil +} + +func normalizePath(p string) string { + return filepath.ToSlash(p) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go new file mode 100644 index 000000000..12e159254 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/file.go @@ -0,0 +1,517 @@ +package bundle + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/open-policy-agent/opa/v1/loader/filter" + + "github.com/open-policy-agent/opa/v1/storage" +) + +const maxSizeLimitBytesErrMsg = "bundle file %s size (%d bytes) exceeds configured size_limit_bytes (%d bytes)" + +// Descriptor contains information about a file and +// can be used to read the file contents. +type Descriptor struct { + url string + path string + reader io.Reader + closer io.Closer + closeOnce *sync.Once +} + +// lazyFile defers reading the file until the first call of Read +type lazyFile struct { + path string + file *os.File +} + +// newLazyFile creates a new instance of lazyFile +func newLazyFile(path string) *lazyFile { + return &lazyFile{path: path} +} + +// Read implements io.Reader. It will check if the file has been opened +// and open it if it has not before attempting to read using the file's +// read method +func (f *lazyFile) Read(b []byte) (int, error) { + var err error + + if f.file == nil { + if f.file, err = os.Open(f.path); err != nil { + return 0, fmt.Errorf("failed to open file %s: %w", f.path, err) + } + } + + return f.file.Read(b) +} + +// Close closes the lazy file if it has been opened using the file's +// close method +func (f *lazyFile) Close() error { + if f.file != nil { + return f.file.Close() + } + + return nil +} + +func NewDescriptor(url, path string, reader io.Reader) *Descriptor { + return &Descriptor{ + url: url, + path: path, + reader: reader, + } +} + +func (d *Descriptor) WithCloser(closer io.Closer) *Descriptor { + d.closer = closer + d.closeOnce = new(sync.Once) + return d +} + +// Path returns the path of the file. +func (d *Descriptor) Path() string { + return d.path +} + +// URL returns the url of the file. +func (d *Descriptor) URL() string { + return d.url +} + +// Read will read all the contents from the file the Descriptor refers to +// into the dest writer up n bytes. Will return an io.EOF error +// if EOF is encountered before n bytes are read. +func (d *Descriptor) Read(dest io.Writer, n int64) (int64, error) { + n, err := io.CopyN(dest, d.reader, n) + return n, err +} + +// Close the file, on some Loader implementations this might be a no-op. +// It should *always* be called regardless of file. +func (d *Descriptor) Close() error { + var err error + if d.closer != nil { + d.closeOnce.Do(func() { + err = d.closer.Close() + }) + } + return err +} + +type PathFormat int64 + +const ( + Chrooted PathFormat = iota + SlashRooted + Passthrough +) + +// DirectoryLoader defines an interface which can be used to load +// files from a directory by iterating over each one in the tree. +type DirectoryLoader interface { + // NextFile must return io.EOF if there is no next value. The returned + // descriptor should *always* be closed when no longer needed. 
+ NextFile() (*Descriptor, error) + WithFilter(filter filter.LoaderFilter) DirectoryLoader + WithPathFormat(PathFormat) DirectoryLoader + WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader + WithFollowSymlinks(followSymlinks bool) DirectoryLoader +} + +type dirLoader struct { + root string + files []string + idx int + filter filter.LoaderFilter + pathFormat PathFormat + maxSizeLimitBytes int64 + followSymlinks bool +} + +// Normalize root directory, ex "./src/bundle" -> "src/bundle" +// We don't need an absolute path, but this makes the joined/trimmed +// paths more uniform. +func normalizeRootDirectory(root string) string { + if len(root) > 1 { + if root[0] == '.' && root[1] == filepath.Separator { + if len(root) == 2 { + root = root[:1] // "./" -> "." + } else { + root = root[2:] // remove leading "./" + } + } + } + return root +} + +// NewDirectoryLoader returns a basic DirectoryLoader implementation +// that will load files from a given root directory path. +func NewDirectoryLoader(root string) DirectoryLoader { + d := dirLoader{ + root: normalizeRootDirectory(root), + pathFormat: Chrooted, + } + return &d +} + +// WithFilter specifies the filter object to use to filter files while loading bundles +func (d *dirLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { + d.filter = filter + return d +} + +// WithPathFormat specifies how a path is formatted in a Descriptor +func (d *dirLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { + d.pathFormat = pathFormat + return d +} + +// WithSizeLimitBytes specifies the maximum size of any file in the directory to read +func (d *dirLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { + d.maxSizeLimitBytes = sizeLimitBytes + return d +} + +// WithFollowSymlinks specifies whether to follow symlinks when loading files from the directory +func (d *dirLoader) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { + d.followSymlinks = followSymlinks + return d +} + +func formatPath(fileName string, root string, pathFormat PathFormat) string { + switch pathFormat { + case SlashRooted: + if !strings.HasPrefix(fileName, string(filepath.Separator)) { + return string(filepath.Separator) + fileName + } + return fileName + case Chrooted: + // Trim off the root directory and return path as if chrooted + result := strings.TrimPrefix(fileName, filepath.FromSlash(root)) + if root == "." && filepath.Base(fileName) == ManifestExt { + result = fileName + } + if !strings.HasPrefix(result, string(filepath.Separator)) { + result = string(filepath.Separator) + result + } + return result + case Passthrough: + fallthrough + default: + return fileName + } +} + +// NextFile iterates to the next file in the directory tree +// and returns a file Descriptor for the file. 
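Driving a DirectoryLoader by hand, roughly the way the bundle Reader does internally; the directory name and size limit are invented.

package main

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "log"

    "github.com/open-policy-agent/opa/v1/bundle"
)

func main() {
    // Hypothetical directory; files larger than 1MB cause an error.
    loader := bundle.NewDirectoryLoader("./bundle-src").WithSizeLimitBytes(1024 * 1024)

    for {
        d, err := loader.NextFile()
        if errors.Is(err, io.EOF) {
            break
        }
        if err != nil {
            log.Fatal(err)
        }

        var buf bytes.Buffer
        if _, err := d.Read(&buf, 1024*1024); err != nil && !errors.Is(err, io.EOF) {
            log.Fatal(err)
        }
        d.Close() // descriptors must always be closed

        fmt.Printf("%s: %d bytes\n", d.Path(), buf.Len())
    }
}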
+func (d *dirLoader) NextFile() (*Descriptor, error) { + // build a list of all files we will iterate over and read, but only one time + if d.files == nil { + d.files = []string{} + err := filepath.Walk(d.root, func(path string, info os.FileInfo, _ error) error { + if info == nil { + return nil + } + + if info.Mode().IsRegular() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) + } + d.files = append(d.files, path) + } else if d.followSymlinks && info.Mode().Type()&fs.ModeSymlink == fs.ModeSymlink { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf(maxSizeLimitBytesErrMsg, strings.TrimPrefix(path, "/"), info.Size(), d.maxSizeLimitBytes) + } + d.files = append(d.files, path) + } else if info.Mode().IsDir() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { + return filepath.SkipDir + } + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("failed to list files: %w", err) + } + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if d.idx >= len(d.files) { + return nil, io.EOF + } + + fileName := d.files[d.idx] + d.idx++ + fh := newLazyFile(fileName) + + cleanedPath := formatPath(fileName, d.root, d.pathFormat) + f := NewDescriptor(filepath.Join(d.root, cleanedPath), cleanedPath, fh).WithCloser(fh) + return f, nil +} + +type tarballLoader struct { + baseURL string + r io.Reader + tr *tar.Reader + files []file + idx int + filter filter.LoaderFilter + skipDir map[string]struct{} + pathFormat PathFormat + maxSizeLimitBytes int64 +} + +type file struct { + name string + reader io.Reader + path storage.Path + raw []byte +} + +// NewTarballLoader is deprecated. Use NewTarballLoaderWithBaseURL instead. +func NewTarballLoader(r io.Reader) DirectoryLoader { + l := tarballLoader{ + r: r, + pathFormat: Passthrough, + } + return &l +} + +// NewTarballLoaderWithBaseURL returns a new DirectoryLoader that reads +// files out of a gzipped tar archive. The file URLs will be prefixed +// with the baseURL. +func NewTarballLoaderWithBaseURL(r io.Reader, baseURL string) DirectoryLoader { + l := tarballLoader{ + baseURL: strings.TrimSuffix(baseURL, "/"), + r: r, + pathFormat: Passthrough, + } + return &l +} + +// WithFilter specifies the filter object to use to filter files while loading bundles +func (t *tarballLoader) WithFilter(filter filter.LoaderFilter) DirectoryLoader { + t.filter = filter + return t +} + +// WithPathFormat specifies how a path is formatted in a Descriptor +func (t *tarballLoader) WithPathFormat(pathFormat PathFormat) DirectoryLoader { + t.pathFormat = pathFormat + return t +} + +// WithSizeLimitBytes specifies the maximum size of any file in the tarball to read +func (t *tarballLoader) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { + t.maxSizeLimitBytes = sizeLimitBytes + return t +} + +// WithFollowSymlinks is a no-op for tarballLoader +func (t *tarballLoader) WithFollowSymlinks(_ bool) DirectoryLoader { + return t +} + +// NextFile iterates to the next file in the directory tree +// and returns a file Descriptor for the file. 
+func (t *tarballLoader) NextFile() (*Descriptor, error) { + if t.tr == nil { + gr, err := gzip.NewReader(t.r) + if err != nil { + return nil, fmt.Errorf("archive read failed: %w", err) + } + + t.tr = tar.NewReader(gr) + } + + if t.files == nil { + t.files = []file{} + + if t.skipDir == nil { + t.skipDir = map[string]struct{}{} + } + + for { + header, err := t.tr.Next() + + if err == io.EOF { + break + } + + if err != nil { + return nil, err + } + + // Keep iterating on the archive until we find a normal file + if header.Typeflag == tar.TypeReg { + + if t.filter != nil { + + if t.filter(filepath.ToSlash(header.Name), header.FileInfo(), getdepth(header.Name, false)) { + continue + } + + basePath := strings.Trim(filepath.Dir(filepath.ToSlash(header.Name)), "/") + + // check if the directory is to be skipped + if _, ok := t.skipDir[basePath]; ok { + continue + } + + match := false + for p := range t.skipDir { + if strings.HasPrefix(basePath, p) { + match = true + break + } + } + + if match { + continue + } + } + + if t.maxSizeLimitBytes > 0 && header.Size > t.maxSizeLimitBytes { + return nil, fmt.Errorf(maxSizeLimitBytesErrMsg, header.Name, header.Size, t.maxSizeLimitBytes) + } + + f := file{name: header.Name} + + // Note(philipc): We rely on the previous size check in this loop for safety. + buf := bytes.NewBuffer(make([]byte, 0, header.Size)) + if _, err := io.Copy(buf, t.tr); err != nil { + return nil, fmt.Errorf("failed to copy file %s: %w", header.Name, err) + } + + f.reader = buf + + t.files = append(t.files, f) + } else if header.Typeflag == tar.TypeDir { + cleanedPath := filepath.ToSlash(header.Name) + if t.filter != nil && t.filter(cleanedPath, header.FileInfo(), getdepth(header.Name, true)) { + t.skipDir[strings.Trim(cleanedPath, "/")] = struct{}{} + } + } + } + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if t.idx >= len(t.files) { + return nil, io.EOF + } + + f := t.files[t.idx] + t.idx++ + + cleanedPath := formatPath(f.name, "", t.pathFormat) + d := NewDescriptor(filepath.Join(t.baseURL, cleanedPath), cleanedPath, f.reader) + return d, nil +} + +// Next implements the storage.Iterator interface. +// It iterates to the next policy or data file in the directory tree +// and returns a storage.Update for the file. 
+func (it *iterator) Next() (*storage.Update, error) { + if it.files == nil { + it.files = []file{} + + for _, item := range it.raw { + f := file{name: item.Path} + + p, err := getFileStoragePath(f.name) + if err != nil { + return nil, err + } + + f.path = p + + f.raw = item.Value + + it.files = append(it.files, f) + } + + sortFilePathAscend(it.files) + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if it.idx >= len(it.files) { + return nil, io.EOF + } + + f := it.files[it.idx] + it.idx++ + + isPolicy := false + if strings.HasSuffix(f.name, RegoExt) { + isPolicy = true + } + + return &storage.Update{ + Path: f.path, + Value: f.raw, + IsPolicy: isPolicy, + }, nil +} + +type iterator struct { + raw []Raw + files []file + idx int +} + +func NewIterator(raw []Raw) storage.Iterator { + it := iterator{ + raw: raw, + } + return &it +} + +func sortFilePathAscend(files []file) { + sort.Slice(files, func(i, j int) bool { + return len(files[i].path) < len(files[j].path) + }) +} + +func getdepth(path string, isDir bool) int { + if isDir { + cleanedPath := strings.Trim(filepath.ToSlash(path), "/") + return len(strings.Split(cleanedPath, "/")) + } + + basePath := strings.Trim(filepath.Dir(filepath.ToSlash(path)), "/") + return len(strings.Split(basePath, "/")) +} + +func getFileStoragePath(path string) (storage.Path, error) { + fpath := strings.TrimLeft(normalizePath(filepath.Dir(path)), "/.") + if strings.HasSuffix(path, RegoExt) { + fpath = strings.Trim(normalizePath(path), "/") + } + + p, ok := storage.ParsePathEscaped("/" + fpath) + if !ok { + return nil, fmt.Errorf("storage path invalid: %v", path) + } + return p, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go new file mode 100644 index 000000000..7ab3de989 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/filefs.go @@ -0,0 +1,143 @@ +//go:build go1.16 +// +build go1.16 + +package bundle + +import ( + "fmt" + "io" + "io/fs" + "path/filepath" + "sync" + + "github.com/open-policy-agent/opa/v1/loader/filter" +) + +const ( + defaultFSLoaderRoot = "." 
+) + +type dirLoaderFS struct { + sync.Mutex + filesystem fs.FS + files []string + idx int + filter filter.LoaderFilter + root string + pathFormat PathFormat + maxSizeLimitBytes int64 + followSymlinks bool +} + +// NewFSLoader returns a basic DirectoryLoader implementation +// that will load files from a fs.FS interface +func NewFSLoader(filesystem fs.FS) (DirectoryLoader, error) { + return NewFSLoaderWithRoot(filesystem, defaultFSLoaderRoot), nil +} + +// NewFSLoaderWithRoot returns a basic DirectoryLoader implementation +// that will load files from a fs.FS interface at the supplied root +func NewFSLoaderWithRoot(filesystem fs.FS, root string) DirectoryLoader { + d := dirLoaderFS{ + filesystem: filesystem, + root: normalizeRootDirectory(root), + pathFormat: Chrooted, + } + + return &d +} + +func (d *dirLoaderFS) walkDir(path string, dirEntry fs.DirEntry, err error) error { + if err != nil { + return err + } + + if dirEntry != nil { + info, err := dirEntry.Info() + if err != nil { + return err + } + + if dirEntry.Type().IsRegular() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) + } + + d.files = append(d.files, path) + } else if dirEntry.Type()&fs.ModeSymlink != 0 && d.followSymlinks { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, false)) { + return nil + } + + if d.maxSizeLimitBytes > 0 && info.Size() > d.maxSizeLimitBytes { + return fmt.Errorf("file %s size %d exceeds limit of %d", path, info.Size(), d.maxSizeLimitBytes) + } + + d.files = append(d.files, path) + } else if dirEntry.Type().IsDir() { + if d.filter != nil && d.filter(filepath.ToSlash(path), info, getdepth(path, true)) { + return fs.SkipDir + } + } + } + return nil +} + +// WithFilter specifies the filter object to use to filter files while loading bundles +func (d *dirLoaderFS) WithFilter(filter filter.LoaderFilter) DirectoryLoader { + d.filter = filter + return d +} + +// WithPathFormat specifies how a path is formatted in a Descriptor +func (d *dirLoaderFS) WithPathFormat(pathFormat PathFormat) DirectoryLoader { + d.pathFormat = pathFormat + return d +} + +// WithSizeLimitBytes specifies the maximum size of any file in the filesystem directory to read +func (d *dirLoaderFS) WithSizeLimitBytes(sizeLimitBytes int64) DirectoryLoader { + d.maxSizeLimitBytes = sizeLimitBytes + return d +} + +func (d *dirLoaderFS) WithFollowSymlinks(followSymlinks bool) DirectoryLoader { + d.followSymlinks = followSymlinks + return d +} + +// NextFile iterates to the next file in the directory tree +// and returns a file Descriptor for the file. 
+func (d *dirLoaderFS) NextFile() (*Descriptor, error) { + d.Lock() + defer d.Unlock() + + if d.files == nil { + err := fs.WalkDir(d.filesystem, d.root, d.walkDir) + if err != nil { + return nil, fmt.Errorf("failed to list files: %w", err) + } + } + + // If done reading files then just return io.EOF + // errors for each NextFile() call + if d.idx >= len(d.files) { + return nil, io.EOF + } + + fileName := d.files[d.idx] + d.idx++ + + fh, err := d.filesystem.Open(fileName) + if err != nil { + return nil, fmt.Errorf("failed to open file %s: %w", fileName, err) + } + + cleanedPath := formatPath(fileName, d.root, d.pathFormat) + f := NewDescriptor(cleanedPath, cleanedPath, fh).WithCloser(fh) + return f, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go new file mode 100644 index 000000000..ab6fcd0f3 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/hash.go @@ -0,0 +1,136 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package bundle + +import ( + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/json" + "fmt" + "hash" + "io" + "strings" + + "github.com/open-policy-agent/opa/v1/util" +) + +// HashingAlgorithm represents a subset of hashing algorithms implemented in Go +type HashingAlgorithm string + +// Supported values for HashingAlgorithm +const ( + MD5 HashingAlgorithm = "MD5" + SHA1 HashingAlgorithm = "SHA-1" + SHA224 HashingAlgorithm = "SHA-224" + SHA256 HashingAlgorithm = "SHA-256" + SHA384 HashingAlgorithm = "SHA-384" + SHA512 HashingAlgorithm = "SHA-512" + SHA512224 HashingAlgorithm = "SHA-512-224" + SHA512256 HashingAlgorithm = "SHA-512-256" +) + +// String returns the string representation of a HashingAlgorithm +func (alg HashingAlgorithm) String() string { + return string(alg) +} + +// SignatureHasher computes a signature digest for a file with (structured or unstructured) data and policy +type SignatureHasher interface { + HashFile(v interface{}) ([]byte, error) +} + +type hasher struct { + h func() hash.Hash // hash function factory +} + +// NewSignatureHasher returns a signature hasher suitable for a particular hashing algorithm +func NewSignatureHasher(alg HashingAlgorithm) (SignatureHasher, error) { + h := &hasher{} + + switch alg { + case MD5: + h.h = md5.New + case SHA1: + h.h = sha1.New + case SHA224: + h.h = sha256.New224 + case SHA256: + h.h = sha256.New + case SHA384: + h.h = sha512.New384 + case SHA512: + h.h = sha512.New + case SHA512224: + h.h = sha512.New512_224 + case SHA512256: + h.h = sha512.New512_256 + default: + return nil, fmt.Errorf("unsupported hashing algorithm: %s", alg) + } + + return h, nil +} + +// HashFile hashes the file content, JSON or binary, both in golang native format. +func (h *hasher) HashFile(v interface{}) ([]byte, error) { + hf := h.h() + walk(v, hf) + return hf.Sum(nil), nil +} + +// walk hashes the file content, JSON or binary, both in golang native format. +// +// Computation for unstructured documents is a hash of the document. +// +// Computation for the types of structured JSON document is as follows: +// +// object: Hash {, then each key (in alphabetical order) and digest of the value, then comma (between items) and finally }. +// +// array: Hash [, then digest of the value, then comma (between items) and finally ]. 
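The walk routine below implements that canonical ordering. A short, illustrative sketch of computing a digest over a structured document with the hasher defined above:

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/open-policy-agent/opa/v1/bundle"
)

func main() {
	h, err := bundle.NewSignatureHasher(bundle.SHA256)
	if err != nil {
		panic(err)
	}

	// Object keys are hashed in sorted order, so insertion order and
	// whitespace in the original document do not affect the digest.
	doc := map[string]interface{}{
		"b": []interface{}{"x", "y"},
		"a": "z",
	}

	digest, err := h.HashFile(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(digest))
}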
+func walk(v interface{}, h io.Writer) { + + switch x := v.(type) { + case map[string]interface{}: + _, _ = h.Write([]byte("{")) + + for i, key := range util.KeysSorted(x) { + if i > 0 { + _, _ = h.Write([]byte(",")) + } + + _, _ = h.Write(encodePrimitive(key)) + _, _ = h.Write([]byte(":")) + walk(x[key], h) + } + + _, _ = h.Write([]byte("}")) + case []interface{}: + _, _ = h.Write([]byte("[")) + + for i, e := range x { + if i > 0 { + _, _ = h.Write([]byte(",")) + } + walk(e, h) + } + + _, _ = h.Write([]byte("]")) + case []byte: + _, _ = h.Write(x) + default: + _, _ = h.Write(encodePrimitive(x)) + } +} + +func encodePrimitive(v interface{}) []byte { + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + encoder.SetEscapeHTML(false) + _ = encoder.Encode(v) + return []byte(strings.Trim(buf.String(), "\n")) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go new file mode 100644 index 000000000..aad30a675 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/keys.go @@ -0,0 +1,144 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package bundle provide helpers that assist in creating the verification and signing key configuration +package bundle + +import ( + "encoding/pem" + "fmt" + "os" + + "github.com/open-policy-agent/opa/internal/jwx/jwa" + "github.com/open-policy-agent/opa/internal/jwx/jws/sign" + "github.com/open-policy-agent/opa/v1/keys" + + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + defaultTokenSigningAlg = "RS256" +) + +// KeyConfig holds the keys used to sign or verify bundles and tokens +// Moved to own package, alias kept for backwards compatibility +type KeyConfig = keys.Config + +// VerificationConfig represents the key configuration used to verify a signed bundle +type VerificationConfig struct { + PublicKeys map[string]*KeyConfig + KeyID string `json:"keyid"` + Scope string `json:"scope"` + Exclude []string `json:"exclude_files"` +} + +// NewVerificationConfig return a new VerificationConfig +func NewVerificationConfig(keys map[string]*KeyConfig, id, scope string, exclude []string) *VerificationConfig { + return &VerificationConfig{ + PublicKeys: keys, + KeyID: id, + Scope: scope, + Exclude: exclude, + } +} + +// ValidateAndInjectDefaults validates the config and inserts default values +func (vc *VerificationConfig) ValidateAndInjectDefaults(keys map[string]*KeyConfig) error { + vc.PublicKeys = keys + + if vc.KeyID != "" { + found := false + for key := range keys { + if key == vc.KeyID { + found = true + break + } + } + + if !found { + return fmt.Errorf("key id %s not found", vc.KeyID) + } + } + return nil +} + +// GetPublicKey returns the public key corresponding to the given key id +func (vc *VerificationConfig) GetPublicKey(id string) (*KeyConfig, error) { + var kc *KeyConfig + var ok bool + + if kc, ok = vc.PublicKeys[id]; !ok { + return nil, fmt.Errorf("verification key corresponding to ID %v not found", id) + } + return kc, nil +} + +// SigningConfig represents the key configuration used to generate a signed bundle +type SigningConfig struct { + Plugin string + Key string + Algorithm string + ClaimsPath string +} + +// NewSigningConfig return a new SigningConfig +func NewSigningConfig(key, alg, claimsPath string) *SigningConfig { + if alg == "" { + alg = defaultTokenSigningAlg + } + + return 
&SigningConfig{ + Plugin: defaultSignerID, + Key: key, + Algorithm: alg, + ClaimsPath: claimsPath, + } +} + +// WithPlugin sets the signing plugin in the signing config +func (s *SigningConfig) WithPlugin(plugin string) *SigningConfig { + if plugin != "" { + s.Plugin = plugin + } + return s +} + +// GetPrivateKey returns the private key or secret from the signing config +func (s *SigningConfig) GetPrivateKey() (interface{}, error) { + + block, _ := pem.Decode([]byte(s.Key)) + if block != nil { + return sign.GetSigningKey(s.Key, jwa.SignatureAlgorithm(s.Algorithm)) + } + + var priv string + if _, err := os.Stat(s.Key); err == nil { + bs, err := os.ReadFile(s.Key) + if err != nil { + return nil, err + } + priv = string(bs) + } else if os.IsNotExist(err) { + priv = s.Key + } else { + return nil, err + } + + return sign.GetSigningKey(priv, jwa.SignatureAlgorithm(s.Algorithm)) +} + +// GetClaims returns the claims by reading the file specified in the signing config +func (s *SigningConfig) GetClaims() (map[string]interface{}, error) { + var claims map[string]interface{} + + bs, err := os.ReadFile(s.ClaimsPath) + if err != nil { + return claims, err + } + + if err := util.UnmarshalJSON(bs, &claims); err != nil { + return claims, err + } + return claims, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go new file mode 100644 index 000000000..cf9a3e183 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/sign.go @@ -0,0 +1,135 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package bundle provide helpers that assist in the creating a signed bundle +package bundle + +import ( + "crypto/rand" + "encoding/json" + "fmt" + + "github.com/open-policy-agent/opa/internal/jwx/jwa" + "github.com/open-policy-agent/opa/internal/jwx/jws" +) + +const defaultSignerID = "_default" + +var signers map[string]Signer + +// Signer is the interface expected for implementations that generate bundle signatures. +type Signer interface { + GenerateSignedToken([]FileInfo, *SigningConfig, string) (string, error) +} + +// GenerateSignedToken will retrieve the Signer implementation based on the Plugin specified +// in SigningConfig, and call its implementation of GenerateSignedToken. The signer generates +// a signed token given the list of files to be included in the payload and the bundle +// signing config. The keyID if non-empty, represents the value for the "keyid" claim in the token. +func GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) { + var plugin string + // for backwards compatibility, check if there is no plugin specified, and use default + if sc.Plugin == "" { + plugin = defaultSignerID + } else { + plugin = sc.Plugin + } + signer, err := GetSigner(plugin) + if err != nil { + return "", err + } + return signer.GenerateSignedToken(files, sc, keyID) +} + +// DefaultSigner is the default bundle signing implementation. It signs bundles by generating +// a JWT and signing it using a locally-accessible private key. +type DefaultSigner struct{} + +// GenerateSignedToken generates a signed token given the list of files to be +// included in the payload and the bundle signing config. 
The keyID if non-empty, +// represents the value for the "keyid" claim in the token +func (*DefaultSigner) GenerateSignedToken(files []FileInfo, sc *SigningConfig, keyID string) (string, error) { + payload, err := generatePayload(files, sc, keyID) + if err != nil { + return "", err + } + + privateKey, err := sc.GetPrivateKey() + if err != nil { + return "", err + } + + var headers jws.StandardHeaders + + if err := headers.Set(jws.AlgorithmKey, jwa.SignatureAlgorithm(sc.Algorithm)); err != nil { + return "", err + } + + if keyID != "" { + if err := headers.Set(jws.KeyIDKey, keyID); err != nil { + return "", err + } + } + + hdr, err := json.Marshal(headers) + if err != nil { + return "", err + } + + token, err := jws.SignLiteral(payload, + jwa.SignatureAlgorithm(sc.Algorithm), + privateKey, + hdr, + rand.Reader) + if err != nil { + return "", err + } + return string(token), nil +} + +func generatePayload(files []FileInfo, sc *SigningConfig, keyID string) ([]byte, error) { + payload := make(map[string]interface{}) + payload["files"] = files + + if sc.ClaimsPath != "" { + claims, err := sc.GetClaims() + if err != nil { + return nil, err + } + + for claim, value := range claims { + payload[claim] = value + } + } else { + if keyID != "" { + // keyid claim is deprecated but include it for backwards compatibility. + payload["keyid"] = keyID + } + } + return json.Marshal(payload) +} + +// GetSigner returns the Signer registered under the given id +func GetSigner(id string) (Signer, error) { + signer, ok := signers[id] + if !ok { + return nil, fmt.Errorf("no signer exists under id %s", id) + } + return signer, nil +} + +// RegisterSigner registers a Signer under the given id +func RegisterSigner(id string, s Signer) error { + if id == defaultSignerID { + return fmt.Errorf("signer id %s is reserved, use a different id", id) + } + signers[id] = s + return nil +} + +func init() { + signers = map[string]Signer{ + defaultSignerID: &DefaultSigner{}, + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go new file mode 100644 index 000000000..e77c052d9 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/store.go @@ -0,0 +1,1165 @@ +// Copyright 2019 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package bundle + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "path/filepath" + "strings" + + iCompiler "github.com/open-policy-agent/opa/internal/compiler" + "github.com/open-policy-agent/opa/internal/json/patch" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/util" +) + +// BundlesBasePath is the storage path used for storing bundle metadata +var BundlesBasePath = storage.MustParsePath("/system/bundles") + +var ModulesInfoBasePath = storage.MustParsePath("/system/modules") + +// Note: As needed these helpers could be memoized. + +// ManifestStoragePath is the storage path used for the given named bundle manifest. +func ManifestStoragePath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest") +} + +// EtagStoragePath is the storage path used for the given named bundle etag. 
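RegisterSigner and SigningConfig.WithPlugin above form the extension point for signing bundles with something other than a local private key. A hedged sketch of a custom signer; the KMS delegation and the "kms" plugin id are hypothetical:

package kmssign

import (
	"github.com/open-policy-agent/opa/v1/bundle"
)

// kmsSigner is a hypothetical Signer that would delegate signing to an
// external key-management service instead of a local private key.
type kmsSigner struct{}

func (kmsSigner) GenerateSignedToken(_ []bundle.FileInfo, _ *bundle.SigningConfig, _ string) (string, error) {
	// A real implementation would build the JWS payload from the file list
	// and ask the KMS to sign it; a static value keeps the sketch short.
	return "<signed-jwt>", nil
}

func init() {
	// "_default" is reserved; any other id is accepted by RegisterSigner.
	if err := bundle.RegisterSigner("kms", kmsSigner{}); err != nil {
		panic(err)
	}
}

// signFiles selects the custom signer via SigningConfig.WithPlugin.
func signFiles(files []bundle.FileInfo) (string, error) {
	sc := bundle.NewSigningConfig("unused-by-kms-signer", "RS256", "").WithPlugin("kms")
	return bundle.GenerateSignedToken(files, sc, "my-key-id")
}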
+func EtagStoragePath(name string) storage.Path { + return append(BundlesBasePath, name, "etag") +} + +func namedBundlePath(name string) storage.Path { + return append(BundlesBasePath, name) +} + +func rootsPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "roots") +} + +func revisionPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "revision") +} + +func wasmModulePath(name string) storage.Path { + return append(BundlesBasePath, name, "wasm") +} + +func wasmEntrypointsPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "wasm") +} + +func metadataPath(name string) storage.Path { + return append(BundlesBasePath, name, "manifest", "metadata") +} + +func moduleRegoVersionPath(id string) storage.Path { + return append(ModulesInfoBasePath, strings.Trim(id, "/"), "rego_version") +} + +func moduleInfoPath(id string) storage.Path { + return append(ModulesInfoBasePath, strings.Trim(id, "/")) +} + +func read(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (interface{}, error) { + value, err := store.Read(ctx, txn, path) + if err != nil { + return nil, err + } + + if astValue, ok := value.(ast.Value); ok { + value, err = ast.JSON(astValue) + if err != nil { + return nil, err + } + } + + return value, nil +} + +// ReadBundleNamesFromStore will return a list of bundle names which have had their metadata stored. +func ReadBundleNamesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) ([]string, error) { + value, err := read(ctx, store, txn, BundlesBasePath) + if err != nil { + return nil, err + } + + bundleMap, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("corrupt manifest roots") + } + + bundles := make([]string, len(bundleMap)) + idx := 0 + for name := range bundleMap { + bundles[idx] = name + idx++ + } + return bundles, nil +} + +// WriteManifestToStore will write the manifest into the storage. This function is called when +// the bundle is activated. +func WriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, manifest Manifest) error { + return write(ctx, store, txn, ManifestStoragePath(name), manifest) +} + +// WriteEtagToStore will write the bundle etag into the storage. This function is called when the bundle is activated. +func WriteEtagToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name, etag string) error { + return write(ctx, store, txn, EtagStoragePath(name), etag) +} + +func write(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path, value interface{}) error { + if err := util.RoundTrip(&value); err != nil { + return err + } + + var dir []string + if len(path) > 1 { + dir = path[:len(path)-1] + } + + if err := storage.MakeDir(ctx, store, txn, dir); err != nil { + return err + } + + return store.Write(ctx, txn, storage.AddOp, path, value) +} + +// EraseManifestFromStore will remove the manifest from storage. This function is called +// when the bundle is deactivated. +func EraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { + path := namedBundlePath(name) + err := store.Write(ctx, txn, storage.RemoveOp, path, nil) + return suppressNotFound(err) +} + +// eraseBundleEtagFromStore will remove the bundle etag from storage. This function is called +// when the bundle is deactivated. 
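The manifest and etag helpers above all run inside a caller-supplied transaction. A brief, illustrative sketch of listing activated bundles and their roots (ReadBundleRootsFromStore appears a little further below in this file):

package storeexample

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/v1/bundle"
	"github.com/open-policy-agent/opa/v1/storage"
)

// listActivatedBundles reads bundle names and their roots inside a
// read-only transaction, using the store helpers from this file.
func listActivatedBundles(ctx context.Context, store storage.Store) error {
	txn, err := store.NewTransaction(ctx)
	if err != nil {
		return err
	}
	defer store.Abort(ctx, txn)

	names, err := bundle.ReadBundleNamesFromStore(ctx, store, txn)
	if err != nil {
		return err // a storage NotFound error means no bundle metadata has been written yet
	}
	for _, name := range names {
		roots, err := bundle.ReadBundleRootsFromStore(ctx, store, txn, name)
		if err != nil {
			return err
		}
		fmt.Println(name, roots)
	}
	return nil
}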
+func eraseBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { + path := EtagStoragePath(name) + err := store.Write(ctx, txn, storage.RemoveOp, path, nil) + return suppressNotFound(err) +} + +func suppressNotFound(err error) error { + if err == nil || storage.IsNotFound(err) { + return nil + } + return err +} + +func writeWasmModulesToStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string, b *Bundle) error { + basePath := wasmModulePath(name) + for _, wm := range b.WasmModules { + path := append(basePath, wm.Path) + err := write(ctx, store, txn, path, base64.StdEncoding.EncodeToString(wm.Raw)) + if err != nil { + return err + } + } + return nil +} + +func eraseWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) error { + path := wasmModulePath(name) + + err := store.Write(ctx, txn, storage.RemoveOp, path, nil) + return suppressNotFound(err) +} + +func eraseModuleRegoVersionsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, modules []string) error { + for _, module := range modules { + err := store.Write(ctx, txn, storage.RemoveOp, moduleInfoPath(module), nil) + if err := suppressNotFound(err); err != nil { + return err + } + } + return nil +} + +// ReadWasmMetadataFromStore will read Wasm module resolver metadata from the store. +func ReadWasmMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]WasmResolver, error) { + path := wasmEntrypointsPath(name) + value, err := read(ctx, store, txn, path) + if err != nil { + return nil, err + } + + bs, err := json.Marshal(value) + if err != nil { + return nil, fmt.Errorf("corrupt wasm manifest data") + } + + var wasmMetadata []WasmResolver + + err = util.UnmarshalJSON(bs, &wasmMetadata) + if err != nil { + return nil, fmt.Errorf("corrupt wasm manifest data") + } + + return wasmMetadata, nil +} + +// ReadWasmModulesFromStore will write Wasm module resolver metadata from the store. +func ReadWasmModulesFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string][]byte, error) { + path := wasmModulePath(name) + value, err := read(ctx, store, txn, path) + if err != nil { + return nil, err + } + + encodedModules, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("corrupt wasm modules") + } + + rawModules := map[string][]byte{} + for path, enc := range encodedModules { + encStr, ok := enc.(string) + if !ok { + return nil, fmt.Errorf("corrupt wasm modules") + } + bs, err := base64.StdEncoding.DecodeString(encStr) + if err != nil { + return nil, err + } + rawModules[path] = bs + } + return rawModules, nil +} + +// ReadBundleRootsFromStore returns the roots in the specified bundle. +// If the bundle is not activated, this function will return +// storage NotFound error. +func ReadBundleRootsFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) ([]string, error) { + value, err := read(ctx, store, txn, rootsPath(name)) + if err != nil { + return nil, err + } + + sl, ok := value.([]interface{}) + if !ok { + return nil, fmt.Errorf("corrupt manifest roots") + } + + roots := make([]string, len(sl)) + + for i := range sl { + roots[i], ok = sl[i].(string) + if !ok { + return nil, fmt.Errorf("corrupt manifest root") + } + } + + return roots, nil +} + +// ReadBundleRevisionFromStore returns the revision in the specified bundle. 
+// If the bundle is not activated, this function will return +// storage NotFound error. +func ReadBundleRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) { + return readRevisionFromStore(ctx, store, txn, revisionPath(name)) +} + +func readRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) { + value, err := read(ctx, store, txn, path) + if err != nil { + return "", err + } + + str, ok := value.(string) + if !ok { + return "", fmt.Errorf("corrupt manifest revision") + } + + return str, nil +} + +// ReadBundleMetadataFromStore returns the metadata in the specified bundle. +// If the bundle is not activated, this function will return +// storage NotFound error. +func ReadBundleMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (map[string]interface{}, error) { + return readMetadataFromStore(ctx, store, txn, metadataPath(name)) +} + +func readMetadataFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (map[string]interface{}, error) { + value, err := read(ctx, store, txn, path) + if err != nil { + return nil, suppressNotFound(err) + } + + data, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("corrupt manifest metadata") + } + + return data, nil +} + +// ReadBundleEtagFromStore returns the etag for the specified bundle. +// If the bundle is not activated, this function will return +// storage NotFound error. +func ReadBundleEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, name string) (string, error) { + return readEtagFromStore(ctx, store, txn, EtagStoragePath(name)) +} + +func readEtagFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, path storage.Path) (string, error) { + value, err := read(ctx, store, txn, path) + if err != nil { + return "", err + } + + str, ok := value.(string) + if !ok { + return "", fmt.Errorf("corrupt bundle etag") + } + + return str, nil +} + +// ActivateOpts defines options for the Activate API call. +type ActivateOpts struct { + Ctx context.Context + Store storage.Store + Txn storage.Transaction + TxnCtx *storage.Context + Compiler *ast.Compiler + Metrics metrics.Metrics + Bundles map[string]*Bundle // Optional + ExtraModules map[string]*ast.Module // Optional + AuthorizationDecisionRef ast.Ref + ParserOptions ast.ParserOptions + + legacy bool +} + +// Activate the bundle(s) by loading into the given Store. This will load policies, data, and record +// the manifest in storage. The compiler provided will have had the polices compiled on it. +func Activate(opts *ActivateOpts) error { + opts.legacy = false + return activateBundles(opts) +} + +// DeactivateOpts defines options for the Deactivate API call +type DeactivateOpts struct { + Ctx context.Context + Store storage.Store + Txn storage.Transaction + BundleNames map[string]struct{} + ParserOptions ast.ParserOptions +} + +// Deactivate the bundle(s). This will erase associated data, policies, and the manifest entry from the store. 
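Activate and Deactivate are intended to be called inside a single write transaction that the caller commits or aborts. A minimal sketch of activating one snapshot bundle into an in-memory store; inmem.New, ast.NewCompiler and metrics.New come from the wider OPA v1 API rather than this file:

package activateexample

import (
	"context"

	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/bundle"
	"github.com/open-policy-agent/opa/v1/metrics"
	"github.com/open-policy-agent/opa/v1/storage"
	"github.com/open-policy-agent/opa/v1/storage/inmem"
)

// activateOne writes a single snapshot bundle into a fresh in-memory store,
// committing only if activation succeeds.
func activateOne(ctx context.Context, b *bundle.Bundle) (storage.Store, error) {
	store := inmem.New()
	txn, err := store.NewTransaction(ctx, storage.WriteParams)
	if err != nil {
		return nil, err
	}

	err = bundle.Activate(&bundle.ActivateOpts{
		Ctx:      ctx,
		Store:    store,
		Txn:      txn,
		Compiler: ast.NewCompiler(),
		Metrics:  metrics.New(),
		Bundles:  map[string]*bundle.Bundle{"example": b},
	})
	if err != nil {
		store.Abort(ctx, txn)
		return nil, err
	}
	if err := store.Commit(ctx, txn); err != nil {
		return nil, err
	}
	return store, nil
}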
+func Deactivate(opts *DeactivateOpts) error { + erase := map[string]struct{}{} + for name := range opts.BundleNames { + roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name) + if suppressNotFound(err) != nil { + return err + } + for _, root := range roots { + erase[root] = struct{}{} + } + } + _, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, opts.BundleNames, erase) + return err +} + +func activateBundles(opts *ActivateOpts) error { + + // Build collections of bundle names, modules, and roots to erase + erase := map[string]struct{}{} + names := map[string]struct{}{} + deltaBundles := map[string]*Bundle{} + snapshotBundles := map[string]*Bundle{} + + for name, b := range opts.Bundles { + if b.Type() == DeltaBundleType { + deltaBundles[name] = b + } else { + snapshotBundles[name] = b + names[name] = struct{}{} + + roots, err := ReadBundleRootsFromStore(opts.Ctx, opts.Store, opts.Txn, name) + if suppressNotFound(err) != nil { + return err + } + for _, root := range roots { + erase[root] = struct{}{} + } + + // Erase data at new roots to prepare for writing the new data + for _, root := range *b.Manifest.Roots { + erase[root] = struct{}{} + } + } + } + + // Before changing anything make sure the roots don't collide with any + // other bundles that already are activated or other bundles being activated. + err := hasRootsOverlap(opts.Ctx, opts.Store, opts.Txn, opts.Bundles) + if err != nil { + return err + } + + if len(deltaBundles) != 0 { + err := activateDeltaBundles(opts, deltaBundles) + if err != nil { + return err + } + } + + // Erase data and policies at new + old roots, and remove the old + // manifests before activating a new snapshot bundle. + remaining, err := eraseBundles(opts.Ctx, opts.Store, opts.Txn, opts.ParserOptions, names, erase) + if err != nil { + return err + } + + // Validate data in bundle does not contain paths outside the bundle's roots. + for _, b := range snapshotBundles { + + if b.lazyLoadingMode { + + for _, item := range b.Raw { + path := filepath.ToSlash(item.Path) + + if filepath.Base(path) == dataFile || filepath.Base(path) == yamlDataFile { + var val map[string]json.RawMessage + err = util.Unmarshal(item.Value, &val) + if err == nil { + err = doDFS(val, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots) + if err != nil { + return err + } + } else { + // Build an object for the value + p := getNormalizedPath(path) + + if len(p) == 0 { + return fmt.Errorf("root value must be object") + } + + // verify valid YAML or JSON value + var x interface{} + err := util.Unmarshal(item.Value, &x) + if err != nil { + return err + } + + value := item.Value + dir := map[string]json.RawMessage{} + for i := len(p) - 1; i > 0; i-- { + dir[p[i]] = value + + bs, err := json.Marshal(dir) + if err != nil { + return err + } + + value = bs + dir = map[string]json.RawMessage{} + } + dir[p[0]] = value + + err = doDFS(dir, filepath.Dir(strings.Trim(path, "/")), *b.Manifest.Roots) + if err != nil { + return err + } + } + } + } + } + } + + // Compile the modules all at once to avoid having to re-do work. 
+ remainingAndExtra := make(map[string]*ast.Module) + for name, mod := range remaining { + remainingAndExtra[name] = mod + } + for name, mod := range opts.ExtraModules { + remainingAndExtra[name] = mod + } + + err = compileModules(opts.Compiler, opts.Metrics, snapshotBundles, remainingAndExtra, opts.legacy, opts.AuthorizationDecisionRef) + if err != nil { + return err + } + + if err := writeDataAndModules(opts.Ctx, opts.Store, opts.Txn, opts.TxnCtx, snapshotBundles, opts.legacy, opts.ParserOptions.RegoVersion); err != nil { + return err + } + + if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 { + return err + } + + for name, b := range snapshotBundles { + if err := writeManifestToStore(opts, name, b.Manifest); err != nil { + return err + } + + if err := writeEtagToStore(opts, name, b.Etag); err != nil { + return err + } + + if err := writeWasmModulesToStore(opts.Ctx, opts.Store, opts.Txn, name, b); err != nil { + return err + } + } + + return nil +} + +func doDFS(obj map[string]json.RawMessage, path string, roots []string) error { + if len(roots) == 1 && roots[0] == "" { + return nil + } + + for key := range obj { + + newPath := filepath.Join(strings.Trim(path, "/"), key) + + // Note: filepath.Join can return paths with '\' separators, always use + // filepath.ToSlash to keep them normalized. + newPath = strings.TrimLeft(normalizePath(newPath), "/.") + + contains := false + prefix := false + if RootPathsContain(roots, newPath) { + contains = true + } else { + for i := range roots { + if strings.HasPrefix(strings.Trim(roots[i], "/"), newPath) { + prefix = true + break + } + } + } + + if !contains && !prefix { + return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath) + } + + if contains { + continue + } + + var next map[string]json.RawMessage + err := util.Unmarshal(obj[key], &next) + if err != nil { + return fmt.Errorf("manifest roots %v do not permit data at path '/%s' (hint: check bundle directory structure)", roots, newPath) + } + + if err := doDFS(next, newPath, roots); err != nil { + return err + } + } + return nil +} + +func activateDeltaBundles(opts *ActivateOpts, bundles map[string]*Bundle) error { + + // Check that the manifest roots and wasm resolvers in the delta bundle + // match with those currently in the store + for name, b := range bundles { + value, err := opts.Store.Read(opts.Ctx, opts.Txn, ManifestStoragePath(name)) + if err != nil { + if storage.IsNotFound(err) { + continue + } + return err + } + + manifest, err := valueToManifest(value) + if err != nil { + return fmt.Errorf("corrupt manifest data: %w", err) + } + + if !b.Manifest.equalWasmResolversAndRoots(manifest) { + return fmt.Errorf("delta bundle '%s' has wasm resolvers or manifest roots that are different from those in the store", name) + } + } + + for _, b := range bundles { + err := applyPatches(opts.Ctx, opts.Store, opts.Txn, b.Patch.Data) + if err != nil { + return err + } + } + + if err := ast.CheckPathConflicts(opts.Compiler, storage.NonEmpty(opts.Ctx, opts.Store, opts.Txn)); len(err) > 0 { + return err + } + + for name, b := range bundles { + if err := writeManifestToStore(opts, name, b.Manifest); err != nil { + return err + } + + if err := writeEtagToStore(opts, name, b.Etag); err != nil { + return err + } + } + + return nil +} + +func valueToManifest(v interface{}) (Manifest, error) { + if astV, ok := v.(ast.Value); ok { + var err error + v, err = ast.JSON(astV) + if err 
!= nil { + return Manifest{}, err + } + } + + var manifest Manifest + + bs, err := json.Marshal(v) + if err != nil { + return Manifest{}, err + } + + err = util.UnmarshalJSON(bs, &manifest) + if err != nil { + return Manifest{}, err + } + + return manifest, nil +} + +// erase bundles by name and roots. This will clear all policies and data at its roots and remove its +// manifest from storage. +func eraseBundles(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, names map[string]struct{}, roots map[string]struct{}) (map[string]*ast.Module, error) { + + if err := eraseData(ctx, store, txn, roots); err != nil { + return nil, err + } + + remaining, removed, err := erasePolicies(ctx, store, txn, parserOpts, roots) + if err != nil { + return nil, err + } + + for name := range names { + if err := EraseManifestFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { + return nil, err + } + + if err := LegacyEraseManifestFromStore(ctx, store, txn); suppressNotFound(err) != nil { + return nil, err + } + + if err := eraseBundleEtagFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { + return nil, err + } + + if err := eraseWasmModulesFromStore(ctx, store, txn, name); suppressNotFound(err) != nil { + return nil, err + } + } + + err = eraseModuleRegoVersionsFromStore(ctx, store, txn, removed) + if err != nil { + return nil, err + } + + return remaining, nil +} + +func eraseData(ctx context.Context, store storage.Store, txn storage.Transaction, roots map[string]struct{}) error { + for root := range roots { + path, ok := storage.ParsePathEscaped("/" + root) + if !ok { + return fmt.Errorf("manifest root path invalid: %v", root) + } + + if len(path) > 0 { + if err := store.Write(ctx, txn, storage.RemoveOp, path, nil); suppressNotFound(err) != nil { + return err + } + } + } + return nil +} + +type moduleInfo struct { + RegoVersion ast.RegoVersion `json:"rego_version"` +} + +func readModuleInfoFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (map[string]moduleInfo, error) { + value, err := read(ctx, store, txn, ModulesInfoBasePath) + if suppressNotFound(err) != nil { + return nil, err + } + + if value == nil { + return nil, nil + } + + if m, ok := value.(map[string]any); ok { + versions := make(map[string]moduleInfo, len(m)) + + for k, v := range m { + if m0, ok := v.(map[string]any); ok { + if ver, ok := m0["rego_version"]; ok { + if vs, ok := ver.(json.Number); ok { + i, err := vs.Int64() + if err != nil { + return nil, fmt.Errorf("corrupt rego version") + } + versions[k] = moduleInfo{RegoVersion: ast.RegoVersionFromInt(int(i))} + } + } + } + } + return versions, nil + } + + return nil, fmt.Errorf("corrupt rego version") +} + +func erasePolicies(ctx context.Context, store storage.Store, txn storage.Transaction, parserOpts ast.ParserOptions, roots map[string]struct{}) (map[string]*ast.Module, []string, error) { + + ids, err := store.ListPolicies(ctx, txn) + if err != nil { + return nil, nil, err + } + + modulesInfo, err := readModuleInfoFromStore(ctx, store, txn) + if err != nil { + return nil, nil, fmt.Errorf("failed to read module info from store: %w", err) + } + + getRegoVersion := func(modId string) (ast.RegoVersion, bool) { + info, ok := modulesInfo[modId] + if !ok { + return ast.RegoUndefined, false + } + return info.RegoVersion, true + } + + remaining := map[string]*ast.Module{} + var removed []string + + for _, id := range ids { + bs, err := store.GetPolicy(ctx, txn, id) + if err != nil { + return nil, 
nil, err + } + + parserOptsCpy := parserOpts + if regoVersion, ok := getRegoVersion(id); ok { + parserOptsCpy.RegoVersion = regoVersion + } + + module, err := ast.ParseModuleWithOpts(id, string(bs), parserOptsCpy) + if err != nil { + return nil, nil, err + } + path, err := module.Package.Path.Ptr() + if err != nil { + return nil, nil, err + } + deleted := false + for root := range roots { + if RootPathsContain([]string{root}, path) { + if err := store.DeletePolicy(ctx, txn, id); err != nil { + return nil, nil, err + } + deleted = true + break + } + } + + if deleted { + removed = append(removed, id) + } else { + remaining[id] = module + } + } + + return remaining, removed, nil +} + +func writeManifestToStore(opts *ActivateOpts, name string, manifest Manifest) error { + // Always write manifests to the named location. If the plugin is in the older style config + // then also write to the old legacy unnamed location. + if err := WriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, name, manifest); err != nil { + return err + } + + if opts.legacy { + if err := LegacyWriteManifestToStore(opts.Ctx, opts.Store, opts.Txn, manifest); err != nil { + return err + } + } + + return nil +} + +func writeEtagToStore(opts *ActivateOpts, name, etag string) error { + if err := WriteEtagToStore(opts.Ctx, opts.Store, opts.Txn, name, etag); err != nil { + return err + } + + return nil +} + +func writeModuleRegoVersionToStore(ctx context.Context, store storage.Store, txn storage.Transaction, b *Bundle, + mf ModuleFile, storagePath string, runtimeRegoVersion ast.RegoVersion) error { + + var regoVersion ast.RegoVersion + if mf.Parsed != nil { + regoVersion = mf.Parsed.RegoVersion() + } + + if regoVersion == ast.RegoUndefined { + var err error + regoVersion, err = b.RegoVersionForFile(mf.Path, ast.RegoUndefined) + if err != nil { + return fmt.Errorf("failed to get rego version for module '%s' in bundle: %w", mf.Path, err) + } + } + + if regoVersion != ast.RegoUndefined && regoVersion != runtimeRegoVersion { + if err := write(ctx, store, txn, moduleRegoVersionPath(storagePath), regoVersion.Int()); err != nil { + return fmt.Errorf("failed to write rego version for module '%s': %w", storagePath, err) + } + } + return nil +} + +func writeDataAndModules(ctx context.Context, store storage.Store, txn storage.Transaction, txnCtx *storage.Context, bundles map[string]*Bundle, legacy bool, runtimeRegoVersion ast.RegoVersion) error { + params := storage.WriteParams + params.Context = txnCtx + + for name, b := range bundles { + if len(b.Raw) == 0 { + // Write data from each new bundle into the store. Only write under the + // roots contained in their manifest. + if err := writeData(ctx, store, txn, *b.Manifest.Roots, b.Data); err != nil { + return err + } + + for _, mf := range b.Modules { + var path string + + // For backwards compatibility, in legacy mode, upsert policies to + // the unprefixed path. 
+ if legacy { + path = mf.Path + } else { + path = modulePathWithPrefix(name, mf.Path) + } + + if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil { + return err + } + + if err := writeModuleRegoVersionToStore(ctx, store, txn, b, mf, path, runtimeRegoVersion); err != nil { + return err + } + } + } else { + params.BasePaths = *b.Manifest.Roots + + err := store.Truncate(ctx, txn, params, NewIterator(b.Raw)) + if err != nil { + return fmt.Errorf("store truncate failed for bundle '%s': %v", name, err) + } + + for _, f := range b.Raw { + if strings.HasSuffix(f.Path, RegoExt) { + p, err := getFileStoragePath(f.Path) + if err != nil { + return fmt.Errorf("failed get storage path for module '%s' in bundle '%s': %w", f.Path, name, err) + } + + if m := f.module; m != nil { + // 'f.module.Path' contains the module's path as it relates to the bundle root, and can be used for looking up the rego-version. + // 'f.Path' can differ, based on how the bundle reader was initialized. + if err := writeModuleRegoVersionToStore(ctx, store, txn, b, *m, p.String(), runtimeRegoVersion); err != nil { + return err + } + } + } + } + } + } + + return nil +} + +func writeData(ctx context.Context, store storage.Store, txn storage.Transaction, roots []string, data map[string]interface{}) error { + for _, root := range roots { + path, ok := storage.ParsePathEscaped("/" + root) + if !ok { + return fmt.Errorf("manifest root path invalid: %v", root) + } + if value, ok := lookup(path, data); ok { + if len(path) > 0 { + if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { + return err + } + } + if err := store.Write(ctx, txn, storage.AddOp, path, value); err != nil { + return err + } + } + } + return nil +} + +func compileModules(compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool, authorizationDecisionRef ast.Ref) error { + + m.Timer(metrics.RegoModuleCompile).Start() + defer m.Timer(metrics.RegoModuleCompile).Stop() + + modules := map[string]*ast.Module{} + + // preserve any modules already on the compiler + for name, module := range compiler.Modules { + modules[name] = module + } + + // preserve any modules passed in from the store + for name, module := range extraModules { + modules[name] = module + } + + // include all the new bundle modules + for bundleName, b := range bundles { + if legacy { + for _, mf := range b.Modules { + modules[mf.Path] = mf.Parsed + } + } else { + for name, module := range b.ParsedModules(bundleName) { + modules[name] = module + } + } + } + + if compiler.Compile(modules); compiler.Failed() { + return compiler.Errors + } + + if authorizationDecisionRef.Equal(ast.EmptyRef()) { + return nil + } + + return iCompiler.VerifyAuthorizationPolicySchema(compiler, authorizationDecisionRef) +} + +func writeModules(ctx context.Context, store storage.Store, txn storage.Transaction, compiler *ast.Compiler, m metrics.Metrics, bundles map[string]*Bundle, extraModules map[string]*ast.Module, legacy bool) error { + + m.Timer(metrics.RegoModuleCompile).Start() + defer m.Timer(metrics.RegoModuleCompile).Stop() + + modules := map[string]*ast.Module{} + + // preserve any modules already on the compiler + for name, module := range compiler.Modules { + modules[name] = module + } + + // preserve any modules passed in from the store + for name, module := range extraModules { + modules[name] = module + } + + // include all the new bundle modules + for bundleName, b := range bundles { + if legacy { + for _, mf := range 
b.Modules { + modules[mf.Path] = mf.Parsed + } + } else { + for name, module := range b.ParsedModules(bundleName) { + modules[name] = module + } + } + } + + if compiler.Compile(modules); compiler.Failed() { + return compiler.Errors + } + for bundleName, b := range bundles { + for _, mf := range b.Modules { + var path string + + // For backwards compatibility, in legacy mode, upsert policies to + // the unprefixed path. + if legacy { + path = mf.Path + } else { + path = modulePathWithPrefix(bundleName, mf.Path) + } + + if err := store.UpsertPolicy(ctx, txn, path, mf.Raw); err != nil { + return err + } + } + } + return nil +} + +func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) { + if len(path) == 0 { + return data, true + } + for i := 0; i < len(path)-1; i++ { + value, ok := data[path[i]] + if !ok { + return nil, false + } + obj, ok := value.(map[string]interface{}) + if !ok { + return nil, false + } + data = obj + } + value, ok := data[path[len(path)-1]] + return value, ok +} + +func hasRootsOverlap(ctx context.Context, store storage.Store, txn storage.Transaction, bundles map[string]*Bundle) error { + collisions := map[string][]string{} + allBundles, err := ReadBundleNamesFromStore(ctx, store, txn) + if suppressNotFound(err) != nil { + return err + } + + allRoots := map[string][]string{} + + // Build a map of roots for existing bundles already in the system + for _, name := range allBundles { + roots, err := ReadBundleRootsFromStore(ctx, store, txn, name) + if suppressNotFound(err) != nil { + return err + } + allRoots[name] = roots + } + + // Add in any bundles that are being activated, overwrite existing roots + // with new ones where bundles are in both groups. + for name, bundle := range bundles { + allRoots[name] = *bundle.Manifest.Roots + } + + // Now check for each new bundle if it conflicts with any of the others + for name, bundle := range bundles { + for otherBundle, otherRoots := range allRoots { + if name == otherBundle { + // Skip the current bundle being checked + continue + } + + // Compare the "new" roots with other existing (or a different bundles new roots) + for _, newRoot := range *bundle.Manifest.Roots { + for _, otherRoot := range otherRoots { + if RootPathsOverlap(newRoot, otherRoot) { + collisions[otherBundle] = append(collisions[otherBundle], newRoot) + } + } + } + } + } + + if len(collisions) > 0 { + var bundleNames []string + for name := range collisions { + bundleNames = append(bundleNames, name) + } + return fmt.Errorf("detected overlapping roots in bundle manifest with: %s", bundleNames) + } + return nil +} + +func applyPatches(ctx context.Context, store storage.Store, txn storage.Transaction, patches []PatchOperation) error { + for _, pat := range patches { + + // construct patch path + path, ok := patch.ParsePatchPathEscaped("/" + strings.Trim(pat.Path, "/")) + if !ok { + return fmt.Errorf("error parsing patch path") + } + + var op storage.PatchOp + switch pat.Op { + case "upsert": + op = storage.AddOp + + _, err := store.Read(ctx, txn, path[:len(path)-1]) + if err != nil { + if !storage.IsNotFound(err) { + return err + } + + if err := storage.MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { + return err + } + } + case "remove": + op = storage.RemoveOp + case "replace": + op = storage.ReplaceOp + default: + return fmt.Errorf("bad patch operation: %v", pat.Op) + } + + // apply the patch + if err := store.Write(ctx, txn, op, path, pat.Value); err != nil { + return err + } + } + + return nil +} + +// Helpers for the older 
single (unnamed) bundle style manifest storage. + +// LegacyManifestStoragePath is the older unnamed bundle path for manifests to be stored. +// Deprecated: Use ManifestStoragePath and named bundles instead. +var legacyManifestStoragePath = storage.MustParsePath("/system/bundle/manifest") +var legacyRevisionStoragePath = append(legacyManifestStoragePath, "revision") + +// LegacyWriteManifestToStore will write the bundle manifest to the older single (unnamed) bundle manifest location. +// Deprecated: Use WriteManifestToStore and named bundles instead. +func LegacyWriteManifestToStore(ctx context.Context, store storage.Store, txn storage.Transaction, manifest Manifest) error { + return write(ctx, store, txn, legacyManifestStoragePath, manifest) +} + +// LegacyEraseManifestFromStore will erase the bundle manifest from the older single (unnamed) bundle manifest location. +// Deprecated: Use WriteManifestToStore and named bundles instead. +func LegacyEraseManifestFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) error { + err := store.Write(ctx, txn, storage.RemoveOp, legacyManifestStoragePath, nil) + if err != nil { + return err + } + return nil +} + +// LegacyReadRevisionFromStore will read the bundle manifest revision from the older single (unnamed) bundle manifest location. +// Deprecated: Use ReadBundleRevisionFromStore and named bundles instead. +func LegacyReadRevisionFromStore(ctx context.Context, store storage.Store, txn storage.Transaction) (string, error) { + return readRevisionFromStore(ctx, store, txn, legacyRevisionStoragePath) +} + +// ActivateLegacy calls Activate for the bundles but will also write their manifest to the older unnamed store location. +// Deprecated: Use Activate with named bundles instead. +func ActivateLegacy(opts *ActivateOpts) error { + opts.legacy = true + return activateBundles(opts) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go new file mode 100644 index 000000000..2a4bb02c0 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/bundle/verify.go @@ -0,0 +1,231 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package bundle provide helpers that assist in the bundle signature verification process +package bundle + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/open-policy-agent/opa/internal/jwx/jwa" + "github.com/open-policy-agent/opa/internal/jwx/jws" + "github.com/open-policy-agent/opa/internal/jwx/jws/verify" + "github.com/open-policy-agent/opa/v1/util" +) + +const defaultVerifierID = "_default" + +var verifiers map[string]Verifier + +// Verifier is the interface expected for implementations that verify bundle signatures. +type Verifier interface { + VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error) +} + +// VerifyBundleSignature will retrieve the Verifier implementation based +// on the Plugin specified in SignaturesConfig, and call its implementation +// of VerifyBundleSignature. VerifyBundleSignature verifies the bundle signature +// using the given public keys or secret. 
If a signature is verified, it keeps +// track of the files specified in the JWT payload +func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) { + // default implementation does not return a nil for map, so don't + // do it here either + files := make(map[string]FileInfo) + var plugin string + // for backwards compatibility, check if there is no plugin specified, and use default + if sc.Plugin == "" { + plugin = defaultVerifierID + } else { + plugin = sc.Plugin + } + verifier, err := GetVerifier(plugin) + if err != nil { + return files, err + } + return verifier.VerifyBundleSignature(sc, bvc) +} + +// DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking +// the JWT signature using a locally-accessible public key. +type DefaultVerifier struct{} + +// VerifyBundleSignature verifies the bundle signature using the given public keys or secret. +// If a signature is verified, it keeps track of the files specified in the JWT payload +func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) { + files := make(map[string]FileInfo) + + if len(sc.Signatures) == 0 { + return files, fmt.Errorf(".signatures.json: missing JWT (expected exactly one)") + } + + if len(sc.Signatures) > 1 { + return files, fmt.Errorf(".signatures.json: multiple JWTs not supported (expected exactly one)") + } + + for _, token := range sc.Signatures { + payload, err := verifyJWTSignature(token, bvc) + if err != nil { + return files, err + } + + for _, file := range payload.Files { + files[file.Name] = file + } + } + return files, nil +} + +func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) { + // decode JWT to check if the header specifies the key to use and/or if claims have the scope. + + parts, err := jws.SplitCompact(token) + if err != nil { + return nil, err + } + + var decodedHeader []byte + if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil { + return nil, fmt.Errorf("failed to base64 decode JWT headers: %w", err) + } + + var hdr jws.StandardHeaders + if err := json.Unmarshal(decodedHeader, &hdr); err != nil { + return nil, fmt.Errorf("failed to parse JWT headers: %w", err) + } + + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) + if err != nil { + return nil, err + } + + var ds DecodedSignature + if err := json.Unmarshal(payload, &ds); err != nil { + return nil, err + } + + // check for the id of the key to use for JWT signature verification + // first in the OPA config. If not found, then check the JWT kid. + keyID := bvc.KeyID + if keyID == "" { + keyID = hdr.KeyID + } + if keyID == "" { + // If header has no key id, check the deprecated key claim. 
+ keyID = ds.KeyID + } + + if keyID == "" { + return nil, fmt.Errorf("verification key ID is empty") + } + + // now that we have the keyID, fetch the actual key + keyConfig, err := bvc.GetPublicKey(keyID) + if err != nil { + return nil, err + } + + // verify JWT signature + alg := jwa.SignatureAlgorithm(keyConfig.Algorithm) + key, err := verify.GetSigningKey(keyConfig.Key, alg) + if err != nil { + return nil, err + } + + _, err = jws.Verify([]byte(token), alg, key) + if err != nil { + return nil, err + } + + // verify the scope + scope := bvc.Scope + if scope == "" { + scope = keyConfig.Scope + } + + if ds.Scope != scope { + return nil, fmt.Errorf("scope mismatch") + } + return &ds, nil +} + +// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature +func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error { + var file FileInfo + var ok bool + + if file, ok = files[path]; !ok { + return fmt.Errorf("file %v not included in bundle signature", path) + } + + if file.Algorithm == "" { + return fmt.Errorf("no hashing algorithm provided for file %v", path) + } + + hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm)) + if err != nil { + return err + } + + // hash the file content + // For unstructured files, hash the byte stream of the file + // For structured files, read the byte stream and parse into a JSON structure; + // then recursively order the fields of all objects alphabetically and then apply + // the hash function to result to compute the hash. This ensures that the digital signature is + // independent of whitespace and other non-semantic JSON features. + var value interface{} + if IsStructuredDoc(path) { + err := util.Unmarshal(data.Bytes(), &value) + if err != nil { + return err + } + } else { + value = data.Bytes() + } + + bs, err := hash.HashFile(value) + if err != nil { + return err + } + + // compare file hash with same file in the JWT payloads + fb, err := hex.DecodeString(file.Hash) + if err != nil { + return err + } + + if !bytes.Equal(fb, bs) { + return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs) + } + + delete(files, path) + return nil +} + +// GetVerifier returns the Verifier registered under the given id +func GetVerifier(id string) (Verifier, error) { + verifier, ok := verifiers[id] + if !ok { + return nil, fmt.Errorf("no verifier exists under id %s", id) + } + return verifier, nil +} + +// RegisterVerifier registers a Verifier under the given id +func RegisterVerifier(id string, v Verifier) error { + if id == defaultVerifierID { + return fmt.Errorf("verifier id %s is reserved, use a different id", id) + } + verifiers[id] = v + return nil +} + +func init() { + verifiers = map[string]Verifier{ + defaultVerifierID: &DefaultVerifier{}, + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go new file mode 100644 index 000000000..5b0bb1ea5 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/capabilities/capabilities.go @@ -0,0 +1,18 @@ +// Copyright 2021 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +//go:build go1.16 +// +build go1.16 + +package capabilities + +import ( + v0 "github.com/open-policy-agent/opa/capabilities" +) + +// FS contains the embedded capabilities/ directory of the built version, +// which has all the capabilities of previous versions: +// "v0.18.0.json" contains the capabilities JSON of version v0.18.0, etc + +var FS = v0.FS diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/config/config.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/config/config.go new file mode 100644 index 000000000..09adb556f --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/config/config.go @@ -0,0 +1,259 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package config implements OPA configuration file parsing and validation. +package config + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + + "github.com/open-policy-agent/opa/internal/ref" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" + "github.com/open-policy-agent/opa/v1/version" +) + +// Config represents the configuration file that OPA can be started with. +type Config struct { + Services json.RawMessage `json:"services,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Discovery json.RawMessage `json:"discovery,omitempty"` + Bundle json.RawMessage `json:"bundle,omitempty"` // Deprecated: Use `bundles` instead + Bundles json.RawMessage `json:"bundles,omitempty"` + DecisionLogs json.RawMessage `json:"decision_logs,omitempty"` + Status json.RawMessage `json:"status,omitempty"` + Plugins map[string]json.RawMessage `json:"plugins,omitempty"` + Keys json.RawMessage `json:"keys,omitempty"` + DefaultDecision *string `json:"default_decision,omitempty"` + DefaultAuthorizationDecision *string `json:"default_authorization_decision,omitempty"` + Caching json.RawMessage `json:"caching,omitempty"` + NDBuiltinCache bool `json:"nd_builtin_cache,omitempty"` + PersistenceDirectory *string `json:"persistence_directory,omitempty"` + DistributedTracing json.RawMessage `json:"distributed_tracing,omitempty"` + Server *struct { + Encoding json.RawMessage `json:"encoding,omitempty"` + Decoding json.RawMessage `json:"decoding,omitempty"` + Metrics json.RawMessage `json:"metrics,omitempty"` + } `json:"server,omitempty"` + Storage *struct { + Disk json.RawMessage `json:"disk,omitempty"` + } `json:"storage,omitempty"` + Extra map[string]json.RawMessage `json:"-"` +} + +// ParseConfig returns a valid Config object with defaults injected. The id +// and version parameters will be set in the labels map. 
+func ParseConfig(raw []byte, id string) (*Config, error) { + // NOTE(sr): based on https://stackoverflow.com/a/33499066/993018 + var result Config + objValue := reflect.ValueOf(&result).Elem() + knownFields := map[string]reflect.Value{} + for i := 0; i != objValue.NumField(); i++ { + jsonName := strings.Split(objValue.Type().Field(i).Tag.Get("json"), ",")[0] + knownFields[jsonName] = objValue.Field(i) + } + + if err := util.Unmarshal(raw, &result.Extra); err != nil { + return nil, err + } + + for key, chunk := range result.Extra { + if field, found := knownFields[key]; found { + if err := util.Unmarshal(chunk, field.Addr().Interface()); err != nil { + return nil, err + } + delete(result.Extra, key) + } + } + if len(result.Extra) == 0 { + result.Extra = nil + } + return &result, result.validateAndInjectDefaults(id) +} + +// PluginNames returns a sorted list of names of enabled plugins. +func (c Config) PluginNames() (result []string) { + if c.Bundle != nil || c.Bundles != nil { + result = append(result, "bundles") + } + if c.Status != nil { + result = append(result, "status") + } + if c.DecisionLogs != nil { + result = append(result, "decision_logs") + } + for name := range c.Plugins { + result = append(result, name) + } + sort.Strings(result) + return result +} + +// PluginsEnabled returns true if one or more plugin features are enabled. +// +// Deprecated. Use PluginNames instead. +func (c Config) PluginsEnabled() bool { + return c.Bundle != nil || c.Bundles != nil || c.DecisionLogs != nil || c.Status != nil || len(c.Plugins) > 0 +} + +// DefaultDecisionRef returns the default decision as a reference. +func (c Config) DefaultDecisionRef() ast.Ref { + r, _ := ref.ParseDataPath(*c.DefaultDecision) + return r +} + +// DefaultAuthorizationDecisionRef returns the default authorization decision +// as a reference. +func (c Config) DefaultAuthorizationDecisionRef() ast.Ref { + r, _ := ref.ParseDataPath(*c.DefaultAuthorizationDecision) + return r +} + +// NDBuiltinCacheEnabled returns if the ND builtins cache should be used. 
+func (c Config) NDBuiltinCacheEnabled() bool { + return c.NDBuiltinCache +} + +func (c *Config) validateAndInjectDefaults(id string) error { + + if c.DefaultDecision == nil { + s := defaultDecisionPath + c.DefaultDecision = &s + } + + _, err := ref.ParseDataPath(*c.DefaultDecision) + if err != nil { + return err + } + + if c.DefaultAuthorizationDecision == nil { + s := defaultAuthorizationDecisionPath + c.DefaultAuthorizationDecision = &s + } + + _, err = ref.ParseDataPath(*c.DefaultAuthorizationDecision) + if err != nil { + return err + } + + if c.Labels == nil { + c.Labels = map[string]string{} + } + + c.Labels["id"] = id + c.Labels["version"] = version.Version + + return nil +} + +// GetPersistenceDirectory returns the configured persistence directory, or $PWD/.opa if none is configured +func (c Config) GetPersistenceDirectory() (string, error) { + if c.PersistenceDirectory == nil { + pwd, err := os.Getwd() + if err != nil { + return "", err + } + return filepath.Join(pwd, ".opa"), nil + } + return *c.PersistenceDirectory, nil +} + +// ActiveConfig returns OPA's active configuration +// with the credentials and crypto keys removed +func (c *Config) ActiveConfig() (interface{}, error) { + bs, err := json.Marshal(c) + if err != nil { + return nil, err + } + + var result map[string]interface{} + if err := util.UnmarshalJSON(bs, &result); err != nil { + return nil, err + } + for k, e := range c.Extra { + var v any + if err := util.UnmarshalJSON(e, &v); err != nil { + return nil, err + } + result[k] = v + } + + if err := removeServiceCredentials(result["services"]); err != nil { + return nil, err + } + + if err := removeCryptoKeys(result["keys"]); err != nil { + return nil, err + } + + return result, nil +} + +func removeServiceCredentials(x interface{}) error { + switch x := x.(type) { + case nil: + return nil + case []interface{}: + for _, v := range x { + err := removeKey(v, "credentials") + if err != nil { + return err + } + } + + case map[string]interface{}: + for _, v := range x { + err := removeKey(v, "credentials") + if err != nil { + return err + } + } + default: + return fmt.Errorf("illegal service config type: %T", x) + } + + return nil +} + +func removeCryptoKeys(x interface{}) error { + switch x := x.(type) { + case nil: + return nil + case map[string]interface{}: + for _, v := range x { + err := removeKey(v, "key", "private_key") + if err != nil { + return err + } + } + default: + return fmt.Errorf("illegal keys config type: %T", x) + } + + return nil +} + +func removeKey(x interface{}, keys ...string) error { + val, ok := x.(map[string]interface{}) + if !ok { + return fmt.Errorf("type assertion error") + } + + for _, key := range keys { + delete(val, key) + } + + return nil +} + +const ( + defaultDecisionPath = "/system/main" + defaultAuthorizationDecisionPath = "/system/authz/allow" +) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/format/format.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/format/format.go new file mode 100644 index 000000000..e86964d1b --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/format/format.go @@ -0,0 +1,1673 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package format implements formatting of Rego source files. 
+package format + +import ( + "bytes" + "fmt" + "regexp" + "sort" + "strings" + "unicode" + + "github.com/open-policy-agent/opa/internal/future" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/types" +) + +// Opts lets you control the code formatting via `AstWithOpts()`. +type Opts struct { + // IgnoreLocations instructs the formatter not to use the AST nodes' locations + // into account when laying out the code: notably, when the input is the result + // of partial evaluation, arguments maybe have been shuffled around, but still + // carry along their original source locations. + IgnoreLocations bool + + // RegoVersion is the version of Rego to format code for. + RegoVersion ast.RegoVersion + + // ParserOptions is the parser options used when parsing the module to be formatted. + ParserOptions *ast.ParserOptions + + // DropV0Imports instructs the formatter to drop all v0 imports from the module; i.e. 'rego.v1' and 'future.keywords' imports. + // Imports are only removed if [Opts.RegoVersion] makes them redundant. + DropV0Imports bool +} + +func (o Opts) effectiveRegoVersion() ast.RegoVersion { + if o.RegoVersion == ast.RegoUndefined { + return ast.DefaultRegoVersion + } + return o.RegoVersion +} + +// defaultLocationFile is the file name used in `Ast()` for terms +// without a location, as could happen when pretty-printing the +// results of partial eval. +const defaultLocationFile = "__format_default__" + +// Source formats a Rego source file. The bytes provided must describe a complete +// Rego module. If they don't, Source will return an error resulting from the attempt +// to parse the bytes. +func Source(filename string, src []byte) ([]byte, error) { + return SourceWithOpts(filename, src, Opts{}) +} + +func SourceWithOpts(filename string, src []byte, opts Opts) ([]byte, error) { + regoVersion := opts.effectiveRegoVersion() + + var parserOpts ast.ParserOptions + if opts.ParserOptions != nil { + parserOpts = *opts.ParserOptions + } else { + if regoVersion == ast.RegoV1 { + // If the rego version is V1, we need to parse it as such, to allow for future keywords not being imported. + // Otherwise, we'll default to the default rego-version. + parserOpts.RegoVersion = ast.RegoV1 + } + } + + if parserOpts.RegoVersion == ast.RegoUndefined { + parserOpts.RegoVersion = ast.DefaultRegoVersion + } + + module, err := ast.ParseModuleWithOpts(filename, string(src), parserOpts) + if err != nil { + return nil, err + } + + if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 { + checkOpts := ast.NewRegoCheckOptions() + // The module is parsed as v0, so we need to disable checks that will be automatically amended by the AstWithOpts call anyways. + checkOpts.RequireIfKeyword = false + checkOpts.RequireContainsKeyword = false + checkOpts.RequireRuleBodyOrValue = false + errors := ast.CheckRegoV1WithOptions(module, checkOpts) + if len(errors) > 0 { + return nil, errors + } + } + + formatted, err := AstWithOpts(module, opts) + if err != nil { + return nil, fmt.Errorf("%s: %v", filename, err) + } + + return formatted, nil +} + +// MustAst is a helper function to format a Rego AST element. If any errors +// occurs this function will panic. This is mostly used for test +func MustAst(x interface{}) []byte { + bs, err := Ast(x) + if err != nil { + panic(err) + } + return bs +} + +// MustAstWithOpts is a helper function to format a Rego AST element. If any errors +// occurs this function will panic. 
This is mostly used for test +func MustAstWithOpts(x interface{}, opts Opts) []byte { + bs, err := AstWithOpts(x, opts) + if err != nil { + panic(err) + } + return bs +} + +// Ast formats a Rego AST element. If the passed value is not a valid AST +// element, Ast returns nil and an error. If AST nodes are missing locations +// an arbitrary location will be used. +func Ast(x interface{}) ([]byte, error) { + return AstWithOpts(x, Opts{}) +} + +type fmtOpts struct { + // When the future keyword "contains" is imported, all the pretty-printed + // modules will use that format for partial sets. + // NOTE(sr): For ref-head rules, this will be the default behaviour, since + // we need "contains" to disambiguate complete rules from partial sets. + contains bool + + // Same logic applies as for "contains": if `future.keywords.if` (or all + // future keywords) is imported, we'll render rules that can use `if` with + // `if`. + ifs bool + + // We check all rule ref heads to see if any of them _requires_ support + // for ref heads -- if they do, we'll print all of them in a different way + // than if they don't. + refHeads bool + + regoV1 bool + regoV1Imported bool + futureKeywords []string +} + +func (o fmtOpts) keywords() []string { + if o.regoV1 { + return ast.KeywordsV1[:] + } + kws := ast.KeywordsV0[:] + return append(kws, o.futureKeywords...) +} + +func AstWithOpts(x interface{}, opts Opts) ([]byte, error) { + // The node has to be deep copied because it may be mutated below. Alternatively, + // we could avoid the copy by checking if mutation will occur first. For now, + // since format is not latency sensitive, just deep copy in all cases. + x = ast.Copy(x) + + wildcards := map[ast.Var]*ast.Term{} + + // NOTE(sr): When the formatter encounters a call to internal.member_2 + // or internal.member_3, it will sugarize them into usage of the `in` + // operator. It has to ensure that the proper future keyword import is + // present. + extraFutureKeywordImports := map[string]struct{}{} + + o := fmtOpts{} + + regoVersion := opts.effectiveRegoVersion() + if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 { + o.regoV1 = true + o.ifs = true + o.contains = true + } + + // Preprocess the AST. Set any required defaults and calculate + // values required for printing the formatted output. 
+ ast.WalkNodes(x, func(x ast.Node) bool { + switch n := x.(type) { + case ast.Body: + if len(n) == 0 { + return false + } + case *ast.Term: + unmangleWildcardVar(wildcards, n) + + case *ast.Expr: + switch { + case n.IsCall() && ast.Member.Ref().Equal(n.Operator()) || ast.MemberWithKey.Ref().Equal(n.Operator()): + extraFutureKeywordImports["in"] = struct{}{} + case n.IsEvery(): + extraFutureKeywordImports["every"] = struct{}{} + } + + case *ast.Import: + if kw, ok := future.WhichFutureKeyword(n); ok { + o.futureKeywords = append(o.futureKeywords, kw) + } + + switch { + case isRegoV1Compatible(n): + o.regoV1Imported = true + o.contains = true + o.ifs = true + case future.IsAllFutureKeywords(n): + o.contains = true + o.ifs = true + case future.IsFutureKeyword(n, "contains"): + o.contains = true + case future.IsFutureKeyword(n, "if"): + o.ifs = true + } + + case *ast.Rule: + if len(n.Head.Ref()) > 2 { + o.refHeads = true + } + if len(n.Head.Ref()) == 2 && n.Head.Key != nil && n.Head.Value == nil { // p.q contains "x" + o.refHeads = true + } + } + + if opts.IgnoreLocations || x.Loc() == nil { + x.SetLoc(defaultLocation(x)) + } + return false + }) + + w := &writer{ + indent: "\t", + errs: make([]*ast.Error, 0), + fmtOpts: o, + } + + switch x := x.(type) { + case *ast.Module: + if regoVersion == ast.RegoV1 && opts.DropV0Imports { + x.Imports = filterRegoV1Import(x.Imports) + } else if regoVersion == ast.RegoV0CompatV1 { + x.Imports = ensureRegoV1Import(x.Imports) + } + + regoV1Imported := moduleIsRegoV1Compatible(x) + if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 || regoV1Imported { + if !opts.DropV0Imports && !regoV1Imported { + for _, kw := range o.futureKeywords { + x.Imports = ensureFutureKeywordImport(x.Imports, kw) + } + } else { + x.Imports = future.FilterFutureImports(x.Imports) + } + } else { + for kw := range extraFutureKeywordImports { + x.Imports = ensureFutureKeywordImport(x.Imports, kw) + } + } + w.writeModule(x) + case *ast.Package: + w.writePackage(x, nil) + case *ast.Import: + w.writeImports([]*ast.Import{x}, nil) + case *ast.Rule: + w.writeRule(x, false /* isElse */, nil) + case *ast.Head: + w.writeHead(x, + false, // isDefault + false, // isExpandedConst + nil) + case ast.Body: + w.writeBody(x, nil) + case *ast.Expr: + w.writeExpr(x, nil) + case *ast.With: + w.writeWith(x, nil, false) + case *ast.Term: + w.writeTerm(x, nil) + case ast.Value: + w.writeTerm(&ast.Term{Value: x, Location: &ast.Location{}}, nil) + case *ast.Comment: + w.writeComments([]*ast.Comment{x}) + default: + return nil, fmt.Errorf("not an ast element: %v", x) + } + + if len(w.errs) > 0 { + return nil, w.errs + } + return squashTrailingNewlines(w.buf.Bytes()), nil +} + +func unmangleWildcardVar(wildcards map[ast.Var]*ast.Term, n *ast.Term) { + + v, ok := n.Value.(ast.Var) + if !ok || !v.IsWildcard() { + return + } + + first, ok := wildcards[v] + if !ok { + wildcards[v] = n + return + } + + w := v[len(ast.WildcardPrefix):] + + // Prepend an underscore to ensure the variable will parse. 
+ if len(w) == 0 || w[0] != '_' { + w = "_" + w + } + + if first != nil { + first.Value = w + wildcards[v] = nil + } + + n.Value = w +} + +func squashTrailingNewlines(bs []byte) []byte { + if bytes.HasSuffix(bs, []byte("\n")) { + return append(bytes.TrimRight(bs, "\n"), '\n') + } + return bs +} + +func defaultLocation(x ast.Node) *ast.Location { + return ast.NewLocation([]byte(x.String()), defaultLocationFile, 1, 1) +} + +type writer struct { + buf bytes.Buffer + + indent string + level int + inline bool + beforeEnd *ast.Comment + delay bool + errs ast.Errors + fmtOpts fmtOpts +} + +func (w *writer) writeModule(module *ast.Module) { + var pkg *ast.Package + var others []interface{} + var comments []*ast.Comment + visitor := ast.NewGenericVisitor(func(x interface{}) bool { + switch x := x.(type) { + case *ast.Comment: + comments = append(comments, x) + return true + case *ast.Import, *ast.Rule: + others = append(others, x) + return true + case *ast.Package: + pkg = x + return true + default: + return false + } + }) + visitor.Walk(module) + + sort.Slice(comments, func(i, j int) bool { + return locLess(comments[i], comments[j]) + }) + + sort.Slice(others, func(i, j int) bool { + return locLess(others[i], others[j]) + }) + + comments = trimTrailingWhitespaceInComments(comments) + + comments = w.writePackage(pkg, comments) + var imports []*ast.Import + var rules []*ast.Rule + for len(others) > 0 { + imports, others = gatherImports(others) + comments = w.writeImports(imports, comments) + rules, others = gatherRules(others) + comments = w.writeRules(rules, comments) + } + + for i, c := range comments { + w.writeLine(c.String()) + if i == len(comments)-1 { + w.write("\n") + } + } +} + +func trimTrailingWhitespaceInComments(comments []*ast.Comment) []*ast.Comment { + for _, c := range comments { + c.Text = bytes.TrimRightFunc(c.Text, unicode.IsSpace) + } + + return comments +} + +func (w *writer) writePackage(pkg *ast.Package, comments []*ast.Comment) []*ast.Comment { + comments = w.insertComments(comments, pkg.Location) + + w.startLine() + + // Omit head as all packages have the DefaultRootDocument prepended at parse time. + path := make(ast.Ref, len(pkg.Path)-1) + path[0] = ast.VarTerm(string(pkg.Path[1].Value.(ast.String))) + copy(path[1:], pkg.Path[2:]) + + w.write("package ") + w.writeRef(path, nil) + + w.blankLine() + + return comments +} + +func (w *writer) writeComments(comments []*ast.Comment) { + for i := 0; i < len(comments); i++ { + if i > 0 && locCmp(comments[i], comments[i-1]) > 1 { + w.blankLine() + } + w.writeLine(comments[i].String()) + } +} + +func (w *writer) writeRules(rules []*ast.Rule, comments []*ast.Comment) []*ast.Comment { + for _, rule := range rules { + comments = w.insertComments(comments, rule.Location) + comments = w.writeRule(rule, false, comments) + w.blankLine() + } + return comments +} + +func (w *writer) writeRule(rule *ast.Rule, isElse bool, comments []*ast.Comment) []*ast.Comment { + if rule == nil { + return comments + } + + if !isElse { + w.startLine() + } + + if rule.Default { + w.write("default ") + } + + // OPA transforms lone bodies like `foo = {"a": "b"}` into rules of the form + // `foo = {"a": "b"} { true }` in the AST. We want to preserve that notation + // in the formatted code instead of expanding the bodies into rules, so we + // pretend that the rule has no body in this case. 
+ isExpandedConst := rule.Body.Equal(ast.NewBody(ast.NewExpr(ast.BooleanTerm(true)))) && rule.Else == nil + + comments = w.writeHead(rule.Head, rule.Default, isExpandedConst, comments) + + // this excludes partial sets UNLESS `contains` is used + partialSetException := w.fmtOpts.contains || rule.Head.Value != nil + + if len(rule.Body) == 0 || isExpandedConst { + w.endLine() + return comments + } + + if (w.fmtOpts.regoV1 || w.fmtOpts.ifs) && partialSetException { + w.write(" if") + if len(rule.Body) == 1 { + if rule.Body[0].Location.Row == rule.Head.Location.Row { + w.write(" ") + comments = w.writeExpr(rule.Body[0], comments) + w.endLine() + if rule.Else != nil { + comments = w.writeElse(rule, comments) + } + return comments + } + } + } + w.write(" {") + w.endLine() + w.up() + + comments = w.writeBody(rule.Body, comments) + + var closeLoc *ast.Location + + if len(rule.Head.Args) > 0 { + closeLoc = closingLoc('(', ')', '{', '}', rule.Location) + } else if rule.Head.Key != nil { + closeLoc = closingLoc('[', ']', '{', '}', rule.Location) + } else { + closeLoc = closingLoc(0, 0, '{', '}', rule.Location) + } + + comments = w.insertComments(comments, closeLoc) + + w.down() + w.startLine() + w.write("}") + if rule.Else != nil { + comments = w.writeElse(rule, comments) + } + return comments +} + +func (w *writer) writeElse(rule *ast.Rule, comments []*ast.Comment) []*ast.Comment { + // If there was nothing else on the line before the "else" starts + // then preserve this style of else block, otherwise it will be + // started as an "inline" else eg: + // + // p { + // ... + // } + // + // else { + // ... + // } + // + // versus + // + // p { + // ... + // } else { + // ... + // } + // + // Note: This doesn't use the `close` as it currently isn't accurate for all + // types of values. Checking the actual line text is the most consistent approach. + wasInline := false + ruleLines := bytes.Split(rule.Location.Text, []byte("\n")) + relativeElseRow := rule.Else.Location.Row - rule.Location.Row + if relativeElseRow > 0 && relativeElseRow < len(ruleLines) { + elseLine := ruleLines[relativeElseRow] + if !bytes.HasPrefix(bytes.TrimSpace(elseLine), []byte("else")) { + wasInline = true + } + } + + // If there are any comments between the closing brace of the previous rule and the start + // of the else block we will always insert a new blank line between them. + hasCommentAbove := len(comments) > 0 && comments[0].Location.Row-rule.Else.Head.Location.Row < 0 || w.beforeEnd != nil + + if !hasCommentAbove && wasInline { + w.write(" ") + } else { + w.blankLine() + w.startLine() + } + + rule.Else.Head.Name = "else" // NOTE(sr): whaaat + + elseHeadReference := ast.VarTerm("else") // construct a reference for the term + elseHeadReference.Location = rule.Else.Head.Location // and set the location to match the rule location + + rule.Else.Head.Reference = ast.Ref{elseHeadReference} + rule.Else.Head.Args = nil + comments = w.insertComments(comments, rule.Else.Head.Location) + + if hasCommentAbove && !wasInline { + // The comments would have ended the line, be sure to start one again + // before writing the rest of the "else" rule. 
+ w.startLine() + } + + // For backwards compatibility adjust the rule head value location + // TODO: Refactor the logic for inserting comments, or special + // case comments in a rule head value so this can be removed + if rule.Else.Head.Value != nil { + rule.Else.Head.Value.Location = rule.Else.Head.Location + } + + return w.writeRule(rule.Else, true, comments) +} + +func (w *writer) writeHead(head *ast.Head, isDefault, isExpandedConst bool, comments []*ast.Comment) []*ast.Comment { + ref := head.Ref() + if head.Key != nil && head.Value == nil && !head.HasDynamicRef() { + ref = ref.GroundPrefix() + } + if w.fmtOpts.refHeads || len(ref) == 1 { + w.writeRef(ref, comments) + } else { + w.write(ref[0].String()) + w.write("[") + w.write(ref[1].String()) + w.write("]") + } + + if len(head.Args) > 0 { + w.write("(") + var args []interface{} + for _, arg := range head.Args { + args = append(args, arg) + } + comments = w.writeIterable(args, head.Location, closingLoc(0, 0, '(', ')', head.Location), comments, w.listWriter()) + w.write(")") + } + if head.Key != nil { + if w.fmtOpts.contains && head.Value == nil { + w.write(" contains ") + comments = w.writeTerm(head.Key, comments) + } else if head.Value == nil { // no `if` for p[x] notation + w.write("[") + comments = w.writeTerm(head.Key, comments) + w.write("]") + } + } + + if head.Value != nil && + (head.Key != nil || ast.Compare(head.Value, ast.BooleanTerm(true)) != 0 || isExpandedConst || isDefault) { + + // in rego v1, explicitly print value for ref-head constants that aren't partial set assignments, e.g.: + // * a -> parser error, won't reach here + // * a.b -> a contains "b" + // * a.b.c -> a.b.c := true + // * a.b.c.d -> a.b.c.d := true + isRegoV1RefConst := w.fmtOpts.regoV1 && isExpandedConst && head.Key == nil && len(head.Args) == 0 + + if head.Location == head.Value.Location && + head.Name != "else" && + ast.Compare(head.Value, ast.BooleanTerm(true)) == 0 && + !isRegoV1RefConst { + // If the value location is the same as the location of the head, + // we know that the value is generated, i.e. f(1) + // Don't print the value (` = true`) as it is implied. + return comments + } + + if head.Assign || w.fmtOpts.regoV1 { + // preserve assignment operator, and enforce it if formatting for Rego v1 + w.write(" := ") + } else { + w.write(" = ") + } + comments = w.writeTerm(head.Value, comments) + } + return comments +} + +func (w *writer) insertComments(comments []*ast.Comment, loc *ast.Location) []*ast.Comment { + before, at, comments := partitionComments(comments, loc) + w.writeComments(before) + if len(before) > 0 && loc.Row-before[len(before)-1].Location.Row > 1 { + w.blankLine() + } + + w.beforeLineEnd(at) + return comments +} + +func (w *writer) writeBody(body ast.Body, comments []*ast.Comment) []*ast.Comment { + comments = w.insertComments(comments, body.Loc()) + for i, expr := range body { + // Insert a blank line in before the expression if it was not right + // after the previous expression. 
+ if i > 0 { + lastRow := body[i-1].Location.Row + for _, c := range body[i-1].Location.Text { + if c == '\n' { + lastRow++ + } + } + if expr.Location.Row > lastRow+1 { + w.blankLine() + } + } + w.startLine() + + comments = w.writeExpr(expr, comments) + w.endLine() + } + return comments +} + +func (w *writer) writeExpr(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment { + comments = w.insertComments(comments, expr.Location) + if !w.inline { + w.startLine() + } + + if expr.Negated { + w.write("not ") + } + + switch t := expr.Terms.(type) { + case *ast.SomeDecl: + comments = w.writeSomeDecl(t, comments) + case *ast.Every: + comments = w.writeEvery(t, comments) + case []*ast.Term: + comments = w.writeFunctionCall(expr, comments) + case *ast.Term: + comments = w.writeTerm(t, comments) + } + + var indented bool + for i, with := range expr.With { + if i == 0 || with.Location.Row == expr.With[i-1].Location.Row { // we're on the same line + comments = w.writeWith(with, comments, false) + } else { // we're on a new line + if !indented { + indented = true + + w.up() + defer w.down() + } + w.endLine() + w.startLine() + comments = w.writeWith(with, comments, true) + } + } + + return comments +} + +func (w *writer) writeSomeDecl(decl *ast.SomeDecl, comments []*ast.Comment) []*ast.Comment { + comments = w.insertComments(comments, decl.Location) + w.write("some ") + + row := decl.Location.Row + + for i, term := range decl.Symbols { + switch val := term.Value.(type) { + case ast.Var: + if term.Location.Row > row { + w.endLine() + w.startLine() + w.write(w.indent) + row = term.Location.Row + } else if i > 0 { + w.write(" ") + } + + comments = w.writeTerm(term, comments) + + if i < len(decl.Symbols)-1 { + w.write(",") + } + case ast.Call: + comments = w.writeInOperator(false, val[1:], comments, decl.Location, ast.BuiltinMap[val[0].String()].Decl) + } + } + + return comments +} + +func (w *writer) writeEvery(every *ast.Every, comments []*ast.Comment) []*ast.Comment { + comments = w.insertComments(comments, every.Location) + w.write("every ") + if every.Key != nil { + comments = w.writeTerm(every.Key, comments) + w.write(", ") + } + comments = w.writeTerm(every.Value, comments) + w.write(" in ") + comments = w.writeTerm(every.Domain, comments) + w.write(" {") + comments = w.writeComprehensionBody('{', '}', every.Body, every.Loc(), every.Loc(), comments) + + if len(every.Body) == 1 && + every.Body[0].Location.Row == every.Location.Row { + w.write(" ") + } + w.write("}") + return comments +} + +func (w *writer) writeFunctionCall(expr *ast.Expr, comments []*ast.Comment) []*ast.Comment { + + terms := expr.Terms.([]*ast.Term) + operator := terms[0].Value.String() + + switch operator { + case ast.Member.Name, ast.MemberWithKey.Name: + return w.writeInOperator(false, terms[1:], comments, terms[0].Location, ast.BuiltinMap[terms[0].String()].Decl) + } + + bi, ok := ast.BuiltinMap[operator] + if !ok || bi.Infix == "" { + return w.writeFunctionCallPlain(terms, comments) + } + + numDeclArgs := bi.Decl.Arity() + numCallArgs := len(terms) - 1 + + switch numCallArgs { + case numDeclArgs: // Print infix where result is unassigned (e.g., x != y) + comments = w.writeTerm(terms[1], comments) + w.write(" " + bi.Infix + " ") + return w.writeTerm(terms[2], comments) + + case numDeclArgs + 1: // Print infix where result is assigned (e.g., z = x + y) + comments = w.writeTerm(terms[3], comments) + w.write(" " + ast.Equality.Infix + " ") + comments = w.writeTerm(terms[1], comments) + w.write(" " + bi.Infix + " ") + comments = 
w.writeTerm(terms[2], comments) + return comments + } + // NOTE(Trolloldem): in this point we are operating with a built-in function with the + // wrong arity even when the assignment notation is used + w.errs = append(w.errs, ArityFormatMismatchError(terms[1:], terms[0].String(), terms[0].Location, bi.Decl)) + return w.writeFunctionCallPlain(terms, comments) +} + +func (w *writer) writeFunctionCallPlain(terms []*ast.Term, comments []*ast.Comment) []*ast.Comment { + w.write(terms[0].String() + "(") + defer w.write(")") + args := make([]interface{}, len(terms)-1) + for i, t := range terms[1:] { + args[i] = t + } + loc := terms[0].Location + return w.writeIterable(args, loc, closingLoc(0, 0, '(', ')', loc), comments, w.listWriter()) +} + +func (w *writer) writeWith(with *ast.With, comments []*ast.Comment, indented bool) []*ast.Comment { + comments = w.insertComments(comments, with.Location) + if !indented { + w.write(" ") + } + w.write("with ") + comments = w.writeTerm(with.Target, comments) + w.write(" as ") + return w.writeTerm(with.Value, comments) +} + +func (w *writer) writeTerm(term *ast.Term, comments []*ast.Comment) []*ast.Comment { + return w.writeTermParens(false, term, comments) +} + +func (w *writer) writeTermParens(parens bool, term *ast.Term, comments []*ast.Comment) []*ast.Comment { + comments = w.insertComments(comments, term.Location) + if !w.inline { + w.startLine() + } + + switch x := term.Value.(type) { + case ast.Ref: + comments = w.writeRef(x, comments) + case ast.Object: + comments = w.writeObject(x, term.Location, comments) + case *ast.Array: + comments = w.writeArray(x, term.Location, comments) + case ast.Set: + comments = w.writeSet(x, term.Location, comments) + case *ast.ArrayComprehension: + comments = w.writeArrayComprehension(x, term.Location, comments) + case *ast.ObjectComprehension: + comments = w.writeObjectComprehension(x, term.Location, comments) + case *ast.SetComprehension: + comments = w.writeSetComprehension(x, term.Location, comments) + case ast.String: + if term.Location.Text[0] == '`' { + // To preserve raw strings, we need to output the original text, + // not what x.String() would give us. + w.write(string(term.Location.Text)) + } else { + w.write(x.String()) + } + case ast.Var: + w.write(w.formatVar(x)) + case ast.Call: + comments = w.writeCall(parens, x, term.Location, comments) + case fmt.Stringer: + w.write(x.String()) + } + + if !w.inline { + w.startLine() + } + return comments +} + +func (w *writer) writeRef(x ast.Ref, comments []*ast.Comment) []*ast.Comment { + if len(x) > 0 { + parens := false + _, ok := x[0].Value.(ast.Call) + if ok { + parens = x[0].Location.Text[0] == 40 // Starts with "(" + } + comments = w.writeTermParens(parens, x[0], comments) + path := x[1:] + for _, t := range path { + switch p := t.Value.(type) { + case ast.String: + w.writeRefStringPath(p) + case ast.Var: + w.writeBracketed(w.formatVar(p)) + default: + w.write("[") + comments = w.writeTerm(t, comments) + w.write("]") + } + } + } + + return comments +} + +func (w *writer) writeBracketed(str string) { + w.write("[" + str + "]") +} + +var varRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") + +func (w *writer) writeRefStringPath(s ast.String) { + str := string(s) + if varRegexp.MatchString(str) && !ast.IsInKeywords(str, w.fmtOpts.keywords()) { + w.write("." 
+ str) + } else { + w.writeBracketed(s.String()) + } +} + +func (w *writer) formatVar(v ast.Var) string { + if v.IsWildcard() { + return ast.Wildcard.String() + } + return v.String() +} + +func (w *writer) writeCall(parens bool, x ast.Call, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { + bi, ok := ast.BuiltinMap[x[0].String()] + if !ok || bi.Infix == "" { + return w.writeFunctionCallPlain(x, comments) + } + + if bi.Infix == "in" { + // NOTE(sr): `in` requires special handling, mirroring what happens in the parser, + // since there can be one or two lhs arguments. + return w.writeInOperator(true, x[1:], comments, loc, bi.Decl) + } + + // TODO(tsandall): improve to consider precedence? + if parens { + w.write("(") + } + + // NOTE(Trolloldem): writeCall is only invoked when the function call is a term + // of another function. The only valid arity is the one of the + // built-in function + if bi.Decl.Arity() != len(x)-1 { + w.errs = append(w.errs, ArityFormatMismatchError(x[1:], x[0].String(), loc, bi.Decl)) + return comments + } + + comments = w.writeTermParens(true, x[1], comments) + w.write(" " + bi.Infix + " ") + comments = w.writeTermParens(true, x[2], comments) + if parens { + w.write(")") + } + + return comments +} + +func (w *writer) writeInOperator(parens bool, operands []*ast.Term, comments []*ast.Comment, loc *ast.Location, f *types.Function) []*ast.Comment { + + if len(operands) != f.Arity() { + // The number of operands does not math the arity of the `in` operator + operator := ast.Member.Name + if f.Arity() == 3 { + operator = ast.MemberWithKey.Name + } + w.errs = append(w.errs, ArityFormatMismatchError(operands, operator, loc, f)) + return comments + } + kw := "in" + switch len(operands) { + case 2: + comments = w.writeTermParens(true, operands[0], comments) + w.write(" ") + w.write(kw) + w.write(" ") + comments = w.writeTermParens(true, operands[1], comments) + case 3: + if parens { + w.write("(") + defer w.write(")") + } + comments = w.writeTermParens(true, operands[0], comments) + w.write(", ") + comments = w.writeTermParens(true, operands[1], comments) + w.write(" ") + w.write(kw) + w.write(" ") + comments = w.writeTermParens(true, operands[2], comments) + } + return comments +} + +func (w *writer) writeObject(obj ast.Object, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { + w.write("{") + defer w.write("}") + + var s []interface{} + obj.Foreach(func(k, v *ast.Term) { + s = append(s, ast.Item(k, v)) + }) + return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.objectWriter()) +} + +func (w *writer) writeArray(arr *ast.Array, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { + w.write("[") + defer w.write("]") + + var s []interface{} + arr.Foreach(func(t *ast.Term) { + s = append(s, t) + }) + return w.writeIterable(s, loc, closingLoc(0, 0, '[', ']', loc), comments, w.listWriter()) +} + +func (w *writer) writeSet(set ast.Set, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { + + if set.Len() == 0 { + w.write("set()") + return w.insertComments(comments, closingLoc(0, 0, '(', ')', loc)) + } + + w.write("{") + defer w.write("}") + + var s []interface{} + set.Foreach(func(t *ast.Term) { + s = append(s, t) + }) + return w.writeIterable(s, loc, closingLoc(0, 0, '{', '}', loc), comments, w.listWriter()) +} + +func (w *writer) writeArrayComprehension(arr *ast.ArrayComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { + w.write("[") + defer w.write("]") + + return w.writeComprehension('[', 
']', arr.Term, arr.Body, loc, comments) +} + +func (w *writer) writeSetComprehension(set *ast.SetComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { + w.write("{") + defer w.write("}") + + return w.writeComprehension('{', '}', set.Term, set.Body, loc, comments) +} + +func (w *writer) writeObjectComprehension(object *ast.ObjectComprehension, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { + w.write("{") + defer w.write("}") + + object.Value.Location = object.Key.Location // Ensure the value is not written on the next line. + if object.Key.Location.Row-loc.Row > 1 { + w.endLine() + w.startLine() + } + + comments = w.writeTerm(object.Key, comments) + w.write(": ") + return w.writeComprehension('{', '}', object.Value, object.Body, loc, comments) +} + +func (w *writer) writeComprehension(openChar, closeChar byte, term *ast.Term, body ast.Body, loc *ast.Location, comments []*ast.Comment) []*ast.Comment { + if term.Location.Row-loc.Row >= 1 { + w.endLine() + w.startLine() + } + + parens := false + _, ok := term.Value.(ast.Call) + if ok { + parens = term.Location.Text[0] == 40 // Starts with "(" + } + comments = w.writeTermParens(parens, term, comments) + w.write(" |") + + return w.writeComprehensionBody(openChar, closeChar, body, term.Location, loc, comments) +} + +func (w *writer) writeComprehensionBody(openChar, closeChar byte, body ast.Body, term, compr *ast.Location, comments []*ast.Comment) []*ast.Comment { + exprs := make([]interface{}, 0, len(body)) + for _, expr := range body { + exprs = append(exprs, expr) + } + lines := groupIterable(exprs, term) + + if body.Loc().Row-term.Row > 0 || len(lines) > 1 { + w.endLine() + w.up() + defer w.startLine() + defer w.down() + + comments = w.writeBody(body, comments) + } else { + w.write(" ") + i := 0 + for ; i < len(body)-1; i++ { + comments = w.writeExpr(body[i], comments) + w.write("; ") + } + comments = w.writeExpr(body[i], comments) + } + + return w.insertComments(comments, closingLoc(0, 0, openChar, closeChar, compr)) +} + +func (w *writer) writeImports(imports []*ast.Import, comments []*ast.Comment) []*ast.Comment { + m, comments := mapImportsToComments(imports, comments) + + groups := groupImports(imports) + for _, group := range groups { + comments = w.insertComments(comments, group[0].Loc()) + + // Sort imports within a newline grouping. 
+ sort.Slice(group, func(i, j int) bool { + a := group[i] + b := group[j] + return a.Compare(b) < 0 + }) + for _, i := range group { + w.startLine() + w.writeImport(i) + if c, ok := m[i]; ok { + w.write(" " + c.String()) + } + w.endLine() + } + w.blankLine() + } + + return comments +} + +func (w *writer) writeImport(imp *ast.Import) { + path := imp.Path.Value.(ast.Ref) + + buf := []string{"import"} + + if _, ok := future.WhichFutureKeyword(imp); ok { + // We don't want to wrap future.keywords imports in parens, so we create a new writer that doesn't + w2 := writer{ + buf: bytes.Buffer{}, + } + w2.writeRef(path, nil) + buf = append(buf, w2.buf.String()) + } else { + buf = append(buf, path.String()) + } + + if len(imp.Alias) > 0 { + buf = append(buf, "as "+imp.Alias.String()) + } + w.write(strings.Join(buf, " ")) +} + +type entryWriter func(interface{}, []*ast.Comment) []*ast.Comment + +func (w *writer) writeIterable(elements []interface{}, last *ast.Location, close *ast.Location, comments []*ast.Comment, fn entryWriter) []*ast.Comment { + lines := groupIterable(elements, last) + if len(lines) > 1 { + w.delayBeforeEnd() + w.startMultilineSeq() + } + + i := 0 + for ; i < len(lines)-1; i++ { + comments = w.writeIterableLine(lines[i], comments, fn) + w.write(",") + + w.endLine() + w.startLine() + } + + comments = w.writeIterableLine(lines[i], comments, fn) + + if len(lines) > 1 { + w.write(",") + w.endLine() + comments = w.insertComments(comments, close) + w.down() + w.startLine() + } + + return comments +} + +func (w *writer) writeIterableLine(elements []interface{}, comments []*ast.Comment, fn entryWriter) []*ast.Comment { + if len(elements) == 0 { + return comments + } + + i := 0 + for ; i < len(elements)-1; i++ { + comments = fn(elements[i], comments) + w.write(", ") + } + + return fn(elements[i], comments) +} + +func (w *writer) objectWriter() entryWriter { + return func(x interface{}, comments []*ast.Comment) []*ast.Comment { + entry := x.([2]*ast.Term) + + call, isCall := entry[0].Value.(ast.Call) + + paren := false + if isCall && ast.Or.Ref().Equal(call[0].Value) && entry[0].Location.Text[0] == 40 { // Starts with "(" + paren = true + w.write("(") + } + + comments = w.writeTerm(entry[0], comments) + if paren { + w.write(")") + } + + w.write(": ") + + call, isCall = entry[1].Value.(ast.Call) + if isCall && ast.Or.Ref().Equal(call[0].Value) && entry[1].Location.Text[0] == 40 { // Starts with "(" + w.write("(") + defer w.write(")") + } + + return w.writeTerm(entry[1], comments) + } +} + +func (w *writer) listWriter() entryWriter { + return func(x interface{}, comments []*ast.Comment) []*ast.Comment { + t, ok := x.(*ast.Term) + if ok { + call, isCall := t.Value.(ast.Call) + if isCall && ast.Or.Ref().Equal(call[0].Value) && t.Location.Text[0] == 40 { // Starts with "(" + w.write("(") + defer w.write(")") + } + } + + return w.writeTerm(t, comments) + } +} + +// groupIterable will group the `elements` slice into slices according to their +// location: anything on the same line will be put into a slice. +func groupIterable(elements []interface{}, last *ast.Location) [][]interface{} { + // Generated vars occur in the AST when we're rendering the result of + // partial evaluation in a bundle build with optimization. + // Those variables, and wildcard variables have the "default location", + // set in `Ast()`). That is no proper file location, and the grouping + // based on source location will yield a bad result. 
+ // Another case is generated variables: they do have proper file locations, + // but their row/col information may no longer match their AST location. + // So, for generated variables, we also don't trust the location, but + // keep them ungrouped. + def := false // default location found? + for _, elem := range elements { + ast.WalkTerms(elem, func(t *ast.Term) bool { + if t.Location.File == defaultLocationFile { + def = true + return true + } + return false + }) + ast.WalkVars(elem, func(v ast.Var) bool { + if v.IsGenerated() { + def = true + return true + } + return false + }) + if def { // return as-is + return [][]interface{}{elements} + } + } + sort.Slice(elements, func(i, j int) bool { + return locLess(elements[i], elements[j]) + }) + + var lines [][]interface{} + cur := make([]interface{}, 0, len(elements)) + for i, t := range elements { + elem := t + loc := getLoc(elem) + lineDiff := loc.Row - last.Row + if lineDiff > 0 && i > 0 { + lines = append(lines, cur) + cur = nil + } + + last = loc + cur = append(cur, elem) + } + return append(lines, cur) +} + +func mapImportsToComments(imports []*ast.Import, comments []*ast.Comment) (map[*ast.Import]*ast.Comment, []*ast.Comment) { + var leftovers []*ast.Comment + m := map[*ast.Import]*ast.Comment{} + + for _, c := range comments { + matched := false + for _, i := range imports { + if c.Loc().Row == i.Loc().Row { + m[i] = c + matched = true + break + } + } + if !matched { + leftovers = append(leftovers, c) + } + } + + return m, leftovers +} + +func groupImports(imports []*ast.Import) [][]*ast.Import { + switch len(imports) { // shortcuts + case 0: + return nil + case 1: + return [][]*ast.Import{imports} + } + // there are >=2 imports to group + + var groups [][]*ast.Import + group := []*ast.Import{imports[0]} + + for _, i := range imports[1:] { + last := group[len(group)-1] + + // nil-location imports have been sorted up to come first + if i.Loc() != nil && last.Loc() != nil && // first import with a location, or + i.Loc().Row-last.Loc().Row > 1 { // more than one row apart from previous import + + // start a new group + groups = append(groups, group) + group = []*ast.Import{} + } + group = append(group, i) + } + if len(group) > 0 { + groups = append(groups, group) + } + + return groups +} + +func partitionComments(comments []*ast.Comment, l *ast.Location) (before []*ast.Comment, at *ast.Comment, after []*ast.Comment) { + for _, c := range comments { + switch cmp := c.Location.Row - l.Row; { + case cmp < 0: + before = append(before, c) + case cmp > 0: + after = append(after, c) + case cmp == 0: + at = c + } + } + + return before, at, after +} + +func gatherImports(others []interface{}) (imports []*ast.Import, rest []interface{}) { + i := 0 +loop: + for ; i < len(others); i++ { + switch x := others[i].(type) { + case *ast.Import: + imports = append(imports, x) + case *ast.Rule: + break loop + } + } + return imports, others[i:] +} + +func gatherRules(others []interface{}) (rules []*ast.Rule, rest []interface{}) { + i := 0 +loop: + for ; i < len(others); i++ { + switch x := others[i].(type) { + case *ast.Rule: + rules = append(rules, x) + case *ast.Import: + break loop + } + } + return rules, others[i:] +} + +func locLess(a, b interface{}) bool { + return locCmp(a, b) < 0 +} + +func locCmp(a, b interface{}) int { + al := getLoc(a) + bl := getLoc(b) + switch { + case al == nil && bl == nil: + return 0 + case al == nil: + return -1 + case bl == nil: + return 1 + } + + if cmp := al.Row - bl.Row; cmp != 0 { + return cmp + + } + return al.Col - 
bl.Col +} + +func getLoc(x interface{}) *ast.Location { + switch x := x.(type) { + case ast.Node: // *ast.Head, *ast.Expr, *ast.With, *ast.Term + return x.Loc() + case *ast.Location: + return x + case [2]*ast.Term: // Special case to allow for easy printing of objects. + return x[0].Location + default: + panic("Not reached") + } +} + +func closingLoc(skipOpen, skipClose, openChar, closeChar byte, loc *ast.Location) *ast.Location { + i, offset := 0, 0 + + // Skip past parens/brackets/braces in rule heads. + if skipOpen > 0 { + i, offset = skipPast(skipOpen, skipClose, loc) + } + + for ; i < len(loc.Text); i++ { + if loc.Text[i] == openChar { + break + } + } + + if i >= len(loc.Text) { + return &ast.Location{Row: -1} + } + + state := 1 + for state > 0 { + i++ + if i >= len(loc.Text) { + return &ast.Location{Row: -1} + } + + switch loc.Text[i] { + case openChar: + state++ + case closeChar: + state-- + case '\n': + offset++ + } + } + + return &ast.Location{Row: loc.Row + offset} +} + +func skipPast(openChar, closeChar byte, loc *ast.Location) (int, int) { + i := 0 + for ; i < len(loc.Text); i++ { + if loc.Text[i] == openChar { + break + } + } + + state := 1 + offset := 0 + for state > 0 { + i++ + if i >= len(loc.Text) { + return i, offset + } + + switch loc.Text[i] { + case openChar: + state++ + case closeChar: + state-- + case '\n': + offset++ + } + } + + return i, offset +} + +// startLine begins a line with the current indentation level. +func (w *writer) startLine() { + w.inline = true + for i := 0; i < w.level; i++ { + w.write(w.indent) + } +} + +// endLine ends a line with a newline. +func (w *writer) endLine() { + w.inline = false + if w.beforeEnd != nil && !w.delay { + w.write(" " + w.beforeEnd.String()) + w.beforeEnd = nil + } + w.delay = false + w.write("\n") +} + +// beforeLineEnd registers a comment to be printed at the end of the current line. +func (w *writer) beforeLineEnd(c *ast.Comment) { + if w.beforeEnd != nil { + if c == nil { + return + } + panic("overwriting non-nil beforeEnd") + } + w.beforeEnd = c +} + +func (w *writer) delayBeforeEnd() { + w.delay = true +} + +// line prints a blank line. If the writer is currently in the middle of a line, +// line ends it and then prints a blank one. +func (w *writer) blankLine() { + if w.inline { + w.endLine() + } + w.write("\n") +} + +// write the input string and writes it to the buffer. +func (w *writer) write(s string) { + w.buf.WriteString(s) +} + +// writeLine writes the string on a newly started line, then terminate the line. +func (w *writer) writeLine(s string) { + if !w.inline { + w.startLine() + } + w.write(s) + w.endLine() +} + +func (w *writer) startMultilineSeq() { + w.endLine() + w.up() + w.startLine() +} + +// up increases the indentation level +func (w *writer) up() { + w.level++ +} + +// down decreases the indentation level +func (w *writer) down() { + if w.level == 0 { + panic("negative indentation level") + } + w.level-- +} + +func ensureFutureKeywordImport(imps []*ast.Import, kw string) []*ast.Import { + for _, imp := range imps { + if future.IsAllFutureKeywords(imp) || + future.IsFutureKeyword(imp, kw) || + (future.IsFutureKeyword(imp, "every") && kw == "in") { // "every" implies "in", so we don't need to add both + return imps + } + } + imp := &ast.Import{ + // NOTE: This is a hack to not error on the ref containing a keyword already present in v1. + // A cleaner solution would be to instead allow refs to contain keyword terms. + // E.g. 
in v1, `import future.keywords["in"]` is valid, but `import future.keywords.in` is not + // as it contains a reserved keyword. + Path: ast.MustParseTerm("future.keywords[\"" + kw + "\"]"), + //Path: ast.MustParseTerm("future.keywords." + kw), + } + imp.Location = defaultLocation(imp) + return append(imps, imp) +} + +func ensureRegoV1Import(imps []*ast.Import) []*ast.Import { + return ensureImport(imps, ast.RegoV1CompatibleRef) +} + +func filterRegoV1Import(imps []*ast.Import) []*ast.Import { + var ret []*ast.Import + for _, imp := range imps { + path := imp.Path.Value.(ast.Ref) + if !ast.RegoV1CompatibleRef.Equal(path) { + ret = append(ret, imp) + } + } + return ret +} + +func ensureImport(imps []*ast.Import, path ast.Ref) []*ast.Import { + for _, imp := range imps { + p := imp.Path.Value.(ast.Ref) + if p.Equal(path) { + return imps + } + } + imp := &ast.Import{ + Path: ast.NewTerm(path), + } + imp.Location = defaultLocation(imp) + return append(imps, imp) +} + +// ArgErrDetail but for `fmt` checks since compiler has not run yet. +type ArityFormatErrDetail struct { + Have []string `json:"have"` + Want []string `json:"want"` +} + +// arityMismatchError but for `fmt` checks since the compiler has not run yet. +func ArityFormatMismatchError(operands []*ast.Term, operator string, loc *ast.Location, f *types.Function) *ast.Error { + want := make([]string, f.Arity()) + for i, arg := range f.Args() { + want[i] = types.Sprint(arg) + } + + have := make([]string, len(operands)) + for i := 0; i < len(operands); i++ { + have[i] = ast.ValueName(operands[i].Value) + } + err := ast.NewError(ast.TypeErr, loc, "%s: %s", operator, "arity mismatch") + err.Details = &ArityFormatErrDetail{ + Have: have, + Want: want, + } + return err +} + +// Lines returns the string representation of the detail. +func (d *ArityFormatErrDetail) Lines() []string { + return []string{ + "have: " + "(" + strings.Join(d.Have, ",") + ")", + "want: " + "(" + strings.Join(d.Want, ",") + ")", + } +} + +func moduleIsRegoV1Compatible(m *ast.Module) bool { + for _, imp := range m.Imports { + if isRegoV1Compatible(imp) { + return true + } + } + return false +} + +// isRegoV1Compatible returns true if the passed *ast.Import is `rego.v1` +func isRegoV1Compatible(imp *ast.Import) bool { + path := imp.Path.Value.(ast.Ref) + return len(path) == 2 && + ast.RegoRootDocument.Equal(path[0]) && + path[1].Equal(ast.StringTerm("v1")) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/hooks/hooks.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go similarity index 98% rename from constraint/vendor/github.com/open-policy-agent/opa/hooks/hooks.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go index 9659d7b49..caf69b124 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/hooks/hooks.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/hooks/hooks.go @@ -8,7 +8,7 @@ import ( "context" "fmt" - "github.com/open-policy-agent/opa/config" + "github.com/open-policy-agent/opa/v1/config" ) // Hook is a hook to be called in some select places in OPA's operation. 
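For orientation, the newly vendored v1 format package above exposes Source and SourceWithOpts as its entry points, taking a file name, the raw module bytes, and an Opts value whose RegoVersion field selects the target dialect. The sketch below is illustrative only and is not part of the vendored diff: the caller program, file name "example.rego", and the policy snippet are assumptions; the function signatures and import paths are taken from the vendored format.go shown above.

// Minimal usage sketch (assumed caller code, not part of the patch):
// format a Rego module for Rego v1 via the vendored v1 packages.
package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/format"
)

func main() {
	// Hypothetical input module; any valid Rego v1 source would do.
	src := []byte(`package example

allow if {
	input.user == "admin"
}
`)

	// SourceWithOpts parses the module and pretty-prints it.
	// RegoVersion: ast.RegoV1 formats for Rego v1, so keywords such as
	// "if" and "contains" need no future.keywords or rego.v1 import.
	formatted, err := format.SourceWithOpts("example.rego", src, format.Opts{
		RegoVersion: ast.RegoV1,
	})
	if err != nil {
		fmt.Println("format error:", err)
		return
	}
	fmt.Print(string(formatted))
}

Per the vendored code, plain format.Source(filename, src) is equivalent to calling SourceWithOpts with a zero Opts value, which falls back to the default Rego version.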
diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ir/ir.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ir/ir.go similarity index 99% rename from constraint/vendor/github.com/open-policy-agent/opa/ir/ir.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/ir/ir.go index c07670704..4f6961605 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/ir/ir.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/ir/ir.go @@ -11,7 +11,7 @@ package ir import ( "fmt" - "github.com/open-policy-agent/opa/types" + "github.com/open-policy-agent/opa/v1/types" ) type ( diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ir/marshal.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/ir/marshal.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/ir/marshal.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ir/pretty.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/ir/pretty.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/ir/pretty.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/ir/walk.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/ir/walk.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/ir/walk.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/ir/walk.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/keys/keys.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/keys/keys.go new file mode 100644 index 000000000..fba7a9c93 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/keys/keys.go @@ -0,0 +1,99 @@ +package keys + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/open-policy-agent/opa/v1/util" +) + +const defaultSigningAlgorithm = "RS256" + +var supportedAlgos = map[string]struct{}{ + "ES256": {}, "ES384": {}, "ES512": {}, + "HS256": {}, "HS384": {}, "HS512": {}, + "PS256": {}, "PS384": {}, "PS512": {}, + "RS256": {}, "RS384": {}, "RS512": {}, +} + +// IsSupportedAlgorithm true if provided alg is supported +func IsSupportedAlgorithm(alg string) bool { + _, ok := supportedAlgos[alg] + return ok +} + +// Config holds the keys used to sign or verify bundles and tokens +type Config struct { + Key string `json:"key"` + PrivateKey string `json:"private_key"` + Algorithm string `json:"algorithm"` + Scope string `json:"scope"` +} + +// Equal returns true if this key config is equal to the other. 
+func (k *Config) Equal(other *Config) bool { + return other != nil && *k == *other +} + +func (k *Config) validateAndInjectDefaults(id string) error { + if k.Key == "" && k.PrivateKey == "" { + return fmt.Errorf("invalid keys configuration: no keys provided for key ID %v", id) + } + + if k.Algorithm == "" { + k.Algorithm = defaultSigningAlgorithm + } + + if !IsSupportedAlgorithm(k.Algorithm) { + return fmt.Errorf("unsupported algorithm '%v'", k.Algorithm) + } + + return nil +} + +// NewKeyConfig return a new Config +func NewKeyConfig(key, alg, scope string) (*Config, error) { + var pubKey string + if _, err := os.Stat(key); err == nil { + bs, err := os.ReadFile(key) + if err != nil { + return nil, err + } + pubKey = string(bs) + } else if os.IsNotExist(err) { + pubKey = key + } else { + return nil, err + } + + return &Config{ + Key: pubKey, + Algorithm: alg, + Scope: scope, + }, nil +} + +// ParseKeysConfig returns a map containing the key and the signing algorithm +func ParseKeysConfig(raw json.RawMessage) (map[string]*Config, error) { + keys := map[string]*Config{} + var obj map[string]json.RawMessage + + if err := util.Unmarshal(raw, &obj); err == nil { + for k := range obj { + var keyConfig Config + if err = util.Unmarshal(obj[k], &keyConfig); err != nil { + return nil, err + } + + if err = keyConfig.validateAndInjectDefaults(k); err != nil { + return nil, err + } + + keys[k] = &keyConfig + } + } else { + return nil, err + } + return keys, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go new file mode 100644 index 000000000..55b8e7dc4 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/loader/errors.go @@ -0,0 +1,62 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package loader + +import ( + "fmt" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// Errors is a wrapper for multiple loader errors. 
+type Errors []error + +func (e Errors) Error() string { + if len(e) == 0 { + return "no error(s)" + } + if len(e) == 1 { + return "1 error occurred during loading: " + e[0].Error() + } + buf := make([]string, len(e)) + for i := range buf { + buf[i] = e[i].Error() + } + return fmt.Sprintf("%v errors occurred during loading:\n", len(e)) + strings.Join(buf, "\n") +} + +func (e *Errors) add(err error) { + if errs, ok := err.(ast.Errors); ok { + for i := range errs { + *e = append(*e, errs[i]) + } + } else { + *e = append(*e, err) + } +} + +type unsupportedDocumentType string + +func (path unsupportedDocumentType) Error() string { + return string(path) + ": document must be of type object" +} + +type unrecognizedFile string + +func (path unrecognizedFile) Error() string { + return string(path) + ": can't recognize file type" +} + +func isUnrecognizedFile(err error) bool { + _, ok := err.(unrecognizedFile) + return ok +} + +type mergeError string + +func (e mergeError) Error() string { + return string(e) + ": merge error" +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/loader/extension/extension.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/loader/extension/extension.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/loader/extension/extension.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/loader/extension/extension.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/loader/filter/filter.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/loader/filter/filter.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/loader/filter/filter.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/loader/filter/filter.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go new file mode 100644 index 000000000..5e2217473 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/loader/loader.go @@ -0,0 +1,834 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package loader contains utilities for loading files into OPA. +package loader + +import ( + "bytes" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + + "sigs.k8s.io/yaml" + + fileurl "github.com/open-policy-agent/opa/internal/file/url" + "github.com/open-policy-agent/opa/internal/merge" + "github.com/open-policy-agent/opa/v1/ast" + astJSON "github.com/open-policy-agent/opa/v1/ast/json" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/loader/filter" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/inmem" + "github.com/open-policy-agent/opa/v1/util" +) + +// Result represents the result of successfully loading zero or more files. +type Result struct { + Documents map[string]interface{} + Modules map[string]*RegoFile + path []string +} + +// ParsedModules returns the parsed modules stored on the result. +func (l *Result) ParsedModules() map[string]*ast.Module { + modules := make(map[string]*ast.Module) + for _, module := range l.Modules { + modules[module.Name] = module.Parsed + } + return modules +} + +// Compiler returns a Compiler object with the compiled modules from this loader +// result. 
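+// Editorial usage sketch, not part of the upstream file: load some policy
+// paths with the FileLoader defined below and compile the parsed modules;
+// the "policies/" path is a made-up example.
+//
+//	result, err := loader.NewFileLoader().Filtered([]string{"policies/"}, nil)
+//	if err != nil {
+//		return err
+//	}
+//	compiler, err := result.Compiler()
+//	if err != nil {
+//		return err
+//	}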
+func (l *Result) Compiler() (*ast.Compiler, error) { + compiler := ast.NewCompiler() + compiler.Compile(l.ParsedModules()) + if compiler.Failed() { + return nil, compiler.Errors + } + return compiler, nil +} + +// Store returns a Store object with the documents from this loader result. +func (l *Result) Store() (storage.Store, error) { + return l.StoreWithOpts() +} + +// StoreWithOpts returns a Store object with the documents from this loader result, +// instantiated with the passed options. +func (l *Result) StoreWithOpts(opts ...inmem.Opt) (storage.Store, error) { + return inmem.NewFromObjectWithOpts(l.Documents, opts...), nil +} + +// RegoFile represents the result of loading a single Rego source file. +type RegoFile struct { + Name string + Parsed *ast.Module + Raw []byte +} + +// Filter defines the interface for filtering files during loading. If the +// filter returns true, the file should be excluded from the result. +type Filter = filter.LoaderFilter + +// GlobExcludeName excludes files and directories whose names do not match the +// shell style pattern at minDepth or greater. +func GlobExcludeName(pattern string, minDepth int) Filter { + return func(_ string, info fs.FileInfo, depth int) bool { + match, _ := filepath.Match(pattern, info.Name()) + return match && depth >= minDepth + } +} + +// FileLoader defines an interface for loading OPA data files +// and Rego policies. +type FileLoader interface { + All(paths []string) (*Result, error) + Filtered(paths []string, filter Filter) (*Result, error) + AsBundle(path string) (*bundle.Bundle, error) + WithReader(io.Reader) FileLoader + WithFS(fs.FS) FileLoader + WithMetrics(metrics.Metrics) FileLoader + WithFilter(Filter) FileLoader + WithBundleVerificationConfig(*bundle.VerificationConfig) FileLoader + WithSkipBundleVerification(bool) FileLoader + WithProcessAnnotation(bool) FileLoader + WithCapabilities(*ast.Capabilities) FileLoader + // Deprecated: Use SetOptions in the json package instead, where a longer description + // of why this is deprecated also can be found. + WithJSONOptions(*astJSON.Options) FileLoader + WithRegoVersion(ast.RegoVersion) FileLoader + WithFollowSymlinks(bool) FileLoader +} + +// NewFileLoader returns a new FileLoader instance. +func NewFileLoader() FileLoader { + return &fileLoader{ + metrics: metrics.New(), + files: make(map[string]bundle.FileInfo), + } +} + +type fileLoader struct { + metrics metrics.Metrics + filter Filter + bvc *bundle.VerificationConfig + skipVerify bool + files map[string]bundle.FileInfo + opts ast.ParserOptions + fsys fs.FS + reader io.Reader + followSymlinks bool +} + +// WithFS provides an fs.FS to use for loading files. You can pass nil to +// use plain IO calls (e.g. os.Open, os.Stat, etc.), this is the default +// behaviour. +func (fl *fileLoader) WithFS(fsys fs.FS) FileLoader { + fl.fsys = fsys + return fl +} + +// WithReader provides an io.Reader to use for loading the bundle tarball. +// An io.Reader passed via WithReader takes precedence over an fs.FS passed +// via WithFS. 
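+// Editorial usage sketch, not part of the upstream file: load a bundle tarball
+// through a reader; "bundle.tar.gz" is a made-up path.
+//
+//	f, err := os.Open("bundle.tar.gz")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	b, err := loader.NewFileLoader().WithReader(f).AsBundle("bundle.tar.gz")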
+func (fl *fileLoader) WithReader(rdr io.Reader) FileLoader { + fl.reader = rdr + return fl +} + +// WithMetrics provides the metrics instance to use while loading +func (fl *fileLoader) WithMetrics(m metrics.Metrics) FileLoader { + fl.metrics = m + return fl +} + +// WithFilter specifies the filter object to use to filter files while loading +func (fl *fileLoader) WithFilter(filter Filter) FileLoader { + fl.filter = filter + return fl +} + +// WithBundleVerificationConfig sets the key configuration used to verify a signed bundle +func (fl *fileLoader) WithBundleVerificationConfig(config *bundle.VerificationConfig) FileLoader { + fl.bvc = config + return fl +} + +// WithSkipBundleVerification skips verification of a signed bundle +func (fl *fileLoader) WithSkipBundleVerification(skipVerify bool) FileLoader { + fl.skipVerify = skipVerify + return fl +} + +// WithProcessAnnotation enables or disables processing of schema annotations on rules +func (fl *fileLoader) WithProcessAnnotation(processAnnotation bool) FileLoader { + fl.opts.ProcessAnnotation = processAnnotation + return fl +} + +// WithCapabilities sets the supported capabilities when loading the files +func (fl *fileLoader) WithCapabilities(caps *ast.Capabilities) FileLoader { + fl.opts.Capabilities = caps + return fl +} + +// WithJSONOptions sets the JSON options on the parser (now a no-op). +// +// Deprecated: Use SetOptions in the json package instead, where a longer description +// of why this is deprecated also can be found. +func (fl *fileLoader) WithJSONOptions(*astJSON.Options) FileLoader { + return fl +} + +// WithRegoVersion sets the ast.RegoVersion to use when parsing and compiling modules. +func (fl *fileLoader) WithRegoVersion(version ast.RegoVersion) FileLoader { + fl.opts.RegoVersion = version + return fl +} + +// WithFollowSymlinks enables or disables following symlinks when loading files +func (fl *fileLoader) WithFollowSymlinks(followSymlinks bool) FileLoader { + fl.followSymlinks = followSymlinks + return fl +} + +// All returns a Result object loaded (recursively) from the specified paths. +func (fl fileLoader) All(paths []string) (*Result, error) { + return fl.Filtered(paths, nil) +} + +// Filtered returns a Result object loaded (recursively) from the specified +// paths while applying the given filters. If any filter returns true, the +// file/directory is excluded. +func (fl fileLoader) Filtered(paths []string, filter Filter) (*Result, error) { + return all(fl.fsys, paths, filter, func(curr *Result, path string, depth int) error { + + var ( + bs []byte + err error + ) + if fl.fsys != nil { + bs, err = fs.ReadFile(fl.fsys, path) + } else { + bs, err = os.ReadFile(path) + } + if err != nil { + return err + } + + result, err := loadKnownTypes(path, bs, fl.metrics, fl.opts) + if err != nil { + if !isUnrecognizedFile(err) { + return err + } + if depth > 0 { + return nil + } + result, err = loadFileForAnyType(path, bs, fl.metrics, fl.opts) + if err != nil { + return err + } + } + + return curr.merge(path, result) + }) +} + +// AsBundle loads a path as a bundle. If it is a single file +// it will be treated as a normal tarball bundle. If a directory +// is supplied it will be loaded as an unzipped bundle tree. 
+func (fl fileLoader) AsBundle(path string) (*bundle.Bundle, error) { + path, err := fileurl.Clean(path) + if err != nil { + return nil, err + } + + if err := checkForUNCPath(path); err != nil { + return nil, err + } + + var bundleLoader bundle.DirectoryLoader + var isDir bool + if fl.reader != nil { + bundleLoader = bundle.NewTarballLoaderWithBaseURL(fl.reader, path).WithFilter(fl.filter) + } else { + bundleLoader, isDir, err = GetBundleDirectoryLoaderFS(fl.fsys, path, fl.filter) + } + + if err != nil { + return nil, err + } + bundleLoader = bundleLoader.WithFollowSymlinks(fl.followSymlinks) + + br := bundle.NewCustomReader(bundleLoader). + WithMetrics(fl.metrics). + WithBundleVerificationConfig(fl.bvc). + WithSkipBundleVerification(fl.skipVerify). + WithProcessAnnotations(fl.opts.ProcessAnnotation). + WithCapabilities(fl.opts.Capabilities). + WithFollowSymlinks(fl.followSymlinks). + WithRegoVersion(fl.opts.RegoVersion) + + // For bundle directories add the full path in front of module file names + // to simplify debugging. + if isDir { + br.WithBaseDir(path) + } + + b, err := br.Read() + if err != nil { + err = fmt.Errorf("bundle %s: %w", path, err) + } + + return &b, err +} + +// GetBundleDirectoryLoader returns a bundle directory loader which can be used to load +// files in the directory +func GetBundleDirectoryLoader(path string) (bundle.DirectoryLoader, bool, error) { + return GetBundleDirectoryLoaderFS(nil, path, nil) +} + +// GetBundleDirectoryLoaderWithFilter returns a bundle directory loader which can be used to load +// files in the directory after applying the given filter. +func GetBundleDirectoryLoaderWithFilter(path string, filter Filter) (bundle.DirectoryLoader, bool, error) { + return GetBundleDirectoryLoaderFS(nil, path, filter) +} + +// GetBundleDirectoryLoaderFS returns a bundle directory loader which can be used to load +// files in the directory. +func GetBundleDirectoryLoaderFS(fsys fs.FS, path string, filter Filter) (bundle.DirectoryLoader, bool, error) { + path, err := fileurl.Clean(path) + if err != nil { + return nil, false, err + } + + if err := checkForUNCPath(path); err != nil { + return nil, false, err + } + + var fi fs.FileInfo + if fsys != nil { + fi, err = fs.Stat(fsys, path) + } else { + fi, err = os.Stat(path) + } + if err != nil { + return nil, false, fmt.Errorf("error reading %q: %s", path, err) + } + + var bundleLoader bundle.DirectoryLoader + if fi.IsDir() { + if fsys != nil { + bundleLoader = bundle.NewFSLoaderWithRoot(fsys, path) + } else { + bundleLoader = bundle.NewDirectoryLoader(path) + } + } else { + var fh fs.File + if fsys != nil { + fh, err = fsys.Open(path) + } else { + fh, err = os.Open(path) + } + if err != nil { + return nil, false, err + } + bundleLoader = bundle.NewTarballLoaderWithBaseURL(fh, path) + } + + if filter != nil { + bundleLoader = bundleLoader.WithFilter(filter) + } + return bundleLoader, fi.IsDir(), nil +} + +// FilteredPaths is the same as FilterPathsFS using the current diretory file +// system +func FilteredPaths(paths []string, filter Filter) ([]string, error) { + return FilteredPathsFS(nil, paths, filter) +} + +// FilteredPathsFS return a list of files from the specified +// paths while applying the given filters. If any filter returns true, the +// file/directory is excluded. 
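+// Editorial usage sketch, not part of the upstream file: collect only .rego
+// file paths under a made-up "policies" directory by excluding everything else
+// (the filter mirrors the one used by AllRegos below).
+//
+//	paths, err := loader.FilteredPaths([]string{"policies"}, func(_ string, info os.FileInfo, _ int) bool {
+//		return !info.IsDir() && !strings.HasSuffix(info.Name(), ".rego")
+//	})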
+func FilteredPathsFS(fsys fs.FS, paths []string, filter Filter) ([]string, error) { + result := []string{} + + _, err := all(fsys, paths, filter, func(_ *Result, path string, _ int) error { + result = append(result, path) + return nil + }) + if err != nil { + return nil, err + } + return result, nil +} + +// Schemas loads a schema set from the specified file path. +func Schemas(schemaPath string) (*ast.SchemaSet, error) { + + var errs Errors + ss, err := loadSchemas(schemaPath) + if err != nil { + errs.add(err) + return nil, errs + } + + return ss, nil +} + +func loadSchemas(schemaPath string) (*ast.SchemaSet, error) { + + if schemaPath == "" { + return nil, nil + } + + ss := ast.NewSchemaSet() + path, err := fileurl.Clean(schemaPath) + if err != nil { + return nil, err + } + + info, err := os.Stat(path) + if err != nil { + return nil, err + } + + // Handle single file case. + if !info.IsDir() { + schema, err := loadOneSchema(path) + if err != nil { + return nil, err + } + ss.Put(ast.SchemaRootRef, schema) + return ss, nil + + } + + // Handle directory case. + rootDir := path + + err = filepath.Walk(path, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } else if info.IsDir() { + return nil + } + + schema, err := loadOneSchema(path) + if err != nil { + return err + } + + relPath, err := filepath.Rel(rootDir, path) + if err != nil { + return err + } + + key := getSchemaSetByPathKey(relPath) + ss.Put(key, schema) + return nil + }) + + if err != nil { + return nil, err + } + + return ss, nil +} + +func getSchemaSetByPathKey(path string) ast.Ref { + + front := filepath.Dir(path) + last := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) + + var parts []string + + if front != "." { + parts = append(strings.Split(filepath.ToSlash(front), "/"), last) + } else { + parts = []string{last} + } + + key := make(ast.Ref, 1+len(parts)) + key[0] = ast.SchemaRootDocument + for i := range parts { + key[i+1] = ast.StringTerm(parts[i]) + } + + return key +} + +func loadOneSchema(path string) (interface{}, error) { + bs, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var schema interface{} + if err := util.Unmarshal(bs, &schema); err != nil { + return nil, fmt.Errorf("%s: %w", path, err) + } + + return schema, nil +} + +// All returns a Result object loaded (recursively) from the specified paths. +// Deprecated: Use FileLoader.Filtered() instead. +func All(paths []string) (*Result, error) { + return NewFileLoader().Filtered(paths, nil) +} + +// Filtered returns a Result object loaded (recursively) from the specified +// paths while applying the given filters. If any filter returns true, the +// file/directory is excluded. +// Deprecated: Use FileLoader.Filtered() instead. +func Filtered(paths []string, filter Filter) (*Result, error) { + return NewFileLoader().Filtered(paths, filter) +} + +// AsBundle loads a path as a bundle. If it is a single file +// it will be treated as a normal tarball bundle. If a directory +// is supplied it will be loaded as an unzipped bundle tree. +// Deprecated: Use FileLoader.AsBundle() instead. +func AsBundle(path string) (*bundle.Bundle, error) { + return NewFileLoader().AsBundle(path) +} + +// AllRegos returns a Result object loaded (recursively) with all Rego source +// files from the specified paths. 
+func AllRegos(paths []string) (*Result, error) { + return NewFileLoader().Filtered(paths, func(_ string, info os.FileInfo, _ int) bool { + return !info.IsDir() && !strings.HasSuffix(info.Name(), bundle.RegoExt) + }) +} + +// Rego is deprecated. Use RegoWithOpts instead. +func Rego(path string) (*RegoFile, error) { + return RegoWithOpts(path, ast.ParserOptions{}) +} + +// RegoWithOpts returns a RegoFile object loaded from the given path. +func RegoWithOpts(path string, opts ast.ParserOptions) (*RegoFile, error) { + path, err := fileurl.Clean(path) + if err != nil { + return nil, err + } + bs, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return loadRego(path, bs, metrics.New(), opts) +} + +// CleanPath returns the normalized version of a path that can be used as an identifier. +func CleanPath(path string) string { + return strings.Trim(path, "/") +} + +// Paths returns a sorted list of files contained at path. If recurse is true +// and path is a directory, then Paths will walk the directory structure +// recursively and list files at each level. +func Paths(path string, recurse bool) (paths []string, err error) { + path, err = fileurl.Clean(path) + if err != nil { + return nil, err + } + err = filepath.Walk(path, func(f string, _ os.FileInfo, _ error) error { + if !recurse { + if path != f && path != filepath.Dir(f) { + return filepath.SkipDir + } + } + paths = append(paths, f) + return nil + }) + return paths, err +} + +// Dirs resolves filepaths to directories. It will return a list of unique +// directories. +func Dirs(paths []string) []string { + unique := map[string]struct{}{} + + for _, path := range paths { + // TODO: /dir/dir will register top level directory /dir + dir := filepath.Dir(path) + unique[dir] = struct{}{} + } + + return util.KeysSorted(unique) +} + +// SplitPrefix returns a tuple specifying the document prefix and the file +// path. +func SplitPrefix(path string) ([]string, string) { + // Non-prefixed URLs can be returned without modification and their contents + // can be rooted directly under data. 
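+	// Editorial example, not upstream commentary: "servers.schema:/path/to/data.json"
+	// splits into prefix ["servers", "schema"] and path "/path/to/data.json", so the
+	// loaded document is rooted under data.servers.schema, while a URL such as
+	// "https://example.com/data.json" is returned unchanged because its first colon
+	// belongs to the scheme separator "://".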
+ if strings.Index(path, "://") == strings.Index(path, ":") { + return nil, path + } + parts := strings.SplitN(path, ":", 2) + if len(parts) == 2 && len(parts[0]) > 0 { + return strings.Split(parts[0], "."), parts[1] + } + return nil, path +} + +func (l *Result) merge(path string, result interface{}) error { + switch result := result.(type) { + case bundle.Bundle: + for _, module := range result.Modules { + l.Modules[module.Path] = &RegoFile{ + Name: module.Path, + Parsed: module.Parsed, + Raw: module.Raw, + } + } + return l.mergeDocument(path, result.Data) + case *RegoFile: + l.Modules[CleanPath(path)] = result + return nil + default: + return l.mergeDocument(path, result) + } +} + +func (l *Result) mergeDocument(path string, doc interface{}) error { + obj, ok := makeDir(l.path, doc) + if !ok { + return unsupportedDocumentType(path) + } + merged, ok := merge.InterfaceMaps(l.Documents, obj) + if !ok { + return mergeError(path) + } + for k := range merged { + l.Documents[k] = merged[k] + } + return nil +} + +func (l *Result) withParent(p string) *Result { + path := append(l.path, p) + return &Result{ + Documents: l.Documents, + Modules: l.Modules, + path: path, + } +} + +func newResult() *Result { + return &Result{ + Documents: map[string]interface{}{}, + Modules: map[string]*RegoFile{}, + } +} + +func all(fsys fs.FS, paths []string, filter Filter, f func(*Result, string, int) error) (*Result, error) { + errs := Errors{} + root := newResult() + + for _, path := range paths { + + // Paths can be prefixed with a string that specifies where content should be + // loaded under data. E.g., foo.bar:/path/to/some.json will load the content + // of some.json under {"foo": {"bar": ...}}. + loaded := root + prefix, path := SplitPrefix(path) + if len(prefix) > 0 { + for _, part := range prefix { + loaded = loaded.withParent(part) + } + } + + allRec(fsys, path, filter, &errs, loaded, 0, f) + } + + if len(errs) > 0 { + return nil, errs + } + + return root, nil +} + +func allRec(fsys fs.FS, path string, filter Filter, errors *Errors, loaded *Result, depth int, f func(*Result, string, int) error) { + + path, err := fileurl.Clean(path) + if err != nil { + errors.add(err) + return + } + + if err := checkForUNCPath(path); err != nil { + errors.add(err) + return + } + + var info fs.FileInfo + if fsys != nil { + info, err = fs.Stat(fsys, path) + } else { + info, err = os.Stat(path) + } + + if err != nil { + errors.add(err) + return + } + + if filter != nil && filter(path, info, depth) { + return + } + + if !info.IsDir() { + if err := f(loaded, path, depth); err != nil { + errors.add(err) + } + return + } + + // If we are recursing on directories then content must be loaded under path + // specified by directory hierarchy. 
+ if depth > 0 { + loaded = loaded.withParent(info.Name()) + } + + var files []fs.DirEntry + if fsys != nil { + files, err = fs.ReadDir(fsys, path) + } else { + files, err = os.ReadDir(path) + } + if err != nil { + errors.add(err) + return + } + + for _, file := range files { + allRec(fsys, filepath.Join(path, file.Name()), filter, errors, loaded, depth+1, f) + } +} + +func loadKnownTypes(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) { + switch filepath.Ext(path) { + case ".json": + return loadJSON(path, bs, m) + case ".rego": + return loadRego(path, bs, m, opts) + case ".yaml", ".yml": + return loadYAML(path, bs, m) + default: + if strings.HasSuffix(path, ".tar.gz") { + r, err := loadBundleFile(path, bs, m, opts) + if err != nil { + err = fmt.Errorf("bundle %s: %w", path, err) + } + return r, err + } + } + return nil, unrecognizedFile(path) +} + +func loadFileForAnyType(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (interface{}, error) { + module, err := loadRego(path, bs, m, opts) + if err == nil { + return module, nil + } + doc, err := loadJSON(path, bs, m) + if err == nil { + return doc, nil + } + doc, err = loadYAML(path, bs, m) + if err == nil { + return doc, nil + } + return nil, unrecognizedFile(path) +} + +func loadBundleFile(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (bundle.Bundle, error) { + tl := bundle.NewTarballLoaderWithBaseURL(bytes.NewBuffer(bs), path) + br := bundle.NewCustomReader(tl). + WithRegoVersion(opts.RegoVersion). + WithCapabilities(opts.Capabilities). + WithProcessAnnotations(opts.ProcessAnnotation). + WithMetrics(m). + WithSkipBundleVerification(true). + IncludeManifestInData(true) + return br.Read() +} + +func loadRego(path string, bs []byte, m metrics.Metrics, opts ast.ParserOptions) (*RegoFile, error) { + m.Timer(metrics.RegoModuleParse).Start() + var module *ast.Module + var err error + module, err = ast.ParseModuleWithOpts(path, string(bs), opts) + m.Timer(metrics.RegoModuleParse).Stop() + if err != nil { + return nil, err + } + result := &RegoFile{ + Name: path, + Parsed: module, + Raw: bs, + } + return result, nil +} + +func loadJSON(path string, bs []byte, m metrics.Metrics) (interface{}, error) { + m.Timer(metrics.RegoDataParse).Start() + var x interface{} + err := util.UnmarshalJSON(bs, &x) + m.Timer(metrics.RegoDataParse).Stop() + + if err != nil { + return nil, fmt.Errorf("%s: %w", path, err) + } + return x, nil +} + +func loadYAML(path string, bs []byte, m metrics.Metrics) (interface{}, error) { + m.Timer(metrics.RegoDataParse).Start() + bs, err := yaml.YAMLToJSON(bs) + m.Timer(metrics.RegoDataParse).Stop() + if err != nil { + return nil, fmt.Errorf("%v: error converting YAML to JSON: %v", path, err) + } + return loadJSON(path, bs, m) +} + +func makeDir(path []string, x interface{}) (map[string]interface{}, bool) { + if len(path) == 0 { + obj, ok := x.(map[string]interface{}) + if !ok { + return nil, false + } + return obj, true + } + return makeDir(path[:len(path)-1], map[string]interface{}{path[len(path)-1]: x}) +} + +// isUNC reports whether path is a UNC path. 
+func isUNC(path string) bool { + return len(path) > 1 && isSlash(path[0]) && isSlash(path[1]) +} + +func isSlash(c uint8) bool { + return c == '\\' || c == '/' +} + +func checkForUNCPath(path string) error { + if isUNC(path) { + return fmt.Errorf("UNC path read is not allowed: %s", path) + } + return nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/logging/logging.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/logging/logging.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/logging/logging.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/metrics/metrics.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/metrics/metrics.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/metrics/metrics.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go new file mode 100644 index 000000000..c9b99ab28 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/plugins.go @@ -0,0 +1,1113 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package plugins implements plugin management for the policy engine. +package plugins + +import ( + "context" + "errors" + "fmt" + mr "math/rand" + "sync" + "time" + + "github.com/open-policy-agent/opa/internal/report" + "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel/sdk/trace" + + "github.com/gorilla/mux" + + bundleUtils "github.com/open-policy-agent/opa/internal/bundle" + cfg "github.com/open-policy-agent/opa/internal/config" + initload "github.com/open-policy-agent/opa/internal/runtime/init" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/config" + "github.com/open-policy-agent/opa/v1/hooks" + "github.com/open-policy-agent/opa/v1/keys" + "github.com/open-policy-agent/opa/v1/loader" + "github.com/open-policy-agent/opa/v1/logging" + "github.com/open-policy-agent/opa/v1/plugins/rest" + "github.com/open-policy-agent/opa/v1/resolver/wasm" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" +) + +// Factory defines the interface OPA uses to instantiate your plugin. +// +// When OPA processes it's configuration it looks for factories that +// have been registered by calling runtime.RegisterPlugin. Factories +// are registered to a name which is used to key into the +// configuration blob. If your plugin has not been configured, your +// factory will not be invoked. +// +// plugins: +// my_plugin1: +// some_key: foo +// # my_plugin2: +// # some_key2: bar +// +// If OPA was started with the configuration above and received two +// calls to runtime.RegisterPlugins (one with NAME "my_plugin1" and +// one with NAME "my_plugin2"), it would only invoke the factory for +// for my_plugin1. +// +// OPA instantiates and reconfigures plugins in two steps. First, OPA +// will call Validate to check the configuration. 
Assuming the +// configuration is valid, your factory should return a configuration +// value that can be used to construct your plugin. Second, OPA will +// call New to instantiate your plugin providing the configuration +// value returned from the Validate call. +// +// Validate receives a slice of bytes representing plugin +// configuration and returns a configuration value that can be used to +// instantiate your plugin. The manager is provided to give access to +// the OPA's compiler, storage layer, and global configuration. Your +// Validate function will typically: +// +// 1. Deserialize the raw config bytes +// 2. Validate the deserialized config for semantic errors +// 3. Inject default values +// 4. Return a deserialized/parsed config +// +// New receives a valid configuration for your plugin and returns a +// plugin object. Your New function will typically: +// +// 1. Cast the config value to it's own type +// 2. Instantiate a plugin object +// 3. Return the plugin object +// 4. Update status via `plugins.Manager#UpdatePluginStatus` +// +// After a plugin has been created subsequent status updates can be +// send anytime the plugin enters a ready or error state. +type Factory interface { + Validate(manager *Manager, config []byte) (interface{}, error) + New(manager *Manager, config interface{}) Plugin +} + +// Plugin defines the interface OPA uses to manage your plugin. +// +// When OPA starts it will start all of the plugins it was configured +// to instantiate. Each time a new plugin is configured (via +// discovery), OPA will start it. You can use the Start call to spawn +// additional goroutines or perform initialization tasks. +// +// Currently OPA will not call Stop on plugins. +// +// When OPA receives new configuration for your plugin via discovery +// it will first Validate the configuration using your factory and +// then call Reconfigure. +type Plugin interface { + Start(ctx context.Context) error + Stop(ctx context.Context) + Reconfigure(ctx context.Context, config interface{}) +} + +// Triggerable defines the interface plugins use for manual plugin triggers. +type Triggerable interface { + Trigger(context.Context) error +} + +// State defines the state that a Plugin instance is currently +// in with pre-defined states. +type State string + +const ( + // StateNotReady indicates that the Plugin is not in an error state, but isn't + // ready for normal operation yet. This should only happen at + // initialization time. + StateNotReady State = "NOT_READY" + + // StateOK signifies that the Plugin is operating normally. + StateOK State = "OK" + + // StateErr indicates that the Plugin is in an error state and should not + // be considered as functional. + StateErr State = "ERROR" + + // StateWarn indicates the Plugin is operating, but in a potentially dangerous or + // degraded state. It may be used to indicate manual remediation is needed, or to + // alert admins of some other noteworthy state. + StateWarn State = "WARN" +) + +// TriggerMode defines the trigger mode utilized by a Plugin for bundle download, +// log upload etc. 
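+// For example (editorial note; the config shape is an assumption based on OPA's
+// documented bundle settings, not taken from this file), a bundle source
+// configured with
+//
+//	bundles:
+//	  authz:
+//	    service: example
+//	    trigger: manual
+//
+// is only downloaded when its Trigger method is called, rather than on a
+// periodic polling interval.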
+type TriggerMode string + +const ( + // TriggerPeriodic represents periodic polling mechanism + TriggerPeriodic TriggerMode = "periodic" + + // TriggerManual represents manual triggering mechanism + TriggerManual TriggerMode = "manual" + + // DefaultTriggerMode represents default trigger mechanism + DefaultTriggerMode TriggerMode = "periodic" +) + +// default interval between OPA report uploads +var defaultUploadIntervalSec = int64(3600) + +// Status has a Plugin's current status plus an optional Message. +type Status struct { + State State `json:"state"` + Message string `json:"message,omitempty"` +} + +func (s *Status) String() string { + return fmt.Sprintf("{%v %q}", s.State, s.Message) +} + +func (s *Status) Equal(other *Status) bool { + if s == nil || other == nil { + return s == nil && other == nil + } + + return s.State == other.State && s.Message == other.Message +} + +// StatusListener defines a handler to register for status updates. +type StatusListener func(status map[string]*Status) + +// Manager implements lifecycle management of plugins and gives plugins access +// to engine-wide components like storage. +type Manager struct { + Store storage.Store + Config *config.Config + Info *ast.Term + ID string + + compiler *ast.Compiler + compilerMux sync.RWMutex + wasmResolvers []*wasm.Resolver + wasmResolversMtx sync.RWMutex + services map[string]rest.Client + keys map[string]*keys.Config + plugins []namedplugin + registeredTriggers []func(storage.Transaction) + mtx sync.Mutex + pluginStatus map[string]*Status + pluginStatusListeners map[string]StatusListener + initBundles map[string]*bundle.Bundle + initFiles loader.Result + maxErrors int + initialized bool + interQueryBuiltinCacheConfig *cache.Config + gracefulShutdownPeriod int + registeredCacheTriggers []func(*cache.Config) + logger logging.Logger + consoleLogger logging.Logger + serverInitialized chan struct{} + serverInitializedOnce sync.Once + printHook print.Hook + enablePrintStatements bool + router *mux.Router + prometheusRegister prometheus.Registerer + tracerProvider *trace.TracerProvider + distributedTacingOpts tracing.Options + registeredNDCacheTriggers []func(bool) + registeredTelemetryGatherers map[string]report.Gatherer + bootstrapConfigLabels map[string]string + hooks hooks.Hooks + enableTelemetry bool + reporter *report.Reporter + opaReportNotifyCh chan struct{} + stop chan chan struct{} + parserOptions ast.ParserOptions +} + +type managerContextKey string +type managerWasmResolverKey string + +const managerCompilerContextKey = managerContextKey("compiler") +const managerWasmResolverContextKey = managerWasmResolverKey("wasmResolvers") + +// SetCompilerOnContext puts the compiler into the storage context. Calling this +// function before committing updated policies to storage allows the manager to +// skip parsing and compiling of modules. Instead, the manager will use the +// compiler that was stored on the context. +func SetCompilerOnContext(context *storage.Context, compiler *ast.Compiler) { + context.Put(managerCompilerContextKey, compiler) +} + +// GetCompilerOnContext gets the compiler cached on the storage context. +func GetCompilerOnContext(context *storage.Context) *ast.Compiler { + compiler, ok := context.Get(managerCompilerContextKey).(*ast.Compiler) + if !ok { + return nil + } + return compiler +} + +// SetWasmResolversOnContext puts a set of Wasm Resolvers into the storage +// context. 
Calling this function before committing updated wasm modules to +// storage allows the manager to skip initializing modules before using them. +// Instead, the manager will use the compiler that was stored on the context. +func SetWasmResolversOnContext(context *storage.Context, rs []*wasm.Resolver) { + context.Put(managerWasmResolverContextKey, rs) +} + +// getWasmResolversOnContext gets the resolvers cached on the storage context. +func getWasmResolversOnContext(context *storage.Context) []*wasm.Resolver { + resolvers, ok := context.Get(managerWasmResolverContextKey).([]*wasm.Resolver) + if !ok { + return nil + } + return resolvers +} + +func validateTriggerMode(mode TriggerMode) error { + switch mode { + case TriggerPeriodic, TriggerManual: + return nil + default: + return fmt.Errorf("invalid trigger mode %q (want %q or %q)", mode, TriggerPeriodic, TriggerManual) + } +} + +// ValidateAndInjectDefaultsForTriggerMode validates the trigger mode and injects default values +func ValidateAndInjectDefaultsForTriggerMode(a, b *TriggerMode) (*TriggerMode, error) { + + if a == nil && b != nil { + err := validateTriggerMode(*b) + if err != nil { + return nil, err + } + return b, nil + } else if a != nil && b == nil { + err := validateTriggerMode(*a) + if err != nil { + return nil, err + } + return a, nil + } else if a != nil && b != nil { + if *a != *b { + return nil, fmt.Errorf("trigger mode mismatch: %s and %s (hint: check discovery configuration)", *a, *b) + } + err := validateTriggerMode(*a) + if err != nil { + return nil, err + } + return a, nil + } + + t := DefaultTriggerMode + return &t, nil +} + +type namedplugin struct { + name string + plugin Plugin +} + +// Info sets the runtime information on the manager. The runtime information is +// propagated to opa.runtime() built-in function calls. +func Info(term *ast.Term) func(*Manager) { + return func(m *Manager) { + m.Info = term + } +} + +// InitBundles provides the initial set of bundles to load. +func InitBundles(b map[string]*bundle.Bundle) func(*Manager) { + return func(m *Manager) { + m.initBundles = b + } +} + +// InitFiles provides the initial set of other data/policy files to load. +func InitFiles(f loader.Result) func(*Manager) { + return func(m *Manager) { + m.initFiles = f + } +} + +// MaxErrors sets the error limit for the manager's shared compiler. +func MaxErrors(n int) func(*Manager) { + return func(m *Manager) { + m.maxErrors = n + } +} + +// GracefulShutdownPeriod passes the configured graceful shutdown period to plugins +func GracefulShutdownPeriod(gracefulShutdownPeriod int) func(*Manager) { + return func(m *Manager) { + m.gracefulShutdownPeriod = gracefulShutdownPeriod + } +} + +// Logger configures the passed logger on the plugin manager (useful to +// configure default fields) +func Logger(logger logging.Logger) func(*Manager) { + return func(m *Manager) { + m.logger = logger + } +} + +// ConsoleLogger sets the passed logger to be used by plugins that are +// configured with console logging enabled. 
+func ConsoleLogger(logger logging.Logger) func(*Manager) { + return func(m *Manager) { + m.consoleLogger = logger + } +} + +func EnablePrintStatements(yes bool) func(*Manager) { + return func(m *Manager) { + m.enablePrintStatements = yes + } +} + +func PrintHook(h print.Hook) func(*Manager) { + return func(m *Manager) { + m.printHook = h + } +} + +func WithRouter(r *mux.Router) func(*Manager) { + return func(m *Manager) { + m.router = r + } +} + +// WithPrometheusRegister sets the passed prometheus.Registerer to be used by plugins +func WithPrometheusRegister(prometheusRegister prometheus.Registerer) func(*Manager) { + return func(m *Manager) { + m.prometheusRegister = prometheusRegister + } +} + +// WithTracerProvider sets the passed *trace.TracerProvider to be used by plugins +func WithTracerProvider(tracerProvider *trace.TracerProvider) func(*Manager) { + return func(m *Manager) { + m.tracerProvider = tracerProvider + } +} + +// WithDistributedTracingOpts sets the options to be used by distributed tracing. +func WithDistributedTracingOpts(tr tracing.Options) func(*Manager) { + return func(m *Manager) { + m.distributedTacingOpts = tr + } +} + +// WithHooks allows passing hooks to the plugin manager. +func WithHooks(hs hooks.Hooks) func(*Manager) { + return func(m *Manager) { + m.hooks = hs + } +} + +// WithParserOptions sets the parser options to be used by the plugin manager. +func WithParserOptions(opts ast.ParserOptions) func(*Manager) { + return func(m *Manager) { + m.parserOptions = opts + } +} + +// WithEnableTelemetry controls whether OPA will send telemetry reports to an external service. +func WithEnableTelemetry(enableTelemetry bool) func(*Manager) { + return func(m *Manager) { + m.enableTelemetry = enableTelemetry + } +} + +// WithTelemetryGatherers allows registration of telemetry gatherers which enable injection of additional data in the +// telemetry report +func WithTelemetryGatherers(gs map[string]report.Gatherer) func(*Manager) { + return func(m *Manager) { + m.registeredTelemetryGatherers = gs + } +} + +// New creates a new Manager using config. 
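+// Editorial usage sketch, not part of the upstream file: configBytes and ctx are
+// assumed to exist, the store comes from OPA's v1/storage/inmem package, and
+// Start initializes the manager if Init has not been called yet.
+//
+//	mgr, err := plugins.New(configBytes, "example-id", inmem.New(), plugins.Logger(logging.New()))
+//	if err != nil {
+//		return err
+//	}
+//	if err := mgr.Start(ctx); err != nil {
+//		return err
+//	}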
+func New(raw []byte, id string, store storage.Store, opts ...func(*Manager)) (*Manager, error) { + + parsedConfig, err := config.ParseConfig(raw, id) + if err != nil { + return nil, err + } + + m := &Manager{ + Store: store, + Config: parsedConfig, + ID: id, + pluginStatus: map[string]*Status{}, + pluginStatusListeners: map[string]StatusListener{}, + maxErrors: -1, + serverInitialized: make(chan struct{}), + bootstrapConfigLabels: parsedConfig.Labels, + } + + for _, f := range opts { + f(m) + } + + if m.logger == nil { + m.logger = logging.Get() + } + + if m.consoleLogger == nil { + m.consoleLogger = logging.New() + } + + m.hooks.Each(func(h hooks.Hook) { + if f, ok := h.(hooks.ConfigHook); ok { + if c, e := f.OnConfig(context.Background(), parsedConfig); e != nil { + err = errors.Join(err, e) + } else { + parsedConfig = c + } + } + }) + if err != nil { + return nil, err + } + + // do after options and overrides + m.keys, err = keys.ParseKeysConfig(parsedConfig.Keys) + if err != nil { + return nil, err + } + + m.interQueryBuiltinCacheConfig, err = cache.ParseCachingConfig(parsedConfig.Caching) + if err != nil { + return nil, err + } + + serviceOpts := cfg.ServiceOptions{ + Raw: parsedConfig.Services, + AuthPlugin: m.AuthPlugin, + Keys: m.keys, + Logger: m.logger, + DistributedTacingOpts: m.distributedTacingOpts, + } + + m.services, err = cfg.ParseServicesConfig(serviceOpts) + if err != nil { + return nil, err + } + + if m.enableTelemetry { + reporter, err := report.New(id, report.Options{Logger: m.logger}) + if err != nil { + return nil, err + } + m.reporter = reporter + + m.reporter.RegisterGatherer("min_compatible_version", func(_ context.Context) (any, error) { + var minimumCompatibleVersion string + if m.compiler != nil && m.compiler.Required != nil { + minimumCompatibleVersion, _ = m.compiler.Required.MinimumCompatibleVersion() + } + return minimumCompatibleVersion, nil + }) + + // register any additional gatherers + for k, g := range m.registeredTelemetryGatherers { + m.reporter.RegisterGatherer(k, g) + } + } + + return m, nil +} + +// Init returns an error if the manager could not initialize itself. Init() should +// be called before Start(). Init() is idempotent. +func (m *Manager) Init(ctx context.Context) error { + + if m.initialized { + return nil + } + + params := storage.TransactionParams{ + Write: true, + Context: storage.NewContext(), + } + + if m.enableTelemetry { + m.opaReportNotifyCh = make(chan struct{}) + m.stop = make(chan chan struct{}) + go m.sendOPAUpdateLoop(ctx) + } + + err := storage.Txn(ctx, m.Store, params, func(txn storage.Transaction) error { + + result, err := initload.InsertAndCompile(ctx, initload.InsertAndCompileOptions{ + Store: m.Store, + Txn: txn, + Files: m.initFiles, + Bundles: m.initBundles, + MaxErrors: m.maxErrors, + EnablePrintStatements: m.enablePrintStatements, + ParserOptions: m.parserOptions, + }) + + if err != nil { + return err + } + + SetCompilerOnContext(params.Context, result.Compiler) + + resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil) + if err != nil { + return err + } + SetWasmResolversOnContext(params.Context, resolvers) + + _, err = m.Store.Register(ctx, txn, storage.TriggerConfig{OnCommit: m.onCommit}) + return err + }) + + if err != nil { + if m.stop != nil { + done := make(chan struct{}) + m.stop <- done + <-done + } + + return err + } + + m.initialized = true + return nil +} + +// Labels returns the set of labels from the configuration. 
+func (m *Manager) Labels() map[string]string { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.Config.Labels +} + +// InterQueryBuiltinCacheConfig returns the configuration for the inter-query caches. +func (m *Manager) InterQueryBuiltinCacheConfig() *cache.Config { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.interQueryBuiltinCacheConfig +} + +// Register adds a plugin to the manager. When the manager is started, all of +// the plugins will be started. +func (m *Manager) Register(name string, plugin Plugin) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.plugins = append(m.plugins, namedplugin{ + name: name, + plugin: plugin, + }) + if _, ok := m.pluginStatus[name]; !ok { + m.pluginStatus[name] = &Status{State: StateNotReady} + } +} + +// Plugins returns the list of plugins registered with the manager. +func (m *Manager) Plugins() []string { + m.mtx.Lock() + defer m.mtx.Unlock() + result := make([]string, len(m.plugins)) + for i := range m.plugins { + result[i] = m.plugins[i].name + } + return result +} + +// Plugin returns the plugin registered with name or nil if name is not found. +func (m *Manager) Plugin(name string) Plugin { + m.mtx.Lock() + defer m.mtx.Unlock() + for i := range m.plugins { + if m.plugins[i].name == name { + return m.plugins[i].plugin + } + } + return nil +} + +// AuthPlugin returns the HTTPAuthPlugin registered with name or nil if name is not found. +func (m *Manager) AuthPlugin(name string) rest.HTTPAuthPlugin { + m.mtx.Lock() + defer m.mtx.Unlock() + for i := range m.plugins { + if m.plugins[i].name == name { + return m.plugins[i].plugin.(rest.HTTPAuthPlugin) + } + } + return nil +} + +// GetCompiler returns the manager's compiler. +func (m *Manager) GetCompiler() *ast.Compiler { + m.compilerMux.RLock() + defer m.compilerMux.RUnlock() + return m.compiler +} + +func (m *Manager) setCompiler(compiler *ast.Compiler) { + m.compilerMux.Lock() + defer m.compilerMux.Unlock() + m.compiler = compiler +} + +// GetRouter returns the managers router if set +func (m *Manager) GetRouter() *mux.Router { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.router +} + +// RegisterCompilerTrigger registers for change notifications when the compiler +// is changed. +func (m *Manager) RegisterCompilerTrigger(f func(storage.Transaction)) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.registeredTriggers = append(m.registeredTriggers, f) +} + +// GetWasmResolvers returns the manager's set of Wasm Resolvers. +func (m *Manager) GetWasmResolvers() []*wasm.Resolver { + m.wasmResolversMtx.RLock() + defer m.wasmResolversMtx.RUnlock() + return m.wasmResolvers +} + +func (m *Manager) setWasmResolvers(rs []*wasm.Resolver) { + m.wasmResolversMtx.Lock() + defer m.wasmResolversMtx.Unlock() + m.wasmResolvers = rs +} + +// Start starts the manager. Init() should be called once before Start(). +func (m *Manager) Start(ctx context.Context) error { + + if m == nil { + return nil + } + + if !m.initialized { + if err := m.Init(ctx); err != nil { + return err + } + } + + var toStart []Plugin + + func() { + m.mtx.Lock() + defer m.mtx.Unlock() + toStart = make([]Plugin, len(m.plugins)) + for i := range m.plugins { + toStart[i] = m.plugins[i].plugin + } + }() + + for i := range toStart { + if err := toStart[i].Start(ctx); err != nil { + return err + } + } + + return nil +} + +// Stop stops the manager, stopping all the plugins registered with it. +// Any plugin that needs to perform cleanup should do so within the duration +// of the graceful shutdown period passed with the context as a timeout. 
+// Note that a graceful shutdown period configured with the Manager instance +// will override the timeout of the passed in context (if applicable). +func (m *Manager) Stop(ctx context.Context) { + var toStop []Plugin + + func() { + m.mtx.Lock() + defer m.mtx.Unlock() + toStop = make([]Plugin, len(m.plugins)) + for i := range m.plugins { + toStop[i] = m.plugins[i].plugin + } + }() + + var cancel context.CancelFunc + if m.gracefulShutdownPeriod > 0 { + ctx, cancel = context.WithTimeout(ctx, time.Duration(m.gracefulShutdownPeriod)*time.Second) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + for i := range toStop { + toStop[i].Stop(ctx) + } + if c, ok := m.Store.(interface{ Close(context.Context) error }); ok { + if err := c.Close(ctx); err != nil { + m.logger.Error("Error closing store: %v", err) + } + } + + if m.stop != nil { + done := make(chan struct{}) + m.stop <- done + <-done + } +} + +// Reconfigure updates the configuration on the manager. +func (m *Manager) Reconfigure(config *config.Config) error { + opts := cfg.ServiceOptions{ + Raw: config.Services, + AuthPlugin: m.AuthPlugin, + Logger: m.logger, + DistributedTacingOpts: m.distributedTacingOpts, + } + + keys, err := keys.ParseKeysConfig(config.Keys) + if err != nil { + return err + } + opts.Keys = keys + + services, err := cfg.ParseServicesConfig(opts) + if err != nil { + return err + } + + interQueryBuiltinCacheConfig, err := cache.ParseCachingConfig(config.Caching) + if err != nil { + return err + } + + m.mtx.Lock() + defer m.mtx.Unlock() + + // don't overwrite existing labels, only allow additions - always based on the boostrap config + if config.Labels == nil { + config.Labels = m.bootstrapConfigLabels + } else { + for label, value := range m.bootstrapConfigLabels { + config.Labels[label] = value + } + } + + // don't erase persistence directory + if config.PersistenceDirectory == nil { + config.PersistenceDirectory = m.Config.PersistenceDirectory + } + + m.Config = config + m.interQueryBuiltinCacheConfig = interQueryBuiltinCacheConfig + for name, client := range services { + m.services[name] = client + } + + for name, key := range keys { + m.keys[name] = key + } + + for _, trigger := range m.registeredCacheTriggers { + trigger(interQueryBuiltinCacheConfig) + } + + for _, trigger := range m.registeredNDCacheTriggers { + trigger(config.NDBuiltinCache) + } + + return nil +} + +// PluginStatus returns the current statuses of any plugins registered. +func (m *Manager) PluginStatus() map[string]*Status { + m.mtx.Lock() + defer m.mtx.Unlock() + + return m.copyPluginStatus() +} + +// RegisterPluginStatusListener registers a StatusListener to be +// called when plugin status updates occur. +func (m *Manager) RegisterPluginStatusListener(name string, listener StatusListener) { + m.mtx.Lock() + defer m.mtx.Unlock() + + m.pluginStatusListeners[name] = listener +} + +// UnregisterPluginStatusListener removes a StatusListener registered with the +// same name. +func (m *Manager) UnregisterPluginStatusListener(name string) { + m.mtx.Lock() + defer m.mtx.Unlock() + + delete(m.pluginStatusListeners, name) +} + +// UpdatePluginStatus updates a named plugins status. Any registered +// listeners will be called with a copy of the new state of all +// plugins. 
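+// Editorial usage sketch, not part of the upstream file: a plugin registered as
+// "my_plugin1" (the made-up name from the Factory example above) reporting that
+// it is ready.
+//
+//	m.UpdatePluginStatus("my_plugin1", &plugins.Status{State: plugins.StateOK})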
+func (m *Manager) UpdatePluginStatus(pluginName string, status *Status) { + + var toNotify map[string]StatusListener + var statuses map[string]*Status + + func() { + m.mtx.Lock() + defer m.mtx.Unlock() + m.pluginStatus[pluginName] = status + toNotify = make(map[string]StatusListener, len(m.pluginStatusListeners)) + for k, v := range m.pluginStatusListeners { + toNotify[k] = v + } + statuses = m.copyPluginStatus() + }() + + for _, l := range toNotify { + l(statuses) + } +} + +func (m *Manager) copyPluginStatus() map[string]*Status { + statusCpy := map[string]*Status{} + for k, v := range m.pluginStatus { + var cpy *Status + if v != nil { + cpy = &Status{ + State: v.State, + Message: v.Message, + } + } + statusCpy[k] = cpy + } + return statusCpy +} + +func (m *Manager) onCommit(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) { + + compiler := GetCompilerOnContext(event.Context) + + // If the context does not contain the compiler fallback to loading the + // compiler from the store. Currently the bundle plugin sets the + // compiler on the context but the server does not (nor would users + // implementing their own policy loading.) + if compiler == nil && event.PolicyChanged() { + compiler, _ = loadCompilerFromStore(ctx, m.Store, txn, m.enablePrintStatements, m.ParserOptions()) + } + + if compiler != nil { + m.setCompiler(compiler) + + if m.enableTelemetry && event.PolicyChanged() { + m.opaReportNotifyCh <- struct{}{} + } + + for _, f := range m.registeredTriggers { + f(txn) + } + } + + // Similar to the compiler, look for a set of resolvers on the transaction + // context. If they are not set we may need to reload from the store. + resolvers := getWasmResolversOnContext(event.Context) + if resolvers != nil { + m.setWasmResolvers(resolvers) + + } else if event.DataChanged() { + if requiresWasmResolverReload(event) { + resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, m.Store, txn, nil) + if err != nil { + panic(err) + } + m.setWasmResolvers(resolvers) + } else { + err := m.updateWasmResolversData(ctx, event) + if err != nil { + panic(err) + } + } + } +} + +func loadCompilerFromStore(ctx context.Context, store storage.Store, txn storage.Transaction, enablePrintStatements bool, popts ast.ParserOptions) (*ast.Compiler, error) { + policies, err := store.ListPolicies(ctx, txn) + if err != nil { + return nil, err + } + modules := map[string]*ast.Module{} + + for _, policy := range policies { + bs, err := store.GetPolicy(ctx, txn, policy) + if err != nil { + return nil, err + } + module, err := ast.ParseModuleWithOpts(policy, string(bs), popts) + if err != nil { + return nil, err + } + modules[policy] = module + } + + compiler := ast.NewCompiler(). + WithEnablePrintStatements(enablePrintStatements) + + if popts.RegoVersion != ast.RegoUndefined { + compiler = compiler.WithDefaultRegoVersion(popts.RegoVersion) + } + + compiler.Compile(modules) + return compiler, nil +} + +func requiresWasmResolverReload(event storage.TriggerEvent) bool { + // If the data changes touched the bundle path (which includes + // the wasm modules) we will reload them. Otherwise update + // data for each module already on the manager. 
+ for _, dataEvent := range event.Data { + if dataEvent.Path.HasPrefix(bundle.BundlesBasePath) { + return true + } + } + return false +} + +func (m *Manager) updateWasmResolversData(ctx context.Context, event storage.TriggerEvent) error { + m.wasmResolversMtx.Lock() + defer m.wasmResolversMtx.Unlock() + + for _, resolver := range m.wasmResolvers { + for _, dataEvent := range event.Data { + var err error + if dataEvent.Removed { + err = resolver.RemoveDataPath(ctx, dataEvent.Path) + } else { + err = resolver.SetDataPath(ctx, dataEvent.Path, dataEvent.Data) + } + if err != nil { + return fmt.Errorf("failed to update wasm runtime data: %s", err) + } + } + } + return nil +} + +// PublicKeys returns a public keys that can be used for verifying signed bundles. +func (m *Manager) PublicKeys() map[string]*keys.Config { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.keys +} + +// Client returns a client for communicating with a remote service. +func (m *Manager) Client(name string) rest.Client { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.services[name] +} + +// Services returns a list of services that m can provide clients for. +func (m *Manager) Services() []string { + m.mtx.Lock() + defer m.mtx.Unlock() + s := make([]string, 0, len(m.services)) + for name := range m.services { + s = append(s, name) + } + return s +} + +// Logger gets the standard logger for this plugin manager. +func (m *Manager) Logger() logging.Logger { + return m.logger +} + +// ConsoleLogger gets the console logger for this plugin manager. +func (m *Manager) ConsoleLogger() logging.Logger { + return m.consoleLogger +} + +func (m *Manager) PrintHook() print.Hook { + return m.printHook +} + +func (m *Manager) EnablePrintStatements() bool { + return m.enablePrintStatements +} + +// ServerInitialized signals a channel indicating that the OPA +// server has finished initialization. +func (m *Manager) ServerInitialized() { + m.serverInitializedOnce.Do(func() { close(m.serverInitialized) }) +} + +// ServerInitializedChannel returns a receive-only channel that +// is closed when the OPA server has finished initialization. +// Be aware that the socket of the server listener may not be +// open by the time this channel is closed. There is a very +// small window where the socket may still be closed, due to +// a race condition. +func (m *Manager) ServerInitializedChannel() <-chan struct{} { + return m.serverInitialized +} + +// RegisterCacheTrigger accepts a func that receives new inter-query cache config generated by +// a reconfigure of the plugin manager, so that it can be propagated to existing inter-query caches. +func (m *Manager) RegisterCacheTrigger(trigger func(*cache.Config)) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.registeredCacheTriggers = append(m.registeredCacheTriggers, trigger) +} + +// PrometheusRegister gets the prometheus.Registerer for this plugin manager. +func (m *Manager) PrometheusRegister() prometheus.Registerer { + return m.prometheusRegister +} + +// TracerProvider gets the *trace.TracerProvider for this plugin manager. 
+func (m *Manager) TracerProvider() *trace.TracerProvider { + return m.tracerProvider +} + +func (m *Manager) RegisterNDCacheTrigger(trigger func(bool)) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.registeredNDCacheTriggers = append(m.registeredNDCacheTriggers, trigger) +} + +func (m *Manager) sendOPAUpdateLoop(ctx context.Context) { + ticker := time.NewTicker(time.Duration(int64(time.Second) * defaultUploadIntervalSec)) + mr.New(mr.NewSource(time.Now().UnixNano())) + + ctx, cancel := context.WithCancel(ctx) + + var opaReportNotify bool + + for { + select { + case <-m.opaReportNotifyCh: + opaReportNotify = true + case <-ticker.C: + ticker.Stop() + + if opaReportNotify { + opaReportNotify = false + _, err := m.reporter.SendReport(ctx) + if err != nil { + m.logger.WithFields(map[string]interface{}{"err": err}).Debug("Unable to send OPA telemetry report.") + } + } + + newInterval := mr.Int63n(defaultUploadIntervalSec) + defaultUploadIntervalSec + ticker = time.NewTicker(time.Duration(int64(time.Second) * newInterval)) + case done := <-m.stop: + cancel() + ticker.Stop() + done <- struct{}{} + return + } + } +} + +func (m *Manager) ParserOptions() ast.ParserOptions { + return m.parserOptions +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go similarity index 99% rename from constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go index 11e72001a..964630fa2 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/auth.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/auth.go @@ -33,8 +33,8 @@ import ( "github.com/open-policy-agent/opa/internal/jwx/jws/sign" "github.com/open-policy-agent/opa/internal/providers/aws" "github.com/open-policy-agent/opa/internal/uuid" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/keys" + "github.com/open-policy-agent/opa/v1/logging" ) const ( diff --git a/constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go similarity index 95% rename from constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go index 349441c83..133df8099 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/aws.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/aws.go @@ -19,7 +19,7 @@ import ( "github.com/go-ini/ini" "github.com/open-policy-agent/opa/internal/providers/aws" - "github.com/open-policy-agent/opa/logging" + "github.com/open-policy-agent/opa/v1/logging" ) const ( @@ -30,10 +30,11 @@ const ( ec2DefaultTokenPath = "http://169.254.169.254/latest/api/token" // ref. 
https://docs.aws.amazon.com/AmazonECS/latest/userguide/task-iam-roles.html - ecsDefaultCredServicePath = "http://169.254.170.2" - ecsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" - ecsFullPathEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" - ecsAuthorizationTokenEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + ecsDefaultCredServicePath = "http://169.254.170.2" + ecsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + ecsFullPathEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + ecsAuthorizationTokenEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + ecsAuthorizationTokenFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" // ref. https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html stsDefaultDomain = "amazonaws.com" @@ -277,9 +278,22 @@ func (cs *awsMetadataCredentialService) refreshFromService(ctx context.Context) // if using the AWS_CONTAINER_CREDENTIALS_FULL_URI variable, we need to associate the token // to the request if _, useFullPath := os.LookupEnv(ecsFullPathEnvVar); useFullPath { - token, tokenExists := os.LookupEnv(ecsAuthorizationTokenEnvVar) - if !tokenExists { - return errors.New("unable to get ECS metadata authorization token") + var token string + tokenFilePath, tokenFilePathExists := os.LookupEnv(ecsAuthorizationTokenFileEnvVar) + + if tokenFilePathExists { + tokenBytes, err := os.ReadFile(tokenFilePath) + if err != nil { + return errors.New("failed to read ECS metadata authorization token from file: " + err.Error()) + } + token = string(tokenBytes) + // If token doesn't exist as a file check if it exists as an environment variable + } else { + var tokenExists bool + token, tokenExists = os.LookupEnv(ecsAuthorizationTokenEnvVar) + if !tokenExists { + return errors.New("unable to get ECS metadata authorization token") + } } req.Header.Set("Authorization", token) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go new file mode 100644 index 000000000..ae00d48a7 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/azure.go @@ -0,0 +1,180 @@ +package rest + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "time" +) + +var ( + azureIMDSEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + defaultAPIVersion = "2018-02-01" + defaultResource = "https://storage.azure.com/" + timeout = 5 * time.Second + defaultAPIVersionForAppServiceMsi = "2019-08-01" +) + +// azureManagedIdentitiesToken holds a token for managed identities for Azure resources +type azureManagedIdentitiesToken struct { + AccessToken string `json:"access_token"` + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + NotBefore string `json:"not_before"` + Resource string `json:"resource"` + TokenType string `json:"token_type"` +} + +// azureManagedIdentitiesError represents an error fetching an azureManagedIdentitiesToken +type azureManagedIdentitiesError struct { + Err string `json:"error"` + Description string `json:"error_description"` + Endpoint string + StatusCode int +} + +func (e *azureManagedIdentitiesError) Error() string { + return fmt.Sprintf("%v %s retrieving azure token from %s: %s", e.StatusCode, e.Err, e.Endpoint, e.Description) +} + +// azureManagedIdentitiesAuthPlugin uses an azureManagedIdentitiesToken.AccessToken for bearer authorization +type azureManagedIdentitiesAuthPlugin struct { + Endpoint string `json:"endpoint"` + 
APIVersion string `json:"api_version"` + Resource string `json:"resource"` + ObjectID string `json:"object_id"` + ClientID string `json:"client_id"` + MiResID string `json:"mi_res_id"` + UseAppServiceMsi bool `json:"use_app_service_msi,omitempty"` +} + +func (ap *azureManagedIdentitiesAuthPlugin) NewClient(c Config) (*http.Client, error) { + if c.Type == "oci" { + return nil, errors.New("azure managed identities auth: OCI service not supported") + } + + if ap.Endpoint == "" { + identityEndpoint := os.Getenv("IDENTITY_ENDPOINT") + if identityEndpoint != "" { + ap.UseAppServiceMsi = true + ap.Endpoint = identityEndpoint + } else { + ap.Endpoint = azureIMDSEndpoint + } + } + + if ap.Resource == "" { + ap.Resource = defaultResource + } + + if ap.APIVersion == "" { + if ap.UseAppServiceMsi { + ap.APIVersion = defaultAPIVersionForAppServiceMsi + } else { + ap.APIVersion = defaultAPIVersion + } + } + + t, err := DefaultTLSConfig(c) + if err != nil { + return nil, err + } + + return DefaultRoundTripperClient(t, *c.ResponseHeaderTimeoutSeconds), nil +} + +func (ap *azureManagedIdentitiesAuthPlugin) Prepare(req *http.Request) error { + token, err := azureManagedIdentitiesTokenRequest( + ap.Endpoint, ap.APIVersion, ap.Resource, + ap.ObjectID, ap.ClientID, ap.MiResID, + ap.UseAppServiceMsi, + ) + if err != nil { + return err + } + + req.Header.Add("Authorization", "Bearer "+token.AccessToken) + return nil +} + +// azureManagedIdentitiesTokenRequest fetches an azureManagedIdentitiesToken +func azureManagedIdentitiesTokenRequest( + endpoint, apiVersion, resource, objectID, clientID, miResID string, + useAppServiceMsi bool, +) (azureManagedIdentitiesToken, error) { + var token azureManagedIdentitiesToken + e := buildAzureManagedIdentitiesRequestPath(endpoint, apiVersion, resource, objectID, clientID, miResID) + + request, err := http.NewRequest("GET", e, nil) + if err != nil { + return token, err + } + if useAppServiceMsi { + identityHeader := os.Getenv("IDENTITY_HEADER") + if identityHeader == "" { + return token, errors.New("azure managed identities auth: IDENTITY_HEADER env var not found") + } + request.Header.Add("x-identity-header", identityHeader) + } else { + request.Header.Add("Metadata", "true") + } + + httpClient := http.Client{Timeout: timeout} + response, err := httpClient.Do(request) + if err != nil { + return token, err + } + defer response.Body.Close() + + data, err := io.ReadAll(response.Body) + if err != nil { + return token, err + } + + if s := response.StatusCode; s != http.StatusOK { + var azureError azureManagedIdentitiesError + err = json.Unmarshal(data, &azureError) + if err != nil { + return token, err + } + + azureError.Endpoint = e + azureError.StatusCode = s + return token, &azureError + } + + err = json.Unmarshal(data, &token) + if err != nil { + return token, err + } + + return token, nil +} + +// buildAzureManagedIdentitiesRequestPath constructs the request URL for an Azure managed identities token request +func buildAzureManagedIdentitiesRequestPath( + endpoint, apiVersion, resource, objectID, clientID, miResID string, +) string { + params := url.Values{ + "api-version": []string{apiVersion}, + "resource": []string{resource}, + } + + if objectID != "" { + params.Add("object_id", objectID) + } + + if clientID != "" { + params.Add("client_id", clientID) + } + + if miResID != "" { + params.Add("mi_res_id", miResID) + } + + return endpoint + "?" 
+ params.Encode() +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/gcp.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/gcp.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go similarity index 98% rename from constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go index fd59058ca..fea351557 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/plugins/rest/rest.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/plugins/rest/rest.go @@ -18,10 +18,10 @@ import ( "strings" "github.com/open-policy-agent/opa/internal/version" - "github.com/open-policy-agent/opa/keys" - "github.com/open-policy-agent/opa/logging" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/keys" + "github.com/open-policy-agent/opa/v1/logging" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/util" ) const ( diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go new file mode 100644 index 000000000..dcc5e2679 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/errors.go @@ -0,0 +1,24 @@ +package rego + +// HaltError is an error type to return from a custom function implementation +// that will abort the evaluation process (analogous to topdown.Halt). +type HaltError struct { + err error +} + +// Error delegates to the wrapped error +func (h *HaltError) Error() string { + return h.err.Error() +} + +// NewHaltError wraps an error such that the evaluation process will stop +// when it occurs. +func NewHaltError(err error) error { + return &HaltError{err: err} +} + +// ErrorDetails interface is satisfied by an error that provides further +// details. +type ErrorDetails interface { + Lines() []string +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go new file mode 100644 index 000000000..88f23480b --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/plugins.go @@ -0,0 +1,43 @@ +// Copyright 2023 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package rego + +import ( + "context" + "sync" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/ir" +) + +var targetPlugins = map[string]TargetPlugin{} +var pluginMtx sync.Mutex + +type TargetPlugin interface { + IsTarget(string) bool + PrepareForEval(context.Context, *ir.Policy, ...PrepareOption) (TargetPluginEval, error) +} + +type TargetPluginEval interface { + Eval(context.Context, *EvalContext, ast.Value) (ast.Value, error) +} + +func (r *Rego) targetPlugin(tgt string) TargetPlugin { + for _, p := range targetPlugins { + if p.IsTarget(tgt) { + return p + } + } + return nil +} + +func RegisterPlugin(name string, p TargetPlugin) { + pluginMtx.Lock() + defer pluginMtx.Unlock() + if _, ok := targetPlugins[name]; ok { + panic("plugin already registered " + name) + } + targetPlugins[name] = p +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go new file mode 100644 index 000000000..ef930a2cf --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/rego.go @@ -0,0 +1,2935 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package rego exposes high level APIs for evaluating Rego policies. +package rego + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "strings" + "time" + + bundleUtils "github.com/open-policy-agent/opa/internal/bundle" + "github.com/open-policy-agent/opa/internal/compiler/wasm" + "github.com/open-policy-agent/opa/internal/future" + "github.com/open-policy-agent/opa/internal/planner" + "github.com/open-policy-agent/opa/internal/rego/opa" + "github.com/open-policy-agent/opa/internal/wasm/encoding" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/bundle" + "github.com/open-policy-agent/opa/v1/ir" + "github.com/open-policy-agent/opa/v1/loader" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/plugins" + "github.com/open-policy-agent/opa/v1/resolver" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/inmem" + "github.com/open-policy-agent/opa/v1/topdown" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + defaultPartialNamespace = "partial" + wasmVarPrefix = "^" +) + +// nolint: deadcode,varcheck +const ( + targetWasm = "wasm" + targetRego = "rego" +) + +// CompileResult represents the result of compiling a Rego query, zero or more +// Rego modules, and arbitrary contextual data into an executable. +type CompileResult struct { + Bytes []byte `json:"bytes"` +} + +// PartialQueries contains the queries and support modules produced by partial +// evaluation. +type PartialQueries struct { + Queries []ast.Body `json:"queries,omitempty"` + Support []*ast.Module `json:"modules,omitempty"` +} + +// PartialResult represents the result of partial evaluation. The result can be +// used to generate a new query that can be run when inputs are known. 
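// ---------------------------------------------------------------------------
// Illustrative sketch (editor's aside, not part of the vendored upstream file):
// minimal consumer-side use of the high level evaluation API this package
// exposes. The module source and input are invented; assumes imports of
// context, fmt and v1/rego.
// ---------------------------------------------------------------------------
func exampleBasicEval() error {
	ctx := context.Background()

	r := rego.New(
		rego.Query("data.example.allow"),
		rego.Module("example.rego", `package example

default allow := false

allow if input.user == "alice"
`),
		rego.Input(map[string]interface{}{"user": "alice"}),
	)

	rs, err := r.Eval(ctx)
	if err != nil {
		return err
	}
	fmt.Println(rs[0].Expressions[0].Value) // true
	return nil
}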
+type PartialResult struct { + compiler *ast.Compiler + store storage.Store + body ast.Body + builtinDecls map[string]*ast.Builtin + builtinFuncs map[string]*topdown.Builtin +} + +// Rego returns an object that can be evaluated to produce a query result. +func (pr PartialResult) Rego(options ...func(*Rego)) *Rego { + options = append(options, Compiler(pr.compiler), Store(pr.store), ParsedQuery(pr.body)) + r := New(options...) + + // Propagate any custom builtins. + for k, v := range pr.builtinDecls { + r.builtinDecls[k] = v + } + for k, v := range pr.builtinFuncs { + r.builtinFuncs[k] = v + } + return r +} + +// preparedQuery is a wrapper around a Rego object which has pre-processed +// state stored on it. Once prepared there are a more limited number of actions +// that can be taken with it. It will, however, be able to evaluate faster since +// it will not have to re-parse or compile as much. +type preparedQuery struct { + r *Rego + cfg *PrepareConfig +} + +// EvalContext defines the set of options allowed to be set at evaluation +// time. Any other options will need to be set on a new Rego object. +type EvalContext struct { + hasInput bool + time time.Time + seed io.Reader + rawInput *interface{} + parsedInput ast.Value + metrics metrics.Metrics + txn storage.Transaction + instrument bool + instrumentation *topdown.Instrumentation + partialNamespace string + queryTracers []topdown.QueryTracer + compiledQuery compiledQuery + unknowns []string + disableInlining []ast.Ref + nondeterministicBuiltins bool + parsedUnknowns []*ast.Term + indexing bool + earlyExit bool + interQueryBuiltinCache cache.InterQueryCache + interQueryBuiltinValueCache cache.InterQueryValueCache + ndBuiltinCache builtins.NDBCache + resolvers []refResolver + httpRoundTripper topdown.CustomizeRoundTripper + sortSets bool + copyMaps bool + printHook print.Hook + capabilities *ast.Capabilities + strictBuiltinErrors bool + virtualCache topdown.VirtualCache +} + +func (e *EvalContext) RawInput() *interface{} { + return e.rawInput +} + +func (e *EvalContext) ParsedInput() ast.Value { + return e.parsedInput +} + +func (e *EvalContext) Time() time.Time { + return e.time +} + +func (e *EvalContext) Seed() io.Reader { + return e.seed +} + +func (e *EvalContext) InterQueryBuiltinCache() cache.InterQueryCache { + return e.interQueryBuiltinCache +} + +func (e *EvalContext) InterQueryBuiltinValueCache() cache.InterQueryValueCache { + return e.interQueryBuiltinValueCache +} + +func (e *EvalContext) PrintHook() print.Hook { + return e.printHook +} + +func (e *EvalContext) Metrics() metrics.Metrics { + return e.metrics +} + +func (e *EvalContext) StrictBuiltinErrors() bool { + return e.strictBuiltinErrors +} + +func (e *EvalContext) NDBCache() builtins.NDBCache { + return e.ndBuiltinCache +} + +func (e *EvalContext) CompiledQuery() ast.Body { + return e.compiledQuery.query +} + +func (e *EvalContext) Capabilities() *ast.Capabilities { + return e.capabilities +} + +func (e *EvalContext) Transaction() storage.Transaction { + return e.txn +} + +// EvalOption defines a function to set an option on an EvalConfig +type EvalOption func(*EvalContext) + +// EvalInput configures the input for a Prepared Query's evaluation +func EvalInput(input interface{}) EvalOption { + return func(e *EvalContext) { + e.rawInput = &input + e.hasInput = true + } +} + +// EvalParsedInput configures the input for a Prepared Query's evaluation +func EvalParsedInput(input ast.Value) EvalOption { + return func(e *EvalContext) { + e.parsedInput = input + e.hasInput = 
true + } +} + +// EvalMetrics configures the metrics for a Prepared Query's evaluation +func EvalMetrics(metric metrics.Metrics) EvalOption { + return func(e *EvalContext) { + e.metrics = metric + } +} + +// EvalTransaction configures the Transaction for a Prepared Query's evaluation +func EvalTransaction(txn storage.Transaction) EvalOption { + return func(e *EvalContext) { + e.txn = txn + } +} + +// EvalInstrument enables or disables instrumenting for a Prepared Query's evaluation +func EvalInstrument(instrument bool) EvalOption { + return func(e *EvalContext) { + e.instrument = instrument + } +} + +// EvalTracer configures a tracer for a Prepared Query's evaluation +// Deprecated: Use EvalQueryTracer instead. +func EvalTracer(tracer topdown.Tracer) EvalOption { + return func(e *EvalContext) { + if tracer != nil { + e.queryTracers = append(e.queryTracers, topdown.WrapLegacyTracer(tracer)) + } + } +} + +// EvalQueryTracer configures a tracer for a Prepared Query's evaluation +func EvalQueryTracer(tracer topdown.QueryTracer) EvalOption { + return func(e *EvalContext) { + if tracer != nil { + e.queryTracers = append(e.queryTracers, tracer) + } + } +} + +// EvalPartialNamespace returns an argument that sets the namespace to use for +// partial evaluation results. The namespace must be a valid package path +// component. +func EvalPartialNamespace(ns string) EvalOption { + return func(e *EvalContext) { + e.partialNamespace = ns + } +} + +// EvalUnknowns returns an argument that sets the values to treat as +// unknown during partial evaluation. +func EvalUnknowns(unknowns []string) EvalOption { + return func(e *EvalContext) { + e.unknowns = unknowns + } +} + +// EvalDisableInlining returns an argument that adds a set of paths to exclude from +// partial evaluation inlining. +func EvalDisableInlining(paths []ast.Ref) EvalOption { + return func(e *EvalContext) { + e.disableInlining = paths + } +} + +// EvalParsedUnknowns returns an argument that sets the values to treat +// as unknown during partial evaluation. +func EvalParsedUnknowns(unknowns []*ast.Term) EvalOption { + return func(e *EvalContext) { + e.parsedUnknowns = unknowns + } +} + +// EvalRuleIndexing will disable indexing optimizations for the +// evaluation. This should only be used when tracing in debug mode. +func EvalRuleIndexing(enabled bool) EvalOption { + return func(e *EvalContext) { + e.indexing = enabled + } +} + +// EvalEarlyExit will disable 'early exit' optimizations for the +// evaluation. This should only be used when tracing in debug mode. +func EvalEarlyExit(enabled bool) EvalOption { + return func(e *EvalContext) { + e.earlyExit = enabled + } +} + +// EvalTime sets the wall clock time to use during policy evaluation. +// time.now_ns() calls will return this value. +func EvalTime(x time.Time) EvalOption { + return func(e *EvalContext) { + e.time = x + } +} + +// EvalSeed sets a reader that will seed randomization required by built-in functions. +// If a seed is not provided crypto/rand.Reader is used. +func EvalSeed(r io.Reader) EvalOption { + return func(e *EvalContext) { + e.seed = r + } +} + +// EvalInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize +// during evaluation. +func EvalInterQueryBuiltinCache(c cache.InterQueryCache) EvalOption { + return func(e *EvalContext) { + e.interQueryBuiltinCache = c + } +} + +// EvalInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize +// during evaluation. 
+func EvalInterQueryBuiltinValueCache(c cache.InterQueryValueCache) EvalOption { + return func(e *EvalContext) { + e.interQueryBuiltinValueCache = c + } +} + +// EvalNDBuiltinCache sets the non-deterministic builtin cache that built-in functions can +// use during evaluation. +func EvalNDBuiltinCache(c builtins.NDBCache) EvalOption { + return func(e *EvalContext) { + e.ndBuiltinCache = c + } +} + +// EvalResolver sets a Resolver for a specified ref path for this evaluation. +func EvalResolver(ref ast.Ref, r resolver.Resolver) EvalOption { + return func(e *EvalContext) { + e.resolvers = append(e.resolvers, refResolver{ref, r}) + } +} + +// EvalHTTPRoundTripper allows customizing the http.RoundTripper for this evaluation. +func EvalHTTPRoundTripper(t topdown.CustomizeRoundTripper) EvalOption { + return func(e *EvalContext) { + e.httpRoundTripper = t + } +} + +// EvalSortSets causes the evaluator to sort sets before returning them as JSON arrays. +func EvalSortSets(yes bool) EvalOption { + return func(e *EvalContext) { + e.sortSets = yes + } +} + +// EvalCopyMaps causes the evaluator to copy `map[string]interface{}`s before returning them. +func EvalCopyMaps(yes bool) EvalOption { + return func(e *EvalContext) { + e.copyMaps = yes + } +} + +// EvalPrintHook sets the object to use for handling print statement outputs. +func EvalPrintHook(ph print.Hook) EvalOption { + return func(e *EvalContext) { + e.printHook = ph + } +} + +// EvalVirtualCache sets the topdown.VirtualCache to use for evaluation. This is +// optional, and if not set, the default cache is used. +func EvalVirtualCache(vc topdown.VirtualCache) EvalOption { + return func(e *EvalContext) { + e.virtualCache = vc + } +} + +// EvalNondeterministicBuiltins causes non-deterministic builtins to be evalued +// during partial evaluation. This is needed to pull in external data, or validate +// a JWT, during PE, so that the result informs what queries are returned. +func EvalNondeterministicBuiltins(yes bool) EvalOption { + return func(e *EvalContext) { + e.nondeterministicBuiltins = yes + } +} + +func (pq preparedQuery) Modules() map[string]*ast.Module { + mods := make(map[string]*ast.Module) + + for name, mod := range pq.r.parsedModules { + mods[name] = mod + } + + for _, b := range pq.r.bundles { + for _, mod := range b.Modules { + mods[mod.Path] = mod.Parsed + } + } + + return mods +} + +// newEvalContext creates a new EvalContext overlaying any EvalOptions over top +// the Rego object on the preparedQuery. The returned function should be called +// once the evaluation is complete to close any transactions that might have +// been opened. 
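// ---------------------------------------------------------------------------
// Illustrative sketch (editor's aside, not part of the vendored upstream file):
// preparing a query once and supplying per-call settings through the
// EvalOption values defined above (which newEvalContext below merges onto the
// prepared state). Policy and input are invented; assumes imports of context,
// v1/metrics and v1/rego.
// ---------------------------------------------------------------------------
func examplePreparedEval(ctx context.Context) error {
	pq, err := rego.New(
		rego.Query("data.authz.allow"),
		rego.Module("authz.rego", `package authz

allow if input.role == "admin"
`),
	).PrepareForEval(ctx)
	if err != nil {
		return err
	}

	// The prepared query can be evaluated repeatedly; each call supplies its
	// own input, metrics, transaction, caches, etc. via EvalOptions.
	m := metrics.New()
	rs, err := pq.Eval(ctx,
		rego.EvalInput(map[string]interface{}{"role": "admin"}),
		rego.EvalMetrics(m),
	)
	if err != nil {
		return err
	}
	_ = rs // rs[0].Expressions[0].Value should be true here
	return nil
}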
+func (pq preparedQuery) newEvalContext(ctx context.Context, options []EvalOption) (*EvalContext, func(context.Context), error) { + ectx := &EvalContext{ + hasInput: false, + rawInput: nil, + parsedInput: nil, + metrics: nil, + txn: nil, + instrument: false, + instrumentation: nil, + partialNamespace: pq.r.partialNamespace, + queryTracers: nil, + unknowns: pq.r.unknowns, + parsedUnknowns: pq.r.parsedUnknowns, + nondeterministicBuiltins: pq.r.nondeterministicBuiltins, + compiledQuery: compiledQuery{}, + indexing: true, + earlyExit: true, + resolvers: pq.r.resolvers, + printHook: pq.r.printHook, + capabilities: pq.r.capabilities, + strictBuiltinErrors: pq.r.strictBuiltinErrors, + } + + for _, o := range options { + o(ectx) + } + + if ectx.metrics == nil { + ectx.metrics = metrics.New() + } + + if ectx.instrument { + ectx.instrumentation = topdown.NewInstrumentation(ectx.metrics) + } + + // Default to an empty "finish" function + finishFunc := func(context.Context) {} + + var err error + ectx.disableInlining, err = parseStringsToRefs(pq.r.disableInlining) + if err != nil { + return nil, finishFunc, err + } + + if ectx.txn == nil { + ectx.txn, err = pq.r.store.NewTransaction(ctx) + if err != nil { + return nil, finishFunc, err + } + finishFunc = func(ctx context.Context) { + pq.r.store.Abort(ctx, ectx.txn) + } + } + + // If we didn't get an input specified in the Eval options + // then fall back to the Rego object's input fields. + if !ectx.hasInput { + ectx.rawInput = pq.r.rawInput + ectx.parsedInput = pq.r.parsedInput + } + + if ectx.parsedInput == nil { + if ectx.rawInput == nil { + // Fall back to the original Rego objects input if none was specified + // Note that it could still be nil + ectx.rawInput = pq.r.rawInput + } + + if pq.r.targetPlugin(pq.r.target) == nil && // no plugin claims this target + pq.r.target != targetWasm { + ectx.parsedInput, err = pq.r.parseRawInput(ectx.rawInput, ectx.metrics) + if err != nil { + return nil, finishFunc, err + } + } + } + + return ectx, finishFunc, nil +} + +// PreparedEvalQuery holds the prepared Rego state that has been pre-processed +// for subsequent evaluations. +type PreparedEvalQuery struct { + preparedQuery +} + +// Eval evaluates this PartialResult's Rego object with additional eval options +// and returns a ResultSet. +// If options are provided they will override the original Rego options respective value. +// The original Rego object transaction will *not* be re-used. A new transaction will be opened +// if one is not provided with an EvalOption. +func (pq PreparedEvalQuery) Eval(ctx context.Context, options ...EvalOption) (ResultSet, error) { + ectx, finish, err := pq.newEvalContext(ctx, options) + if err != nil { + return nil, err + } + defer finish(ctx) + + ectx.compiledQuery = pq.r.compiledQueries[evalQueryType] + + return pq.r.eval(ctx, ectx) +} + +// PreparedPartialQuery holds the prepared Rego state that has been pre-processed +// for partial evaluations. +type PreparedPartialQuery struct { + preparedQuery +} + +// Partial runs partial evaluation on the prepared query and returns the result. +// The original Rego object transaction will *not* be re-used. A new transaction will be opened +// if one is not provided with an EvalOption. 
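// ---------------------------------------------------------------------------
// Illustrative sketch (editor's aside, not part of the vendored upstream file):
// running partial evaluation through PreparedPartialQuery.Partial below, with
// input marked as unknown. The policy is invented; assumes imports of context,
// fmt and v1/rego.
// ---------------------------------------------------------------------------
func examplePartial(ctx context.Context) error {
	r := rego.New(
		rego.Query("data.example.allow == true"),
		rego.Module("example.rego", `package example

allow if input.method == "GET"
`),
		rego.Unknowns([]string{"input"}),
	)

	pq, err := r.PrepareForPartial(ctx)
	if err != nil {
		return err
	}

	partials, err := pq.Partial(ctx)
	if err != nil {
		return err
	}
	for _, q := range partials.Queries {
		fmt.Println(q) // e.g. a residual query equivalent to: input.method = "GET"
	}
	return nil
}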
+func (pq PreparedPartialQuery) Partial(ctx context.Context, options ...EvalOption) (*PartialQueries, error) { + ectx, finish, err := pq.newEvalContext(ctx, options) + if err != nil { + return nil, err + } + defer finish(ctx) + + ectx.compiledQuery = pq.r.compiledQueries[partialQueryType] + + return pq.r.partial(ctx, ectx) +} + +// Errors represents a collection of errors returned when evaluating Rego. +type Errors []error + +func (errs Errors) Error() string { + if len(errs) == 0 { + return "no error" + } + if len(errs) == 1 { + return fmt.Sprintf("1 error occurred: %v", errs[0].Error()) + } + buf := []string{fmt.Sprintf("%v errors occurred", len(errs))} + for _, err := range errs { + buf = append(buf, err.Error()) + } + return strings.Join(buf, "\n") +} + +var errPartialEvaluationNotEffective = errors.New("partial evaluation not effective") + +// IsPartialEvaluationNotEffectiveErr returns true if err is an error returned by +// this package to indicate that partial evaluation was ineffective. +func IsPartialEvaluationNotEffectiveErr(err error) bool { + errs, ok := err.(Errors) + if !ok { + return false + } + return len(errs) == 1 && errs[0] == errPartialEvaluationNotEffective +} + +type compiledQuery struct { + query ast.Body + compiler ast.QueryCompiler +} + +type queryType int + +// Define a query type for each of the top level Rego +// API's that compile queries differently. +const ( + evalQueryType queryType = iota + partialResultQueryType + partialQueryType + compileQueryType +) + +type loadPaths struct { + paths []string + filter loader.Filter +} + +// Rego constructs a query and can be evaluated to obtain results. +type Rego struct { + query string + parsedQuery ast.Body + compiledQueries map[queryType]compiledQuery + pkg string + parsedPackage *ast.Package + imports []string + parsedImports []*ast.Import + rawInput *interface{} + parsedInput ast.Value + unknowns []string + parsedUnknowns []*ast.Term + disableInlining []string + shallowInlining bool + nondeterministicBuiltins bool + skipPartialNamespace bool + partialNamespace string + modules []rawModule + parsedModules map[string]*ast.Module + compiler *ast.Compiler + store storage.Store + ownStore bool + ownStoreReadAst bool + txn storage.Transaction + metrics metrics.Metrics + queryTracers []topdown.QueryTracer + tracebuf *topdown.BufferTracer + trace bool + instrumentation *topdown.Instrumentation + instrument bool + capture map[*ast.Expr]ast.Var // map exprs to generated capture vars + termVarID int + dump io.Writer + runtime *ast.Term + time time.Time + seed io.Reader + capabilities *ast.Capabilities + builtinDecls map[string]*ast.Builtin + builtinFuncs map[string]*topdown.Builtin + unsafeBuiltins map[string]struct{} + loadPaths loadPaths + bundlePaths []string + bundles map[string]*bundle.Bundle + skipBundleVerification bool + interQueryBuiltinCache cache.InterQueryCache + interQueryBuiltinValueCache cache.InterQueryValueCache + ndBuiltinCache builtins.NDBCache + strictBuiltinErrors bool + builtinErrorList *[]topdown.Error + resolvers []refResolver + schemaSet *ast.SchemaSet + target string // target type (wasm, rego, etc.) 
+ opa opa.EvalEngine + generateJSON func(*ast.Term, *EvalContext) (interface{}, error) + printHook print.Hook + enablePrintStatements bool + distributedTacingOpts tracing.Options + strict bool + pluginMgr *plugins.Manager + plugins []TargetPlugin + targetPrepState TargetPluginEval + regoVersion ast.RegoVersion +} + +func (r *Rego) RegoVersion() ast.RegoVersion { + return r.regoVersion +} + +// Function represents a built-in function that is callable in Rego. +type Function struct { + Name string + Description string + Decl *types.Function + Memoize bool + Nondeterministic bool +} + +// BuiltinContext contains additional attributes from the evaluator that +// built-in functions can use, e.g., the request context.Context, caches, etc. +type BuiltinContext = topdown.BuiltinContext + +type ( + // Builtin1 defines a built-in function that accepts 1 argument. + Builtin1 func(bctx BuiltinContext, op1 *ast.Term) (*ast.Term, error) + + // Builtin2 defines a built-in function that accepts 2 arguments. + Builtin2 func(bctx BuiltinContext, op1, op2 *ast.Term) (*ast.Term, error) + + // Builtin3 defines a built-in function that accepts 3 argument. + Builtin3 func(bctx BuiltinContext, op1, op2, op3 *ast.Term) (*ast.Term, error) + + // Builtin4 defines a built-in function that accepts 4 argument. + Builtin4 func(bctx BuiltinContext, op1, op2, op3, op4 *ast.Term) (*ast.Term, error) + + // BuiltinDyn defines a built-in function that accepts a list of arguments. + BuiltinDyn func(bctx BuiltinContext, terms []*ast.Term) (*ast.Term, error) +) + +// RegisterBuiltin1 adds a built-in function globally inside the OPA runtime. +func RegisterBuiltin1(decl *Function, impl Builtin1) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// RegisterBuiltin2 adds a built-in function globally inside the OPA runtime. +func RegisterBuiltin2(decl *Function, impl Builtin2) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// RegisterBuiltin3 adds a built-in function globally inside the OPA runtime. +func RegisterBuiltin3(decl *Function, impl Builtin3) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// RegisterBuiltin4 adds a built-in function globally inside the OPA runtime. 
+func RegisterBuiltin4(decl *Function, impl Builtin4) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms[0], terms[1], terms[2], terms[3]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// RegisterBuiltinDyn adds a built-in function globally inside the OPA runtime. +func RegisterBuiltinDyn(decl *Function, impl BuiltinDyn) { + ast.RegisterBuiltin(&ast.Builtin{ + Name: decl.Name, + Description: decl.Description, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + }) + topdown.RegisterBuiltinFunc(decl.Name, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return impl(bctx, terms) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function1 returns an option that adds a built-in function to the Rego object. +func Function1(decl *Function, f Builtin1) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function2 returns an option that adds a built-in function to the Rego object. +func Function2(decl *Function, f Builtin2) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function3 returns an option that adds a built-in function to the Rego object. +func Function3(decl *Function, f Builtin3) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// Function4 returns an option that adds a built-in function to the Rego object. +func Function4(decl *Function, f Builtin4) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms[0], terms[1], terms[2], terms[3]) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// FunctionDyn returns an option that adds a built-in function to the Rego object. +func FunctionDyn(decl *Function, f BuiltinDyn) func(*Rego) { + return newFunction(decl, func(bctx BuiltinContext, terms []*ast.Term, iter func(*ast.Term) error) error { + result, err := memoize(decl, bctx, terms, func() (*ast.Term, error) { return f(bctx, terms) }) + return finishFunction(decl.Name, bctx, result, err, iter) + }) +} + +// FunctionDecl returns an option that adds a custom-built-in function +// __declaration__. NO implementation is provided. This is used for +// non-interpreter execution envs (e.g., Wasm). 
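// ---------------------------------------------------------------------------
// Illustrative sketch (editor's aside, not part of the vendored upstream file):
// attaching a per-query built-in with the Function1 option defined above. The
// "custom.upper" name is invented; assumes imports of strings, v1/ast, v1/rego
// and v1/types. RegisterBuiltin1 is the equivalent global registration, and
// NewHaltError can be returned from the implementation to abort evaluation.
// ---------------------------------------------------------------------------
func exampleCustomBuiltin() *rego.Rego {
	return rego.New(
		rego.Query(`x := custom.upper("hello")`),
		rego.Function1(
			&rego.Function{
				Name:    "custom.upper",
				Decl:    types.NewFunction(types.Args(types.S), types.S),
				Memoize: true, // results cached per query via memoize()
			},
			func(_ rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
				s, ok := a.Value.(ast.String)
				if !ok {
					return nil, nil // undefined for non-string operands
				}
				return ast.StringTerm(strings.ToUpper(string(s))), nil
			},
		),
	)
}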
+func FunctionDecl(decl *Function) func(*Rego) { + return newDecl(decl) +} + +func newDecl(decl *Function) func(*Rego) { + return func(r *Rego) { + r.builtinDecls[decl.Name] = &ast.Builtin{ + Name: decl.Name, + Decl: decl.Decl, + } + } +} + +type memo struct { + term *ast.Term + err error +} + +type memokey string + +func memoize(decl *Function, bctx BuiltinContext, terms []*ast.Term, ifEmpty func() (*ast.Term, error)) (*ast.Term, error) { + + if !decl.Memoize { + return ifEmpty() + } + + // NOTE(tsandall): we assume memoization is applied to infrequent built-in + // calls that do things like fetch data from remote locations. As such, + // converting the terms to strings is acceptable for now. + var b strings.Builder + if _, err := b.WriteString(decl.Name); err != nil { + return nil, err + } + + // The term slice _may_ include an output term depending on how the caller + // referred to the built-in function. Only use the arguments as the cache + // key. Unification ensures we don't get false positive matches. + for i := 0; i < decl.Decl.Arity(); i++ { + if _, err := b.WriteString(terms[i].String()); err != nil { + return nil, err + } + } + + key := memokey(b.String()) + hit, ok := bctx.Cache.Get(key) + var m memo + if ok { + m = hit.(memo) + } else { + m.term, m.err = ifEmpty() + bctx.Cache.Put(key, m) + } + + return m.term, m.err +} + +// Dump returns an argument that sets the writer to dump debugging information to. +func Dump(w io.Writer) func(r *Rego) { + return func(r *Rego) { + r.dump = w + } +} + +// Query returns an argument that sets the Rego query. +func Query(q string) func(r *Rego) { + return func(r *Rego) { + r.query = q + } +} + +// ParsedQuery returns an argument that sets the Rego query. +func ParsedQuery(q ast.Body) func(r *Rego) { + return func(r *Rego) { + r.parsedQuery = q + } +} + +// Package returns an argument that sets the Rego package on the query's +// context. +func Package(p string) func(r *Rego) { + return func(r *Rego) { + r.pkg = p + } +} + +// ParsedPackage returns an argument that sets the Rego package on the query's +// context. +func ParsedPackage(pkg *ast.Package) func(r *Rego) { + return func(r *Rego) { + r.parsedPackage = pkg + } +} + +// Imports returns an argument that adds a Rego import to the query's context. +func Imports(p []string) func(r *Rego) { + return func(r *Rego) { + r.imports = append(r.imports, p...) + } +} + +// ParsedImports returns an argument that adds Rego imports to the query's +// context. +func ParsedImports(imp []*ast.Import) func(r *Rego) { + return func(r *Rego) { + r.parsedImports = append(r.parsedImports, imp...) + } +} + +// Input returns an argument that sets the Rego input document. Input should be +// a native Go value representing the input document. +func Input(x interface{}) func(r *Rego) { + return func(r *Rego) { + r.rawInput = &x + } +} + +// ParsedInput returns an argument that sets the Rego input document. +func ParsedInput(x ast.Value) func(r *Rego) { + return func(r *Rego) { + r.parsedInput = x + } +} + +// Unknowns returns an argument that sets the values to treat as unknown during +// partial evaluation. +func Unknowns(unknowns []string) func(r *Rego) { + return func(r *Rego) { + r.unknowns = unknowns + } +} + +// ParsedUnknowns returns an argument that sets the values to treat as unknown +// during partial evaluation. 
+func ParsedUnknowns(unknowns []*ast.Term) func(r *Rego) { + return func(r *Rego) { + r.parsedUnknowns = unknowns + } +} + +// DisableInlining adds a set of paths to exclude from partial evaluation inlining. +func DisableInlining(paths []string) func(r *Rego) { + return func(r *Rego) { + r.disableInlining = paths + } +} + +// NondeterministicBuiltins causes non-deterministic builtins to be evalued during +// partial evaluation. This is needed to pull in external data, or validate a JWT, +// during PE, so that the result informs what queries are returned. +func NondeterministicBuiltins(yes bool) func(r *Rego) { + return func(r *Rego) { + r.nondeterministicBuiltins = yes + } +} + +// ShallowInlining prevents rules that depend on unknown values from being inlined. +// Rules that only depend on known values are inlined. +func ShallowInlining(yes bool) func(r *Rego) { + return func(r *Rego) { + r.shallowInlining = yes + } +} + +// SkipPartialNamespace disables namespacing of partial evalution results for support +// rules generated from policy. Synthetic support rules are still namespaced. +func SkipPartialNamespace(yes bool) func(r *Rego) { + return func(r *Rego) { + r.skipPartialNamespace = yes + } +} + +// PartialNamespace returns an argument that sets the namespace to use for +// partial evaluation results. The namespace must be a valid package path +// component. +func PartialNamespace(ns string) func(r *Rego) { + return func(r *Rego) { + r.partialNamespace = ns + } +} + +// Module returns an argument that adds a Rego module. +func Module(filename, input string) func(r *Rego) { + return func(r *Rego) { + r.modules = append(r.modules, rawModule{ + filename: filename, + module: input, + }) + } +} + +// ParsedModule returns an argument that adds a parsed Rego module. If a string +// module with the same filename name is added, it will override the parsed +// module. +func ParsedModule(module *ast.Module) func(*Rego) { + return func(r *Rego) { + var filename string + if module.Package.Location != nil { + filename = module.Package.Location.File + } else { + filename = fmt.Sprintf("module_%p.rego", module) + } + r.parsedModules[filename] = module + } +} + +// Load returns an argument that adds a filesystem path to load data +// and Rego modules from. Any file with a *.rego, *.yaml, or *.json +// extension will be loaded. The path can be either a directory or file, +// directories are loaded recursively. The optional ignore string patterns +// can be used to filter which files are used. +// The Load option can only be used once. +// Note: Loading files will require a write transaction on the store. +func Load(paths []string, filter loader.Filter) func(r *Rego) { + return func(r *Rego) { + r.loadPaths = loadPaths{paths, filter} + } +} + +// LoadBundle returns an argument that adds a filesystem path to load +// a bundle from. The path can be a compressed bundle file or a directory +// to be loaded as a bundle. +// Note: Loading bundles will require a write transaction on the store. +func LoadBundle(path string) func(r *Rego) { + return func(r *Rego) { + r.bundlePaths = append(r.bundlePaths, path) + } +} + +// ParsedBundle returns an argument that adds a bundle to be loaded. +func ParsedBundle(name string, b *bundle.Bundle) func(r *Rego) { + return func(r *Rego) { + r.bundles[name] = b + } +} + +// Compiler returns an argument that sets the Rego compiler. 
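// ---------------------------------------------------------------------------
// Illustrative sketch (editor's aside, not part of the vendored upstream file):
// combining Load (above) with Store and Transaction (below). Loading files
// requires a write transaction, which is committed once preparation succeeds.
// The "./policies" directory is invented; assumes imports of context, v1/rego,
// v1/storage and v1/storage/inmem.
// ---------------------------------------------------------------------------
func exampleLoadFromDisk(ctx context.Context) (rego.PreparedEvalQuery, error) {
	store := inmem.New()
	txn, err := store.NewTransaction(ctx, storage.WriteParams)
	if err != nil {
		return rego.PreparedEvalQuery{}, err
	}

	pq, err := rego.New(
		rego.Query("data.policies.allow"),
		rego.Load([]string{"./policies"}, nil), // *.rego/*.json/*.yaml, loaded recursively
		rego.Store(store),
		rego.Transaction(txn),
	).PrepareForEval(ctx)
	if err != nil {
		store.Abort(ctx, txn)
		return rego.PreparedEvalQuery{}, err
	}

	return pq, store.Commit(ctx, txn)
}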
+func Compiler(c *ast.Compiler) func(r *Rego) { + return func(r *Rego) { + r.compiler = c + } +} + +// Store returns an argument that sets the policy engine's data storage layer. +// +// If using the Load, LoadBundle, or ParsedBundle options then a transaction +// must also be provided via the Transaction() option. After loading files +// or bundles the transaction should be aborted or committed. +func Store(s storage.Store) func(r *Rego) { + return func(r *Rego) { + r.store = s + } +} + +// StoreReadAST returns an argument that sets whether the store should eagerly convert data to AST values. +// +// Only applicable when no store has been set on the Rego object through the Store option. +func StoreReadAST(enabled bool) func(r *Rego) { + return func(r *Rego) { + r.ownStoreReadAst = enabled + } +} + +// Transaction returns an argument that sets the transaction to use for storage +// layer operations. +// +// Requires the store associated with the transaction to be provided via the +// Store() option. If using Load(), LoadBundle(), or ParsedBundle() options +// the transaction will likely require write params. +func Transaction(txn storage.Transaction) func(r *Rego) { + return func(r *Rego) { + r.txn = txn + } +} + +// Metrics returns an argument that sets the metrics collection. +func Metrics(m metrics.Metrics) func(r *Rego) { + return func(r *Rego) { + r.metrics = m + } +} + +// Instrument returns an argument that enables instrumentation for diagnosing +// performance issues. +func Instrument(yes bool) func(r *Rego) { + return func(r *Rego) { + r.instrument = yes + } +} + +// Trace returns an argument that enables tracing on r. +func Trace(yes bool) func(r *Rego) { + return func(r *Rego) { + r.trace = yes + } +} + +// Tracer returns an argument that adds a query tracer to r. +// Deprecated: Use QueryTracer instead. +func Tracer(t topdown.Tracer) func(r *Rego) { + return func(r *Rego) { + if t != nil { + r.queryTracers = append(r.queryTracers, topdown.WrapLegacyTracer(t)) + } + } +} + +// QueryTracer returns an argument that adds a query tracer to r. +func QueryTracer(t topdown.QueryTracer) func(r *Rego) { + return func(r *Rego) { + if t != nil { + r.queryTracers = append(r.queryTracers, t) + } + } +} + +// Runtime returns an argument that sets the runtime data to provide to the +// evaluation engine. +func Runtime(term *ast.Term) func(r *Rego) { + return func(r *Rego) { + r.runtime = term + } +} + +// Time sets the wall clock time to use during policy evaluation. Prepared queries +// do not inherit this parameter. Use EvalTime to set the wall clock time when +// executing a prepared query. +func Time(x time.Time) func(r *Rego) { + return func(r *Rego) { + r.time = x + } +} + +// Seed sets a reader that will seed randomization required by built-in functions. +// If a seed is not provided crypto/rand.Reader is used. +func Seed(r io.Reader) func(*Rego) { + return func(e *Rego) { + e.seed = r + } +} + +// PrintTrace is a helper function to write a human-readable version of the +// trace to the writer w. +func PrintTrace(w io.Writer, r *Rego) { + if r == nil || r.tracebuf == nil { + return + } + topdown.PrettyTrace(w, *r.tracebuf) +} + +// PrintTraceWithLocation is a helper function to write a human-readable version of the +// trace to the writer w. +func PrintTraceWithLocation(w io.Writer, r *Rego) { + if r == nil || r.tracebuf == nil { + return + } + topdown.PrettyTraceWithLocation(w, *r.tracebuf) +} + +// UnsafeBuiltins sets the built-in functions to treat as unsafe and not allow. 
+// This option is ignored for module compilation if the caller supplies the +// compiler. This option is always honored for query compilation. Provide an +// empty (non-nil) map to disable checks on queries. +func UnsafeBuiltins(unsafeBuiltins map[string]struct{}) func(r *Rego) { + return func(r *Rego) { + r.unsafeBuiltins = unsafeBuiltins + } +} + +// SkipBundleVerification skips verification of a signed bundle. +func SkipBundleVerification(yes bool) func(r *Rego) { + return func(r *Rego) { + r.skipBundleVerification = yes + } +} + +// InterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize +// during evaluation. +func InterQueryBuiltinCache(c cache.InterQueryCache) func(r *Rego) { + return func(r *Rego) { + r.interQueryBuiltinCache = c + } +} + +// InterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize +// during evaluation. +func InterQueryBuiltinValueCache(c cache.InterQueryValueCache) func(r *Rego) { + return func(r *Rego) { + r.interQueryBuiltinValueCache = c + } +} + +// NDBuiltinCache sets the non-deterministic builtins cache. +func NDBuiltinCache(c builtins.NDBCache) func(r *Rego) { + return func(r *Rego) { + r.ndBuiltinCache = c + } +} + +// StrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors. +func StrictBuiltinErrors(yes bool) func(r *Rego) { + return func(r *Rego) { + r.strictBuiltinErrors = yes + } +} + +// BuiltinErrorList supplies an error slice to store built-in function errors. +func BuiltinErrorList(list *[]topdown.Error) func(r *Rego) { + return func(r *Rego) { + r.builtinErrorList = list + } +} + +// Resolver sets a Resolver for a specified ref path. +func Resolver(ref ast.Ref, r resolver.Resolver) func(r *Rego) { + return func(rego *Rego) { + rego.resolvers = append(rego.resolvers, refResolver{ref, r}) + } +} + +// Schemas sets the schemaSet +func Schemas(x *ast.SchemaSet) func(r *Rego) { + return func(r *Rego) { + r.schemaSet = x + } +} + +// Capabilities configures the underlying compiler's capabilities. +// This option is ignored for module compilation if the caller supplies the +// compiler. +func Capabilities(c *ast.Capabilities) func(r *Rego) { + return func(r *Rego) { + r.capabilities = c + } +} + +// Target sets the runtime to exercise. +func Target(t string) func(r *Rego) { + return func(r *Rego) { + r.target = t + } +} + +// GenerateJSON sets the AST to JSON converter for the results. +func GenerateJSON(f func(*ast.Term, *EvalContext) (interface{}, error)) func(r *Rego) { + return func(r *Rego) { + r.generateJSON = f + } +} + +// PrintHook sets the object to use for handling print statement outputs. +func PrintHook(h print.Hook) func(r *Rego) { + return func(r *Rego) { + r.printHook = h + } +} + +// DistributedTracingOpts sets the options to be used by distributed tracing. +func DistributedTracingOpts(tr tracing.Options) func(r *Rego) { + return func(r *Rego) { + r.distributedTacingOpts = tr + } +} + +// EnablePrintStatements enables print() calls. If this option is not provided, +// print() calls will be erased from the policy. This option only applies to +// queries and policies that passed as raw strings, i.e., this function will not +// have any affect if the caller supplies the ast.Compiler instance. 
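// ---------------------------------------------------------------------------
// Illustrative sketch (editor's aside, not part of the vendored upstream file):
// capturing print() output with EnablePrintStatements (below) and PrintHook
// (above). It assumes topdown.NewPrintHook wraps an io.Writer as a print.Hook;
// assumes imports of bytes, context, fmt, v1/rego and v1/topdown.
// ---------------------------------------------------------------------------
func examplePrint(ctx context.Context) error {
	buf := new(bytes.Buffer)

	_, err := rego.New(
		rego.Query("data.example.allow"),
		rego.Module("example.rego", `package example

allow if {
	print("input is:", input)
	input.x > 1
}
`),
		rego.Input(map[string]interface{}{"x": 2}),
		rego.EnablePrintStatements(true), // otherwise print() calls are erased
		rego.PrintHook(topdown.NewPrintHook(buf)),
	).Eval(ctx)
	if err != nil {
		return err
	}

	fmt.Print(buf.String()) // the message passed to print(), e.g. input is: {"x": 2}
	return nil
}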
+func EnablePrintStatements(yes bool) func(r *Rego) { + return func(r *Rego) { + r.enablePrintStatements = yes + } +} + +// Strict enables or disables strict-mode in the compiler +func Strict(yes bool) func(r *Rego) { + return func(r *Rego) { + r.strict = yes + } +} + +func SetRegoVersion(version ast.RegoVersion) func(r *Rego) { + return func(r *Rego) { + r.regoVersion = version + } +} + +// New returns a new Rego object. +func New(options ...func(r *Rego)) *Rego { + + r := &Rego{ + parsedModules: map[string]*ast.Module{}, + capture: map[*ast.Expr]ast.Var{}, + compiledQueries: map[queryType]compiledQuery{}, + builtinDecls: map[string]*ast.Builtin{}, + builtinFuncs: map[string]*topdown.Builtin{}, + bundles: map[string]*bundle.Bundle{}, + } + + for _, option := range options { + option(r) + } + + if r.compiler == nil { + r.compiler = ast.NewCompiler(). + WithUnsafeBuiltins(r.unsafeBuiltins). + WithBuiltins(r.builtinDecls). + WithDebug(r.dump). + WithSchemas(r.schemaSet). + WithCapabilities(r.capabilities). + WithEnablePrintStatements(r.enablePrintStatements). + WithStrict(r.strict). + WithUseTypeCheckAnnotations(true) + + // topdown could be target "" or "rego", but both could be overridden by + // a target plugin (checked below) + if r.target == targetWasm { + r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) + } + + if r.regoVersion != ast.RegoUndefined { + r.compiler = r.compiler.WithDefaultRegoVersion(r.regoVersion) + } + } + + if r.store == nil { + r.store = inmem.NewWithOpts(inmem.OptReturnASTValuesOnRead(r.ownStoreReadAst)) + r.ownStore = true + } else { + r.ownStore = false + } + + if r.metrics == nil { + r.metrics = metrics.New() + } + + if r.instrument { + r.instrumentation = topdown.NewInstrumentation(r.metrics) + r.compiler.WithMetrics(r.metrics) + } + + if r.trace { + r.tracebuf = topdown.NewBufferTracer() + r.queryTracers = append(r.queryTracers, r.tracebuf) + } + + if r.partialNamespace == "" { + r.partialNamespace = defaultPartialNamespace + } + + if r.generateJSON == nil { + r.generateJSON = generateJSON + } + + if r.pluginMgr != nil { + for _, name := range r.pluginMgr.Plugins() { + p := r.pluginMgr.Plugin(name) + if p0, ok := p.(TargetPlugin); ok { + r.plugins = append(r.plugins, p0) + } + } + } + + if t := r.targetPlugin(r.target); t != nil { + r.compiler = r.compiler.WithEvalMode(ast.EvalModeIR) + } + + return r +} + +// Eval evaluates this Rego object and returns a ResultSet. +func (r *Rego) Eval(ctx context.Context) (ResultSet, error) { + var err error + var txnClose transactionCloser + r.txn, txnClose, err = r.getTxn(ctx) + if err != nil { + return nil, err + } + + pq, err := r.PrepareForEval(ctx) + if err != nil { + _ = txnClose(ctx, err) // Ignore error + return nil, err + } + + evalArgs := []EvalOption{ + EvalTransaction(r.txn), + EvalMetrics(r.metrics), + EvalInstrument(r.instrument), + EvalTime(r.time), + EvalInterQueryBuiltinCache(r.interQueryBuiltinCache), + EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache), + EvalSeed(r.seed), + } + + if r.ndBuiltinCache != nil { + evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache)) + } + + for _, qt := range r.queryTracers { + evalArgs = append(evalArgs, EvalQueryTracer(qt)) + } + + for i := range r.resolvers { + evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r)) + } + + rs, err := pq.Eval(ctx, evalArgs...) 
+ txnErr := txnClose(ctx, err) // Always call closer + if err == nil { + err = txnErr + } + return rs, err +} + +// PartialEval has been deprecated and renamed to PartialResult. +func (r *Rego) PartialEval(ctx context.Context) (PartialResult, error) { + return r.PartialResult(ctx) +} + +// PartialResult partially evaluates this Rego object and returns a PartialResult. +func (r *Rego) PartialResult(ctx context.Context) (PartialResult, error) { + var err error + var txnClose transactionCloser + r.txn, txnClose, err = r.getTxn(ctx) + if err != nil { + return PartialResult{}, err + } + + pq, err := r.PrepareForEval(ctx, WithPartialEval()) + txnErr := txnClose(ctx, err) // Always call closer + if err != nil { + return PartialResult{}, err + } + if txnErr != nil { + return PartialResult{}, txnErr + } + + pr := PartialResult{ + compiler: pq.r.compiler, + store: pq.r.store, + body: pq.r.parsedQuery, + builtinDecls: pq.r.builtinDecls, + builtinFuncs: pq.r.builtinFuncs, + } + + return pr, nil +} + +// Partial runs partial evaluation on r and returns the result. +func (r *Rego) Partial(ctx context.Context) (*PartialQueries, error) { + var err error + var txnClose transactionCloser + r.txn, txnClose, err = r.getTxn(ctx) + if err != nil { + return nil, err + } + + pq, err := r.PrepareForPartial(ctx) + if err != nil { + _ = txnClose(ctx, err) // Ignore error + return nil, err + } + + evalArgs := []EvalOption{ + EvalTransaction(r.txn), + EvalMetrics(r.metrics), + EvalInstrument(r.instrument), + EvalInterQueryBuiltinCache(r.interQueryBuiltinCache), + EvalInterQueryBuiltinValueCache(r.interQueryBuiltinValueCache), + } + + if r.ndBuiltinCache != nil { + evalArgs = append(evalArgs, EvalNDBuiltinCache(r.ndBuiltinCache)) + } + + for _, t := range r.queryTracers { + evalArgs = append(evalArgs, EvalQueryTracer(t)) + } + + for i := range r.resolvers { + evalArgs = append(evalArgs, EvalResolver(r.resolvers[i].ref, r.resolvers[i].r)) + } + + pqs, err := pq.Partial(ctx, evalArgs...) + txnErr := txnClose(ctx, err) // Always call closer + if err == nil { + err = txnErr + } + return pqs, err +} + +// CompileOption defines a function to set options on Compile calls. +type CompileOption func(*CompileContext) + +// CompileContext contains options for Compile calls. +type CompileContext struct { + partial bool +} + +// CompilePartial defines an option to control whether partial evaluation is run +// before the query is planned and compiled. +func CompilePartial(yes bool) CompileOption { + return func(cfg *CompileContext) { + cfg.partial = yes + } +} + +// Compile returns a compiled policy query. 
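// ---------------------------------------------------------------------------
// Illustrative sketch (editor's aside, not part of the vendored upstream file):
// compiling a query into a Wasm executable with Compile (below) and writing
// the resulting bytes to disk. The "policy.wasm" file name is invented;
// assumes imports of context, os and v1/rego.
// ---------------------------------------------------------------------------
func exampleCompileToWasm(ctx context.Context, r *rego.Rego) error {
	cr, err := r.Compile(ctx) // add rego.CompilePartial(true) to run PE first
	if err != nil {
		return err
	}
	return os.WriteFile("policy.wasm", cr.Bytes, 0o644)
}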
+func (r *Rego) Compile(ctx context.Context, opts ...CompileOption) (*CompileResult, error) { + + var cfg CompileContext + + for _, opt := range opts { + opt(&cfg) + } + + var queries []ast.Body + modules := make([]*ast.Module, 0, len(r.compiler.Modules)) + + if cfg.partial { + + pq, err := r.Partial(ctx) + if err != nil { + return nil, err + } + if r.dump != nil { + if len(pq.Queries) != 0 { + msg := fmt.Sprintf("QUERIES (%d total):", len(pq.Queries)) + fmt.Fprintln(r.dump, msg) + fmt.Fprintln(r.dump, strings.Repeat("-", len(msg))) + for i := range pq.Queries { + fmt.Println(pq.Queries[i]) + } + fmt.Fprintln(r.dump) + } + if len(pq.Support) != 0 { + msg := fmt.Sprintf("SUPPORT (%d total):", len(pq.Support)) + fmt.Fprintln(r.dump, msg) + fmt.Fprintln(r.dump, strings.Repeat("-", len(msg))) + for i := range pq.Support { + fmt.Println(pq.Support[i]) + } + fmt.Fprintln(r.dump) + } + } + + queries = pq.Queries + modules = pq.Support + + for _, module := range r.compiler.Modules { + modules = append(modules, module) + } + } else { + var err error + // If creating a new transaction it should be closed before calling the + // planner to avoid holding open the transaction longer than needed. + // + // TODO(tsandall): in future, planner could make use of store, in which + // case this will need to change. + var txnClose transactionCloser + r.txn, txnClose, err = r.getTxn(ctx) + if err != nil { + return nil, err + } + + err = r.prepare(ctx, compileQueryType, nil) + txnErr := txnClose(ctx, err) // Always call closer + if err != nil { + return nil, err + } + if txnErr != nil { + return nil, err + } + + for _, module := range r.compiler.Modules { + modules = append(modules, module) + } + + queries = []ast.Body{r.compiledQueries[compileQueryType].query} + } + + if tgt := r.targetPlugin(r.target); tgt != nil { + return nil, fmt.Errorf("unsupported for rego target plugins") + } + + return r.compileWasm(modules, queries, compileQueryType) // TODO(sr) control flow is funky here +} + +func (r *Rego) compileWasm(_ []*ast.Module, queries []ast.Body, qType queryType) (*CompileResult, error) { + policy, err := r.planQuery(queries, qType) + if err != nil { + return nil, err + } + + m, err := wasm.New().WithPolicy(policy).Compile() + if err != nil { + return nil, err + } + + var out bytes.Buffer + if err := encoding.WriteModule(&out, m); err != nil { + return nil, err + } + + return &CompileResult{ + Bytes: out.Bytes(), + }, nil +} + +// PrepareOption defines a function to set an option to control +// the behavior of the Prepare call. +type PrepareOption func(*PrepareConfig) + +// PrepareConfig holds settings to control the behavior of the +// Prepare call. +type PrepareConfig struct { + doPartialEval bool + disableInlining *[]string + builtinFuncs map[string]*topdown.Builtin +} + +// WithPartialEval configures an option for PrepareForEval +// which will have it perform partial evaluation while preparing +// the query (similar to rego.Rego#PartialResult) +func WithPartialEval() PrepareOption { + return func(p *PrepareConfig) { + p.doPartialEval = true + } +} + +// WithNoInline adds a set of paths to exclude from partial evaluation inlining. +func WithNoInline(paths []string) PrepareOption { + return func(p *PrepareConfig) { + p.disableInlining = &paths + } +} + +// WithBuiltinFuncs carries the rego.Function{1,2,3} per-query function definitions +// to the target plugins. 
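// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// The WithPartialEval PrepareOption above runs partial evaluation once at
// preparation time, so the prepared query only carries the residual policy;
// WithNoInline (also above) could additionally exclude ref paths from inlining.
// Import paths assume the vendored v1 packages; policy and input are made up.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/v1/rego"
)

const policy = `package example

allow if input.user == "alice"
`

func main() {
	ctx := context.Background()

	pq, err := rego.New(
		rego.Query("data.example.allow"), // must be a single ground ref for this path
		rego.Module("example.rego", policy),
	).PrepareForEval(ctx, rego.WithPartialEval())
	if err != nil {
		log.Fatal(err)
	}

	rs, err := pq.Eval(ctx, rego.EvalInput(map[string]interface{}{"user": "alice"}))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rs.Allowed()) // true
}
// --- end of sketch ---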
+func WithBuiltinFuncs(bis map[string]*topdown.Builtin) PrepareOption { + return func(p *PrepareConfig) { + if p.builtinFuncs == nil { + p.builtinFuncs = make(map[string]*topdown.Builtin, len(bis)) + } + for k, v := range bis { + p.builtinFuncs[k] = v + } + } +} + +// BuiltinFuncs allows retrieving the builtin funcs set via PrepareOption +// WithBuiltinFuncs. +func (p *PrepareConfig) BuiltinFuncs() map[string]*topdown.Builtin { + return p.builtinFuncs +} + +// PrepareForEval will parse inputs, modules, and query arguments in preparation +// of evaluating them. +func (r *Rego) PrepareForEval(ctx context.Context, opts ...PrepareOption) (PreparedEvalQuery, error) { + if !r.hasQuery() { + return PreparedEvalQuery{}, fmt.Errorf("cannot evaluate empty query") + } + + pCfg := &PrepareConfig{} + for _, o := range opts { + o(pCfg) + } + + var err error + var txnClose transactionCloser + r.txn, txnClose, err = r.getTxn(ctx) + if err != nil { + return PreparedEvalQuery{}, err + } + + // If the caller wanted to do partial evaluation as part of preparation + // do it now and use the new Rego object. + if pCfg.doPartialEval { + + pr, err := r.partialResult(ctx, pCfg) + if err != nil { + _ = txnClose(ctx, err) // Ignore error + return PreparedEvalQuery{}, err + } + + // Prepare the new query using the result of partial evaluation + pq, err := pr.Rego(Transaction(r.txn)).PrepareForEval(ctx) + txnErr := txnClose(ctx, err) + if err != nil { + return pq, err + } + return pq, txnErr + } + + err = r.prepare(ctx, evalQueryType, []extraStage{ + { + after: "ResolveRefs", + stage: ast.QueryCompilerStageDefinition{ + Name: "RewriteToCaptureValue", + MetricName: "query_compile_stage_rewrite_to_capture_value", + Stage: r.rewriteQueryToCaptureValue, + }, + }, + }) + if err != nil { + _ = txnClose(ctx, err) // Ignore error + return PreparedEvalQuery{}, err + } + + switch r.target { + case targetWasm: // TODO(sr): make wasm a target plugin, too + + if r.hasWasmModule() { + _ = txnClose(ctx, err) // Ignore error + return PreparedEvalQuery{}, fmt.Errorf("wasm target not supported") + } + + var modules []*ast.Module + for _, module := range r.compiler.Modules { + modules = append(modules, module) + } + + queries := []ast.Body{r.compiledQueries[evalQueryType].query} + + e, err := opa.LookupEngine(targetWasm) + if err != nil { + return PreparedEvalQuery{}, err + } + + // nolint: staticcheck // SA4006 false positive + cr, err := r.compileWasm(modules, queries, evalQueryType) + if err != nil { + _ = txnClose(ctx, err) // Ignore error + return PreparedEvalQuery{}, err + } + + // nolint: staticcheck // SA4006 false positive + data, err := r.store.Read(ctx, r.txn, storage.Path{}) + if err != nil { + _ = txnClose(ctx, err) // Ignore error + return PreparedEvalQuery{}, err + } + + o, err := e.New().WithPolicyBytes(cr.Bytes).WithDataJSON(data).Init() + if err != nil { + _ = txnClose(ctx, err) // Ignore error + return PreparedEvalQuery{}, err + } + r.opa = o + + case targetRego: // do nothing, don't lookup default plugin + default: // either a specific plugin target, or one that is default + if tgt := r.targetPlugin(r.target); tgt != nil { + queries := []ast.Body{r.compiledQueries[evalQueryType].query} + pol, err := r.planQuery(queries, evalQueryType) + if err != nil { + return PreparedEvalQuery{}, err + } + // always add the builtins provided via rego.FunctionN options + opts = append(opts, WithBuiltinFuncs(r.builtinFuncs)) + r.targetPrepState, err = tgt.PrepareForEval(ctx, pol, opts...) 
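// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// The common pattern around PrepareForEval above: parse and compile once, then
// evaluate the prepared query repeatedly with per-request input via EvalInput.
// Import paths assume the vendored v1 packages; policy and inputs are made up.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/v1/rego"
)

const policy = `package example

allow if input.user == "alice"
`

func main() {
	ctx := context.Background()

	pq, err := rego.New(
		rego.Query("data.example.allow"),
		rego.Module("example.rego", policy),
	).PrepareForEval(ctx)
	if err != nil {
		log.Fatal(err)
	}

	for _, user := range []string{"alice", "bob"} {
		rs, err := pq.Eval(ctx, rego.EvalInput(map[string]interface{}{"user": user}))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(user, rs.Allowed()) // alice true, bob false
	}
}
// --- end of sketch ---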
+ if err != nil { + return PreparedEvalQuery{}, err + } + } + } + + txnErr := txnClose(ctx, err) // Always call closer + if txnErr != nil { + return PreparedEvalQuery{}, txnErr + } + + return PreparedEvalQuery{preparedQuery{r, pCfg}}, err +} + +// PrepareForPartial will parse inputs, modules, and query arguments in preparation +// of partially evaluating them. +func (r *Rego) PrepareForPartial(ctx context.Context, opts ...PrepareOption) (PreparedPartialQuery, error) { + if !r.hasQuery() { + return PreparedPartialQuery{}, fmt.Errorf("cannot evaluate empty query") + } + + pCfg := &PrepareConfig{} + for _, o := range opts { + o(pCfg) + } + + var err error + var txnClose transactionCloser + r.txn, txnClose, err = r.getTxn(ctx) + if err != nil { + return PreparedPartialQuery{}, err + } + + err = r.prepare(ctx, partialQueryType, []extraStage{ + { + after: "CheckSafety", + stage: ast.QueryCompilerStageDefinition{ + Name: "RewriteEquals", + MetricName: "query_compile_stage_rewrite_equals", + Stage: r.rewriteEqualsForPartialQueryCompile, + }, + }, + }) + txnErr := txnClose(ctx, err) // Always call closer + if err != nil { + return PreparedPartialQuery{}, err + } + if txnErr != nil { + return PreparedPartialQuery{}, txnErr + } + + return PreparedPartialQuery{preparedQuery{r, pCfg}}, err +} + +func (r *Rego) prepare(ctx context.Context, qType queryType, extras []extraStage) error { + var err error + + r.parsedInput, err = r.parseInput() + if err != nil { + return err + } + + err = r.loadFiles(ctx, r.txn, r.metrics) + if err != nil { + return err + } + + err = r.loadBundles(ctx, r.txn, r.metrics) + if err != nil { + return err + } + + err = r.parseModules(ctx, r.txn, r.metrics) + if err != nil { + return err + } + + // Compile the modules *before* the query, else functions + // defined in the module won't be found... + err = r.compileModules(ctx, r.txn, r.metrics) + if err != nil { + return err + } + + imports, err := r.prepareImports() + if err != nil { + return err + } + + queryImports := []*ast.Import{} + for _, imp := range imports { + path := imp.Path.Value.(ast.Ref) + if path.HasPrefix([]*ast.Term{ast.FutureRootDocument}) || path.HasPrefix([]*ast.Term{ast.RegoRootDocument}) { + queryImports = append(queryImports, imp) + } + } + + r.parsedQuery, err = r.parseQuery(queryImports, r.metrics) + if err != nil { + return err + } + + err = r.compileAndCacheQuery(qType, r.parsedQuery, imports, r.metrics, extras) + if err != nil { + return err + } + + return nil +} + +func (r *Rego) parseModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { + if len(r.modules) == 0 { + return nil + } + + ids, err := r.store.ListPolicies(ctx, txn) + if err != nil { + return err + } + + m.Timer(metrics.RegoModuleParse).Start() + defer m.Timer(metrics.RegoModuleParse).Stop() + var errs Errors + + // Parse any modules that are saved to the store, but only if + // another compile step is going to occur (ie. we have parsed modules + // that need to be compiled). 
+ for _, id := range ids { + // if it is already on the compiler we're using + // then don't bother to re-parse it from source + if _, haveMod := r.compiler.Modules[id]; haveMod { + continue + } + + bs, err := r.store.GetPolicy(ctx, txn, id) + if err != nil { + return err + } + + parsed, err := ast.ParseModuleWithOpts(id, string(bs), ast.ParserOptions{RegoVersion: r.regoVersion}) + if err != nil { + errs = append(errs, err) + } + + r.parsedModules[id] = parsed + } + + // Parse any passed in as arguments to the Rego object + for _, module := range r.modules { + p, err := module.ParseWithOpts(ast.ParserOptions{RegoVersion: r.regoVersion}) + if err != nil { + switch errorWithType := err.(type) { + case ast.Errors: + for _, e := range errorWithType { + errs = append(errs, e) + } + default: + errs = append(errs, errorWithType) + } + } + r.parsedModules[module.filename] = p + } + + if len(errs) > 0 { + return errs + } + + return nil +} + +func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { + if len(r.loadPaths.paths) == 0 { + return nil + } + + m.Timer(metrics.RegoLoadFiles).Start() + defer m.Timer(metrics.RegoLoadFiles).Stop() + + result, err := loader.NewFileLoader(). + WithMetrics(m). + WithProcessAnnotation(true). + WithRegoVersion(r.regoVersion). + WithCapabilities(r.capabilities). + Filtered(r.loadPaths.paths, r.loadPaths.filter) + if err != nil { + return err + } + for name, mod := range result.Modules { + r.parsedModules[name] = mod.Parsed + } + + if len(result.Documents) > 0 { + err = r.store.Write(ctx, txn, storage.AddOp, storage.Path{}, result.Documents) + if err != nil { + return err + } + } + return nil +} + +func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.Metrics) error { + if len(r.bundlePaths) == 0 { + return nil + } + + m.Timer(metrics.RegoLoadBundles).Start() + defer m.Timer(metrics.RegoLoadBundles).Stop() + + for _, path := range r.bundlePaths { + bndl, err := loader.NewFileLoader(). + WithMetrics(m). + WithProcessAnnotation(true). + WithSkipBundleVerification(r.skipBundleVerification). + WithRegoVersion(r.regoVersion). + WithCapabilities(r.capabilities). + AsBundle(path) + if err != nil { + return fmt.Errorf("loading error: %s", err) + } + r.bundles[path] = bndl + } + return nil +} + +func (r *Rego) parseInput() (ast.Value, error) { + if r.parsedInput != nil { + return r.parsedInput, nil + } + return r.parseRawInput(r.rawInput, r.metrics) +} + +func (r *Rego) parseRawInput(rawInput *interface{}, m metrics.Metrics) (ast.Value, error) { + var input ast.Value + + if rawInput == nil { + return input, nil + } + + m.Timer(metrics.RegoInputParse).Start() + defer m.Timer(metrics.RegoInputParse).Stop() + + rawPtr := util.Reference(rawInput) + + // roundtrip through json: this turns slices (e.g. 
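// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// loadFiles and loadBundles above are driven by the Load and LoadBundle
// options. The paths below are hypothetical; a nil filter loads every
// .rego/.json/.yaml file under the given paths.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/v1/rego"
)

func main() {
	rs, err := rego.New(
		rego.Query("data.policies.allow"),
		rego.Load([]string{"./policies", "./data.json"}, nil), // hypothetical files/dirs
		rego.LoadBundle("./bundle.tar.gz"),                    // hypothetical bundle tarball or directory
		rego.Input(map[string]interface{}{"user": "alice"}),
	).Eval(context.Background())
	if err != nil {
		log.Fatal(err) // e.g. "loading error: ..." when a path cannot be read
	}
	fmt.Println(rs.Allowed())
}
// --- end of sketch ---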
[]string, []bool) into + // []interface{}, the only array type ast.InterfaceToValue can work with + if err := util.RoundTrip(rawPtr); err != nil { + return nil, err + } + + return ast.InterfaceToValue(*rawPtr) +} + +func (r *Rego) parseQuery(queryImports []*ast.Import, m metrics.Metrics) (ast.Body, error) { + if r.parsedQuery != nil { + return r.parsedQuery, nil + } + + m.Timer(metrics.RegoQueryParse).Start() + defer m.Timer(metrics.RegoQueryParse).Stop() + + popts, err := future.ParserOptionsFromFutureImports(queryImports) + if err != nil { + return nil, err + } + popts.RegoVersion = r.regoVersion + popts, err = parserOptionsFromRegoVersionImport(queryImports, popts) + if err != nil { + return nil, err + } + popts.SkipRules = true + return ast.ParseBodyWithOpts(r.query, popts) +} + +func parserOptionsFromRegoVersionImport(imports []*ast.Import, popts ast.ParserOptions) (ast.ParserOptions, error) { + for _, imp := range imports { + path := imp.Path.Value.(ast.Ref) + if ast.Compare(path, ast.RegoV1CompatibleRef) == 0 { + popts.RegoVersion = ast.RegoV1 + return popts, nil + } + } + return popts, nil +} + +func (r *Rego) compileModules(ctx context.Context, txn storage.Transaction, m metrics.Metrics) error { + + // Only compile again if there are new modules. + if len(r.bundles) > 0 || len(r.parsedModules) > 0 { + + // The bundle.Activate call will activate any bundles passed in + // (ie compile + handle data store changes), and include any of + // the additional modules passed in. If no bundles are provided + // it will only compile the passed in modules. + // Use this as the single-point of compiling everything only a + // single time. + opts := &bundle.ActivateOpts{ + Ctx: ctx, + Store: r.store, + Txn: txn, + Compiler: r.compilerForTxn(ctx, r.store, txn), + Metrics: m, + Bundles: r.bundles, + ExtraModules: r.parsedModules, + ParserOptions: ast.ParserOptions{RegoVersion: r.regoVersion}, + } + err := bundle.Activate(opts) + if err != nil { + return err + } + } + + // Ensure all configured resolvers from the store are loaded. Skip if any were explicitly provided. + if len(r.resolvers) == 0 { + resolvers, err := bundleUtils.LoadWasmResolversFromStore(ctx, r.store, txn, r.bundles) + if err != nil { + return err + } + + for _, rslvr := range resolvers { + for _, ep := range rslvr.Entrypoints() { + r.resolvers = append(r.resolvers, refResolver{ep, rslvr}) + } + } + } + return nil +} + +func (r *Rego) compileAndCacheQuery(qType queryType, query ast.Body, imports []*ast.Import, m metrics.Metrics, extras []extraStage) error { + m.Timer(metrics.RegoQueryCompile).Start() + defer m.Timer(metrics.RegoQueryCompile).Stop() + + cachedQuery, ok := r.compiledQueries[qType] + if ok && cachedQuery.query != nil && cachedQuery.compiler != nil { + return nil + } + + qc, compiled, err := r.compileQuery(query, imports, m, extras) + if err != nil { + return err + } + + // cache the query for future use + r.compiledQueries[qType] = compiledQuery{ + query: compiled, + compiler: qc, + } + return nil +} + +func (r *Rego) prepareImports() ([]*ast.Import, error) { + imports := r.parsedImports + + if len(r.imports) > 0 { + s := make([]string, len(r.imports)) + for i := range r.imports { + s[i] = fmt.Sprintf("import %v", r.imports[i]) + } + parsed, err := ast.ParseImports(strings.Join(s, "\n")) + if err != nil { + return nil, err + } + imports = append(imports, parsed...) 
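// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// prepareImports and compileQuery above hand query imports to the query
// compiler, so an unqualified reference can resolve through an import alias.
// Import paths assume the vendored v1 packages; the names are made up and the
// aliasing behavior is as I understand the query compiler, not verified
// against this exact revision.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/v1/rego"
)

const policy = `package example

allow if input.user == "alice"
`

func main() {
	rs, err := rego.New(
		rego.Query("ex.allow"),                       // resolved via the import below
		rego.Imports([]string{"data.example as ex"}), // parsed with "import %v" in prepareImports
		rego.Module("example.rego", policy),
		rego.Input(map[string]interface{}{"user": "alice"}),
	).Eval(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rs.Allowed()) // true
}
// --- end of sketch ---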
+ } + return imports, nil +} + +func (r *Rego) compileQuery(query ast.Body, imports []*ast.Import, _ metrics.Metrics, extras []extraStage) (ast.QueryCompiler, ast.Body, error) { + var pkg *ast.Package + + if r.pkg != "" { + var err error + pkg, err = ast.ParsePackage(fmt.Sprintf("package %v", r.pkg)) + if err != nil { + return nil, nil, err + } + } else { + pkg = r.parsedPackage + } + + qctx := ast.NewQueryContext(). + WithPackage(pkg). + WithImports(imports) + + qc := r.compiler.QueryCompiler(). + WithContext(qctx). + WithUnsafeBuiltins(r.unsafeBuiltins). + WithEnablePrintStatements(r.enablePrintStatements). + WithStrict(false) + + for _, extra := range extras { + qc = qc.WithStageAfter(extra.after, extra.stage) + } + + compiled, err := qc.Compile(query) + + return qc, compiled, err + +} + +func (r *Rego) eval(ctx context.Context, ectx *EvalContext) (ResultSet, error) { + switch { + case r.targetPrepState != nil: // target plugin flow + var val ast.Value + if r.runtime != nil { + val = r.runtime.Value + } + s, err := r.targetPrepState.Eval(ctx, ectx, val) + if err != nil { + return nil, err + } + return r.valueToQueryResult(s, ectx) + case r.target == targetWasm: + return r.evalWasm(ctx, ectx) + case r.target == targetRego: // continue + } + + q := topdown.NewQuery(ectx.compiledQuery.query). + WithQueryCompiler(ectx.compiledQuery.compiler). + WithCompiler(r.compiler). + WithStore(r.store). + WithTransaction(ectx.txn). + WithBuiltins(r.builtinFuncs). + WithMetrics(ectx.metrics). + WithInstrumentation(ectx.instrumentation). + WithRuntime(r.runtime). + WithIndexing(ectx.indexing). + WithEarlyExit(ectx.earlyExit). + WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). + WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache). + WithStrictBuiltinErrors(r.strictBuiltinErrors). + WithBuiltinErrorList(r.builtinErrorList). + WithSeed(ectx.seed). + WithPrintHook(ectx.printHook). + WithDistributedTracingOpts(r.distributedTacingOpts). + WithVirtualCache(ectx.virtualCache) + + if !ectx.time.IsZero() { + q = q.WithTime(ectx.time) + } + + if ectx.ndBuiltinCache != nil { + q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) + } + + for i := range ectx.queryTracers { + q = q.WithQueryTracer(ectx.queryTracers[i]) + } + + if ectx.parsedInput != nil { + q = q.WithInput(ast.NewTerm(ectx.parsedInput)) + } + + if ectx.httpRoundTripper != nil { + q = q.WithHTTPRoundTripper(ectx.httpRoundTripper) + } + + for i := range ectx.resolvers { + q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) + } + + // Cancel query if context is cancelled or deadline is reached. 
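// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// eval() above wires tracers, metrics and instrumentation into the topdown
// query. From the caller's side that looks like the Trace/Metrics options plus
// rego.PrintTrace. Import paths assume the vendored v1 packages; the policy is
// made up.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/open-policy-agent/opa/v1/metrics"
	"github.com/open-policy-agent/opa/v1/rego"
)

const policy = `package example

allow if input.user == "alice"
`

func main() {
	m := metrics.New()
	r := rego.New(
		rego.Query("data.example.allow"),
		rego.Module("example.rego", policy),
		rego.Input(map[string]interface{}{"user": "alice"}),
		rego.Trace(true), // attaches the BufferTracer used above
		rego.Metrics(m),
	)

	if _, err := r.Eval(context.Background()); err != nil {
		log.Fatal(err)
	}

	rego.PrintTrace(os.Stdout, r) // dump the buffered evaluation trace
	fmt.Println(m.All())          // parse/compile/eval timers (nanoseconds)
}
// --- end of sketch ---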
+ c := topdown.NewCancel() + q = q.WithCancel(c) + exit := make(chan struct{}) + defer close(exit) + go waitForDone(ctx, exit, func() { + c.Cancel() + }) + + var rs ResultSet + err := q.Iter(ctx, func(qr topdown.QueryResult) error { + result, err := r.generateResult(qr, ectx) + if err != nil { + return err + } + rs = append(rs, result) + return nil + }) + + if err != nil { + return nil, err + } + + if len(rs) == 0 { + return nil, nil + } + + return rs, nil +} + +func (r *Rego) evalWasm(ctx context.Context, ectx *EvalContext) (ResultSet, error) { + input := ectx.rawInput + if ectx.parsedInput != nil { + i := interface{}(ectx.parsedInput) + input = &i + } + result, err := r.opa.Eval(ctx, opa.EvalOpts{ + Metrics: r.metrics, + Input: input, + Time: ectx.time, + Seed: ectx.seed, + InterQueryBuiltinCache: ectx.interQueryBuiltinCache, + NDBuiltinCache: ectx.ndBuiltinCache, + PrintHook: ectx.printHook, + Capabilities: ectx.capabilities, + }) + if err != nil { + return nil, err + } + + parsed, err := ast.ParseTerm(string(result.Result)) + if err != nil { + return nil, err + } + + return r.valueToQueryResult(parsed.Value, ectx) +} + +func (r *Rego) valueToQueryResult(res ast.Value, ectx *EvalContext) (ResultSet, error) { + resultSet, ok := res.(ast.Set) + if !ok { + return nil, fmt.Errorf("illegal result type") + } + + if resultSet.Len() == 0 { + return nil, nil + } + + var rs ResultSet + err := resultSet.Iter(func(term *ast.Term) error { + obj, ok := term.Value.(ast.Object) + if !ok { + return fmt.Errorf("illegal result type") + } + qr := topdown.QueryResult{} + obj.Foreach(func(k, v *ast.Term) { + kvt := ast.VarTerm(string(k.Value.(ast.String))) + qr[kvt.Value.(ast.Var)] = v + }) + result, err := r.generateResult(qr, ectx) + if err != nil { + return err + } + rs = append(rs, result) + return nil + }) + + return rs, err +} + +func (r *Rego) generateResult(qr topdown.QueryResult, ectx *EvalContext) (Result, error) { + + rewritten := ectx.compiledQuery.compiler.RewrittenVars() + + result := newResult() + for k, term := range qr { + v, err := r.generateJSON(term, ectx) + if err != nil { + return result, err + } + + if rw, ok := rewritten[k]; ok { + k = rw + } + if isTermVar(k) || isTermWasmVar(k) || k.IsGenerated() || k.IsWildcard() { + continue + } + result.Bindings[string(k)] = v + } + + for _, expr := range ectx.compiledQuery.query { + if expr.Generated { + continue + } + + if k, ok := r.capture[expr]; ok { + v, err := r.generateJSON(qr[k], ectx) + if err != nil { + return result, err + } + result.Expressions = append(result.Expressions, newExpressionValue(expr, v)) + } else { + result.Expressions = append(result.Expressions, newExpressionValue(expr, true)) + } + + } + return result, nil +} + +func (r *Rego) partialResult(ctx context.Context, pCfg *PrepareConfig) (PartialResult, error) { + + err := r.prepare(ctx, partialResultQueryType, []extraStage{ + { + after: "ResolveRefs", + stage: ast.QueryCompilerStageDefinition{ + Name: "RewriteForPartialEval", + MetricName: "query_compile_stage_rewrite_for_partial_eval", + Stage: r.rewriteQueryForPartialEval, + }, + }, + }) + if err != nil { + return PartialResult{}, err + } + + ectx := &EvalContext{ + parsedInput: r.parsedInput, + metrics: r.metrics, + txn: r.txn, + partialNamespace: r.partialNamespace, + queryTracers: r.queryTracers, + compiledQuery: r.compiledQueries[partialResultQueryType], + instrumentation: r.instrumentation, + indexing: true, + resolvers: r.resolvers, + capabilities: r.capabilities, + strictBuiltinErrors: r.strictBuiltinErrors, + 
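// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// The NewCancel/waitForDone wiring above means evaluation is aborted when the
// caller's context is cancelled or its deadline passes. Import paths assume
// the vendored v1 packages; the query is a made-up, deliberately expensive one
// so the deadline can plausibly hit first.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/open-policy-agent/opa/v1/rego"
	"github.com/open-policy-agent/opa/v1/topdown"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	rs, err := rego.New(
		rego.Query("count([x | some x in numbers.range(1, 5000000); x % 7 == 0]) > 0"),
	).Eval(ctx)
	switch {
	case err != nil && topdown.IsCancel(err):
		fmt.Println("evaluation cancelled:", err)
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Println(rs.Allowed())
	}
}
// --- end of sketch ---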
nondeterministicBuiltins: r.nondeterministicBuiltins, + } + + disableInlining := r.disableInlining + + if pCfg.disableInlining != nil { + disableInlining = *pCfg.disableInlining + } + + ectx.disableInlining, err = parseStringsToRefs(disableInlining) + if err != nil { + return PartialResult{}, err + } + + pq, err := r.partial(ctx, ectx) + if err != nil { + return PartialResult{}, err + } + + // Construct module for queries. + id := fmt.Sprintf("__partialresult__%s__", ectx.partialNamespace) + + module, err := ast.ParseModuleWithOpts(id, "package "+ectx.partialNamespace, + ast.ParserOptions{RegoVersion: r.regoVersion}) + if err != nil { + return PartialResult{}, fmt.Errorf("bad partial namespace") + } + + module.Rules = make([]*ast.Rule, len(pq.Queries)) + for i, body := range pq.Queries { + rule := &ast.Rule{ + Head: ast.NewHead(ast.Var("__result__"), nil, ast.Wildcard), + Body: body, + Module: module, + } + module.Rules[i] = rule + if checkPartialResultForRecursiveRefs(body, rule.Path()) { + return PartialResult{}, Errors{errPartialEvaluationNotEffective} + } + } + + // Update compiler with partial evaluation output. + r.compiler.Modules[id] = module + for i, module := range pq.Support { + r.compiler.Modules[fmt.Sprintf("__partialsupport__%s__%d__", ectx.partialNamespace, i)] = module + } + + r.metrics.Timer(metrics.RegoModuleCompile).Start() + r.compilerForTxn(ctx, r.store, r.txn).Compile(r.compiler.Modules) + r.metrics.Timer(metrics.RegoModuleCompile).Stop() + + if r.compiler.Failed() { + return PartialResult{}, r.compiler.Errors + } + + result := PartialResult{ + compiler: r.compiler, + store: r.store, + body: ast.MustParseBody(fmt.Sprintf("data.%v.__result__", ectx.partialNamespace)), + builtinDecls: r.builtinDecls, + builtinFuncs: r.builtinFuncs, + } + + return result, nil +} + +func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, error) { + + var unknowns []*ast.Term + + switch { + case ectx.parsedUnknowns != nil: + unknowns = ectx.parsedUnknowns + case ectx.unknowns != nil: + unknowns = make([]*ast.Term, len(ectx.unknowns)) + for i := range ectx.unknowns { + var err error + unknowns[i], err = ast.ParseTerm(ectx.unknowns[i]) + if err != nil { + return nil, err + } + } + default: + // Use input document as unknown if caller has not specified any. + unknowns = []*ast.Term{ast.NewTerm(ast.InputRootRef)} + } + + q := topdown.NewQuery(ectx.compiledQuery.query). + WithQueryCompiler(ectx.compiledQuery.compiler). + WithCompiler(r.compiler). + WithStore(r.store). + WithTransaction(ectx.txn). + WithBuiltins(r.builtinFuncs). + WithMetrics(ectx.metrics). + WithInstrumentation(ectx.instrumentation). + WithUnknowns(unknowns). + WithDisableInlining(ectx.disableInlining). + WithNondeterministicBuiltins(ectx.nondeterministicBuiltins). + WithRuntime(r.runtime). + WithIndexing(ectx.indexing). + WithEarlyExit(ectx.earlyExit). + WithPartialNamespace(ectx.partialNamespace). + WithSkipPartialNamespace(r.skipPartialNamespace). + WithShallowInlining(r.shallowInlining). + WithInterQueryBuiltinCache(ectx.interQueryBuiltinCache). + WithInterQueryBuiltinValueCache(ectx.interQueryBuiltinValueCache). + WithStrictBuiltinErrors(ectx.strictBuiltinErrors). + WithSeed(ectx.seed). 
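// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// partialResult above rewrites the partially evaluated query into a
// data.<namespace>.__result__ rule; callers reach it through PartialResult and
// PartialResult.Rego. Import paths assume the vendored v1 packages; policy and
// input are made up.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/v1/rego"
)

const policy = `package example

allow if input.user == "alice"
`

func main() {
	ctx := context.Background()

	pr, err := rego.New(
		rego.Query("data.example.allow"),
		rego.Module("example.rego", policy),
	).PartialResult(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Re-bind concrete input and evaluate the specialized __result__ query.
	rs, err := pr.Rego(rego.Input(map[string]interface{}{"user": "alice"})).Eval(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rs.Allowed()) // true
}
// --- end of sketch ---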
+ WithPrintHook(ectx.printHook) + + if !ectx.time.IsZero() { + q = q.WithTime(ectx.time) + } + + if ectx.ndBuiltinCache != nil { + q = q.WithNDBuiltinCache(ectx.ndBuiltinCache) + } + + for i := range ectx.queryTracers { + q = q.WithQueryTracer(ectx.queryTracers[i]) + } + + if ectx.parsedInput != nil { + q = q.WithInput(ast.NewTerm(ectx.parsedInput)) + } + + for i := range ectx.resolvers { + q = q.WithResolver(ectx.resolvers[i].ref, ectx.resolvers[i].r) + } + + // Cancel query if context is cancelled or deadline is reached. + c := topdown.NewCancel() + q = q.WithCancel(c) + exit := make(chan struct{}) + defer close(exit) + go waitForDone(ctx, exit, func() { + c.Cancel() + }) + + queries, support, err := q.PartialRun(ctx) + if err != nil { + return nil, err + } + + // If the target rego-version is v0, and the rego.v1 import is available, then we attempt to apply it to support modules. + if r.regoVersion == ast.RegoV0 && + (r.capabilities == nil || + r.capabilities.ContainsFeature(ast.FeatureRegoV1Import) || + r.capabilities.ContainsFeature(ast.FeatureRegoV1)) { + + for i, mod := range support { + // We can't apply the RegoV0CompatV1 version to the support module if it contains rules or vars that + // conflict with future keywords. + applyRegoVersion := true + + ast.WalkRules(mod, func(r *ast.Rule) bool { + name := r.Head.Name + if name == "" && len(r.Head.Reference) > 0 { + name = r.Head.Reference[0].Value.(ast.Var) + } + if ast.IsFutureKeywordForRegoVersion(name.String(), ast.RegoV0) { + applyRegoVersion = false + return true + } + return false + }) + + if applyRegoVersion { + ast.WalkVars(mod, func(v ast.Var) bool { + if ast.IsFutureKeywordForRegoVersion(v.String(), ast.RegoV0) { + applyRegoVersion = false + return true + } + return false + }) + } + + if applyRegoVersion { + support[i].SetRegoVersion(ast.RegoV0CompatV1) + } else { + support[i].SetRegoVersion(r.regoVersion) + } + } + } else { + // If the target rego-version is not v0, then we apply the target rego-version to the support modules. + for i := range support { + support[i].SetRegoVersion(r.regoVersion) + } + } + + pq := &PartialQueries{ + Queries: queries, + Support: support, + } + + return pq, nil +} + +func (r *Rego) rewriteQueryToCaptureValue(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { + + checkCapture := iteration(query) || len(query) > 1 + + for _, expr := range query { + + if expr.Negated { + continue + } + + if expr.IsAssignment() || expr.IsEquality() { + continue + } + + var capture *ast.Term + + // If the expression can be evaluated as a function, rewrite it to + // capture the return value. E.g., neq(1,2) becomes neq(1,2,x) but + // plus(1,2,x) does not get rewritten. 
+ switch terms := expr.Terms.(type) { + case *ast.Term: + capture = r.generateTermVar() + expr.Terms = ast.Equality.Expr(terms, capture).Terms + r.capture[expr] = capture.Value.(ast.Var) + case []*ast.Term: + tpe := r.compiler.TypeEnv.Get(terms[0]) + if !types.Void(tpe) && types.Arity(tpe) == len(terms)-1 { + capture = r.generateTermVar() + expr.Terms = append(terms, capture) + r.capture[expr] = capture.Value.(ast.Var) + } + } + + if capture != nil && checkCapture { + cpy := expr.Copy() + cpy.Terms = capture + cpy.Generated = true + cpy.With = nil + query.Append(cpy) + } + } + + return query, nil +} + +func (r *Rego) rewriteQueryForPartialEval(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { + if len(query) != 1 { + return nil, fmt.Errorf("partial evaluation requires single ref (not multiple expressions)") + } + + term, ok := query[0].Terms.(*ast.Term) + if !ok { + return nil, fmt.Errorf("partial evaluation requires ref (not expression)") + } + + ref, ok := term.Value.(ast.Ref) + if !ok { + return nil, fmt.Errorf("partial evaluation requires ref (not %v)", ast.ValueName(term.Value)) + } + + if !ref.IsGround() { + return nil, fmt.Errorf("partial evaluation requires ground ref") + } + + return ast.NewBody(ast.Equality.Expr(ast.Wildcard, term)), nil +} + +// rewriteEqualsForPartialQueryCompile will rewrite == to = in queries. Normally +// this wouldn't be done, except for handling queries with the `Partial` API +// where rewriting them can substantially simplify the result, and it is unlikely +// that the caller would need expression values. +func (r *Rego) rewriteEqualsForPartialQueryCompile(_ ast.QueryCompiler, query ast.Body) (ast.Body, error) { + doubleEq := ast.Equal.Ref() + unifyOp := ast.Equality.Ref() + ast.WalkExprs(query, func(x *ast.Expr) bool { + if x.IsCall() { + operator := x.Operator() + if operator.Equal(doubleEq) && len(x.Operands()) == 2 { + x.SetOperator(ast.NewTerm(unifyOp)) + } + } + return false + }) + return query, nil +} + +func (r *Rego) generateTermVar() *ast.Term { + r.termVarID++ + prefix := ast.WildcardPrefix + if p := r.targetPlugin(r.target); p != nil { + prefix = wasmVarPrefix + } else if r.target == targetWasm { + prefix = wasmVarPrefix + } + return ast.VarTerm(fmt.Sprintf("%sterm%v", prefix, r.termVarID)) +} + +func (r Rego) hasQuery() bool { + return len(r.query) != 0 || len(r.parsedQuery) != 0 +} + +func (r Rego) hasWasmModule() bool { + for _, b := range r.bundles { + if len(b.WasmModules) > 0 { + return true + } + } + return false +} + +type transactionCloser func(ctx context.Context, err error) error + +// getTxn will conditionally create a read or write transaction suitable for +// the configured Rego object. The returned function should be used to close the txn +// regardless of status. +func (r *Rego) getTxn(ctx context.Context) (storage.Transaction, transactionCloser, error) { + + noopCloser := func(_ context.Context, _ error) error { + return nil // no-op default + } + + if r.txn != nil { + // Externally provided txn + return r.txn, noopCloser, nil + } + + // Create a new transaction.. + params := storage.TransactionParams{} + + // Bundles and data paths may require writing data files or manifests to storage + if len(r.bundles) > 0 || len(r.bundlePaths) > 0 || len(r.loadPaths.paths) > 0 { + + // If we were given a store we will *not* write to it, only do that on one + // which was created automatically on behalf of the user. 
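// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// getTxn above only opens its own transaction when none is supplied; when the
// caller passes Store and Transaction, that transaction is reused and never
// closed by Rego. Import paths assume the vendored v1 packages; the data is
// made up.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/v1/rego"
	"github.com/open-policy-agent/opa/v1/storage"
	"github.com/open-policy-agent/opa/v1/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.NewFromObject(map[string]interface{}{
		"roles": map[string]interface{}{"alice": "admin"},
	})

	txn := storage.NewTransactionOrDie(ctx, store) // read transaction, managed by the caller
	defer store.Abort(ctx, txn)

	rs, err := rego.New(
		rego.Query(`data.roles.alice == "admin"`),
		rego.Store(store),
		rego.Transaction(txn),
	).Eval(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(rs) > 0) // true
}
// --- end of sketch ---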
+ if !r.ownStore { + return nil, noopCloser, errors.New("unable to start write transaction when store was provided") + } + + params.Write = true + } + + txn, err := r.store.NewTransaction(ctx, params) + if err != nil { + return nil, noopCloser, err + } + + // Setup a closer function that will abort or commit as needed. + closer := func(ctx context.Context, txnErr error) error { + var err error + + if txnErr == nil && params.Write { + err = r.store.Commit(ctx, txn) + } else { + r.store.Abort(ctx, txn) + } + + // Clear the auto created transaction now that it is closed. + r.txn = nil + + return err + } + + return txn, closer, nil +} + +func (r *Rego) compilerForTxn(ctx context.Context, store storage.Store, txn storage.Transaction) *ast.Compiler { + // Update the compiler to have a valid path conflict check + // for the current context and transaction. + return r.compiler.WithPathConflictsCheck(storage.NonEmpty(ctx, store, txn)) +} + +func checkPartialResultForRecursiveRefs(body ast.Body, path ast.Ref) bool { + var stop bool + ast.WalkRefs(body, func(x ast.Ref) bool { + if !stop { + if path.HasPrefix(x) { + stop = true + } + } + return stop + }) + return stop +} + +func isTermVar(v ast.Var) bool { + return strings.HasPrefix(string(v), ast.WildcardPrefix+"term") +} + +func isTermWasmVar(v ast.Var) bool { + return strings.HasPrefix(string(v), wasmVarPrefix+"term") +} + +func waitForDone(ctx context.Context, exit chan struct{}, f func()) { + select { + case <-exit: + return + case <-ctx.Done(): + f() + return + } +} + +type rawModule struct { + filename string + module string +} + +func (m rawModule) Parse() (*ast.Module, error) { + return ast.ParseModule(m.filename, m.module) +} + +func (m rawModule) ParseWithOpts(opts ast.ParserOptions) (*ast.Module, error) { + return ast.ParseModuleWithOpts(m.filename, m.module, opts) +} + +type extraStage struct { + after string + stage ast.QueryCompilerStageDefinition +} + +type refResolver struct { + ref ast.Ref + r resolver.Resolver +} + +func iteration(x interface{}) bool { + + var stopped bool + + vis := ast.NewGenericVisitor(func(x interface{}) bool { + switch x := x.(type) { + case *ast.Term: + if ast.IsComprehension(x.Value) { + return true + } + case ast.Ref: + if !stopped { + if bi := ast.BuiltinMap[x.String()]; bi != nil { + if bi.Relation { + stopped = true + return stopped + } + } + for i := 1; i < len(x); i++ { + if _, ok := x[i].Value.(ast.Var); ok { + stopped = true + return stopped + } + } + } + return stopped + } + return stopped + }) + + vis.Walk(x) + + return stopped +} + +func parseStringsToRefs(s []string) ([]ast.Ref, error) { + + refs := make([]ast.Ref, len(s)) + for i := range refs { + var err error + refs[i], err = ast.ParseRef(s[i]) + if err != nil { + return nil, err + } + } + + return refs, nil +} + +// helper function to finish a built-in function call. If an error occurred, +// wrap the error and return it. Otherwise, invoke the iterator if the result +// was defined. 
+func finishFunction(name string, bctx topdown.BuiltinContext, result *ast.Term, err error, iter func(*ast.Term) error) error { + if err != nil { + var e *HaltError + if errors.As(err, &e) { + tdErr := &topdown.Error{ + Code: topdown.BuiltinErr, + Message: fmt.Sprintf("%v: %v", name, e.Error()), + Location: bctx.Location, + } + return topdown.Halt{Err: tdErr.Wrap(e)} + } + tdErr := &topdown.Error{ + Code: topdown.BuiltinErr, + Message: fmt.Sprintf("%v: %v", name, err.Error()), + Location: bctx.Location, + } + return tdErr.Wrap(err) + } + if result == nil { + return nil + } + return iter(result) +} + +// helper function to return an option that sets a custom built-in function. +func newFunction(decl *Function, f topdown.BuiltinFunc) func(*Rego) { + return func(r *Rego) { + r.builtinDecls[decl.Name] = &ast.Builtin{ + Name: decl.Name, + Decl: decl.Decl, + Nondeterministic: decl.Nondeterministic, + } + r.builtinFuncs[decl.Name] = &topdown.Builtin{ + Decl: r.builtinDecls[decl.Name], + Func: f, + } + } +} + +func generateJSON(term *ast.Term, ectx *EvalContext) (interface{}, error) { + return ast.JSONWithOpt(term.Value, + ast.JSONOpt{ + SortSets: ectx.sortSets, + CopyMaps: ectx.copyMaps, + }) +} + +func (r *Rego) planQuery(queries []ast.Body, evalQueryType queryType) (*ir.Policy, error) { + modules := make([]*ast.Module, 0, len(r.compiler.Modules)) + for _, module := range r.compiler.Modules { + modules = append(modules, module) + } + + decls := make(map[string]*ast.Builtin, len(r.builtinDecls)+len(ast.BuiltinMap)) + + for k, v := range ast.BuiltinMap { + decls[k] = v + } + + for k, v := range r.builtinDecls { + decls[k] = v + } + + const queryName = "eval" // NOTE(tsandall): the query name is arbitrary + + p := planner.New(). + WithQueries([]planner.QuerySet{ + { + Name: queryName, + Queries: queries, + RewrittenVars: r.compiledQueries[evalQueryType].compiler.RewrittenVars(), + }, + }). + WithModules(modules). + WithBuiltinDecls(decls). + WithDebug(r.dump) + + policy, err := p.Plan() + if err != nil { + return nil, err + } + if r.dump != nil { + fmt.Fprintln(r.dump, "PLAN:") + fmt.Fprintln(r.dump, "-----") + err = ir.Pretty(r.dump, policy) + if err != nil { + return nil, err + } + fmt.Fprintln(r.dump) + } + return policy, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go new file mode 100644 index 000000000..cc0710426 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/rego/resultset.go @@ -0,0 +1,90 @@ +package rego + +import ( + "fmt" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// ResultSet represents a collection of output from Rego evaluation. An empty +// result set represents an undefined query. +type ResultSet []Result + +// Vars represents a collection of variable bindings. The keys are the variable +// names and the values are the binding values. +type Vars map[string]interface{} + +// WithoutWildcards returns a copy of v with wildcard variables removed. +func (v Vars) WithoutWildcards() Vars { + n := Vars{} + for k, v := range v { + if ast.Var(k).IsWildcard() || ast.Var(k).IsGenerated() { + continue + } + n[k] = v + } + return n +} + +// Result defines the output of Rego evaluation. +type Result struct { + Expressions []*ExpressionValue `json:"expressions"` + Bindings Vars `json:"bindings,omitempty"` +} + +func newResult() Result { + return Result{ + Bindings: Vars{}, + } +} + +// Location defines a position in a Rego query or module. 
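// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// newFunction/finishFunction above back the public rego.Function1/2/3 options.
// A custom built-in returns a term, or nil to leave the call undefined. Import
// paths assume the vendored v1 packages; the built-in name is made up.
package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/rego"
	"github.com/open-policy-agent/opa/v1/types"
)

func main() {
	rs, err := rego.New(
		rego.Query(`x = shout("hello")`),
		rego.Function1(
			&rego.Function{
				Name: "shout",
				Decl: types.NewFunction(types.Args(types.S), types.S),
			},
			func(_ rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
				s, ok := a.Value.(ast.String)
				if !ok {
					return nil, nil // undefined for non-string arguments
				}
				return ast.StringTerm(strings.ToUpper(string(s))), nil
			},
		),
	).Eval(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rs[0].Bindings["x"]) // HELLO
}
// --- end of sketch ---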
+type Location struct { + Row int `json:"row"` + Col int `json:"col"` +} + +// ExpressionValue defines the value of an expression in a Rego query. +type ExpressionValue struct { + Value interface{} `json:"value"` + Text string `json:"text"` + Location *Location `json:"location"` +} + +func newExpressionValue(expr *ast.Expr, value interface{}) *ExpressionValue { + result := &ExpressionValue{ + Value: value, + } + if expr.Location != nil { + result.Text = string(expr.Location.Text) + result.Location = &Location{ + Row: expr.Location.Row, + Col: expr.Location.Col, + } + } + return result +} + +func (ev *ExpressionValue) String() string { + return fmt.Sprint(ev.Value) +} + +// Allowed is a helper method that'll return true if all of these conditions hold: +// - the result set only has one element +// - there is only one expression in the result set's only element +// - that expression has the value `true` +// - there are no bindings. +// +// If bindings are present, this will yield `false`: it would be a pitfall to +// return `true` for a query like `data.authz.allow = x`, which always has result +// set element with value true, but could also have a binding `x: false`. +func (rs ResultSet) Allowed() bool { + if len(rs) == 1 && len(rs[0].Bindings) == 0 { + if exprs := rs[0].Expressions; len(exprs) == 1 { + if b, ok := exprs[0].Value.(bool); ok { + return b + } + } + } + return false +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go new file mode 100644 index 000000000..1f04d21c0 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/resolver/interface.go @@ -0,0 +1,29 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package resolver + +import ( + "context" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" +) + +// Resolver defines an external value resolver for OPA evaluations. +type Resolver interface { + Eval(context.Context, Input) (Result, error) +} + +// Input as provided to a Resolver instance when evaluating. +type Input struct { + Ref ast.Ref + Input *ast.Term + Metrics metrics.Metrics +} + +// Result of resolving a ref. 
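// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// The bindings pitfall described in the Allowed doc comment above, made
// concrete. Import paths assume the vendored v1 packages; policy and inputs
// are made up.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/v1/rego"
)

const policy = `package authz

default allow := false

allow if input.role == "admin"
`

func main() {
	ctx := context.Background()

	rs, err := rego.New(
		rego.Query("data.authz.allow"),
		rego.Module("authz.rego", policy),
		rego.Input(map[string]interface{}{"role": "admin"}),
	).Eval(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rs.Allowed()) // true: one result, one boolean expression, no bindings

	rs, err = rego.New(
		rego.Query("x = data.authz.allow"),
		rego.Module("authz.rego", policy),
		rego.Input(map[string]interface{}{"role": "viewer"}),
	).Eval(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Non-empty result set, but Allowed() stays false because of the binding.
	fmt.Println(rs.Allowed(), rs[0].Bindings["x"]) // false false
}
// --- end of sketch ---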
+type Result struct { + Value ast.Value +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go similarity index 97% rename from constraint/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go index 9c13879dc..4f57b3ef8 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/resolver/wasm/wasm.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/resolver/wasm/wasm.go @@ -9,9 +9,9 @@ import ( "fmt" "strconv" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/rego/opa" - "github.com/open-policy-agent/opa/resolver" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/resolver" ) // New creates a new Resolver instance which is using the Wasm module diff --git a/constraint/vendor/github.com/open-policy-agent/opa/schemas/authorizationPolicy.json b/constraint/vendor/github.com/open-policy-agent/opa/v1/schemas/authorizationPolicy.json similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/schemas/authorizationPolicy.json rename to constraint/vendor/github.com/open-policy-agent/opa/v1/schemas/authorizationPolicy.json diff --git a/constraint/vendor/github.com/open-policy-agent/opa/schemas/schemas.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/schemas/schemas.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/schemas/schemas.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/schemas/schemas.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go new file mode 100644 index 000000000..6fa2f86d9 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/doc.go @@ -0,0 +1,6 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package storage exposes the policy engine's storage layer. +package storage diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go new file mode 100644 index 000000000..8c789052e --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/errors.go @@ -0,0 +1,122 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package storage + +import ( + "fmt" +) + +const ( + // InternalErr indicates an unknown, internal error has occurred. + InternalErr = "storage_internal_error" + + // NotFoundErr indicates the path used in the storage operation does not + // locate a document. + NotFoundErr = "storage_not_found_error" + + // WriteConflictErr indicates a write on the path enocuntered a conflicting + // value inside the transaction. + WriteConflictErr = "storage_write_conflict_error" + + // InvalidPatchErr indicates an invalid patch/write was issued. The patch + // was rejected. + InvalidPatchErr = "storage_invalid_patch_error" + + // InvalidTransactionErr indicates an invalid operation was performed + // inside of the transaction. 
+ InvalidTransactionErr = "storage_invalid_txn_error" + + // TriggersNotSupportedErr indicates the caller attempted to register a + // trigger against a store that does not support them. + TriggersNotSupportedErr = "storage_triggers_not_supported_error" + + // WritesNotSupportedErr indicate the caller attempted to perform a write + // against a store that does not support them. + WritesNotSupportedErr = "storage_writes_not_supported_error" + + // PolicyNotSupportedErr indicate the caller attempted to perform a policy + // management operation against a store that does not support them. + PolicyNotSupportedErr = "storage_policy_not_supported_error" +) + +// Error is the error type returned by the storage layer. +type Error struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (err *Error) Error() string { + if err.Message != "" { + return fmt.Sprintf("%v: %v", err.Code, err.Message) + } + return err.Code +} + +// IsNotFound returns true if this error is a NotFoundErr. +func IsNotFound(err error) bool { + switch err := err.(type) { + case *Error: + return err.Code == NotFoundErr + } + return false +} + +// IsWriteConflictError returns true if this error a WriteConflictErr. +func IsWriteConflictError(err error) bool { + switch err := err.(type) { + case *Error: + return err.Code == WriteConflictErr + } + return false +} + +// IsInvalidPatch returns true if this error is a InvalidPatchErr. +func IsInvalidPatch(err error) bool { + switch err := err.(type) { + case *Error: + return err.Code == InvalidPatchErr + } + return false +} + +// IsInvalidTransaction returns true if this error is a InvalidTransactionErr. +func IsInvalidTransaction(err error) bool { + switch err := err.(type) { + case *Error: + return err.Code == InvalidTransactionErr + } + return false +} + +// IsIndexingNotSupported is a stub for backwards-compatibility. +// +// Deprecated: We no longer return IndexingNotSupported errors, so it is +// unnecessary to check for them. +func IsIndexingNotSupported(error) bool { return false } + +func writeConflictError(path Path) *Error { + return &Error{ + Code: WriteConflictErr, + Message: path.String(), + } +} + +func triggersNotSupportedError() *Error { + return &Error{ + Code: TriggersNotSupportedErr, + } +} + +func writesNotSupportedError() *Error { + return &Error{ + Code: WritesNotSupportedErr, + } +} + +func policyNotSupportedError() *Error { + return &Error{ + Code: PolicyNotSupportedErr, + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go new file mode 100644 index 000000000..667ca608e --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/ast.go @@ -0,0 +1,314 @@ +// Copyright 2024 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
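// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// The error-code helpers above (IsNotFound, IsWriteConflictError, ...) are how
// callers distinguish storage failures. Import paths assume the vendored v1
// packages; the data path is made up.
package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/v1/storage"
	"github.com/open-policy-agent/opa/v1/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.NewFromObject(map[string]interface{}{
		"users": map[string]interface{}{},
	})

	txn := storage.NewTransactionOrDie(ctx, store)
	defer store.Abort(ctx, txn)

	_, err := store.Read(ctx, txn, storage.MustParsePath("/users/alice"))
	if storage.IsNotFound(err) {
		fmt.Println("no such document:", err) // code storage_not_found_error
	}
}
// --- end of sketch ---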
+ +package inmem + +import ( + "fmt" + "strconv" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/internal/errors" + "github.com/open-policy-agent/opa/v1/storage/internal/ptr" +) + +type updateAST struct { + path storage.Path // data path modified by update + remove bool // indicates whether update removes the value at path + value ast.Value // value to add/replace at path (ignored if remove is true) +} + +func (u *updateAST) Path() storage.Path { + return u.path +} + +func (u *updateAST) Remove() bool { + return u.remove +} + +func (u *updateAST) Set(v interface{}) { + if v, ok := v.(ast.Value); ok { + u.value = v + } else { + panic("illegal value type") // FIXME: do conversion? + } +} + +func (u *updateAST) Value() interface{} { + return u.value +} + +func (u *updateAST) Relative(path storage.Path) dataUpdate { + cpy := *u + cpy.path = cpy.path[len(path):] + return &cpy +} + +func (u *updateAST) Apply(v interface{}) interface{} { + if len(u.path) == 0 { + return u.value + } + + data, ok := v.(ast.Value) + if !ok { + panic(fmt.Errorf("illegal value type %T, expected ast.Value", v)) + } + + if u.remove { + newV, err := removeInAst(data, u.path) + if err != nil { + panic(err) + } + return newV + } + + // If we're not removing, we're replacing (adds are turned into replaces during updateAST creation). + newV, err := setInAst(data, u.path, u.value) + if err != nil { + panic(err) + } + return newV +} + +func newUpdateAST(data interface{}, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) { + + switch data.(type) { + case ast.Null, ast.Boolean, ast.Number, ast.String: + return nil, errors.NewNotFoundError(path) + } + + switch data := data.(type) { + case ast.Object: + return newUpdateObjectAST(data, op, path, idx, value) + + case *ast.Array: + return newUpdateArrayAST(data, op, path, idx, value) + } + + return nil, &storage.Error{ + Code: storage.InternalErr, + Message: "invalid data value encountered", + } +} + +func newUpdateArrayAST(data *ast.Array, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) { + + if idx == len(path)-1 { + if path[idx] == "-" || path[idx] == strconv.Itoa(data.Len()) { + if op != storage.AddOp { + return nil, invalidPatchError("%v: invalid patch path", path) + } + + cpy := data.Copy() + cpy = cpy.Append(ast.NewTerm(value)) + return &updateAST{path[:len(path)-1], false, cpy}, nil + } + + pos, err := ptr.ValidateASTArrayIndex(data, path[idx], path) + if err != nil { + return nil, err + } + + switch op { + case storage.AddOp: + var results []*ast.Term + for i := 0; i < data.Len(); i++ { + if i == pos { + results = append(results, ast.NewTerm(value)) + } + results = append(results, data.Elem(i)) + } + + return &updateAST{path[:len(path)-1], false, ast.NewArray(results...)}, nil + + case storage.RemoveOp: + var results []*ast.Term + for i := 0; i < data.Len(); i++ { + if i != pos { + results = append(results, data.Elem(i)) + } + } + return &updateAST{path[:len(path)-1], false, ast.NewArray(results...)}, nil + + default: + var results []*ast.Term + for i := 0; i < data.Len(); i++ { + if i == pos { + results = append(results, ast.NewTerm(value)) + } else { + results = append(results, data.Elem(i)) + } + } + + return &updateAST{path[:len(path)-1], false, ast.NewArray(results...)}, nil + } + } + + pos, err := ptr.ValidateASTArrayIndex(data, path[idx], path) + if err != nil { + return nil, err + } + + return 
newUpdateAST(data.Elem(pos).Value, op, path, idx+1, value) +} + +func newUpdateObjectAST(data ast.Object, op storage.PatchOp, path storage.Path, idx int, value ast.Value) (*updateAST, error) { + key := ast.StringTerm(path[idx]) + val := data.Get(key) + + if idx == len(path)-1 { + switch op { + case storage.ReplaceOp, storage.RemoveOp: + if val == nil { + return nil, errors.NewNotFoundError(path) + } + } + return &updateAST{path, op == storage.RemoveOp, value}, nil + } + + if val != nil { + return newUpdateAST(val.Value, op, path, idx+1, value) + } + + return nil, errors.NewNotFoundError(path) +} + +func interfaceToValue(v interface{}) (ast.Value, error) { + if v, ok := v.(ast.Value); ok { + return v, nil + } + return ast.InterfaceToValue(v) +} + +// setInAst updates the value in the AST at the given path with the given value. +// Values can only be replaced in arrays, not added. +// Values for new keys can be added to objects +func setInAst(data ast.Value, path storage.Path, value ast.Value) (ast.Value, error) { + if len(path) == 0 { + return data, nil + } + + switch data := data.(type) { + case ast.Object: + return setInAstObject(data, path, value) + case *ast.Array: + return setInAstArray(data, path, value) + default: + return nil, fmt.Errorf("illegal value type %T, expected ast.Object or ast.Array", data) + } +} + +func setInAstObject(obj ast.Object, path storage.Path, value ast.Value) (ast.Value, error) { + key := ast.StringTerm(path[0]) + + if len(path) == 1 { + obj.Insert(key, ast.NewTerm(value)) + return obj, nil + } + + child := obj.Get(key) + newChild, err := setInAst(child.Value, path[1:], value) + if err != nil { + return nil, err + } + obj.Insert(key, ast.NewTerm(newChild)) + return obj, nil +} + +func setInAstArray(arr *ast.Array, path storage.Path, value ast.Value) (ast.Value, error) { + idx, err := strconv.Atoi(path[0]) + if err != nil { + return nil, fmt.Errorf("illegal array index %v: %v", path[0], err) + } + + if idx < 0 || idx >= arr.Len() { + return arr, nil + } + + if len(path) == 1 { + arr.Set(idx, ast.NewTerm(value)) + return arr, nil + } + + child := arr.Elem(idx) + newChild, err := setInAst(child.Value, path[1:], value) + if err != nil { + return nil, err + } + arr.Set(idx, ast.NewTerm(newChild)) + return arr, nil +} + +func removeInAst(value ast.Value, path storage.Path) (ast.Value, error) { + if len(path) == 0 { + return value, nil + } + + switch value := value.(type) { + case ast.Object: + return removeInAstObject(value, path) + case *ast.Array: + return removeInAstArray(value, path) + default: + return nil, fmt.Errorf("illegal value type %T, expected ast.Object or ast.Array", value) + } +} + +func removeInAstObject(obj ast.Object, path storage.Path) (ast.Value, error) { + key := ast.StringTerm(path[0]) + + if len(path) == 1 { + var items [][2]*ast.Term + // Note: possibly expensive operation for large data. + obj.Foreach(func(k *ast.Term, v *ast.Term) { + if k.Equal(key) { + return + } + items = append(items, [2]*ast.Term{k, v}) + }) + return ast.NewObject(items...), nil + } + + if child := obj.Get(key); child != nil { + updatedChild, err := removeInAst(child.Value, path[1:]) + if err != nil { + return nil, err + } + obj.Insert(key, ast.NewTerm(updatedChild)) + } + + return obj, nil +} + +func removeInAstArray(arr *ast.Array, path storage.Path) (ast.Value, error) { + idx, err := strconv.Atoi(path[0]) + if err != nil { + // We expect the path to be valid at this point. 
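// --- usage sketch (editorial aside; not part of the vendored OPA sources) ---
// The updateAST/setInAst/removeInAst helpers above implement the usual
// add/replace/remove patch semantics for an AST-backed store. Import paths
// assume the vendored v1 packages; the data is made up.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/open-policy-agent/opa/v1/storage"
	"github.com/open-policy-agent/opa/v1/storage/inmem"
)

func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	ctx := context.Background()
	store := inmem.NewWithOpts(inmem.OptReturnASTValuesOnRead(true)) // reads return ast.Value
	txn := storage.NewTransactionOrDie(ctx, store, storage.WriteParams)

	must(store.Write(ctx, txn, storage.AddOp, storage.MustParsePath("/fruits"), []interface{}{"apple"}))
	must(store.Write(ctx, txn, storage.AddOp, storage.MustParsePath("/fruits/-"), "banana")) // "-" appends
	must(store.Write(ctx, txn, storage.ReplaceOp, storage.MustParsePath("/fruits/0"), "pear"))
	must(store.Write(ctx, txn, storage.RemoveOp, storage.MustParsePath("/fruits/1"), nil))

	v, err := store.Read(ctx, txn, storage.MustParsePath("/fruits"))
	must(err)
	fmt.Println(v) // ["pear"] as an ast.Value
	must(store.Commit(ctx, txn))
}
// --- end of sketch ---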
+ return arr, nil + } + + if idx < 0 || idx >= arr.Len() { + return arr, err + } + + if len(path) == 1 { + var elems []*ast.Term + // Note: possibly expensive operation for large data. + for i := 0; i < arr.Len(); i++ { + if i == idx { + continue + } + elems = append(elems, arr.Elem(i)) + } + return ast.NewArray(elems...), nil + } + + updatedChild, err := removeInAst(arr.Elem(idx).Value, path[1:]) + if err != nil { + return nil, err + } + arr.Set(idx, ast.NewTerm(updatedChild)) + return arr, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go new file mode 100644 index 000000000..7c5116b52 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/inmem.go @@ -0,0 +1,458 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package inmem implements an in-memory version of the policy engine's storage +// layer. +// +// The in-memory store is used as the default storage layer implementation. The +// in-memory store supports multi-reader/single-writer concurrency with +// rollback. +// +// Callers should assume the in-memory store does not make copies of written +// data. Once data is written to the in-memory store, it should not be modified +// (outside of calling Store.Write). Furthermore, data read from the in-memory +// store should be treated as read-only. +package inmem + +import ( + "context" + "fmt" + "io" + "path/filepath" + "strings" + "sync" + "sync/atomic" + + "github.com/open-policy-agent/opa/internal/merge" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/util" +) + +// New returns an empty in-memory store. +func New() storage.Store { + return NewWithOpts() +} + +// NewWithOpts returns an empty in-memory store, with extra options passed. +func NewWithOpts(opts ...Opt) storage.Store { + s := &store{ + triggers: map[*handle]storage.TriggerConfig{}, + policies: map[string][]byte{}, + roundTripOnWrite: true, + returnASTValuesOnRead: false, + } + + for _, opt := range opts { + opt(s) + } + + if s.returnASTValuesOnRead { + s.data = ast.NewObject() + } else { + s.data = map[string]interface{}{} + } + + return s +} + +// NewFromObject returns a new in-memory store from the supplied data object. +func NewFromObject(data map[string]interface{}) storage.Store { + return NewFromObjectWithOpts(data) +} + +// NewFromObjectWithOpts returns a new in-memory store from the supplied data object, with the +// options passed. +func NewFromObjectWithOpts(data map[string]interface{}, opts ...Opt) storage.Store { + db := NewWithOpts(opts...) + ctx := context.Background() + txn, err := db.NewTransaction(ctx, storage.WriteParams) + if err != nil { + panic(err) + } + if err := db.Write(ctx, txn, storage.AddOp, storage.Path{}, data); err != nil { + panic(err) + } + if err := db.Commit(ctx, txn); err != nil { + panic(err) + } + return db +} + +// NewFromReader returns a new in-memory store from a reader that produces a +// JSON serialized object. This function is for test purposes. +func NewFromReader(r io.Reader) storage.Store { + return NewFromReaderWithOpts(r) +} + +// NewFromReader returns a new in-memory store from a reader that produces a +// JSON serialized object, with extra options. This function is for test purposes. 
+func NewFromReaderWithOpts(r io.Reader, opts ...Opt) storage.Store { + d := util.NewJSONDecoder(r) + var data map[string]interface{} + if err := d.Decode(&data); err != nil { + panic(err) + } + return NewFromObjectWithOpts(data, opts...) +} + +type store struct { + rmu sync.RWMutex // reader-writer lock + wmu sync.Mutex // writer lock + xid uint64 // last generated transaction id + data interface{} // raw or AST data + policies map[string][]byte // raw policies + triggers map[*handle]storage.TriggerConfig // registered triggers + + // roundTripOnWrite, if true, means that every call to Write round trips the + // data through JSON before adding the data to the store. Defaults to true. + roundTripOnWrite bool + + // returnASTValuesOnRead, if true, means that the store will eagerly convert data to AST values, + // and return them on Read. + // FIXME: naming(?) + returnASTValuesOnRead bool +} + +type handle struct { + db *store +} + +func (db *store) NewTransaction(_ context.Context, params ...storage.TransactionParams) (storage.Transaction, error) { + var write bool + var ctx *storage.Context + if len(params) > 0 { + write = params[0].Write + ctx = params[0].Context + } + xid := atomic.AddUint64(&db.xid, uint64(1)) + if write { + db.wmu.Lock() + } else { + db.rmu.RLock() + } + return newTransaction(xid, write, ctx, db), nil +} + +// Truncate implements the storage.Store interface. This method must be called within a transaction. +func (db *store) Truncate(ctx context.Context, txn storage.Transaction, params storage.TransactionParams, it storage.Iterator) error { + var update *storage.Update + var err error + mergedData := map[string]interface{}{} + + underlying, err := db.underlying(txn) + if err != nil { + return err + } + + for { + update, err = it.Next() + if err != nil { + break + } + + if update.IsPolicy { + err = underlying.UpsertPolicy(strings.TrimLeft(update.Path.String(), "/"), update.Value) + if err != nil { + return err + } + } else { + var value interface{} + err = util.Unmarshal(update.Value, &value) + if err != nil { + return err + } + + var key []string + dirpath := strings.TrimLeft(update.Path.String(), "/") + if len(dirpath) > 0 { + key = strings.Split(dirpath, "/") + } + + if value != nil { + obj, err := mktree(key, value) + if err != nil { + return err + } + + merged, ok := merge.InterfaceMaps(mergedData, obj) + if !ok { + return fmt.Errorf("failed to insert data file from path %s", filepath.Join(key...)) + } + mergedData = merged + } + } + } + + if err != nil && err != io.EOF { + return err + } + + // For backwards compatibility, check if `RootOverwrite` was configured. 
+ if params.RootOverwrite { + newPath, ok := storage.ParsePathEscaped("/") + if !ok { + return fmt.Errorf("storage path invalid: %v", newPath) + } + return underlying.Write(storage.AddOp, newPath, mergedData) + } + + for _, root := range params.BasePaths { + newPath, ok := storage.ParsePathEscaped("/" + root) + if !ok { + return fmt.Errorf("storage path invalid: %v", newPath) + } + + if value, ok := lookup(newPath, mergedData); ok { + if len(newPath) > 0 { + if err := storage.MakeDir(ctx, db, txn, newPath[:len(newPath)-1]); err != nil { + return err + } + } + if err := underlying.Write(storage.AddOp, newPath, value); err != nil { + return err + } + } + } + return nil +} + +func (db *store) Commit(ctx context.Context, txn storage.Transaction) error { + underlying, err := db.underlying(txn) + if err != nil { + return err + } + if underlying.write { + db.rmu.Lock() + event := underlying.Commit() + db.runOnCommitTriggers(ctx, txn, event) + // Mark the transaction stale after executing triggers, so they can + // perform store operations if needed. + underlying.stale = true + db.rmu.Unlock() + db.wmu.Unlock() + } else { + db.rmu.RUnlock() + } + return nil +} + +func (db *store) Abort(_ context.Context, txn storage.Transaction) { + underlying, err := db.underlying(txn) + if err != nil { + panic(err) + } + underlying.stale = true + if underlying.write { + db.wmu.Unlock() + } else { + db.rmu.RUnlock() + } +} + +func (db *store) ListPolicies(_ context.Context, txn storage.Transaction) ([]string, error) { + underlying, err := db.underlying(txn) + if err != nil { + return nil, err + } + return underlying.ListPolicies(), nil +} + +func (db *store) GetPolicy(_ context.Context, txn storage.Transaction, id string) ([]byte, error) { + underlying, err := db.underlying(txn) + if err != nil { + return nil, err + } + return underlying.GetPolicy(id) +} + +func (db *store) UpsertPolicy(_ context.Context, txn storage.Transaction, id string, bs []byte) error { + underlying, err := db.underlying(txn) + if err != nil { + return err + } + return underlying.UpsertPolicy(id, bs) +} + +func (db *store) DeletePolicy(_ context.Context, txn storage.Transaction, id string) error { + underlying, err := db.underlying(txn) + if err != nil { + return err + } + if _, err := underlying.GetPolicy(id); err != nil { + return err + } + return underlying.DeletePolicy(id) +} + +func (db *store) Register(_ context.Context, txn storage.Transaction, config storage.TriggerConfig) (storage.TriggerHandle, error) { + underlying, err := db.underlying(txn) + if err != nil { + return nil, err + } + if !underlying.write { + return nil, &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "triggers must be registered with a write transaction", + } + } + h := &handle{db} + db.triggers[h] = config + return h, nil +} + +func (db *store) Read(_ context.Context, txn storage.Transaction, path storage.Path) (interface{}, error) { + underlying, err := db.underlying(txn) + if err != nil { + return nil, err + } + + v, err := underlying.Read(path) + if err != nil { + return nil, err + } + + return v, nil +} + +func (db *store) Write(_ context.Context, txn storage.Transaction, op storage.PatchOp, path storage.Path, value interface{}) error { + underlying, err := db.underlying(txn) + if err != nil { + return err + } + val := util.Reference(value) + if db.roundTripOnWrite { + if err := util.RoundTrip(val); err != nil { + return err + } + } + return underlying.Write(op, path, *val) +} + +func (h *handle) Unregister(_ context.Context, txn 
storage.Transaction) { + underlying, err := h.db.underlying(txn) + if err != nil { + panic(err) + } + if !underlying.write { + panic(&storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "triggers must be unregistered with a write transaction", + }) + } + delete(h.db.triggers, h) +} + +func (db *store) runOnCommitTriggers(ctx context.Context, txn storage.Transaction, event storage.TriggerEvent) { + if db.returnASTValuesOnRead && len(db.triggers) > 0 { + // FIXME: Not very performant for large data. + + dataEvents := make([]storage.DataEvent, 0, len(event.Data)) + + for _, dataEvent := range event.Data { + if astData, ok := dataEvent.Data.(ast.Value); ok { + jsn, err := ast.ValueToInterface(astData, illegalResolver{}) + if err != nil { + panic(err) + } + dataEvents = append(dataEvents, storage.DataEvent{ + Path: dataEvent.Path, + Data: jsn, + Removed: dataEvent.Removed, + }) + } else { + dataEvents = append(dataEvents, dataEvent) + } + } + + event = storage.TriggerEvent{ + Policy: event.Policy, + Data: dataEvents, + Context: event.Context, + } + } + + for _, t := range db.triggers { + t.OnCommit(ctx, txn, event) + } +} + +type illegalResolver struct{} + +func (illegalResolver) Resolve(ref ast.Ref) (interface{}, error) { + return nil, fmt.Errorf("illegal value: %v", ref) +} + +func (db *store) underlying(txn storage.Transaction) (*transaction, error) { + underlying, ok := txn.(*transaction) + if !ok { + return nil, &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: fmt.Sprintf("unexpected transaction type %T", txn), + } + } + if underlying.db != db { + return nil, &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "unknown transaction", + } + } + if underlying.stale { + return nil, &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "stale transaction", + } + } + return underlying, nil +} + +const rootMustBeObjectMsg = "root must be object" +const rootCannotBeRemovedMsg = "root cannot be removed" + +func invalidPatchError(f string, a ...interface{}) *storage.Error { + return &storage.Error{ + Code: storage.InvalidPatchErr, + Message: fmt.Sprintf(f, a...), + } +} + +func mktree(path []string, value interface{}) (map[string]interface{}, error) { + if len(path) == 0 { + // For 0 length path the value is the full tree. + obj, ok := value.(map[string]interface{}) + if !ok { + return nil, invalidPatchError(rootMustBeObjectMsg) + } + return obj, nil + } + + dir := map[string]interface{}{} + for i := len(path) - 1; i > 0; i-- { + dir[path[i]] = value + value = dir + dir = map[string]interface{}{} + } + dir[path[0]] = value + + return dir, nil +} + +func lookup(path storage.Path, data map[string]interface{}) (interface{}, bool) { + if len(path) == 0 { + return data, true + } + for i := 0; i < len(path)-1; i++ { + value, ok := data[path[i]] + if !ok { + return nil, false + } + obj, ok := value.(map[string]interface{}) + if !ok { + return nil, false + } + data = obj + } + value, ok := data[path[len(path)-1]] + return value, ok +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go new file mode 100644 index 000000000..2239fc73a --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/opts.go @@ -0,0 +1,37 @@ +package inmem + +// An Opt modifies store at instantiation. 
+type Opt func(*store) + +// OptRoundTripOnWrite sets whether incoming objects written to store are +// round-tripped through JSON to ensure they are serializable to JSON. +// +// Callers should disable this if they can guarantee all objects passed to +// Write() are serializable to JSON. Failing to do so may result in undefined +// behavior, including panics. +// +// Usually, when only storing objects in the inmem store that have been read +// via encoding/json, this is safe to disable, and comes with an improvement +// in performance and memory use. +// +// If setting to false, callers should deep-copy any objects passed to Write() +// unless they can guarantee the objects will not be mutated after being written, +// and that mutations happening to the objects after they have been passed into +// Write() don't affect their logic. +func OptRoundTripOnWrite(enabled bool) Opt { + return func(s *store) { + s.roundTripOnWrite = enabled + } +} + +// OptReturnASTValuesOnRead sets whether data values added to the store should be +// eagerly converted to AST values, which are then returned on read. +// +// When enabled, this feature does not sanity check data before converting it to AST values, +// which may result in panics if the data is not valid. Callers should ensure that passed data +// can be serialized to AST values; otherwise, it's recommended to also enable OptRoundTripOnWrite. +func OptReturnASTValuesOnRead(enabled bool) Opt { + return func(s *store) { + s.returnASTValuesOnRead = enabled + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go new file mode 100644 index 000000000..f8a730391 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/inmem/txn.go @@ -0,0 +1,483 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package inmem + +import ( + "container/list" + "encoding/json" + "strconv" + + "github.com/open-policy-agent/opa/internal/deepcopy" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/internal/errors" + "github.com/open-policy-agent/opa/v1/storage/internal/ptr" +) + +// transaction implements the low-level read/write operations on the in-memory +// store and contains the state required for pending transactions. +// +// For write transactions, the struct contains a logical set of updates +// performed by write operations in the transaction. Each write operation +// compacts the set such that two updates never overlap: +// +// - If new update path is a prefix of existing update path, existing update is +// removed, new update is added. +// +// - If existing update path is a prefix of new update path, existing update is +// modified. +// +// - Otherwise, new update is added. +// +// Read transactions do not require any special handling and simply passthrough +// to the underlying store. Read transactions do not support upgrade. 
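A minimal sketch (not part of the vendored source) of combining these two options when the caller controls the data it writes; newEvalStore is a hypothetical helper name.

package main

import (
	"github.com/open-policy-agent/opa/v1/storage"
	"github.com/open-policy-agent/opa/v1/storage/inmem"
)

// newEvalStore skips the JSON round trip (the caller promises serializable
// input) and asks for AST values on read, trading write-time safety checks
// for cheaper evaluation.
func newEvalStore() storage.Store {
	return inmem.NewWithOpts(
		inmem.OptRoundTripOnWrite(false),
		inmem.OptReturnASTValuesOnRead(true),
	)
}

func main() {
	_ = newEvalStore()
}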
+type transaction struct { + xid uint64 + write bool + stale bool + db *store + updates *list.List + policies map[string]policyUpdate + context *storage.Context +} + +type policyUpdate struct { + value []byte + remove bool +} + +func newTransaction(xid uint64, write bool, context *storage.Context, db *store) *transaction { + return &transaction{ + xid: xid, + write: write, + db: db, + policies: map[string]policyUpdate{}, + updates: list.New(), + context: context, + } +} + +func (txn *transaction) ID() uint64 { + return txn.xid +} + +func (txn *transaction) Write(op storage.PatchOp, path storage.Path, value interface{}) error { + + if !txn.write { + return &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "data write during read transaction", + } + } + + if len(path) == 0 { + return txn.updateRoot(op, value) + } + + for curr := txn.updates.Front(); curr != nil; { + update := curr.Value.(dataUpdate) + + // Check if new update masks existing update exactly. In this case, the + // existing update can be removed and no other updates have to be + // visited (because no two updates overlap.) + if update.Path().Equal(path) { + if update.Remove() { + if op != storage.AddOp { + return errors.NewNotFoundError(path) + } + } + txn.updates.Remove(curr) + break + } + + // Check if new update masks existing update. In this case, the + // existing update has to be removed but other updates may overlap, so + // we must continue. + if update.Path().HasPrefix(path) { + remove := curr + curr = curr.Next() + txn.updates.Remove(remove) + continue + } + + // Check if new update modifies existing update. In this case, the + // existing update is mutated. + if path.HasPrefix(update.Path()) { + if update.Remove() { + return errors.NewNotFoundError(path) + } + suffix := path[len(update.Path()):] + newUpdate, err := txn.db.newUpdate(update.Value(), op, suffix, 0, value) + if err != nil { + return err + } + update.Set(newUpdate.Apply(update.Value())) + return nil + } + + curr = curr.Next() + } + + update, err := txn.db.newUpdate(txn.db.data, op, path, 0, value) + if err != nil { + return err + } + + txn.updates.PushFront(update) + return nil +} + +func (txn *transaction) updateRoot(op storage.PatchOp, value interface{}) error { + if op == storage.RemoveOp { + return invalidPatchError(rootCannotBeRemovedMsg) + } + + var update any + if txn.db.returnASTValuesOnRead { + valueAST, err := interfaceToValue(value) + if err != nil { + return err + } + if _, ok := valueAST.(ast.Object); !ok { + return invalidPatchError(rootMustBeObjectMsg) + } + + update = &updateAST{ + path: storage.Path{}, + remove: false, + value: valueAST, + } + } else { + if _, ok := value.(map[string]interface{}); !ok { + return invalidPatchError(rootMustBeObjectMsg) + } + + update = &updateRaw{ + path: storage.Path{}, + remove: false, + value: value, + } + } + + txn.updates.Init() + txn.updates.PushFront(update) + return nil +} + +func (txn *transaction) Commit() (result storage.TriggerEvent) { + result.Context = txn.context + for curr := txn.updates.Front(); curr != nil; curr = curr.Next() { + action := curr.Value.(dataUpdate) + txn.db.data = action.Apply(txn.db.data) + + result.Data = append(result.Data, storage.DataEvent{ + Path: action.Path(), + Data: action.Value(), + Removed: action.Remove(), + }) + } + for id, upd := range txn.policies { + if upd.remove { + delete(txn.db.policies, id) + } else { + txn.db.policies[id] = upd.value + } + + result.Policy = append(result.Policy, storage.PolicyEvent{ + ID: id, + Data: upd.value, + 
Removed: upd.remove, + }) + } + return result +} + +func pointer(v interface{}, path storage.Path) (interface{}, error) { + if v, ok := v.(ast.Value); ok { + return ptr.ValuePtr(v, path) + } + return ptr.Ptr(v, path) +} + +func deepcpy(v interface{}) interface{} { + if v, ok := v.(ast.Value); ok { + var cpy ast.Value + + switch data := v.(type) { + case ast.Object: + cpy = data.Copy() + case *ast.Array: + cpy = data.Copy() + } + + return cpy + } + return deepcopy.DeepCopy(v) +} + +func (txn *transaction) Read(path storage.Path) (interface{}, error) { + + if !txn.write { + return pointer(txn.db.data, path) + } + + var merge []dataUpdate + + for curr := txn.updates.Front(); curr != nil; curr = curr.Next() { + + upd := curr.Value.(dataUpdate) + + if path.HasPrefix(upd.Path()) { + if upd.Remove() { + return nil, errors.NewNotFoundError(path) + } + return pointer(upd.Value(), path[len(upd.Path()):]) + } + + if upd.Path().HasPrefix(path) { + merge = append(merge, upd) + } + } + + data, err := pointer(txn.db.data, path) + + if err != nil { + return nil, err + } + + if len(merge) == 0 { + return data, nil + } + + cpy := deepcpy(data) + + for _, update := range merge { + cpy = update.Relative(path).Apply(cpy) + } + + return cpy, nil +} + +func (txn *transaction) ListPolicies() []string { + var ids []string + for id := range txn.db.policies { + if _, ok := txn.policies[id]; !ok { + ids = append(ids, id) + } + } + for id, update := range txn.policies { + if !update.remove { + ids = append(ids, id) + } + } + return ids +} + +func (txn *transaction) GetPolicy(id string) ([]byte, error) { + if update, ok := txn.policies[id]; ok { + if !update.remove { + return update.value, nil + } + return nil, errors.NewNotFoundErrorf("policy id %q", id) + } + if exist, ok := txn.db.policies[id]; ok { + return exist, nil + } + return nil, errors.NewNotFoundErrorf("policy id %q", id) +} + +func (txn *transaction) UpsertPolicy(id string, bs []byte) error { + if !txn.write { + return &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "policy write during read transaction", + } + } + txn.policies[id] = policyUpdate{bs, false} + return nil +} + +func (txn *transaction) DeletePolicy(id string) error { + if !txn.write { + return &storage.Error{ + Code: storage.InvalidTransactionErr, + Message: "policy write during read transaction", + } + } + txn.policies[id] = policyUpdate{nil, true} + return nil +} + +type dataUpdate interface { + Path() storage.Path + Remove() bool + Apply(interface{}) interface{} + Relative(path storage.Path) dataUpdate + Set(interface{}) + Value() interface{} +} + +// update contains state associated with an update to be applied to the +// in-memory data store. 
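To make the transaction lifecycle above concrete, a usage sketch (not part of the vendored source): a write transaction batches data and policy updates, and nothing becomes visible to readers until Commit.

package main

import (
	"context"

	"github.com/open-policy-agent/opa/v1/storage"
	"github.com/open-policy-agent/opa/v1/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.New()

	txn, err := store.NewTransaction(ctx, storage.WriteParams)
	if err != nil {
		panic(err)
	}

	// Data and policy writes accumulate in the transaction's update set.
	if err := store.Write(ctx, txn, storage.AddOp, storage.MustParsePath("/config"),
		map[string]interface{}{"debug": true}); err != nil {
		store.Abort(ctx, txn)
		panic(err)
	}
	if err := store.UpsertPolicy(ctx, txn, "example.rego", []byte("package example\n")); err != nil {
		store.Abort(ctx, txn)
		panic(err)
	}

	// Commit applies the compacted updates and notifies OnCommit triggers;
	// Abort would discard them instead.
	if err := store.Commit(ctx, txn); err != nil {
		panic(err)
	}
}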
+type updateRaw struct { + path storage.Path // data path modified by update + remove bool // indicates whether update removes the value at path + value interface{} // value to add/replace at path (ignored if remove is true) +} + +func (db *store) newUpdate(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) { + if db.returnASTValuesOnRead { + astData, err := interfaceToValue(data) + if err != nil { + return nil, err + } + astValue, err := interfaceToValue(value) + if err != nil { + return nil, err + } + return newUpdateAST(astData, op, path, idx, astValue) + } + return newUpdateRaw(data, op, path, idx, value) +} + +func newUpdateRaw(data interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) { + + switch data.(type) { + case nil, bool, json.Number, string: + return nil, errors.NewNotFoundError(path) + } + + switch data := data.(type) { + case map[string]interface{}: + return newUpdateObject(data, op, path, idx, value) + + case []interface{}: + return newUpdateArray(data, op, path, idx, value) + } + + return nil, &storage.Error{ + Code: storage.InternalErr, + Message: "invalid data value encountered", + } +} + +func newUpdateArray(data []interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) { + + if idx == len(path)-1 { + if path[idx] == "-" || path[idx] == strconv.Itoa(len(data)) { + if op != storage.AddOp { + return nil, invalidPatchError("%v: invalid patch path", path) + } + cpy := make([]interface{}, len(data)+1) + copy(cpy, data) + cpy[len(data)] = value + return &updateRaw{path[:len(path)-1], false, cpy}, nil + } + + pos, err := ptr.ValidateArrayIndex(data, path[idx], path) + if err != nil { + return nil, err + } + + switch op { + case storage.AddOp: + cpy := make([]interface{}, len(data)+1) + copy(cpy[:pos], data[:pos]) + copy(cpy[pos+1:], data[pos:]) + cpy[pos] = value + return &updateRaw{path[:len(path)-1], false, cpy}, nil + + case storage.RemoveOp: + cpy := make([]interface{}, len(data)-1) + copy(cpy[:pos], data[:pos]) + copy(cpy[pos:], data[pos+1:]) + return &updateRaw{path[:len(path)-1], false, cpy}, nil + + default: + cpy := make([]interface{}, len(data)) + copy(cpy, data) + cpy[pos] = value + return &updateRaw{path[:len(path)-1], false, cpy}, nil + } + } + + pos, err := ptr.ValidateArrayIndex(data, path[idx], path) + if err != nil { + return nil, err + } + + return newUpdateRaw(data[pos], op, path, idx+1, value) +} + +func newUpdateObject(data map[string]interface{}, op storage.PatchOp, path storage.Path, idx int, value interface{}) (dataUpdate, error) { + + if idx == len(path)-1 { + switch op { + case storage.ReplaceOp, storage.RemoveOp: + if _, ok := data[path[idx]]; !ok { + return nil, errors.NewNotFoundError(path) + } + } + return &updateRaw{path, op == storage.RemoveOp, value}, nil + } + + if data, ok := data[path[idx]]; ok { + return newUpdateRaw(data, op, path, idx+1, value) + } + + return nil, errors.NewNotFoundError(path) +} + +func (u *updateRaw) Remove() bool { + return u.remove +} + +func (u *updateRaw) Path() storage.Path { + return u.path +} + +func (u *updateRaw) Apply(data interface{}) interface{} { + if len(u.path) == 0 { + return u.value + } + parent, err := ptr.Ptr(data, u.path[:len(u.path)-1]) + if err != nil { + panic(err) + } + key := u.path[len(u.path)-1] + if u.remove { + obj := parent.(map[string]interface{}) + delete(obj, key) + return data + } + switch parent := parent.(type) { + case 
map[string]interface{}: + if parent == nil { + parent = make(map[string]interface{}, 1) + } + parent[key] = u.value + case []interface{}: + idx, err := strconv.Atoi(key) + if err != nil { + panic(err) + } + parent[idx] = u.value + } + return data +} + +func (u *updateRaw) Set(v interface{}) { + u.value = v +} + +func (u *updateRaw) Value() interface{} { + return u.value +} + +func (u *updateRaw) Relative(path storage.Path) dataUpdate { + cpy := *u + cpy.path = cpy.path[len(path):] + return &cpy +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go new file mode 100644 index 000000000..94e02a47b --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/interface.go @@ -0,0 +1,247 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package storage + +import ( + "context" + + "github.com/open-policy-agent/opa/v1/metrics" +) + +// Transaction defines the interface that identifies a consistent snapshot over +// the policy engine's storage layer. +type Transaction interface { + ID() uint64 +} + +// Store defines the interface for the storage layer's backend. +type Store interface { + Trigger + Policy + + // NewTransaction is called create a new transaction in the store. + NewTransaction(context.Context, ...TransactionParams) (Transaction, error) + + // Read is called to fetch a document referred to by path. + Read(context.Context, Transaction, Path) (interface{}, error) + + // Write is called to modify a document referred to by path. + Write(context.Context, Transaction, PatchOp, Path, interface{}) error + + // Commit is called to finish the transaction. If Commit returns an error, the + // transaction must be automatically aborted by the Store implementation. + Commit(context.Context, Transaction) error + + // Truncate is called to make a copy of the underlying store, write documents in the new store + // by creating multiple transactions in the new store as needed and finally swapping + // over to the new storage instance. This method must be called within a transaction on the original store. + Truncate(context.Context, Transaction, TransactionParams, Iterator) error + + // Abort is called to cancel the transaction. + Abort(context.Context, Transaction) +} + +// MakeDirer defines the interface a Store could realize to override the +// generic MakeDir functionality in storage.MakeDir +type MakeDirer interface { + MakeDir(context.Context, Transaction, Path) error +} + +// TransactionParams describes a new transaction. +type TransactionParams struct { + + // BasePaths indicates the top-level paths where write operations will be performed in this transaction. + BasePaths []string + + // RootOverwrite is deprecated. Use BasePaths instead. + RootOverwrite bool + + // Write indicates if this transaction will perform any write operations. + Write bool + + // Context contains key/value pairs passed to triggers. + Context *Context +} + +// Context is a simple container for key/value pairs. +type Context struct { + values map[interface{}]interface{} +} + +// NewContext returns a new context object. +func NewContext() *Context { + return &Context{ + values: map[interface{}]interface{}{}, + } +} + +// Get returns the key value in the context. 
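A short sketch (not part of the vendored source) of how a Context rides along with a transaction via TransactionParams; requestIDKey and its value are hypothetical.

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/v1/storage"
	"github.com/open-policy-agent/opa/v1/storage/inmem"
)

type requestIDKey struct{}

func main() {
	ctx := context.Background()
	store := inmem.New()

	// Attach caller-defined metadata to the write transaction; it is carried
	// to TriggerEvent.Context when the transaction commits.
	sctx := storage.NewContext()
	sctx.Put(requestIDKey{}, "req-42")

	txn, err := store.NewTransaction(ctx, storage.TransactionParams{Write: true, Context: sctx})
	if err != nil {
		panic(err)
	}
	// ... perform writes here ...
	if err := store.Commit(ctx, txn); err != nil {
		panic(err)
	}

	// Values are read back with Get, e.g. from an OnCommit callback.
	fmt.Println(sctx.Get(requestIDKey{})) // req-42
}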
+func (ctx *Context) Get(key interface{}) interface{} { + if ctx == nil { + return nil + } + return ctx.values[key] +} + +// Put adds a key/value pair to the context. +func (ctx *Context) Put(key, value interface{}) { + ctx.values[key] = value +} + +var metricsKey = struct{}{} + +// WithMetrics allows passing metrics via the Context. +// It puts the metrics object in the ctx, and returns the same +// ctx (not a copy) for convenience. +func (ctx *Context) WithMetrics(m metrics.Metrics) *Context { + ctx.values[metricsKey] = m + return ctx +} + +// Metrics() allows using a Context's metrics. Returns nil if metrics +// were not attached to the Context. +func (ctx *Context) Metrics() metrics.Metrics { + if m, ok := ctx.values[metricsKey]; ok { + if met, ok := m.(metrics.Metrics); ok { + return met + } + } + return nil +} + +// WriteParams specifies the TransactionParams for a write transaction. +var WriteParams = TransactionParams{ + Write: true, +} + +// PatchOp is the enumeration of supposed modifications. +type PatchOp int + +// Patch supports add, remove, and replace operations. +const ( + AddOp PatchOp = iota + RemoveOp = iota + ReplaceOp = iota +) + +// WritesNotSupported provides a default implementation of the write +// interface which may be used if the backend does not support writes. +type WritesNotSupported struct{} + +func (WritesNotSupported) Write(context.Context, Transaction, PatchOp, Path, interface{}) error { + return writesNotSupportedError() +} + +// Policy defines the interface for policy module storage. +type Policy interface { + ListPolicies(context.Context, Transaction) ([]string, error) + GetPolicy(context.Context, Transaction, string) ([]byte, error) + UpsertPolicy(context.Context, Transaction, string, []byte) error + DeletePolicy(context.Context, Transaction, string) error +} + +// PolicyNotSupported provides a default implementation of the policy interface +// which may be used if the backend does not support policy storage. +type PolicyNotSupported struct{} + +// ListPolicies always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) ListPolicies(context.Context, Transaction) ([]string, error) { + return nil, policyNotSupportedError() +} + +// GetPolicy always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) GetPolicy(context.Context, Transaction, string) ([]byte, error) { + return nil, policyNotSupportedError() +} + +// UpsertPolicy always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) UpsertPolicy(context.Context, Transaction, string, []byte) error { + return policyNotSupportedError() +} + +// DeletePolicy always returns a PolicyNotSupportedErr. +func (PolicyNotSupported) DeletePolicy(context.Context, Transaction, string) error { + return policyNotSupportedError() +} + +// PolicyEvent describes a change to a policy. +type PolicyEvent struct { + ID string + Data []byte + Removed bool +} + +// DataEvent describes a change to a base data document. +type DataEvent struct { + Path Path + Data interface{} + Removed bool +} + +// TriggerEvent describes the changes that caused the trigger to be invoked. +type TriggerEvent struct { + Policy []PolicyEvent + Data []DataEvent + Context *Context +} + +// IsZero returns true if the TriggerEvent indicates no changes occurred. This +// function is primarily for test purposes. +func (e TriggerEvent) IsZero() bool { + return !e.PolicyChanged() && !e.DataChanged() +} + +// PolicyChanged returns true if the trigger was caused by a policy change. 
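An illustrative sketch (not part of the vendored source) of how the event types above combine with TriggerConfig, defined just below: triggers are registered inside a write transaction and the callback fires on every subsequent commit.

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/v1/storage"
	"github.com/open-policy-agent/opa/v1/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.New()

	// Registration requires a write transaction.
	txn, err := store.NewTransaction(ctx, storage.WriteParams)
	if err != nil {
		panic(err)
	}
	_, err = store.Register(ctx, txn, storage.TriggerConfig{
		OnCommit: func(_ context.Context, _ storage.Transaction, e storage.TriggerEvent) {
			for _, d := range e.Data {
				fmt.Println("data changed at", d.Path.String(), "removed:", d.Removed)
			}
		},
	})
	if err != nil {
		store.Abort(ctx, txn)
		panic(err)
	}
	if err := store.Commit(ctx, txn); err != nil {
		panic(err)
	}

	// Subsequent committed writes now invoke the callback.
	if err := storage.WriteOne(ctx, store, storage.AddOp, storage.MustParsePath("/x"), 1); err != nil {
		panic(err)
	}
}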
+func (e TriggerEvent) PolicyChanged() bool { + return len(e.Policy) > 0 +} + +// DataChanged returns true if the trigger was caused by a data change. +func (e TriggerEvent) DataChanged() bool { + return len(e.Data) > 0 +} + +// TriggerConfig contains the trigger registration configuration. +type TriggerConfig struct { + + // OnCommit is invoked when a transaction is successfully committed. The + // callback is invoked with a handle to the write transaction that + // successfully committed before other clients see the changes. + OnCommit func(context.Context, Transaction, TriggerEvent) +} + +// Trigger defines the interface that stores implement to register for change +// notifications when the store is changed. +type Trigger interface { + Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) +} + +// TriggersNotSupported provides default implementations of the Trigger +// interface which may be used if the backend does not support triggers. +type TriggersNotSupported struct{} + +// Register always returns an error indicating triggers are not supported. +func (TriggersNotSupported) Register(context.Context, Transaction, TriggerConfig) (TriggerHandle, error) { + return nil, triggersNotSupportedError() +} + +// TriggerHandle defines the interface that can be used to unregister triggers that have +// been registered on a Store. +type TriggerHandle interface { + Unregister(context.Context, Transaction) +} + +// Iterator defines the interface that can be used to read files from a directory starting with +// files at the base of the directory, then sub-directories etc. +type Iterator interface { + Next() (*Update, error) +} + +// Update contains information about a file +type Update struct { + Path Path + Value []byte + IsPolicy bool +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go new file mode 100644 index 000000000..06063b4c7 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/internal/errors/errors.go @@ -0,0 +1,39 @@ +// Copyright 2021 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package errors contains reusable error-related code for the storage layer. +package errors + +import ( + "fmt" + + "github.com/open-policy-agent/opa/v1/storage" +) + +const ArrayIndexTypeMsg = "array index must be integer" +const DoesNotExistMsg = "document does not exist" +const OutOfRangeMsg = "array index out of range" + +func NewNotFoundError(path storage.Path) *storage.Error { + return NewNotFoundErrorWithHint(path, DoesNotExistMsg) +} + +func NewNotFoundErrorWithHint(path storage.Path, hint string) *storage.Error { + return NewNotFoundErrorf("%v: %v", path.String(), hint) +} + +func NewNotFoundErrorf(f string, a ...interface{}) *storage.Error { + msg := fmt.Sprintf(f, a...) 
+ return &storage.Error{ + Code: storage.NotFoundErr, + Message: msg, + } +} + +func NewWriteConflictError(p storage.Path) *storage.Error { + return &storage.Error{ + Code: storage.WriteConflictErr, + Message: p.String(), + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go new file mode 100644 index 000000000..d1c36a15a --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/internal/ptr/ptr.go @@ -0,0 +1,113 @@ +// Copyright 2021 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package ptr provides utilities for pointer operations using storage layer paths. +package ptr + +import ( + "strconv" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/storage/internal/errors" +) + +func Ptr(data interface{}, path storage.Path) (interface{}, error) { + node := data + for i := range path { + key := path[i] + switch curr := node.(type) { + case map[string]interface{}: + var ok bool + if node, ok = curr[key]; !ok { + return nil, errors.NewNotFoundError(path) + } + case []interface{}: + pos, err := ValidateArrayIndex(curr, key, path) + if err != nil { + return nil, err + } + node = curr[pos] + default: + return nil, errors.NewNotFoundError(path) + } + } + + return node, nil +} + +func ValuePtr(data ast.Value, path storage.Path) (ast.Value, error) { + node := data + for i := range path { + key := path[i] + switch curr := node.(type) { + case ast.Object: + keyTerm := ast.StringTerm(key) + val := curr.Get(keyTerm) + if val == nil { + return nil, errors.NewNotFoundError(path) + } + node = val.Value + case *ast.Array: + pos, err := ValidateASTArrayIndex(curr, key, path) + if err != nil { + return nil, err + } + node = curr.Elem(pos).Value + default: + return nil, errors.NewNotFoundError(path) + } + } + + return node, nil +} + +func ValidateArrayIndex(arr []interface{}, s string, path storage.Path) (int, error) { + idx, ok := isInt(s) + if !ok { + return 0, errors.NewNotFoundErrorWithHint(path, errors.ArrayIndexTypeMsg) + } + return inRange(idx, arr, path) +} + +func ValidateASTArrayIndex(arr *ast.Array, s string, path storage.Path) (int, error) { + idx, ok := isInt(s) + if !ok { + return 0, errors.NewNotFoundErrorWithHint(path, errors.ArrayIndexTypeMsg) + } + return inRange(idx, arr, path) +} + +// ValidateArrayIndexForWrite also checks that `s` is a valid way to address an +// array element like `ValidateArrayIndex`, but returns a `resource_conflict` error +// if it is not. 
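A small sketch (not part of the vendored source) of how these not-found errors typically surface to callers of the storage API.

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/v1/storage"
	"github.com/open-policy-agent/opa/v1/storage/inmem"
)

func main() {
	ctx := context.Background()
	store := inmem.New()

	// Reading a path that was never written yields a NotFoundErr built by
	// this package; storage.IsNotFound lets callers branch on it.
	_, err := storage.ReadOne(ctx, store, storage.MustParsePath("/missing/doc"))
	if storage.IsNotFound(err) {
		fmt.Println("document does not exist:", err)
	} else if err != nil {
		panic(err)
	}
}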
+func ValidateArrayIndexForWrite(arr []interface{}, s string, i int, path storage.Path) (int, error) { + idx, ok := isInt(s) + if !ok { + return 0, errors.NewWriteConflictError(path[:i-1]) + } + return inRange(idx, arr, path) +} + +func isInt(s string) (int, bool) { + idx, err := strconv.Atoi(s) + return idx, err == nil +} + +func inRange(i int, arr interface{}, path storage.Path) (int, error) { + + var arrLen int + + switch v := arr.(type) { + case []interface{}: + arrLen = len(v) + case *ast.Array: + arrLen = v.Len() + } + + if i < 0 || i >= arrLen { + return 0, errors.NewNotFoundErrorWithHint(path, errors.OutOfRangeMsg) + } + return i, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/path.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/path.go new file mode 100644 index 000000000..7f90c666b --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/path.go @@ -0,0 +1,150 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package storage + +import ( + "fmt" + "net/url" + "strconv" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// Path refers to a document in storage. +type Path []string + +// ParsePath returns a new path for the given str. +func ParsePath(str string) (path Path, ok bool) { + if len(str) == 0 { + return nil, false + } + if str[0] != '/' { + return nil, false + } + if len(str) == 1 { + return Path{}, true + } + parts := strings.Split(str[1:], "/") + return parts, true +} + +// ParsePathEscaped returns a new path for the given escaped str. +func ParsePathEscaped(str string) (path Path, ok bool) { + path, ok = ParsePath(str) + if !ok { + return + } + for i := range path { + segment, err := url.PathUnescape(path[i]) + if err == nil { + path[i] = segment + } + } + return +} + +// NewPathForRef returns a new path for the given ref. +func NewPathForRef(ref ast.Ref) (path Path, err error) { + + if len(ref) == 0 { + return nil, fmt.Errorf("empty reference (indicates error in caller)") + } + + if len(ref) == 1 { + return Path{}, nil + } + + path = make(Path, 0, len(ref)-1) + + for _, term := range ref[1:] { + switch v := term.Value.(type) { + case ast.String: + path = append(path, string(v)) + case ast.Number: + path = append(path, v.String()) + case ast.Boolean, ast.Null: + return nil, &Error{ + Code: NotFoundErr, + Message: fmt.Sprintf("%v: does not exist", ref), + } + case *ast.Array, ast.Object, ast.Set: + return nil, fmt.Errorf("composites cannot be base document keys: %v", ref) + default: + return nil, fmt.Errorf("unresolved reference (indicates error in caller): %v", ref) + } + } + + return path, nil +} + +// Compare performs lexigraphical comparison on p and other and returns -1 if p +// is less than other, 0 if p is equal to other, or 1 if p is greater than +// other. +func (p Path) Compare(other Path) (cmp int) { + for i := 0; i < min(len(p), len(other)); i++ { + if cmp := strings.Compare(p[i], other[i]); cmp != 0 { + return cmp + } + } + if len(p) < len(other) { + return -1 + } + if len(p) == len(other) { + return 0 + } + return 1 +} + +// Equal returns true if p is the same as other. +func (p Path) Equal(other Path) bool { + return p.Compare(other) == 0 +} + +// HasPrefix returns true if p starts with other. 
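A brief sketch (not part of the vendored source) exercising the Path helpers in this file.

package main

import (
	"fmt"

	"github.com/open-policy-agent/opa/v1/storage"
)

func main() {
	p, ok := storage.ParsePath("/servers/0/name")
	if !ok {
		panic("invalid path")
	}
	fmt.Println(p)                                                  // [servers 0 name]
	fmt.Println(p.HasPrefix(storage.Path{"servers"}))               // true
	fmt.Println(p.Equal(storage.MustParsePath("/servers/0/name"))) // true

	// Escaped segments are decoded by ParsePathEscaped.
	esc, _ := storage.ParsePathEscaped("/a%2Fb")
	fmt.Println(esc.String()) // /a%2Fb (String re-escapes each segment)
}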
+func (p Path) HasPrefix(other Path) bool { + if len(other) > len(p) { + return false + } + for i := range other { + if p[i] != other[i] { + return false + } + } + return true +} + +// Ref returns a ref that represents p rooted at head. +func (p Path) Ref(head *ast.Term) (ref ast.Ref) { + ref = make(ast.Ref, len(p)+1) + ref[0] = head + for i := range p { + idx, err := strconv.ParseInt(p[i], 10, 64) + if err == nil { + ref[i+1] = ast.UIntNumberTerm(uint64(idx)) + } else { + ref[i+1] = ast.StringTerm(p[i]) + } + } + return ref +} + +func (p Path) String() string { + buf := make([]string, len(p)) + for i := range buf { + buf[i] = url.PathEscape(p[i]) + } + return "/" + strings.Join(buf, "/") +} + +// MustParsePath returns a new Path for s. If s cannot be parsed, this function +// will panic. This is mostly for test purposes. +func MustParsePath(s string) Path { + path, ok := ParsePath(s) + if !ok { + panic(s) + } + return path +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go new file mode 100644 index 000000000..34305f291 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/storage/storage.go @@ -0,0 +1,136 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package storage + +import ( + "context" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// NewTransactionOrDie is a helper function to create a new transaction. If the +// storage layer cannot create a new transaction, this function will panic. This +// function should only be used for tests. +func NewTransactionOrDie(ctx context.Context, store Store, params ...TransactionParams) Transaction { + txn, err := store.NewTransaction(ctx, params...) + if err != nil { + panic(err) + } + return txn +} + +// ReadOne is a convenience function to read a single value from the provided Store. It +// will create a new Transaction to perform the read with, and clean up after itself +// should an error occur. +func ReadOne(ctx context.Context, store Store, path Path) (interface{}, error) { + txn, err := store.NewTransaction(ctx) + if err != nil { + return nil, err + } + defer store.Abort(ctx, txn) + + return store.Read(ctx, txn, path) +} + +// WriteOne is a convenience function to write a single value to the provided Store. It +// will create a new Transaction to perform the write with, and clean up after itself +// should an error occur. +func WriteOne(ctx context.Context, store Store, op PatchOp, path Path, value interface{}) error { + txn, err := store.NewTransaction(ctx, WriteParams) + if err != nil { + return err + } + + if err := store.Write(ctx, txn, op, path, value); err != nil { + store.Abort(ctx, txn) + return err + } + + return store.Commit(ctx, txn) +} + +// MakeDir inserts an empty object at path. If the parent path does not exist, +// MakeDir will create it recursively. +func MakeDir(ctx context.Context, store Store, txn Transaction, path Path) error { + + // Allow the Store implementation to deal with this in its own way. 
+ if md, ok := store.(MakeDirer); ok { + return md.MakeDir(ctx, txn, path) + } + + if len(path) == 0 { + return nil + } + + node, err := store.Read(ctx, txn, path) + if err != nil { + if !IsNotFound(err) { + return err + } + + if err := MakeDir(ctx, store, txn, path[:len(path)-1]); err != nil { + return err + } + + return store.Write(ctx, txn, AddOp, path, map[string]interface{}{}) + } + + if _, ok := node.(map[string]interface{}); ok { + return nil + } + + if _, ok := node.(ast.Object); ok { + return nil + } + + return writeConflictError(path) +} + +// Txn is a convenience function that executes f inside a new transaction +// opened on the store. If the function returns an error, the transaction is +// aborted and the error is returned. Otherwise, the transaction is committed +// and the result of the commit is returned. +func Txn(ctx context.Context, store Store, params TransactionParams, f func(Transaction) error) error { + + txn, err := store.NewTransaction(ctx, params) + if err != nil { + return err + } + + if err := f(txn); err != nil { + store.Abort(ctx, txn) + return err + } + + return store.Commit(ctx, txn) +} + +// NonEmpty returns a function that tests if a path is non-empty. A +// path is non-empty if a Read on the path returns a value or a Read +// on any of the path prefixes returns a non-object value. +func NonEmpty(ctx context.Context, store Store, txn Transaction) func([]string) (bool, error) { + return func(path []string) (bool, error) { + if _, err := store.Read(ctx, txn, Path(path)); err == nil { + return true, nil + } else if !IsNotFound(err) { + return false, err + } + for i := len(path) - 1; i > 0; i-- { + val, err := store.Read(ctx, txn, Path(path[:i])) + if err != nil && !IsNotFound(err) { + return false, err + } else if err == nil { + if _, ok := val.(map[string]interface{}); ok { + return false, nil + } + if _, ok := val.(ast.Object); ok { + return false, nil + } + return true, nil + } + } + return false, nil + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go similarity index 80% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go index a0f67a7c9..02425d241 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/aggregates.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/aggregates.go @@ -7,20 +7,20 @@ package topdown import ( "math/big" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch a := operands[0].Value.(type) { case *ast.Array: - return iter(ast.IntNumberTerm(a.Len())) + return iter(ast.InternedIntNumberTerm(a.Len())) case ast.Object: - return iter(ast.IntNumberTerm(a.Len())) + return iter(ast.InternedIntNumberTerm(a.Len())) case ast.Set: - return iter(ast.IntNumberTerm(a.Len())) + return iter(ast.InternedIntNumberTerm(a.Len())) case ast.String: - return iter(ast.IntNumberTerm(len([]rune(a)))) + return iter(ast.InternedIntNumberTerm(len([]rune(a)))) } return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "object", "set", "string") } @@ -99,7 +99,7 @@ func builtinMax(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err 
if a.Len() == 0 { return nil } - var max = ast.Value(ast.Null{}) + max := ast.InternedNullTerm.Value a.Foreach(func(x *ast.Term) { if ast.Compare(max, x.Value) <= 0 { max = x.Value @@ -110,7 +110,7 @@ func builtinMax(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err if a.Len() == 0 { return nil } - max, err := a.Reduce(ast.NullTerm(), func(max *ast.Term, elem *ast.Term) (*ast.Term, error) { + max, err := a.Reduce(ast.InternedNullTerm, func(max *ast.Term, elem *ast.Term) (*ast.Term, error) { if ast.Compare(max, elem) <= 0 { return elem, nil } @@ -142,11 +142,11 @@ func builtinMin(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err if a.Len() == 0 { return nil } - min, err := a.Reduce(ast.NullTerm(), func(min *ast.Term, elem *ast.Term) (*ast.Term, error) { + min, err := a.Reduce(ast.InternedNullTerm, func(min *ast.Term, elem *ast.Term) (*ast.Term, error) { // The null term is considered to be less than any other term, // so in order for min of a set to make sense, we need to check // for it. - if min.Value.Compare(ast.Null{}) == 0 { + if min.Value.Compare(ast.InternedNullTerm.Value) == 0 { return elem, nil } @@ -178,7 +178,7 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err switch val := operands[0].Value.(type) { case ast.Set: res := true - match := ast.BooleanTerm(true) + match := ast.InternedBooleanTerm(true) val.Until(func(term *ast.Term) bool { if !match.Equal(term) { res = false @@ -186,10 +186,10 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err } return false }) - return iter(ast.BooleanTerm(res)) + return iter(ast.InternedBooleanTerm(res)) case *ast.Array: res := true - match := ast.BooleanTerm(true) + match := ast.InternedBooleanTerm(true) val.Until(func(term *ast.Term) bool { if !match.Equal(term) { res = false @@ -197,7 +197,7 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err } return false }) - return iter(ast.BooleanTerm(res)) + return iter(ast.InternedBooleanTerm(res)) default: return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "set") } @@ -206,11 +206,11 @@ func builtinAll(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err func builtinAny(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch val := operands[0].Value.(type) { case ast.Set: - res := val.Len() > 0 && val.Contains(ast.BooleanTerm(true)) - return iter(ast.BooleanTerm(res)) + res := val.Len() > 0 && val.Contains(ast.InternedBooleanTerm(true)) + return iter(ast.InternedBooleanTerm(res)) case *ast.Array: res := false - match := ast.BooleanTerm(true) + match := ast.InternedBooleanTerm(true) val.Until(func(term *ast.Term) bool { if match.Equal(term) { res = true @@ -218,7 +218,7 @@ func builtinAny(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err } return false }) - return iter(ast.BooleanTerm(res)) + return iter(ast.InternedBooleanTerm(res)) default: return builtins.NewOperandTypeErr(1, operands[0].Value, "array", "set") } @@ -228,27 +228,20 @@ func builtinMember(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) containee := operands[0] switch c := operands[1].Value.(type) { case ast.Set: - return iter(ast.BooleanTerm(c.Contains(containee))) + return iter(ast.InternedBooleanTerm(c.Contains(containee))) case *ast.Array: - ret := false - c.Until(func(v *ast.Term) bool { - if v.Value.Compare(containee.Value) == 0 { - ret = true + for i := 0; i < c.Len(); i++ { + if c.Elem(i).Value.Compare(containee.Value) == 0 { 
+ return iter(ast.InternedBooleanTerm(true)) } - return ret - }) - return iter(ast.BooleanTerm(ret)) + } + return iter(ast.InternedBooleanTerm(false)) case ast.Object: - ret := false - c.Until(func(_, v *ast.Term) bool { - if v.Value.Compare(containee.Value) == 0 { - ret = true - } - return ret - }) - return iter(ast.BooleanTerm(ret)) + return iter(ast.InternedBooleanTerm(c.Until(func(_, v *ast.Term) bool { + return v.Value.Compare(containee.Value) == 0 + }))) } - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } func builtinMemberWithKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -259,9 +252,9 @@ func builtinMemberWithKey(_ BuiltinContext, operands []*ast.Term, iter func(*ast if act := c.Get(key); act != nil { ret = act.Value.Compare(val.Value) == 0 } - return iter(ast.BooleanTerm(ret)) + return iter(ast.InternedBooleanTerm(ret)) } - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } func init() { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go similarity index 95% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go index 3ac703efa..68c3b496e 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/arithmetic.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/arithmetic.go @@ -8,8 +8,8 @@ import ( "fmt" "math/big" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type arithArity1 func(a *big.Float) (*big.Float, error) @@ -67,7 +67,7 @@ func builtinPlus(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) er y, ok2 := n2.Int() if ok1 && ok2 && inSmallIntRange(x) && inSmallIntRange(y) { - return iter(ast.IntNumberTerm(x + y)) + return iter(ast.InternedIntNumberTerm(x + y)) } f, err := arithPlus(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) @@ -91,7 +91,7 @@ func builtinMultiply(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term y, ok2 := n2.Int() if ok1 && ok2 && inSmallIntRange(x) && inSmallIntRange(y) { - return iter(ast.IntNumberTerm(x * y)) + return iter(ast.InternedIntNumberTerm(x * y)) } f, err := arithMultiply(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) @@ -171,7 +171,7 @@ func builtinMinus(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) e y, oky := n2.Int() if okx && oky && inSmallIntRange(x) && inSmallIntRange(y) { - return iter(ast.IntNumberTerm(x - y)) + return iter(ast.InternedIntNumberTerm(x - y)) } f, err := arithMinus(builtins.NumberToFloat(n1), builtins.NumberToFloat(n2)) @@ -213,7 +213,7 @@ func builtinRem(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) err return fmt.Errorf("modulo by zero") } - return iter(ast.IntNumberTerm(x % y)) + return iter(ast.InternedIntNumberTerm(x % y)) } op1, err1 := builtins.NumberToInt(n1) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/array.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go similarity index 87% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/array.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go index e7fe5be64..4a2a2ed14 100644 --- 
a/constraint/vendor/github.com/open-policy-agent/opa/topdown/array.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/array.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -20,6 +20,13 @@ func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T return err } + if arrA.Len() == 0 { + return iter(operands[1]) + } + if arrB.Len() == 0 { + return iter(operands[0]) + } + arrC := make([]*ast.Term, arrA.Len()+arrB.Len()) i := 0 @@ -33,7 +40,7 @@ func builtinArrayConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T i++ }) - return iter(ast.NewTerm(ast.NewArray(arrC...))) + return iter(ast.ArrayTerm(arrC...)) } func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -68,6 +75,10 @@ func builtinArraySlice(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te startIndex = stopIndex } + if startIndex == 0 && stopIndex >= arr.Len() { + return iter(operands[0]) + } + return iter(ast.NewTerm(arr.Slice(startIndex, stopIndex))) } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/binary.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go similarity index 90% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/binary.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go index b4f9dbd39..6f7ebaf40 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/binary.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/binary.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinBinaryAnd(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/bindings.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go similarity index 99% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/bindings.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go index 30a8ac5ec..ae6ca15da 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/bindings.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/bindings.go @@ -8,7 +8,7 @@ import ( "fmt" "strings" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) type undo struct { @@ -68,7 +68,7 @@ func (u *bindings) Plug(a *ast.Term) *ast.Term { } func (u *bindings) PlugNamespaced(a *ast.Term, caller *bindings) *ast.Term { - if u != nil { + if u != nil && u.instr != nil { u.instr.startTimer(evalOpPlug) t := u.plugNamespaced(a, caller) u.instr.stopTimer(evalOpPlug) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/bits.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go similarity index 96% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/bits.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go index 7a63c0df1..e420ffe61 100644 --- 
a/constraint/vendor/github.com/open-policy-agent/opa/topdown/bits.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/bits.go @@ -7,8 +7,8 @@ package topdown import ( "math/big" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type bitsArity1 func(a *big.Int) (*big.Int, error) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go new file mode 100644 index 000000000..e0b893d47 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins.go @@ -0,0 +1,224 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "math/rand" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" +) + +type ( + // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. + FunctionalBuiltin1 func(op1 ast.Value) (output ast.Value, err error) + + // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. + FunctionalBuiltin2 func(op1, op2 ast.Value) (output ast.Value, err error) + + // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. + FunctionalBuiltin3 func(op1, op2, op3 ast.Value) (output ast.Value, err error) + + // Deprecated: Functional-style builtins are deprecated. Use BuiltinFunc instead. + FunctionalBuiltin4 func(op1, op2, op3, op4 ast.Value) (output ast.Value, err error) + + // BuiltinContext contains context from the evaluator that may be used by + // built-in functions. + BuiltinContext struct { + Context context.Context // request context that was passed when query started + Metrics metrics.Metrics // metrics registry for recording built-in specific metrics + Seed io.Reader // randomization source + Time *ast.Term // wall clock time + Cancel Cancel // atomic value that signals evaluation to halt + Runtime *ast.Term // runtime information on the OPA instance + Cache builtins.Cache // built-in function state cache + InterQueryBuiltinCache cache.InterQueryCache // cross-query built-in function state cache + InterQueryBuiltinValueCache cache.InterQueryValueCache // cross-query built-in function state value cache. this cache is useful for scenarios where the entry size cannot be calculated + NDBuiltinCache builtins.NDBCache // cache for non-deterministic built-in state + Location *ast.Location // location of built-in call + Tracers []Tracer // Deprecated: Use QueryTracers instead + QueryTracers []QueryTracer // tracer objects for trace() built-in function + TraceEnabled bool // indicates whether tracing is enabled for the evaluation + QueryID uint64 // identifies query being evaluated + ParentID uint64 // identifies parent of query being evaluated + PrintHook print.Hook // provides callback function to use for printing + RoundTripper CustomizeRoundTripper // customize transport to use for HTTP requests + DistributedTracingOpts tracing.Options // options to be used by distributed tracing. 
+ rand *rand.Rand // randomization source for non-security-sensitive operations + Capabilities *ast.Capabilities + } + + // BuiltinFunc defines an interface for implementing built-in functions. + // The built-in function is called with the plugged operands from the call + // (including the output operands.) The implementation should evaluate the + // operands and invoke the iterator for each successful/defined output + // value. + BuiltinFunc func(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error +) + +// Rand returns a random number generator based on the Seed for this built-in +// context. The random number will be re-used across multiple calls to this +// function. If a random number generator cannot be created, an error is +// returned. +func (bctx *BuiltinContext) Rand() (*rand.Rand, error) { + + if bctx.rand != nil { + return bctx.rand, nil + } + + seed, err := readInt64(bctx.Seed) + if err != nil { + return nil, err + } + + bctx.rand = rand.New(rand.NewSource(seed)) + return bctx.rand, nil +} + +// RegisterBuiltinFunc adds a new built-in function to the evaluation engine. +func RegisterBuiltinFunc(name string, f BuiltinFunc) { + builtinFunctions[name] = builtinErrorWrapper(name, f) +} + +// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. +func RegisterFunctionalBuiltin1(name string, fun FunctionalBuiltin1) { + builtinFunctions[name] = functionalWrapper1(name, fun) +} + +// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. +func RegisterFunctionalBuiltin2(name string, fun FunctionalBuiltin2) { + builtinFunctions[name] = functionalWrapper2(name, fun) +} + +// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. +func RegisterFunctionalBuiltin3(name string, fun FunctionalBuiltin3) { + builtinFunctions[name] = functionalWrapper3(name, fun) +} + +// Deprecated: Functional-style builtins are deprecated. Use RegisterBuiltinFunc instead. +func RegisterFunctionalBuiltin4(name string, fun FunctionalBuiltin4) { + builtinFunctions[name] = functionalWrapper4(name, fun) +} + +// GetBuiltin returns a built-in function implementation, nil if no built-in found. +func GetBuiltin(name string) BuiltinFunc { + return builtinFunctions[name] +} + +// Deprecated: The BuiltinEmpty type is no longer needed. Use nil return values instead. 
+type BuiltinEmpty struct{} + +func (BuiltinEmpty) Error() string { + return "" +} + +var builtinFunctions = map[string]BuiltinFunc{} + +func builtinErrorWrapper(name string, fn BuiltinFunc) BuiltinFunc { + return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { + err := fn(bctx, args, iter) + if err == nil { + return nil + } + return handleBuiltinErr(name, bctx.Location, err) + } +} + +func functionalWrapper1(name string, fn FunctionalBuiltin1) BuiltinFunc { + return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { + result, err := fn(args[0].Value) + if err == nil { + return iter(ast.NewTerm(result)) + } + return handleBuiltinErr(name, bctx.Location, err) + } +} + +func functionalWrapper2(name string, fn FunctionalBuiltin2) BuiltinFunc { + return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { + result, err := fn(args[0].Value, args[1].Value) + if err == nil { + return iter(ast.NewTerm(result)) + } + return handleBuiltinErr(name, bctx.Location, err) + } +} + +func functionalWrapper3(name string, fn FunctionalBuiltin3) BuiltinFunc { + return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { + result, err := fn(args[0].Value, args[1].Value, args[2].Value) + if err == nil { + return iter(ast.NewTerm(result)) + } + return handleBuiltinErr(name, bctx.Location, err) + } +} + +func functionalWrapper4(name string, fn FunctionalBuiltin4) BuiltinFunc { + return func(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error { + result, err := fn(args[0].Value, args[1].Value, args[2].Value, args[3].Value) + if err == nil { + return iter(ast.NewTerm(result)) + } + if _, empty := err.(BuiltinEmpty); empty { + return nil + } + return handleBuiltinErr(name, bctx.Location, err) + } +} + +func handleBuiltinErr(name string, loc *ast.Location, err error) error { + switch err := err.(type) { + case BuiltinEmpty: + return nil + case *Error, Halt: + return err + case builtins.ErrOperand: + e := &Error{ + Code: TypeErr, + Message: fmt.Sprintf("%v: %v", name, err.Error()), + Location: loc, + } + return e.Wrap(err) + default: + e := &Error{ + Code: BuiltinErr, + Message: fmt.Sprintf("%v: %v", name, err.Error()), + Location: loc, + } + return e.Wrap(err) + } +} + +func readInt64(r io.Reader) (int64, error) { + bs := make([]byte, 8) + n, err := io.ReadFull(r, bs) + if n != len(bs) || err != nil { + return 0, err + } + return int64(binary.BigEndian.Uint64(bs)), nil +} + +// Used to get older-style (ast.Term, error) tuples out of newer functions. +func getResult(fn BuiltinFunc, operands ...*ast.Term) (*ast.Term, error) { + var result *ast.Term + extractionFn := func(r *ast.Term) error { + result = r + return nil + } + err := fn(BuiltinContext{}, operands, extractionFn) + if err != nil { + return nil, err + } + return result, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go new file mode 100644 index 000000000..45a0b8840 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/builtins/builtins.go @@ -0,0 +1,328 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package builtins contains utilities for implementing built-in functions. 
+package builtins + +import ( + "encoding/json" + "fmt" + "math/big" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" +) + +// Cache defines the built-in cache used by the top-down evaluation. The keys +// must be comparable and should not be of type string. +type Cache map[interface{}]interface{} + +// Put updates the cache for the named built-in. +func (c Cache) Put(k, v interface{}) { + c[k] = v +} + +// Get returns the cached value for k. +func (c Cache) Get(k interface{}) (interface{}, bool) { + v, ok := c[k] + return v, ok +} + +// We use an ast.Object for the cached keys/values because a naive +// map[ast.Value]ast.Value will not correctly detect value equality of +// the member keys. +type NDBCache map[string]ast.Object + +func (c NDBCache) AsValue() ast.Value { + out := ast.NewObject() + for bname, obj := range c { + out.Insert(ast.StringTerm(bname), ast.NewTerm(obj)) + } + return out +} + +// Put updates the cache for the named built-in. +// Automatically creates the 2-level hierarchy as needed. +func (c NDBCache) Put(name string, k, v ast.Value) { + if _, ok := c[name]; !ok { + c[name] = ast.NewObject() + } + c[name].Insert(ast.NewTerm(k), ast.NewTerm(v)) +} + +// Get returns the cached value for k for the named builtin. +func (c NDBCache) Get(name string, k ast.Value) (ast.Value, bool) { + if m, ok := c[name]; ok { + v := m.Get(ast.NewTerm(k)) + if v != nil { + return v.Value, true + } + return nil, false + } + return nil, false +} + +// Convenience functions for serializing the data structure. +func (c NDBCache) MarshalJSON() ([]byte, error) { + v, err := ast.JSON(c.AsValue()) + if err != nil { + return nil, err + } + return json.Marshal(v) +} + +func (c *NDBCache) UnmarshalJSON(data []byte) error { + out := map[string]ast.Object{} + var incoming interface{} + + // Note: We use util.Unmarshal instead of json.Unmarshal to get + // correct deserialization of number types. + err := util.Unmarshal(data, &incoming) + if err != nil { + return err + } + + // Convert interface types back into ast.Value types. + nestedObject, err := ast.InterfaceToValue(incoming) + if err != nil { + return err + } + + // Reconstruct NDBCache from nested ast.Object structure. + if source, ok := nestedObject.(ast.Object); ok { + err = source.Iter(func(k, v *ast.Term) error { + if obj, ok := v.Value.(ast.Object); ok { + out[string(k.Value.(ast.String))] = obj + return nil + } + return fmt.Errorf("expected Object, got other Value type in conversion") + }) + if err != nil { + return err + } + } + + *c = out + + return nil +} + +// ErrOperand represents an invalid operand has been passed to a built-in +// function. Built-ins should return ErrOperand to indicate a type error has +// occurred. +type ErrOperand string + +func (err ErrOperand) Error() string { + return string(err) +} + +// NewOperandErr returns a generic operand error. +func NewOperandErr(pos int, f string, a ...interface{}) error { + f = fmt.Sprintf("operand %v ", pos) + f + return ErrOperand(fmt.Sprintf(f, a...)) +} + +// NewOperandTypeErr returns an operand error indicating the operand's type was wrong. 
+func NewOperandTypeErr(pos int, got ast.Value, expected ...string) error { + + if len(expected) == 1 { + return NewOperandErr(pos, "must be %v but got %v", expected[0], ast.ValueName(got)) + } + + return NewOperandErr(pos, "must be one of {%v} but got %v", strings.Join(expected, ", "), ast.ValueName(got)) +} + +// NewOperandElementErr returns an operand error indicating an element in the +// composite operand was wrong. +func NewOperandElementErr(pos int, composite ast.Value, got ast.Value, expected ...string) error { + + tpe := ast.ValueName(composite) + + if len(expected) == 1 { + return NewOperandErr(pos, "must be %v of %vs but got %v containing %v", tpe, expected[0], tpe, ast.ValueName(got)) + } + + return NewOperandErr(pos, "must be %v of (any of) {%v} but got %v containing %v", tpe, strings.Join(expected, ", "), tpe, ast.ValueName(got)) +} + +// NewOperandEnumErr returns an operand error indicating a value was wrong. +func NewOperandEnumErr(pos int, expected ...string) error { + + if len(expected) == 1 { + return NewOperandErr(pos, "must be %v", expected[0]) + } + + return NewOperandErr(pos, "must be one of {%v}", strings.Join(expected, ", ")) +} + +// IntOperand converts x to an int. If the cast fails, a descriptive error is +// returned. +func IntOperand(x ast.Value, pos int) (int, error) { + n, ok := x.(ast.Number) + if !ok { + return 0, NewOperandTypeErr(pos, x, "number") + } + + i, ok := n.Int() + if !ok { + return 0, NewOperandErr(pos, "must be integer number but got floating-point number") + } + + return i, nil +} + +// BigIntOperand converts x to a big int. If the cast fails, a descriptive error +// is returned. +func BigIntOperand(x ast.Value, pos int) (*big.Int, error) { + n, err := NumberOperand(x, 1) + if err != nil { + return nil, NewOperandTypeErr(pos, x, "integer") + } + bi, err := NumberToInt(n) + if err != nil { + return nil, NewOperandErr(pos, "must be integer number but got floating-point number") + } + + return bi, nil +} + +// NumberOperand converts x to a number. If the cast fails, a descriptive error is +// returned. +func NumberOperand(x ast.Value, pos int) (ast.Number, error) { + n, ok := x.(ast.Number) + if !ok { + return ast.Number(""), NewOperandTypeErr(pos, x, "number") + } + return n, nil +} + +// SetOperand converts x to a set. If the cast fails, a descriptive error is +// returned. +func SetOperand(x ast.Value, pos int) (ast.Set, error) { + s, ok := x.(ast.Set) + if !ok { + return nil, NewOperandTypeErr(pos, x, "set") + } + return s, nil +} + +// StringOperand converts x to a string. If the cast fails, a descriptive error is +// returned. +func StringOperand(x ast.Value, pos int) (ast.String, error) { + s, ok := x.(ast.String) + if !ok { + return ast.String(""), NewOperandTypeErr(pos, x, "string") + } + return s, nil +} + +// ObjectOperand converts x to an object. If the cast fails, a descriptive +// error is returned. +func ObjectOperand(x ast.Value, pos int) (ast.Object, error) { + o, ok := x.(ast.Object) + if !ok { + return nil, NewOperandTypeErr(pos, x, "object") + } + return o, nil +} + +// ArrayOperand converts x to an array. If the cast fails, a descriptive +// error is returned. +func ArrayOperand(x ast.Value, pos int) (*ast.Array, error) { + a, ok := x.(*ast.Array) + if !ok { + return nil, NewOperandTypeErr(pos, x, "array") + } + return a, nil +} + +// NumberToFloat converts n to a big float. 
+func NumberToFloat(n ast.Number) *big.Float { + r, ok := new(big.Float).SetString(string(n)) + if !ok { + panic("illegal value") + } + return r +} + +// FloatToNumber converts f to a number. +func FloatToNumber(f *big.Float) ast.Number { + var format byte = 'g' + if f.IsInt() { + format = 'f' + } + return ast.Number(f.Text(format, -1)) +} + +// NumberToInt converts n to a big int. +// If n cannot be converted to an big int, an error is returned. +func NumberToInt(n ast.Number) (*big.Int, error) { + f := NumberToFloat(n) + r, accuracy := f.Int(nil) + if accuracy != big.Exact { + return nil, fmt.Errorf("illegal value") + } + return r, nil +} + +// IntToNumber converts i to a number. +func IntToNumber(i *big.Int) ast.Number { + return ast.Number(i.String()) +} + +// StringSliceOperand converts x to a []string. If the cast fails, a descriptive error is +// returned. +func StringSliceOperand(a ast.Value, pos int) ([]string, error) { + type iterable interface { + Iter(func(*ast.Term) error) error + Len() int + } + + strs, ok := a.(iterable) + if !ok { + return nil, NewOperandTypeErr(pos, a, "array", "set") + } + + var outStrs = make([]string, 0, strs.Len()) + if err := strs.Iter(func(x *ast.Term) error { + s, ok := x.Value.(ast.String) + if !ok { + return NewOperandElementErr(pos, a, x.Value, "string") + } + outStrs = append(outStrs, string(s)) + return nil + }); err != nil { + return nil, err + } + + return outStrs, nil +} + +// RuneSliceOperand converts x to a []rune. If the cast fails, a descriptive error is +// returned. +func RuneSliceOperand(x ast.Value, pos int) ([]rune, error) { + a, err := ArrayOperand(x, pos) + if err != nil { + return nil, err + } + + var f = make([]rune, a.Len()) + for k := 0; k < a.Len(); k++ { + b := a.Elem(k) + c, ok := b.Value.(ast.String) + if !ok { + return nil, NewOperandElementErr(pos, x, b.Value, "string") + } + + d := []rune(string(c)) + if len(d) != 1 { + return nil, NewOperandElementErr(pos, x, b.Value, "rune") + } + + f[k] = d[0] + } + + return f, nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go new file mode 100644 index 000000000..607abf46e --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cache.go @@ -0,0 +1,352 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" +) + +// VirtualCache defines the interface for a cache that stores the results of +// evaluated virtual documents (rules). +// The cache is a stack of frames, where each frame is a mapping from references +// to values. +type VirtualCache interface { + // Push pushes a new, empty frame of value mappings onto the stack. + Push() + + // Pop pops the top frame of value mappings from the stack, removing all associated entries. + Pop() + + // Get returns the value associated with the given reference. The second return value + // indicates whether the reference has a recorded 'undefined' result. + Get(ref ast.Ref) (*ast.Term, bool) + + // Put associates the given reference with the given value. If the value is nil, the reference + // is marked as having an 'undefined' result. + Put(ref ast.Ref, value *ast.Term) + + // Keys returns the set of keys that have been cached for the active frame. 
+ Keys() []ast.Ref +} + +type virtualCache struct { + stack []*virtualCacheElem +} + +type virtualCacheElem struct { + value *ast.Term + children *util.HashMap + undefined bool +} + +func NewVirtualCache() VirtualCache { + cache := &virtualCache{} + cache.Push() + return cache +} + +func (c *virtualCache) Push() { + c.stack = append(c.stack, newVirtualCacheElem()) +} + +func (c *virtualCache) Pop() { + c.stack = c.stack[:len(c.stack)-1] +} + +// Returns the resolved value of the AST term and a flag indicating if the value +// should be interpretted as undefined: +// +// nil, true indicates the ref is undefined +// ast.Term, false indicates the ref is defined +// nil, false indicates the ref has not been cached +// ast.Term, true is impossible +func (c *virtualCache) Get(ref ast.Ref) (*ast.Term, bool) { + node := c.stack[len(c.stack)-1] + for i := 0; i < len(ref); i++ { + x, ok := node.children.Get(ref[i]) + if !ok { + return nil, false + } + node = x.(*virtualCacheElem) + } + if node.undefined { + return nil, true + } + + return node.value, false +} + +// If value is a nil pointer, set the 'undefined' flag on the cache element to +// indicate that the Ref has resolved to undefined. +func (c *virtualCache) Put(ref ast.Ref, value *ast.Term) { + node := c.stack[len(c.stack)-1] + for i := 0; i < len(ref); i++ { + x, ok := node.children.Get(ref[i]) + if ok { + node = x.(*virtualCacheElem) + } else { + next := newVirtualCacheElem() + node.children.Put(ref[i], next) + node = next + } + } + if value != nil { + node.value = value + } else { + node.undefined = true + } +} + +func (c *virtualCache) Keys() []ast.Ref { + node := c.stack[len(c.stack)-1] + return keysRecursive(nil, node) +} + +func keysRecursive(root ast.Ref, node *virtualCacheElem) []ast.Ref { + var keys []ast.Ref + node.children.Iter(func(k, v util.T) bool { + ref := root.Append(k.(*ast.Term)) + if v.(*virtualCacheElem).value != nil { + keys = append(keys, ref) + } + if v.(*virtualCacheElem).children.Len() > 0 { + keys = append(keys, keysRecursive(ref, v.(*virtualCacheElem))...) + } + return false + }) + return keys +} + +func newVirtualCacheElem() *virtualCacheElem { + return &virtualCacheElem{children: newVirtualCacheHashMap()} +} + +func newVirtualCacheHashMap() *util.HashMap { + return util.NewHashMap(func(a, b util.T) bool { + return a.(*ast.Term).Equal(b.(*ast.Term)) + }, func(x util.T) int { + return x.(*ast.Term).Hash() + }) +} + +// baseCache implements a trie structure to cache base documents read out of +// storage. Values inserted into the cache may contain other values that were +// previously inserted. In this case, the previous values are erased from the +// structure. 
+type baseCache struct { + root *baseCacheElem +} + +func newBaseCache() *baseCache { + return &baseCache{ + root: newBaseCacheElem(), + } +} + +func (c *baseCache) Get(ref ast.Ref) ast.Value { + node := c.root + for i := 0; i < len(ref); i++ { + node = node.children[ref[i].Value] + if node == nil { + return nil + } else if node.value != nil { + result, err := node.value.Find(ref[i+1:]) + if err != nil { + return nil + } + return result + } + } + return nil +} + +func (c *baseCache) Put(ref ast.Ref, value ast.Value) { + node := c.root + for i := 0; i < len(ref); i++ { + if child, ok := node.children[ref[i].Value]; ok { + node = child + } else { + child := newBaseCacheElem() + node.children[ref[i].Value] = child + node = child + } + } + node.set(value) +} + +type baseCacheElem struct { + value ast.Value + children map[ast.Value]*baseCacheElem +} + +func newBaseCacheElem() *baseCacheElem { + return &baseCacheElem{ + children: map[ast.Value]*baseCacheElem{}, + } +} + +func (e *baseCacheElem) set(value ast.Value) { + e.value = value + e.children = map[ast.Value]*baseCacheElem{} +} + +type refStack struct { + sl []refStackElem +} + +type refStackElem struct { + refs []ast.Ref +} + +func newRefStack() *refStack { + return &refStack{} +} + +func (s *refStack) Push(refs []ast.Ref) { + s.sl = append(s.sl, refStackElem{refs: refs}) +} + +func (s *refStack) Pop() { + s.sl = s.sl[:len(s.sl)-1] +} + +func (s *refStack) Prefixed(ref ast.Ref) bool { + if s != nil { + for i := len(s.sl) - 1; i >= 0; i-- { + for j := range s.sl[i].refs { + if ref.HasPrefix(s.sl[i].refs[j]) { + return true + } + } + } + } + return false +} + +type comprehensionCache struct { + stack []map[*ast.Term]*comprehensionCacheElem +} + +type comprehensionCacheElem struct { + value *ast.Term + children *util.HashMap +} + +func newComprehensionCache() *comprehensionCache { + cache := &comprehensionCache{} + cache.Push() + return cache +} + +func (c *comprehensionCache) Push() { + c.stack = append(c.stack, map[*ast.Term]*comprehensionCacheElem{}) +} + +func (c *comprehensionCache) Pop() { + c.stack = c.stack[:len(c.stack)-1] +} + +func (c *comprehensionCache) Elem(t *ast.Term) (*comprehensionCacheElem, bool) { + elem, ok := c.stack[len(c.stack)-1][t] + return elem, ok +} + +func (c *comprehensionCache) Set(t *ast.Term, elem *comprehensionCacheElem) { + c.stack[len(c.stack)-1][t] = elem +} + +func newComprehensionCacheElem() *comprehensionCacheElem { + return &comprehensionCacheElem{children: newComprehensionCacheHashMap()} +} + +func (c *comprehensionCacheElem) Get(key []*ast.Term) *ast.Term { + node := c + for i := 0; i < len(key); i++ { + x, ok := node.children.Get(key[i]) + if !ok { + return nil + } + node = x.(*comprehensionCacheElem) + } + return node.value +} + +func (c *comprehensionCacheElem) Put(key []*ast.Term, value *ast.Term) { + node := c + for i := 0; i < len(key); i++ { + x, ok := node.children.Get(key[i]) + if ok { + node = x.(*comprehensionCacheElem) + } else { + next := newComprehensionCacheElem() + node.children.Put(key[i], next) + node = next + } + } + node.value = value +} + +func newComprehensionCacheHashMap() *util.HashMap { + return util.NewHashMap(func(a, b util.T) bool { + return a.(*ast.Term).Equal(b.(*ast.Term)) + }, func(x util.T) int { + return x.(*ast.Term).Hash() + }) +} + +type functionMocksStack struct { + stack []*functionMocksElem +} + +type functionMocksElem []frame + +type frame map[string]*ast.Term + +func newFunctionMocksStack() *functionMocksStack { + stack := &functionMocksStack{} + 
stack.Push() + return stack +} + +func newFunctionMocksElem() *functionMocksElem { + return &functionMocksElem{} +} + +func (s *functionMocksStack) Push() { + s.stack = append(s.stack, newFunctionMocksElem()) +} + +func (s *functionMocksStack) Pop() { + s.stack = s.stack[:len(s.stack)-1] +} + +func (s *functionMocksStack) PopPairs() { + current := s.stack[len(s.stack)-1] + *current = (*current)[:len(*current)-1] +} + +func (s *functionMocksStack) PutPairs(mocks [][2]*ast.Term) { + el := frame{} + for i := range mocks { + el[mocks[i][0].Value.String()] = mocks[i][1] + } + s.Put(el) +} + +func (s *functionMocksStack) Put(el frame) { + current := s.stack[len(s.stack)-1] + *current = append(*current, el) +} + +func (s *functionMocksStack) Get(f ast.Ref) (*ast.Term, bool) { + current := *s.stack[len(s.stack)-1] + for i := len(current) - 1; i >= 0; i-- { + if r, ok := current[i][f.String()]; ok { + return r, true + } + } + return nil, false +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go new file mode 100644 index 000000000..a2b80c0a7 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cache/cache.go @@ -0,0 +1,580 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package cache defines the inter-query cache interface that can cache data across queries +package cache + +import ( + "container/list" + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + defaultInterQueryBuiltinValueCacheSize = int(0) // unlimited + defaultMaxSizeBytes = int64(0) // unlimited + defaultForcedEvictionThresholdPercentage = int64(100) // trigger at max_size_bytes + defaultStaleEntryEvictionPeriodSeconds = int64(0) // never +) + +var interQueryBuiltinValueCacheDefaultConfigs = map[string]*NamedValueCacheConfig{} + +func getDefaultInterQueryBuiltinValueCacheConfig(name string) *NamedValueCacheConfig { + return interQueryBuiltinValueCacheDefaultConfigs[name] +} + +// RegisterDefaultInterQueryBuiltinValueCacheConfig registers a default configuration for the inter-query value cache; +// used when none has been explicitly configured. +// To disable a named cache when not configured, pass a nil config. +func RegisterDefaultInterQueryBuiltinValueCacheConfig(name string, config *NamedValueCacheConfig) { + interQueryBuiltinValueCacheDefaultConfigs[name] = config +} + +// Config represents the configuration for the inter-query builtin cache. +type Config struct { + InterQueryBuiltinCache InterQueryBuiltinCacheConfig `json:"inter_query_builtin_cache"` + InterQueryBuiltinValueCache InterQueryBuiltinValueCacheConfig `json:"inter_query_builtin_value_cache"` +} + +// NamedValueCacheConfig represents the configuration of a named cache that built-in functions can utilize. +// A default configuration to be used if not explicitly configured can be registered using RegisterDefaultInterQueryBuiltinValueCacheConfig. +type NamedValueCacheConfig struct { + MaxNumEntries *int `json:"max_num_entries,omitempty"` +} + +// InterQueryBuiltinValueCacheConfig represents the configuration of the inter-query value cache that built-in functions can utilize. 
+// MaxNumEntries - max number of cache entries +type InterQueryBuiltinValueCacheConfig struct { + MaxNumEntries *int `json:"max_num_entries,omitempty"` + NamedCacheConfigs map[string]*NamedValueCacheConfig `json:"named,omitempty"` +} + +// InterQueryBuiltinCacheConfig represents the configuration of the inter-query cache that built-in functions can utilize. +// MaxSizeBytes - max capacity of cache in bytes +// ForcedEvictionThresholdPercentage - capacity usage in percentage after which forced FIFO eviction starts +// StaleEntryEvictionPeriodSeconds - time period between end of previous and start of new stale entry eviction routine +type InterQueryBuiltinCacheConfig struct { + MaxSizeBytes *int64 `json:"max_size_bytes,omitempty"` + ForcedEvictionThresholdPercentage *int64 `json:"forced_eviction_threshold_percentage,omitempty"` + StaleEntryEvictionPeriodSeconds *int64 `json:"stale_entry_eviction_period_seconds,omitempty"` +} + +// ParseCachingConfig returns the config for the inter-query cache. +func ParseCachingConfig(raw []byte) (*Config, error) { + if raw == nil { + maxSize := new(int64) + *maxSize = defaultMaxSizeBytes + threshold := new(int64) + *threshold = defaultForcedEvictionThresholdPercentage + period := new(int64) + *period = defaultStaleEntryEvictionPeriodSeconds + + maxInterQueryBuiltinValueCacheSize := new(int) + *maxInterQueryBuiltinValueCacheSize = defaultInterQueryBuiltinValueCacheSize + + return &Config{ + InterQueryBuiltinCache: InterQueryBuiltinCacheConfig{ + MaxSizeBytes: maxSize, + ForcedEvictionThresholdPercentage: threshold, + StaleEntryEvictionPeriodSeconds: period, + }, + InterQueryBuiltinValueCache: InterQueryBuiltinValueCacheConfig{ + MaxNumEntries: maxInterQueryBuiltinValueCacheSize, + }, + }, nil + } + + var config Config + + if err := util.Unmarshal(raw, &config); err == nil { + if err = config.validateAndInjectDefaults(); err != nil { + return nil, err + } + } else { + return nil, err + } + + return &config, nil +} + +func (c *Config) validateAndInjectDefaults() error { + if c.InterQueryBuiltinCache.MaxSizeBytes == nil { + maxSize := new(int64) + *maxSize = defaultMaxSizeBytes + c.InterQueryBuiltinCache.MaxSizeBytes = maxSize + } + if c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage == nil { + threshold := new(int64) + *threshold = defaultForcedEvictionThresholdPercentage + c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage = threshold + } else { + threshold := *c.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage + if threshold < 0 || threshold > 100 { + return fmt.Errorf("invalid forced_eviction_threshold_percentage %v", threshold) + } + } + if c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds == nil { + period := new(int64) + *period = defaultStaleEntryEvictionPeriodSeconds + c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds = period + } else { + period := *c.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds + if period < 0 { + return fmt.Errorf("invalid stale_entry_eviction_period_seconds %v", period) + } + } + + if c.InterQueryBuiltinValueCache.MaxNumEntries == nil { + maxSize := new(int) + *maxSize = defaultInterQueryBuiltinValueCacheSize + c.InterQueryBuiltinValueCache.MaxNumEntries = maxSize + } else { + numEntries := *c.InterQueryBuiltinValueCache.MaxNumEntries + if numEntries < 0 { + return fmt.Errorf("invalid max_num_entries %v", numEntries) + } + } + + for name, namedConfig := range c.InterQueryBuiltinValueCache.NamedCacheConfigs { + numEntries := *namedConfig.MaxNumEntries + if numEntries < 0 { + return 
fmt.Errorf("invalid max_num_entries %v for named cache %v", numEntries, name) + } + } + + return nil +} + +// InterQueryCacheValue defines the interface for the data that the inter-query cache holds. +type InterQueryCacheValue interface { + SizeInBytes() int64 + Clone() (InterQueryCacheValue, error) +} + +// InterQueryCache defines the interface for the inter-query cache. +type InterQueryCache interface { + Get(key ast.Value) (value InterQueryCacheValue, found bool) + Insert(key ast.Value, value InterQueryCacheValue) int + InsertWithExpiry(key ast.Value, value InterQueryCacheValue, expiresAt time.Time) int + Delete(key ast.Value) + UpdateConfig(config *Config) + Clone(value InterQueryCacheValue) (InterQueryCacheValue, error) +} + +// NewInterQueryCache returns a new inter-query cache. +// The cache uses a FIFO eviction policy when it reaches the forced eviction threshold. +// Parameters: +// +// config - to configure the InterQueryCache +func NewInterQueryCache(config *Config) InterQueryCache { + return newCache(config) +} + +// NewInterQueryCacheWithContext returns a new inter-query cache with context. +// The cache uses a combination of FIFO eviction policy when it reaches the forced eviction threshold +// and a periodic cleanup routine to remove stale entries that exceed their expiration time, if specified. +// If configured with a zero stale_entry_eviction_period_seconds value, the stale entry cleanup routine is disabled. +// +// Parameters: +// +// ctx - used to control lifecycle of the stale entry cleanup routine +// config - to configure the InterQueryCache +func NewInterQueryCacheWithContext(ctx context.Context, config *Config) InterQueryCache { + iqCache := newCache(config) + if iqCache.staleEntryEvictionTimePeriodSeconds() > 0 { + go func() { + cleanupTicker := time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) + for { + select { + case <-cleanupTicker.C: + // NOTE: We stop the ticker and create a new one here to ensure that applications + // get _at least_ staleEntryEvictionTimePeriodSeconds with the cache unlocked; + // see https://github.com/open-policy-agent/opa/pull/7188/files#r1855342998 + cleanupTicker.Stop() + iqCache.cleanStaleValues() + cleanupTicker = time.NewTicker(time.Duration(iqCache.staleEntryEvictionTimePeriodSeconds()) * time.Second) + case <-ctx.Done(): + cleanupTicker.Stop() + return + } + } + }() + } + + return iqCache +} + +type cacheItem struct { + value InterQueryCacheValue + expiresAt time.Time + keyElement *list.Element +} + +type cache struct { + items map[string]cacheItem + usage int64 + config *Config + l *list.List + mtx sync.Mutex +} + +func newCache(config *Config) *cache { + return &cache{ + items: map[string]cacheItem{}, + usage: 0, + config: config, + l: list.New(), + } +} + +// InsertWithExpiry inserts a key k into the cache with value v with an expiration time expiresAt. +// A zero time value for expiresAt indicates no expiry +func (c *cache) InsertWithExpiry(k ast.Value, v InterQueryCacheValue, expiresAt time.Time) (dropped int) { + c.mtx.Lock() + defer c.mtx.Unlock() + return c.unsafeInsert(k, v, expiresAt) +} + +// Insert inserts a key k into the cache with value v with no expiration time. +func (c *cache) Insert(k ast.Value, v InterQueryCacheValue) (dropped int) { + return c.InsertWithExpiry(k, v, time.Time{}) +} + +// Get returns the value in the cache for k. 
+func (c *cache) Get(k ast.Value) (InterQueryCacheValue, bool) { + c.mtx.Lock() + defer c.mtx.Unlock() + cacheItem, ok := c.unsafeGet(k) + + if ok { + return cacheItem.value, true + } + return nil, false +} + +// Delete deletes the value in the cache for k. +func (c *cache) Delete(k ast.Value) { + c.mtx.Lock() + defer c.mtx.Unlock() + c.unsafeDelete(k) +} + +func (c *cache) UpdateConfig(config *Config) { + if config == nil { + return + } + c.mtx.Lock() + defer c.mtx.Unlock() + c.config = config +} + +func (c *cache) Clone(value InterQueryCacheValue) (InterQueryCacheValue, error) { + c.mtx.Lock() + defer c.mtx.Unlock() + return c.unsafeClone(value) +} + +func (c *cache) unsafeInsert(k ast.Value, v InterQueryCacheValue, expiresAt time.Time) (dropped int) { + size := v.SizeInBytes() + limit := int64(math.Ceil(float64(c.forcedEvictionThresholdPercentage())/100.0) * (float64(c.maxSizeBytes()))) + if limit > 0 { + if size > limit { + dropped++ + return dropped + } + + for key := c.l.Front(); key != nil && (c.usage+size > limit); key = c.l.Front() { + dropKey := key.Value.(ast.Value) + c.unsafeDelete(dropKey) + dropped++ + } + } + + // By deleting the old value, if it exists, we ensure the usage variable stays correct + c.unsafeDelete(k) + + c.items[k.String()] = cacheItem{ + value: v, + expiresAt: expiresAt, + keyElement: c.l.PushBack(k), + } + c.usage += size + return dropped +} + +func (c *cache) unsafeGet(k ast.Value) (cacheItem, bool) { + value, ok := c.items[k.String()] + return value, ok +} + +func (c *cache) unsafeDelete(k ast.Value) { + cacheItem, ok := c.unsafeGet(k) + if !ok { + return + } + + c.usage -= cacheItem.value.SizeInBytes() + delete(c.items, k.String()) + c.l.Remove(cacheItem.keyElement) +} + +func (c *cache) unsafeClone(value InterQueryCacheValue) (InterQueryCacheValue, error) { + return value.Clone() +} + +func (c *cache) maxSizeBytes() int64 { + if c.config == nil { + return defaultMaxSizeBytes + } + return *c.config.InterQueryBuiltinCache.MaxSizeBytes +} + +func (c *cache) forcedEvictionThresholdPercentage() int64 { + if c.config == nil { + return defaultForcedEvictionThresholdPercentage + } + return *c.config.InterQueryBuiltinCache.ForcedEvictionThresholdPercentage +} + +func (c *cache) staleEntryEvictionTimePeriodSeconds() int64 { + if c.config == nil { + return defaultStaleEntryEvictionPeriodSeconds + } + return *c.config.InterQueryBuiltinCache.StaleEntryEvictionPeriodSeconds +} + +func (c *cache) cleanStaleValues() (dropped int) { + c.mtx.Lock() + defer c.mtx.Unlock() + for key := c.l.Front(); key != nil; { + nextKey := key.Next() + // if expiresAt is zero, the item doesn't have an expiry + if ea := c.items[(key.Value.(ast.Value)).String()].expiresAt; !ea.IsZero() && ea.Before(time.Now()) { + c.unsafeDelete(key.Value.(ast.Value)) + dropped++ + } + key = nextKey + } + return dropped +} + +type InterQueryValueCacheBucket interface { + Get(key ast.Value) (value any, found bool) + Insert(key ast.Value, value any) int + Delete(key ast.Value) +} + +type interQueryValueCacheBucket struct { + items util.TypedHashMap[ast.Value, any] + config *NamedValueCacheConfig + mtx sync.RWMutex +} + +func newItemsMap() *util.TypedHashMap[ast.Value, any] { + return util.NewTypedHashMap[ast.Value, any]( + func(a, b ast.Value) bool { return a.Compare(b) == 0 }, + func(any, any) bool { return false }, // map equality not supported + func(a ast.Value) int { return a.Hash() }, + func(any) int { return 0 }, // map equality not supported + nil, + ) +} + +func (c *interQueryValueCacheBucket) Get(k 
ast.Value) (any, bool) { + c.mtx.RLock() + defer c.mtx.RUnlock() + return c.items.Get(k) +} + +func (c *interQueryValueCacheBucket) Insert(k ast.Value, v any) (dropped int) { + c.mtx.Lock() + defer c.mtx.Unlock() + + maxEntries := c.maxNumEntries() + if maxEntries > 0 { + l := c.items.Len() + if l >= maxEntries { + itemsToRemove := l - maxEntries + 1 + + // Delete a (semi-)random key to make room for the new one. + c.items.Iter(func(k ast.Value, _ any) bool { + c.items.Delete(k) + dropped++ + + return itemsToRemove == dropped + }) + } + } + + c.items.Put(k, v) + return dropped +} + +func (c *interQueryValueCacheBucket) Delete(k ast.Value) { + c.mtx.Lock() + defer c.mtx.Unlock() + c.items.Delete(k) +} + +func (c *interQueryValueCacheBucket) updateConfig(config *NamedValueCacheConfig) { + if config == nil { + return + } + c.mtx.Lock() + defer c.mtx.Unlock() + c.config = config +} + +func (c *interQueryValueCacheBucket) maxNumEntries() int { + if c.config == nil { + return defaultInterQueryBuiltinValueCacheSize + } + return *c.config.MaxNumEntries +} + +type InterQueryValueCache interface { + InterQueryValueCacheBucket + GetCache(name string) InterQueryValueCacheBucket + UpdateConfig(config *Config) +} + +func NewInterQueryValueCache(_ context.Context, config *Config) InterQueryValueCache { + var c *InterQueryBuiltinValueCacheConfig + var nc *NamedValueCacheConfig + if config != nil { + c = &config.InterQueryBuiltinValueCache + // NOTE: This is a side-effect of reusing the interQueryValueCacheBucket as the global cache. + // It's a hidden implementation detail that we can clean up in the future when revisiting the named caches + // to automatically apply them to any built-in instead of the global cache. + nc = &NamedValueCacheConfig{ + MaxNumEntries: c.MaxNumEntries, + } + } + + return &interQueryBuiltinValueCache{ + globalCache: interQueryValueCacheBucket{ + items: *newItemsMap(), + config: nc, + }, + namedCaches: map[string]*interQueryValueCacheBucket{}, + config: c, + } +} + +type interQueryBuiltinValueCache struct { + globalCache interQueryValueCacheBucket + namedCachesLock sync.RWMutex + namedCaches map[string]*interQueryValueCacheBucket + config *InterQueryBuiltinValueCacheConfig +} + +func (c *interQueryBuiltinValueCache) Get(k ast.Value) (any, bool) { + if c == nil { + return nil, false + } + + return c.globalCache.Get(k) +} + +func (c *interQueryBuiltinValueCache) Insert(k ast.Value, v any) int { + if c == nil { + return 0 + } + + return c.globalCache.Insert(k, v) +} + +func (c *interQueryBuiltinValueCache) Delete(k ast.Value) { + if c == nil { + return + } + + c.globalCache.Delete(k) +} + +func (c *interQueryBuiltinValueCache) GetCache(name string) InterQueryValueCacheBucket { + if c == nil { + return nil + } + + if c.namedCaches == nil { + return nil + } + + c.namedCachesLock.RLock() + nc, ok := c.namedCaches[name] + c.namedCachesLock.RUnlock() + + if !ok { + c.namedCachesLock.Lock() + defer c.namedCachesLock.Unlock() + + if nc, ok := c.namedCaches[name]; ok { + // Some other goroutine has created the cache while we were waiting for the lock. + return nc + } + + var config *NamedValueCacheConfig + if c.config != nil { + config = c.config.NamedCacheConfigs[name] + if config == nil { + config = getDefaultInterQueryBuiltinValueCacheConfig(name) + } + } + + if config == nil { + // No config, cache disabled. 
+ return nil + } + + nc = &interQueryValueCacheBucket{ + items: *newItemsMap(), + config: config, + } + + c.namedCaches[name] = nc + } + + return nc +} + +func (c *interQueryBuiltinValueCache) UpdateConfig(config *Config) { + if c == nil { + return + } + + if config == nil { + c.globalCache.updateConfig(nil) + } else { + + c.globalCache.updateConfig(&NamedValueCacheConfig{ + MaxNumEntries: config.InterQueryBuiltinValueCache.MaxNumEntries, + }) + } + + c.namedCachesLock.Lock() + defer c.namedCachesLock.Unlock() + + c.config = &config.InterQueryBuiltinValueCache + + for name, nc := range c.namedCaches { + // For each named cache: if it has a config, update it; if no config, remove it. + namedConfig := c.config.NamedCacheConfigs[name] + if namedConfig == nil { + namedConfig = getDefaultInterQueryBuiltinValueCacheConfig(name) + } + + if namedConfig == nil { + delete(c.namedCaches, name) + } else { + nc.updateConfig(namedConfig) + } + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go new file mode 100644 index 000000000..534e0799a --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cancel.go @@ -0,0 +1,33 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "sync/atomic" +) + +// Cancel defines the interface for cancelling topdown queries. Cancel +// operations are thread-safe and idempotent. +type Cancel interface { + Cancel() + Cancelled() bool +} + +type cancel struct { + flag int32 +} + +// NewCancel returns a new Cancel object. +func NewCancel() Cancel { + return &cancel{} +} + +func (c *cancel) Cancel() { + atomic.StoreInt32(&c.flag, 1) +} + +func (c *cancel) Cancelled() bool { + return atomic.LoadInt32(&c.flag) != 0 +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/casts.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go similarity index 82% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/casts.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go index 2eb8f97fc..9be7271c4 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/casts.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/casts.go @@ -6,24 +6,38 @@ package topdown import ( "strconv" + "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinToNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { switch a := operands[0].Value.(type) { case ast.Null: - return iter(ast.NumberTerm("0")) + return iter(ast.InternedIntNumberTerm(0)) case ast.Boolean: if a { - return iter(ast.NumberTerm("1")) + return iter(ast.InternedIntNumberTerm(1)) } - return iter(ast.NumberTerm("0")) + return iter(ast.InternedIntNumberTerm(0)) case ast.Number: return iter(ast.NewTerm(a)) case ast.String: - _, err := strconv.ParseFloat(string(a), 64) + strValue := string(a) + + if it := ast.InternedIntNumberTermFromString(strValue); it != nil { + return iter(it) + } + + trimmedVal := strings.TrimLeft(strValue, "+-") + lowerCaseVal := strings.ToLower(trimmedVal) + + if lowerCaseVal == "inf" || lowerCaseVal == "infinity" || lowerCaseVal == "nan" { + return 
builtins.NewOperandTypeErr(1, operands[0].Value, "valid number string") + } + + _, err := strconv.ParseFloat(strValue, 64) if err != nil { return err } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/cidr.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go similarity index 95% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/cidr.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go index 5b011bd16..113bd2f37 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/cidr.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/cidr.go @@ -8,9 +8,9 @@ import ( "net" "sort" - "github.com/open-policy-agent/opa/ast" cidrMerge "github.com/open-policy-agent/opa/internal/cidr/merge" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func getNetFromOperand(v ast.Value) (*net.IPNet, error) { @@ -75,7 +75,7 @@ func builtinNetCIDRIntersects(_ BuiltinContext, operands []*ast.Term, iter func( // If either net contains the others starting IP they are overlapping cidrsOverlap := cidrnetA.Contains(cidrnetB.IP) || cidrnetB.Contains(cidrnetA.IP) - return iter(ast.BooleanTerm(cidrsOverlap)) + return iter(ast.InternedBooleanTerm(cidrsOverlap)) } func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -92,7 +92,7 @@ func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*a ip := net.ParseIP(string(bStr)) if ip != nil { - return iter(ast.BooleanTerm(cidrnetA.Contains(ip))) + return iter(ast.InternedBooleanTerm(cidrnetA.Contains(ip))) } // It wasn't an IP, try and parse it as a CIDR @@ -113,7 +113,7 @@ func builtinNetCIDRContains(_ BuiltinContext, operands []*ast.Term, iter func(*a cidrContained = cidrnetA.Contains(lastIP) } - return iter(ast.BooleanTerm(cidrContained)) + return iter(ast.InternedBooleanTerm(cidrContained)) } var errNetCIDRContainsMatchElementType = errors.New("element must be string or non-empty array") @@ -142,7 +142,7 @@ func evalNetCIDRContainsMatchesOperand(operand int, a *ast.Term, iter func(cidr, if err != nil { return fmt.Errorf("operand %v: %v", operand, err) } - if err := iter(cidr, ast.IntNumberTerm(i)); err != nil { + if err := iter(cidr, ast.InternedIntNumberTerm(i)); err != nil { return err } } @@ -219,13 +219,13 @@ func builtinNetCIDRExpand(bctx BuiltinContext, operands []*ast.Term, iter func(* func builtinNetCIDRIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { cidr, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } if _, _, err := net.ParseCIDR(string(cidr)); err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } - return iter(ast.BooleanTerm(true)) + return iter(ast.InternedBooleanTerm(true)) } type cidrBlockRange struct { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/comparison.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go similarity index 90% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/comparison.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go index 0d033d2c3..9e1585a28 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/comparison.go +++ 
b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/comparison.go @@ -4,7 +4,7 @@ package topdown -import "github.com/open-policy-agent/opa/ast" +import "github.com/open-policy-agent/opa/v1/ast" type compareFunc func(a, b ast.Value) bool @@ -34,7 +34,7 @@ func compareEq(a, b ast.Value) bool { func builtinCompare(cmp compareFunc) BuiltinFunc { return func(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - return iter(ast.BooleanTerm(cmp(operands[0].Value, operands[1].Value))) + return iter(ast.InternedBooleanTerm(cmp(operands[0].Value, operands[1].Value))) } } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go similarity index 99% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go index 8824d19bd..233bbcad1 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/copypropagation.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/copypropagation.go @@ -8,7 +8,7 @@ import ( "fmt" "sort" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // CopyPropagator implements a simple copy propagation optimization to remove diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go similarity index 96% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go index 38ec56f31..679464250 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/copypropagation/unionfind.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/copypropagation/unionfind.go @@ -7,8 +7,8 @@ package copypropagation import ( "fmt" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/util" ) type rankFunc func(*unionFindRoot, *unionFindRoot) (*unionFindRoot, *unionFindRoot) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/crypto.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go similarity index 93% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/crypto.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go index f24432a26..ab499e3e8 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/crypto.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/crypto.go @@ -15,6 +15,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/base64" + "encoding/hex" "encoding/json" "encoding/pem" "fmt" @@ -25,9 +26,9 @@ import ( "github.com/open-policy-agent/opa/internal/jwx/jwk" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" ) const ( @@ -96,7 +97,7 @@ func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, 
operands []*a } invalid := ast.ArrayTerm( - ast.BooleanTerm(false), + ast.InternedBooleanTerm(false), ast.NewTerm(ast.NewArray()), ) @@ -116,7 +117,7 @@ func builtinCryptoX509ParseAndVerifyCertificates(_ BuiltinContext, operands []*a } valid := ast.ArrayTerm( - ast.BooleanTerm(true), + ast.InternedBooleanTerm(true), ast.NewTerm(value), ) @@ -152,14 +153,12 @@ func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, op return err } - invalid := ast.ArrayTerm( - ast.BooleanTerm(false), - ast.NewTerm(ast.NewArray()), - ) - certs, err := getX509CertsFromString(string(input)) if err != nil { - return iter(invalid) + return iter(ast.ArrayTerm( + ast.InternedBooleanTerm(false), + ast.NewTerm(ast.NewArray()), + )) } // Collect the cert verification options @@ -170,7 +169,10 @@ func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, op verified, err := verifyX509CertificateChain(certs, verifyOpt) if err != nil { - return iter(invalid) + return iter(ast.ArrayTerm( + ast.InternedBooleanTerm(false), + ast.NewTerm(ast.NewArray()), + )) } value, err := ast.InterfaceToValue(verified) @@ -178,12 +180,10 @@ func builtinCryptoX509ParseAndVerifyCertificatesWithOptions(_ BuiltinContext, op return err } - valid := ast.ArrayTerm( - ast.BooleanTerm(true), + return iter(ast.ArrayTerm( + ast.InternedBooleanTerm(true), ast.NewTerm(value), - ) - - return iter(valid) + )) } func extractVerifyOpts(options ast.Object) (verifyOpt x509.VerifyOptions, err error) { @@ -374,7 +374,7 @@ func builtinCryptoJWKFromPrivateKey(_ BuiltinContext, operands []*ast.Term, iter } if len(rawKeys) == 0 { - return iter(ast.NullTerm()) + return iter(ast.InternedNullTerm) } key, err := jwk.New(rawKeys[0]) @@ -408,7 +408,7 @@ func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter } if string(input) == "" { - return iter(ast.NullTerm()) + return iter(ast.InternedNullTerm) } // get the raw private key @@ -418,7 +418,7 @@ func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter } if len(rawKeys) == 0 { - return iter(ast.NewTerm(ast.NewArray())) + return iter(emptyArr) } bs, err := json.Marshal(rawKeys) @@ -439,36 +439,43 @@ func builtinCryptoParsePrivateKeys(_ BuiltinContext, operands []*ast.Term, iter return iter(ast.NewTerm(value)) } -func hashHelper(a ast.Value, h func(ast.String) string) (ast.Value, error) { - s, err := builtins.StringOperand(a, 1) - if err != nil { - return nil, err - } - return ast.String(h(s)), nil +func toHexEncodedString(src []byte) string { + dst := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(dst, src) + return util.ByteSliceToString(dst) } func builtinCryptoMd5(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", md5.Sum([]byte(s))) }) + s, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err } - return iter(ast.NewTerm(res)) + + md5sum := md5.Sum([]byte(s)) + + return iter(ast.StringTerm(toHexEncodedString(md5sum[:]))) } func builtinCryptoSha1(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", sha1.Sum([]byte(s))) }) + s, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err } - return iter(ast.NewTerm(res)) + + sha1sum := sha1.Sum([]byte(s)) + + return iter(ast.StringTerm(toHexEncodedString(sha1sum[:]))) } func 
builtinCryptoSha256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - res, err := hashHelper(operands[0].Value, func(s ast.String) string { return fmt.Sprintf("%x", sha256.Sum256([]byte(s))) }) + s, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err } - return iter(ast.NewTerm(res)) + + sha256sum := sha256.Sum256([]byte(s)) + + return iter(ast.StringTerm(toHexEncodedString(sha256sum[:]))) } func hmacHelper(operands []*ast.Term, iter func(*ast.Term) error, h func() hash.Hash) error { @@ -522,7 +529,7 @@ func builtinCryptoHmacEqual(_ BuiltinContext, operands []*ast.Term, iter func(*a res := hmac.Equal([]byte(mac1), []byte(mac2)) - return iter(ast.BooleanTerm(res)) + return iter(ast.InternedBooleanTerm(res)) } func init() { @@ -725,9 +732,11 @@ func readCertFromFile(localCertFile string) ([]byte, error) { return certPEM, nil } +var beginPrefix = []byte("-----BEGIN ") + func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls.Certificate, error) { - if !strings.HasPrefix(string(certPemBlock), "-----BEGIN") { + if !bytes.HasPrefix(certPemBlock, beginPrefix) { s, err := base64.StdEncoding.DecodeString(string(certPemBlock)) if err != nil { return nil, err @@ -735,7 +744,7 @@ func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls. certPemBlock = s } - if !strings.HasPrefix(string(keyPemBlock), "-----BEGIN") { + if !bytes.HasPrefix(keyPemBlock, beginPrefix) { s, err := base64.StdEncoding.DecodeString(string(keyPemBlock)) if err != nil { return nil, err @@ -744,7 +753,7 @@ func getTLSx509KeyPairFromString(certPemBlock []byte, keyPemBlock []byte) (*tls. } // we assume it a DER certificate and try to convert it to a PEM. - if !bytes.HasPrefix(certPemBlock, []byte("-----BEGIN")) { + if !bytes.HasPrefix(certPemBlock, beginPrefix) { pemBlock := &pem.Block{ Type: "CERTIFICATE", diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go new file mode 100644 index 000000000..9aa7aa45c --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package topdown provides low-level query evaluation support. +// +// The topdown implementation is a modified version of the standard top-down +// evaluation algorithm used in Datalog. References and comprehensions are +// evaluated eagerly while all other terms are evaluated lazily. 
+package topdown diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/encoding.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go similarity index 95% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/encoding.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go index f3475a60d..a27a9c245 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/encoding.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/encoding.go @@ -15,9 +15,9 @@ import ( "sigs.k8s.io/yaml" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/util" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" ) func builtinJSONMarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -144,10 +144,10 @@ func builtinJSONIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.T str, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } - return iter(ast.BooleanTerm(json.Valid([]byte(str)))) + return iter(ast.InternedBooleanTerm(json.Valid([]byte(str)))) } func builtinBase64Encode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -175,11 +175,11 @@ func builtinBase64Decode(_ BuiltinContext, operands []*ast.Term, iter func(*ast. func builtinBase64IsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { str, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } _, err = base64.StdEncoding.DecodeString(string(str)) - return iter(ast.BooleanTerm(err == nil)) + return iter(ast.InternedBooleanTerm(err == nil)) } func builtinBase64UrlEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -355,12 +355,12 @@ func builtinYAMLUnmarshal(_ BuiltinContext, operands []*ast.Term, iter func(*ast func builtinYAMLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { str, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { - return iter(ast.BooleanTerm(false)) + return iter(ast.InternedBooleanTerm(false)) } var x interface{} err = yaml.Unmarshal([]byte(str), &x) - return iter(ast.BooleanTerm(err == nil)) + return iter(ast.InternedBooleanTerm(err == nil)) } func builtinHexEncode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go new file mode 100644 index 000000000..cadd16319 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/errors.go @@ -0,0 +1,149 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "errors" + "fmt" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// Halt is a special error type that built-in function implementations return to indicate +// that policy evaluation should stop immediately. 
+type Halt struct { + Err error +} + +func (h Halt) Error() string { + return h.Err.Error() +} + +func (h Halt) Unwrap() error { return h.Err } + +// Error is the error type returned by the Eval and Query functions when +// an evaluation error occurs. +type Error struct { + Code string `json:"code"` + Message string `json:"message"` + Location *ast.Location `json:"location,omitempty"` + err error `json:"-"` +} + +const ( + + // InternalErr represents an unknown evaluation error. + InternalErr string = "eval_internal_error" + + // CancelErr indicates the evaluation process was cancelled. + CancelErr string = "eval_cancel_error" + + // ConflictErr indicates a conflict was encountered during evaluation. For + // instance, a conflict occurs if a rule produces multiple, differing values + // for the same key in an object. Conflict errors indicate the policy does + // not account for the data loaded into the policy engine. + ConflictErr string = "eval_conflict_error" + + // TypeErr indicates evaluation stopped because an expression was applied to + // a value of an inappropriate type. + TypeErr string = "eval_type_error" + + // BuiltinErr indicates a built-in function received a semantically invalid + // input or encountered some kind of runtime error, e.g., connection + // timeout, connection refused, etc. + BuiltinErr string = "eval_builtin_error" + + // WithMergeErr indicates that the real and replacement data could not be merged. + WithMergeErr string = "eval_with_merge_error" +) + +// IsError returns true if the err is an Error. +func IsError(err error) bool { + var e *Error + return errors.As(err, &e) +} + +// IsCancel returns true if err was caused by cancellation. +func IsCancel(err error) bool { + return errors.Is(err, &Error{Code: CancelErr}) +} + +// Is allows matching topdown errors using errors.Is (see IsCancel). 
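// NOTE: usage sketch only, not part of the vendored file. Because empty
// fields on the target act as wildcards in (*Error).Is below, callers can
// match a whole class of evaluation errors with errors.Is:
//
//	if errors.Is(err, &Error{Code: ConflictErr}) {
//		// a rule produced conflicting outputs; handle separately from
//		// cancellation or internal errors
//	}
//
// IsCancel above is implemented in exactly this way for Code == CancelErr.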
+func (e *Error) Is(target error) bool { + var t *Error + if errors.As(target, &t) { + return (t.Code == "" || e.Code == t.Code) && + (t.Message == "" || e.Message == t.Message) && + (t.Location == nil || t.Location.Compare(e.Location) == 0) + } + return false +} + +func (e *Error) Error() string { + msg := fmt.Sprintf("%v: %v", e.Code, e.Message) + + if e.Location != nil { + msg = e.Location.String() + ": " + msg + } + + return msg +} + +func (e *Error) Wrap(err error) *Error { + e.err = err + return e +} + +func (e *Error) Unwrap() error { + return e.err +} + +func functionConflictErr(loc *ast.Location) error { + return &Error{ + Code: ConflictErr, + Location: loc, + Message: "functions must not produce multiple outputs for same inputs", + } +} + +func completeDocConflictErr(loc *ast.Location) error { + return &Error{ + Code: ConflictErr, + Location: loc, + Message: "complete rules must not produce multiple outputs", + } +} + +func objectDocKeyConflictErr(loc *ast.Location) error { + return &Error{ + Code: ConflictErr, + Location: loc, + Message: "object keys must be unique", + } +} + +func unsupportedBuiltinErr(loc *ast.Location) error { + return &Error{ + Code: InternalErr, + Location: loc, + Message: "unsupported built-in", + } +} + +func mergeConflictErr(loc *ast.Location) error { + return &Error{ + Code: WithMergeErr, + Location: loc, + Message: "real and replacement data could not be merged", + } +} + +func internalErr(loc *ast.Location, msg string) error { + return &Error{ + Code: InternalErr, + Location: loc, + Message: msg, + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/eval.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go similarity index 87% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/eval.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go index 2fcc431c8..4758759e7 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/eval.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/eval.go @@ -5,19 +5,21 @@ import ( "errors" "fmt" "io" - "sort" + "slices" "strconv" "strings" - - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/metrics" - "github.com/open-policy-agent/opa/storage" - "github.com/open-policy-agent/opa/topdown/builtins" - "github.com/open-policy-agent/opa/topdown/cache" - "github.com/open-policy-agent/opa/topdown/copypropagation" - "github.com/open-policy-agent/opa/topdown/print" - "github.com/open-policy-agent/opa/tracing" - "github.com/open-policy-agent/opa/types" + "sync" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/copypropagation" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/types" + "github.com/open-policy-agent/opa/v1/util" ) type evalIterator func(*eval) error @@ -57,59 +59,91 @@ func (ee deferredEarlyExitError) Error() string { return fmt.Sprintf("%v: deferred early exit", ee.e.query) } +// Note(æ): this struct is formatted for optimal alignment as it is big, internal and instantiated +// *very* frequently during evaluation. 
If you need to add fields here, please consider the alignment +// of the struct, and use something like betteralign (https://github.com/dkorunic/betteralign) if you +// need help with that. type eval struct { - ctx context.Context - metrics metrics.Metrics - seed io.Reader - time *ast.Term - queryID uint64 - queryIDFact *queryIDFactory - parent *eval - caller *eval - cancel Cancel - query ast.Body - queryCompiler ast.QueryCompiler - index int - indexing bool - earlyExit bool - bindings *bindings - store storage.Store - baseCache *baseCache - txn storage.Transaction - compiler *ast.Compiler - input *ast.Term - data *ast.Term - external *resolverTrie - targetStack *refStack - tracers []QueryTracer - traceEnabled bool - traceLastLocation *ast.Location // Last location of a trace event. - plugTraceVars bool - instr *Instrumentation - builtins map[string]*Builtin - builtinCache builtins.Cache - ndBuiltinCache builtins.NDBCache - functionMocks *functionMocksStack - virtualCache VirtualCache - comprehensionCache *comprehensionCache - interQueryBuiltinCache cache.InterQueryCache - saveSet *saveSet - saveStack *saveStack - saveSupport *saveSupport - saveNamespace *ast.Term - skipSaveNamespace bool - inliningControl *inliningControl - genvarprefix string - genvarid int - runtime *ast.Term - builtinErrors *builtinErrors - printHook print.Hook - tracingOpts tracing.Options - findOne bool - strictObjects bool + ctx context.Context + metrics metrics.Metrics + seed io.Reader + cancel Cancel + queryCompiler ast.QueryCompiler + store storage.Store + txn storage.Transaction + virtualCache VirtualCache + interQueryBuiltinCache cache.InterQueryCache + interQueryBuiltinValueCache cache.InterQueryValueCache + printHook print.Hook + time *ast.Term + queryIDFact *queryIDFactory + parent *eval + caller *eval + bindings *bindings + baseCache *baseCache + compiler *ast.Compiler + input *ast.Term + data *ast.Term + external *resolverTrie + targetStack *refStack + traceLastLocation *ast.Location // Last location of a trace event. 
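// NOTE: illustration only, not part of the vendored file, of why the field
// ordering in this struct matters on 64-bit platforms: keeping pointer-sized
// fields together ahead of the small int/bool fields minimizes padding.
//
//	type bad struct { // unsafe.Sizeof == 24: bool, 7 bytes padding, pointer, bool, 7 bytes padding
//		a bool
//		p *int
//		b bool
//	}
//
//	type good struct { // unsafe.Sizeof == 16: pointer, two bools, 6 bytes padding
//		p *int
//		a bool
//		b bool
//	}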
+ instr *Instrumentation + builtins map[string]*Builtin + builtinCache builtins.Cache + ndBuiltinCache builtins.NDBCache + functionMocks *functionMocksStack + comprehensionCache *comprehensionCache + saveSet *saveSet + saveStack *saveStack + saveSupport *saveSupport + saveNamespace *ast.Term + inliningControl *inliningControl + runtime *ast.Term + builtinErrors *builtinErrors + roundTripper CustomizeRoundTripper + genvarprefix string + query ast.Body + tracers []QueryTracer + tracingOpts tracing.Options + queryID uint64 + index int + genvarid int + indexing bool + earlyExit bool + traceEnabled bool + plugTraceVars bool + skipSaveNamespace bool + findOne bool + strictObjects bool + defined bool +} + +type evp struct { + pool sync.Pool +} + +func (ep *evp) Put(e *eval) { + ep.pool.Put(e) +} + +func (ep *evp) Get() *eval { + return ep.pool.Get().(*eval) +} + +var evalPool = evp{ + pool: sync.Pool{ + New: func() any { + return &eval{} + }, + }, } func (e *eval) Run(iter evalIterator) error { + if !e.traceEnabled { + // avoid function literal escaping to heap if we don't need the trace + return e.eval(iter) + } + e.traceEnter(e.query) return e.eval(func(e *eval) error { e.traceExit(e.query) @@ -150,25 +184,23 @@ func (e *eval) builtinFunc(name string) (*ast.Builtin, BuiltinFunc, bool) { return nil, nil, false } -func (e *eval) closure(query ast.Body) *eval { - cpy := *e +func (e *eval) closure(query ast.Body, cpy *eval) { + *cpy = *e cpy.index = 0 cpy.query = query cpy.queryID = cpy.queryIDFact.Next() cpy.parent = e cpy.findOne = false - return &cpy } -func (e *eval) child(query ast.Body) *eval { - cpy := *e +func (e *eval) child(query ast.Body, cpy *eval) { + *cpy = *e cpy.index = 0 cpy.query = query cpy.queryID = cpy.queryIDFact.Next() cpy.bindings = newBindings(cpy.queryID, e.instr) cpy.parent = e cpy.findOne = false - return &cpy } func (e *eval) next(iter evalIterator) error { @@ -334,6 +366,13 @@ func (e *eval) evalExpr(iter evalIterator) error { } if e.cancel != nil && e.cancel.Cancelled() { + if e.ctx != nil && e.ctx.Err() != nil { + return &Error{ + Code: CancelErr, + Message: e.ctx.Err().Error(), + err: e.ctx.Err(), + } + } return &Error{ Code: CancelErr, Message: "caller cancelled query execution", @@ -345,9 +384,7 @@ func (e *eval) evalExpr(iter evalIterator) error { if err != nil { switch err := err.(type) { - case *deferredEarlyExitError: - return wrapErr(err) - case *earlyExitError: + case *deferredEarlyExitError, *earlyExitError: return wrapErr(err) default: return err @@ -373,46 +410,110 @@ func (e *eval) evalExpr(iter evalIterator) error { } func (e *eval) evalStep(iter evalIterator) error { - expr := e.query[e.index] if expr.Negated { return e.evalNot(iter) } - var defined bool var err error + + // NOTE(æ): the reason why there's one branch for the tracing case and one almost + // identical branch below for when tracing is disabled is that the tracing case + // allocates wildly. These allocations are cause by the "defined" boolean variable + // escaping to the heap as its value is set from inside of closures. There may very + // well be more elegant solutions to this problem, but this is one that works, and + // saves several *million* allocations for some workloads. So feel free to refactor + // this, but do make sure that the common non-tracing case doesn't pay in allocations + // for something that is only needed when tracing is enabled. 
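// NOTE: minimal illustration only, not part of the vendored file, of the
// escape described in the note above. A local flag that is written from
// inside a callback handed to another function generally cannot stay on the
// stack, because the compiler must assume the closure (and the captured
// variable with it) may outlive the frame:
//
//	var defined bool
//	err = e.unify(terms[1], terms[2], func() error {
//		defined = true // forces "defined" onto the heap
//		return iter(e)
//	})
//
// The non-tracing branches below capture no such flag, so the common path
// does not pay for a heap-allocated boolean that only tracing needs.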
+ if e.traceEnabled { + var defined bool + switch terms := expr.Terms.(type) { + case []*ast.Term: + switch { + case expr.IsEquality(): + err = e.unify(terms[1], terms[2], func() error { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + }) + default: + err = e.evalCall(terms, func() error { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + }) + } + case *ast.Term: + // generateVar inlined here to avoid extra allocations in hot path + rterm := ast.VarTerm(e.fmtVarTerm()) + err = e.unify(terms, rterm, func() error { + if e.saveSet.Contains(rterm, e.bindings) { + return e.saveExpr(ast.NewExpr(rterm), e.bindings, func() error { + return iter(e) + }) + } + if !e.bindings.Plug(rterm).Equal(ast.InternedBooleanTerm(false)) { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + } + return nil + }) + case *ast.Every: + eval := evalEvery{ + Every: terms, + e: e, + expr: expr, + } + err = eval.eval(func() error { + defined = true + err := iter(e) + e.traceRedo(expr) + return err + }) + + default: // guard-rail for adding extra (Expr).Terms types + return fmt.Errorf("got %T terms: %[1]v", terms) + } + + if err != nil { + return err + } + + if !defined { + e.traceFail(expr) + } + + return nil + } + switch terms := expr.Terms.(type) { case []*ast.Term: switch { case expr.IsEquality(): err = e.unify(terms[1], terms[2], func() error { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + return iter(e) }) default: err = e.evalCall(terms, func() error { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + return iter(e) }) } case *ast.Term: - rterm := e.generateVar(fmt.Sprintf("term_%d_%d", e.queryID, e.index)) + // generateVar inlined here to avoid extra allocations in hot path + rterm := ast.VarTerm(e.fmtVarTerm()) err = e.unify(terms, rterm, func() error { if e.saveSet.Contains(rterm, e.bindings) { return e.saveExpr(ast.NewExpr(rterm), e.bindings, func() error { return iter(e) }) } - if !e.bindings.Plug(rterm).Equal(ast.BooleanTerm(false)) { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + if !e.bindings.Plug(rterm).Equal(ast.InternedBooleanTerm(false)) { + return iter(e) } return nil }) @@ -423,25 +524,28 @@ func (e *eval) evalStep(iter evalIterator) error { expr: expr, } err = eval.eval(func() error { - defined = true - err := iter(e) - e.traceRedo(expr) - return err + return iter(e) }) default: // guard-rail for adding extra (Expr).Terms types return fmt.Errorf("got %T terms: %[1]v", terms) } - if err != nil { - return err - } + return err +} - if !defined { - e.traceFail(expr) - } +// Single-purpose fmt.Sprintf replacement for generating variable names with only +// one allocation performed instead of 4, and in 1/3 the time. +func (e *eval) fmtVarTerm() string { + buf := make([]byte, 0, len(e.genvarprefix)+util.NumDigitsUint(e.queryID)+util.NumDigitsInt(e.index)+7) - return nil + buf = append(buf, e.genvarprefix...) + buf = append(buf, "_term_"...) 
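// NOTE: worked example only, not part of the vendored file. The capacity
// reserved above is len(prefix) + digits(queryID) + digits(index) + 7, where
// 7 = len("_term_") + len("_"). For a hypothetical prefix "__local",
// queryID 12 and index 3 the output is "__local_term_12_3": 7 + 2 + 1 + 7 = 17
// bytes, exactly the final length, so the buffer is never grown.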
+ buf = strconv.AppendUint(buf, e.queryID, 10) + buf = append(buf, '_') + buf = strconv.AppendInt(buf, int64(e.index), 10) + + return util.ByteSliceToString(buf) } func (e *eval) evalNot(iter evalIterator) error { @@ -452,27 +556,34 @@ func (e *eval) evalNot(iter evalIterator) error { return e.evalNotPartial(iter) } - negation := ast.NewBody(expr.Complement().NoWith()) - child := e.closure(negation) + negation := ast.NewBody(expr.ComplementNoWith()) + child := evalPool.Get() + defer evalPool.Put(child) - var defined bool - child.traceEnter(negation) + e.closure(negation, child) - err := child.eval(func(*eval) error { - child.traceExit(negation) - defined = true - child.traceRedo(negation) - return nil - }) + if e.traceEnabled { + child.traceEnter(negation) + } - if err != nil { + if err := child.eval(func(*eval) error { + if e.traceEnabled { + child.traceExit(negation) + child.traceRedo(negation) + } + child.defined = true + + return nil + }); err != nil { return err } - if !defined { + if !child.defined { return iter(e) } + child.defined = false + e.traceFail(expr) return nil } @@ -612,11 +723,14 @@ func (e *eval) evalWithPop(input, data *ast.Term) { } func (e *eval) evalNotPartial(iter evalIterator) error { - // Prepare query normally. expr := e.query[e.index] - negation := expr.Complement().NoWith() - child := e.closure(ast.NewBody(negation)) + negation := expr.ComplementNoWith() + + child := evalPool.Get() + defer evalPool.Put(child) + + e.closure(ast.NewBody(negation), child) // Unknowns is the set of variables that are marked as unknown. The variables // are namespaced with the query ID that they originate in. This ensures that @@ -709,9 +823,7 @@ func (e *eval) evalNotPartialSupport(negationID uint64, expr *ast.Expr, unknowns args = append(args, ast.NewTerm(v)) } - sort.Slice(args, func(i, j int) bool { - return args[i].Value.Compare(args[j].Value) < 0 - }) + slices.SortFunc(args, ast.TermValueCompare) if len(args) > 0 { head.Args = args @@ -769,7 +881,7 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { if ref[0].Equal(ast.DefaultRootDocument) { if mocked { f := e.compiler.TypeEnv.Get(ref).(*types.Function) - return e.evalCallValue(len(f.FuncArgs().Args), terms, mock, iter) + return e.evalCallValue(f.Arity(), terms, mock, iter) } var ir *ast.IndexResult @@ -799,11 +911,11 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { } if mocked { // value replacement of built-in call - return e.evalCallValue(len(bi.Decl.Args()), terms, mock, iter) + return e.evalCallValue(bi.Decl.Arity(), terms, mock, iter) } if e.unknown(e.query[e.index], e.bindings) { - return e.saveCall(len(bi.Decl.Args()), terms, iter) + return e.saveCall(bi.Decl.Arity(), terms, iter) } var parentID uint64 @@ -817,23 +929,25 @@ func (e *eval) evalCall(terms []*ast.Term, iter unifyIterator) error { } bctx := BuiltinContext{ - Context: e.ctx, - Metrics: e.metrics, - Seed: e.seed, - Time: e.time, - Cancel: e.cancel, - Runtime: e.runtime, - Cache: e.builtinCache, - InterQueryBuiltinCache: e.interQueryBuiltinCache, - NDBuiltinCache: e.ndBuiltinCache, - Location: e.query[e.index].Location, - QueryTracers: e.tracers, - TraceEnabled: e.traceEnabled, - QueryID: e.queryID, - ParentID: parentID, - PrintHook: e.printHook, - DistributedTracingOpts: e.tracingOpts, - Capabilities: capabilities, + Context: e.ctx, + Metrics: e.metrics, + Seed: e.seed, + Time: e.time, + Cancel: e.cancel, + Runtime: e.runtime, + Cache: e.builtinCache, + InterQueryBuiltinCache: e.interQueryBuiltinCache, + 
InterQueryBuiltinValueCache: e.interQueryBuiltinValueCache, + NDBuiltinCache: e.ndBuiltinCache, + Location: e.query[e.index].Location, + QueryTracers: e.tracers, + TraceEnabled: e.traceEnabled, + QueryID: e.queryID, + ParentID: parentID, + PrintHook: e.printHook, + DistributedTracingOpts: e.tracingOpts, + Capabilities: capabilities, + RoundTripper: e.roundTripper, } eval := evalBuiltin{ @@ -853,7 +967,7 @@ func (e *eval) evalCallValue(arity int, terms []*ast.Term, mock *ast.Term, iter return e.unify(terms[len(terms)-1], mock, iter) case len(terms) == arity+1: - if mock.Value.Compare(ast.Boolean(false)) != 0 { + if !ast.Boolean(false).Equal(mock.Value) { return iter() } return nil @@ -930,6 +1044,22 @@ func (e *eval) biunifyArraysRec(a, b *ast.Array, b1, b2 *bindings, iter unifyIte }) } +func (e *eval) biunifyTerms(a, b []*ast.Term, b1, b2 *bindings, iter unifyIterator) error { + if len(a) != len(b) { + return nil + } + return e.biunifyTermsRec(a, b, b1, b2, iter, 0) +} + +func (e *eval) biunifyTermsRec(a, b []*ast.Term, b1, b2 *bindings, iter unifyIterator, idx int) error { + if idx == len(a) { + return iter() + } + return e.biunify(a[idx], b[idx], b1, b2, func() error { + return e.biunifyTermsRec(a, b, b1, b2, iter, idx+1) + }) +} + func (e *eval) biunifyObjects(a, b ast.Object, b1, b2 *bindings, iter unifyIterator) error { if a.Len() != b.Len() { return nil @@ -1163,7 +1293,10 @@ func (e *eval) buildComprehensionCache(a *ast.Term) (*ast.Term, error) { } func (e *eval) buildComprehensionCacheArray(x *ast.ArrayComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) { - child := e.child(x.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.child(x.Body, child) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1182,7 +1315,10 @@ func (e *eval) buildComprehensionCacheArray(x *ast.ArrayComprehension, keys []*a } func (e *eval) buildComprehensionCacheSet(x *ast.SetComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) { - child := e.child(x.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.child(x.Body, child) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1202,7 +1338,10 @@ func (e *eval) buildComprehensionCacheSet(x *ast.SetComprehension, keys []*ast.T } func (e *eval) buildComprehensionCacheObject(x *ast.ObjectComprehension, keys []*ast.Term) (*comprehensionCacheElem, error) { - child := e.child(x.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.child(x.Body, child) node := newComprehensionCacheElem() return node, child.Run(func(child *eval) error { values := make([]*ast.Term, len(keys)) @@ -1283,7 +1422,11 @@ func (e *eval) amendComprehension(a *ast.Term, b1 *bindings) (*ast.Term, error) func (e *eval) biunifyComprehensionArray(x *ast.ArrayComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { result := ast.NewArray() - child := e.closure(x.Body) + child := evalPool.Get() + + e.closure(x.Body, child) + defer evalPool.Put(child) + err := child.Run(func(child *eval) error { result = result.Append(child.bindings.Plug(x.Term)) return nil @@ -1296,7 +1439,11 @@ func (e *eval) biunifyComprehensionArray(x *ast.ArrayComprehension, b *ast.Term, func (e *eval) biunifyComprehensionSet(x *ast.SetComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { result := ast.NewSet() - child := e.closure(x.Body) + child := evalPool.Get() + + 
e.closure(x.Body, child) + defer evalPool.Put(child) + err := child.Run(func(child *eval) error { result.Add(child.bindings.Plug(x.Term)) return nil @@ -1308,8 +1455,13 @@ func (e *eval) biunifyComprehensionSet(x *ast.SetComprehension, b *ast.Term, b1, } func (e *eval) biunifyComprehensionObject(x *ast.ObjectComprehension, b *ast.Term, b1, b2 *bindings, iter unifyIterator) error { + child := evalPool.Get() + defer evalPool.Put(child) + + e.closure(x.Body, child) + result := ast.NewObject() - child := e.closure(x.Body) + err := child.Run(func(child *eval) error { key := child.bindings.Plug(x.Key) value := child.bindings.Plug(x.Value) @@ -1446,12 +1598,22 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error) return nil, nil } + resolver := resolverPool.Get().(*evalResolver) + defer func() { + resolver.e = nil + resolver.args = nil + resolverPool.Put(resolver) + }() + var result *ast.IndexResult var err error if e.indexing { - result, err = index.Lookup(&evalResolver{e: e, args: args}) + resolver.e = e + resolver.args = args + result, err = index.Lookup(resolver) } else { - result, err = index.AllRules(&evalResolver{e: e}) + resolver.e = e + result, err = index.AllRules(resolver) } if err != nil { return nil, err @@ -1459,20 +1621,27 @@ func (e *eval) getRules(ref ast.Ref, args []*ast.Term) (*ast.IndexResult, error) result.EarlyExit = result.EarlyExit && e.earlyExit - var msg strings.Builder - if len(result.Rules) == 1 { - msg.WriteString("(matched 1 rule") - } else { - msg.Grow(len("(matched NNNN rules)")) - msg.WriteString("(matched ") - msg.WriteString(strconv.Itoa(len(result.Rules))) - msg.WriteString(" rules") - } - if result.EarlyExit { - msg.WriteString(", early exit") + if e.traceEnabled { + var msg strings.Builder + if len(result.Rules) == 1 { + msg.WriteString("(matched 1 rule") + } else { + msg.Grow(len("(matched NNNN rules)")) + msg.WriteString("(matched ") + msg.WriteString(strconv.Itoa(len(result.Rules))) + msg.WriteString(" rules") + } + if result.EarlyExit { + msg.WriteString(", early exit") + } + msg.WriteRune(')') + + // Copy ref here as ref otherwise always escapes to the heap, + // whether tracing is enabled or not. + r := ref.Copy() + e.traceIndex(e.query[e.index], msg.String(), &r) } - msg.WriteRune(')') - e.traceIndex(e.query[e.index], msg.String(), &ref) + return result, err } @@ -1485,10 +1654,20 @@ type evalResolver struct { args []*ast.Term } +var ( + resolverPool = sync.Pool{ + New: func() any { + return &evalResolver{} + }, + } +) + func (e *evalResolver) Resolve(ref ast.Ref) (ast.Value, error) { e.e.instr.startTimer(evalOpResolve) - if e.e.inliningControl.Disabled(ref, true) || e.e.saveSet.Contains(ast.NewTerm(ref), nil) { + // NOTE(ae): nil check on saveSet to avoid ast.NewTerm allocation when not needed + if e.e.inliningControl.Disabled(ref, true) || (e.e.saveSet != nil && + e.e.saveSet.Contains(ast.NewTerm(ref), nil)) { e.e.instr.stopTimer(evalOpResolve) return nil, ast.UnknownValueErr{} } @@ -1652,7 +1831,13 @@ func (e *eval) resolveReadFromStorage(ref ast.Ref, a ast.Value) (ast.Value, erro } func (e *eval) generateVar(suffix string) *ast.Term { - return ast.VarTerm(fmt.Sprintf("%v_%v", e.genvarprefix, suffix)) + buf := make([]byte, 0, len(e.genvarprefix)+len(suffix)+1) + + buf = append(buf, e.genvarprefix...) + buf = append(buf, '_') + buf = append(buf, suffix...) 
+ + return ast.VarTerm(util.ByteSliceToString(buf)) } func (e *eval) rewrittenVar(v ast.Var) (ast.Var, bool) { @@ -1679,7 +1864,7 @@ func (e *eval) getDeclArgsLen(x *ast.Expr) (int, error) { bi, _, ok := e.builtinFunc(operator.String()) if ok { - return len(bi.Decl.Args()), nil + return bi.Decl.Arity(), nil } ir, err := e.getRules(operator, nil) @@ -1714,7 +1899,7 @@ func (e *evalBuiltin) canUseNDBCache(bi *ast.Builtin) bool { return bi.Nondeterministic && e.bctx.NDBuiltinCache != nil } -func (e evalBuiltin) eval(iter unifyIterator) error { +func (e *evalBuiltin) eval(iter unifyIterator) error { operands := make([]*ast.Term, len(e.terms)) @@ -1722,7 +1907,7 @@ func (e evalBuiltin) eval(iter unifyIterator) error { operands[i] = e.e.bindings.Plug(e.terms[i]) } - numDeclArgs := len(e.bi.Decl.FuncArgs().Args) + numDeclArgs := e.bi.Decl.Arity() e.e.instr.startTimer(evalOpBuiltinCall) var err error @@ -1747,7 +1932,7 @@ func (e evalBuiltin) eval(iter unifyIterator) error { case e.bi.Decl.Result() == nil: return iter() case len(operands) == numDeclArgs: - if v.Compare(ast.Boolean(false)) == 0 { + if ast.Boolean(false).Equal(v) { return nil // nothing to do } return iter() @@ -1771,7 +1956,7 @@ func (e evalBuiltin) eval(iter unifyIterator) error { case e.bi.Decl.Result() == nil: err = iter() case len(operands) == numDeclArgs: - if output.Value.Compare(ast.Boolean(false)) != 0 { + if !ast.Boolean(false).Equal(output.Value) { err = iter() } // else: nothing to do, don't iter() default: @@ -1811,9 +1996,9 @@ func (e evalBuiltin) eval(iter unifyIterator) error { type evalFunc struct { e *eval + ir *ast.IndexResult ref ast.Ref terms []*ast.Term - ir *ast.IndexResult } func (e evalFunc) eval(iter unifyIterator) error { @@ -1852,9 +2037,9 @@ func (e evalFunc) eval(iter unifyIterator) error { func (e evalFunc) evalValue(iter unifyIterator, argCount int, findOne bool) error { var cacheKey ast.Ref - var hit bool - var err error if !e.e.partial() { + var hit bool + var err error cacheKey, hit, err = e.evalCache(argCount, iter) if err != nil { return err @@ -1920,9 +2105,15 @@ func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, er } else { plen = len(e.terms) } + cacheKey := make([]*ast.Term, plen) for i := 0; i < plen; i++ { - cacheKey[i] = e.e.bindings.Plug(e.terms[i]) + if e.terms[i].IsGround() { + // Avoid expensive copying of ref if it is ground. 
+ cacheKey[i] = e.terms[i] + } else { + cacheKey[i] = e.e.bindings.Plug(e.terms[i]) + } } cached, _ := e.e.virtualCache.Get(cacheKey) @@ -1942,8 +2133,10 @@ func (e evalFunc) evalCache(argCount int, iter unifyIterator) (ast.Ref, bool, er } func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.Ref, prev *ast.Term, findOne bool) (*ast.Term, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.findOne = findOne args := make([]*ast.Term, len(e.terms)-1) @@ -1957,7 +2150,7 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.R child.traceEnter(rule) - err := child.biunifyArrays(ast.NewArray(e.terms[1:]...), ast.NewArray(args...), e.e.bindings, child.bindings, func() error { + err := child.biunifyTerms(e.terms[1:], args, e.e.bindings, child.bindings, func() error { return child.eval(func(child *eval) error { child.traceExit(rule) @@ -1975,8 +2168,8 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.R } if len(rule.Head.Args) == len(e.terms)-1 { - if result.Value.Compare(ast.Boolean(false)) == 0 { - if prev != nil && ast.Compare(prev, result) != 0 { + if ast.Boolean(false).Equal(result.Value) { + if prev != nil && !prev.Equal(result) { return functionConflictErr(rule.Location) } prev = result @@ -1990,7 +2183,7 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.R // an example. if !e.e.partial() { if prev != nil { - if ast.Compare(prev, result) != 0 { + if !prev.Equal(result) { return functionConflictErr(rule.Location) } child.traceRedo(rule) @@ -2015,7 +2208,6 @@ func (e evalFunc) evalOneRule(iter unifyIterator, rule *ast.Rule, cacheKey ast.R func (e evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error { path := e.e.namespaceRef(e.ref) - term := ast.NewTerm(path) if !e.e.saveSupport.Exists(path) { for _, rule := range e.ir.Rules { @@ -2030,12 +2222,16 @@ func (e evalFunc) partialEvalSupport(declArgsLen int, iter unifyIterator) error return nil } + term := ast.NewTerm(path) + return e.e.saveCall(declArgsLen, append([]*ast.Term{term}, e.terms[1:]...), iter) } func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -2084,13 +2280,13 @@ func (e evalFunc) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) error { type evalTree struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int bindings *bindings rterm *ast.Term rbindings *bindings node *ast.TreeNode + ref ast.Ref + plugged ast.Ref + pos int } func (e evalTree) eval(iter unifyIterator) error { @@ -2113,9 +2309,7 @@ func (e evalTree) finish(iter unifyIterator) error { // In some cases, it may not be possible to PE the ref. If the path refers // to virtual docs that PE does not support or base documents where inlining // has been disabled, then we have to save. 
- save := e.e.unknown(e.plugged, e.e.bindings) - - if save { + if e.e.partial() && e.e.unknown(e.plugged, e.e.bindings) { return e.e.saveUnify(ast.NewTerm(e.plugged), e.rterm, e.bindings, e.rbindings, iter) } @@ -2185,7 +2379,7 @@ func (e evalTree) enumerate(iter unifyIterator) error { switch doc := doc.(type) { case *ast.Array: for i := 0; i < doc.Len(); i++ { - k := ast.IntNumberTerm(i) + k := ast.InternedIntNumberTerm(i) err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { return e.next(iter, k) }) @@ -2315,12 +2509,12 @@ func (e evalTree) leaves(plugged ast.Ref, node *ast.TreeNode) (ast.Object, error type evalVirtual struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int bindings *bindings rterm *ast.Term rbindings *bindings + ref ast.Ref + plugged ast.Ref + pos int } func (e evalVirtual) eval(iter unifyIterator) error { @@ -2391,14 +2585,14 @@ func (e evalVirtual) eval(iter unifyIterator) error { type evalVirtualPartial struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int ir *ast.IndexResult bindings *bindings rterm *ast.Term rbindings *bindings empty *ast.Term + ref ast.Ref + plugged ast.Ref + pos int } type evalVirtualPartialCacheHint struct { @@ -2457,14 +2651,16 @@ func (e evalVirtualPartial) evalEachRule(iter unifyIterator, unknown bool) error return nil } - m := maxRefLength(e.ir.Rules, len(e.ref)) - if e.e.unknown(e.ref[e.pos+1:m], e.bindings) { - for _, rule := range e.ir.Rules { - if err := e.evalOneRulePostUnify(iter, rule); err != nil { - return err + if e.e.partial() { + m := maxRefLength(e.ir.Rules, len(e.ref)) + if e.e.unknown(e.ref[e.pos+1:m], e.bindings) { + for _, rule := range e.ir.Rules { + if err := e.evalOneRulePostUnify(iter, rule); err != nil { + return err + } } + return nil } - return nil } hint, err := e.evalCache(iter) @@ -2534,8 +2730,11 @@ func (e evalVirtualPartial) evalAllRulesNoCache(rules []*ast.Rule) (*ast.Term, e var visitedRefs []ast.Ref + child := evalPool.Get() + defer evalPool.Put(child) + for _, rule := range rules { - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) err := child.eval(func(*eval) error { child.traceExit(rule) @@ -2568,8 +2767,10 @@ func wrapInObjects(leaf *ast.Term, ref ast.Ref) *ast.Term { } func (e evalVirtualPartial) evalOneRulePreUnify(iter unifyIterator, rule *ast.Rule, result *ast.Term, unknown bool, visitedRefs *[]ast.Ref) (*ast.Term, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) var defined bool @@ -2661,7 +2862,10 @@ func (e *eval) biunifyDynamicRef(pos int, a, b ast.Ref, b1, b2 *bindings, iter u } func (e evalVirtualPartial) evalOneRulePostUnify(iter unifyIterator, rule *ast.Rule) error { - child := e.e.child(rule.Body) + child := evalPool.Get() + defer evalPool.Put(child) + + e.e.child(rule.Body, child) child.traceEnter(rule) var defined bool @@ -2745,8 +2949,10 @@ func (e evalVirtualPartial) partialEvalSupport(iter unifyIterator) error { } func (e evalVirtualPartial) partialEvalSupportRule(rule *ast.Rule, _ ast.Ref) (bool, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -3109,13 +3315,13 @@ func (e evalVirtualPartial) reduce(rule *ast.Rule, b *bindings, result *ast.Term type evalVirtualComplete struct { e *eval - ref ast.Ref - plugged ast.Ref - pos int ir *ast.IndexResult bindings *bindings rterm *ast.Term rbindings 
*bindings + ref ast.Ref + plugged ast.Ref + pos int } func (e evalVirtualComplete) eval(iter unifyIterator) error { @@ -3224,8 +3430,10 @@ func (e evalVirtualComplete) evalValue(iter unifyIterator, findOne bool) error { } func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, prev *ast.Term, findOne bool) (*ast.Term, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.findOne = findOne child.traceEnter(rule) var result *ast.Term @@ -3260,9 +3468,11 @@ func (e evalVirtualComplete) evalValueRule(iter unifyIterator, rule *ast.Rule, p } func (e evalVirtualComplete) partialEval(iter unifyIterator) error { + child := evalPool.Get() + defer evalPool.Put(child) for _, rule := range e.ir.Rules { - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) err := child.eval(func(child *eval) error { @@ -3325,8 +3535,10 @@ func (e evalVirtualComplete) partialEvalSupport(iter unifyIterator) error { } func (e evalVirtualComplete) partialEvalSupportRule(rule *ast.Rule, path ast.Ref) (bool, error) { + child := evalPool.Get() + defer evalPool.Put(child) - child := e.e.child(rule.Body) + e.e.child(rule.Body, child) child.traceEnter(rule) e.e.saveStack.PushQuery(nil) @@ -3381,13 +3593,13 @@ func (e evalVirtualComplete) evalTerm(iter unifyIterator, term *ast.Term, termbi type evalTerm struct { e *eval - ref ast.Ref - pos int bindings *bindings term *ast.Term termbindings *bindings rterm *ast.Term rbindings *bindings + ref ast.Ref + pos int } func (e evalTerm) eval(iter unifyIterator) error { @@ -3439,32 +3651,28 @@ func (e evalTerm) enumerate(iter unifyIterator) error { switch v := e.term.Value.(type) { case *ast.Array: for i := 0; i < v.Len(); i++ { - k := ast.IntNumberTerm(i) - err := e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { + k := ast.InternedIntNumberTerm(i) + if err := handleErr(e.e.biunify(k, e.ref[e.pos], e.bindings, e.bindings, func() error { return e.next(iter, k) - }) - - if err := handleErr(err); err != nil { + })); err != nil { return err } } case ast.Object: - if err := v.Iter(func(k, _ *ast.Term) error { - err := e.e.biunify(k, e.ref[e.pos], e.termbindings, e.bindings, func() error { + for _, k := range v.Keys() { + if err := handleErr(e.e.biunify(k, e.ref[e.pos], e.termbindings, e.bindings, func() error { return e.next(iter, e.termbindings.Plug(k)) - }) - return handleErr(err) - }); err != nil { - return err + })); err != nil { + return err + } } case ast.Set: - if err := v.Iter(func(elem *ast.Term) error { - err := e.e.biunify(elem, e.ref[e.pos], e.termbindings, e.bindings, func() error { + for _, elem := range v.Slice() { + if err := handleErr(e.e.biunify(elem, e.ref[e.pos], e.termbindings, e.bindings, func() error { return e.next(iter, e.termbindings.Plug(elem)) - }) - return handleErr(err) - }); err != nil { - return err + })); err != nil { + return err + } } } @@ -3567,7 +3775,11 @@ func (e evalEvery) eval(iter unifyIterator) error { ).SetLocation(e.Domain.Location), ) - domain := e.e.closure(generator) + domain := evalPool.Get() + defer evalPool.Put(domain) + + e.e.closure(generator, domain) + all := true // all generator evaluations yield one successful body evaluation domain.traceEnter(e.expr) @@ -3578,7 +3790,11 @@ func (e evalEvery) eval(iter unifyIterator) error { // This would do extra work, like iterating needlessly if domain was a large array. 
return nil } - body := child.closure(e.Body) + + body := evalPool.Get() + defer evalPool.Put(body) + + child.closure(e.Body, body) body.findOne = true body.traceEnter(e.Body) done := false @@ -3705,10 +3921,12 @@ func applyCopyPropagation(p *copypropagation.CopyPropagator, instr *Instrumentat return result } +func nonGroundKey(k, _ *ast.Term) bool { + return !k.IsGround() +} + func nonGroundKeys(a ast.Object) bool { - return a.Until(func(k, _ *ast.Term) bool { - return !k.IsGround() - }) + return a.Until(nonGroundKey) } func plugKeys(a ast.Object, b *bindings) ast.Object { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go new file mode 100644 index 000000000..efaf1d124 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/glob.go @@ -0,0 +1,127 @@ +package topdown + +import ( + "strings" + "sync" + + "github.com/gobwas/glob" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +const globCacheMaxSize = 100 +const globInterQueryValueCacheHits = "rego_builtin_glob_interquery_value_cache_hits" + +var noDelimiters = []rune{} +var dotDelimiters = []rune{'.'} +var globCacheLock = sync.RWMutex{} +var globCache = map[string]glob.Glob{} + +func builtinGlobMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + pattern, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + var delimiters []rune + switch operands[1].Value.(type) { + case ast.Null: + delimiters = noDelimiters + case *ast.Array: + delimiters, err = builtins.RuneSliceOperand(operands[1].Value, 2) + if err != nil { + return err + } + if len(delimiters) == 0 { + delimiters = dotDelimiters + } + default: + return builtins.NewOperandTypeErr(2, operands[1].Value, "array", "null") + } + + match, err := builtins.StringOperand(operands[2].Value, 3) + if err != nil { + return err + } + + builder := strings.Builder{} + builder.WriteString(string(pattern)) + builder.WriteRune('-') + for _, v := range delimiters { + builder.WriteRune(v) + } + id := builder.String() + + m, err := globCompileAndMatch(bctx, id, string(pattern), string(match), delimiters) + if err != nil { + return err + } + return iter(ast.InternedBooleanTerm(m)) +} + +func globCompileAndMatch(bctx BuiltinContext, id, pattern, match string, delimiters []rune) (bool, error) { + + if bctx.InterQueryBuiltinValueCache != nil { + // TODO: Use named cache + val, ok := bctx.InterQueryBuiltinValueCache.Get(ast.String(id)) + if ok { + pat, valid := val.(glob.Glob) + if !valid { + // The cache key may exist for a different value type (eg. regex). + // In this case, we calculate the glob and return the result w/o updating the cache. + var err error + if pat, err = glob.Compile(pattern, delimiters...); err != nil { + return false, err + } + return pat.Match(match), nil + } + bctx.Metrics.Counter(globInterQueryValueCacheHits).Incr() + out := pat.Match(match) + return out, nil + } + + res, err := glob.Compile(pattern, delimiters...) 
+ if err != nil { + return false, err + } + bctx.InterQueryBuiltinValueCache.Insert(ast.String(id), res) + return res.Match(match), nil + } + + globCacheLock.RLock() + p, ok := globCache[id] + globCacheLock.RUnlock() + if !ok { + var err error + if p, err = glob.Compile(pattern, delimiters...); err != nil { + return false, err + } + globCacheLock.Lock() + if len(globCache) >= globCacheMaxSize { + // Delete a (semi-)random key to make room for the new one. + for k := range globCache { + delete(globCache, k) + break + } + } + globCache[id] = p + globCacheLock.Unlock() + } + + return p.Match(match), nil +} + +func builtinGlobQuoteMeta(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + pattern, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + return iter(ast.StringTerm(glob.QuoteMeta(string(pattern)))) +} + +func init() { + RegisterBuiltinFunc(ast.GlobMatch.Name, builtinGlobMatch) + RegisterBuiltinFunc(ast.GlobQuoteMeta.Name, builtinGlobQuoteMeta) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go new file mode 100644 index 000000000..0ad1cfdb5 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/graphql.go @@ -0,0 +1,485 @@ +// Copyright 2022 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "encoding/json" + "fmt" + "strings" + + gqlast "github.com/open-policy-agent/opa/internal/gqlparser/ast" + gqlparser "github.com/open-policy-agent/opa/internal/gqlparser/parser" + gqlvalidator "github.com/open-policy-agent/opa/internal/gqlparser/validator" + + // Side-effecting import. Triggers GraphQL library's validation rule init() functions. + _ "github.com/open-policy-agent/opa/internal/gqlparser/validator/rules" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +// Parses a GraphQL schema, and returns the GraphQL AST for the schema. +func parseSchema(schema string) (*gqlast.SchemaDocument, error) { + // NOTE(philipc): We don't include the "built-in schema defs" from the + // underlying graphql parsing library here, because those definitions + // generate enormous AST blobs. In the future, if there is demand for + // a "full-spec" version of schema ASTs, we may need to provide a + // version of this function that includes the built-in schema + // definitions. + schemaAST, err := gqlparser.ParseSchema(&gqlast.Source{Input: schema}) + if err != nil { + errorParts := strings.SplitN(err.Error(), ":", 4) + msg := strings.TrimLeft(errorParts[3], " ") + return nil, fmt.Errorf("%s in GraphQL string at location %s:%s", msg, errorParts[1], errorParts[2]) + } + return schemaAST, nil +} + +// Parses a GraphQL query, and returns the GraphQL AST for the query. +func parseQuery(query string) (*gqlast.QueryDocument, error) { + queryAST, err := gqlparser.ParseQuery(&gqlast.Source{Input: query}) + if err != nil { + errorParts := strings.SplitN(err.Error(), ":", 4) + msg := strings.TrimLeft(errorParts[3], " ") + return nil, fmt.Errorf("%s in GraphQL string at location %s:%s", msg, errorParts[1], errorParts[2]) + } + return queryAST, nil +} + +// Validates a GraphQL query against a schema, and returns an error. +// In this case, we get a wrappered error list type, and pluck out +// just the first error message in the list. 
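// NOTE: worked example only, not part of the vendored file, assuming the
// underlying gqlparser error keeps its usual "<source>:<line>:<column>: <message>"
// shape. strings.SplitN(err.Error(), ":", 4) then yields the source, line,
// column and message, so a validation error roughly like
//
//	input.graphql:3:10: Cannot query field "nme" on type "Employee".
//
// is reported back to policy authors as
//
//	Cannot query field "nme" on type "Employee" in GraphQL query string at location 3:10
//
// with the leading space and the trailing '.' mentioned in the comment below
// stripped by TrimLeft/TrimSuffix.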
+func validateQuery(schema *gqlast.Schema, query *gqlast.QueryDocument) error { + // Validate the query against the schema, erroring if there's an issue. + err := gqlvalidator.Validate(schema, query) + if err != nil { + // We use strings.TrimSuffix to remove the '.' characters that the library + // authors include on most of their validation errors. This should be safe, + // since variable names in their error messages are usually quoted, and + // this affects only the last character(s) in the string. + // NOTE(philipc): We know the error location will be in the query string, + // because schema validation always happens before this function is called. + errorParts := strings.SplitN(err.Error(), ":", 4) + msg := strings.TrimSuffix(strings.TrimLeft(errorParts[3], " "), ".\n") + return fmt.Errorf("%s in GraphQL query string at location %s:%s", msg, errorParts[1], errorParts[2]) + } + return nil +} + +func getBuiltinSchema() *gqlast.SchemaDocument { + schema, err := gqlparser.ParseSchema(gqlvalidator.Prelude) + if err != nil { + panic(fmt.Errorf("Error in gqlparser Prelude (should be impossible): %w", err)) + } + return schema +} + +// NOTE(philipc): This function expects *validated* schema documents, and will break +// if it is fed arbitrary structures. +func mergeSchemaDocuments(docA *gqlast.SchemaDocument, docB *gqlast.SchemaDocument) *gqlast.SchemaDocument { + ast := &gqlast.SchemaDocument{} + ast.Merge(docA) + ast.Merge(docB) + return ast +} + +// Converts a SchemaDocument into a gqlast.Schema object that can be used for validation. +// It merges in the builtin schema typedefs exactly as gqltop.LoadSchema did internally. +func convertSchema(schemaDoc *gqlast.SchemaDocument) (*gqlast.Schema, error) { + // Merge builtin schema + schema we were provided. + builtinsSchemaDoc := getBuiltinSchema() + mergedSchemaDoc := mergeSchemaDocuments(builtinsSchemaDoc, schemaDoc) + schema, err := gqlvalidator.ValidateSchemaDocument(mergedSchemaDoc) + if err != nil { + return nil, fmt.Errorf("Error in gqlparser SchemaDocument to Schema conversion: %w", err) + } + return schema, nil +} + +// Converts an ast.Object into a gqlast.QueryDocument object. +func objectToQueryDocument(value ast.Object) (*gqlast.QueryDocument, error) { + // Convert ast.Term to interface{} for JSON encoding below. + asJSON, err := ast.JSON(value) + if err != nil { + return nil, err + } + // Marshal to JSON. + bs, err := json.Marshal(asJSON) + if err != nil { + return nil, err + } + // Unmarshal from JSON -> gqlast.QueryDocument. + var result gqlast.QueryDocument + err = json.Unmarshal(bs, &result) + if err != nil { + return nil, err + } + return &result, nil +} + +// Converts an ast.Object into a gqlast.SchemaDocument object. +func objectToSchemaDocument(value ast.Object) (*gqlast.SchemaDocument, error) { + // Convert ast.Term to interface{} for JSON encoding below. + asJSON, err := ast.JSON(value) + if err != nil { + return nil, err + } + // Marshal to JSON. + bs, err := json.Marshal(asJSON) + if err != nil { + return nil, err + } + // Unmarshal from JSON -> gqlast.SchemaDocument. + var result gqlast.SchemaDocument + err = json.Unmarshal(bs, &result) + if err != nil { + return nil, err + } + return &result, nil +} + +// Recursively traverses an AST that has been run through InterfaceToValue, +// and prunes away the fields with null or empty values, and all `Position` +// structs. +// NOTE(philipc): We currently prune away null values to reduce the level +// of clutter in the returned AST objects. 
In the future, if there is demand +// for ASTs that have a more regular/fixed structure, we may need to provide +// a "raw" version of the AST, where we still prune away the `Position` +// structs, but leave in the null fields. +func pruneIrrelevantGraphQLASTNodes(value ast.Value) ast.Value { + // We iterate over the Value we've been provided, and recurse down + // in the case of complex types, such as Arrays/Objects. + // We are guaranteed to only have to deal with standard JSON types, + // so this is much less ugly than what we'd need for supporting every + // extant ast type! + switch x := value.(type) { + case *ast.Array: + result := ast.NewArray() + // Iterate over the array's elements, and do the following: + // - Drop any Nulls + // - Drop any any empty object/array value (after running the pruner) + for i := 0; i < x.Len(); i++ { + vTerm := x.Elem(i) + switch v := vTerm.Value.(type) { + case ast.Null: + continue + case *ast.Array: + // Safe, because we knew the type before going to prune it. + va := pruneIrrelevantGraphQLASTNodes(v).(*ast.Array) + if va.Len() > 0 { + result = result.Append(ast.NewTerm(va)) + } + case ast.Object: + // Safe, because we knew the type before going to prune it. + vo := pruneIrrelevantGraphQLASTNodes(v).(ast.Object) + if vo.Len() > 0 { + result = result.Append(ast.NewTerm(vo)) + } + default: + result = result.Append(vTerm) + } + } + return result + case ast.Object: + result := ast.NewObject() + // Iterate over our object's keys, and do the following: + // - Drop "Position". + // - Drop any key with a Null value. + // - Drop any key with an empty object/array value (after running the pruner) + keys := x.Keys() + for _, k := range keys { + // We drop the "Position" objects because we don't need the + // source-backref/location info they provide for policy rules. + // Note that keys are ast.Strings. + if ast.String("Position").Equal(k.Value) { + continue + } + vTerm := x.Get(k) + switch v := vTerm.Value.(type) { + case ast.Null: + continue + case *ast.Array: + // Safe, because we knew the type before going to prune it. + va := pruneIrrelevantGraphQLASTNodes(v).(*ast.Array) + if va.Len() > 0 { + result.Insert(k, ast.NewTerm(va)) + } + case ast.Object: + // Safe, because we knew the type before going to prune it. + vo := pruneIrrelevantGraphQLASTNodes(v).(ast.Object) + if vo.Len() > 0 { + result.Insert(k, ast.NewTerm(vo)) + } + default: + result.Insert(k, vTerm) + } + } + return result + default: + return x + } +} + +// Reports errors from parsing/validation. +func builtinGraphQLParse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + var queryDoc *gqlast.QueryDocument + var schemaDoc *gqlast.SchemaDocument + var err error + + // Parse/translate query if it's a string/object. + switch x := operands[0].Value.(type) { + case ast.String: + queryDoc, err = parseQuery(string(x)) + case ast.Object: + queryDoc, err = objectToQueryDocument(x) + default: + // Error if wrong type. + return builtins.NewOperandTypeErr(0, x, "string", "object") + } + if err != nil { + return err + } + + // Parse/translate schema if it's a string/object. + switch x := operands[1].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. + return builtins.NewOperandTypeErr(1, x, "string", "object") + } + if err != nil { + return err + } + + // Transform the ASTs into Objects. 
+ queryASTValue, err := ast.InterfaceToValue(queryDoc) + if err != nil { + return err + } + schemaASTValue, err := ast.InterfaceToValue(schemaDoc) + if err != nil { + return err + } + + // Validate the query against the schema, erroring if there's an issue. + schema, err := convertSchema(schemaDoc) + if err != nil { + return err + } + if err := validateQuery(schema, queryDoc); err != nil { + return err + } + + // Recursively remove irrelevant AST structures. + queryResult := pruneIrrelevantGraphQLASTNodes(queryASTValue.(ast.Object)) + querySchema := pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object)) + + // Construct return value. + verified := ast.ArrayTerm( + ast.NewTerm(queryResult), + ast.NewTerm(querySchema), + ) + + return iter(verified) +} + +// Returns default value when errors occur. +func builtinGraphQLParseAndVerify(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + var queryDoc *gqlast.QueryDocument + var schemaDoc *gqlast.SchemaDocument + var err error + + unverified := ast.ArrayTerm( + ast.InternedBooleanTerm(false), + ast.NewTerm(ast.NewObject()), + ast.NewTerm(ast.NewObject()), + ) + + // Parse/translate query if it's a string/object. + switch x := operands[0].Value.(type) { + case ast.String: + queryDoc, err = parseQuery(string(x)) + case ast.Object: + queryDoc, err = objectToQueryDocument(x) + default: + // Error if wrong type. + return iter(unverified) + } + if err != nil { + return iter(unverified) + } + + // Parse/translate schema if it's a string/object. + switch x := operands[1].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. + return iter(unverified) + } + if err != nil { + return iter(unverified) + } + + // Transform the ASTs into Objects. + queryASTValue, err := ast.InterfaceToValue(queryDoc) + if err != nil { + return iter(unverified) + } + schemaASTValue, err := ast.InterfaceToValue(schemaDoc) + if err != nil { + return iter(unverified) + } + + // Validate the query against the schema, erroring if there's an issue. + schema, err := convertSchema(schemaDoc) + if err != nil { + return iter(unverified) + } + if err := validateQuery(schema, queryDoc); err != nil { + return iter(unverified) + } + + // Recursively remove irrelevant AST structures. + queryResult := pruneIrrelevantGraphQLASTNodes(queryASTValue.(ast.Object)) + querySchema := pruneIrrelevantGraphQLASTNodes(schemaASTValue.(ast.Object)) + + // Construct return value. + verified := ast.ArrayTerm( + ast.InternedBooleanTerm(true), + ast.NewTerm(queryResult), + ast.NewTerm(querySchema), + ) + + return iter(verified) +} + +func builtinGraphQLParseQuery(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + raw, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // Get the highly-nested AST struct, along with any errors generated. + query, err := parseQuery(string(raw)) + if err != nil { + return err + } + + // Transform the AST into an Object. + value, err := ast.InterfaceToValue(query) + if err != nil { + return err + } + + // Recursively remove irrelevant AST structures. 
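// NOTE: worked example only, not part of the vendored file, of what the
// pruning step keeps. Given an AST fragment (shown as JSON) such as
//
//	{"Alias": null, "Name": "employees", "Position": {"Line": 1, "Column": 3}, "Arguments": []}
//
// pruneIrrelevantGraphQLASTNodes returns
//
//	{"Name": "employees"}
//
// since null values, empty arrays/objects and all "Position" location info
// are dropped.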
+ result := pruneIrrelevantGraphQLASTNodes(value.(ast.Object)) + + return iter(ast.NewTerm(result)) +} + +func builtinGraphQLParseSchema(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + raw, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // Get the highly-nested AST struct, along with any errors generated. + schema, err := parseSchema(string(raw)) + if err != nil { + return err + } + + // Transform the AST into an Object. + value, err := ast.InterfaceToValue(schema) + if err != nil { + return err + } + + // Recursively remove irrelevant AST structures. + result := pruneIrrelevantGraphQLASTNodes(value.(ast.Object)) + + return iter(ast.NewTerm(result)) +} + +func builtinGraphQLIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + var queryDoc *gqlast.QueryDocument + var schemaDoc *gqlast.SchemaDocument + var err error + + switch x := operands[0].Value.(type) { + case ast.String: + queryDoc, err = parseQuery(string(x)) + case ast.Object: + queryDoc, err = objectToQueryDocument(x) + default: + // Error if wrong type. + return iter(ast.InternedBooleanTerm(false)) + } + if err != nil { + return iter(ast.InternedBooleanTerm(false)) + } + + switch x := operands[1].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. + return iter(ast.InternedBooleanTerm(false)) + } + if err != nil { + return iter(ast.InternedBooleanTerm(false)) + } + + // Validate the query against the schema, erroring if there's an issue. + schema, err := convertSchema(schemaDoc) + if err != nil { + return iter(ast.InternedBooleanTerm(false)) + } + if err := validateQuery(schema, queryDoc); err != nil { + return iter(ast.InternedBooleanTerm(false)) + } + + // If we got this far, the GraphQL query passed validation. + return iter(ast.InternedBooleanTerm(true)) +} + +func builtinGraphQLSchemaIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + var schemaDoc *gqlast.SchemaDocument + var err error + + switch x := operands[0].Value.(type) { + case ast.String: + schemaDoc, err = parseSchema(string(x)) + case ast.Object: + schemaDoc, err = objectToSchemaDocument(x) + default: + // Error if wrong type. + return iter(ast.InternedBooleanTerm(false)) + } + if err != nil { + return iter(ast.InternedBooleanTerm(false)) + } + + // Validate the schema, this determines the result + _, err = convertSchema(schemaDoc) + return iter(ast.InternedBooleanTerm(err == nil)) +} + +func init() { + RegisterBuiltinFunc(ast.GraphQLParse.Name, builtinGraphQLParse) + RegisterBuiltinFunc(ast.GraphQLParseAndVerify.Name, builtinGraphQLParseAndVerify) + RegisterBuiltinFunc(ast.GraphQLParseQuery.Name, builtinGraphQLParseQuery) + RegisterBuiltinFunc(ast.GraphQLParseSchema.Name, builtinGraphQLParseSchema) + RegisterBuiltinFunc(ast.GraphQLIsValid.Name, builtinGraphQLIsValid) + RegisterBuiltinFunc(ast.GraphQLSchemaIsValid.Name, builtinGraphQLSchemaIsValid) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go new file mode 100644 index 000000000..71c7c7d9e --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/http.go @@ -0,0 +1,1639 @@ +// Copyright 2018 The OPA Authors. All rights reserved. 
+// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "math" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "time" + + "github.com/open-policy-agent/opa/internal/version" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/tracing" + "github.com/open-policy-agent/opa/v1/util" +) + +type cachingMode string + +const ( + defaultHTTPRequestTimeoutEnv = "HTTP_SEND_TIMEOUT" + defaultCachingMode cachingMode = "serialized" + cachingModeDeserialized cachingMode = "deserialized" +) + +var defaultHTTPRequestTimeout = time.Second * 5 + +var allowedKeyNames = [...]string{ + "method", + "url", + "body", + "enable_redirect", + "force_json_decode", + "force_yaml_decode", + "headers", + "raw_body", + "tls_use_system_certs", + "tls_ca_cert", + "tls_ca_cert_file", + "tls_ca_cert_env_variable", + "tls_client_cert", + "tls_client_cert_file", + "tls_client_cert_env_variable", + "tls_client_key", + "tls_client_key_file", + "tls_client_key_env_variable", + "tls_insecure_skip_verify", + "tls_server_name", + "timeout", + "cache", + "force_cache", + "force_cache_duration_seconds", + "raise_error", + "caching_mode", + "max_retry_attempts", + "cache_ignored_headers", +} + +// ref: https://www.rfc-editor.org/rfc/rfc7231#section-6.1 +var cacheableHTTPStatusCodes = [...]int{ + http.StatusOK, + http.StatusNonAuthoritativeInfo, + http.StatusNoContent, + http.StatusPartialContent, + http.StatusMultipleChoices, + http.StatusMovedPermanently, + http.StatusNotFound, + http.StatusMethodNotAllowed, + http.StatusGone, + http.StatusRequestURITooLong, + http.StatusNotImplemented, +} + +var ( + codeTerm = ast.StringTerm("code") + messageTerm = ast.StringTerm("message") + statusCodeTerm = ast.StringTerm("status_code") + errorTerm = ast.StringTerm("error") + methodTerm = ast.StringTerm("method") + urlTerm = ast.StringTerm("url") + + httpSendNetworkErrTerm = ast.StringTerm(HTTPSendNetworkErr) + httpSendInternalErrTerm = ast.StringTerm(HTTPSendInternalErr) +) + +var ( + allowedKeys = ast.NewSet() + keyCache = make(map[string]*ast.Term, len(allowedKeyNames)) + cacheableCodes = ast.NewSet() + requiredKeys = ast.NewSet(methodTerm, urlTerm) + httpSendLatencyMetricKey = "rego_builtin_http_send" + httpSendInterQueryCacheHits = httpSendLatencyMetricKey + "_interquery_cache_hits" +) + +type httpSendKey string + +// CustomizeRoundTripper allows customizing an existing http.Transport, +// to the returned value, which could be the same Transport or a new one. +type CustomizeRoundTripper func(*http.Transport) http.RoundTripper + +const ( + // httpSendBuiltinCacheKey is the key in the builtin context cache that + // points to the http.send() specific cache resides at. + httpSendBuiltinCacheKey httpSendKey = "HTTP_SEND_CACHE_KEY" + + // HTTPSendInternalErr represents a runtime evaluation error. + HTTPSendInternalErr string = "eval_http_send_internal_error" + + // HTTPSendNetworkErr represents a network error. + HTTPSendNetworkErr string = "eval_http_send_network_error" + + // minRetryDelay is amount of time to backoff after the first failure. + minRetryDelay = time.Millisecond * 100 + + // maxRetryDelay is the upper bound of backoff delay. 
+ maxRetryDelay = time.Second * 60 +) + +func builtinHTTPSend(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + obj, err := builtins.ObjectOperand(operands[0].Value, 1) + if err != nil { + return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) + } + + raiseError, err := getRaiseErrorValue(obj) + if err != nil { + return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) + } + + req, err := validateHTTPRequestOperand(operands[0], 1) + if err != nil { + if raiseError { + return handleHTTPSendErr(bctx, err) + } + + return iter(generateRaiseErrorResult(handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err))) + } + + result, err := getHTTPResponse(bctx, req) + if err != nil { + if raiseError { + return handleHTTPSendErr(bctx, err) + } + + result = generateRaiseErrorResult(err) + } + return iter(result) +} + +func generateRaiseErrorResult(err error) *ast.Term { + var errObj ast.Object + switch err.(type) { + case *url.Error: + errObj = ast.NewObject( + ast.Item(codeTerm, httpSendNetworkErrTerm), + ast.Item(messageTerm, ast.StringTerm(err.Error())), + ) + default: + errObj = ast.NewObject( + ast.Item(codeTerm, httpSendInternalErrTerm), + ast.Item(messageTerm, ast.StringTerm(err.Error())), + ) + } + + return ast.NewTerm(ast.NewObject( + ast.Item(statusCodeTerm, ast.InternedIntNumberTerm(0)), + ast.Item(errorTerm, ast.NewTerm(errObj)), + )) +} + +func getHTTPResponse(bctx BuiltinContext, req ast.Object) (*ast.Term, error) { + + bctx.Metrics.Timer(httpSendLatencyMetricKey).Start() + defer bctx.Metrics.Timer(httpSendLatencyMetricKey).Stop() + + key, err := getKeyFromRequest(req) + if err != nil { + return nil, err + } + + reqExecutor, err := newHTTPRequestExecutor(bctx, req, key) + if err != nil { + return nil, err + } + // Check if cache already has a response for this query + // set headers to exclude cache_ignored_headers + resp, err := reqExecutor.CheckCache() + if err != nil { + return nil, err + } + + if resp == nil { + httpResp, err := reqExecutor.ExecuteHTTPRequest() + if err != nil { + reqExecutor.InsertErrorIntoCache(err) + return nil, err + } + defer util.Close(httpResp) + // Add result to intra/inter-query cache. 
+ resp, err = reqExecutor.InsertIntoCache(httpResp) + if err != nil { + return nil, err + } + } + + return ast.NewTerm(resp), nil +} + +// getKeyFromRequest returns a key to be used for caching HTTP responses +// deletes headers from request object mentioned in cache_ignored_headers +func getKeyFromRequest(req ast.Object) (ast.Object, error) { + // deep copy so changes to key do not reflect in the request object + key := req.Copy() + cacheIgnoredHeadersTerm := req.Get(keyCache["cache_ignored_headers"]) + allHeadersTerm := req.Get(ast.StringTerm("headers")) + // skip because no headers to delete + if cacheIgnoredHeadersTerm == nil || allHeadersTerm == nil { + // need to explicitly set cache_ignored_headers to null + // equivalent requests might have different sets of exclusion lists + key.Insert(ast.StringTerm("cache_ignored_headers"), ast.InternedNullTerm) + return key, nil + } + var cacheIgnoredHeaders []string + err := ast.As(cacheIgnoredHeadersTerm.Value, &cacheIgnoredHeaders) + if err != nil { + return nil, err + } + var allHeaders map[string]interface{} + err = ast.As(allHeadersTerm.Value, &allHeaders) + if err != nil { + return nil, err + } + for _, header := range cacheIgnoredHeaders { + delete(allHeaders, header) + } + val, err := ast.InterfaceToValue(allHeaders) + if err != nil { + return nil, err + } + key.Insert(keyCache["headers"], ast.NewTerm(val)) + // remove cache_ignored_headers key + key.Insert(keyCache["cache_ignored_headers"], ast.InternedNullTerm) + return key, nil +} + +func init() { + createKeys() + createCacheableHTTPStatusCodes() + initDefaults() + RegisterBuiltinFunc(ast.HTTPSend.Name, builtinHTTPSend) +} + +func handleHTTPSendErr(bctx BuiltinContext, err error) error { + // Return HTTP client timeout errors in a generic error message to avoid confusion about what happened. + // Do not do this if the builtin context was cancelled and is what caused the request to stop. + if urlErr, ok := err.(*url.Error); ok && urlErr.Timeout() && bctx.Context.Err() == nil { + err = fmt.Errorf("%s %s: request timed out", urlErr.Op, urlErr.URL) + } + if err := bctx.Context.Err(); err != nil { + return Halt{ + Err: &Error{ + Code: CancelErr, + Message: fmt.Sprintf("http.send: timed out (%s)", err.Error()), + }, + } + } + return handleBuiltinErr(ast.HTTPSend.Name, bctx.Location, err) +} + +func initDefaults() { + timeoutDuration := os.Getenv(defaultHTTPRequestTimeoutEnv) + if timeoutDuration != "" { + var err error + defaultHTTPRequestTimeout, err = time.ParseDuration(timeoutDuration) + if err != nil { + // If it is set to something not valid don't let the process continue in a state + // that will almost definitely give unexpected results by having it set at 0 + // which means no timeout.. + // This environment variable isn't considered part of the public API. + // TODO(patrick-east): Remove the environment variable + panic(fmt.Sprintf("invalid value for HTTP_SEND_TIMEOUT: %s", err)) + } + } +} + +func validateHTTPRequestOperand(term *ast.Term, pos int) (ast.Object, error) { + + obj, err := builtins.ObjectOperand(term.Value, pos) + if err != nil { + return nil, err + } + + requestKeys := ast.NewSet(obj.Keys()...) 
+ + invalidKeys := requestKeys.Diff(allowedKeys) + if invalidKeys.Len() != 0 { + return nil, builtins.NewOperandErr(pos, "invalid request parameters(s): %v", invalidKeys) + } + + missingKeys := requiredKeys.Diff(requestKeys) + if missingKeys.Len() != 0 { + return nil, builtins.NewOperandErr(pos, "missing required request parameters(s): %v", missingKeys) + } + + return obj, nil + +} + +// canonicalizeHeaders returns a copy of the headers where the keys are in +// canonical HTTP form. +func canonicalizeHeaders(headers map[string]interface{}) map[string]interface{} { + canonicalized := map[string]interface{}{} + + for k, v := range headers { + canonicalized[http.CanonicalHeaderKey(k)] = v + } + + return canonicalized +} + +// useSocket examines the url for "unix://" and returns a *http.Transport with +// a DialContext that opens a socket (specified in the http call). +// The url is expected to contain socket=/path/to/socket (url encoded) +// Ex. "unix://localhost/end/point?socket=%2Ftmp%2Fhttp.sock" +func useSocket(rawURL string, tlsConfig *tls.Config) (bool, string, *http.Transport) { + u, err := url.Parse(rawURL) + if err != nil { + return false, "", nil + } + + if u.Scheme != "unix" || u.RawQuery == "" { + return false, rawURL, nil + } + + v, err := url.ParseQuery(u.RawQuery) + if err != nil { + return false, rawURL, nil + } + + // Rewrite URL targeting the UNIX domain socket. + u.Scheme = "http" + + // Extract the path to the socket. + // Only retrieve the first value. Subsequent values are ignored and removed + // to prevent HTTP parameter pollution. + socket := v.Get("socket") + v.Del("socket") + u.RawQuery = v.Encode() + + tr := http.DefaultTransport.(*http.Transport).Clone() + tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { + return http.DefaultTransport.(*http.Transport).DialContext(ctx, "unix", socket) + } + tr.TLSClientConfig = tlsConfig + tr.DisableKeepAlives = true + + return true, u.String(), tr +} + +func verifyHost(bctx BuiltinContext, host string) error { + if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil { + return nil + } + + for _, allowed := range bctx.Capabilities.AllowNet { + if allowed == host { + return nil + } + } + + return fmt.Errorf("unallowed host: %s", host) +} + +func verifyURLHost(bctx BuiltinContext, unverifiedURL string) error { + // Eager return to avoid unnecessary URL parsing + if bctx.Capabilities == nil || bctx.Capabilities.AllowNet == nil { + return nil + } + + parsedURL, err := url.Parse(unverifiedURL) + if err != nil { + return err + } + + host := strings.Split(parsedURL.Host, ":")[0] + + return verifyHost(bctx, host) +} + +func createHTTPRequest(bctx BuiltinContext, obj ast.Object) (*http.Request, *http.Client, error) { + var ( + url, method string + // Additional CA certificates loading options. + tlsCaCert []byte + tlsCaCertEnvVar, tlsCaCertFile string + // Client TLS certificate and key options. Each input source + // comes in a matched pair. 
+ tlsClientCert, tlsClientKey []byte + tlsClientCertEnvVar, tlsClientKeyEnvVar string + tlsClientCertFile, tlsClientKeyFile, tlsServerName string + + body, rawBody *bytes.Buffer + enableRedirect, tlsInsecureSkipVerify bool + tlsUseSystemCerts *bool + tlsConfig tls.Config + customHeaders map[string]interface{} + ) + + timeout := defaultHTTPRequestTimeout + + for _, val := range obj.Keys() { + key, err := ast.JSON(val.Value) + if err != nil { + return nil, nil, err + } + + key = key.(string) + + var strVal string + + if s, ok := obj.Get(val).Value.(ast.String); ok { + strVal = strings.Trim(string(s), "\"") + } else { + // Most parameters are strings, so consolidate the type checking. + switch key { + case "method", + "url", + "raw_body", + "tls_ca_cert", + "tls_ca_cert_file", + "tls_ca_cert_env_variable", + "tls_client_cert", + "tls_client_cert_file", + "tls_client_cert_env_variable", + "tls_client_key", + "tls_client_key_file", + "tls_client_key_env_variable", + "tls_server_name": + return nil, nil, fmt.Errorf("%q must be a string", key) + } + } + + switch key { + case "method": + method = strings.ToUpper(strVal) + case "url": + err := verifyURLHost(bctx, strVal) + if err != nil { + return nil, nil, err + } + url = strVal + case "enable_redirect": + enableRedirect, err = strconv.ParseBool(obj.Get(val).String()) + if err != nil { + return nil, nil, err + } + case "body": + bodyVal := obj.Get(val).Value + bodyValInterface, err := ast.JSON(bodyVal) + if err != nil { + return nil, nil, err + } + + bodyValBytes, err := json.Marshal(bodyValInterface) + if err != nil { + return nil, nil, err + } + body = bytes.NewBuffer(bodyValBytes) + case "raw_body": + rawBody = bytes.NewBufferString(strVal) + case "tls_use_system_certs": + tempTLSUseSystemCerts, err := strconv.ParseBool(obj.Get(val).String()) + if err != nil { + return nil, nil, err + } + tlsUseSystemCerts = &tempTLSUseSystemCerts + case "tls_ca_cert": + tlsCaCert = []byte(strVal) + case "tls_ca_cert_file": + tlsCaCertFile = strVal + case "tls_ca_cert_env_variable": + tlsCaCertEnvVar = strVal + case "tls_client_cert": + tlsClientCert = []byte(strVal) + case "tls_client_cert_file": + tlsClientCertFile = strVal + case "tls_client_cert_env_variable": + tlsClientCertEnvVar = strVal + case "tls_client_key": + tlsClientKey = []byte(strVal) + case "tls_client_key_file": + tlsClientKeyFile = strVal + case "tls_client_key_env_variable": + tlsClientKeyEnvVar = strVal + case "tls_server_name": + tlsServerName = strVal + case "headers": + headersVal := obj.Get(val).Value + headersValInterface, err := ast.JSON(headersVal) + if err != nil { + return nil, nil, err + } + var ok bool + customHeaders, ok = headersValInterface.(map[string]interface{}) + if !ok { + return nil, nil, fmt.Errorf("invalid type for headers key") + } + case "tls_insecure_skip_verify": + tlsInsecureSkipVerify, err = strconv.ParseBool(obj.Get(val).String()) + if err != nil { + return nil, nil, err + } + case "timeout": + timeout, err = parseTimeout(obj.Get(val).Value) + if err != nil { + return nil, nil, err + } + case "cache", "caching_mode", + "force_cache", "force_cache_duration_seconds", + "force_json_decode", "force_yaml_decode", + "raise_error", "max_retry_attempts", "cache_ignored_headers": // no-op + default: + return nil, nil, fmt.Errorf("invalid parameter %q", key) + } + } + + isTLS := false + client := &http.Client{ + Timeout: timeout, + CheckRedirect: func(*http.Request, []*http.Request) error { + return http.ErrUseLastResponse + }, + } + + if tlsInsecureSkipVerify { + isTLS = 
true + tlsConfig.InsecureSkipVerify = tlsInsecureSkipVerify + } + + if len(tlsClientCert) > 0 && len(tlsClientKey) > 0 { + cert, err := tls.X509KeyPair(tlsClientCert, tlsClientKey) + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + + if tlsClientCertFile != "" && tlsClientKeyFile != "" { + cert, err := tls.LoadX509KeyPair(tlsClientCertFile, tlsClientKeyFile) + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + + if tlsClientCertEnvVar != "" && tlsClientKeyEnvVar != "" { + cert, err := tls.X509KeyPair( + []byte(os.Getenv(tlsClientCertEnvVar)), + []byte(os.Getenv(tlsClientKeyEnvVar))) + if err != nil { + return nil, nil, fmt.Errorf("cannot extract public/private key pair from envvars %q, %q: %w", + tlsClientCertEnvVar, tlsClientKeyEnvVar, err) + } + + isTLS = true + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + + // Use system certs if no CA cert is provided + // or system certs flag is not set + if len(tlsCaCert) == 0 && tlsCaCertFile == "" && tlsCaCertEnvVar == "" && tlsUseSystemCerts == nil { + trueValue := true + tlsUseSystemCerts = &trueValue + } + + // Check the system certificates config first so that we + // load additional certificated into the correct pool. + if tlsUseSystemCerts != nil && *tlsUseSystemCerts && runtime.GOOS != "windows" { + pool, err := x509.SystemCertPool() + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.RootCAs = pool + } + + if len(tlsCaCert) != 0 { + tlsCaCert = bytes.Replace(tlsCaCert, []byte("\\n"), []byte("\n"), -1) + pool, err := addCACertsFromBytes(tlsConfig.RootCAs, tlsCaCert) + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.RootCAs = pool + } + + if tlsCaCertFile != "" { + pool, err := addCACertsFromFile(tlsConfig.RootCAs, tlsCaCertFile) + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.RootCAs = pool + } + + if tlsCaCertEnvVar != "" { + pool, err := addCACertsFromEnv(tlsConfig.RootCAs, tlsCaCertEnvVar) + if err != nil { + return nil, nil, err + } + + isTLS = true + tlsConfig.RootCAs = pool + } + + var transport *http.Transport + if isTLS { + if ok, parsedURL, tr := useSocket(url, &tlsConfig); ok { + transport = tr + url = parsedURL + } else { + transport = http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = &tlsConfig + transport.DisableKeepAlives = true + } + } else { + if ok, parsedURL, tr := useSocket(url, nil); ok { + transport = tr + url = parsedURL + } + } + + if bctx.RoundTripper != nil { + client.Transport = bctx.RoundTripper(transport) + } else if transport != nil { + client.Transport = transport + } + + // check if redirects are enabled + if enableRedirect { + client.CheckRedirect = func(req *http.Request, _ []*http.Request) error { + return verifyURLHost(bctx, req.URL.String()) + } + } + + if rawBody != nil { + body = rawBody + } else if body == nil { + body = bytes.NewBufferString("") + } + + // create the http request, use the builtin context's context to ensure + // the request is cancelled if evaluation is cancelled. 
+ req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, nil, err + } + + req = req.WithContext(bctx.Context) + + // Add custom headers + if len(customHeaders) != 0 { + customHeaders = canonicalizeHeaders(customHeaders) + + for k, v := range customHeaders { + header, ok := v.(string) + if !ok { + return nil, nil, fmt.Errorf("invalid type for headers value %q", v) + } + + req.Header.Add(k, header) + } + + // Don't overwrite or append to one that was set in the custom headers + if _, hasUA := customHeaders["User-Agent"]; !hasUA { + req.Header.Add("User-Agent", version.UserAgent) + } + + // If the caller specifies the Host header, use it for the HTTP + // request host and the TLS server name. + if host, hasHost := customHeaders["Host"]; hasHost { + host := host.(string) // We already checked that it's a string. + req.Host = host + + // Only default the ServerName if the caller has + // specified the host. If we don't specify anything, + // Go will default to the target hostname. This name + // is not the same as the default that Go populates + // `req.Host` with, which is why we don't just set + // this unconditionally. + tlsConfig.ServerName = host + } + } + + if tlsServerName != "" { + tlsConfig.ServerName = tlsServerName + } + + if len(bctx.DistributedTracingOpts) > 0 { + client.Transport = tracing.NewTransport(client.Transport, bctx.DistributedTracingOpts) + } + + return req, client, nil +} + +func executeHTTPRequest(req *http.Request, client *http.Client, inputReqObj ast.Object) (*http.Response, error) { + var err error + var retry int + + retry, err = getNumberValFromReqObj(inputReqObj, keyCache["max_retry_attempts"]) + if err != nil { + return nil, err + } + + for i := 0; true; i++ { + + var resp *http.Response + resp, err = client.Do(req) + if err == nil { + return resp, nil + } + + // final attempt + if i == retry { + break + } + + if err == context.Canceled { + return nil, err + } + + delay := util.DefaultBackoff(float64(minRetryDelay), float64(maxRetryDelay), i) + timer, timerCancel := util.TimerWithCancel(delay) + select { + case <-timer.C: + case <-req.Context().Done(): + timerCancel() // explicitly cancel the timer. + return nil, context.Canceled + } + } + return nil, err +} + +func isContentType(header http.Header, typ ...string) bool { + for _, t := range typ { + if strings.Contains(header.Get("Content-Type"), t) { + return true + } + } + return false +} + +type httpSendCacheEntry struct { + response *ast.Value + error error +} + +// The httpSendCache is used for intra-query caching of http.send results. +type httpSendCache struct { + entries *util.HashMap +} + +func newHTTPSendCache() *httpSendCache { + return &httpSendCache{ + entries: util.NewHashMap(valueEq, valueHash), + } +} + +func valueHash(v util.T) int { + return ast.StringTerm(v.(ast.Value).String()).Hash() +} + +func valueEq(a, b util.T) bool { + av := a.(ast.Value) + bv := b.(ast.Value) + return av.String() == bv.String() +} + +func (cache *httpSendCache) get(k ast.Value) *httpSendCacheEntry { + if v, ok := cache.entries.Get(k); ok { + v := v.(httpSendCacheEntry) + return &v + } + return nil +} + +func (cache *httpSendCache) putResponse(k ast.Value, v *ast.Value) { + cache.entries.Put(k, httpSendCacheEntry{response: v}) +} + +func (cache *httpSendCache) putError(k ast.Value, v error) { + cache.entries.Put(k, httpSendCacheEntry{error: v}) +} + +// In the BuiltinContext cache we only store a single entry that points to +// our ValueMap which is the "real" http.send() cache. 
+func getHTTPSendCache(bctx BuiltinContext) *httpSendCache { + raw, ok := bctx.Cache.Get(httpSendBuiltinCacheKey) + if !ok { + // Initialize if it isn't there + c := newHTTPSendCache() + bctx.Cache.Put(httpSendBuiltinCacheKey, c) + return c + } + + c, ok := raw.(*httpSendCache) + if !ok { + return nil + } + return c +} + +// checkHTTPSendCache checks for the given key's value in the cache +func checkHTTPSendCache(bctx BuiltinContext, key ast.Object) (ast.Value, error) { + requestCache := getHTTPSendCache(bctx) + if requestCache == nil { + return nil, nil + } + + v := requestCache.get(key) + if v != nil { + if v.error != nil { + return nil, v.error + } + if v.response != nil { + return *v.response, nil + } + // This should never happen + } + + return nil, nil +} + +func insertIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, value ast.Value) { + requestCache := getHTTPSendCache(bctx) + if requestCache == nil { + // Should never happen.. if it does just skip caching the value + // FIXME: return error instead, to prevent inconsistencies? + return + } + requestCache.putResponse(key, &value) +} + +func insertErrorIntoHTTPSendCache(bctx BuiltinContext, key ast.Object, err error) { + requestCache := getHTTPSendCache(bctx) + if requestCache == nil { + // Should never happen.. if it does just skip caching the value + // FIXME: return error instead, to prevent inconsistencies? + return + } + requestCache.putError(key, err) +} + +// checkHTTPSendInterQueryCache checks for the given key's value in the inter-query cache +func (c *interQueryCache) checkHTTPSendInterQueryCache() (ast.Value, error) { + requestCache := c.bctx.InterQueryBuiltinCache + + cachedValue, found := requestCache.Get(c.key) + if !found { + return nil, nil + } + + value, cerr := requestCache.Clone(cachedValue) + if cerr != nil { + return nil, handleHTTPSendErr(c.bctx, cerr) + } + + c.bctx.Metrics.Counter(httpSendInterQueryCacheHits).Incr() + var cachedRespData *interQueryCacheData + + switch v := value.(type) { + case *interQueryCacheValue: + var err error + cachedRespData, err = v.copyCacheData() + if err != nil { + return nil, err + } + case *interQueryCacheData: + cachedRespData = v + default: + return nil, nil + } + + if getCurrentTime(c.bctx).Before(cachedRespData.ExpiresAt) { + return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode) + } + + var err error + c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.key) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + headers := parseResponseHeaders(cachedRespData.Headers) + + // check with the server if the stale response is still up-to-date. + // If server returns a new response (ie. status_code=200), update the cache with the new response + // If server returns an unmodified response (ie. 
status_code=304), update the headers for the existing response + result, modified, err := revalidateCachedResponse(c.httpReq, c.httpClient, c.key, headers) + requestCache.Delete(c.key) + if err != nil || result == nil { + return nil, err + } + + defer result.Body.Close() + + if !modified { + // update the headers in the cached response with their corresponding values from the 304 (Not Modified) response + for headerName, values := range result.Header { + cachedRespData.Headers.Del(headerName) + for _, v := range values { + cachedRespData.Headers.Add(headerName, v) + } + } + + if forceCaching(c.forceCacheParams) { + createdAt := getCurrentTime(c.bctx) + cachedRespData.ExpiresAt = createdAt.Add(time.Second * time.Duration(c.forceCacheParams.forceCacheDurationSeconds)) + } else { + expiresAt, err := expiryFromHeaders(result.Header) + if err != nil { + return nil, err + } + cachedRespData.ExpiresAt = expiresAt + } + + cachingMode, err := getCachingMode(c.key) + if err != nil { + return nil, err + } + + var pcv cache.InterQueryCacheValue + + if cachingMode == defaultCachingMode { + pcv, err = cachedRespData.toCacheValue() + if err != nil { + return nil, err + } + } else { + pcv = cachedRespData + } + + c.bctx.InterQueryBuiltinCache.InsertWithExpiry(c.key, pcv, cachedRespData.ExpiresAt) + + return cachedRespData.formatToAST(c.forceJSONDecode, c.forceYAMLDecode) + } + + newValue, respBody, err := formatHTTPResponseToAST(result, c.forceJSONDecode, c.forceYAMLDecode) + if err != nil { + return nil, err + } + + if err := insertIntoHTTPSendInterQueryCache(c.bctx, c.key, result, respBody, c.forceCacheParams); err != nil { + return nil, err + } + + return newValue, nil +} + +// insertIntoHTTPSendInterQueryCache inserts given key and value in the inter-query cache +func insertIntoHTTPSendInterQueryCache(bctx BuiltinContext, key ast.Value, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) error { + if resp == nil || (!forceCaching(cacheParams) && !canStore(resp.Header)) || !cacheableCodes.Contains(ast.InternedIntNumberTerm(resp.StatusCode)) { + return nil + } + + requestCache := bctx.InterQueryBuiltinCache + + obj, ok := key.(ast.Object) + if !ok { + return fmt.Errorf("interface conversion error") + } + + cachingMode, err := getCachingMode(obj) + if err != nil { + return err + } + + var pcv cache.InterQueryCacheValue + var pcvData *interQueryCacheData + if cachingMode == defaultCachingMode { + pcv, pcvData, err = newInterQueryCacheValue(bctx, resp, respBody, cacheParams) + } else { + pcvData, err = newInterQueryCacheData(bctx, resp, respBody, cacheParams) + pcv = pcvData + } + + if err != nil { + return err + } + + requestCache.InsertWithExpiry(key, pcv, pcvData.ExpiresAt) + return nil +} + +func createKeys() { + for _, element := range allowedKeyNames { + term := ast.StringTerm(element) + + allowedKeys.Add(term) + keyCache[element] = term + } +} + +func createCacheableHTTPStatusCodes() { + for _, element := range cacheableHTTPStatusCodes { + cacheableCodes.Add(ast.InternedIntNumberTerm(element)) + } +} + +func parseTimeout(timeoutVal ast.Value) (time.Duration, error) { + var timeout time.Duration + switch t := timeoutVal.(type) { + case ast.Number: + timeoutInt, ok := t.Int64() + if !ok { + return timeout, fmt.Errorf("invalid timeout number value %v, must be int64", timeoutVal) + } + return time.Duration(timeoutInt), nil + case ast.String: + // Support strings without a unit, treat them the same as just a number value (ns) + var err error + timeoutInt, err := 
strconv.ParseInt(string(t), 10, 64) + if err == nil { + return time.Duration(timeoutInt), nil + } + + // Try parsing it as a duration (requires a supported units suffix) + timeout, err = time.ParseDuration(string(t)) + if err != nil { + return timeout, fmt.Errorf("invalid timeout value %v: %s", timeoutVal, err) + } + return timeout, nil + default: + return timeout, builtins.NewOperandErr(1, "'timeout' must be one of {string, number} but got %s", ast.ValueName(t)) + } +} + +func getBoolValFromReqObj(req ast.Object, key *ast.Term) (bool, error) { + var b ast.Boolean + var ok bool + if v := req.Get(key); v != nil { + if b, ok = v.Value.(ast.Boolean); !ok { + return false, fmt.Errorf("invalid value for %v field", key.String()) + } + } + return bool(b), nil +} + +func getNumberValFromReqObj(req ast.Object, key *ast.Term) (int, error) { + term := req.Get(key) + if term == nil { + return 0, nil + } + + if t, ok := term.Value.(ast.Number); ok { + num, ok := t.Int() + if !ok || num < 0 { + return 0, fmt.Errorf("invalid value %v for field %v", t.String(), key.String()) + } + return num, nil + } + + return 0, fmt.Errorf("invalid value %v for field %v", term.String(), key.String()) +} + +func getCachingMode(req ast.Object) (cachingMode, error) { + key := keyCache["caching_mode"] + var s ast.String + var ok bool + if v := req.Get(key); v != nil { + if s, ok = v.Value.(ast.String); !ok { + return "", fmt.Errorf("invalid value for %v field", key.String()) + } + + switch cachingMode(s) { + case defaultCachingMode, cachingModeDeserialized: + return cachingMode(s), nil + default: + return "", fmt.Errorf("invalid value specified for %v field: %v", key.String(), string(s)) + } + } + return defaultCachingMode, nil +} + +type interQueryCacheValue struct { + Data []byte +} + +func newInterQueryCacheValue(bctx BuiltinContext, resp *http.Response, respBody []byte, cacheParams *forceCacheParams) (*interQueryCacheValue, *interQueryCacheData, error) { + data, err := newInterQueryCacheData(bctx, resp, respBody, cacheParams) + if err != nil { + return nil, nil, err + } + + b, err := json.Marshal(data) + if err != nil { + return nil, nil, err + } + return &interQueryCacheValue{Data: b}, data, nil +} + +func (cb interQueryCacheValue) Clone() (cache.InterQueryCacheValue, error) { + dup := make([]byte, len(cb.Data)) + copy(dup, cb.Data) + return &interQueryCacheValue{Data: dup}, nil +} + +func (cb interQueryCacheValue) SizeInBytes() int64 { + return int64(len(cb.Data)) +} + +func (cb *interQueryCacheValue) copyCacheData() (*interQueryCacheData, error) { + var res interQueryCacheData + err := util.UnmarshalJSON(cb.Data, &res) + if err != nil { + return nil, err + } + return &res, nil +} + +type interQueryCacheData struct { + RespBody []byte + Status string + StatusCode int + Headers http.Header + ExpiresAt time.Time +} + +func forceCaching(cacheParams *forceCacheParams) bool { + return cacheParams != nil && cacheParams.forceCacheDurationSeconds > 0 +} + +func expiryFromHeaders(headers http.Header) (time.Time, error) { + var expiresAt time.Time + maxAge, err := parseMaxAgeCacheDirective(parseCacheControlHeader(headers)) + if err != nil { + return time.Time{}, err + } + if maxAge != -1 { + createdAt, err := getResponseHeaderDate(headers) + if err != nil { + return time.Time{}, err + } + expiresAt = createdAt.Add(time.Second * time.Duration(maxAge)) + } else { + expiresAt = getResponseHeaderExpires(headers) + } + return expiresAt, nil +} + +func newInterQueryCacheData(bctx BuiltinContext, resp *http.Response, respBody []byte, 
cacheParams *forceCacheParams) (*interQueryCacheData, error) { + var expiresAt time.Time + + if forceCaching(cacheParams) { + createdAt := getCurrentTime(bctx) + expiresAt = createdAt.Add(time.Second * time.Duration(cacheParams.forceCacheDurationSeconds)) + } else { + var err error + expiresAt, err = expiryFromHeaders(resp.Header) + if err != nil { + return nil, err + } + } + + cv := interQueryCacheData{ + ExpiresAt: expiresAt, + RespBody: respBody, + Status: resp.Status, + StatusCode: resp.StatusCode, + Headers: resp.Header} + + return &cv, nil +} + +func (c *interQueryCacheData) formatToAST(forceJSONDecode, forceYAMLDecode bool) (ast.Value, error) { + return prepareASTResult(c.Headers, forceJSONDecode, forceYAMLDecode, c.RespBody, c.Status, c.StatusCode) +} + +func (c *interQueryCacheData) toCacheValue() (*interQueryCacheValue, error) { + b, err := json.Marshal(c) + if err != nil { + return nil, err + } + return &interQueryCacheValue{Data: b}, nil +} + +func (c *interQueryCacheData) SizeInBytes() int64 { + return 0 +} + +func (c *interQueryCacheData) Clone() (cache.InterQueryCacheValue, error) { + dup := make([]byte, len(c.RespBody)) + copy(dup, c.RespBody) + + return &interQueryCacheData{ + ExpiresAt: c.ExpiresAt, + RespBody: dup, + Status: c.Status, + StatusCode: c.StatusCode, + Headers: c.Headers.Clone()}, nil +} + +type responseHeaders struct { + etag string // identifier for a specific version of the response + lastModified string // date and time response was last modified as per origin server +} + +// deltaSeconds specifies a non-negative integer, representing +// time in seconds: http://tools.ietf.org/html/rfc7234#section-1.2.1 +type deltaSeconds int32 + +func parseResponseHeaders(headers http.Header) *responseHeaders { + result := responseHeaders{} + + result.etag = headers.Get("etag") + + result.lastModified = headers.Get("last-modified") + + return &result +} + +func revalidateCachedResponse(req *http.Request, client *http.Client, inputReqObj ast.Object, headers *responseHeaders) (*http.Response, bool, error) { + etag := headers.etag + lastModified := headers.lastModified + + if etag == "" && lastModified == "" { + return nil, false, nil + } + + cloneReq := req.Clone(req.Context()) + + if etag != "" { + cloneReq.Header.Set("if-none-match", etag) + } + + if lastModified != "" { + cloneReq.Header.Set("if-modified-since", lastModified) + } + + response, err := executeHTTPRequest(cloneReq, client, inputReqObj) + if err != nil { + return nil, false, err + } + + switch response.StatusCode { + case http.StatusOK: + return response, true, nil + + case http.StatusNotModified: + return response, false, nil + } + util.Close(response) + return nil, false, nil +} + +func canStore(headers http.Header) bool { + ccHeaders := parseCacheControlHeader(headers) + + // Check "no-store" cache directive + // The "no-store" response directive indicates that a cache MUST NOT + // store any part of either the immediate request or response. 
+ if _, ok := ccHeaders["no-store"]; ok { + return false + } + return true +} + +func getCurrentTime(bctx BuiltinContext) time.Time { + var current time.Time + + value, err := ast.JSON(bctx.Time.Value) + if err != nil { + return current + } + + valueNum, ok := value.(json.Number) + if !ok { + return current + } + + valueNumInt, err := valueNum.Int64() + if err != nil { + return current + } + + current = time.Unix(0, valueNumInt).UTC() + return current +} + +func parseCacheControlHeader(headers http.Header) map[string]string { + ccDirectives := map[string]string{} + ccHeader := headers.Get("cache-control") + + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + items := strings.Split(part, "=") + if len(items) != 2 { + continue + } + ccDirectives[strings.Trim(items[0], " ")] = strings.Trim(items[1], ",") + } else { + ccDirectives[part] = "" + } + } + + return ccDirectives +} + +func getResponseHeaderDate(headers http.Header) (date time.Time, err error) { + dateHeader := headers.Get("date") + if dateHeader == "" { + err = fmt.Errorf("no date header") + return + } + return http.ParseTime(dateHeader) +} + +func getResponseHeaderExpires(headers http.Header) time.Time { + expiresHeader := headers.Get("expires") + if expiresHeader == "" { + return time.Time{} + } + + date, err := http.ParseTime(expiresHeader) + if err != nil { + // servers can set `Expires: 0` which is an invalid date to indicate expired content + return time.Time{} + } + + return date +} + +// parseMaxAgeCacheDirective parses the max-age directive expressed in delta-seconds as per +// https://tools.ietf.org/html/rfc7234#section-1.2.1 +func parseMaxAgeCacheDirective(cc map[string]string) (deltaSeconds, error) { + maxAge, ok := cc["max-age"] + if !ok { + return deltaSeconds(-1), nil + } + + val, err := strconv.ParseUint(maxAge, 10, 32) + if err != nil { + if numError, ok := err.(*strconv.NumError); ok { + if numError.Err == strconv.ErrRange { + return deltaSeconds(math.MaxInt32), nil + } + } + return deltaSeconds(-1), err + } + + if val > math.MaxInt32 { + return deltaSeconds(math.MaxInt32), nil + } + return deltaSeconds(val), nil +} + +func formatHTTPResponseToAST(resp *http.Response, forceJSONDecode, forceYAMLDecode bool) (ast.Value, []byte, error) { + + resultRawBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, nil, err + } + + resultObj, err := prepareASTResult(resp.Header, forceJSONDecode, forceYAMLDecode, resultRawBody, resp.Status, resp.StatusCode) + if err != nil { + return nil, nil, err + } + + return resultObj, resultRawBody, nil +} + +func prepareASTResult(headers http.Header, forceJSONDecode, forceYAMLDecode bool, body []byte, status string, statusCode int) (ast.Value, error) { + var resultBody interface{} + + // If the response body cannot be JSON/YAML decoded, + // an error will not be returned. Instead, the "body" field + // in the result will be null. 
+ switch { + case forceJSONDecode || isContentType(headers, "application/json"): + _ = util.UnmarshalJSON(body, &resultBody) + case forceYAMLDecode || isContentType(headers, "application/yaml", "application/x-yaml"): + _ = util.Unmarshal(body, &resultBody) + } + + result := make(map[string]interface{}) + result["status"] = status + result["status_code"] = statusCode + result["body"] = resultBody + result["raw_body"] = string(body) + result["headers"] = getResponseHeaders(headers) + + resultObj, err := ast.InterfaceToValue(result) + if err != nil { + return nil, err + } + + return resultObj, nil +} + +func getResponseHeaders(headers http.Header) map[string]interface{} { + respHeaders := map[string]interface{}{} + for headerName, values := range headers { + var respValues []interface{} + for _, v := range values { + respValues = append(respValues, v) + } + respHeaders[strings.ToLower(headerName)] = respValues + } + return respHeaders +} + +// httpRequestExecutor defines an interface for the http send cache +type httpRequestExecutor interface { + CheckCache() (ast.Value, error) + InsertIntoCache(value *http.Response) (ast.Value, error) + InsertErrorIntoCache(err error) + ExecuteHTTPRequest() (*http.Response, error) +} + +// newHTTPRequestExecutor returns a new HTTP request executor that wraps either an inter-query or +// intra-query cache implementation +func newHTTPRequestExecutor(bctx BuiltinContext, req ast.Object, key ast.Object) (httpRequestExecutor, error) { + useInterQueryCache, forceCacheParams, err := useInterQueryCache(req) + if err != nil { + return nil, handleHTTPSendErr(bctx, err) + } + + if useInterQueryCache && bctx.InterQueryBuiltinCache != nil { + return newInterQueryCache(bctx, req, key, forceCacheParams) + } + return newIntraQueryCache(bctx, req, key) +} + +type interQueryCache struct { + bctx BuiltinContext + req ast.Object + key ast.Object + httpReq *http.Request + httpClient *http.Client + forceJSONDecode bool + forceYAMLDecode bool + forceCacheParams *forceCacheParams +} + +func newInterQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object, forceCacheParams *forceCacheParams) (*interQueryCache, error) { + return &interQueryCache{bctx: bctx, req: req, key: key, forceCacheParams: forceCacheParams}, nil +} + +// CheckCache checks the cache for the value of the key set on this object +func (c *interQueryCache) CheckCache() (ast.Value, error) { + var err error + + // Checking the intra-query cache first ensures consistency of errors and HTTP responses within a query. + resp, err := checkHTTPSendCache(c.bctx, c.key) + if err != nil { + return nil, err + } + if resp != nil { + return resp, nil + } + + c.forceJSONDecode, err = getBoolValFromReqObj(c.key, keyCache["force_json_decode"]) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + c.forceYAMLDecode, err = getBoolValFromReqObj(c.key, keyCache["force_yaml_decode"]) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + resp, err = c.checkHTTPSendInterQueryCache() + // Always insert the result of the inter-query cache into the intra-query cache, to maintain consistency within the same query. 
+ if err != nil { + insertErrorIntoHTTPSendCache(c.bctx, c.key, err) + } + if resp != nil { + insertIntoHTTPSendCache(c.bctx, c.key, resp) + } + return resp, err +} + +// InsertIntoCache inserts the key set on this object into the cache with the given value +func (c *interQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) { + result, respBody, err := formatHTTPResponseToAST(value, c.forceJSONDecode, c.forceYAMLDecode) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + // Always insert into the intra-query cache, to maintain consistency within the same query. + insertIntoHTTPSendCache(c.bctx, c.key, result) + + // We ignore errors when populating the inter-query cache, because we've already populated the intra-cache, + // and query consistency is our primary concern. + _ = insertIntoHTTPSendInterQueryCache(c.bctx, c.key, value, respBody, c.forceCacheParams) + return result, nil +} + +func (c *interQueryCache) InsertErrorIntoCache(err error) { + insertErrorIntoHTTPSendCache(c.bctx, c.key, err) +} + +// ExecuteHTTPRequest executes a HTTP request +func (c *interQueryCache) ExecuteHTTPRequest() (*http.Response, error) { + var err error + c.httpReq, c.httpClient, err = createHTTPRequest(c.bctx, c.req) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + return executeHTTPRequest(c.httpReq, c.httpClient, c.req) +} + +type intraQueryCache struct { + bctx BuiltinContext + req ast.Object + key ast.Object +} + +func newIntraQueryCache(bctx BuiltinContext, req ast.Object, key ast.Object) (*intraQueryCache, error) { + return &intraQueryCache{bctx: bctx, req: req, key: key}, nil +} + +// CheckCache checks the cache for the value of the key set on this object +func (c *intraQueryCache) CheckCache() (ast.Value, error) { + return checkHTTPSendCache(c.bctx, c.key) +} + +// InsertIntoCache inserts the key set on this object into the cache with the given value +func (c *intraQueryCache) InsertIntoCache(value *http.Response) (ast.Value, error) { + forceJSONDecode, err := getBoolValFromReqObj(c.key, keyCache["force_json_decode"]) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + forceYAMLDecode, err := getBoolValFromReqObj(c.key, keyCache["force_yaml_decode"]) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + result, _, err := formatHTTPResponseToAST(value, forceJSONDecode, forceYAMLDecode) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + + if cacheableCodes.Contains(ast.InternedIntNumberTerm(value.StatusCode)) { + insertIntoHTTPSendCache(c.bctx, c.key, result) + } + + return result, nil +} + +func (c *intraQueryCache) InsertErrorIntoCache(err error) { + insertErrorIntoHTTPSendCache(c.bctx, c.key, err) +} + +// ExecuteHTTPRequest executes a HTTP request +func (c *intraQueryCache) ExecuteHTTPRequest() (*http.Response, error) { + httpReq, httpClient, err := createHTTPRequest(c.bctx, c.req) + if err != nil { + return nil, handleHTTPSendErr(c.bctx, err) + } + return executeHTTPRequest(httpReq, httpClient, c.req) +} + +func useInterQueryCache(req ast.Object) (bool, *forceCacheParams, error) { + value, err := getBoolValFromReqObj(req, keyCache["cache"]) + if err != nil { + return false, nil, err + } + + valueForceCache, err := getBoolValFromReqObj(req, keyCache["force_cache"]) + if err != nil { + return false, nil, err + } + + if valueForceCache { + forceCacheParams, err := newForceCacheParams(req) + return true, forceCacheParams, err + } + + return value, nil, nil +} + +type forceCacheParams struct { + 
forceCacheDurationSeconds int32 +} + +func newForceCacheParams(req ast.Object) (*forceCacheParams, error) { + term := req.Get(keyCache["force_cache_duration_seconds"]) + if term == nil { + return nil, fmt.Errorf("'force_cache' set but 'force_cache_duration_seconds' parameter is missing") + } + + forceCacheDurationSeconds := term.String() + + value, err := strconv.ParseInt(forceCacheDurationSeconds, 10, 32) + if err != nil { + return nil, err + } + + return &forceCacheParams{forceCacheDurationSeconds: int32(value)}, nil +} + +func getRaiseErrorValue(req ast.Object) (bool, error) { + result := ast.Boolean(true) + var ok bool + if v := req.Get(keyCache["raise_error"]); v != nil { + if result, ok = v.Value.(ast.Boolean); !ok { + return false, fmt.Errorf("invalid value for raise_error field") + } + } + return bool(result), nil +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/http_fixup.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/http_fixup.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/http_fixup_darwin.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/http_fixup_darwin.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/http_fixup_darwin.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/input.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/input.go similarity index 98% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/input.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/input.go index cb70aeb71..dccf94d89 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/input.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/input.go @@ -7,7 +7,7 @@ package topdown import ( "fmt" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) var errBadPath = fmt.Errorf("bad document path") diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go new file mode 100644 index 000000000..93da1d002 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/instrumentation.go @@ -0,0 +1,63 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import "github.com/open-policy-agent/opa/v1/metrics" + +const ( + evalOpPlug = "eval_op_plug" + evalOpResolve = "eval_op_resolve" + evalOpRuleIndex = "eval_op_rule_index" + evalOpBuiltinCall = "eval_op_builtin_call" + evalOpVirtualCacheHit = "eval_op_virtual_cache_hit" + evalOpVirtualCacheMiss = "eval_op_virtual_cache_miss" + evalOpBaseCacheHit = "eval_op_base_cache_hit" + evalOpBaseCacheMiss = "eval_op_base_cache_miss" + evalOpComprehensionCacheSkip = "eval_op_comprehension_cache_skip" + evalOpComprehensionCacheBuild = "eval_op_comprehension_cache_build" + evalOpComprehensionCacheHit = "eval_op_comprehension_cache_hit" + evalOpComprehensionCacheMiss = "eval_op_comprehension_cache_miss" + partialOpSaveUnify = "partial_op_save_unify" + partialOpSaveSetContains = "partial_op_save_set_contains" + partialOpSaveSetContainsRec = "partial_op_save_set_contains_rec" + partialOpCopyPropagation = "partial_op_copy_propagation" +) + +// Instrumentation implements helper functions to instrument query evaluation +// to diagnose performance issues. Instrumentation may be expensive in some +// cases, so it is disabled by default. +type Instrumentation struct { + m metrics.Metrics +} + +// NewInstrumentation returns a new Instrumentation object. Performance +// diagnostics recorded on this Instrumentation object will stored in m. +func NewInstrumentation(m metrics.Metrics) *Instrumentation { + return &Instrumentation{ + m: m, + } +} + +func (instr *Instrumentation) startTimer(name string) { + if instr == nil { + return + } + instr.m.Timer(name).Start() +} + +func (instr *Instrumentation) stopTimer(name string) { + if instr == nil { + return + } + delta := instr.m.Timer(name).Stop() + instr.m.Histogram(name).Update(delta) +} + +func (instr *Instrumentation) counterIncr(name string) { + if instr == nil { + return + } + instr.m.Counter(name).Incr() +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go new file mode 100644 index 000000000..5b7c414e4 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/json.go @@ -0,0 +1,405 @@ +// Copyright 2019 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "fmt" + "strconv" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + + "github.com/open-policy-agent/opa/internal/edittree" +) + +func builtinJSONRemove(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + // Expect an object and a string or array/set of strings + _, err := builtins.ObjectOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // Build a list of json pointers to remove + paths, err := getJSONPaths(operands[1].Value) + if err != nil { + return err + } + + newObj, err := jsonRemove(operands[0], ast.NewTerm(pathsToObject(paths))) + if err != nil { + return err + } + + if newObj == nil { + return nil + } + + return iter(newObj) +} + +// jsonRemove returns a new term that is the result of walking +// through a and omitting removing any values that are in b but +// have ast.Null values (ie leaf nodes for b). 
+func jsonRemove(a *ast.Term, b *ast.Term) (*ast.Term, error) { + if b == nil { + // The paths diverged, return a + return a, nil + } + + var bObj ast.Object + switch bValue := b.Value.(type) { + case ast.Object: + bObj = bValue + case ast.Null: + // Means we hit a leaf node on "b", dont add the value for a + return nil, nil + default: + // The paths diverged, return a + return a, nil + } + + switch aValue := a.Value.(type) { + case ast.String, ast.Number, ast.Boolean, ast.Null: + return a, nil + case ast.Object: + newObj := ast.NewObject() + err := aValue.Iter(func(k *ast.Term, v *ast.Term) error { + // recurse and add the diff of sub objects as needed + diffValue, err := jsonRemove(v, bObj.Get(k)) + if err != nil || diffValue == nil { + return err + } + newObj.Insert(k, diffValue) + return nil + }) + if err != nil { + return nil, err + } + return ast.NewTerm(newObj), nil + case ast.Set: + newSet := ast.NewSet() + err := aValue.Iter(func(v *ast.Term) error { + // recurse and add the diff of sub objects as needed + diffValue, err := jsonRemove(v, bObj.Get(v)) + if err != nil || diffValue == nil { + return err + } + newSet.Add(diffValue) + return nil + }) + if err != nil { + return nil, err + } + return ast.NewTerm(newSet), nil + case *ast.Array: + // When indexes are removed we shift left to close empty spots in the array + // as per the JSON patch spec. + newArray := ast.NewArray() + for i := 0; i < aValue.Len(); i++ { + v := aValue.Elem(i) + // recurse and add the diff of sub objects as needed + // Note: Keys in b will be strings for the index, eg path /a/1/b => {"a": {"1": {"b": null}}} + diffValue, err := jsonRemove(v, bObj.Get(ast.StringTerm(strconv.Itoa(i)))) + if err != nil { + return nil, err + } + if diffValue != nil { + newArray = newArray.Append(diffValue) + } + } + return ast.NewTerm(newArray), nil + default: + return nil, fmt.Errorf("invalid value type %T", a) + } +} + +func builtinJSONFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + // Ensure we have the right parameters, expect an object and a string or array/set of strings + obj, err := builtins.ObjectOperand(operands[0].Value, 1) + if err != nil { + return err + } + + // Build a list of filter strings + filters, err := getJSONPaths(operands[1].Value) + if err != nil { + return err + } + + // Actually do the filtering + filterObj := pathsToObject(filters) + r, err := obj.Filter(filterObj) + if err != nil { + return err + } + + return iter(ast.NewTerm(r)) +} + +func getJSONPaths(operand ast.Value) ([]ast.Ref, error) { + var paths []ast.Ref + + switch v := operand.(type) { + case *ast.Array: + for i := 0; i < v.Len(); i++ { + filter, err := parsePath(v.Elem(i)) + if err != nil { + return nil, err + } + paths = append(paths, filter) + } + case ast.Set: + err := v.Iter(func(f *ast.Term) error { + filter, err := parsePath(f) + if err != nil { + return err + } + paths = append(paths, filter) + return nil + }) + if err != nil { + return nil, err + } + default: + return nil, builtins.NewOperandTypeErr(2, v, "set", "array") + } + + return paths, nil +} + +func parsePath(path *ast.Term) (ast.Ref, error) { + // paths can either be a `/` separated json path or + // an array or set of values + var pathSegments ast.Ref + switch p := path.Value.(type) { + case ast.String: + if p == "" { + return ast.Ref{}, nil + } + parts := strings.Split(strings.TrimLeft(string(p), "/"), "/") + for _, part := range parts { + part = strings.ReplaceAll(strings.ReplaceAll(part, "~1", "/"), "~0", "~") + pathSegments = 
append(pathSegments, ast.StringTerm(part)) + } + case *ast.Array: + p.Foreach(func(term *ast.Term) { + pathSegments = append(pathSegments, term) + }) + default: + return nil, builtins.NewOperandErr(2, "must be one of {set, array} containing string paths or array of path segments but got %v", ast.ValueName(p)) + } + + return pathSegments, nil +} + +func pathsToObject(paths []ast.Ref) ast.Object { + root := ast.NewObject() + + for _, path := range paths { + node := root + var done bool + + // If the path is an empty JSON path, skip all further processing. + if len(path) == 0 { + done = true + } + + // Otherwise, we should have 1+ path segments to work with. + for i := 0; i < len(path)-1 && !done; i++ { + + k := path[i] + child := node.Get(k) + + if child == nil { + obj := ast.NewObject() + node.Insert(k, ast.NewTerm(obj)) + node = obj + continue + } + + switch v := child.Value.(type) { + case ast.Null: + done = true + case ast.Object: + node = v + default: + panic("unreachable") + } + } + + if !done { + node.Insert(path[len(path)-1], ast.InternedNullTerm) + } + } + + return root +} + +type jsonPatch struct { + op string + path *ast.Term + from *ast.Term + value *ast.Term +} + +func getPatch(o ast.Object) (jsonPatch, error) { + validOps := map[string]struct{}{"add": {}, "remove": {}, "replace": {}, "move": {}, "copy": {}, "test": {}} + var out jsonPatch + var ok bool + getAttribute := func(attr string) (*ast.Term, error) { + if term := o.Get(ast.StringTerm(attr)); term != nil { + return term, nil + } + + return nil, fmt.Errorf("missing '%s' attribute", attr) + } + + opTerm, err := getAttribute("op") + if err != nil { + return out, err + } + op, ok := opTerm.Value.(ast.String) + if !ok { + return out, fmt.Errorf("attribute 'op' must be a string") + } + out.op = string(op) + if _, found := validOps[out.op]; !found { + out.op = "" + return out, fmt.Errorf("unrecognized op '%s'", string(op)) + } + + pathTerm, err := getAttribute("path") + if err != nil { + return out, err + } + out.path = pathTerm + + // Only fetch the "from" parameter for move/copy ops. + switch out.op { + case "move", "copy": + fromTerm, err := getAttribute("from") + if err != nil { + return out, err + } + out.from = fromTerm + } + + // Only fetch the "value" parameter for add/replace/test ops. 
+ switch out.op { + case "add", "replace", "test": + valueTerm, err := getAttribute("value") + if err != nil { + return out, err + } + out.value = valueTerm + } + + return out, nil +} + +func applyPatches(source *ast.Term, operations *ast.Array) (*ast.Term, error) { + et := edittree.NewEditTree(source) + for i := 0; i < operations.Len(); i++ { + object, ok := operations.Elem(i).Value.(ast.Object) + if !ok { + return nil, fmt.Errorf("must be an array of JSON-Patch objects, but at least one element is not an object") + } + patch, err := getPatch(object) + if err != nil { + return nil, err + } + path, err := parsePath(patch.path) + if err != nil { + return nil, err + } + + switch patch.op { + case "add": + _, err = et.InsertAtPath(path, patch.value) + if err != nil { + return nil, err + } + case "remove": + _, err = et.DeleteAtPath(path) + if err != nil { + return nil, err + } + case "replace": + _, err = et.DeleteAtPath(path) + if err != nil { + return nil, err + } + _, err = et.InsertAtPath(path, patch.value) + if err != nil { + return nil, err + } + case "move": + from, err := parsePath(patch.from) + if err != nil { + return nil, err + } + chunk, err := et.RenderAtPath(from) + if err != nil { + return nil, err + } + _, err = et.DeleteAtPath(from) + if err != nil { + return nil, err + } + _, err = et.InsertAtPath(path, chunk) + if err != nil { + return nil, err + } + case "copy": + from, err := parsePath(patch.from) + if err != nil { + return nil, err + } + chunk, err := et.RenderAtPath(from) + if err != nil { + return nil, err + } + _, err = et.InsertAtPath(path, chunk) + if err != nil { + return nil, err + } + case "test": + chunk, err := et.RenderAtPath(path) + if err != nil { + return nil, err + } + if !chunk.Equal(patch.value) { + return nil, fmt.Errorf("value from EditTree != patch value.\n\nExpected: %v\n\nFound: %v", patch.value, chunk) + } + } + } + final := et.Render() + // TODO: Nil check here? + return final, nil +} + +func builtinJSONPatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + // JSON patch supports arrays, objects as well as values as the target. + target := ast.NewTerm(operands[0].Value) + + // Expect an array of operations. + operations, err := builtins.ArrayOperand(operands[1].Value, 2) + if err != nil { + return err + } + + patched, err := applyPatches(target, operations) + if err != nil { + return nil + } + return iter(patched) +} + +func init() { + RegisterBuiltinFunc(ast.JSONFilter.Name, builtinJSONFilter) + RegisterBuiltinFunc(ast.JSONRemove.Name, builtinJSONRemove) + RegisterBuiltinFunc(ast.JSONPatch.Name, builtinJSONPatch) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go similarity index 77% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go index f44398f1e..b1609fb04 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/jsonschema.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/jsonschema.go @@ -8,8 +8,8 @@ import ( "encoding/json" "errors" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/gojsonschema" + "github.com/open-policy-agent/opa/v1/ast" ) // astValueToJSONSchemaLoader converts a value to JSON Loader. 
@@ -44,7 +44,7 @@ func astValueToJSONSchemaLoader(value ast.Value) (gojsonschema.JSONLoader, error } func newResultTerm(valid bool, data *ast.Term) *ast.Term { - return ast.ArrayTerm(ast.BooleanTerm(valid), data) + return ast.ArrayTerm(ast.InternedBooleanTerm(valid), data) } // builtinJSONSchemaVerify accepts 1 argument which can be string or object and checks if it is valid JSON schema. @@ -61,13 +61,23 @@ func builtinJSONSchemaVerify(_ BuiltinContext, operands []*ast.Term, iter func(* return iter(newResultTerm(false, ast.StringTerm("jsonschema: "+err.Error()))) } - return iter(newResultTerm(true, ast.NullTerm())) + return iter(newResultTerm(true, ast.InternedNullTerm)) } // builtinJSONMatchSchema accepts 2 arguments both can be string or object and verifies if the document matches the JSON schema. // Returns an array where first element is a boolean indicating a successful match, and the second is an array of errors that is empty on success and populated on failure. // In case of internal error returns empty array. -func builtinJSONMatchSchema(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { +func builtinJSONMatchSchema(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + var schema *gojsonschema.Schema + + if bctx.InterQueryBuiltinValueCache != nil { + if val, ok := bctx.InterQueryBuiltinValueCache.Get(operands[1].Value); ok { + if s, isSchema := val.(*gojsonschema.Schema); isSchema { + schema = s + } + } + } + // Take first argument and make JSON Loader from it. // This is a JSON document made from Rego JSON string or object. documentLoader, err := astValueToJSONSchemaLoader(operands[0].Value) @@ -75,15 +85,26 @@ func builtinJSONMatchSchema(_ BuiltinContext, operands []*ast.Term, iter func(*a return err } - // Take second argument and make JSON Loader from it. - // This is a JSON schema made from Rego JSON string or object. - schemaLoader, err := astValueToJSONSchemaLoader(operands[1].Value) - if err != nil { - return err + if schema == nil { + // Take second argument and make JSON Loader from it. + // This is a JSON schema made from Rego JSON string or object. + schemaLoader, err := astValueToJSONSchemaLoader(operands[1].Value) + if err != nil { + return err + } + + schema, err = gojsonschema.NewSchema(schemaLoader) + if err != nil { + return err + } + + if bctx.InterQueryBuiltinValueCache != nil { + bctx.InterQueryBuiltinValueCache.Insert(operands[1].Value, schema) + } } // Use schema to validate document. 
- result, err := gojsonschema.Validate(schemaLoader, documentLoader) + result, err := schema.Validate(documentLoader) if err != nil { return err } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/net.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go similarity index 93% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/net.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go index 534520529..17ed77984 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/net.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/net.go @@ -8,8 +8,8 @@ import ( "net" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) type lookupIPAddrCacheKey string diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go new file mode 100644 index 000000000..855aef04b --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/numbers.go @@ -0,0 +1,196 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "fmt" + "math/big" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +type randIntCachingKey string + +var zero = big.NewInt(0) +var one = big.NewInt(1) + +func builtinNumbersRange(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + if canGenerateCheapRange(operands) { + return generateCheapRange(operands, iter) + } + + x, err := builtins.BigIntOperand(operands[0].Value, 1) + if err != nil { + return err + } + + y, err := builtins.BigIntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + ast, err := generateRange(bctx, x, y, one, "numbers.range") + if err != nil { + return err + } + + return iter(ast) +} + +func builtinNumbersRangeStep(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + x, err := builtins.BigIntOperand(operands[0].Value, 1) + if err != nil { + return err + } + + y, err := builtins.BigIntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + step, err := builtins.BigIntOperand(operands[2].Value, 3) + if err != nil { + return err + } + + if step.Cmp(zero) <= 0 { + return fmt.Errorf("numbers.range_step: step must be a positive number above zero") + } + + ast, err := generateRange(bctx, x, y, step, "numbers.range_step") + if err != nil { + return err + } + + return iter(ast) +} + +func canGenerateCheapRange(operands []*ast.Term) bool { + x, err := builtins.IntOperand(operands[0].Value, 1) + if err != nil || !ast.HasInternedIntNumberTerm(x) { + return false + } + + y, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil || !ast.HasInternedIntNumberTerm(y) { + return false + } + + return true +} + +func generateCheapRange(operands []*ast.Term, iter func(*ast.Term) error) error { + x, err := builtins.IntOperand(operands[0].Value, 1) + if err != nil { + return err + } + + y, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + step := 1 + + if len(operands) > 2 { + stepOp, err := builtins.IntOperand(operands[2].Value, 3) + if err == nil { + step = stepOp + } + } + + if step 
<= 0 { + return fmt.Errorf("numbers.range_step: step must be a positive number above zero") + } + + terms := make([]*ast.Term, 0, y+1) + + if x <= y { + for i := x; i <= y; i += step { + terms = append(terms, ast.InternedIntNumberTerm(i)) + } + } else { + for i := x; i >= y; i -= step { + terms = append(terms, ast.InternedIntNumberTerm(i)) + } + } + + return iter(ast.ArrayTerm(terms...)) +} + +func generateRange(bctx BuiltinContext, x *big.Int, y *big.Int, step *big.Int, funcName string) (*ast.Term, error) { + + cmp := x.Cmp(y) + + comp := func(i *big.Int, y *big.Int) bool { return i.Cmp(y) <= 0 } + iter := func(i *big.Int) *big.Int { return i.Add(i, step) } + + if cmp > 0 { + comp = func(i *big.Int, y *big.Int) bool { return i.Cmp(y) >= 0 } + iter = func(i *big.Int) *big.Int { return i.Sub(i, step) } + } + + result := ast.NewArray() + haltErr := Halt{ + Err: &Error{ + Code: CancelErr, + Message: fmt.Sprintf("%s: timed out before generating all numbers in range", funcName), + }, + } + + for i := new(big.Int).Set(x); comp(i, y); i = iter(i) { + if bctx.Cancel != nil && bctx.Cancel.Cancelled() { + return nil, haltErr + } + result = result.Append(ast.NewTerm(builtins.IntToNumber(i))) + } + + return ast.NewTerm(result), nil +} + +func builtinRandIntn(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + strOp, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + + } + + n, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + if n == 0 { + return iter(ast.InternedIntNumberTerm(0)) + } + + if n < 0 { + n = -n + } + + var key = randIntCachingKey(fmt.Sprintf("%s-%d", strOp, n)) + + if val, ok := bctx.Cache.Get(key); ok { + return iter(val.(*ast.Term)) + } + + r, err := bctx.Rand() + if err != nil { + return err + } + result := ast.InternedIntNumberTerm(r.Intn(n)) + bctx.Cache.Put(key, result) + + return iter(result) +} + +func init() { + RegisterBuiltinFunc(ast.NumbersRange.Name, builtinNumbersRange) + RegisterBuiltinFunc(ast.NumbersRangeStep.Name, builtinNumbersRangeStep) + RegisterBuiltinFunc(ast.RandIntn.Name, builtinRandIntn) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/object.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go similarity index 91% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/object.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go index ba5d77ff3..4db8fa827 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/object.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/object.go @@ -5,9 +5,9 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/ref" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinObjectUnion(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -50,9 +50,6 @@ func builtinObjectUnionN(_ BuiltinContext, operands []*ast.Term, iter func(*ast. return builtins.NewOperandElementErr(1, arr, arr.Elem(i).Value, "object") } mergewithOverwriteInPlace(result, o, frozenKeys) - if err != nil { - return err - } } return iter(ast.NewTerm(result)) @@ -95,7 +92,7 @@ func builtinObjectFilter(_ BuiltinContext, operands []*ast.Term, iter func(*ast. 
filterObj := ast.NewObject() keys.Foreach(func(key *ast.Term) { - filterObj.Insert(key, ast.NullTerm()) + filterObj.Insert(key, ast.InternedNullTerm) }) // Actually do the filtering @@ -144,37 +141,24 @@ func builtinObjectKeys(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Te return err } - keys := ast.SetTerm(object.Keys()...) - - return iter(keys) + return iter(ast.SetTerm(object.Keys()...)) } // getObjectKeysParam returns a set of key values // from a supplied ast array, object, set value func getObjectKeysParam(arrayOrSet ast.Value) (ast.Set, error) { - keys := ast.NewSet() - switch v := arrayOrSet.(type) { case *ast.Array: - _ = v.Iter(func(f *ast.Term) error { - keys.Add(f) - return nil - }) + keys := ast.NewSet() + v.Foreach(keys.Add) + return keys, nil case ast.Set: - _ = v.Iter(func(f *ast.Term) error { - keys.Add(f) - return nil - }) + return ast.NewSet(v.Slice()...), nil case ast.Object: - _ = v.Iter(func(k *ast.Term, _ *ast.Term) error { - keys.Add(k) - return nil - }) - default: - return nil, builtins.NewOperandTypeErr(2, arrayOrSet, "object", "set", "array") + return ast.NewSet(v.Keys()...), nil } - return keys, nil + return nil, builtins.NewOperandTypeErr(2, arrayOrSet, "object", "set", "array") } func mergeWithOverwrite(objA, objB ast.Object) ast.Object { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/parse.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go similarity index 91% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/parse.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go index c46222b41..464e0141a 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/parse.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/parse.go @@ -9,8 +9,8 @@ import ( "encoding/json" "fmt" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func builtinRegoParseModule(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { @@ -25,6 +25,7 @@ func builtinRegoParseModule(_ BuiltinContext, operands []*ast.Term, iter func(*a return err } + // FIXME: Use configured rego-version? module, err := ast.ParseModule(string(filename), string(input)) if err != nil { return err diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go similarity index 76% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go index 0cd4bc193..dcc8e2199 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/parse_bytes.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_bytes.go @@ -10,8 +10,8 @@ import ( "strings" "unicode" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) const ( @@ -121,21 +121,35 @@ func extractNumAndUnit(s string) (string, string) { } firstNonNumIdx := -1 - for idx, r := range s { - if !isNum(r) { + for idx := 0; idx < len(s); idx++ { + r := rune(s[idx]) + // Identify the first non-numeric character, marking the boundary between the number and the unit. 
+ if !isNum(r) && r != 'e' && r != 'E' && r != '+' && r != '-' { firstNonNumIdx = idx break } + if r == 'e' || r == 'E' { + // Check if the next character is a valid digit or +/- for scientific notation + if idx == len(s)-1 || (!unicode.IsDigit(rune(s[idx+1])) && rune(s[idx+1]) != '+' && rune(s[idx+1]) != '-') { + firstNonNumIdx = idx + break + } + // Skip the next character if it is '+' or '-' + if idx+1 < len(s) && (s[idx+1] == '+' || s[idx+1] == '-') { + idx++ + } + } } - if firstNonNumIdx == -1 { // only digits and '.' + if firstNonNumIdx == -1 { // only digits, '.', or valid scientific notation return s, "" } if firstNonNumIdx == 0 { // only units (starts with non-digit) return "", s } - return s[0:firstNonNumIdx], s[firstNonNumIdx:] + // Return the number and the rest as the unit + return s[:firstNonNumIdx], s[firstNonNumIdx:] } func init() { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/parse_units.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go similarity index 96% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/parse_units.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go index daf240214..47e459510 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/parse_units.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/parse_units.go @@ -10,8 +10,8 @@ import ( "math/big" "strings" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) // Binary Si unit constants are borrowed from topdown/parse_bytes diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go new file mode 100644 index 000000000..f852f3e32 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/print.go @@ -0,0 +1,86 @@ +// Copyright 2021 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "fmt" + "io" + "strings" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/print" +) + +func NewPrintHook(w io.Writer) print.Hook { + return printHook{w: w} +} + +type printHook struct { + w io.Writer +} + +func (h printHook) Print(_ print.Context, msg string) error { + _, err := fmt.Fprintln(h.w, msg) + return err +} + +func builtinPrint(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + if bctx.PrintHook == nil { + return iter(nil) + } + + arr, err := builtins.ArrayOperand(operands[0].Value, 1) + if err != nil { + return err + } + + buf := make([]string, arr.Len()) + + err = builtinPrintCrossProductOperands(bctx, buf, arr, 0, func(buf []string) error { + pctx := print.Context{ + Context: bctx.Context, + Location: bctx.Location, + } + return bctx.PrintHook.Print(pctx, strings.Join(buf, " ")) + }) + if err != nil { + return err + } + + return iter(nil) +} + +func builtinPrintCrossProductOperands(bctx BuiltinContext, buf []string, operands *ast.Array, i int, f func([]string) error) error { + + if i >= operands.Len() { + return f(buf) + } + + xs, ok := operands.Elem(i).Value.(ast.Set) + if !ok { + return Halt{Err: internalErr(bctx.Location, fmt.Sprintf("illegal argument type: %v", ast.ValueName(operands.Elem(i).Value)))} + } + + if xs.Len() == 0 { + buf[i] = "" + return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f) + } + + return xs.Iter(func(x *ast.Term) error { + switch v := x.Value.(type) { + case ast.String: + buf[i] = string(v) + default: + buf[i] = v.String() + } + return builtinPrintCrossProductOperands(bctx, buf, operands, i+1, f) + }) +} + +func init() { + RegisterBuiltinFunc(ast.InternalPrint.Name, builtinPrint) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go new file mode 100644 index 000000000..ce684ae94 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/print/print.go @@ -0,0 +1,21 @@ +package print + +import ( + "context" + + "github.com/open-policy-agent/opa/v1/ast" +) + +// Context provides the Hook implementation context about the print() call. +type Context struct { + Context context.Context // request context passed when query executed + Location *ast.Location // location of print call +} + +// Hook defines the interface that callers can implement to receive print +// statement outputs. If the hook returns an error, it will be surfaced if +// strict builtin error checking is enabled (otherwise, it will not halt +// execution.) 
+type Hook interface { + Print(Context, string) error +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/providers.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go similarity index 97% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/providers.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go index 77db91798..dd84026e4 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/providers.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/providers.go @@ -9,9 +9,9 @@ import ( "net/url" "time" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/providers/aws" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) var awsRequiredConfigKeyNames = ast.NewSet( @@ -119,9 +119,6 @@ func builtinAWSSigV4SignReq(_ BuiltinContext, operands []*ast.Term, iter func(*a } signingTimestamp = time.Unix(0, ts) - if err != nil { - return err - } // Make sure our required keys exist! // This check is stricter than required, but better to break here than downstream. diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go new file mode 100644 index 000000000..a008517cc --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/query.go @@ -0,0 +1,621 @@ +package topdown + +import ( + "context" + "crypto/rand" + "io" + "sort" + "time" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/resolver" + "github.com/open-policy-agent/opa/v1/storage" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" + "github.com/open-policy-agent/opa/v1/topdown/copypropagation" + "github.com/open-policy-agent/opa/v1/topdown/print" + "github.com/open-policy-agent/opa/v1/tracing" +) + +// QueryResultSet represents a collection of results returned by a query. +type QueryResultSet []QueryResult + +// QueryResult represents a single result returned by a query. The result +// contains bindings for all variables that appear in the query. +type QueryResult map[ast.Var]*ast.Term + +// Query provides a configurable interface for performing query evaluation. +type Query struct { + seed io.Reader + time time.Time + cancel Cancel + query ast.Body + queryCompiler ast.QueryCompiler + compiler *ast.Compiler + store storage.Store + txn storage.Transaction + input *ast.Term + external *resolverTrie + tracers []QueryTracer + plugTraceVars bool + unknowns []*ast.Term + partialNamespace string + skipSaveNamespace bool + metrics metrics.Metrics + instr *Instrumentation + disableInlining []ast.Ref + shallowInlining bool + nondeterministicBuiltins bool + genvarprefix string + runtime *ast.Term + builtins map[string]*Builtin + indexing bool + earlyExit bool + interQueryBuiltinCache cache.InterQueryCache + interQueryBuiltinValueCache cache.InterQueryValueCache + ndBuiltinCache builtins.NDBCache + strictBuiltinErrors bool + builtinErrorList *[]Error + strictObjects bool + roundTripper CustomizeRoundTripper + printHook print.Hook + tracingOpts tracing.Options + virtualCache VirtualCache +} + +// Builtin represents a built-in function that queries can call. 
+type Builtin struct {
+	Decl *ast.Builtin
+	Func BuiltinFunc
+}
+
+// NewQuery returns a new Query object that can be run.
+func NewQuery(query ast.Body) *Query {
+	return &Query{
+		query:        query,
+		genvarprefix: ast.WildcardPrefix,
+		indexing:     true,
+		earlyExit:    true,
+		external:     newResolverTrie(),
+	}
+}
+
+// WithQueryCompiler sets the queryCompiler used for the query.
+func (q *Query) WithQueryCompiler(queryCompiler ast.QueryCompiler) *Query {
+	q.queryCompiler = queryCompiler
+	return q
+}
+
+// WithCompiler sets the compiler to use for the query.
+func (q *Query) WithCompiler(compiler *ast.Compiler) *Query {
+	q.compiler = compiler
+	return q
+}
+
+// WithStore sets the store to use for the query.
+func (q *Query) WithStore(store storage.Store) *Query {
+	q.store = store
+	return q
+}
+
+// WithTransaction sets the transaction to use for the query. All queries
+// should be performed over a consistent snapshot of the storage layer.
+func (q *Query) WithTransaction(txn storage.Transaction) *Query {
+	q.txn = txn
+	return q
+}
+
+// WithCancel sets the cancellation object to use for the query. Set this if
+// you need to abort queries based on a deadline. This is optional.
+func (q *Query) WithCancel(cancel Cancel) *Query {
+	q.cancel = cancel
+	return q
+}
+
+// WithInput sets the input object to use for the query. References rooted at
+// input will be evaluated against this value. This is optional.
+func (q *Query) WithInput(input *ast.Term) *Query {
+	q.input = input
+	return q
+}
+
+// WithTracer adds a query tracer to use during evaluation. This is optional.
+// Deprecated: Use WithQueryTracer instead.
+func (q *Query) WithTracer(tracer Tracer) *Query {
+	qt, ok := tracer.(QueryTracer)
+	if !ok {
+		qt = WrapLegacyTracer(tracer)
+	}
+	return q.WithQueryTracer(qt)
+}
+
+// WithQueryTracer adds a query tracer to use during evaluation. This is optional.
+// Disabled QueryTracers will be ignored.
+func (q *Query) WithQueryTracer(tracer QueryTracer) *Query {
+	if !tracer.Enabled() {
+		return q
+	}
+
+	q.tracers = append(q.tracers, tracer)
+
+	// If *any* of the tracers require local variable metadata we need to
+	// enable plugging local trace variables.
+	conf := tracer.Config()
+	if conf.PlugLocalVars {
+		q.plugTraceVars = true
+	}
+
+	return q
+}
+
+// WithMetrics sets the metrics collection to add evaluation metrics to. This
+// is optional.
+func (q *Query) WithMetrics(m metrics.Metrics) *Query {
+	q.metrics = m
+	return q
+}
+
+// WithInstrumentation sets the instrumentation configuration to enable on the
+// evaluation process. By default, instrumentation is turned off.
+func (q *Query) WithInstrumentation(instr *Instrumentation) *Query {
+	q.instr = instr
+	return q
+}
+
+// WithUnknowns sets the initial set of variables or references to treat as
+// unknown during query evaluation. This is required for partial evaluation.
+func (q *Query) WithUnknowns(terms []*ast.Term) *Query {
+	q.unknowns = terms
+	return q
+}
+
+// WithPartialNamespace sets the namespace to use for supporting rules
+// generated as part of the partial evaluation process. The ns value must be a
+// valid package path component.
+func (q *Query) WithPartialNamespace(ns string) *Query {
+	q.partialNamespace = ns
+	return q
+}
+
+// WithSkipPartialNamespace disables namespacing of saved support rules that are generated
+// from the original policy (rules which are completely synthetic are still namespaced.)
+func (q *Query) WithSkipPartialNamespace(yes bool) *Query { + q.skipSaveNamespace = yes + return q +} + +// WithDisableInlining adds a set of paths to the query that should be excluded from +// inlining. Inlining during partial evaluation can be expensive in some cases +// (e.g., when a cross-product is computed.) Disabling inlining avoids expensive +// computation at the cost of generating support rules. +func (q *Query) WithDisableInlining(paths []ast.Ref) *Query { + q.disableInlining = paths + return q +} + +// WithShallowInlining disables aggressive inlining performed during partial evaluation. +// When shallow inlining is enabled rules that depend (transitively) on unknowns are not inlined. +// Only rules/values that are completely known will be inlined. +func (q *Query) WithShallowInlining(yes bool) *Query { + q.shallowInlining = yes + return q +} + +// WithRuntime sets the runtime data to execute the query with. The runtime data +// can be returned by the `opa.runtime` built-in function. +func (q *Query) WithRuntime(runtime *ast.Term) *Query { + q.runtime = runtime + return q +} + +// WithBuiltins adds a set of built-in functions that can be called by the +// query. +func (q *Query) WithBuiltins(builtins map[string]*Builtin) *Query { + q.builtins = builtins + return q +} + +// WithIndexing will enable or disable using rule indexing for the evaluation +// of the query. The default is enabled. +func (q *Query) WithIndexing(enabled bool) *Query { + q.indexing = enabled + return q +} + +// WithEarlyExit will enable or disable using 'early exit' for the evaluation +// of the query. The default is enabled. +func (q *Query) WithEarlyExit(enabled bool) *Query { + q.earlyExit = enabled + return q +} + +// WithSeed sets a reader that will seed randomization required by built-in functions. +// If a seed is not provided crypto/rand.Reader is used. +func (q *Query) WithSeed(r io.Reader) *Query { + q.seed = r + return q +} + +// WithTime sets the time that will be returned by the time.now_ns() built-in function. +func (q *Query) WithTime(x time.Time) *Query { + q.time = x + return q +} + +// WithInterQueryBuiltinCache sets the inter-query cache that built-in functions can utilize. +func (q *Query) WithInterQueryBuiltinCache(c cache.InterQueryCache) *Query { + q.interQueryBuiltinCache = c + return q +} + +// WithInterQueryBuiltinValueCache sets the inter-query value cache that built-in functions can utilize. +func (q *Query) WithInterQueryBuiltinValueCache(c cache.InterQueryValueCache) *Query { + q.interQueryBuiltinValueCache = c + return q +} + +// WithNDBuiltinCache sets the non-deterministic builtin cache. +func (q *Query) WithNDBuiltinCache(c builtins.NDBCache) *Query { + q.ndBuiltinCache = c + return q +} + +// WithStrictBuiltinErrors tells the evaluator to treat all built-in function errors as fatal errors. +func (q *Query) WithStrictBuiltinErrors(yes bool) *Query { + q.strictBuiltinErrors = yes + return q +} + +// WithBuiltinErrorList supplies a pointer to an Error slice to store built-in function errors +// encountered during evaluation. This error slice can be inspected after evaluation to determine +// which built-in function errors occurred. +func (q *Query) WithBuiltinErrorList(list *[]Error) *Query { + q.builtinErrorList = list + return q +} + +// WithResolver configures an external resolver to use for the given ref. 
+func (q *Query) WithResolver(ref ast.Ref, r resolver.Resolver) *Query {
+	q.external.Put(ref, r)
+	return q
+}
+
+// WithHTTPRoundTripper configures a custom HTTP transport for built-in functions that make HTTP requests.
+func (q *Query) WithHTTPRoundTripper(t CustomizeRoundTripper) *Query {
+	q.roundTripper = t
+	return q
+}
+
+func (q *Query) WithPrintHook(h print.Hook) *Query {
+	q.printHook = h
+	return q
+}
+
+// WithDistributedTracingOpts sets the options to be used by distributed tracing.
+func (q *Query) WithDistributedTracingOpts(tr tracing.Options) *Query {
+	q.tracingOpts = tr
+	return q
+}
+
+// WithStrictObjects tells the evaluator to avoid the "lazy object" optimization
+// applied when reading objects from the store. It will result in higher memory
+// usage and should only be used temporarily while adjusting code that breaks
+// because of the optimization.
+func (q *Query) WithStrictObjects(yes bool) *Query {
+	q.strictObjects = yes
+	return q
+}
+
+// WithVirtualCache sets the VirtualCache to use during evaluation. This is
+// optional, and if not set, the default cache is used.
+func (q *Query) WithVirtualCache(vc VirtualCache) *Query {
+	q.virtualCache = vc
+	return q
+}
+
+// WithNondeterministicBuiltins causes non-deterministic builtins to be evaluated
+// during partial evaluation. This is needed to pull in external data, or validate
+// a JWT, during PE, so that the result informs what queries are returned.
+func (q *Query) WithNondeterministicBuiltins(yes bool) *Query {
+	q.nondeterministicBuiltins = yes
+	return q
+}
+
+// PartialRun executes partial evaluation on the query with respect to unknown
+// values. Partial evaluation attempts to evaluate as much of the query as
+// possible without requiring values for the unknowns set on the query. The
+// result of partial evaluation is a new set of queries that can be evaluated
+// once the unknown value is known. In addition to new queries, partial
+// evaluation may produce additional support modules that should be used in
+// conjunction with the partially evaluated queries.
+func (q *Query) PartialRun(ctx context.Context) (partials []ast.Body, support []*ast.Module, err error) { + if q.partialNamespace == "" { + q.partialNamespace = "partial" // lazily initialize partial namespace + } + if q.seed == nil { + q.seed = rand.Reader + } + if !q.time.IsZero() { + q.time = time.Now() + } + if q.metrics == nil { + q.metrics = metrics.New() + } + + f := &queryIDFactory{} + b := newBindings(0, q.instr) + + var vc VirtualCache + if q.virtualCache != nil { + vc = q.virtualCache + } else { + vc = NewVirtualCache() + } + + e := &eval{ + ctx: ctx, + metrics: q.metrics, + seed: q.seed, + time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())), + cancel: q.cancel, + query: q.query, + queryCompiler: q.queryCompiler, + queryIDFact: f, + queryID: f.Next(), + bindings: b, + compiler: q.compiler, + store: q.store, + baseCache: newBaseCache(), + targetStack: newRefStack(), + txn: q.txn, + input: q.input, + external: q.external, + tracers: q.tracers, + traceEnabled: len(q.tracers) > 0, + plugTraceVars: q.plugTraceVars, + instr: q.instr, + builtins: q.builtins, + builtinCache: builtins.Cache{}, + functionMocks: newFunctionMocksStack(), + interQueryBuiltinCache: q.interQueryBuiltinCache, + interQueryBuiltinValueCache: q.interQueryBuiltinValueCache, + ndBuiltinCache: q.ndBuiltinCache, + virtualCache: vc, + comprehensionCache: newComprehensionCache(), + saveSet: newSaveSet(q.unknowns, b, q.instr), + saveStack: newSaveStack(), + saveSupport: newSaveSupport(), + saveNamespace: ast.StringTerm(q.partialNamespace), + skipSaveNamespace: q.skipSaveNamespace, + inliningControl: &inliningControl{ + shallow: q.shallowInlining, + nondeterministicBuiltins: q.nondeterministicBuiltins, + }, + genvarprefix: q.genvarprefix, + runtime: q.runtime, + indexing: q.indexing, + earlyExit: q.earlyExit, + builtinErrors: &builtinErrors{}, + printHook: q.printHook, + strictObjects: q.strictObjects, + } + + if len(q.disableInlining) > 0 { + e.inliningControl.PushDisable(q.disableInlining, false) + } + + e.caller = e + q.metrics.Timer(metrics.RegoPartialEval).Start() + defer q.metrics.Timer(metrics.RegoPartialEval).Stop() + + livevars := ast.NewVarSet() + for _, t := range q.unknowns { + switch v := t.Value.(type) { + case ast.Var: + livevars.Add(v) + case ast.Ref: + livevars.Add(v[0].Value.(ast.Var)) + } + } + + ast.WalkVars(q.query, func(x ast.Var) bool { + if !x.IsGenerated() { + livevars.Add(x) + } + return false + }) + + p := copypropagation.New(livevars).WithCompiler(q.compiler) + + err = e.Run(func(e *eval) error { + + // Build output from saved expressions. + body := ast.NewBody() + + for _, elem := range e.saveStack.Stack[len(e.saveStack.Stack)-1] { + body.Append(elem.Plug(e.bindings)) + } + + // Include bindings as exprs so that when caller evals the result, they + // can obtain values for the vars in their query. + bindingExprs := []*ast.Expr{} + _ = e.bindings.Iter(e.bindings, func(a, b *ast.Term) error { + bindingExprs = append(bindingExprs, ast.Equality.Expr(a, b)) + return nil + }) // cannot return error + + // Sort binding expressions so that results are deterministic. + sort.Slice(bindingExprs, func(i, j int) bool { + return bindingExprs[i].Compare(bindingExprs[j]) < 0 + }) + + for i := range bindingExprs { + body.Append(bindingExprs[i]) + } + + // Skip this rule body if it fails to type-check. + // Type-checking failure means the rule body will never succeed. 
+ if !e.compiler.PassesTypeCheck(body) { + return nil + } + + if !q.shallowInlining { + body = applyCopyPropagation(p, e.instr, body) + } + + partials = append(partials, body) + return nil + }) + + support = e.saveSupport.List() + + if len(e.builtinErrors.errs) > 0 { + if q.strictBuiltinErrors { + err = e.builtinErrors.errs[0] + } else if q.builtinErrorList != nil { + // If a builtinErrorList has been supplied, we must use pointer indirection + // to append to it. builtinErrorList is a slice pointer so that errors can be + // appended to it without returning a new slice and changing the interface + // of PartialRun. + for _, err := range e.builtinErrors.errs { + if tdError, ok := err.(*Error); ok { + *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError) + } else { + *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{ + Code: BuiltinErr, + Message: err.Error(), + }) + } + } + } + } + + for i, m := range support { + if regoVersion := q.compiler.DefaultRegoVersion(); regoVersion != ast.RegoUndefined { + ast.SetModuleRegoVersion(m, q.compiler.DefaultRegoVersion()) + } + + sort.Slice(support[i].Rules, func(j, k int) bool { + return support[i].Rules[j].Compare(support[i].Rules[k]) < 0 + }) + } + + return partials, support, err +} + +// Run is a wrapper around Iter that accumulates query results and returns them +// in one shot. +func (q *Query) Run(ctx context.Context) (QueryResultSet, error) { + qrs := QueryResultSet{} + return qrs, q.Iter(ctx, func(qr QueryResult) error { + qrs = append(qrs, qr) + return nil + }) +} + +// Iter executes the query and invokes the iter function with query results +// produced by evaluating the query. +func (q *Query) Iter(ctx context.Context, iter func(QueryResult) error) error { + // Query evaluation must not be allowed if the compiler has errors and is in an undefined, possibly inconsistent state + if q.compiler != nil && len(q.compiler.Errors) > 0 { + return &Error{ + Code: InternalErr, + Message: "compiler has errors", + } + } + + if q.seed == nil { + q.seed = rand.Reader + } + if q.time.IsZero() { + q.time = time.Now() + } + if q.metrics == nil { + q.metrics = metrics.New() + } + + f := &queryIDFactory{} + + var vc VirtualCache + if q.virtualCache != nil { + vc = q.virtualCache + } else { + vc = NewVirtualCache() + } + + e := &eval{ + ctx: ctx, + metrics: q.metrics, + seed: q.seed, + time: ast.NumberTerm(int64ToJSONNumber(q.time.UnixNano())), + cancel: q.cancel, + query: q.query, + queryCompiler: q.queryCompiler, + queryIDFact: f, + queryID: f.Next(), + bindings: newBindings(0, q.instr), + compiler: q.compiler, + store: q.store, + baseCache: newBaseCache(), + targetStack: newRefStack(), + txn: q.txn, + input: q.input, + external: q.external, + tracers: q.tracers, + traceEnabled: len(q.tracers) > 0, + plugTraceVars: q.plugTraceVars, + instr: q.instr, + builtins: q.builtins, + builtinCache: builtins.Cache{}, + functionMocks: newFunctionMocksStack(), + interQueryBuiltinCache: q.interQueryBuiltinCache, + interQueryBuiltinValueCache: q.interQueryBuiltinValueCache, + ndBuiltinCache: q.ndBuiltinCache, + virtualCache: vc, + comprehensionCache: newComprehensionCache(), + genvarprefix: q.genvarprefix, + runtime: q.runtime, + indexing: q.indexing, + earlyExit: q.earlyExit, + builtinErrors: &builtinErrors{}, + printHook: q.printHook, + tracingOpts: q.tracingOpts, + strictObjects: q.strictObjects, + roundTripper: q.roundTripper, + } + e.caller = e + q.metrics.Timer(metrics.RegoQueryEval).Start() + err := e.Run(func(e *eval) error { + qr := 
QueryResult{} + _ = e.bindings.Iter(nil, func(k, v *ast.Term) error { + qr[k.Value.(ast.Var)] = v + return nil + }) // cannot return error + return iter(qr) + }) + + if len(e.builtinErrors.errs) > 0 { + if q.strictBuiltinErrors { + err = e.builtinErrors.errs[0] + } else if q.builtinErrorList != nil { + // If a builtinErrorList has been supplied, we must use pointer indirection + // to append to it. builtinErrorList is a slice pointer so that errors can be + // appended to it without returning a new slice and changing the interface + // of Iter. + for _, err := range e.builtinErrors.errs { + if tdError, ok := err.(*Error); ok { + *(q.builtinErrorList) = append(*(q.builtinErrorList), *tdError) + } else { + *(q.builtinErrorList) = append(*(q.builtinErrorList), Error{ + Code: BuiltinErr, + Message: err.Error(), + }) + } + } + } + } + + q.metrics.Timer(metrics.RegoQueryEval).Stop() + return err +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/reachable.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go similarity index 97% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/reachable.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go index 8d61018e7..1c31019db 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/reachable.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/reachable.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) // Helper: sets of vertices can be represented as Arrays or Sets. diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go new file mode 100644 index 000000000..6c1f6794c --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/regex.go @@ -0,0 +1,278 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "fmt" + "regexp" + "sync" + + gintersect "github.com/yashtewari/glob-intersection" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +const regexCacheMaxSize = 100 +const regexInterQueryValueCacheHits = "rego_builtin_regex_interquery_value_cache_hits" + +var regexpCacheLock = sync.Mutex{} +var regexpCache map[string]*regexp.Regexp + +func builtinRegexIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return iter(ast.InternedBooleanTerm(false)) + } + + _, err = regexp.Compile(string(s)) + if err != nil { + return iter(ast.InternedBooleanTerm(false)) + } + + return iter(ast.InternedBooleanTerm(true)) +} + +func builtinRegexMatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s1, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + s2, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + re, err := getRegexp(bctx, string(s1)) + if err != nil { + return err + } + return iter(ast.InternedBooleanTerm(re.MatchString(string(s2)))) +} + +func builtinRegexMatchTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + pattern, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + match, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + start, err := builtins.StringOperand(operands[2].Value, 3) + if err != nil { + return err + } + end, err := builtins.StringOperand(operands[3].Value, 4) + if err != nil { + return err + } + if len(start) != 1 { + return fmt.Errorf("start delimiter has to be exactly one character long but is %d long", len(start)) + } + if len(end) != 1 { + return fmt.Errorf("end delimiter has to be exactly one character long but is %d long", len(start)) + } + re, err := getRegexpTemplate(string(pattern), string(start)[0], string(end)[0]) + if err != nil { + return err + } + return iter(ast.InternedBooleanTerm(re.MatchString(string(match)))) +} + +func builtinRegexSplit(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s1, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + s2, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + re, err := getRegexp(bctx, string(s1)) + if err != nil { + return err + } + + elems := re.Split(string(s2), -1) + arr := make([]*ast.Term, len(elems)) + for i := range elems { + arr[i] = ast.StringTerm(elems[i]) + } + return iter(ast.NewTerm(ast.NewArray(arr...))) +} + +func getRegexp(bctx BuiltinContext, pat string) (*regexp.Regexp, error) { + if bctx.InterQueryBuiltinValueCache != nil { + // TODO: Use named cache + val, ok := bctx.InterQueryBuiltinValueCache.Get(ast.String(pat)) + if ok { + res, valid := val.(*regexp.Regexp) + if !valid { + // The cache key may exist for a different value type (eg. glob). + // In this case, we calculate the regex and return the result w/o updating the cache. 
+ return regexp.Compile(pat) + } + + bctx.Metrics.Counter(regexInterQueryValueCacheHits).Incr() + return res, nil + } + + re, err := regexp.Compile(pat) + if err != nil { + return nil, err + } + bctx.InterQueryBuiltinValueCache.Insert(ast.String(pat), re) + return re, nil + } + + regexpCacheLock.Lock() + defer regexpCacheLock.Unlock() + re, ok := regexpCache[pat] + if !ok { + var err error + re, err = regexp.Compile(pat) + if err != nil { + return nil, err + } + if len(regexpCache) >= regexCacheMaxSize { + // Delete a (semi-)random key to make room for the new one. + for k := range regexpCache { + delete(regexpCache, k) + break + } + } + regexpCache[pat] = re + } + return re, nil +} + +func getRegexpTemplate(pat string, delimStart, delimEnd byte) (*regexp.Regexp, error) { + regexpCacheLock.Lock() + defer regexpCacheLock.Unlock() + re, ok := regexpCache[pat] + if !ok { + var err error + re, err = compileRegexTemplate(pat, delimStart, delimEnd) + if err != nil { + return nil, err + } + regexpCache[pat] = re + } + return re, nil +} + +func builtinGlobsMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s1, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + s2, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + ne, err := gintersect.NonEmpty(string(s1), string(s2)) + if err != nil { + return err + } + return iter(ast.InternedBooleanTerm(ne)) +} + +func builtinRegexFind(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s1, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + s2, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + n, err := builtins.IntOperand(operands[2].Value, 3) + if err != nil { + return err + } + re, err := getRegexp(bctx, string(s1)) + if err != nil { + return err + } + + elems := re.FindAllString(string(s2), n) + arr := make([]*ast.Term, len(elems)) + for i := range elems { + arr[i] = ast.StringTerm(elems[i]) + } + return iter(ast.NewTerm(ast.NewArray(arr...))) +} + +func builtinRegexFindAllStringSubmatch(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s1, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + s2, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + n, err := builtins.IntOperand(operands[2].Value, 3) + if err != nil { + return err + } + + re, err := getRegexp(bctx, string(s1)) + if err != nil { + return err + } + matches := re.FindAllStringSubmatch(string(s2), n) + + outer := make([]*ast.Term, len(matches)) + for i := range matches { + inner := make([]*ast.Term, len(matches[i])) + for j := range matches[i] { + inner[j] = ast.StringTerm(matches[i][j]) + } + outer[i] = ast.NewTerm(ast.NewArray(inner...)) + } + + return iter(ast.NewTerm(ast.NewArray(outer...))) +} + +func builtinRegexReplace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + base, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + pattern, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + value, err := builtins.StringOperand(operands[2].Value, 3) + if err != nil { + return err + } + + re, err := getRegexp(bctx, string(pattern)) + if err != nil { + return err + } + + res := re.ReplaceAllString(string(base), string(value)) + + return iter(ast.StringTerm(res)) +} + +func 
init() { + regexpCache = map[string]*regexp.Regexp{} + RegisterBuiltinFunc(ast.RegexIsValid.Name, builtinRegexIsValid) + RegisterBuiltinFunc(ast.RegexMatch.Name, builtinRegexMatch) + RegisterBuiltinFunc(ast.RegexMatchDeprecated.Name, builtinRegexMatch) + RegisterBuiltinFunc(ast.RegexSplit.Name, builtinRegexSplit) + RegisterBuiltinFunc(ast.GlobsMatch.Name, builtinGlobsMatch) + RegisterBuiltinFunc(ast.RegexTemplateMatch.Name, builtinRegexMatchTemplate) + RegisterBuiltinFunc(ast.RegexFind.Name, builtinRegexFind) + RegisterBuiltinFunc(ast.RegexFindAllStringSubmatch.Name, builtinRegexFindAllStringSubmatch) + RegisterBuiltinFunc(ast.RegexReplace.Name, builtinRegexReplace) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/regex_template.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/regex_template.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go new file mode 100644 index 000000000..170e6e640 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/resolver.go @@ -0,0 +1,107 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/metrics" + "github.com/open-policy-agent/opa/v1/resolver" +) + +type resolverTrie struct { + r resolver.Resolver + children map[ast.Value]*resolverTrie +} + +func newResolverTrie() *resolverTrie { + return &resolverTrie{children: map[ast.Value]*resolverTrie{}} +} + +func (t *resolverTrie) Put(ref ast.Ref, r resolver.Resolver) { + node := t + for _, t := range ref { + child, ok := node.children[t.Value] + if !ok { + child = &resolverTrie{children: map[ast.Value]*resolverTrie{}} + node.children[t.Value] = child + } + node = child + } + node.r = r +} + +func (t *resolverTrie) Resolve(e *eval, ref ast.Ref) (ast.Value, error) { + e.metrics.Timer(metrics.RegoExternalResolve).Start() + defer e.metrics.Timer(metrics.RegoExternalResolve).Stop() + node := t + for i, t := range ref { + child, ok := node.children[t.Value] + if !ok { + return nil, nil + } + node = child + if node.r != nil { + in := resolver.Input{ + Ref: ref[:i+1], + Input: e.input, + Metrics: e.metrics, + } + e.traceWasm(e.query[e.index], &in.Ref) + if e.data != nil { + return nil, errInScopeWithStmt + } + result, err := node.r.Eval(e.ctx, in) + if err != nil { + return nil, err + } + if result.Value == nil { + return nil, nil + } + val, err := result.Value.Find(ref[i+1:]) + if err != nil { + return nil, nil + } + return val, nil + } + } + return node.mktree(e, resolver.Input{ + Ref: ref, + Input: e.input, + Metrics: e.metrics, + }) +} + +func (t *resolverTrie) mktree(e *eval, in resolver.Input) (ast.Value, error) { + if t.r != nil { + e.traceWasm(e.query[e.index], &in.Ref) + if e.data != nil { + return nil, errInScopeWithStmt + } + result, err := t.r.Eval(e.ctx, in) + if err != nil { + return nil, err + } + if result.Value == nil { + return nil, nil + } + return result.Value, nil + } + obj := ast.NewObject() + for k, child := range t.children { + v, err := child.mktree(e, resolver.Input{Ref: append(in.Ref, 
ast.NewTerm(k)), Input: in.Input, Metrics: in.Metrics}) + if err != nil { + return nil, err + } + if v != nil { + obj.Insert(ast.NewTerm(k), ast.NewTerm(v)) + } + } + return obj, nil +} + +var errInScopeWithStmt = &Error{ + Code: InternalErr, + Message: "wasm cannot be executed when 'with' statements are in-scope", +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/runtime.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go similarity index 88% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/runtime.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go index 7d512f7c0..932322583 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/runtime.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/runtime.go @@ -7,17 +7,21 @@ package topdown import ( "fmt" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) +var configStringTerm = ast.StringTerm("config") + +var nothingResolver ast.Resolver = illegalResolver{} + func builtinOPARuntime(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error { if bctx.Runtime == nil { - return iter(ast.ObjectTerm()) + return iter(ast.InternedEmptyObject) } - if bctx.Runtime.Get(ast.StringTerm("config")) != nil { - iface, err := ast.ValueToInterface(bctx.Runtime.Value, illegalResolver{}) + if bctx.Runtime.Get(configStringTerm) != nil { + iface, err := ast.ValueToInterface(bctx.Runtime.Value, nothingResolver) if err != nil { return err } diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/save.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/save.go similarity index 96% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/save.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/save.go index 0468692cc..439f554a3 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/save.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/save.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/v1/ast" ) // saveSet contains a stack of terms that are considered 'unknown' during @@ -365,7 +365,13 @@ func saveRequired(c *ast.Compiler, ic *inliningControl, icIgnoreInternal bool, s } switch node := node.(type) { case *ast.Expr: - found = len(node.With) > 0 || ignoreExprDuringPartial(node) + found = len(node.With) > 0 + if found { + return found + } + if !ic.nondeterministicBuiltins { // skip evaluating non-det builtins for PE + found = ignoreExprDuringPartial(node) + } case *ast.Term: switch v := node.Value.(type) { case ast.Var: @@ -422,8 +428,9 @@ func ignoreDuringPartial(bi *ast.Builtin) bool { } type inliningControl struct { - shallow bool - disable []disableInliningFrame + shallow bool + disable []disableInliningFrame + nondeterministicBuiltins bool // evaluate non-det builtins during PE (if args are known) } type disableInliningFrame struct { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go new file mode 100644 index 000000000..0e7daaeae --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/semver.go @@ -0,0 +1,59 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "fmt" + + "github.com/open-policy-agent/opa/internal/semver" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +func builtinSemVerCompare(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + versionStringA, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + versionStringB, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + versionA, err := semver.NewVersion(string(versionStringA)) + if err != nil { + return fmt.Errorf("operand 1: string %s is not a valid SemVer", versionStringA) + } + versionB, err := semver.NewVersion(string(versionStringB)) + if err != nil { + return fmt.Errorf("operand 2: string %s is not a valid SemVer", versionStringB) + } + + result := versionA.Compare(*versionB) + + return iter(ast.InternedIntNumberTerm(result)) +} + +func builtinSemVerIsValid(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + versionString, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return iter(ast.InternedBooleanTerm(false)) + } + + result := true + + _, err = semver.NewVersion(string(versionString)) + if err != nil { + result = false + } + + return iter(ast.InternedBooleanTerm(result)) +} + +func init() { + RegisterBuiltinFunc(ast.SemVerCompare.Name, builtinSemVerCompare) + RegisterBuiltinFunc(ast.SemVerIsValid.Name, builtinSemVerIsValid) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/sets.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go similarity index 95% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/sets.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go index a973404f3..b7566b8e6 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/sets.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/sets.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) // Deprecated in v0.4.2 in favour of minus/infix "-" operation. diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go new file mode 100644 index 000000000..929a18ea0 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/strings.go @@ -0,0 +1,723 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "fmt" + "math/big" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/tchap/go-patricia/v2/patricia" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/util" +) + +func builtinAnyPrefixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + a, b := operands[0].Value, operands[1].Value + + var strs []string + switch a := a.(type) { + case ast.String: + strs = []string{string(a)} + case *ast.Array, ast.Set: + var err error + strs, err = builtins.StringSliceOperand(a, 1) + if err != nil { + return err + } + default: + return builtins.NewOperandTypeErr(1, a, "string", "set", "array") + } + + var prefixes []string + switch b := b.(type) { + case ast.String: + prefixes = []string{string(b)} + case *ast.Array, ast.Set: + var err error + prefixes, err = builtins.StringSliceOperand(b, 2) + if err != nil { + return err + } + default: + return builtins.NewOperandTypeErr(2, b, "string", "set", "array") + } + + return iter(ast.InternedBooleanTerm(anyStartsWithAny(strs, prefixes))) +} + +func builtinAnySuffixMatch(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + a, b := operands[0].Value, operands[1].Value + + var strsReversed []string + switch a := a.(type) { + case ast.String: + strsReversed = []string{reverseString(string(a))} + case *ast.Array, ast.Set: + strs, err := builtins.StringSliceOperand(a, 1) + if err != nil { + return err + } + strsReversed = make([]string, len(strs)) + for i := range strs { + strsReversed[i] = reverseString(strs[i]) + } + default: + return builtins.NewOperandTypeErr(1, a, "string", "set", "array") + } + + var suffixesReversed []string + switch b := b.(type) { + case ast.String: + suffixesReversed = []string{reverseString(string(b))} + case *ast.Array, ast.Set: + suffixes, err := builtins.StringSliceOperand(b, 2) + if err != nil { + return err + } + suffixesReversed = make([]string, len(suffixes)) + for i := range suffixes { + suffixesReversed[i] = reverseString(suffixes[i]) + } + default: + return builtins.NewOperandTypeErr(2, b, "string", "set", "array") + } + + return iter(ast.InternedBooleanTerm(anyStartsWithAny(strsReversed, suffixesReversed))) +} + +func anyStartsWithAny(strs []string, prefixes []string) bool { + if len(strs) == 0 || len(prefixes) == 0 { + return false + } + if len(strs) == 1 && len(prefixes) == 1 { + return strings.HasPrefix(strs[0], prefixes[0]) + } + + trie := patricia.NewTrie() + for i := 0; i < len(strs); i++ { + trie.Insert([]byte(strs[i]), true) + } + + for i := 0; i < len(prefixes); i++ { + if trie.MatchSubtree([]byte(prefixes[i])) { + return true + } + } + + return false +} + +func builtinFormatInt(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + input, err := builtins.NumberOperand(operands[0].Value, 1) + if err != nil { + return err + } + + base, err := builtins.NumberOperand(operands[1].Value, 2) + if err != nil { + return err + } + + var format string + switch base { + case ast.Number("2"): + format = "%b" + case ast.Number("8"): + format = "%o" + case ast.Number("10"): + format = "%d" + case ast.Number("16"): + format = "%x" + default: + return builtins.NewOperandEnumErr(2, "2", "8", "10", "16") + } + + f := builtins.NumberToFloat(input) + i, _ := f.Int(nil) + + return iter(ast.StringTerm(fmt.Sprintf(format, i))) +} + +func builtinConcat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) 
error) error { + + join, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + var strs []string + + switch b := operands[1].Value.(type) { + case *ast.Array: + var l int + for i := 0; i < b.Len(); i++ { + s, ok := b.Elem(i).Value.(ast.String) + if !ok { + return builtins.NewOperandElementErr(2, operands[1].Value, b.Elem(i).Value, "string") + } + l += len(string(s)) + } + + if b.Len() == 1 { + return iter(b.Elem(0)) + } + + strs = make([]string, 0, l) + for i := 0; i < b.Len(); i++ { + strs = append(strs, string(b.Elem(i).Value.(ast.String))) + } + + case ast.Set: + var l int + terms := b.Slice() + for i := 0; i < len(terms); i++ { + s, ok := terms[i].Value.(ast.String) + if !ok { + return builtins.NewOperandElementErr(2, operands[1].Value, terms[i].Value, "string") + } + l += len(string(s)) + } + + if b.Len() == 1 { + return iter(b.Slice()[0]) + } + + strs = make([]string, 0, l) + for i := 0; i < b.Len(); i++ { + strs = append(strs, string(terms[i].Value.(ast.String))) + } + + default: + return builtins.NewOperandTypeErr(2, operands[1].Value, "set", "array") + } + + return iter(ast.StringTerm(strings.Join(strs, string(join)))) +} + +func runesEqual(a, b []rune) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +func builtinIndexOf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + base, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + search, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + if len(string(search)) == 0 { + return fmt.Errorf("empty search character") + } + + if isASCII(string(base)) && isASCII(string(search)) { + return iter(ast.InternedIntNumberTerm(strings.Index(string(base), string(search)))) + } + + baseRunes := []rune(string(base)) + searchRunes := []rune(string(search)) + searchLen := len(searchRunes) + + for i, r := range baseRunes { + if len(baseRunes) >= i+searchLen { + if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) { + return iter(ast.InternedIntNumberTerm(i)) + } + } else { + break + } + } + + return iter(ast.InternedIntNumberTerm(-1)) +} + +func builtinIndexOfN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + base, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + search, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + if len(string(search)) == 0 { + return fmt.Errorf("empty search character") + } + + baseRunes := []rune(string(base)) + searchRunes := []rune(string(search)) + searchLen := len(searchRunes) + + var arr []*ast.Term + for i, r := range baseRunes { + if len(baseRunes) >= i+searchLen { + if r == searchRunes[0] && runesEqual(baseRunes[i:i+searchLen], searchRunes) { + arr = append(arr, ast.InternedIntNumberTerm(i)) + } + } else { + break + } + } + + return iter(ast.ArrayTerm(arr...)) +} + +func builtinSubstring(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + base, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + startIndex, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + length, err := builtins.IntOperand(operands[2].Value, 3) + if err != nil { + return err + } + + if startIndex < 0 { + return fmt.Errorf("negative offset") + } + + sbase := string(base) + if sbase == "" { + return 
iter(ast.InternedEmptyString) + } + + // Optimized path for the likely common case of ASCII strings. + // This allocates less memory and runs in about 1/3 the time. + if isASCII(sbase) { + if startIndex >= len(sbase) { + return iter(ast.InternedEmptyString) + } + + if length < 0 { + return iter(ast.StringTerm(sbase[startIndex:])) + } + + upto := startIndex + length + if len(sbase) < upto { + upto = len(sbase) + } + return iter(ast.StringTerm(sbase[startIndex:upto])) + } + + runes := []rune(base) + + if startIndex >= len(runes) { + return iter(ast.InternedEmptyString) + } + + var s string + if length < 0 { + s = string(runes[startIndex:]) + } else { + upto := startIndex + length + if len(runes) < upto { + upto = len(runes) + } + s = string(runes[startIndex:upto]) + } + + return iter(ast.StringTerm(s)) +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] > unicode.MaxASCII { + return false + } + } + return true +} + +func builtinContains(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + substr, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + return iter(ast.InternedBooleanTerm(strings.Contains(string(s), string(substr)))) +} + +func builtinStringCount(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + substr, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + baseTerm := string(s) + searchTerm := string(substr) + count := strings.Count(baseTerm, searchTerm) + + return iter(ast.InternedIntNumberTerm(count)) +} + +func builtinStartsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + prefix, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + return iter(ast.InternedBooleanTerm(strings.HasPrefix(string(s), string(prefix)))) +} + +func builtinEndsWith(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + suffix, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + return iter(ast.InternedBooleanTerm(strings.HasSuffix(string(s), string(suffix)))) +} + +func builtinLower(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + return iter(ast.StringTerm(strings.ToLower(string(s)))) +} + +func builtinUpper(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + return iter(ast.StringTerm(strings.ToUpper(string(s)))) +} + +func builtinSplit(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + d, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + if !strings.Contains(string(s), string(d)) { + return iter(ast.ArrayTerm(operands[0])) + } + + elems := strings.Split(string(s), string(d)) + arr := util.NewPtrSlice[ast.Term](len(elems)) + for i := range elems { + 
arr[i].Value = ast.String(elems[i]) + } + + return iter(ast.ArrayTerm(arr...)) +} + +func builtinReplace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + old, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + n, err := builtins.StringOperand(operands[2].Value, 3) + if err != nil { + return err + } + + replaced := strings.Replace(string(s), string(old), string(n), -1) + if replaced == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(replaced)) +} + +func builtinReplaceN(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + patterns, err := builtins.ObjectOperand(operands[0].Value, 1) + if err != nil { + return err + } + keys := patterns.Keys() + sort.Slice(keys, func(i, j int) bool { return ast.Compare(keys[i].Value, keys[j].Value) < 0 }) + + s, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + oldnewArr := make([]string, 0, len(keys)*2) + for _, k := range keys { + keyVal, ok := k.Value.(ast.String) + if !ok { + return builtins.NewOperandErr(1, "non-string key found in pattern object") + } + val := patterns.Get(k) // cannot be nil + strVal, ok := val.Value.(ast.String) + if !ok { + return builtins.NewOperandErr(1, "non-string value found in pattern object") + } + oldnewArr = append(oldnewArr, string(keyVal), string(strVal)) + } + + return iter(ast.StringTerm(strings.NewReplacer(oldnewArr...).Replace(string(s)))) +} + +func builtinTrim(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + c, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + trimmed := strings.Trim(string(s), string(c)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(strings.Trim(string(s), string(c)))) +} + +func builtinTrimLeft(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + c, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + trimmed := strings.TrimLeft(string(s), string(c)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(trimmed)) +} + +func builtinTrimPrefix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + pre, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + trimmed := strings.TrimPrefix(string(s), string(pre)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(trimmed)) +} + +func builtinTrimRight(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + c, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + trimmed := strings.TrimRight(string(s), string(c)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(trimmed)) +} + +func builtinTrimSuffix(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + 
if err != nil { + return err + } + + suf, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + trimmed := strings.TrimSuffix(string(s), string(suf)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(trimmed)) +} + +func builtinTrimSpace(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + trimmed := strings.TrimSpace(string(s)) + if trimmed == string(s) { + return iter(operands[0]) + } + + return iter(ast.StringTerm(trimmed)) +} + +func builtinSprintf(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + astArr, ok := operands[1].Value.(*ast.Array) + if !ok { + return builtins.NewOperandTypeErr(2, operands[1].Value, "array") + } + + // Optimized path for where sprintf is used as a "to_string" function for + // a single integer, i.e. sprintf("%d", [x]) where x is an integer. + if s == "%d" && astArr.Len() == 1 { + if n, ok := astArr.Elem(0).Value.(ast.Number); ok { + if i, ok := n.Int(); ok { + return iter(ast.StringTerm(strconv.Itoa(i))) + } + } + } + + args := make([]any, astArr.Len()) + + for i := range args { + switch v := astArr.Elem(i).Value.(type) { + case ast.Number: + if n, ok := v.Int(); ok { + args[i] = n + } else if b, ok := new(big.Int).SetString(v.String(), 10); ok { + args[i] = b + } else if f, ok := v.Float64(); ok { + args[i] = f + } else { + args[i] = v.String() + } + case ast.String: + args[i] = string(v) + default: + args[i] = astArr.Elem(i).String() + } + } + + return iter(ast.StringTerm(fmt.Sprintf(string(s), args...))) +} + +func builtinReverse(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + s, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + return iter(ast.StringTerm(reverseString(string(s)))) +} + +func reverseString(str string) string { + var buf []byte + var arr [255]byte + size := len(str) + + if size < 255 { + buf = arr[:size:size] + } else { + buf = make([]byte, size) + } + + for start := 0; start < size; { + r, n := utf8.DecodeRuneInString(str[start:]) + start += n + utf8.EncodeRune(buf[size-start:], r) + } + + return string(buf) +} + +func init() { + RegisterBuiltinFunc(ast.FormatInt.Name, builtinFormatInt) + RegisterBuiltinFunc(ast.Concat.Name, builtinConcat) + RegisterBuiltinFunc(ast.IndexOf.Name, builtinIndexOf) + RegisterBuiltinFunc(ast.IndexOfN.Name, builtinIndexOfN) + RegisterBuiltinFunc(ast.Substring.Name, builtinSubstring) + RegisterBuiltinFunc(ast.Contains.Name, builtinContains) + RegisterBuiltinFunc(ast.StringCount.Name, builtinStringCount) + RegisterBuiltinFunc(ast.StartsWith.Name, builtinStartsWith) + RegisterBuiltinFunc(ast.EndsWith.Name, builtinEndsWith) + RegisterBuiltinFunc(ast.Upper.Name, builtinUpper) + RegisterBuiltinFunc(ast.Lower.Name, builtinLower) + RegisterBuiltinFunc(ast.Split.Name, builtinSplit) + RegisterBuiltinFunc(ast.Replace.Name, builtinReplace) + RegisterBuiltinFunc(ast.ReplaceN.Name, builtinReplaceN) + RegisterBuiltinFunc(ast.Trim.Name, builtinTrim) + RegisterBuiltinFunc(ast.TrimLeft.Name, builtinTrimLeft) + RegisterBuiltinFunc(ast.TrimPrefix.Name, builtinTrimPrefix) + RegisterBuiltinFunc(ast.TrimRight.Name, builtinTrimRight) + RegisterBuiltinFunc(ast.TrimSuffix.Name, builtinTrimSuffix) + RegisterBuiltinFunc(ast.TrimSpace.Name, builtinTrimSpace) 
+ RegisterBuiltinFunc(ast.Sprintf.Name, builtinSprintf) + RegisterBuiltinFunc(ast.AnyPrefixMatch.Name, builtinAnyPrefixMatch) + RegisterBuiltinFunc(ast.AnySuffixMatch.Name, builtinAnySuffixMatch) + RegisterBuiltinFunc(ast.StringReverse.Name, builtinReverse) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/subset.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go similarity index 82% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/subset.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go index 7b152a5ef..29354d973 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/subset.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/subset.go @@ -5,8 +5,8 @@ package topdown import ( - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func bothObjects(t1, t2 *ast.Term) (bool, ast.Object, ast.Object) { @@ -88,9 +88,8 @@ func arraySet(t1, t2 *ast.Term) (bool, *ast.Array, ast.Set) { // associated with a key. func objectSubset(super ast.Object, sub ast.Object) bool { var superTerm *ast.Term - isSubset := true - sub.Until(func(key, subTerm *ast.Term) bool { + notSubset := sub.Until(func(key, subTerm *ast.Term) bool { // This really wants to be a for loop, hence the somewhat // weird internal structure. However, using Until() in this // was is a performance optimization, as it avoids performing @@ -98,10 +97,9 @@ func objectSubset(super ast.Object, sub ast.Object) bool { superTerm = super.Get(key) - // subTerm is can't be nil because we got it from Until(), so + // subTerm can't be nil because we got it from Until(), so // we only need to verify that super is non-nil. if superTerm == nil { - isSubset = false return true // break, not a subset } @@ -114,58 +112,39 @@ func objectSubset(super ast.Object, sub ast.Object) bool { // them normally. If only one term is an object, then we // do a normal comparison which will come up false. if ok, superObj, subObj := bothObjects(superTerm, subTerm); ok { - if !objectSubset(superObj, subObj) { - isSubset = false - return true // break, not a subset - } - - return false // continue + return !objectSubset(superObj, subObj) } if ok, superSet, subSet := bothSets(superTerm, subTerm); ok { - if !setSubset(superSet, subSet) { - isSubset = false - return true // break, not a subset - } - - return false // continue + return !setSubset(superSet, subSet) } if ok, superArray, subArray := bothArrays(superTerm, subTerm); ok { - if !arraySubset(superArray, subArray) { - isSubset = false - return true // break, not a subset - } - - return false // continue + return !arraySubset(superArray, subArray) } // We have already checked for exact equality, as well as for // all of the types of nested subsets we care about, so if we // get here it means this isn't a subset. - isSubset = false return true // break, not a subset }) - return isSubset + return !notSubset } // setSubset implements the subset operation on sets. // // Unlike in the object case, this is not recursive, we just compare values -// using ast.Set.Contains() because we have no well defined way to "match up" +// using ast.Set.Contains() because we have no well-defined way to "match up" // objects that are in different sets. 
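+// For example, {1, 3} is a subset of {1, 2, 3}, while {1, 4} is not, because
+// 4 is not contained in the super set.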
func setSubset(super ast.Set, sub ast.Set) bool { - isSubset := true - sub.Until(func(t *ast.Term) bool { - if !super.Contains(t) { - isSubset = false - return true + for _, elem := range sub.Slice() { + if !super.Contains(elem) { + return false } - return false - }) + } - return isSubset + return true } // arraySubset implements the subset operation on arrays. @@ -197,12 +176,12 @@ func arraySubset(super, sub *ast.Array) bool { return false } - subElem := sub.Elem(subCursor) superElem := super.Elem(superCursor + subCursor) if superElem == nil { return false } + subElem := sub.Elem(subCursor) if superElem.Value.Compare(subElem.Value) == 0 { subCursor++ } else { @@ -237,22 +216,22 @@ func builtinObjectSubset(_ BuiltinContext, operands []*ast.Term, iter func(*ast. if ok, superObj, subObj := bothObjects(superTerm, subTerm); ok { // Both operands are objects. - return iter(ast.BooleanTerm(objectSubset(superObj, subObj))) + return iter(ast.InternedBooleanTerm(objectSubset(superObj, subObj))) } if ok, superSet, subSet := bothSets(superTerm, subTerm); ok { // Both operands are sets. - return iter(ast.BooleanTerm(setSubset(superSet, subSet))) + return iter(ast.InternedBooleanTerm(setSubset(superSet, subSet))) } if ok, superArray, subArray := bothArrays(superTerm, subTerm); ok { // Both operands are sets. - return iter(ast.BooleanTerm(arraySubset(superArray, subArray))) + return iter(ast.InternedBooleanTerm(arraySubset(superArray, subArray))) } if ok, superArray, subSet := arraySet(superTerm, subTerm); ok { // Super operand is array and sub operand is set - return iter(ast.BooleanTerm(arraySetSubset(superArray, subSet))) + return iter(ast.InternedBooleanTerm(arraySetSubset(superArray, subSet))) } return builtins.ErrOperand("both arguments object.subset must be of the same type or array and set") diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/template.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go similarity index 90% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/template.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go index cf42477ee..cf4635559 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/template.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/template.go @@ -4,8 +4,8 @@ import ( "bytes" "text/template" - "github.com/open-policy-agent/opa/ast" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" ) func renderTemplate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/time.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/time.go new file mode 100644 index 000000000..1c5ddaa6f --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/time.go @@ -0,0 +1,341 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
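+//
+// This file implements the time built-in functions (time.now_ns, time.parse_ns,
+// time.parse_rfc3339_ns, time.parse_duration_ns, time.format, time.date,
+// time.clock, time.weekday, time.add_date and time.diff).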
+ +package topdown + +import ( + "encoding/json" + "fmt" + "math" + "math/big" + "strconv" + "sync" + "time" + _ "time/tzdata" // this is needed to have LoadLocation when no filesystem tzdata is available + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +var tzCache map[string]*time.Location +var tzCacheMutex *sync.Mutex + +// 1677-09-21T00:12:43.145224192-00:00 +var minDateAllowedForNsConversion = time.Unix(0, math.MinInt64) + +// 2262-04-11T23:47:16.854775807-00:00 +var maxDateAllowedForNsConversion = time.Unix(0, math.MaxInt64) + +func toSafeUnixNano(t time.Time, iter func(*ast.Term) error) error { + if t.Before(minDateAllowedForNsConversion) || t.After(maxDateAllowedForNsConversion) { + return fmt.Errorf("time outside of valid range") + } + + return iter(ast.NewTerm(ast.Number(int64ToJSONNumber(t.UnixNano())))) +} + +func builtinTimeNowNanos(bctx BuiltinContext, _ []*ast.Term, iter func(*ast.Term) error) error { + return iter(bctx.Time) +} + +func builtinTimeParseNanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + format, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + value, err := builtins.StringOperand(operands[1].Value, 2) + if err != nil { + return err + } + + formatStr := string(format) + // look for the formatStr in our acceptedTimeFormats and + // use the constant instead if it matches + if f, ok := acceptedTimeFormats[formatStr]; ok { + formatStr = f + } + result, err := time.Parse(formatStr, string(value)) + if err != nil { + return err + } + + return toSafeUnixNano(result, iter) +} + +func builtinTimeParseRFC3339Nanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + value, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + result, err := time.Parse(time.RFC3339, string(value)) + if err != nil { + return err + } + + return toSafeUnixNano(result, iter) +} +func builtinParseDurationNanos(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + duration, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + value, err := time.ParseDuration(string(duration)) + if err != nil { + return err + } + return iter(ast.NumberTerm(int64ToJSONNumber(int64(value)))) +} + +// Represent exposed constants for formatting from the stdlib time pkg +var acceptedTimeFormats = map[string]string{ + "ANSIC": time.ANSIC, + "UnixDate": time.UnixDate, + "RubyDate": time.RubyDate, + "RFC822": time.RFC822, + "RFC822Z": time.RFC822Z, + "RFC850": time.RFC850, + "RFC1123": time.RFC1123, + "RFC1123Z": time.RFC1123Z, + "RFC3339": time.RFC3339, + "RFC3339Nano": time.RFC3339Nano, +} + +func builtinFormat(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t, layout, err := tzTime(operands[0].Value) + if err != nil { + return err + } + // Using RFC3339Nano time formatting as default + if layout == "" { + layout = time.RFC3339Nano + } else if layoutStr, ok := acceptedTimeFormats[layout]; ok { + // if we can find a constant specified, use the constant + layout = layoutStr + } + // otherwise try to treat the fmt string as a datetime fmt string + + timestamp := t.Format(layout) + return iter(ast.StringTerm(timestamp)) +} + +func builtinDate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t, _, err := tzTime(operands[0].Value) + if err != nil { + return err + } + year, month, day := t.Date() + result := 
ast.NewArray(ast.InternedIntNumberTerm(year), ast.InternedIntNumberTerm(int(month)), ast.InternedIntNumberTerm(day)) + return iter(ast.NewTerm(result)) +} + +func builtinClock(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t, _, err := tzTime(operands[0].Value) + if err != nil { + return err + } + hour, minute, second := t.Clock() + result := ast.NewArray(ast.InternedIntNumberTerm(hour), ast.InternedIntNumberTerm(minute), ast.InternedIntNumberTerm(second)) + return iter(ast.NewTerm(result)) +} + +func builtinWeekday(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t, _, err := tzTime(operands[0].Value) + if err != nil { + return err + } + weekday := t.Weekday().String() + return iter(ast.StringTerm(weekday)) +} + +func builtinAddDate(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t, _, err := tzTime(operands[0].Value) + if err != nil { + return err + } + + years, err := builtins.IntOperand(operands[1].Value, 2) + if err != nil { + return err + } + + months, err := builtins.IntOperand(operands[2].Value, 3) + if err != nil { + return err + } + + days, err := builtins.IntOperand(operands[3].Value, 4) + if err != nil { + return err + } + + result := t.AddDate(years, months, days) + + return toSafeUnixNano(result, iter) +} + +func builtinDiff(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + t1, _, err := tzTime(operands[0].Value) + if err != nil { + return err + } + t2, _, err := tzTime(operands[1].Value) + if err != nil { + return err + } + + // The following implementation of this function is taken + // from https://github.com/icza/gox licensed under Apache 2.0. + // The only modification made is to variable names. + // + // For details, see https://stackoverflow.com/a/36531443/1705598 + // + // Copyright 2021 icza + // BEGIN REDISTRIBUTION FROM APACHE 2.0 LICENSED PROJECT + if t1.Location() != t2.Location() { + t2 = t2.In(t1.Location()) + } + if t1.After(t2) { + t1, t2 = t2, t1 + } + y1, M1, d1 := t1.Date() + y2, M2, d2 := t2.Date() + + h1, m1, s1 := t1.Clock() + h2, m2, s2 := t2.Clock() + + year := y2 - y1 + month := int(M2 - M1) + day := d2 - d1 + hour := h2 - h1 + min := m2 - m1 + sec := s2 - s1 + + // Normalize negative values + if sec < 0 { + sec += 60 + min-- + } + if min < 0 { + min += 60 + hour-- + } + if hour < 0 { + hour += 24 + day-- + } + if day < 0 { + // Days in month: + t := time.Date(y1, M1, 32, 0, 0, 0, 0, time.UTC) + day += 32 - t.Day() + month-- + } + if month < 0 { + month += 12 + year-- + } + // END REDISTRIBUTION FROM APACHE 2.0 LICENSED PROJECT + + return iter(ast.ArrayTerm(ast.InternedIntNumberTerm(year), ast.InternedIntNumberTerm(month), ast.InternedIntNumberTerm(day), + ast.InternedIntNumberTerm(hour), ast.InternedIntNumberTerm(min), ast.InternedIntNumberTerm(sec))) +} + +func tzTime(a ast.Value) (t time.Time, lay string, err error) { + var nVal ast.Value + loc := time.UTC + layout := "" + switch va := a.(type) { + case *ast.Array: + if va.Len() == 0 { + return time.Time{}, layout, builtins.NewOperandTypeErr(1, a, "either number (ns) or [number (ns), string (tz)]") + } + + nVal, err = builtins.NumberOperand(va.Elem(0).Value, 1) + if err != nil { + return time.Time{}, layout, err + } + + if va.Len() > 1 { + tzVal, err := builtins.StringOperand(va.Elem(1).Value, 1) + if err != nil { + return time.Time{}, layout, err + } + + tzName := string(tzVal) + + switch tzName { + case "", "UTC": + // loc is already UTC + + case "Local": + loc = time.Local + + 
default: + var ok bool + + tzCacheMutex.Lock() + loc, ok = tzCache[tzName] + + if !ok { + loc, err = time.LoadLocation(tzName) + if err != nil { + tzCacheMutex.Unlock() + return time.Time{}, layout, err + } + tzCache[tzName] = loc + } + tzCacheMutex.Unlock() + } + } + + if va.Len() > 2 { + lay, err := builtins.StringOperand(va.Elem(2).Value, 1) + if err != nil { + return time.Time{}, layout, err + } + layout = string(lay) + } + + case ast.Number: + nVal = a + + default: + return time.Time{}, layout, builtins.NewOperandTypeErr(1, a, "either number (ns) or [number (ns), string (tz)]") + } + + value, err := builtins.NumberOperand(nVal, 1) + if err != nil { + return time.Time{}, layout, err + } + + f := builtins.NumberToFloat(value) + i64, acc := f.Int64() + if acc != big.Exact { + return time.Time{}, layout, fmt.Errorf("timestamp too big") + } + + t = time.Unix(0, i64).In(loc) + + return t, layout, nil +} + +func int64ToJSONNumber(i int64) json.Number { + return json.Number(strconv.FormatInt(i, 10)) +} + +func init() { + RegisterBuiltinFunc(ast.NowNanos.Name, builtinTimeNowNanos) + RegisterBuiltinFunc(ast.ParseRFC3339Nanos.Name, builtinTimeParseRFC3339Nanos) + RegisterBuiltinFunc(ast.ParseNanos.Name, builtinTimeParseNanos) + RegisterBuiltinFunc(ast.ParseDurationNanos.Name, builtinParseDurationNanos) + RegisterBuiltinFunc(ast.Format.Name, builtinFormat) + RegisterBuiltinFunc(ast.Date.Name, builtinDate) + RegisterBuiltinFunc(ast.Clock.Name, builtinClock) + RegisterBuiltinFunc(ast.Weekday.Name, builtinWeekday) + RegisterBuiltinFunc(ast.AddDate.Name, builtinAddDate) + RegisterBuiltinFunc(ast.Diff.Name, builtinDiff) + tzCacheMutex = &sync.Mutex{} + tzCache = make(map[string]*time.Location) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/topdown/tokens.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go similarity index 77% rename from constraint/vendor/github.com/open-policy-agent/opa/topdown/tokens.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go index 7457f1f15..b44c5a253 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/topdown/tokens.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/tokens.go @@ -21,11 +21,12 @@ import ( "math/big" "strings" - "github.com/open-policy-agent/opa/ast" "github.com/open-policy-agent/opa/internal/jwx/jwa" "github.com/open-policy-agent/opa/internal/jwx/jwk" "github.com/open-policy-agent/opa/internal/jwx/jws" - "github.com/open-policy-agent/opa/topdown/builtins" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" + "github.com/open-policy-agent/opa/v1/topdown/cache" ) var ( @@ -129,8 +130,8 @@ func builtinJWTDecode(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Ter } // Implements RS256 JWT signature verification -func builtinJWTVerifyRS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyRS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPKCS1v15( publicKey, crypto.SHA256, @@ -144,8 +145,8 @@ func builtinJWTVerifyRS256(_ BuiltinContext, operands []*ast.Term, iter func(*as } // 
Implements RS384 JWT signature verification -func builtinJWTVerifyRS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyRS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPKCS1v15( publicKey, crypto.SHA384, @@ -159,8 +160,8 @@ func builtinJWTVerifyRS384(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements RS512 JWT signature verification -func builtinJWTVerifyRS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyRS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPKCS1v15( publicKey, crypto.SHA512, @@ -174,8 +175,8 @@ func builtinJWTVerifyRS512(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements PS256 JWT signature verification -func builtinJWTVerifyPS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyPS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha256.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPSS( publicKey, crypto.SHA256, @@ -190,8 +191,8 @@ func builtinJWTVerifyPS256(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements PS384 JWT signature verification -func builtinJWTVerifyPS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyPS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New384, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPSS( publicKey, crypto.SHA384, @@ -206,8 +207,8 @@ func builtinJWTVerifyPS384(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements PS512 JWT signature verification -func builtinJWTVerifyPS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerifyRSA(operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { +func builtinJWTVerifyPS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerifyRSA(bctx, operands[0].Value, operands[1].Value, sha512.New, func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error { return rsa.VerifyPSS( 
publicKey, crypto.SHA512, @@ -222,8 +223,8 @@ func builtinJWTVerifyPS512(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements RSA JWT signature verification. -func builtinJWTVerifyRSA(a ast.Value, b ast.Value, hasher func() hash.Hash, verify func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error) (ast.Value, error) { - return builtinJWTVerify(a, b, hasher, func(publicKey interface{}, digest []byte, signature []byte) error { +func builtinJWTVerifyRSA(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hasher func() hash.Hash, verify func(publicKey *rsa.PublicKey, digest []byte, signature []byte) error) (ast.Value, error) { + return builtinJWTVerify(bctx, jwt, keyStr, hasher, func(publicKey interface{}, digest []byte, signature []byte) error { publicKeyRsa, ok := publicKey.(*rsa.PublicKey) if !ok { return fmt.Errorf("incorrect public key type") @@ -233,8 +234,8 @@ func builtinJWTVerifyRSA(a ast.Value, b ast.Value, hasher func() hash.Hash, veri } // Implements ES256 JWT signature verification. -func builtinJWTVerifyES256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha256.New, verifyES) +func builtinJWTVerifyES256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha256.New, verifyES) if err == nil { return iter(ast.NewTerm(result)) } @@ -242,8 +243,8 @@ func builtinJWTVerifyES256(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements ES384 JWT signature verification -func builtinJWTVerifyES384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha512.New384, verifyES) +func builtinJWTVerifyES384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha512.New384, verifyES) if err == nil { return iter(ast.NewTerm(result)) } @@ -251,8 +252,8 @@ func builtinJWTVerifyES384(_ BuiltinContext, operands []*ast.Term, iter func(*as } // Implements ES512 JWT signature verification -func builtinJWTVerifyES512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - result, err := builtinJWTVerify(operands[0].Value, operands[1].Value, sha512.New, verifyES) +func builtinJWTVerifyES512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + result, err := builtinJWTVerify(bctx, operands[0].Value, operands[1].Value, sha512.New, verifyES) if err == nil { return iter(ast.NewTerm(result)) } @@ -345,13 +346,17 @@ func getKeyByKid(kid string, keys []verificationKey) *verificationKey { } // Implements JWT signature verification. 
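+// Results are memoized in the inter-query value cache, keyed on the serialized
+// token and the key material, so verifying the same token with the same key
+// again skips decoding and signature verification.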
-func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify func(publicKey interface{}, digest []byte, signature []byte) error) (ast.Value, error) { - token, err := decodeJWT(a) +func builtinJWTVerify(bctx BuiltinContext, jwt ast.Value, keyStr ast.Value, hasher func() hash.Hash, verify func(publicKey interface{}, digest []byte, signature []byte) error) (ast.Value, error) { + if found, _, _, valid := getTokenFromCache(bctx, jwt, keyStr); found { + return ast.Boolean(valid), nil + } + + token, err := decodeJWT(jwt) if err != nil { return nil, err } - s, err := builtins.StringOperand(b, 2) + s, err := builtins.StringOperand(keyStr, 2) if err != nil { return nil, err } @@ -375,6 +380,11 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify return nil, err } + done := func(valid bool) (ast.Boolean, error) { + putTokenInCache(bctx, jwt, keyStr, nil, nil, valid) + return ast.Boolean(valid), nil + } + // Validate the JWT signature // First, check if there's a matching key ID (`kid`) in both token header and key(s). @@ -383,7 +393,7 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify if key := getKeyByKid(header.kid, keys); key != nil { err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) - return ast.Boolean(err == nil), nil + return done(err == nil) } } @@ -395,7 +405,7 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify // we'll need to verify to find out err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) if err == nil { - return ast.Boolean(true), nil + return done(true) } } else { if header.alg != key.alg { @@ -403,48 +413,32 @@ func builtinJWTVerify(a ast.Value, b ast.Value, hasher func() hash.Hash, verify } err = verify(key.key, getInputSHA([]byte(token.header+"."+token.payload), hasher), []byte(signature)) if err == nil { - return ast.Boolean(true), nil + return done(true) } } } // None of the keys worked, return false - return ast.Boolean(false), nil + return done(false) } // Implements HS256 (secret) JWT signature verification -func builtinJWTVerifyHS256(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Decode the JSON Web Token - token, err := decodeJWT(operands[0].Value) - if err != nil { - return err - } - - // Process Secret input - astSecret, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } - secret := string(astSecret) - - mac := hmac.New(sha256.New, []byte(secret)) - _, err = mac.Write([]byte(token.header + "." 
+ token.payload)) - if err != nil { - return err - } +func builtinJWTVerifyHS256(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return builtinJWTVerifyHS(bctx, operands, sha256.New, iter) +} - signature, err := token.decodeSignature() - if err != nil { - return err - } +// Implements HS384 JWT signature verification +func builtinJWTVerifyHS384(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return builtinJWTVerifyHS(bctx, operands, sha512.New384, iter) +} - return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil))))) +// Implements HS512 JWT signature verification +func builtinJWTVerifyHS512(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + return builtinJWTVerifyHS(bctx, operands, sha512.New, iter) } -// Implements HS384 JWT signature verification -func builtinJWTVerifyHS384(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { - // Decode the JSON Web Token - token, err := decodeJWT(operands[0].Value) +func builtinJWTVerifyHS(bctx BuiltinContext, operands []*ast.Term, hashF func() hash.Hash, iter func(*ast.Term) error) error { + jwt, err := builtins.StringOperand(operands[0].Value, 1) if err != nil { return err } @@ -454,38 +448,20 @@ func builtinJWTVerifyHS384(_ BuiltinContext, operands []*ast.Term, iter func(*as if err != nil { return err } - secret := string(astSecret) - - mac := hmac.New(sha512.New384, []byte(secret)) - _, err = mac.Write([]byte(token.header + "." + token.payload)) - if err != nil { - return err - } - signature, err := token.decodeSignature() - if err != nil { - return err + if found, _, _, valid := getTokenFromCache(bctx, jwt, astSecret); found { + return iter(ast.NewTerm(ast.Boolean(valid))) } - return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil))))) -} - -// Implements HS512 JWT signature verification -func builtinJWTVerifyHS512(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { // Decode the JSON Web Token - token, err := decodeJWT(operands[0].Value) + token, err := decodeJWT(jwt) if err != nil { return err } - // Process Secret input - astSecret, err := builtins.StringOperand(operands[1].Value, 2) - if err != nil { - return err - } secret := string(astSecret) - mac := hmac.New(sha512.New, []byte(secret)) + mac := hmac.New(hashF, []byte(secret)) _, err = mac.Write([]byte(token.header + "." 
+ token.payload)) if err != nil { return err @@ -496,7 +472,11 @@ func builtinJWTVerifyHS512(_ BuiltinContext, operands []*ast.Term, iter func(*as return err } - return iter(ast.NewTerm(ast.Boolean(hmac.Equal([]byte(signature), mac.Sum(nil))))) + valid := hmac.Equal([]byte(signature), mac.Sum(nil)) + + putTokenInCache(bctx, jwt, astSecret, nil, nil, valid) + + return iter(ast.NewTerm(ast.Boolean(valid))) } // -- Full JWT verification and decoding -- @@ -1024,7 +1004,7 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func } unverified := ast.ArrayTerm( - ast.BooleanTerm(false), + ast.InternedBooleanTerm(false), ast.NewTerm(ast.NewObject()), ast.NewTerm(ast.NewObject()), ) @@ -1036,57 +1016,115 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func return err } var token *JSONWebToken - var p *ast.Term - for { - // RFC7519 7.2 #1-2 split into parts - if token, err = decodeJWT(a); err != nil { - return err - } - // RFC7519 7.2 #3, #4, #6 - if err := token.decodeHeader(); err != nil { - return err - } - // RFC7159 7.2 #5 (and RFC7159 5.2 #5) validate header fields - header, err := parseTokenHeader(token) - if err != nil { - return err - } - if !header.valid() { + var payload ast.Object + var header ast.Object + + // FIXME: optimize + k, _ := b.Filter(ast.NewObject( + ast.Item(ast.StringTerm("secret"), ast.ObjectTerm()), + ast.Item(ast.StringTerm("cert"), ast.ObjectTerm()), + )) + + if found, th, tp, validSignature := getTokenFromCache(bctx, a, k); found { + if !validSignature { + // For the given token and key(s), the signature is invalid return iter(unverified) } - // Check constraints that impact signature verification. - if constraints.alg != "" && constraints.alg != header.alg { - return iter(unverified) - } - // RFC7159 7.2 #7 verify the signature - signature, err := token.decodeSignature() - if err != nil { - return err + + if th != nil && tp != nil { + header = th + payload = tp + } else { + // Cache entry was created by one of the other built-ins that doesn't decode header/payload + + if token, err = decodeJWT(a); err != nil { + return err + } + + header = token.decodedHeader + + p, err := getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) + if err != nil { + return fmt.Errorf("JWT payload had invalid encoding: %v", err) + } + + payload, err = extractJSONObject(string(p.Value.(ast.String))) + if err != nil { + return err + } + + putTokenInCache(bctx, a, k, header, payload, true) } - if err := constraints.verify(header.kid, header.alg, token.header, token.payload, signature); err != nil { - if err == errSignatureNotVerified { + } else { + var p *ast.Term + + for { + // RFC7519 7.2 #1-2 split into parts + if token, err = decodeJWT(a); err != nil { + return err + } + + // RFC7519 7.2 #3, #4, #6 + if err := token.decodeHeader(); err != nil { + return err + } + + // RFC7159 7.2 #5 (and RFC7159 5.2 #5) validate header fields + header, err := parseTokenHeader(token) + if err != nil { + return err + } + + if !header.valid() { return iter(unverified) } - return err + + // Check constraints that impact signature verification. 
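+			// An explicit "alg" constraint that differs from the token header can
+			// never verify, so return the unverified result immediately.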
+ if constraints.alg != "" && constraints.alg != header.alg { + return iter(unverified) + } + + // RFC7159 7.2 #7 verify the signature + signature, err := token.decodeSignature() + if err != nil { + return err + } + + if err := constraints.verify(header.kid, header.alg, token.header, token.payload, signature); err != nil { + if err == errSignatureNotVerified { + putTokenInCache(bctx, a, k, nil, nil, false) + return iter(unverified) + } + return err + } + + // RFC7159 7.2 #9-10 decode the payload + p, err = getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) + if err != nil { + return fmt.Errorf("JWT payload had invalid encoding: %v", err) + } + + // RFC7159 7.2 #8 and 5.2 cty + if strings.ToUpper(header.cty) == headerJwt { + // Nested JWT, go round again with payload as first argument + a = p.Value + continue + } + + // Non-nested JWT (or we've reached the bottom of the nesting). + break } - // RFC7159 7.2 #9-10 decode the payload - p, err = getResult(builtinBase64UrlDecode, ast.StringTerm(token.payload)) + + payload, err = extractJSONObject(string(p.Value.(ast.String))) if err != nil { - return fmt.Errorf("JWT payload had invalid encoding: %v", err) - } - // RFC7159 7.2 #8 and 5.2 cty - if strings.ToUpper(header.cty) == headerJwt { - // Nested JWT, go round again with payload as first argument - a = p.Value - continue + return err } - // Non-nested JWT (or we've reached the bottom of the nesting). - break - } - payload, err := extractJSONObject(string(p.Value.(ast.String))) - if err != nil { - return err + + header = token.decodedHeader + + putTokenInCache(bctx, a, k, header, payload, true) } + // Check registered claim names against constraints or environment // RFC7159 4.1.1 iss if constraints.iss != "" { @@ -1137,8 +1175,8 @@ func builtinJWTDecodeVerify(bctx BuiltinContext, operands []*ast.Term, iter func } verified := ast.ArrayTerm( - ast.BooleanTerm(true), - ast.NewTerm(token.decodedHeader), + ast.InternedBooleanTerm(true), + ast.NewTerm(header), ast.NewTerm(payload), ) return iter(verified) @@ -1226,7 +1264,63 @@ func getInputSHA(input []byte, h func() hash.Hash) []byte { return hasher.Sum(nil) } +type jwtCacheEntry struct { + payload ast.Object + header ast.Object + validSignature bool +} + +const tokenCacheName = "io_jwt" + +func getTokenFromCache(bctx BuiltinContext, serializedJwt ast.Value, publicKey ast.Value) (bool, ast.Object, ast.Object, bool) { + if bctx.InterQueryBuiltinValueCache == nil { + return false, nil, nil, false + } + + c := bctx.InterQueryBuiltinValueCache.GetCache(tokenCacheName) + if c == nil { + return false, nil, nil, false + } + + key := createTokenCacheKey(serializedJwt, publicKey) + + entry, ok := c.Get(key) + if !ok { + return false, nil, nil, false + } + + if jwtEntry, ok := entry.(jwtCacheEntry); ok { + return true, jwtEntry.header, jwtEntry.payload, jwtEntry.validSignature + } + + return false, nil, nil, false +} + +func putTokenInCache(bctx BuiltinContext, serializedJwt ast.Value, publicKey ast.Value, header ast.Object, payload ast.Object, validSignature bool) { + if bctx.InterQueryBuiltinValueCache == nil { + return + } + + c := bctx.InterQueryBuiltinValueCache.GetCache(tokenCacheName) + if c == nil { + return + } + + key := createTokenCacheKey(serializedJwt, publicKey) + + c.Insert(key, jwtCacheEntry{header: header, payload: payload, validSignature: validSignature}) +} + +func createTokenCacheKey(serializedJwt ast.Value, publicKey ast.Value) ast.Value { + // We need to create a key that is unique to the serialized JWT (for lookup) and 
the public key used to verify it, + // so that we don't get a misleading cached validation result for a different, invalid key. + return ast.NewArray(ast.NewTerm(serializedJwt), ast.NewTerm(publicKey)) +} + func init() { + // By default, the JWT cache is disabled. + cache.RegisterDefaultInterQueryBuiltinValueCacheConfig(tokenCacheName, nil) + RegisterBuiltinFunc(ast.JWTDecode.Name, builtinJWTDecode) RegisterBuiltinFunc(ast.JWTVerifyRS256.Name, builtinJWTVerifyRS256) RegisterBuiltinFunc(ast.JWTVerifyRS384.Name, builtinJWTVerifyRS384) diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go new file mode 100644 index 000000000..1c45ef23b --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/trace.go @@ -0,0 +1,902 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "bytes" + "fmt" + "io" + "slices" + "strings" + + iStrs "github.com/open-policy-agent/opa/internal/strings" + + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +const ( + minLocationWidth = 5 // len("query") + maxIdealLocationWidth = 64 + columnPadding = 4 + maxExprVarWidth = 32 + maxPrettyExprVarWidth = 64 +) + +// Op defines the types of tracing events. +type Op string + +const ( + // EnterOp is emitted when a new query is about to be evaluated. + EnterOp Op = "Enter" + + // ExitOp is emitted when a query has evaluated to true. + ExitOp Op = "Exit" + + // EvalOp is emitted when an expression is about to be evaluated. + EvalOp Op = "Eval" + + // RedoOp is emitted when an expression, rule, or query is being re-evaluated. + RedoOp Op = "Redo" + + // SaveOp is emitted when an expression is saved instead of evaluated + // during partial evaluation. + SaveOp Op = "Save" + + // FailOp is emitted when an expression evaluates to false. + FailOp Op = "Fail" + + // DuplicateOp is emitted when a query has produced a duplicate value. The search + // will stop at the point where the duplicate was emitted and backtrack. + DuplicateOp Op = "Duplicate" + + // NoteOp is emitted when an expression invokes a tracing built-in function. + NoteOp Op = "Note" + + // IndexOp is emitted during an expression evaluation to represent lookup + // matches. + IndexOp Op = "Index" + + // WasmOp is emitted when resolving a ref using an external + // Resolver. + WasmOp Op = "Wasm" + + // UnifyOp is emitted when two terms are unified. Node will be set to an + // equality expression with the two terms. This Node will not have location + // info. + UnifyOp Op = "Unify" + FailedAssertionOp Op = "FailedAssertion" +) + +// VarMetadata provides some user facing information about +// a variable in some policy. +type VarMetadata struct { + Name ast.Var `json:"name"` + Location *ast.Location `json:"location"` +} + +// Event contains state associated with a tracing event. +type Event struct { + Op Op // Identifies type of event. + Node ast.Node // Contains AST node relevant to the event. + Location *ast.Location // The location of the Node this event relates to. + QueryID uint64 // Identifies the query this event belongs to. + ParentID uint64 // Identifies the parent query this event belongs to. + Locals *ast.ValueMap // Contains local variable bindings from the query context. Nil if variables were not included in the trace event. 
+ LocalMetadata map[ast.Var]VarMetadata // Contains metadata for the local variable bindings. Nil if variables were not included in the trace event. + Message string // Contains message for Note events. + Ref *ast.Ref // Identifies the subject ref for the event. Only applies to Index and Wasm operations. + + input *ast.Term + bindings *bindings + localVirtualCacheSnapshot *ast.ValueMap +} + +func (evt *Event) WithInput(input *ast.Term) *Event { + evt.input = input + return evt +} + +// HasRule returns true if the Event contains an ast.Rule. +func (evt *Event) HasRule() bool { + _, ok := evt.Node.(*ast.Rule) + return ok +} + +// HasBody returns true if the Event contains an ast.Body. +func (evt *Event) HasBody() bool { + _, ok := evt.Node.(ast.Body) + return ok +} + +// HasExpr returns true if the Event contains an ast.Expr. +func (evt *Event) HasExpr() bool { + _, ok := evt.Node.(*ast.Expr) + return ok +} + +// Equal returns true if this event is equal to the other event. +func (evt *Event) Equal(other *Event) bool { + if evt.Op != other.Op { + return false + } + if evt.QueryID != other.QueryID { + return false + } + if evt.ParentID != other.ParentID { + return false + } + if !evt.equalNodes(other) { + return false + } + return evt.Locals.Equal(other.Locals) +} + +func (evt *Event) String() string { + return fmt.Sprintf("%v %v %v (qid=%v, pqid=%v)", evt.Op, evt.Node, evt.Locals, evt.QueryID, evt.ParentID) +} + +// Input returns the input object as it was at the event. +func (evt *Event) Input() *ast.Term { + return evt.input +} + +// Plug plugs event bindings into the provided ast.Term. Because bindings are mutable, this only makes sense to do when +// the event is emitted rather than on recorded trace events as the bindings are going to be different by then. +func (evt *Event) Plug(term *ast.Term) *ast.Term { + return evt.bindings.Plug(term) +} + +func (evt *Event) equalNodes(other *Event) bool { + switch a := evt.Node.(type) { + case ast.Body: + if b, ok := other.Node.(ast.Body); ok { + return a.Equal(b) + } + case *ast.Rule: + if b, ok := other.Node.(*ast.Rule); ok { + return a.Equal(b) + } + case *ast.Expr: + if b, ok := other.Node.(*ast.Expr); ok { + return a.Equal(b) + } + case nil: + return other.Node == nil + } + return false +} + +// Tracer defines the interface for tracing in the top-down evaluation engine. +// Deprecated: Use QueryTracer instead. +type Tracer interface { + Enabled() bool + Trace(*Event) +} + +// QueryTracer defines the interface for tracing in the top-down evaluation engine. +// The implementation can provide additional configuration to modify the tracing +// behavior for query evaluations. +type QueryTracer interface { + Enabled() bool + TraceEvent(Event) + Config() TraceConfig +} + +// TraceConfig defines some common configuration for Tracer implementations +type TraceConfig struct { + PlugLocalVars bool // Indicate whether to plug local variable bindings before calling into the tracer. +} + +// legacyTracer Implements the QueryTracer interface by wrapping an older Tracer instance. +type legacyTracer struct { + t Tracer +} + +func (l *legacyTracer) Enabled() bool { + return l.t.Enabled() +} + +func (l *legacyTracer) Config() TraceConfig { + return TraceConfig{ + PlugLocalVars: true, // For backwards compatibility old tracers will plug local variables + } +} + +func (l *legacyTracer) TraceEvent(evt Event) { + l.t.Trace(&evt) +} + +// WrapLegacyTracer will create a new QueryTracer which wraps an +// older Tracer instance. 
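+//
+// The wrapped tracer always reports PlugLocalVars as enabled, so older Tracer
+// implementations keep receiving plugged local variable bindings.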
+func WrapLegacyTracer(tracer Tracer) QueryTracer { + return &legacyTracer{t: tracer} +} + +// BufferTracer implements the Tracer and QueryTracer interface by +// simply buffering all events received. +type BufferTracer []*Event + +// NewBufferTracer returns a new BufferTracer. +func NewBufferTracer() *BufferTracer { + return &BufferTracer{} +} + +// Enabled always returns true if the BufferTracer is instantiated. +func (b *BufferTracer) Enabled() bool { + return b != nil +} + +// Trace adds the event to the buffer. +// Deprecated: Use TraceEvent instead. +func (b *BufferTracer) Trace(evt *Event) { + *b = append(*b, evt) +} + +// TraceEvent adds the event to the buffer. +func (b *BufferTracer) TraceEvent(evt Event) { + *b = append(*b, &evt) +} + +// Config returns the Tracers standard configuration +func (b *BufferTracer) Config() TraceConfig { + return TraceConfig{PlugLocalVars: true} +} + +// PrettyTrace pretty prints the trace to the writer. +func PrettyTrace(w io.Writer, trace []*Event) { + PrettyTraceWithOpts(w, trace, PrettyTraceOptions{}) +} + +// PrettyTraceWithLocation prints the trace to the writer and includes location information +func PrettyTraceWithLocation(w io.Writer, trace []*Event) { + PrettyTraceWithOpts(w, trace, PrettyTraceOptions{Locations: true}) +} + +type PrettyTraceOptions struct { + Locations bool // Include location information + ExprVariables bool // Include variables found in the expression + LocalVariables bool // Include all local variables +} + +type traceRow []string + +func (r *traceRow) add(s string) { + *r = append(*r, s) +} + +type traceTable struct { + rows []traceRow + maxWidths []int +} + +func (t *traceTable) add(row traceRow) { + t.rows = append(t.rows, row) + for i := range row { + if i >= len(t.maxWidths) { + t.maxWidths = append(t.maxWidths, len(row[i])) + } else if len(row[i]) > t.maxWidths[i] { + t.maxWidths[i] = len(row[i]) + } + } +} + +func (t *traceTable) write(w io.Writer, padding int) { + for _, row := range t.rows { + for i, cell := range row { + width := t.maxWidths[i] + padding + if i < len(row)-1 { + _, _ = fmt.Fprintf(w, "%-*s ", width, cell) + } else { + _, _ = fmt.Fprintf(w, "%s", cell) + } + } + _, _ = fmt.Fprintln(w) + } +} + +func PrettyTraceWithOpts(w io.Writer, trace []*Event, opts PrettyTraceOptions) { + depths := depths{} + + // FIXME: Can we shorten each location as we process each trace event instead of beforehand? 
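+	// File paths are shortened up front so that, when locations are rendered,
+	// the location column stays within maxIdealLocationWidth.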
+ filePathAliases, _ := getShortenedFileNames(trace) + + table := traceTable{} + + for _, event := range trace { + depth := depths.GetOrSet(event.QueryID, event.ParentID) + row := traceRow{} + + if opts.Locations { + location := formatLocation(event, filePathAliases) + row.add(location) + } + + row.add(formatEvent(event, depth)) + + if opts.ExprVariables { + vars := exprLocalVars(event) + keys := sortedKeys(vars) + + buf := new(bytes.Buffer) + buf.WriteString("{") + for i, k := range keys { + if i > 0 { + buf.WriteString(", ") + } + _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(vars.Get(k).String(), maxExprVarWidth)) + } + buf.WriteString("}") + row.add(buf.String()) + } + + if opts.LocalVariables { + if locals := event.Locals; locals != nil { + keys := sortedKeys(locals) + + buf := new(bytes.Buffer) + buf.WriteString("{") + for i, k := range keys { + if i > 0 { + buf.WriteString(", ") + } + _, _ = fmt.Fprintf(buf, "%v: %s", k, iStrs.Truncate(locals.Get(k).String(), maxExprVarWidth)) + } + buf.WriteString("}") + row.add(buf.String()) + } else { + row.add("{}") + } + } + + table.add(row) + } + + table.write(w, columnPadding) +} + +func sortedKeys(vm *ast.ValueMap) []ast.Value { + keys := make([]ast.Value, 0, vm.Len()) + vm.Iter(func(k, _ ast.Value) bool { + keys = append(keys, k) + return false + }) + slices.SortFunc(keys, func(a, b ast.Value) int { + return strings.Compare(a.String(), b.String()) + }) + return keys +} + +func exprLocalVars(e *Event) *ast.ValueMap { + vars := ast.NewValueMap() + + findVars := func(term *ast.Term) bool { + //if r, ok := term.Value.(ast.Ref); ok { + // fmt.Printf("ref: %v\n", r) + // //return true + //} + if name, ok := term.Value.(ast.Var); ok { + if meta, ok := e.LocalMetadata[name]; ok { + if val := e.Locals.Get(name); val != nil { + vars.Put(meta.Name, val) + } + } + } + return false + } + + if r, ok := e.Node.(*ast.Rule); ok { + // We're only interested in vars in the head, not the body + ast.WalkTerms(r.Head, findVars) + return vars + } + + // The local cache snapshot only contains a snapshot for those refs present in the event node, + // so they can all be added to the vars map. + e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool { + vars.Put(k, v) + return false + }) + + ast.WalkTerms(e.Node, findVars) + + return vars +} + +func formatEvent(event *Event, depth int) string { + padding := formatEventPadding(event, depth) + if event.Op == NoteOp { + return fmt.Sprintf("%v%v %q", padding, event.Op, event.Message) + } + + var details interface{} + if node, ok := event.Node.(*ast.Rule); ok { + details = node.Path() + } else if event.Ref != nil { + details = event.Ref + } else { + details = rewrite(event).Node + } + + template := "%v%v %v" + opts := []interface{}{padding, event.Op, details} + + if event.Message != "" { + template += " %v" + opts = append(opts, event.Message) + } + + return fmt.Sprintf(template, opts...) +} + +func formatEventPadding(event *Event, depth int) string { + spaces := formatEventSpaces(event, depth) + if spaces > 1 { + return strings.Repeat("| ", spaces-1) + } + return "" +} + +func formatEventSpaces(event *Event, depth int) int { + switch event.Op { + case EnterOp: + return depth + case RedoOp: + if _, ok := event.Node.(*ast.Expr); !ok { + return depth + } + } + return depth + 1 +} + +// getShortenedFileNames will return a map of file paths to shortened aliases +// that were found in the trace. 
It also returns the longest location expected +func getShortenedFileNames(trace []*Event) (map[string]string, int) { + // Get a deduplicated list of all file paths + // and the longest file path size + fpAliases := map[string]string{} + var canShorten []string + longestLocation := 0 + for _, event := range trace { + if event.Location != nil { + if event.Location.File != "" { + // length of ":" + curLen := len(event.Location.File) + numDigits10(event.Location.Row) + 1 + if curLen > longestLocation { + longestLocation = curLen + } + + if _, ok := fpAliases[event.Location.File]; ok { + continue + } + + canShorten = append(canShorten, event.Location.File) + + // Default to just alias their full path + fpAliases[event.Location.File] = event.Location.File + } else { + // length of ":" + curLen := minLocationWidth + numDigits10(event.Location.Row) + 1 + if curLen > longestLocation { + longestLocation = curLen + } + } + } + } + + if len(canShorten) > 0 && longestLocation > maxIdealLocationWidth { + fpAliases, longestLocation = iStrs.TruncateFilePaths(maxIdealLocationWidth, longestLocation, canShorten...) + } + + return fpAliases, longestLocation +} + +func numDigits10(n int) int { + if n < 10 { + return 1 + } + return numDigits10(n/10) + 1 +} + +func formatLocation(event *Event, fileAliases map[string]string) string { + + location := event.Location + if location == nil { + return "" + } + + if location.File == "" { + return fmt.Sprintf("query:%v", location.Row) + } + + return fmt.Sprintf("%v:%v", fileAliases[location.File], location.Row) +} + +// depths is a helper for computing the depth of an event. Events within the +// same query all have the same depth. The depth of query is +// depth(parent(query))+1. +type depths map[uint64]int + +func (ds depths) GetOrSet(qid uint64, pqid uint64) int { + depth := ds[qid] + if depth == 0 { + depth = ds[pqid] + depth++ + ds[qid] = depth + } + return depth +} + +func builtinTrace(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + str, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return handleBuiltinErr(ast.Trace.Name, bctx.Location, err) + } + + if !bctx.TraceEnabled { + return iter(ast.InternedBooleanTerm(true)) + } + + evt := Event{ + Op: NoteOp, + Location: bctx.Location, + QueryID: bctx.QueryID, + ParentID: bctx.ParentID, + Message: string(str), + } + + for i := range bctx.QueryTracers { + bctx.QueryTracers[i].TraceEvent(evt) + } + + return iter(ast.InternedBooleanTerm(true)) +} + +func rewrite(event *Event) *Event { + + cpy := *event + + var node ast.Node + + switch v := event.Node.(type) { + case *ast.Expr: + expr := v.Copy() + + // Hide generated local vars in 'key' position that have not been + // rewritten. 
+ if ev, ok := v.Terms.(*ast.Every); ok { + if kv, ok := ev.Key.Value.(ast.Var); ok { + if rw, ok := cpy.LocalMetadata[kv]; !ok || rw.Name.IsGenerated() { + expr.Terms.(*ast.Every).Key = nil + } + } + } + node = expr + case ast.Body: + node = v.Copy() + case *ast.Rule: + node = v.Copy() + } + + _, _ = ast.TransformVars(node, func(v ast.Var) (ast.Value, error) { + if meta, ok := cpy.LocalMetadata[v]; ok { + return meta.Name, nil + } + return v, nil + }) + + cpy.Node = node + + return &cpy +} + +type varInfo struct { + VarMetadata + val ast.Value + exprLoc *ast.Location + col int // 0-indexed column +} + +func (v varInfo) Value() string { + if v.val != nil { + return v.val.String() + } + return "undefined" +} + +func (v varInfo) Title() string { + if v.exprLoc != nil && v.exprLoc.Text != nil { + return string(v.exprLoc.Text) + } + return string(v.Name) +} + +func padLocationText(loc *ast.Location) string { + if loc == nil { + return "" + } + + text := string(loc.Text) + + if loc.Col == 0 { + return text + } + + buf := new(bytes.Buffer) + j := 0 + for i := 1; i < loc.Col; i++ { + if len(loc.Tabs) > 0 && j < len(loc.Tabs) && loc.Tabs[j] == i { + buf.WriteString("\t") + j++ + } else { + buf.WriteString(" ") + } + } + + buf.WriteString(text) + return buf.String() +} + +type PrettyEventOpts struct { + PrettyVars bool +} + +func walkTestTerms(x interface{}, f func(*ast.Term) bool) { + var vis *ast.GenericVisitor + vis = ast.NewGenericVisitor(func(x interface{}) bool { + switch x := x.(type) { + case ast.Call: + for _, t := range x[1:] { + vis.Walk(t) + } + return true + case *ast.Expr: + if x.IsCall() { + for _, o := range x.Operands() { + vis.Walk(o) + } + for i := range x.With { + vis.Walk(x.With[i]) + } + return true + } + case *ast.Term: + return f(x) + case *ast.With: + vis.Walk(x.Value) + return true + } + return false + }) + vis.Walk(x) +} + +func PrettyEvent(w io.Writer, e *Event, opts PrettyEventOpts) error { + if !opts.PrettyVars { + _, _ = fmt.Fprintln(w, padLocationText(e.Location)) + return nil + } + + buf := new(bytes.Buffer) + exprVars := map[string]varInfo{} + + findVars := func(unknownAreUndefined bool) func(term *ast.Term) bool { + return func(term *ast.Term) bool { + if term.Location == nil { + return false + } + + switch v := term.Value.(type) { + case *ast.ArrayComprehension, *ast.SetComprehension, *ast.ObjectComprehension: + // we don't report on the internals of a comprehension, as it's already evaluated, and we won't have the local vars. 
+ return true + case ast.Var: + var info *varInfo + if meta, ok := e.LocalMetadata[v]; ok { + info = &varInfo{ + VarMetadata: meta, + val: e.Locals.Get(v), + exprLoc: term.Location, + } + } else if unknownAreUndefined { + info = &varInfo{ + VarMetadata: VarMetadata{Name: v}, + exprLoc: term.Location, + col: term.Location.Col, + } + } + + if info != nil { + if v, exists := exprVars[info.Title()]; !exists || v.val == nil { + if term.Location != nil { + info.col = term.Location.Col + } + exprVars[info.Title()] = *info + } + } + } + return false + } + } + + expr, ok := e.Node.(*ast.Expr) + if !ok || expr == nil { + return nil + } + + base := expr.BaseCogeneratedExpr() + exprText := padLocationText(base.Location) + buf.WriteString(exprText) + + e.localVirtualCacheSnapshot.Iter(func(k, v ast.Value) bool { + var info *varInfo + switch k := k.(type) { + case ast.Ref: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k[0].Location, + col: k[0].Location.Col, + } + case *ast.ArrayComprehension: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k.Term.Location, + col: k.Term.Location.Col, + } + case *ast.SetComprehension: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k.Term.Location, + col: k.Term.Location.Col, + } + case *ast.ObjectComprehension: + info = &varInfo{ + VarMetadata: VarMetadata{Name: ast.Var(k.String())}, + val: v, + exprLoc: k.Key.Location, + col: k.Key.Location.Col, + } + } + + if info != nil { + exprVars[info.Title()] = *info + } + + return false + }) + + // If the expression is negated, we can't confidently assert that vars with unknown values are 'undefined', + // since the compiler might have opted out of the necessary rewrite. + walkTestTerms(expr, findVars(!expr.Negated)) + coExprs := expr.CogeneratedExprs() + for _, coExpr := range coExprs { + // Only the current "co-expr" can have undefined vars, if we don't know the value for a var in any other co-expr, + // it's unknown, not undefined. A var can be unknown if it hasn't been assigned a value yet, because the co-expr + // hasn't been evaluated yet (the fail happened before it). + walkTestTerms(coExpr, findVars(false)) + } + + printPrettyVars(buf, exprVars) + _, _ = fmt.Fprint(w, buf.String()) + return nil +} + +func printPrettyVars(w *bytes.Buffer, exprVars map[string]varInfo) { + containsTabs := false + varRows := make(map[int]interface{}) + for _, info := range exprVars { + if len(info.exprLoc.Tabs) > 0 { + containsTabs = true + } + varRows[info.exprLoc.Row] = nil + } + + if containsTabs && len(varRows) > 1 { + // We can't (currently) reliably point to var locations when they are on different rows that contain tabs. + // So we'll just print them in alphabetical order instead. 
+ byName := make([]varInfo, 0, len(exprVars)) + for _, info := range exprVars { + byName = append(byName, info) + } + slices.SortStableFunc(byName, func(a, b varInfo) int { + return strings.Compare(a.Title(), b.Title()) + }) + + w.WriteString("\n\nWhere:\n") + for _, info := range byName { + w.WriteString(fmt.Sprintf("\n%s: %s", info.Title(), iStrs.Truncate(info.Value(), maxPrettyExprVarWidth))) + } + + return + } + + byCol := make([]varInfo, 0, len(exprVars)) + for _, info := range exprVars { + byCol = append(byCol, info) + } + slices.SortFunc(byCol, func(a, b varInfo) int { + // sort first by column, then by reverse row (to present vars in the same order they appear in the expr) + if a.col == b.col { + if a.exprLoc.Row == b.exprLoc.Row { + return strings.Compare(a.Title(), b.Title()) + } + return b.exprLoc.Row - a.exprLoc.Row + } + return a.col - b.col + }) + + if len(byCol) == 0 { + return + } + + w.WriteString("\n") + printArrows(w, byCol, -1) + for i := len(byCol) - 1; i >= 0; i-- { + w.WriteString("\n") + printArrows(w, byCol, i) + } +} + +func printArrows(w *bytes.Buffer, l []varInfo, printValueAt int) { + prevCol := 0 + var slice []varInfo + if printValueAt >= 0 { + slice = l[:printValueAt+1] + } else { + slice = l + } + isFirst := true + for i, info := range slice { + + isLast := i >= len(slice)-1 + col := info.col + + if !isLast && col == l[i+1].col { + // We're sharing the same column with another, subsequent var + continue + } + + spaces := col - 1 + if i > 0 && !isFirst { + spaces = (col - prevCol) - 1 + } + + for j := 0; j < spaces; j++ { + tab := false + for _, t := range info.exprLoc.Tabs { + if t == j+prevCol+1 { + w.WriteString("\t") + tab = true + break + } + } + if !tab { + w.WriteString(" ") + } + } + + if isLast && printValueAt >= 0 { + valueStr := iStrs.Truncate(info.Value(), maxPrettyExprVarWidth) + if (i > 0 && col == l[i-1].col) || (i < len(l)-1 && col == l[i+1].col) { + // There is another var on this column, so we need to include the name to differentiate them. + w.WriteString(fmt.Sprintf("%s: %s", info.Title(), valueStr)) + } else { + w.WriteString(valueStr) + } + } else { + w.WriteString("|") + } + prevCol = col + isFirst = false + } +} + +func init() { + RegisterBuiltinFunc(ast.Trace.Name, builtinTrace) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/type.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/type.go new file mode 100644 index 000000000..6103fbe48 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/type.go @@ -0,0 +1,82 @@ +// Copyright 2022 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
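The trace.go additions above define the QueryTracer contract that the evaluator calls into during query evaluation. As a point of reference, the following is a minimal sketch of a caller-side implementation that buffers events and renders them with PrettyTraceWithOpts; the package name, the bufferedTracer type, and the choice of writing to stderr are assumptions made for illustration rather than part of the vendored sources.

package tracedemo // hypothetical package name, for illustration only

import (
	"os"

	"github.com/open-policy-agent/opa/v1/topdown"
)

// bufferedTracer satisfies topdown.QueryTracer by buffering events as the
// evaluator emits them.
type bufferedTracer struct {
	events []*topdown.Event
}

// Enabled reports that tracing is active whenever the tracer exists.
func (t *bufferedTracer) Enabled() bool { return true }

// TraceEvent stores a pointer to the event; the evaluator passes events by
// value, so taking the address of the parameter is safe here.
func (t *bufferedTracer) TraceEvent(evt topdown.Event) {
	t.events = append(t.events, &evt)
}

// Config asks the evaluator to plug local variable bindings before emitting
// events, matching what the legacy Tracer wrapper above requests.
func (t *bufferedTracer) Config() topdown.TraceConfig {
	return topdown.TraceConfig{PlugLocalVars: true}
}

// Dump pretty-prints the buffered trace with file:row locations and
// expression-local variables included.
func (t *bufferedTracer) Dump() {
	topdown.PrettyTraceWithOpts(os.Stderr, t.events, topdown.PrettyTraceOptions{
		Locations:     true,
		ExprVariables: true,
	})
}

The built-in BufferTracer plus PrettyTrace gives an equivalent result; the custom type is shown only to make the interface surface explicit.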
+ +package topdown + +import ( + "github.com/open-policy-agent/opa/v1/ast" +) + +func builtinIsNumber(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Number: + return iter(ast.InternedBooleanTerm(true)) + default: + return iter(ast.InternedBooleanTerm(false)) + } +} + +func builtinIsString(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.String: + return iter(ast.InternedBooleanTerm(true)) + default: + return iter(ast.InternedBooleanTerm(false)) + } +} + +func builtinIsBoolean(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Boolean: + return iter(ast.InternedBooleanTerm(true)) + default: + return iter(ast.InternedBooleanTerm(false)) + } +} + +func builtinIsArray(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case *ast.Array: + return iter(ast.InternedBooleanTerm(true)) + default: + return iter(ast.InternedBooleanTerm(false)) + } +} + +func builtinIsSet(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Set: + return iter(ast.InternedBooleanTerm(true)) + default: + return iter(ast.InternedBooleanTerm(false)) + } +} + +func builtinIsObject(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Object: + return iter(ast.InternedBooleanTerm(true)) + default: + return iter(ast.InternedBooleanTerm(false)) + } +} + +func builtinIsNull(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Null: + return iter(ast.InternedBooleanTerm(true)) + default: + return iter(ast.InternedBooleanTerm(false)) + } +} + +func init() { + RegisterBuiltinFunc(ast.IsNumber.Name, builtinIsNumber) + RegisterBuiltinFunc(ast.IsString.Name, builtinIsString) + RegisterBuiltinFunc(ast.IsBoolean.Name, builtinIsBoolean) + RegisterBuiltinFunc(ast.IsArray.Name, builtinIsArray) + RegisterBuiltinFunc(ast.IsSet.Name, builtinIsSet) + RegisterBuiltinFunc(ast.IsObject.Name, builtinIsObject) + RegisterBuiltinFunc(ast.IsNull.Name, builtinIsNull) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go new file mode 100644 index 000000000..fc3de4879 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/type_name.go @@ -0,0 +1,46 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
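The is_* builtins above all share the same shape: a BuiltinContext, the operand terms, and an iterator callback that receives an interned result term. Below is a hedged sketch of that pattern applied to a hypothetical custom builtin; the builtin name, the ast.RegisterBuiltin declaration step, and the use of the types package (added later in this diff) are assumptions for illustration rather than anything defined in the files above.

package builtindemo // hypothetical package name, for illustration only

import (
	"strings"

	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/open-policy-agent/opa/v1/topdown"
	"github.com/open-policy-agent/opa/v1/topdown/builtins"
	"github.com/open-policy-agent/opa/v1/types"
)

// isUpper mirrors the type-check builtins: validate the operand, then yield
// an interned boolean through iter.
func isUpper(_ topdown.BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error {
	s, err := builtins.StringOperand(operands[0].Value, 1)
	if err != nil {
		return err
	}
	return iter(ast.InternedBooleanTerm(strings.ToUpper(string(s)) == string(s)))
}

func init() {
	// A new builtin also needs a declaration so the compiler knows its
	// signature; this registration call is an assumption for the sketch.
	ast.RegisterBuiltin(&ast.Builtin{
		Name: "demo.is_upper",
		Decl: types.NewFunction(types.Args(types.S), types.B),
	})
	topdown.RegisterBuiltinFunc("demo.is_upper", isUpper)
}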
+ +package topdown + +import ( + "fmt" + + "github.com/open-policy-agent/opa/v1/ast" +) + +var ( + nullStringTerm = ast.StringTerm("null") + booleanStringTerm = ast.StringTerm("boolean") + numberStringTerm = ast.StringTerm("number") + stringStringTerm = ast.StringTerm("string") + arrayStringTerm = ast.StringTerm("array") + objectStringTerm = ast.StringTerm("object") + setStringTerm = ast.StringTerm("set") +) + +func builtinTypeName(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + switch operands[0].Value.(type) { + case ast.Null: + return iter(nullStringTerm) + case ast.Boolean: + return iter(booleanStringTerm) + case ast.Number: + return iter(numberStringTerm) + case ast.String: + return iter(stringStringTerm) + case *ast.Array: + return iter(arrayStringTerm) + case ast.Object: + return iter(objectStringTerm) + case ast.Set: + return iter(setStringTerm) + } + + return fmt.Errorf("illegal value") +} + +func init() { + RegisterBuiltinFunc(ast.TypeNameBuiltin.Name, builtinTypeName) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go new file mode 100644 index 000000000..d013df9fe --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/uuid.go @@ -0,0 +1,56 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package topdown + +import ( + "github.com/open-policy-agent/opa/internal/uuid" + "github.com/open-policy-agent/opa/v1/ast" + "github.com/open-policy-agent/opa/v1/topdown/builtins" +) + +type uuidCachingKey string + +func builtinUUIDRFC4122(bctx BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + + var key = uuidCachingKey(operands[0].Value.String()) + + val, ok := bctx.Cache.Get(key) + if ok { + return iter(val.(*ast.Term)) + } + + s, err := uuid.New(bctx.Seed) + if err != nil { + return err + } + + result := ast.NewTerm(ast.String(s)) + bctx.Cache.Put(key, result) + + return iter(result) +} + +func builtinUUIDParse(_ BuiltinContext, operands []*ast.Term, iter func(term *ast.Term) error) error { + str, err := builtins.StringOperand(operands[0].Value, 1) + if err != nil { + return err + } + + parsed, err := uuid.Parse(string(str)) + if err != nil { + return nil + } + val, err := ast.InterfaceToValue(parsed) + if err != nil { + return err + } + + return iter(ast.NewTerm(val)) +} + +func init() { + RegisterBuiltinFunc(ast.UUIDRFC4122.Name, builtinUUIDRFC4122) + RegisterBuiltinFunc(ast.UUIDParse.Name, builtinUUIDParse) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go new file mode 100644 index 000000000..f5dcf5c9f --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/topdown/walk.go @@ -0,0 +1,147 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package topdown + +import ( + "github.com/open-policy-agent/opa/v1/ast" +) + +var emptyArr = ast.ArrayTerm() + +func evalWalk(_ BuiltinContext, operands []*ast.Term, iter func(*ast.Term) error) error { + input := operands[0] + + if pathIsWildcard(operands) { + // When the path assignment is a wildcard: walk(input, [_, value]) + // we may skip the path construction entirely, and simply return + // same pointer in each iteration. This is a *much* more efficient + // path when only the values are needed. + return walkNoPath(ast.ArrayTerm(emptyArr, input), iter) + } + + filter := getOutputPath(operands) + return walk(filter, nil, input, iter) +} + +func walk(filter, path *ast.Array, input *ast.Term, iter func(*ast.Term) error) error { + if filter == nil || filter.Len() == 0 { + if path == nil { + path = ast.NewArray() + } + + if err := iter(ast.ArrayTerm(ast.NewTerm(path.Copy()), input)); err != nil { + return err + } + } + + if filter != nil && filter.Len() > 0 { + key := filter.Elem(0) + filter = filter.Slice(1, -1) + if key.IsGround() { + if term := input.Get(key); term != nil { + path = pathAppend(path, key) + return walk(filter, path, term, iter) + } + return nil + } + } + + switch v := input.Value.(type) { + case *ast.Array: + for i := 0; i < v.Len(); i++ { + if err := walk(filter, pathAppend(path, ast.InternedIntNumberTerm(i)), v.Elem(i), iter); err != nil { + return err + } + } + case ast.Object: + for _, k := range v.Keys() { + if err := walk(filter, pathAppend(path, k), v.Get(k), iter); err != nil { + return err + } + } + case ast.Set: + for _, elem := range v.Slice() { + if err := walk(filter, pathAppend(path, elem), elem, iter); err != nil { + return err + } + } + } + + return nil +} + +func walkNoPath(input *ast.Term, iter func(*ast.Term) error) error { + // Note: the path array is embedded in the input from the start here + // in order to avoid an extra allocation per iteration. This leads to + // a little convoluted code below in order to extract and set the value, + // but since walk is commonly used to traverse large data structures, + // the performance gain is worth it. 
+ if err := iter(input); err != nil { + return err + } + + inputArray := input.Value.(*ast.Array) + value := inputArray.Get(ast.InternedIntNumberTerm(1)).Value + + switch v := value.(type) { + case ast.Object: + for _, k := range v.Keys() { + inputArray.Set(1, v.Get(k)) + if err := walkNoPath(input, iter); err != nil { + return err + } + } + case *ast.Array: + for i := 0; i < v.Len(); i++ { + inputArray.Set(1, v.Elem(i)) + if err := walkNoPath(input, iter); err != nil { + return err + } + } + case ast.Set: + for _, elem := range v.Slice() { + inputArray.Set(1, elem) + if err := walkNoPath(input, iter); err != nil { + return err + } + } + } + + return nil +} + +func pathAppend(path *ast.Array, key *ast.Term) *ast.Array { + if path == nil { + return ast.NewArray(key) + } + + return path.Append(key) +} + +func getOutputPath(operands []*ast.Term) *ast.Array { + if len(operands) == 2 { + if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 { + if path, ok := arr.Elem(0).Value.(*ast.Array); ok { + return path + } + } + } + return nil +} + +func pathIsWildcard(operands []*ast.Term) bool { + if len(operands) == 2 { + if arr, ok := operands[1].Value.(*ast.Array); ok && arr.Len() == 2 { + if v, ok := arr.Elem(0).Value.(ast.Var); ok { + return v.IsWildcard() + } + } + } + return false +} + +func init() { + RegisterBuiltinFunc(ast.WalkBuiltin.Name, evalWalk) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/tracing/tracing.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/tracing/tracing.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/tracing/tracing.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/types/decode.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/types/decode.go new file mode 100644 index 000000000..3fcc01664 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/types/decode.go @@ -0,0 +1,191 @@ +// Copyright 2020 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package types + +import ( + "encoding/json" + "fmt" + + "github.com/open-policy-agent/opa/v1/util" +) + +const ( + typeNull = "null" + typeBoolean = "boolean" + typeNumber = "number" + typeString = "string" + typeArray = "array" + typeSet = "set" + typeObject = "object" + typeAny = "any" + typeFunction = "function" +) + +// Unmarshal deserializes bs and returns the resulting type. 
+func Unmarshal(bs []byte) (result Type, err error) { + + var hint rawtype + + if err = util.UnmarshalJSON(bs, &hint); err == nil { + switch hint.Type { + case typeNull: + result = NewNull() + case typeBoolean: + result = NewBoolean() + case typeNumber: + result = NewNumber() + case typeString: + result = NewString() + case typeArray: + var arr rawarray + if err = util.UnmarshalJSON(bs, &arr); err == nil { + var err error + var static []Type + var dynamic Type + if static, err = unmarshalSlice(arr.Static); err != nil { + return nil, err + } + if len(arr.Dynamic) != 0 { + if dynamic, err = Unmarshal(arr.Dynamic); err != nil { + return nil, err + } + } + result = NewArray(static, dynamic) + } + case typeObject: + var obj rawobject + if err = util.UnmarshalJSON(bs, &obj); err == nil { + var err error + var static []*StaticProperty + var dynamic *DynamicProperty + if static, err = unmarshalStaticPropertySlice(obj.Static); err != nil { + return nil, err + } + if dynamic, err = unmarshalDynamicProperty(obj.Dynamic); err != nil { + return nil, err + } + result = NewObject(static, dynamic) + } + case typeSet: + var set rawset + if err = util.UnmarshalJSON(bs, &set); err == nil { + var of Type + if of, err = Unmarshal(set.Of); err == nil { + result = NewSet(of) + } + } + case typeAny: + var union rawunion + if err = util.UnmarshalJSON(bs, &union); err == nil { + var of []Type + if of, err = unmarshalSlice(union.Of); err == nil { + result = NewAny(of...) + } + } + case typeFunction: + var decl rawdecl + if err = util.UnmarshalJSON(bs, &decl); err == nil { + args, err := unmarshalSlice(decl.Args) + if err != nil { + return nil, err + } + var ret Type + if len(decl.Result) > 0 { + ret, err = Unmarshal(decl.Result) + if err != nil { + return nil, err + } + } + if len(decl.Variadic) > 0 { + varargs, err := Unmarshal(decl.Variadic) + if err != nil { + return nil, err + } + result = NewVariadicFunction(args, varargs, ret) + } else { + result = NewFunction(args, ret) + } + } + default: + err = fmt.Errorf("unsupported type '%v'", hint.Type) + } + } + + return result, err +} + +type rawtype struct { + Type string `json:"type"` +} + +type rawarray struct { + Static []json.RawMessage `json:"static"` + Dynamic json.RawMessage `json:"dynamic"` +} + +type rawobject struct { + Static []rawstaticproperty `json:"static"` + Dynamic rawdynamicproperty `json:"dynamic"` +} + +type rawstaticproperty struct { + Key interface{} `json:"key"` + Value json.RawMessage `json:"value"` +} + +type rawdynamicproperty struct { + Key json.RawMessage `json:"key"` + Value json.RawMessage `json:"value"` +} + +type rawset struct { + Of json.RawMessage `json:"of"` +} + +type rawunion struct { + Of []json.RawMessage `json:"of"` +} + +type rawdecl struct { + Args []json.RawMessage `json:"args"` + Result json.RawMessage `json:"result"` + Variadic json.RawMessage `json:"variadic"` +} + +func unmarshalSlice(elems []json.RawMessage) (result []Type, err error) { + result = make([]Type, len(elems)) + for i := range elems { + if result[i], err = Unmarshal(elems[i]); err != nil { + return nil, err + } + } + return result, err +} + +func unmarshalStaticPropertySlice(elems []rawstaticproperty) (result []*StaticProperty, err error) { + result = make([]*StaticProperty, len(elems)) + for i := range elems { + value, err := Unmarshal(elems[i].Value) + if err != nil { + return nil, err + } + result[i] = NewStaticProperty(elems[i].Key, value) + } + return result, err +} + +func unmarshalDynamicProperty(x rawdynamicproperty) (result *DynamicProperty, err 
error) { + if len(x.Key) == 0 { + return nil, nil + } + var key Type + if key, err = Unmarshal(x.Key); err == nil { + var value Type + if value, err = Unmarshal(x.Value); err == nil { + return NewDynamicProperty(key, value), nil + } + } + return nil, err +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/types/types.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/types/types.go new file mode 100644 index 000000000..070521087 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/types/types.go @@ -0,0 +1,1212 @@ +// Copyright 2017 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +// Package types declares data types for Rego values and helper functions to +// operate on these types. +package types + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/open-policy-agent/opa/v1/util" +) + +// Sprint returns the string representation of the type. +func Sprint(x Type) string { + if x == nil { + return "???" + } + return x.String() +} + +// Type represents a type of a term in the language. +type Type interface { + String() string + typeMarker() string + json.Marshaler +} + +func (Null) typeMarker() string { return typeNull } +func (Boolean) typeMarker() string { return typeBoolean } +func (Number) typeMarker() string { return typeNumber } +func (String) typeMarker() string { return typeString } +func (*Array) typeMarker() string { return typeArray } +func (*Object) typeMarker() string { return typeObject } +func (*Set) typeMarker() string { return typeSet } +func (Any) typeMarker() string { return typeAny } +func (Function) typeMarker() string { return typeFunction } + +// Null represents the null type. +type Null struct{} + +// NewNull returns a new Null type. +func NewNull() Null { + return Null{} +} + +// NamedType represents a type alias with an arbitrary name and description. +// This is useful for generating documentation for built-in functions. +type NamedType struct { + Name, Descr string + Type Type +} + +func (n *NamedType) typeMarker() string { return n.Type.typeMarker() } +func (n *NamedType) String() string { return n.Name + ": " + n.Type.String() } +func (n *NamedType) MarshalJSON() ([]byte, error) { + var obj map[string]interface{} + switch x := n.Type.(type) { + case interface{ toMap() map[string]interface{} }: + obj = x.toMap() + default: + obj = map[string]interface{}{ + "type": n.Type.typeMarker(), + } + } + obj["name"] = n.Name + if n.Descr != "" { + obj["description"] = n.Descr + } + return json.Marshal(obj) +} + +func (n *NamedType) Description(d string) *NamedType { + n.Descr = d + return n +} + +// Named returns the passed type as a named type. +// Named types are only valid at the top level of built-in functions. +// Note that nested named types cause panic. +func Named(name string, t Type) *NamedType { + return &NamedType{ + Type: t, + Name: name, + } +} + +// MarshalJSON returns the JSON encoding of t. +func (t Null) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "type": t.typeMarker(), + }) +} + +func unwrap(t Type) Type { + switch t := t.(type) { + case *NamedType: + return t.Type + default: + return t + } +} + +func (t Null) String() string { + return typeNull +} + +// Boolean represents the boolean type. +type Boolean struct{} + +// B represents an instance of the boolean type. +var B = NewBoolean() + +// NewBoolean returns a new Boolean type. 
+func NewBoolean() Boolean { + return Boolean{} +} + +// MarshalJSON returns the JSON encoding of t. +func (t Boolean) MarshalJSON() ([]byte, error) { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + return json.Marshal(repr) +} + +func (t Boolean) String() string { + return t.typeMarker() +} + +// String represents the string type. +type String struct{} + +// S represents an instance of the string type. +var S = NewString() + +// NewString returns a new String type. +func NewString() String { + return String{} +} + +// MarshalJSON returns the JSON encoding of t. +func (t String) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "type": t.typeMarker(), + }) +} + +func (String) String() string { + return typeString +} + +// Number represents the number type. +type Number struct{} + +// N represents an instance of the number type. +var N = NewNumber() + +// NewNumber returns a new Number type. +func NewNumber() Number { + return Number{} +} + +// MarshalJSON returns the JSON encoding of t. +func (t Number) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "type": t.typeMarker(), + }) +} + +func (Number) String() string { + return typeNumber +} + +// Array represents the array type. +type Array struct { + static []Type // static items + dynamic Type // dynamic items +} + +// NewArray returns a new Array type. +func NewArray(static []Type, dynamic Type) *Array { + return &Array{ + static: static, + dynamic: dynamic, + } +} + +// MarshalJSON returns the JSON encoding of t. +func (t *Array) MarshalJSON() ([]byte, error) { + return json.Marshal(t.toMap()) +} + +func (t *Array) toMap() map[string]interface{} { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + if len(t.static) != 0 { + repr["static"] = t.static + } + if t.dynamic != nil { + repr["dynamic"] = t.dynamic + } + return repr +} + +func (t *Array) String() string { + prefix := "array" + buf := []string{} + for _, tpe := range t.static { + buf = append(buf, Sprint(tpe)) + } + repr := prefix + if len(buf) > 0 { + repr += "<" + strings.Join(buf, ", ") + ">" + } + if t.dynamic != nil { + repr += "[" + t.dynamic.String() + "]" + } + return repr +} + +// Dynamic returns the type of the array's dynamic elements. +func (t *Array) Dynamic() Type { + return t.dynamic +} + +// Len returns the number of static array elements. +func (t *Array) Len() int { + return len(t.static) +} + +// Select returns the type of element at the zero-based pos. +func (t *Array) Select(pos int) Type { + if pos >= 0 { + if len(t.static) > pos { + return t.static[pos] + } + if t.dynamic != nil { + return t.dynamic + } + } + return nil +} + +// Set represents the set type. +type Set struct { + of Type +} + +// NewSet returns a new Set type. +func NewSet(of Type) *Set { + return &Set{ + of: of, + } +} + +func (t *Set) Of() Type { + return t.of +} + +// MarshalJSON returns the JSON encoding of t. +func (t *Set) MarshalJSON() ([]byte, error) { + return json.Marshal(t.toMap()) +} + +func (t *Set) toMap() map[string]interface{} { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + if t.of != nil { + repr["of"] = t.of + } + return repr +} + +func (t *Set) String() string { + prefix := typeSet + return prefix + "[" + Sprint(t.of) + "]" +} + +// StaticProperty represents a static object property. +type StaticProperty struct { + Key interface{} + Value Type +} + +// NewStaticProperty returns a new StaticProperty object. 
+func NewStaticProperty(key interface{}, value Type) *StaticProperty { + return &StaticProperty{ + Key: key, + Value: value, + } +} + +// MarshalJSON returns the JSON encoding of p. +func (p *StaticProperty) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "key": p.Key, + "value": p.Value, + }) +} + +// DynamicProperty represents a dynamic object property. +type DynamicProperty struct { + Key Type + Value Type +} + +// NewDynamicProperty returns a new DynamicProperty object. +func NewDynamicProperty(key, value Type) *DynamicProperty { + return &DynamicProperty{ + Key: key, + Value: value, + } +} + +// MarshalJSON returns the JSON encoding of p. +func (p *DynamicProperty) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "key": p.Key, + "value": p.Value, + }) +} + +func (p *DynamicProperty) String() string { + return fmt.Sprintf("%s: %s", Sprint(p.Key), Sprint(p.Value)) +} + +// Object represents the object type. +type Object struct { + static []*StaticProperty // constant properties + dynamic *DynamicProperty // dynamic properties +} + +// NewObject returns a new Object type. +func NewObject(static []*StaticProperty, dynamic *DynamicProperty) *Object { + sort.Slice(static, func(i, j int) bool { + cmp := util.Compare(static[i].Key, static[j].Key) + return cmp == -1 + }) + return &Object{ + static: static, + dynamic: dynamic, + } +} + +func (t *Object) String() string { + prefix := "object" + buf := make([]string, 0, len(t.static)) + for _, p := range t.static { + buf = append(buf, fmt.Sprintf("%v: %v", p.Key, Sprint(p.Value))) + } + repr := prefix + if len(buf) > 0 { + repr += "<" + strings.Join(buf, ", ") + ">" + } + if t.dynamic != nil { + repr += "[" + t.dynamic.String() + "]" + } + return repr +} + +// DynamicValue returns the type of the object's dynamic elements. +func (t *Object) DynamicValue() Type { + if t.dynamic == nil { + return nil + } + return t.dynamic.Value +} + +// DynamicProperties returns the type of the object's dynamic elements. +func (t *Object) DynamicProperties() *DynamicProperty { + return t.dynamic +} + +// StaticProperties returns the type of the object's static elements. +func (t *Object) StaticProperties() []*StaticProperty { + return t.static +} + +// Keys returns the keys of the object's static elements. +func (t *Object) Keys() []interface{} { + sl := make([]interface{}, 0, len(t.static)) + for _, p := range t.static { + sl = append(sl, p.Key) + } + return sl +} + +// MarshalJSON returns the JSON encoding of t. +func (t *Object) MarshalJSON() ([]byte, error) { + return json.Marshal(t.toMap()) +} + +func (t *Object) toMap() map[string]interface{} { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + if len(t.static) != 0 { + repr["static"] = t.static + } + if t.dynamic != nil { + repr["dynamic"] = t.dynamic + } + return repr +} + +// Select returns the type of the named property. 
+func (t *Object) Select(name interface{}) Type { + pos := sort.Search(len(t.static), func(x int) bool { + return util.Compare(t.static[x].Key, name) >= 0 + }) + + if pos < len(t.static) && util.Compare(t.static[pos].Key, name) == 0 { + return t.static[pos].Value + } + + if t.dynamic != nil { + if Contains(t.dynamic.Key, TypeOf(name)) { + return t.dynamic.Value + } + } + + return nil +} + +func (t *Object) Merge(other Type) *Object { + if otherObj, ok := other.(*Object); ok { + return mergeObjects(t, otherObj) + } + + var typeK Type + var typeV Type + dynProps := t.DynamicProperties() + if dynProps != nil { + typeK = Or(Keys(other), dynProps.Key) + typeV = Or(Values(other), dynProps.Value) + dynProps = NewDynamicProperty(typeK, typeV) + } else { + typeK = Keys(other) + typeV = Values(other) + if typeK != nil && typeV != nil { + dynProps = NewDynamicProperty(typeK, typeV) + } + } + + return NewObject(t.StaticProperties(), dynProps) +} + +func mergeObjects(a, b *Object) *Object { + var dynamicProps *DynamicProperty + if a.dynamic != nil && b.dynamic != nil { + typeK := Or(a.dynamic.Key, b.dynamic.Key) + var typeV Type + aObj, aIsObj := a.dynamic.Value.(*Object) + bObj, bIsObj := b.dynamic.Value.(*Object) + if aIsObj && bIsObj { + typeV = mergeObjects(aObj, bObj) + } else { + typeV = Or(a.dynamic.Value, b.dynamic.Value) + } + dynamicProps = NewDynamicProperty(typeK, typeV) + } else if a.dynamic != nil { + dynamicProps = a.dynamic + } else { + dynamicProps = b.dynamic + } + + staticPropsMap := make(map[interface{}]Type) + + for _, sp := range a.static { + staticPropsMap[sp.Key] = sp.Value + } + + for _, sp := range b.static { + currV := staticPropsMap[sp.Key] + if currV != nil { + currVObj, currVIsObj := currV.(*Object) + spVObj, spVIsObj := sp.Value.(*Object) + if currVIsObj && spVIsObj { + staticPropsMap[sp.Key] = mergeObjects(currVObj, spVObj) + } else { + staticPropsMap[sp.Key] = Or(currV, sp.Value) + } + } else { + staticPropsMap[sp.Key] = sp.Value + } + } + + staticProps := make([]*StaticProperty, 0, len(staticPropsMap)) + for k, v := range staticPropsMap { + staticProps = append(staticProps, NewStaticProperty(k, v)) + } + + return NewObject(staticProps, dynamicProps) +} + +// Any represents a dynamic type. +type Any []Type + +// A represents the superset of all types. +var A = NewAny() + +// NewAny returns a new Any type. +func NewAny(of ...Type) Any { + sl := make(Any, len(of)) + copy(sl, of) + sort.Sort(typeSlice(sl)) + return sl +} + +// Contains returns true if t is a superset of other. +func (t Any) Contains(other Type) bool { + if _, ok := other.(*Function); ok { + return false + } + // Note(philipc): We used to do this as a linear search. + // Since this is always sorted, we can use a binary search instead. + i := sort.Search(len(t), func(i int) bool { + return Compare(t[i], other) >= 0 + }) + if i < len(t) && Compare(t[i], other) == 0 { + // x is present at t[i] + return true + } + return len(t) == 0 +} + +// MarshalJSON returns the JSON encoding of t. +func (t Any) MarshalJSON() ([]byte, error) { + return json.Marshal(t.toMap()) +} + +func (t Any) toMap() map[string]interface{} { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + if len(t) != 0 { + repr["of"] = []Type(t) + } + return repr +} + +// Merge return a new Any type that is the superset of t and other. 
+func (t Any) Merge(other Type) Any { + if otherAny, ok := other.(Any); ok { + return t.Union(otherAny) + } + if t.Contains(other) { + return t + } + cpy := make(Any, len(t)+1) + idx := sort.Search(len(t), func(i int) bool { + return Compare(t[i], other) >= 0 + }) + copy(cpy, t[:idx]) + cpy[idx] = other + copy(cpy[idx+1:], t[idx:]) + return cpy +} + +// Union returns a new Any type that is the union of the two Any types. +// Note(philipc): The two Any slices MUST be sorted before running Union, +// or else this method will fail to merge the two slices correctly. +func (t Any) Union(other Any) Any { + lenT := len(t) + lenOther := len(other) + // Return the more general (blank) Any type if present. + if lenT == 0 { + return t + } + if lenOther == 0 { + return other + } + // Prealloc the output list. + maxLen := lenT + if lenT < lenOther { + maxLen = lenOther + } + merged := make(Any, 0, maxLen) + // Note(philipc): Create a merged slice, doing the minimum number of + // comparisons along the way. We treat this as a problem of merging two + // sorted lists that might have duplicates. This specifically saves us + // from cases where one list might be *much* longer than the other. + // Algorithm: + // Assume: + // - List A + // - List B + // - List Output + // - Idx_a, Idx_b + // Procedure: + // - While Idx_a < len(A) and Idx_b < len(B) + // - Compare head(A) and head(B) + // - Cases: + // - A < B: Append head(A) to Output, advance Idx_a + // - A == B: Append head(A) to Output, advance Idx_a, Idx_b + // - A > B: Append head(B) to Output, advance Idx_b + // - Return output + idxA := 0 + idxB := 0 + for idxA < lenT || idxB < lenOther { + // Early-exit cases: + if idxA == lenT { + // Ran out of elements in t. Copy over what's left from other. + merged = append(merged, other[idxB:]...) + break + } else if idxB == lenOther { + // Ran out of elements in other. Copy over what's left from t. + merged = append(merged, t[idxA:]...) + break + } + // Normal selection of next element to merge: + switch Compare(t[idxA], other[idxB]) { + // A < B: + case -1: + merged = append(merged, t[idxA]) + idxA++ + // A == B: + case 0: + merged = append(merged, t[idxA]) + idxA++ + idxB++ + // A > B: + case 1: + merged = append(merged, other[idxB]) + idxB++ + } + } + return merged +} + +func (t Any) String() string { + prefix := "any" + if len(t) == 0 { + return prefix + } + buf := make([]string, len(t)) + for i := range t { + buf[i] = Sprint(t[i]) + } + return prefix + "<" + strings.Join(buf, ", ") + ">" +} + +// Function represents a function type. +type Function struct { + args []Type + result Type + variadic Type +} + +// Args returns an argument list. +func Args(x ...Type) []Type { + return x +} + +// Void returns true if the function has no return value. This function returns +// false if x is not a function. +func Void(x Type) bool { + f, ok := x.(*Function) + return ok && f.Result() == nil +} + +// Arity returns the number of arguments in the function signature or zero if x +// is not a function. If the type is unknown, this function returns -1. +func Arity(x Type) int { + if x == nil { + return -1 + } + f, ok := x.(*Function) + if !ok { + return 0 + } + return f.Arity() +} + +// NewFunction returns a new Function object of the given argument and result types. +func NewFunction(args []Type, result Type) *Function { + return &Function{ + args: args, + result: result, + } +} + +// NewVariadicFunction returns a new Function object. This function sets the +// variadic bit on the signature. 
Non-void variadic functions are not currently +// supported. +func NewVariadicFunction(args []Type, varargs Type, result Type) *Function { + if result != nil { + panic("illegal value: non-void variadic functions not supported") + } + return &Function{ + args: args, + variadic: varargs, + result: nil, + } +} + +// FuncArgs returns the function's arguments. +func (t *Function) FuncArgs() FuncArgs { + return FuncArgs{Args: t.Args(), Variadic: unwrap(t.variadic)} +} + +// NamedFuncArgs returns the function's arguments, with a name and +// description if available. +func (t *Function) NamedFuncArgs() FuncArgs { + args := make([]Type, len(t.args)) + copy(args, t.args) + return FuncArgs{Args: args, Variadic: t.variadic} +} + +// Args returns the function's arguments as a slice, ignoring variadic arguments. +// Deprecated: Use FuncArgs instead. +func (t *Function) Args() []Type { + cpy := make([]Type, len(t.args)) + for i := range t.args { + cpy[i] = unwrap(t.args[i]) + } + return cpy +} + +// Arity returns the number of arguments in the function signature. +func (t *Function) Arity() int { + return len(t.args) +} + +// Result returns the function's result type. +func (t *Function) Result() Type { + return unwrap(t.result) +} + +// Result returns the function's result type, without stripping name and description. +func (t *Function) NamedResult() Type { + return t.result +} + +func (t *Function) String() string { + return fmt.Sprintf("%v => %v", t.FuncArgs(), Sprint(t.Result())) +} + +// MarshalJSON returns the JSON encoding of t. +func (t *Function) MarshalJSON() ([]byte, error) { + repr := map[string]interface{}{ + "type": t.typeMarker(), + } + if len(t.args) > 0 { + repr["args"] = t.args + } + if t.result != nil { + repr["result"] = t.result + } + if t.variadic != nil { + repr["variadic"] = t.variadic + } + return json.Marshal(repr) +} + +// UnmarshalJSON decodes the JSON serialized function declaration. +func (t *Function) UnmarshalJSON(bs []byte) error { + tpe, err := Unmarshal(bs) + if err != nil { + return err + } + + f, ok := tpe.(*Function) + if !ok { + return fmt.Errorf("invalid type") + } + + *t = *f + return nil +} + +// Union returns a new function representing the union of t and other. Functions +// must have the same arity to be unioned. +func (t *Function) Union(other *Function) *Function { + if other == nil { + return t + } + if t == nil { + return other + } + + if t.Arity() != other.Arity() { + return nil + } + + tfa := t.FuncArgs() + ofa := other.FuncArgs() + + aIsVariadic := tfa.Variadic != nil + bIsVariadic := ofa.Variadic != nil + + if aIsVariadic && !bIsVariadic { + return nil + } else if bIsVariadic && !aIsVariadic { + return nil + } + + a := t.Args() + b := other.Args() + + args := make([]Type, len(a)) + for i := range a { + args[i] = Or(a[i], b[i]) + } + + result := NewFunction(args, Or(t.Result(), other.Result())) + result.variadic = Or(tfa.Variadic, ofa.Variadic) + + return result +} + +// FuncArgs represents the arguments that can be passed to a function. +type FuncArgs struct { + Args []Type `json:"args,omitempty"` + Variadic Type `json:"variadic,omitempty"` +} + +func (a FuncArgs) String() string { + buf := make([]string, 0, len(a.Args)+1) + for i := range a.Args { + buf = append(buf, Sprint(a.Args[i])) + } + if a.Variadic != nil { + buf = append(buf, Sprint(a.Variadic)+"...") + } + return "(" + strings.Join(buf, ", ") + ")" +} + +// Arg returns the nth argument's type. 
+func (a FuncArgs) Arg(x int) Type { + if x < len(a.Args) { + return a.Args[x] + } + return a.Variadic +} + +// Compare returns -1, 0, 1 based on comparison between a and b. +func Compare(a, b Type) int { + a, b = unwrap(a), unwrap(b) + x := typeOrder(a) + y := typeOrder(b) + if x > y { + return 1 + } else if x < y { + return -1 + } + switch a.(type) { + case nil, Null, Boolean, Number, String: + return 0 + case *Array: + arrA := a.(*Array) + arrB := b.(*Array) + if arrA.dynamic != nil && arrB.dynamic == nil { + return 1 + } else if arrB.dynamic != nil && arrA.dynamic == nil { + return -1 + } + if arrB.dynamic != nil && arrA.dynamic != nil { + if cmp := Compare(arrA.dynamic, arrB.dynamic); cmp != 0 { + return cmp + } + } + return typeSliceCompare(arrA.static, arrB.static) + case *Object: + objA := a.(*Object) + objB := b.(*Object) + if objA.dynamic != nil && objB.dynamic == nil { + return 1 + } else if objB.dynamic != nil && objA.dynamic == nil { + return -1 + } + if objA.dynamic != nil && objB.dynamic != nil { + if cmp := Compare(objA.dynamic.Key, objB.dynamic.Key); cmp != 0 { + return cmp + } + if cmp := Compare(objA.dynamic.Value, objB.dynamic.Value); cmp != 0 { + return cmp + } + } + + lenStaticA := len(objA.static) + lenStaticB := len(objB.static) + + minLen := lenStaticA + if lenStaticB < minLen { + minLen = lenStaticB + } + + for i := 0; i < minLen; i++ { + if cmp := util.Compare(objA.static[i].Key, objB.static[i].Key); cmp != 0 { + return cmp + } + if cmp := Compare(objA.static[i].Value, objB.static[i].Value); cmp != 0 { + return cmp + } + } + + if lenStaticA < lenStaticB { + return -1 + } else if lenStaticB < lenStaticA { + return 1 + } + + return 0 + case *Set: + setA := a.(*Set) + setB := b.(*Set) + if setA.of == nil && setB.of == nil { + return 0 + } else if setA.of == nil { + return -1 + } else if setB.of == nil { + return 1 + } + return Compare(setA.of, setB.of) + case Any: + sl1 := typeSlice(a.(Any)) + sl2 := typeSlice(b.(Any)) + return typeSliceCompare(sl1, sl2) + case *Function: + fA := a.(*Function) + fB := b.(*Function) + if len(fA.args) < len(fB.args) { + return -1 + } else if len(fA.args) > len(fB.args) { + return 1 + } + for i := 0; i < len(fA.args); i++ { + if cmp := Compare(fA.args[i], fB.args[i]); cmp != 0 { + return cmp + } + } + if cmp := Compare(fA.result, fB.result); cmp != 0 { + return cmp + } + return Compare(fA.variadic, fB.variadic) + default: + panic("unreachable") + } +} + +// Contains returns true if a is a superset or equal to b. +func Contains(a, b Type) bool { + if x, ok := unwrap(a).(Any); ok { + return x.Contains(b) + } + return Compare(a, b) == 0 +} + +// Or returns a type that represents the union of a and b. If one type is a +// superset of the other, the superset is returned unchanged. +func Or(a, b Type) Type { + a, b = unwrap(a), unwrap(b) + if a == nil { + return b + } else if b == nil { + return a + } + fA, ok1 := a.(*Function) + fB, ok2 := b.(*Function) + if ok1 && ok2 { + return fA.Union(fB) + } else if ok1 || ok2 { + return nil + } + anyA, ok1 := a.(Any) + anyB, ok2 := b.(Any) + if ok1 { + return anyA.Merge(b) + } + if ok2 { + return anyB.Merge(a) + } + if Compare(a, b) == 0 { + return a + } + return NewAny(a, b) +} + +// Select returns a property or item of a. 
+func Select(a Type, x interface{}) Type { + switch a := unwrap(a).(type) { + case *Array: + n, ok := x.(json.Number) + if !ok { + return nil + } + pos, err := n.Int64() + if err != nil { + return nil + } + return a.Select(int(pos)) + case *Object: + return a.Select(x) + case *Set: + tpe := TypeOf(x) + if Compare(a.of, tpe) == 0 { + return a.of + } + if x, ok := a.of.(Any); ok { + if x.Contains(tpe) { + return tpe + } + } + return nil + case Any: + if Compare(a, A) == 0 { + return A + } + var tpe Type + for i := range a { + // TODO(tsandall): test nil/nil + tpe = Or(Select(a[i], x), tpe) + } + return tpe + default: + return nil + } +} + +// Keys returns the type of keys that can be enumerated for a. For arrays, the +// keys are always number types, for objects the keys are always string types, +// and for sets the keys are always the type of the set element. +func Keys(a Type) Type { + switch a := unwrap(a).(type) { + case *Array: + return N + case *Object: + var tpe Type + for _, k := range a.Keys() { + tpe = Or(tpe, TypeOf(k)) + } + if a.dynamic != nil { + tpe = Or(tpe, a.dynamic.Key) + } + return tpe + case *Set: + return a.of + case Any: + // TODO(tsandall): ditto test + if Compare(a, A) == 0 { + return A + } + var tpe Type + for i := range a { + tpe = Or(Keys(a[i]), tpe) + } + return tpe + } + return nil +} + +// Values returns the type of values that can be enumerated for a. +func Values(a Type) Type { + switch a := unwrap(a).(type) { + case *Array: + var tpe Type + for i := range a.static { + tpe = Or(tpe, a.static[i]) + } + return Or(tpe, a.dynamic) + case *Object: + var tpe Type + for i := range a.static { + tpe = Or(tpe, a.static[i].Value) + } + if a.dynamic != nil { + tpe = Or(tpe, a.dynamic.Value) + } + return tpe + case *Set: + return a.of + case Any: + if Compare(a, A) == 0 { + return A + } + var tpe Type + for i := range a { + tpe = Or(Values(a[i]), tpe) + } + return tpe + } + return nil +} + +// Nil returns true if a's type is unknown. +func Nil(a Type) bool { + switch a := unwrap(a).(type) { + case nil: + return true + case *Function: + for i := range a.args { + if Nil(a.args[i]) { + return true + } + } + return Nil(a.result) + case *Array: + for i := range a.static { + if Nil(a.static[i]) { + return true + } + } + if a.dynamic != nil { + return Nil(a.dynamic) + } + case *Object: + for i := range a.static { + if Nil(a.static[i].Value) { + return true + } + } + if a.dynamic != nil { + return Nil(a.dynamic.Key) || Nil(a.dynamic.Value) + } + case *Set: + return Nil(a.of) + } + return false +} + +// TypeOf returns the type of the Golang native value. +func TypeOf(x interface{}) Type { + switch x := x.(type) { + case nil: + return NewNull() + case bool: + return B + case string: + return S + case json.Number: + return N + case map[string]interface{}: + // The ast.ValueToInterface() function returns ast.Object values as map[string]interface{} + // so map[string]interface{} must be handled here because the type checker uses the value + // to interface conversion when inferring object types. 
+ static := make([]*StaticProperty, 0, len(x)) + for k, v := range x { + static = append(static, NewStaticProperty(k, TypeOf(v))) + } + return NewObject(static, nil) + case map[interface{}]interface{}: + static := make([]*StaticProperty, 0, len(x)) + for k, v := range x { + static = append(static, NewStaticProperty(k, TypeOf(v))) + } + return NewObject(static, nil) + case []interface{}: + static := make([]Type, len(x)) + for i := range x { + static[i] = TypeOf(x[i]) + } + return NewArray(static, nil) + } + panic("unreachable") +} + +type typeSlice []Type + +func (s typeSlice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 } +func (s typeSlice) Swap(i, j int) { x := s[i]; s[i] = s[j]; s[j] = x } +func (s typeSlice) Len() int { return len(s) } + +func typeSliceCompare(a, b []Type) int { + minLen := len(a) + if len(b) < minLen { + minLen = len(b) + } + for i := 0; i < minLen; i++ { + if cmp := Compare(a[i], b[i]); cmp != 0 { + return cmp + } + } + if len(a) < len(b) { + return -1 + } else if len(b) < len(a) { + return 1 + } + return 0 +} + +func typeOrder(x Type) int { + switch unwrap(x).(type) { + case Null: + return 0 + case Boolean: + return 1 + case Number: + return 2 + case String: + return 3 + case *Array: + return 4 + case *Object: + return 5 + case *Set: + return 6 + case Any: + return 7 + case *Function: + return 8 + case nil: + return -1 + } + panic("unreachable") +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go new file mode 100644 index 000000000..36d57f14e --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/backoff.go @@ -0,0 +1,53 @@ +// Copyright 2018 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package util + +import ( + "math/rand" + "time" +) + +func init() { + // NOTE(sr): We don't need good random numbers here; it's used for jittering + // the backup timing a bit. But anyways, let's make it random enough; without + // a call to rand.Seed() we'd get the same stream of numbers for each program + // run. (Or not, if some other packages happens to seed the global randomness + // source.) + // Note(philipc): rand.Seed() was deprecated in Go 1.20, so we've switched to + // using the recommended rand.New(rand.NewSource(seed)) style. + rand.New(rand.NewSource(time.Now().UnixNano())) +} + +// DefaultBackoff returns a delay with an exponential backoff based on the +// number of retries. +func DefaultBackoff(base, maxNS float64, retries int) time.Duration { + return Backoff(base, maxNS, .2, 1.6, retries) +} + +// Backoff returns a delay with an exponential backoff based on the number of +// retries. Same algorithm used in gRPC. +func Backoff(base, maxNS, jitter, factor float64, retries int) time.Duration { + if retries == 0 { + return 0 + } + + backoff, maxNS := base, maxNS + for backoff < maxNS && retries > 0 { + backoff *= factor + retries-- + } + if backoff > maxNS { + backoff = maxNS + } + + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. 
+ backoff *= 1 + jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + + return time.Duration(backoff) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/close.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/close.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/util/close.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/util/close.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/util/compare.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/compare.go new file mode 100644 index 000000000..8775a603d --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/compare.go @@ -0,0 +1,175 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package util + +import ( + "encoding/json" + "fmt" + "math/big" +) + +// Compare returns 0 if a equals b, -1 if a is less than b, and 1 if b is than a. +// +// For comparison between values of different types, the following ordering is used: +// nil < bool < int, float64 < string < []interface{} < map[string]interface{}. Slices and maps +// are compared recursively. If one slice or map is a subset of the other slice or map +// it is considered "less than". Nil is always equal to nil. +func Compare(a, b interface{}) int { + aSortOrder := sortOrder(a) + bSortOrder := sortOrder(b) + if aSortOrder < bSortOrder { + return -1 + } else if bSortOrder < aSortOrder { + return 1 + } + switch a := a.(type) { + case nil: + return 0 + case bool: + switch b := b.(type) { + case bool: + if a == b { + return 0 + } + if !a { + return -1 + } + return 1 + } + case json.Number: + switch b := b.(type) { + case json.Number: + return compareJSONNumber(a, b) + } + case int: + switch b := b.(type) { + case int: + if a == b { + return 0 + } else if a < b { + return -1 + } + return 1 + } + case float64: + switch b := b.(type) { + case float64: + if a == b { + return 0 + } else if a < b { + return -1 + } + return 1 + } + case string: + switch b := b.(type) { + case string: + if a == b { + return 0 + } else if a < b { + return -1 + } + return 1 + } + case []interface{}: + switch b := b.(type) { + case []interface{}: + bLen := len(b) + aLen := len(a) + minLen := aLen + if bLen < minLen { + minLen = bLen + } + for i := 0; i < minLen; i++ { + cmp := Compare(a[i], b[i]) + if cmp != 0 { + return cmp + } + } + if aLen == bLen { + return 0 + } else if aLen < bLen { + return -1 + } + return 1 + } + case map[string]interface{}: + switch b := b.(type) { + case map[string]interface{}: + aKeys := KeysSorted(a) + bKeys := KeysSorted(b) + aLen := len(aKeys) + bLen := len(bKeys) + minLen := aLen + if bLen < minLen { + minLen = bLen + } + for i := 0; i < minLen; i++ { + if aKeys[i] < bKeys[i] { + return -1 + } else if bKeys[i] < aKeys[i] { + return 1 + } + aVal := a[aKeys[i]] + bVal := b[bKeys[i]] + cmp := Compare(aVal, bVal) + if cmp != 0 { + return cmp + } + } + if aLen == bLen { + return 0 + } else if aLen < bLen { + return -1 + } + return 1 + } + } + + panic(fmt.Sprintf("illegal arguments of type %T and type %T", a, b)) +} + +const ( + nilSort = iota + boolSort = iota + numberSort = iota + stringSort = iota + arraySort = iota + objectSort = iota +) + +func compareJSONNumber(a, b json.Number) int { + bigA, ok := new(big.Float).SetString(string(a)) + if !ok { + panic("illegal value") + } + bigB, ok := 
new(big.Float).SetString(string(b)) + if !ok { + panic("illegal value") + } + return bigA.Cmp(bigB) +} + +func sortOrder(v interface{}) int { + switch v.(type) { + case nil: + return nilSort + case bool: + return boolSort + case json.Number: + return numberSort + case int: + return numberSort + case float64: + return numberSort + case string: + return stringSort + case []interface{}: + return arraySort + case map[string]interface{}: + return objectSort + } + panic(fmt.Sprintf("illegal argument of type %T", v)) +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/decoding/context.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/decoding/context.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/util/decoding/context.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/util/decoding/context.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/doc.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/doc.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/util/doc.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/util/doc.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/enumflag.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/enumflag.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/util/enumflag.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/util/enumflag.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/graph.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/graph.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/util/graph.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/util/graph.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go new file mode 100644 index 000000000..a6c584c57 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/hashmap.go @@ -0,0 +1,179 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. + +package util + +import ( + "fmt" + "strings" +) + +// T is a concise way to refer to T. +type T interface{} + +type hashEntry[K any, V any] struct { + k K + v V + next *hashEntry[K, V] +} + +// TypedHashMap represents a key/value map. +type TypedHashMap[K any, V any] struct { + keq func(K, K) bool + veq func(V, V) bool + khash func(K) int + vhash func(V) int + def V + table map[int]*hashEntry[K, V] + size int +} + +// NewTypedHashMap returns a new empty TypedHashMap. +func NewTypedHashMap[K any, V any](keq func(K, K) bool, veq func(V, V) bool, khash func(K) int, vhash func(V) int, def V) *TypedHashMap[K, V] { + return &TypedHashMap[K, V]{ + keq: keq, + veq: veq, + khash: khash, + vhash: vhash, + def: def, + table: make(map[int]*hashEntry[K, V]), + size: 0, + } +} + +// HashMap represents a key/value map. +type HashMap = TypedHashMap[T, T] + +// NewHashMap returns a new empty HashMap. +func NewHashMap(eq func(T, T) bool, hash func(T) int) *HashMap { + return &HashMap{ + keq: eq, + veq: eq, + khash: hash, + vhash: hash, + def: nil, + table: make(map[int]*hashEntry[T, T]), + size: 0, + } +} + +// Copy returns a shallow copy of this HashMap. 
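+//
+// Illustrative usage (not part of the original source; the eq and hash functions
+// shown are assumptions):
+//
+//	m := util.NewHashMap(func(a, b util.T) bool { return a == b }, func(x util.T) int { return len(x.(string)) })
+//	m.Put("k", 1)
+//	cpy := m.Copy() // cpy and m can now be modified independently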
+func (h *TypedHashMap[K, V]) Copy() *TypedHashMap[K, V] { + cpy := NewTypedHashMap[K, V](h.keq, h.veq, h.khash, h.vhash, h.def) + h.Iter(func(k K, v V) bool { + cpy.Put(k, v) + return false + }) + return cpy +} + +// Equal returns true if this HashMap equals the other HashMap. +// Two hash maps are equal if they contain the same key/value pairs. +func (h *TypedHashMap[K, V]) Equal(other *TypedHashMap[K, V]) bool { + if h.Len() != other.Len() { + return false + } + return !h.Iter(func(k K, v V) bool { + ov, ok := other.Get(k) + if !ok { + return true + } + return !h.veq(v, ov) + }) +} + +// Get returns the value for k. +func (h *TypedHashMap[K, V]) Get(k K) (V, bool) { + hash := h.khash(k) + for entry := h.table[hash]; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + return entry.v, true + } + } + return h.def, false +} + +// Delete removes the key k. +func (h *TypedHashMap[K, V]) Delete(k K) { + hash := h.khash(k) + var prev *hashEntry[K, V] + for entry := h.table[hash]; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + if prev != nil { + prev.next = entry.next + } else { + h.table[hash] = entry.next + } + h.size-- + return + } + prev = entry + } +} + +// Hash returns the hash code for this hash map. +func (h *TypedHashMap[K, V]) Hash() int { + var hash int + h.Iter(func(k K, v V) bool { + hash += h.khash(k) + h.vhash(v) + return false + }) + return hash +} + +// Iter invokes the iter function for each element in the HashMap. +// If the iter function returns true, iteration stops and the return value is true. +// If the iter function never returns true, iteration proceeds through all elements +// and the return value is false. +func (h *TypedHashMap[K, V]) Iter(iter func(K, V) bool) bool { + for _, entry := range h.table { + for ; entry != nil; entry = entry.next { + if iter(entry.k, entry.v) { + return true + } + } + } + return false +} + +// Len returns the current size of this HashMap. +func (h *TypedHashMap[K, V]) Len() int { + return h.size +} + +// Put inserts a key/value pair into this HashMap. If the key is already present, the existing +// value is overwritten. +func (h *TypedHashMap[K, V]) Put(k K, v V) { + hash := h.khash(k) + head := h.table[hash] + for entry := head; entry != nil; entry = entry.next { + if h.keq(entry.k, k) { + entry.v = v + return + } + } + h.table[hash] = &hashEntry[K, V]{k: k, v: v, next: head} + h.size++ +} + +func (h *TypedHashMap[K, V]) String() string { + var buf []string + h.Iter(func(k K, v V) bool { + buf = append(buf, fmt.Sprintf("%v: %v", k, v)) + return false + }) + return "{" + strings.Join(buf, ", ") + "}" +} + +// Update returns a new HashMap with elements from the other HashMap put into this HashMap. +// If the other HashMap contains elements with the same key as this HashMap, the value +// from the other HashMap overwrites the value from this HashMap. +func (h *TypedHashMap[K, V]) Update(other *TypedHashMap[K, V]) *TypedHashMap[K, V] { + updated := h.Copy() + other.Iter(func(k K, v V) bool { + updated.Put(k, v) + return false + }) + return updated +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/util/json.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/json.go new file mode 100644 index 000000000..5a4e460b6 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/json.go @@ -0,0 +1,133 @@ +// Copyright 2016 The OPA Authors. All rights reserved. +// Use of this source code is governed by an Apache2 +// license that can be found in the LICENSE file. 
+ +package util + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + "sigs.k8s.io/yaml" + + "github.com/open-policy-agent/opa/v1/loader/extension" +) + +// UnmarshalJSON parses the JSON encoded data and stores the result in the value +// pointed to by x. +// +// This function is intended to be used in place of the standard json.Marshal +// function when json.Number is required. +func UnmarshalJSON(bs []byte, x interface{}) error { + return unmarshalJSON(bs, x, true) +} + +func unmarshalJSON(bs []byte, x interface{}, ext bool) error { + buf := bytes.NewBuffer(bs) + decoder := NewJSONDecoder(buf) + if err := decoder.Decode(x); err != nil { + if handler := extension.FindExtension(".json"); handler != nil && ext { + return handler(bs, x) + } + return err + } + + // Since decoder.Decode validates only the first json structure in bytes, + // check if decoder has more bytes to consume to validate whole input bytes. + tok, err := decoder.Token() + if tok != nil { + return fmt.Errorf("error: invalid character '%s' after top-level value", tok) + } + if err != nil && err != io.EOF { + return err + } + return nil +} + +// NewJSONDecoder returns a new decoder that reads from r. +// +// This function is intended to be used in place of the standard json.NewDecoder +// when json.Number is required. +func NewJSONDecoder(r io.Reader) *json.Decoder { + decoder := json.NewDecoder(r) + decoder.UseNumber() + return decoder +} + +// MustUnmarshalJSON parse the JSON encoded data and returns the result. +// +// If the data cannot be decoded, this function will panic. This function is for +// test purposes. +func MustUnmarshalJSON(bs []byte) interface{} { + var x interface{} + if err := UnmarshalJSON(bs, &x); err != nil { + panic(err) + } + return x +} + +// MustMarshalJSON returns the JSON encoding of x +// +// If the data cannot be encoded, this function will panic. This function is for +// test purposes. +func MustMarshalJSON(x interface{}) []byte { + bs, err := json.Marshal(x) + if err != nil { + panic(err) + } + return bs +} + +// RoundTrip encodes to JSON, and decodes the result again. +// +// Thereby, it is converting its argument to the representation expected by +// rego.Input and inmem's Write operations. Works with both references and +// values. +func RoundTrip(x *interface{}) error { + bs, err := json.Marshal(x) + if err != nil { + return err + } + return UnmarshalJSON(bs, x) +} + +// Reference returns a pointer to its argument unless the argument already is +// a pointer. If the argument is **t, or ***t, etc, it will return *t. +// +// Used for preparing Go types (including pointers to structs) into values to be +// put through util.RoundTrip(). +func Reference(x interface{}) *interface{} { + var y interface{} + rv := reflect.ValueOf(x) + if rv.Kind() == reflect.Ptr { + return Reference(rv.Elem().Interface()) + } + if rv.Kind() != reflect.Invalid { + y = rv.Interface() + return &y + } + return &x +} + +// Unmarshal decodes a YAML, JSON or JSON extension value into the specified type. 
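+//
+// Illustrative usage (not part of the original source): both JSON and YAML inputs
+// decode with numbers preserved as json.Number, e.g.
+//
+//	var v interface{}
+//	if err := util.Unmarshal([]byte("a: 1"), &v); err == nil {
+//		// v is map[string]interface{}{"a": json.Number("1")}
+//	}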
+func Unmarshal(bs []byte, v interface{}) error { + if len(bs) > 2 && bs[0] == 0xef && bs[1] == 0xbb && bs[2] == 0xbf { + bs = bs[3:] // Strip UTF-8 BOM, see https://www.rfc-editor.org/rfc/rfc8259#section-8.1 + } + + if json.Valid(bs) { + return unmarshalJSON(bs, v, false) + } + nbs, err := yaml.YAMLToJSON(bs) + if err == nil { + return unmarshalJSON(nbs, v, false) + } + // not json or yaml: try extensions + if handler := extension.FindExtension(".json"); handler != nil { + return handler(bs, v) + } + return err +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/util/maps.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/maps.go new file mode 100644 index 000000000..c56fbe98a --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/maps.go @@ -0,0 +1,34 @@ +package util + +import ( + "cmp" + "slices" +) + +// Keys returns a slice of keys from any map. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// KeysSorted returns a slice of keys from any map, sorted in ascending order. +func KeysSorted[M ~map[K]V, K cmp.Ordered, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + slices.Sort(r) + return r +} + +// Values returns a slice of values from any map. Copied from golang.org/x/exp/maps. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/util/performance.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/performance.go new file mode 100644 index 000000000..b24b49a1d --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/performance.go @@ -0,0 +1,64 @@ +package util + +import ( + "math" + "slices" + "unsafe" +) + +// NewPtrSlice returns a slice of pointers to T with length n, +// with only 2 allocations performed no matter the size of n. +// See: +// https://gist.github.com/CAFxX/e96e8a5c3841d152f16d266a1fe7f8bd#slices-of-pointers +func NewPtrSlice[T any](n int) []*T { + return GrowPtrSlice[T](nil, n) +} + +// GrowPtrSlice appends n elements to the slice, each pointing to +// a newly-allocated T. The resulting slice has length equal to len(s)+n. +// +// It performs at most 2 allocations, regardless of n. +func GrowPtrSlice[T any](s []*T, n int) []*T { + s = slices.Grow(s, n) + p := make([]T, n) + for i := 0; i < n; i++ { + s = append(s, &p[i]) + } + return s +} + +// Allocation free conversion from []byte to string (unsafe) +// Note that the byte slice must not be modified after conversion +func ByteSliceToString(bs []byte) string { + return unsafe.String(unsafe.SliceData(bs), len(bs)) +} + +// Allocation free conversion from ~string to []byte (unsafe) +// Note that the byte slice must not be modified after conversion +func StringToByteSlice[T ~string](s T) []byte { + return unsafe.Slice(unsafe.StringData(string(s)), len(s)) +} + +// NumDigitsInt returns the number of digits in n. +// This is useful for pre-allocating buffers for string conversion. +func NumDigitsInt(n int) int { + if n == 0 { + return 1 + } + + if n < 0 { + n = -n + } + + return int(math.Log10(float64(n))) + 1 +} + +// NumDigitsUint returns the number of digits in n. +// This is useful for pre-allocating buffers for string conversion. 
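+// For example (illustrative): NumDigitsUint(12345) returns 5, just as
+// NumDigitsInt(-12345) above returns 5.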
+func NumDigitsUint(n uint64) int { + if n == 0 { + return 1 + } + + return int(math.Log10(float64(n))) + 1 +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/queue.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/queue.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/util/queue.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/util/queue.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go similarity index 98% rename from constraint/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go index 217638b36..74bca7263 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/util/read_gzip_body.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/read_gzip_body.go @@ -10,7 +10,7 @@ import ( "strings" "sync" - "github.com/open-policy-agent/opa/util/decoding" + "github.com/open-policy-agent/opa/v1/util/decoding" ) var gzipReaderPool = sync.Pool{ diff --git a/constraint/vendor/github.com/open-policy-agent/opa/v1/util/time.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/time.go new file mode 100644 index 000000000..93ef03939 --- /dev/null +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/time.go @@ -0,0 +1,48 @@ +package util + +import "time" + +// TimerWithCancel exists because of memory leaks when using +// time.After in select statements. Instead, we now manually create timers, +// wait on them, and manually free them. +// +// See this for more details: +// https://www.arangodb.com/2020/09/a-story-of-a-memory-leak-in-go-how-to-properly-use-time-after/ +// +// Note: This issue is fixed in Go 1.23, but this fix helps us until then. +// +// Warning: the cancel cannot be done concurrent to reading, everything should +// work in the same goroutine. +// +// Example: +// +// for retries := 0; true; retries++ { +// +// ...main logic... +// +// timer, cancel := utils.TimerWithCancel(utils.Backoff(retries)) +// select { +// case <-ctx.Done(): +// cancel() +// return ctx.Err() +// case <-timer.C: +// continue +// } +// } +func TimerWithCancel(delay time.Duration) (*time.Timer, func()) { + timer := time.NewTimer(delay) + + return timer, func() { + // Note: The Stop function returns: + // - true: if the timer is active. (no draining required) + // - false: if the timer was already stopped or fired/expired. + // In this case the channel should be drained to prevent memory + // leaks only if it is not empty. + // This operation is safe only if the cancel function is + // used in same goroutine. Concurrent reading or canceling may + // cause deadlock. 
+ if !timer.Stop() && len(timer.C) > 0 { + <-timer.C + } + } +} diff --git a/constraint/vendor/github.com/open-policy-agent/opa/util/wait.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/util/wait.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/util/wait.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/util/wait.go diff --git a/constraint/vendor/github.com/open-policy-agent/opa/version/version.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/version/version.go similarity index 97% rename from constraint/vendor/github.com/open-policy-agent/opa/version/version.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/version/version.go index 7dece0157..0af828f88 100644 --- a/constraint/vendor/github.com/open-policy-agent/opa/version/version.go +++ b/constraint/vendor/github.com/open-policy-agent/opa/v1/version/version.go @@ -11,7 +11,7 @@ import ( ) // Version is the canonical version of OPA. -var Version = "0.68.0" +var Version = "1.1.0" // GoVersion is the version of Go this was built with var GoVersion = runtime.Version() diff --git a/constraint/vendor/github.com/open-policy-agent/opa/version/wasm.go b/constraint/vendor/github.com/open-policy-agent/opa/v1/version/wasm.go similarity index 100% rename from constraint/vendor/github.com/open-policy-agent/opa/version/wasm.go rename to constraint/vendor/github.com/open-policy-agent/opa/v1/version/wasm.go diff --git a/constraint/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/constraint/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 8d35f2d8a..519db348a 100644 --- a/constraint/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/constraint/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -844,9 +844,7 @@ func (h *histogram) Write(out *dto.Metric) error { }} } - // If exemplars are not configured, the cap will be 0. - // So append is not needed in this case. - if cap(h.nativeExemplars.exemplars) > 0 { + if h.nativeExemplars.isEnabled() { h.nativeExemplars.Lock() his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) h.nativeExemplars.Unlock() @@ -1658,10 +1656,17 @@ func addAndResetCounts(hot, cold *histogramCounts) { type nativeExemplars struct { sync.Mutex - ttl time.Duration + // Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0. + // The ttl is used on insertion to remove an exemplar that is older than ttl, if present. + ttl time.Duration + exemplars []*dto.Exemplar } +func (n *nativeExemplars) isEnabled() bool { + return n.ttl != -1 +} + func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { if ttl == 0 { ttl = 5 * time.Minute @@ -1673,6 +1678,7 @@ func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { if maxCount < 0 { maxCount = 0 + ttl = -1 } return nativeExemplars{ @@ -1682,20 +1688,18 @@ func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { } func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { - if cap(n.exemplars) == 0 { + if !n.isEnabled() { return } n.Lock() defer n.Unlock() - // The index where to insert the new exemplar. - var nIdx int = -1 - // When the number of exemplars has not yet exceeded or // is equal to cap(n.exemplars), then // insert the new exemplar directly. 
if len(n.exemplars) < cap(n.exemplars) { + var nIdx int for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { if *e.Value < *n.exemplars[nIdx].Value { break @@ -1705,17 +1709,46 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { return } + if len(n.exemplars) == 1 { + // When the number of exemplars is 1, then + // replace the existing exemplar with the new exemplar. + n.exemplars[0] = e + return + } + // From this point on, the number of exemplars is greater than 1. + // When the number of exemplars exceeds the limit, remove one exemplar. var ( - rIdx int // The index where to remove the old exemplar. - - ot = time.Now() // Oldest timestamp seen. - otIdx = -1 // Index of the exemplar with the oldest timestamp. - - md = -1.0 // Logarithm of the delta of the closest pair of exemplars. - mdIdx = -1 // Index of the older exemplar within the closest pair. - cLog float64 // Logarithm of the current exemplar. - pLog float64 // Logarithm of the previous exemplar. + ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop. + otIdx = -1 // Index of the exemplar with the oldest timestamp. + + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. + + // The insertion point of the new exemplar in the exemplars slice after insertion. + // This is calculated purely based on the order of the exemplars by value. + // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end. + nIdx = -1 + + // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar. + // The aim is to keep a good spread of exemplars by value and not let them bunch up too much. + // It is calculated in 3 steps: + // 1. First we set rIdx to the index of the older exemplar within the closest pair by value. + // That is the following will be true (on log scale): + // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have + // the closest values to each other from all pairs. + // For example, suppose the values are distributed like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // Or like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // 2. If there is an exemplar that expired, then we simple reset rIdx to that index. + // 3. We check if by inserting the new exemplar we would create a closer pair at + // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to + // keep the spread of exemplars by value; otherwise we keep rIdx as it is. + rIdx = -1 + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. ) for i, exemplar := range n.exemplars { @@ -1726,7 +1759,7 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { } // Find the index at which to insert new the exemplar. - if *e.Value <= *exemplar.Value && nIdx == -1 { + if nIdx == -1 && *e.Value <= *exemplar.Value { nIdx = i } @@ -1738,11 +1771,13 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { } diff := math.Abs(cLog - pLog) if md == -1 || diff < md { + // The closest exemplar pair is at index: i-1, i. + // Choose the exemplar with the older timestamp for replacement. 
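+				// Illustrative example (not part of the upstream change): with exemplar
+				// values 1, 2, 10 and 100, the log-scale gaps are ~0.69, ~1.61 and ~2.30,
+				// so (1, 2) is the closest pair and the older of those two becomes the
+				// candidate rIdx.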
md = diff if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { - mdIdx = i + rIdx = i } else { - mdIdx = i - 1 + rIdx = i - 1 } } @@ -1753,8 +1788,12 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { if nIdx == -1 { nIdx = len(n.exemplars) } + // Here, we have the following relationships: + // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0) + // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars)) if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + // If the oldest exemplar has expired, then replace it with the new exemplar. rIdx = otIdx } else { // In the previous for loop, when calculating the closest pair of exemplars, @@ -1764,23 +1803,26 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { if nIdx > 0 { diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) if diff < md { + // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-n-----------x----------------x----x-----| + // nIdx-1--^ ^--new exemplar value + // Do not make the spread worse, replace nIdx-1 and not rIdx. md = diff - mdIdx = nIdx - if n.exemplars[nIdx-1].Timestamp.AsTime().Before(e.Timestamp.AsTime()) { - mdIdx = nIdx - 1 - } + rIdx = nIdx - 1 } } if nIdx < len(n.exemplars) { diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) if diff < md { - mdIdx = nIdx - if n.exemplars[nIdx].Timestamp.AsTime().Before(e.Timestamp.AsTime()) { - mdIdx = nIdx - } + // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-----------n-x----------------x----x-----| + // new exemplar value--^ ^--nIdx + // Do not make the spread worse, replace nIdx-1 and not rIdx. + rIdx = nIdx } } - rIdx = mdIdx } // Adjust the slice according to rIdx and nIdx. diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/.gitignore b/constraint/vendor/github.com/sashabaranov/go-openai/.gitignore new file mode 100644 index 000000000..b0ac1605c --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/.gitignore @@ -0,0 +1,22 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Auth token for tests +.openai-token +.idea + +# Generated by tests +test.mp3 \ No newline at end of file diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/.golangci.yml b/constraint/vendor/github.com/sashabaranov/go-openai/.golangci.yml new file mode 100644 index 000000000..724cb7375 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/.golangci.yml @@ -0,0 +1,272 @@ +## Golden config for golangci-lint v1.47.3 +# +# This is the best config for golangci-lint based on my experience and opinion. +# It is very strict, but not extremely strict. +# Feel free to adopt and change it for your needs. + +run: + # Timeout for analysis, e.g. 30s, 5m. + # Default: 1m + timeout: 3m + + +# This file contains only configs which differ from defaults. +# All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml +linters-settings: + cyclop: + # The maximal code complexity to report. + # Default: 10 + max-complexity: 30 + # The maximal average package complexity. 
+ # If it's higher than 0.0 (float) the check is enabled + # Default: 0.0 + package-average: 10.0 + + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: true + + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: 100 + # Checks the number of statements in a function. + # If lower than 0, disable the check. + # Default: 40 + statements: 50 + + gocognit: + # Minimal code complexity to report + # Default: 30 (but we recommend 10-20) + min-complexity: 20 + + gocritic: + # Settings passed to gocritic. + # The settings key is the name of a supported gocritic checker. + # The list of supported checkers can be find in https://go-critic.github.io/overview. + settings: + captLocal: + # Whether to restrict checker to params only. + # Default: true + paramsOnly: false + underef: + # Whether to skip (*x).method() calls where x is a pointer receiver. + # Default: true + skipRecvDeref: false + + mnd: + # List of function patterns to exclude from analysis. + # Values always ignored: `time.Date` + # Default: [] + ignored-functions: + - os.Chmod + - os.Mkdir + - os.MkdirAll + - os.OpenFile + - os.WriteFile + - prometheus.ExponentialBuckets + - prometheus.ExponentialBucketsRange + - prometheus.LinearBuckets + - strconv.FormatFloat + - strconv.FormatInt + - strconv.FormatUint + - strconv.ParseFloat + - strconv.ParseInt + - strconv.ParseUint + + gomodguard: + blocked: + # List of blocked modules. + # Default: [] + modules: + - github.com/golang/protobuf: + recommendations: + - google.golang.org/protobuf + reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules" + - github.com/satori/go.uuid: + recommendations: + - github.com/google/uuid + reason: "satori's package is not maintained" + - github.com/gofrs/uuid: + recommendations: + - github.com/google/uuid + reason: "see recommendation from dev-infra team: https://confluence.gtforge.com/x/gQI6Aw" + + govet: + # Enable all analyzers. + # Default: false + enable-all: true + # Disable analyzers by name. + # Run `go tool vet help` to see all analyzers. + # Default: [] + disable: + - fieldalignment # too strict + # Settings per analyzer. + settings: + shadow: + # Whether to be strict about shadowing; can be noisy. + # Default: false + strict: true + + nakedret: + # Make an issue if func has more lines of code than this setting, and it has naked returns. + # Default: 30 + max-func-lines: 0 + + nolintlint: + # Exclude following linters from requiring an explanation. + # Default: [] + allow-no-explanation: [ funlen, gocognit, lll ] + # Enable to require an explanation of nonzero length after each nolint directive. + # Default: false + require-explanation: true + # Enable to require nolint directives to mention the specific linter being suppressed. + # Default: false + require-specific: true + + rowserrcheck: + # database/sql is always checked + # Default: [] + packages: + - github.com/jmoiron/sqlx + + tenv: + # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. + # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. + # Default: false + all: true + + varcheck: + # Check usage of exported fields and variables. 
+ # Default: false + exported-fields: false # default false # TODO: enable after fixing false positives + + +linters: + disable-all: true + enable: + ## enabled by default + - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases + - gosimple # Linter for Go source code that specializes in simplifying a code + - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ineffassign # Detects when assignments to existing variables are not used + - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + - unused # Checks Go code for unused constants, variables, functions and types + ## disabled by default + # - asasalint # Check for pass []any as any in variadic func(...any) + - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers + - bidichk # Checks for dangerous unicode character sequences + - bodyclose # checks whether HTTP response body is closed successfully + - contextcheck # check the function whether use a non-inherited context + - cyclop # checks function and package cyclomatic complexity + - dupl # Tool for code clone detection + - durationcheck # check for two durations multiplied together + - errname # Checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error. + - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + # Removed execinquery (deprecated). execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds + - exhaustive # check exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing loop variables + - forbidigo # Forbids identifiers + - funlen # Tool for detection of long functions + # - gochecknoglobals # check that no global variables exist + - gochecknoinits # Checks that no init functions are present in Go code + - gocognit # Computes and checks the cognitive complexity of functions + - goconst # Finds repeated strings that could be replaced by a constant + - gocritic # Provides diagnostics that check for bugs, performance and style issues. + - gocyclo # Computes and checks the cyclomatic complexity of functions + - godot # Check if comments end in a period + - goimports # In addition to fixing imports, goimports also formats your code in the same style as gofmt. + - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. + - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. + - goprintffuncname # Checks that printf-like functions are named with f at the end + - gosec # Inspects source code for security problems + - lll # Reports long lines + - makezero # Finds slice declarations with non-zero initial length + # - nakedret # Finds naked returns in functions greater than a specified function length + - mnd # An analyzer to detect magic numbers. + - nestif # Reports deeply nested if statements + - nilerr # Finds the code that returns nil even if it checks that the error is not nil. 
+ - nilnil # Checks that there is no simultaneous return of nil error and an invalid value. + # - noctx # noctx finds sending http request without context.Context + - nolintlint # Reports ill-formed or insufficient nolint directives + # - nonamedreturns # Reports all named returns + - nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL. + - predeclared # find code that shadows one of Go's predeclared identifiers + - promlinter # Check Prometheus metrics naming via promlint + - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. + - rowserrcheck # checks whether Err of rows is checked successfully + - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed. + - stylecheck # Stylecheck is a replacement for golint + - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 + - testpackage # linter that makes you use a separate _test package + - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes + - unconvert # Remove unnecessary type conversions + - unparam # Reports unused function parameters + - wastedassign # wastedassign finds wasted assignment statements. + - whitespace # Tool for detection of leading and trailing whitespace + ## you may want to enable + #- decorder # check declaration order and count of types, constants, variables and functions + #- exhaustruct # Checks if all structure fields are initialized + #- goheader # Checks is file header matches to pattern + #- ireturn # Accept Interfaces, Return Concrete Types + #- prealloc # [premature optimization, but can be used in some cases] Finds slice declarations that could potentially be preallocated + #- varnamelen # [great idea, but too many false positives] checks that the length of a variable's name matches its scope + #- wrapcheck # Checks that errors returned from external packages are wrapped + ## disabled + #- containedctx # containedctx is a linter that detects struct contained context.Context field + #- depguard # [replaced by gomodguard] Go linter that checks if package imports are in a list of acceptable packages + #- dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) + #- errchkjson # [don't see profit + I'm against of omitting errors like in the first example https://github.com/breml/errchkjson] Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions, where the check for the returned error can be omitted. + #- forcetypeassert # [replaced by errcheck] finds forced type assertions + #- gci # Gci controls golang package import order and makes it always deterministic. + #- godox # Tool for detection of FIXME, TODO and other comment keywords + #- goerr113 # [too strict] Golang linter to check the errors handling expressions + #- gofmt # [replaced by goimports] Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification + #- gofumpt # [replaced by goimports, gofumports is not available yet] Gofumpt checks whether code was gofumpt-ed. + #- grouper # An analyzer to analyze expression groups. + #- ifshort # Checks that your code uses short syntax for if-statements whenever possible + #- importas # Enforces consistent import aliases + #- maintidx # maintidx measures the maintainability index of each function. 
+ #- misspell # [useless] Finds commonly misspelled English words in comments + #- nlreturn # [too strict and mostly code is not more readable] nlreturn checks for a new line before return and branch statements to increase code clarity + #- nosnakecase # Detects snake case of variable naming and function name. # TODO: maybe enable after https://github.com/sivchari/nosnakecase/issues/14 + #- paralleltest # [too many false positives] paralleltest detects missing usage of t.Parallel() method in your Go test + #- tagliatelle # Checks the struct tags. + #- thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers + #- wsl # [too strict and mostly code is not more readable] Whitespace Linter - Forces you to use empty lines! + ## deprecated + #- exhaustivestruct # [deprecated, replaced by exhaustruct] Checks if all struct's fields are initialized + #- golint # [deprecated, replaced by revive] Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes + #- interfacer # [deprecated] Linter that suggests narrower interface types + #- maligned # [deprecated, replaced by govet fieldalignment] Tool to detect Go structs that would take less memory if their fields were sorted + #- scopelint # [deprecated, replaced by exportloopref] Scopelint checks for unpinned variables in go programs + + +issues: + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 50 + + exclude-rules: + - source: "^//\\s*go:generate\\s" + linters: [ lll ] + - source: "(noinspection|TODO)" + linters: [ godot ] + - source: "//noinspection" + linters: [ gocritic ] + - source: "^\\s+if _, ok := err\\.\\([^.]+\\.InternalError\\); ok {" + linters: [ errorlint ] + - path: "_test\\.go" + linters: + - bodyclose + - dupl + - funlen + - goconst + - gosec + - noctx + - wrapcheck diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/CONTRIBUTING.md b/constraint/vendor/github.com/sashabaranov/go-openai/CONTRIBUTING.md new file mode 100644 index 000000000..4dd184042 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/CONTRIBUTING.md @@ -0,0 +1,88 @@ +# Contributing Guidelines + +## Overview +Thank you for your interest in contributing to the "Go OpenAI" project! By following this guideline, we hope to ensure that your contributions are made smoothly and efficiently. The Go OpenAI project is licensed under the [Apache 2.0 License](https://github.com/sashabaranov/go-openai/blob/master/LICENSE), and we welcome contributions through GitHub pull requests. + +## Reporting Bugs +If you discover a bug, first check the [GitHub Issues page](https://github.com/sashabaranov/go-openai/issues) to see if the issue has already been reported. If you're reporting a new issue, please use the "Bug report" template and provide detailed information about the problem, including steps to reproduce it. + +## Suggesting Features +If you want to suggest a new feature or improvement, first check the [GitHub Issues page](https://github.com/sashabaranov/go-openai/issues) to ensure a similar suggestion hasn't already been made. Use the "Feature request" template to provide a detailed description of your suggestion. + +## Reporting Vulnerabilities +If you identify a security concern, please use the "Report a security vulnerability" template on the [GitHub Issues page](https://github.com/sashabaranov/go-openai/issues) to share the details. This report will only be viewable to repository maintainers. 
You will be credited if the advisory is published. + +## Questions for Users +If you have questions, please utilize [StackOverflow](https://stackoverflow.com/) or the [GitHub Discussions page](https://github.com/sashabaranov/go-openai/discussions). + +## Contributing Code +There might already be a similar pull requests submitted! Please search for [pull requests](https://github.com/sashabaranov/go-openai/pulls) before creating one. + +### Requirements for Merging a Pull Request + +The requirements to accept a pull request are as follows: + +- Features not provided by the OpenAI API will not be accepted. +- The functionality of the feature must match that of the official OpenAI API. +- All pull requests should be written in Go according to common conventions, formatted with `goimports`, and free of warnings from tools like `golangci-lint`. +- Include tests and ensure all tests pass. +- Maintain test coverage without any reduction. +- All pull requests require approval from at least one Go OpenAI maintainer. + +**Note:** +The merging method for pull requests in this repository is squash merge. + +### Creating a Pull Request +- Fork the repository. +- Create a new branch and commit your changes. +- Push that branch to GitHub. +- Start a new Pull Request on GitHub. (Please use the pull request template to provide detailed information.) + +**Note:** +If your changes introduce breaking changes, please prefix your pull request title with "[BREAKING_CHANGES]". + +### Code Style +In this project, we adhere to the standard coding style of Go. Your code should maintain consistency with the rest of the codebase. To achieve this, please format your code using tools like `goimports` and resolve any syntax or style issues with `golangci-lint`. + +**Run goimports:** +``` +go install golang.org/x/tools/cmd/goimports@latest +``` + +``` +goimports -w . +``` + +**Run golangci-lint:** +``` +go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest +``` + +``` +golangci-lint run --out-format=github-actions +``` + +### Unit Test +Please create or update tests relevant to your changes. Ensure all tests run successfully to verify that your modifications do not adversely affect other functionalities. + +**Run test:** +``` +go test -v ./... +``` + +### Integration Test +Integration tests are requested against the production version of the OpenAI API. These tests will verify that the library is properly coded against the actual behavior of the API, and will fail upon any incompatible change in the API. + +**Notes:** +These tests send real network traffic to the OpenAI API and may reach rate limits. Temporary network problems may also cause the test to fail. + +**Run integration test:** +``` +OPENAI_TOKEN=XXX go test -v -tags=integration ./api_integration_test.go +``` + +If the `OPENAI_TOKEN` environment variable is not available, integration tests will be skipped. + +--- + +We wholeheartedly welcome your active participation. Let's build an amazing project together! diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/LICENSE b/constraint/vendor/github.com/sashabaranov/go-openai/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/README.md b/constraint/vendor/github.com/sashabaranov/go-openai/README.md new file mode 100644 index 000000000..57d1d35bf --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/README.md @@ -0,0 +1,853 @@ +# Go OpenAI +[![Go Reference](https://pkg.go.dev/badge/github.com/sashabaranov/go-openai.svg)](https://pkg.go.dev/github.com/sashabaranov/go-openai) +[![Go Report Card](https://goreportcard.com/badge/github.com/sashabaranov/go-openai)](https://goreportcard.com/report/github.com/sashabaranov/go-openai) +[![codecov](https://codecov.io/gh/sashabaranov/go-openai/branch/master/graph/badge.svg?token=bCbIfHLIsW)](https://codecov.io/gh/sashabaranov/go-openai) + +This library provides unofficial Go clients for [OpenAI API](https://platform.openai.com/). We support: + +* ChatGPT 4o, o1 +* GPT-3, GPT-4 +* DALL·E 2, DALL·E 3 +* Whisper + +## Installation + +``` +go get github.com/sashabaranov/go-openai +``` +Currently, go-openai requires Go version 1.18 or greater. + + +## Usage + +### ChatGPT example usage: + +```go +package main + +import ( + "context" + "fmt" + openai "github.com/sashabaranov/go-openai" +) + +func main() { + client := openai.NewClient("your token") + resp, err := client.CreateChatCompletion( + context.Background(), + openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "Hello!", + }, + }, + }, + ) + + if err != nil { + fmt.Printf("ChatCompletion error: %v\n", err) + return + } + + fmt.Println(resp.Choices[0].Message.Content) +} + +``` + +### Getting an OpenAI API Key: + +1. Visit the OpenAI website at [https://platform.openai.com/account/api-keys](https://platform.openai.com/account/api-keys). +2. If you don't have an account, click on "Sign Up" to create one. If you do, click "Log In". +3. Once logged in, navigate to your API key management page. +4. Click on "Create new secret key". +5. Enter a name for your new key, then click "Create secret key". +6. Your new API key will be displayed. Use this key to interact with the OpenAI API. + +**Note:** Your API key is sensitive information. Do not share it with anyone. + +### Other examples: + +
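A rough sketch, not part of the upstream README, of reading the key from the environment instead of hard-coding it; the variable name `OPENAI_API_KEY` is a common convention and an assumption here, nothing the library requires.

```go
package main

import (
	"fmt"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// OPENAI_API_KEY is only a conventional name; match whatever variable the
	// key was exported under rather than hard-coding the token in source.
	key := os.Getenv("OPENAI_API_KEY")
	if key == "" {
		fmt.Println("OPENAI_API_KEY is not set")
		return
	}
	client := openai.NewClient(key)
	_ = client // use the client exactly as in the examples below
}
```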
+ChatGPT streaming completion + +```go +package main + +import ( + "context" + "errors" + "fmt" + "io" + openai "github.com/sashabaranov/go-openai" +) + +func main() { + c := openai.NewClient("your token") + ctx := context.Background() + + req := openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + MaxTokens: 20, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "Lorem ipsum", + }, + }, + Stream: true, + } + stream, err := c.CreateChatCompletionStream(ctx, req) + if err != nil { + fmt.Printf("ChatCompletionStream error: %v\n", err) + return + } + defer stream.Close() + + fmt.Printf("Stream response: ") + for { + response, err := stream.Recv() + if errors.Is(err, io.EOF) { + fmt.Println("\nStream finished") + return + } + + if err != nil { + fmt.Printf("\nStream error: %v\n", err) + return + } + + fmt.Printf(response.Choices[0].Delta.Content) + } +} +``` +
+ +
+GPT-3 completion + +```go +package main + +import ( + "context" + "fmt" + openai "github.com/sashabaranov/go-openai" +) + +func main() { + c := openai.NewClient("your token") + ctx := context.Background() + + req := openai.CompletionRequest{ + Model: openai.GPT3Babbage002, + MaxTokens: 5, + Prompt: "Lorem ipsum", + } + resp, err := c.CreateCompletion(ctx, req) + if err != nil { + fmt.Printf("Completion error: %v\n", err) + return + } + fmt.Println(resp.Choices[0].Text) +} +``` +
+ +
+GPT-3 streaming completion + +```go +package main + +import ( + "errors" + "context" + "fmt" + "io" + openai "github.com/sashabaranov/go-openai" +) + +func main() { + c := openai.NewClient("your token") + ctx := context.Background() + + req := openai.CompletionRequest{ + Model: openai.GPT3Babbage002, + MaxTokens: 5, + Prompt: "Lorem ipsum", + Stream: true, + } + stream, err := c.CreateCompletionStream(ctx, req) + if err != nil { + fmt.Printf("CompletionStream error: %v\n", err) + return + } + defer stream.Close() + + for { + response, err := stream.Recv() + if errors.Is(err, io.EOF) { + fmt.Println("Stream finished") + return + } + + if err != nil { + fmt.Printf("Stream error: %v\n", err) + return + } + + + fmt.Printf("Stream response: %v\n", response) + } +} +``` +
+ +
+Audio Speech-To-Text + +```go +package main + +import ( + "context" + "fmt" + + openai "github.com/sashabaranov/go-openai" +) + +func main() { + c := openai.NewClient("your token") + ctx := context.Background() + + req := openai.AudioRequest{ + Model: openai.Whisper1, + FilePath: "recording.mp3", + } + resp, err := c.CreateTranscription(ctx, req) + if err != nil { + fmt.Printf("Transcription error: %v\n", err) + return + } + fmt.Println(resp.Text) +} +``` +
+ +
+Audio Captions + +```go +package main + +import ( + "context" + "fmt" + "os" + + openai "github.com/sashabaranov/go-openai" +) + +func main() { + c := openai.NewClient(os.Getenv("OPENAI_KEY")) + + req := openai.AudioRequest{ + Model: openai.Whisper1, + FilePath: os.Args[1], + Format: openai.AudioResponseFormatSRT, + } + resp, err := c.CreateTranscription(context.Background(), req) + if err != nil { + fmt.Printf("Transcription error: %v\n", err) + return + } + f, err := os.Create(os.Args[1] + ".srt") + if err != nil { + fmt.Printf("Could not open file: %v\n", err) + return + } + defer f.Close() + if _, err := f.WriteString(resp.Text); err != nil { + fmt.Printf("Error writing to file: %v\n", err) + return + } +} +``` +
+ +
+DALL-E 2 image generation + +```go +package main + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + openai "github.com/sashabaranov/go-openai" + "image/png" + "os" +) + +func main() { + c := openai.NewClient("your token") + ctx := context.Background() + + // Sample image by link + reqUrl := openai.ImageRequest{ + Prompt: "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail", + Size: openai.CreateImageSize256x256, + ResponseFormat: openai.CreateImageResponseFormatURL, + N: 1, + } + + respUrl, err := c.CreateImage(ctx, reqUrl) + if err != nil { + fmt.Printf("Image creation error: %v\n", err) + return + } + fmt.Println(respUrl.Data[0].URL) + + // Example image as base64 + reqBase64 := openai.ImageRequest{ + Prompt: "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine", + Size: openai.CreateImageSize256x256, + ResponseFormat: openai.CreateImageResponseFormatB64JSON, + N: 1, + } + + respBase64, err := c.CreateImage(ctx, reqBase64) + if err != nil { + fmt.Printf("Image creation error: %v\n", err) + return + } + + imgBytes, err := base64.StdEncoding.DecodeString(respBase64.Data[0].B64JSON) + if err != nil { + fmt.Printf("Base64 decode error: %v\n", err) + return + } + + r := bytes.NewReader(imgBytes) + imgData, err := png.Decode(r) + if err != nil { + fmt.Printf("PNG decode error: %v\n", err) + return + } + + file, err := os.Create("example.png") + if err != nil { + fmt.Printf("File creation error: %v\n", err) + return + } + defer file.Close() + + if err := png.Encode(file, imgData); err != nil { + fmt.Printf("PNG encode error: %v\n", err) + return + } + + fmt.Println("The image was saved as example.png") +} + +``` +
+ +
+Configuring proxy + +```go +config := openai.DefaultConfig("token") +proxyUrl, err := url.Parse("http://localhost:{port}") +if err != nil { + panic(err) +} +transport := &http.Transport{ + Proxy: http.ProxyURL(proxyUrl), +} +config.HTTPClient = &http.Client{ + Transport: transport, +} + +c := openai.NewClientWithConfig(config) +``` + +See also: https://pkg.go.dev/github.com/sashabaranov/go-openai#ClientConfig +
+ +
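In the same vein as the proxy configuration above, `DefaultConfig` returns a `ClientConfig` whose base URL can be overridden, which is the usual way to target an OpenAI-compatible gateway. This is a sketch under the assumption that the field is named `BaseURL` in the vendored version; check the vendored `config.go` before relying on it.

```go
config := openai.DefaultConfig("token")
// Hypothetical gateway URL; BaseURL is assumed to be the ClientConfig field name.
config.BaseURL = "https://my-gateway.example.com/v1"
c := openai.NewClientWithConfig(config)
```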
+ChatGPT support context + +```go +package main + +import ( + "bufio" + "context" + "fmt" + "os" + "strings" + + "github.com/sashabaranov/go-openai" +) + +func main() { + client := openai.NewClient("your token") + messages := make([]openai.ChatCompletionMessage, 0) + reader := bufio.NewReader(os.Stdin) + fmt.Println("Conversation") + fmt.Println("---------------------") + + for { + fmt.Print("-> ") + text, _ := reader.ReadString('\n') + // convert CRLF to LF + text = strings.Replace(text, "\n", "", -1) + messages = append(messages, openai.ChatCompletionMessage{ + Role: openai.ChatMessageRoleUser, + Content: text, + }) + + resp, err := client.CreateChatCompletion( + context.Background(), + openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Messages: messages, + }, + ) + + if err != nil { + fmt.Printf("ChatCompletion error: %v\n", err) + continue + } + + content := resp.Choices[0].Message.Content + messages = append(messages, openai.ChatCompletionMessage{ + Role: openai.ChatMessageRoleAssistant, + Content: content, + }) + fmt.Println(content) + } +} +``` +
+ +
+Azure OpenAI ChatGPT + +```go +package main + +import ( + "context" + "fmt" + + openai "github.com/sashabaranov/go-openai" +) + +func main() { + config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint") + // If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function + // config.AzureModelMapperFunc = func(model string) string { + // azureModelMapping := map[string]string{ + // "gpt-3.5-turbo": "your gpt-3.5-turbo deployment name", + // } + // return azureModelMapping[model] + // } + + client := openai.NewClientWithConfig(config) + resp, err := client.CreateChatCompletion( + context.Background(), + openai.ChatCompletionRequest{ + Model: openai.GPT3Dot5Turbo, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleUser, + Content: "Hello Azure OpenAI!", + }, + }, + }, + ) + if err != nil { + fmt.Printf("ChatCompletion error: %v\n", err) + return + } + + fmt.Println(resp.Choices[0].Message.Content) +} + +``` +
+ +
+Embedding Semantic Similarity + +```go +package main + +import ( + "context" + "log" + openai "github.com/sashabaranov/go-openai" + +) + +func main() { + client := openai.NewClient("your-token") + + // Create an EmbeddingRequest for the user query + queryReq := openai.EmbeddingRequest{ + Input: []string{"How many chucks would a woodchuck chuck"}, + Model: openai.AdaEmbeddingV2, + } + + // Create an embedding for the user query + queryResponse, err := client.CreateEmbeddings(context.Background(), queryReq) + if err != nil { + log.Fatal("Error creating query embedding:", err) + } + + // Create an EmbeddingRequest for the target text + targetReq := openai.EmbeddingRequest{ + Input: []string{"How many chucks would a woodchuck chuck if the woodchuck could chuck wood"}, + Model: openai.AdaEmbeddingV2, + } + + // Create an embedding for the target text + targetResponse, err := client.CreateEmbeddings(context.Background(), targetReq) + if err != nil { + log.Fatal("Error creating target embedding:", err) + } + + // Now that we have the embeddings for the user query and the target text, we + // can calculate their similarity. + queryEmbedding := queryResponse.Data[0] + targetEmbedding := targetResponse.Data[0] + + similarity, err := queryEmbedding.DotProduct(&targetEmbedding) + if err != nil { + log.Fatal("Error calculating dot product:", err) + } + + log.Printf("The similarity score between the query and the target is %f", similarity) +} + +``` +
+ +
+Azure OpenAI Embeddings + +```go +package main + +import ( + "context" + "fmt" + + openai "github.com/sashabaranov/go-openai" +) + +func main() { + + config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint") + config.APIVersion = "2023-05-15" // optional update to latest API version + + //If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function + //config.AzureModelMapperFunc = func(model string) string { + // azureModelMapping := map[string]string{ + // "gpt-3.5-turbo":"your gpt-3.5-turbo deployment name", + // } + // return azureModelMapping[model] + //} + + input := "Text to vectorize" + + client := openai.NewClientWithConfig(config) + resp, err := client.CreateEmbeddings( + context.Background(), + openai.EmbeddingRequest{ + Input: []string{input}, + Model: openai.AdaEmbeddingV2, + }) + + if err != nil { + fmt.Printf("CreateEmbeddings error: %v\n", err) + return + } + + vectors := resp.Data[0].Embedding // []float32 with 1536 dimensions + + fmt.Println(vectors[:10], "...", vectors[len(vectors)-10:]) +} +``` +
+ +
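The embedding examples above hand back raw `[]float32` vectors. If they need to be compared without the `DotProduct` helper, a plain cosine-similarity function is enough; this helper is illustrative and not part of the library.

```go
package main

import (
	"fmt"
	"math"
)

// cosineSimilarity compares two embedding vectors of equal length.
func cosineSimilarity(a, b []float32) float64 {
	var dot, normA, normB float64
	for i := range a {
		dot += float64(a[i]) * float64(b[i])
		normA += float64(a[i]) * float64(a[i])
		normB += float64(b[i]) * float64(b[i])
	}
	if normA == 0 || normB == 0 {
		return 0
	}
	return dot / (math.Sqrt(normA) * math.Sqrt(normB))
}

func main() {
	fmt.Println(cosineSimilarity([]float32{1, 0}, []float32{1, 1})) // ≈ 0.707
}
```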
+JSON Schema for function calling + +It is now possible for chat completion to choose to call a function for more information ([see developer docs here](https://platform.openai.com/docs/guides/gpt/function-calling)). + +In order to describe the type of functions that can be called, a JSON schema must be provided. Many JSON schema libraries exist and are more advanced than what we can offer in this library, however we have included a simple `jsonschema` package for those who want to use this feature without formatting their own JSON schema payload. + +The developer documents give this JSON schema definition as an example: + +```json +{ + "name":"get_current_weather", + "description":"Get the current weather in a given location", + "parameters":{ + "type":"object", + "properties":{ + "location":{ + "type":"string", + "description":"The city and state, e.g. San Francisco, CA" + }, + "unit":{ + "type":"string", + "enum":[ + "celsius", + "fahrenheit" + ] + } + }, + "required":[ + "location" + ] + } +} +``` + +Using the `jsonschema` package, this schema could be created using structs as such: + +```go +FunctionDefinition{ + Name: "get_current_weather", + Parameters: jsonschema.Definition{ + Type: jsonschema.Object, + Properties: map[string]jsonschema.Definition{ + "location": { + Type: jsonschema.String, + Description: "The city and state, e.g. San Francisco, CA", + }, + "unit": { + Type: jsonschema.String, + Enum: []string{"celsius", "fahrenheit"}, + }, + }, + Required: []string{"location"}, + }, +} +``` + +The `Parameters` field of a `FunctionDefinition` can accept either of the above styles, or even a nested struct from another library (as long as it can be marshalled into JSON). +
+ +
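To connect the function-calling schema above to an actual request, here is a sketch using the `Tool`, `FunctionDefinition`, and `ToolCall` types defined in `chat.go` later in this diff; the weather function and its wiring are illustrative, not upstream examples.

```go
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
	"github.com/sashabaranov/go-openai/jsonschema"
)

func main() {
	client := openai.NewClient("your token")

	weatherFn := openai.FunctionDefinition{
		Name:        "get_current_weather",
		Description: "Get the current weather in a given location",
		Parameters: jsonschema.Definition{
			Type: jsonschema.Object,
			Properties: map[string]jsonschema.Definition{
				"location": {Type: jsonschema.String, Description: "The city and state, e.g. San Francisco, CA"},
				"unit":     {Type: jsonschema.String, Enum: []string{"celsius", "fahrenheit"}},
			},
			Required: []string{"location"},
		},
	}

	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model: openai.GPT4oMini,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "What's the weather in Boston?"},
		},
		Tools: []openai.Tool{{Type: openai.ToolTypeFunction, Function: &weatherFn}},
	})
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}

	// If the model chose to call the function, the arguments arrive as a JSON string.
	for _, call := range resp.Choices[0].Message.ToolCalls {
		fmt.Println(call.Function.Name, call.Function.Arguments)
	}
}
```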
+Error handling + +Open-AI maintains clear documentation on how to [handle API errors](https://platform.openai.com/docs/guides/error-codes/api-errors) + +example: +``` +e := &openai.APIError{} +if errors.As(err, &e) { + switch e.HTTPStatusCode { + case 401: + // invalid auth or key (do not retry) + case 429: + // rate limiting or engine overload (wait and retry) + case 500: + // openai server error (retry) + default: + // unhandled + } +} + +``` +
+ +
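Building on the status codes discussed in the error-handling section above, a rough retry sketch; the attempt count and linear backoff are arbitrary choices, and only the `APIError.HTTPStatusCode` field shown above is relied on.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	openai "github.com/sashabaranov/go-openai"
)

// retryable reports whether an API error is worth retrying, following the
// status-code guidance above: 429 (rate limit) and 5xx (server errors).
func retryable(err error) bool {
	var apiErr *openai.APIError
	if errors.As(err, &apiErr) {
		return apiErr.HTTPStatusCode == 429 || apiErr.HTTPStatusCode >= 500
	}
	return false
}

func completeWithRetry(ctx context.Context, c *openai.Client, req openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) {
	var resp openai.ChatCompletionResponse
	var err error
	for attempt := 1; attempt <= 3; attempt++ {
		resp, err = c.CreateChatCompletion(ctx, req)
		if err == nil || !retryable(err) {
			return resp, err
		}
		time.Sleep(time.Duration(attempt) * time.Second) // crude linear backoff
	}
	return resp, err
}

func main() {
	c := openai.NewClient("your token")
	resp, err := completeWithRetry(context.Background(), c, openai.ChatCompletionRequest{
		Model:    openai.GPT4oMini,
		Messages: []openai.ChatCompletionMessage{{Role: openai.ChatMessageRoleUser, Content: "Hello!"}},
	})
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}
```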
+Fine Tune Model + +```go +package main + +import ( + "context" + "fmt" + "github.com/sashabaranov/go-openai" +) + +func main() { + client := openai.NewClient("your token") + ctx := context.Background() + + // create a .jsonl file with your training data for conversational model + // {"prompt": "", "completion": ""} + // {"prompt": "", "completion": ""} + // {"prompt": "", "completion": ""} + + // chat models are trained using the following file format: + // {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]} + // {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]} + // {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]} + + // you can use openai cli tool to validate the data + // For more info - https://platform.openai.com/docs/guides/fine-tuning + + file, err := client.CreateFile(ctx, openai.FileRequest{ + FilePath: "training_prepared.jsonl", + Purpose: "fine-tune", + }) + if err != nil { + fmt.Printf("Upload JSONL file error: %v\n", err) + return + } + + // create a fine tuning job + // Streams events until the job is done (this often takes minutes, but can take hours if there are many jobs in the queue or your dataset is large) + // use below get method to know the status of your model + fineTuningJob, err := client.CreateFineTuningJob(ctx, openai.FineTuningJobRequest{ + TrainingFile: file.ID, + Model: "davinci-002", // gpt-3.5-turbo-0613, babbage-002. + }) + if err != nil { + fmt.Printf("Creating new fine tune model error: %v\n", err) + return + } + + fineTuningJob, err = client.RetrieveFineTuningJob(ctx, fineTuningJob.ID) + if err != nil { + fmt.Printf("Getting fine tune model error: %v\n", err) + return + } + fmt.Println(fineTuningJob.FineTunedModel) + + // once the status of fineTuningJob is `succeeded`, you can use your fine tune model in Completion Request or Chat Completion Request + + // resp, err := client.CreateCompletion(ctx, openai.CompletionRequest{ + // Model: fineTuningJob.FineTunedModel, + // Prompt: "your prompt", + // }) + // if err != nil { + // fmt.Printf("Create completion error %v\n", err) + // return + // } + // + // fmt.Println(resp.Choices[0].Text) +} +``` +
+ +
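The fine-tuning example above notes that the job must reach `succeeded` before the fine-tuned model is usable; a small polling sketch follows. The 30-second interval is arbitrary, and `Status` is assumed to be the string field carrying the job state on the object returned by `RetrieveFineTuningJob`.

```go
package main

import (
	"context"
	"fmt"
	"time"

	openai "github.com/sashabaranov/go-openai"
)

// waitForFineTune polls a fine-tuning job until it reaches a terminal state and
// returns the fine-tuned model name.
func waitForFineTune(ctx context.Context, client *openai.Client, jobID string) (string, error) {
	for {
		job, err := client.RetrieveFineTuningJob(ctx, jobID)
		if err != nil {
			return "", err
		}
		// Status is assumed to hold the job state ("succeeded", "failed", "cancelled", ...).
		switch job.Status {
		case "succeeded":
			return job.FineTunedModel, nil
		case "failed", "cancelled":
			return "", fmt.Errorf("fine-tuning job %s ended with status %q", jobID, job.Status)
		}
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-time.After(30 * time.Second):
		}
	}
}

func main() {
	client := openai.NewClient("your token")
	// "ftjob-..." stands in for the ID returned by CreateFineTuningJob above.
	model, err := waitForFineTune(context.Background(), client, "ftjob-...")
	if err != nil {
		fmt.Printf("Fine-tuning wait error: %v\n", err)
		return
	}
	fmt.Println("fine-tuned model:", model)
}
```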
+Structured Outputs + +```go +package main + +import ( + "context" + "fmt" + "log" + + "github.com/sashabaranov/go-openai" + "github.com/sashabaranov/go-openai/jsonschema" +) + +func main() { + client := openai.NewClient("your token") + ctx := context.Background() + + type Result struct { + Steps []struct { + Explanation string `json:"explanation"` + Output string `json:"output"` + } `json:"steps"` + FinalAnswer string `json:"final_answer"` + } + var result Result + schema, err := jsonschema.GenerateSchemaForType(result) + if err != nil { + log.Fatalf("GenerateSchemaForType error: %v", err) + } + resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{ + Model: openai.GPT4oMini, + Messages: []openai.ChatCompletionMessage{ + { + Role: openai.ChatMessageRoleSystem, + Content: "You are a helpful math tutor. Guide the user through the solution step by step.", + }, + { + Role: openai.ChatMessageRoleUser, + Content: "how can I solve 8x + 7 = -23", + }, + }, + ResponseFormat: &openai.ChatCompletionResponseFormat{ + Type: openai.ChatCompletionResponseFormatTypeJSONSchema, + JSONSchema: &openai.ChatCompletionResponseFormatJSONSchema{ + Name: "math_reasoning", + Schema: schema, + Strict: true, + }, + }, + }) + if err != nil { + log.Fatalf("CreateChatCompletion error: %v", err) + } + err = schema.Unmarshal(resp.Choices[0].Message.Content, &result) + if err != nil { + log.Fatalf("Unmarshal schema error: %v", err) + } + fmt.Println(result) +} +``` +
+See the `examples/` folder for more. + +## Frequently Asked Questions + +### Why don't we get the same answer when specifying a temperature field of 0 and asking the same question? + +Even when specifying a temperature field of 0, it doesn't guarantee that you'll always get the same response. Several factors come into play. + +1. Go OpenAI Behavior: When you specify a temperature field of 0 in Go OpenAI, the omitempty tag causes that field to be removed from the request. Consequently, the OpenAI API applies the default value of 1. +2. Token Count for Input/Output: If there's a large number of tokens in the input and output, setting the temperature to 0 can still result in non-deterministic behavior. In particular, when using around 32k tokens, the likelihood of non-deterministic behavior becomes highest even with a temperature of 0. + +Due to the factors mentioned above, different answers may be returned even for the same question. + +**Workarounds:** +1. As of November 2023, use [the new `seed` parameter](https://platform.openai.com/docs/guides/text-generation/reproducible-outputs) in conjunction with the `system_fingerprint` response field, alongside Temperature management. +2. Try using `math.SmallestNonzeroFloat32`: By specifying `math.SmallestNonzeroFloat32` in the temperature field instead of 0, you can mimic the behavior of setting it to 0. +3. Limiting Token Count: By limiting the number of tokens in the input and output and especially avoiding large requests close to 32k tokens, you can reduce the risk of non-deterministic behavior. + +By adopting these strategies, you can expect more consistent results. + +**Related Issues:** +[omitempty option of request struct will generate incorrect request when parameter is 0.](https://github.com/sashabaranov/go-openai/issues/9) + +### Does Go OpenAI provide a method to count tokens? + +No, Go OpenAI does not offer a feature to count tokens, and there are no plans to provide such a feature in the future. However, if there's a way to implement a token counting feature with zero dependencies, it might be possible to merge that feature into Go OpenAI. Otherwise, it would be more appropriate to implement it in a dedicated library or repository. + +For counting tokens, you might find the following links helpful: +- [Counting Tokens For Chat API Calls](https://github.com/pkoukk/tiktoken-go#counting-tokens-for-chat-api-calls) +- [How to count tokens with tiktoken](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb) + +**Related Issues:** +[Is it possible to join the implementation of GPT3 Tokenizer](https://github.com/sashabaranov/go-openai/issues/62) + +## Contributing + +By following [Contributing Guidelines](https://github.com/sashabaranov/go-openai/blob/master/CONTRIBUTING.md), we hope to ensure that your contributions are made smoothly and efficiently. + +## Thank you + +We want to take a moment to express our deepest gratitude to the [contributors](https://github.com/sashabaranov/go-openai/graphs/contributors) and sponsors of this project: +- [Carson Kahn](https://carsonkahn.com) of [Spindle AI](https://spindleai.com) + +To all of you: thank you. You've helped us achieve more than we ever imagined possible. Can't wait to see where we go next, together! 
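The temperature/`omitempty` caveat and the `seed` workaround from the FAQ above look roughly like the following in practice; the field names come from `chat.go` later in this diff, and `GPT4oMini` is simply the model used in the Structured Outputs example.

```go
package main

import (
	"context"
	"fmt"
	"math"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")
	seed := 42

	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model: openai.GPT4oMini,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
		},
		// Temperature carries the omitempty tag, so a literal 0 is dropped from the
		// request body and the server default (1) applies; the smallest non-zero
		// float mimics "temperature 0", as suggested in the FAQ.
		Temperature: math.SmallestNonzeroFloat32,
		// Seed plus the response's SystemFingerprint give best-effort reproducibility.
		Seed: &seed,
	})
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}
	fmt.Println(resp.SystemFingerprint, resp.Choices[0].Message.Content)
}
```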
diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/assistant.go b/constraint/vendor/github.com/sashabaranov/go-openai/assistant.go new file mode 100644 index 000000000..8aab5bcf0 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/assistant.go @@ -0,0 +1,325 @@ +package openai + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +const ( + assistantsSuffix = "/assistants" + assistantsFilesSuffix = "/files" +) + +type Assistant struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Model string `json:"model"` + Instructions *string `json:"instructions,omitempty"` + Tools []AssistantTool `json:"tools"` + ToolResources *AssistantToolResource `json:"tool_resources,omitempty"` + FileIDs []string `json:"file_ids,omitempty"` // Deprecated in v2 + Metadata map[string]any `json:"metadata,omitempty"` + Temperature *float32 `json:"temperature,omitempty"` + TopP *float32 `json:"top_p,omitempty"` + ResponseFormat any `json:"response_format,omitempty"` + + httpHeader +} + +type AssistantToolType string + +const ( + AssistantToolTypeCodeInterpreter AssistantToolType = "code_interpreter" + AssistantToolTypeRetrieval AssistantToolType = "retrieval" + AssistantToolTypeFunction AssistantToolType = "function" + AssistantToolTypeFileSearch AssistantToolType = "file_search" +) + +type AssistantTool struct { + Type AssistantToolType `json:"type"` + Function *FunctionDefinition `json:"function,omitempty"` +} + +type AssistantToolFileSearch struct { + VectorStoreIDs []string `json:"vector_store_ids"` +} + +type AssistantToolCodeInterpreter struct { + FileIDs []string `json:"file_ids"` +} + +type AssistantToolResource struct { + FileSearch *AssistantToolFileSearch `json:"file_search,omitempty"` + CodeInterpreter *AssistantToolCodeInterpreter `json:"code_interpreter,omitempty"` +} + +// AssistantRequest provides the assistant request parameters. +// When modifying the tools the API functions as the following: +// If Tools is undefined, no changes are made to the Assistant's tools. +// If Tools is empty slice it will effectively delete all of the Assistant's tools. +// If Tools is populated, it will replace all of the existing Assistant's tools with the provided tools. +type AssistantRequest struct { + Model string `json:"model"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Instructions *string `json:"instructions,omitempty"` + Tools []AssistantTool `json:"-"` + FileIDs []string `json:"file_ids,omitempty"` + Metadata map[string]any `json:"metadata,omitempty"` + ToolResources *AssistantToolResource `json:"tool_resources,omitempty"` + ResponseFormat any `json:"response_format,omitempty"` + Temperature *float32 `json:"temperature,omitempty"` + TopP *float32 `json:"top_p,omitempty"` +} + +// MarshalJSON provides a custom marshaller for the assistant request to handle the API use cases +// If Tools is nil, the field is omitted from the JSON. +// If Tools is an empty slice, it's included in the JSON as an empty array ([]). +// If Tools is populated, it's included in the JSON with the elements. 
+func (a AssistantRequest) MarshalJSON() ([]byte, error) { + type Alias AssistantRequest + assistantAlias := &struct { + Tools *[]AssistantTool `json:"tools,omitempty"` + *Alias + }{ + Alias: (*Alias)(&a), + } + + if a.Tools != nil { + assistantAlias.Tools = &a.Tools + } + + return json.Marshal(assistantAlias) +} + +// AssistantsList is a list of assistants. +type AssistantsList struct { + Assistants []Assistant `json:"data"` + LastID *string `json:"last_id"` + FirstID *string `json:"first_id"` + HasMore bool `json:"has_more"` + httpHeader +} + +type AssistantDeleteResponse struct { + ID string `json:"id"` + Object string `json:"object"` + Deleted bool `json:"deleted"` + + httpHeader +} + +type AssistantFile struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + AssistantID string `json:"assistant_id"` + + httpHeader +} + +type AssistantFileRequest struct { + FileID string `json:"file_id"` +} + +type AssistantFilesList struct { + AssistantFiles []AssistantFile `json:"data"` + + httpHeader +} + +// CreateAssistant creates a new assistant. +func (c *Client) CreateAssistant(ctx context.Context, request AssistantRequest) (response Assistant, err error) { + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(assistantsSuffix), withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// RetrieveAssistant retrieves an assistant. +func (c *Client) RetrieveAssistant( + ctx context.Context, + assistantID string, +) (response Assistant, err error) { + urlSuffix := fmt.Sprintf("%s/%s", assistantsSuffix, assistantID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// ModifyAssistant modifies an assistant. +func (c *Client) ModifyAssistant( + ctx context.Context, + assistantID string, + request AssistantRequest, +) (response Assistant, err error) { + urlSuffix := fmt.Sprintf("%s/%s", assistantsSuffix, assistantID) + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// DeleteAssistant deletes an assistant. +func (c *Client) DeleteAssistant( + ctx context.Context, + assistantID string, +) (response AssistantDeleteResponse, err error) { + urlSuffix := fmt.Sprintf("%s/%s", assistantsSuffix, assistantID) + req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// ListAssistants Lists the currently available assistants. +func (c *Client) ListAssistants( + ctx context.Context, + limit *int, + order *string, + after *string, + before *string, +) (response AssistantsList, err error) { + urlValues := url.Values{} + if limit != nil { + urlValues.Add("limit", fmt.Sprintf("%d", *limit)) + } + if order != nil { + urlValues.Add("order", *order) + } + if after != nil { + urlValues.Add("after", *after) + } + if before != nil { + urlValues.Add("before", *before) + } + + encodedValues := "" + if len(urlValues) > 0 { + encodedValues = "?" 
+ urlValues.Encode() + } + + urlSuffix := fmt.Sprintf("%s%s", assistantsSuffix, encodedValues) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// CreateAssistantFile creates a new assistant file. +func (c *Client) CreateAssistantFile( + ctx context.Context, + assistantID string, + request AssistantFileRequest, +) (response AssistantFile, err error) { + urlSuffix := fmt.Sprintf("%s/%s%s", assistantsSuffix, assistantID, assistantsFilesSuffix) + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), + withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// RetrieveAssistantFile retrieves an assistant file. +func (c *Client) RetrieveAssistantFile( + ctx context.Context, + assistantID string, + fileID string, +) (response AssistantFile, err error) { + urlSuffix := fmt.Sprintf("%s/%s%s/%s", assistantsSuffix, assistantID, assistantsFilesSuffix, fileID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// DeleteAssistantFile deletes an existing file. +func (c *Client) DeleteAssistantFile( + ctx context.Context, + assistantID string, + fileID string, +) (err error) { + urlSuffix := fmt.Sprintf("%s/%s%s/%s", assistantsSuffix, assistantID, assistantsFilesSuffix, fileID) + req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, nil) + return +} + +// ListAssistantFiles Lists the currently available files for an assistant. +func (c *Client) ListAssistantFiles( + ctx context.Context, + assistantID string, + limit *int, + order *string, + after *string, + before *string, +) (response AssistantFilesList, err error) { + urlValues := url.Values{} + if limit != nil { + urlValues.Add("limit", fmt.Sprintf("%d", *limit)) + } + if order != nil { + urlValues.Add("order", *order) + } + if after != nil { + urlValues.Add("after", *after) + } + if before != nil { + urlValues.Add("before", *before) + } + + encodedValues := "" + if len(urlValues) > 0 { + encodedValues = "?" + urlValues.Encode() + } + + urlSuffix := fmt.Sprintf("%s/%s%s%s", assistantsSuffix, assistantID, assistantsFilesSuffix, encodedValues) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/audio.go b/constraint/vendor/github.com/sashabaranov/go-openai/audio.go new file mode 100644 index 000000000..f321f93d6 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/audio.go @@ -0,0 +1,234 @@ +package openai + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "os" + + utils "github.com/sashabaranov/go-openai/internal" +) + +// Whisper Defines the models provided by OpenAI to use when processing audio with OpenAI. +const ( + Whisper1 = "whisper-1" +) + +// Response formats; Whisper uses AudioResponseFormatJSON by default. 
+type AudioResponseFormat string + +const ( + AudioResponseFormatJSON AudioResponseFormat = "json" + AudioResponseFormatText AudioResponseFormat = "text" + AudioResponseFormatSRT AudioResponseFormat = "srt" + AudioResponseFormatVerboseJSON AudioResponseFormat = "verbose_json" + AudioResponseFormatVTT AudioResponseFormat = "vtt" +) + +type TranscriptionTimestampGranularity string + +const ( + TranscriptionTimestampGranularityWord TranscriptionTimestampGranularity = "word" + TranscriptionTimestampGranularitySegment TranscriptionTimestampGranularity = "segment" +) + +// AudioRequest represents a request structure for audio API. +type AudioRequest struct { + Model string + + // FilePath is either an existing file in your filesystem or a filename representing the contents of Reader. + FilePath string + + // Reader is an optional io.Reader when you do not want to use an existing file. + Reader io.Reader + + Prompt string + Temperature float32 + Language string // Only for transcription. + Format AudioResponseFormat + TimestampGranularities []TranscriptionTimestampGranularity // Only for transcription. +} + +// AudioResponse represents a response structure for audio API. +type AudioResponse struct { + Task string `json:"task"` + Language string `json:"language"` + Duration float64 `json:"duration"` + Segments []struct { + ID int `json:"id"` + Seek int `json:"seek"` + Start float64 `json:"start"` + End float64 `json:"end"` + Text string `json:"text"` + Tokens []int `json:"tokens"` + Temperature float64 `json:"temperature"` + AvgLogprob float64 `json:"avg_logprob"` + CompressionRatio float64 `json:"compression_ratio"` + NoSpeechProb float64 `json:"no_speech_prob"` + Transient bool `json:"transient"` + } `json:"segments"` + Words []struct { + Word string `json:"word"` + Start float64 `json:"start"` + End float64 `json:"end"` + } `json:"words"` + Text string `json:"text"` + + httpHeader +} + +type audioTextResponse struct { + Text string `json:"text"` + + httpHeader +} + +func (r *audioTextResponse) ToAudioResponse() AudioResponse { + return AudioResponse{ + Text: r.Text, + httpHeader: r.httpHeader, + } +} + +// CreateTranscription — API call to create a transcription. Returns transcribed text. +func (c *Client) CreateTranscription( + ctx context.Context, + request AudioRequest, +) (response AudioResponse, err error) { + return c.callAudioAPI(ctx, request, "transcriptions") +} + +// CreateTranslation — API call to translate audio into English. +func (c *Client) CreateTranslation( + ctx context.Context, + request AudioRequest, +) (response AudioResponse, err error) { + return c.callAudioAPI(ctx, request, "translations") +} + +// callAudioAPI — API call to an audio endpoint. 
+func (c *Client) callAudioAPI( + ctx context.Context, + request AudioRequest, + endpointSuffix string, +) (response AudioResponse, err error) { + var formBody bytes.Buffer + builder := c.createFormBuilder(&formBody) + + if err = audioMultipartForm(request, builder); err != nil { + return AudioResponse{}, err + } + + urlSuffix := fmt.Sprintf("/audio/%s", endpointSuffix) + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix, withModel(request.Model)), + withBody(&formBody), + withContentType(builder.FormDataContentType()), + ) + if err != nil { + return AudioResponse{}, err + } + + if request.HasJSONResponse() { + err = c.sendRequest(req, &response) + } else { + var textResponse audioTextResponse + err = c.sendRequest(req, &textResponse) + response = textResponse.ToAudioResponse() + } + if err != nil { + return AudioResponse{}, err + } + return +} + +// HasJSONResponse returns true if the response format is JSON. +func (r AudioRequest) HasJSONResponse() bool { + return r.Format == "" || r.Format == AudioResponseFormatJSON || r.Format == AudioResponseFormatVerboseJSON +} + +// audioMultipartForm creates a form with audio file contents and the name of the model to use for +// audio processing. +func audioMultipartForm(request AudioRequest, b utils.FormBuilder) error { + err := createFileField(request, b) + if err != nil { + return err + } + + err = b.WriteField("model", request.Model) + if err != nil { + return fmt.Errorf("writing model name: %w", err) + } + + // Create a form field for the prompt (if provided) + if request.Prompt != "" { + err = b.WriteField("prompt", request.Prompt) + if err != nil { + return fmt.Errorf("writing prompt: %w", err) + } + } + + // Create a form field for the format (if provided) + if request.Format != "" { + err = b.WriteField("response_format", string(request.Format)) + if err != nil { + return fmt.Errorf("writing format: %w", err) + } + } + + // Create a form field for the temperature (if provided) + if request.Temperature != 0 { + err = b.WriteField("temperature", fmt.Sprintf("%.2f", request.Temperature)) + if err != nil { + return fmt.Errorf("writing temperature: %w", err) + } + } + + // Create a form field for the language (if provided) + if request.Language != "" { + err = b.WriteField("language", request.Language) + if err != nil { + return fmt.Errorf("writing language: %w", err) + } + } + + if len(request.TimestampGranularities) > 0 { + for _, tg := range request.TimestampGranularities { + err = b.WriteField("timestamp_granularities[]", string(tg)) + if err != nil { + return fmt.Errorf("writing timestamp_granularities[]: %w", err) + } + } + } + + // Close the multipart writer + return b.Close() +} + +// createFileField creates the "file" form field from either an existing file or by using the reader. 
+func createFileField(request AudioRequest, b utils.FormBuilder) error { + if request.Reader != nil { + err := b.CreateFormFileReader("file", request.Reader, request.FilePath) + if err != nil { + return fmt.Errorf("creating form using reader: %w", err) + } + return nil + } + + f, err := os.Open(request.FilePath) + if err != nil { + return fmt.Errorf("opening audio file: %w", err) + } + defer f.Close() + + err = b.CreateFormFile("file", f) + if err != nil { + return fmt.Errorf("creating form file: %w", err) + } + + return nil +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/batch.go b/constraint/vendor/github.com/sashabaranov/go-openai/batch.go new file mode 100644 index 000000000..3c1a9d0d7 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/batch.go @@ -0,0 +1,271 @@ +package openai + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +const batchesSuffix = "/batches" + +type BatchEndpoint string + +const ( + BatchEndpointChatCompletions BatchEndpoint = "/v1/chat/completions" + BatchEndpointCompletions BatchEndpoint = "/v1/completions" + BatchEndpointEmbeddings BatchEndpoint = "/v1/embeddings" +) + +type BatchLineItem interface { + MarshalBatchLineItem() []byte +} + +type BatchChatCompletionRequest struct { + CustomID string `json:"custom_id"` + Body ChatCompletionRequest `json:"body"` + Method string `json:"method"` + URL BatchEndpoint `json:"url"` +} + +func (r BatchChatCompletionRequest) MarshalBatchLineItem() []byte { + marshal, _ := json.Marshal(r) + return marshal +} + +type BatchCompletionRequest struct { + CustomID string `json:"custom_id"` + Body CompletionRequest `json:"body"` + Method string `json:"method"` + URL BatchEndpoint `json:"url"` +} + +func (r BatchCompletionRequest) MarshalBatchLineItem() []byte { + marshal, _ := json.Marshal(r) + return marshal +} + +type BatchEmbeddingRequest struct { + CustomID string `json:"custom_id"` + Body EmbeddingRequest `json:"body"` + Method string `json:"method"` + URL BatchEndpoint `json:"url"` +} + +func (r BatchEmbeddingRequest) MarshalBatchLineItem() []byte { + marshal, _ := json.Marshal(r) + return marshal +} + +type Batch struct { + ID string `json:"id"` + Object string `json:"object"` + Endpoint BatchEndpoint `json:"endpoint"` + Errors *struct { + Object string `json:"object,omitempty"` + Data []struct { + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` + Param *string `json:"param,omitempty"` + Line *int `json:"line,omitempty"` + } `json:"data"` + } `json:"errors"` + InputFileID string `json:"input_file_id"` + CompletionWindow string `json:"completion_window"` + Status string `json:"status"` + OutputFileID *string `json:"output_file_id"` + ErrorFileID *string `json:"error_file_id"` + CreatedAt int `json:"created_at"` + InProgressAt *int `json:"in_progress_at"` + ExpiresAt *int `json:"expires_at"` + FinalizingAt *int `json:"finalizing_at"` + CompletedAt *int `json:"completed_at"` + FailedAt *int `json:"failed_at"` + ExpiredAt *int `json:"expired_at"` + CancellingAt *int `json:"cancelling_at"` + CancelledAt *int `json:"cancelled_at"` + RequestCounts BatchRequestCounts `json:"request_counts"` + Metadata map[string]any `json:"metadata"` +} + +type BatchRequestCounts struct { + Total int `json:"total"` + Completed int `json:"completed"` + Failed int `json:"failed"` +} + +type CreateBatchRequest struct { + InputFileID string `json:"input_file_id"` + Endpoint BatchEndpoint `json:"endpoint"` + CompletionWindow string 
`json:"completion_window"` + Metadata map[string]any `json:"metadata"` +} + +type BatchResponse struct { + httpHeader + Batch +} + +// CreateBatch — API call to Create batch. +func (c *Client) CreateBatch( + ctx context.Context, + request CreateBatchRequest, +) (response BatchResponse, err error) { + if request.CompletionWindow == "" { + request.CompletionWindow = "24h" + } + + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(batchesSuffix), withBody(request)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +type UploadBatchFileRequest struct { + FileName string + Lines []BatchLineItem +} + +func (r *UploadBatchFileRequest) MarshalJSONL() []byte { + buff := bytes.Buffer{} + for i, line := range r.Lines { + if i != 0 { + buff.Write([]byte("\n")) + } + buff.Write(line.MarshalBatchLineItem()) + } + return buff.Bytes() +} + +func (r *UploadBatchFileRequest) AddChatCompletion(customerID string, body ChatCompletionRequest) { + r.Lines = append(r.Lines, BatchChatCompletionRequest{ + CustomID: customerID, + Body: body, + Method: "POST", + URL: BatchEndpointChatCompletions, + }) +} + +func (r *UploadBatchFileRequest) AddCompletion(customerID string, body CompletionRequest) { + r.Lines = append(r.Lines, BatchCompletionRequest{ + CustomID: customerID, + Body: body, + Method: "POST", + URL: BatchEndpointCompletions, + }) +} + +func (r *UploadBatchFileRequest) AddEmbedding(customerID string, body EmbeddingRequest) { + r.Lines = append(r.Lines, BatchEmbeddingRequest{ + CustomID: customerID, + Body: body, + Method: "POST", + URL: BatchEndpointEmbeddings, + }) +} + +// UploadBatchFile — upload batch file. +func (c *Client) UploadBatchFile(ctx context.Context, request UploadBatchFileRequest) (File, error) { + if request.FileName == "" { + request.FileName = "@batchinput.jsonl" + } + return c.CreateFileBytes(ctx, FileBytesRequest{ + Name: request.FileName, + Bytes: request.MarshalJSONL(), + Purpose: PurposeBatch, + }) +} + +type CreateBatchWithUploadFileRequest struct { + Endpoint BatchEndpoint `json:"endpoint"` + CompletionWindow string `json:"completion_window"` + Metadata map[string]any `json:"metadata"` + UploadBatchFileRequest +} + +// CreateBatchWithUploadFile — API call to Create batch with upload file. +func (c *Client) CreateBatchWithUploadFile( + ctx context.Context, + request CreateBatchWithUploadFileRequest, +) (response BatchResponse, err error) { + var file File + file, err = c.UploadBatchFile(ctx, UploadBatchFileRequest{ + FileName: request.FileName, + Lines: request.Lines, + }) + if err != nil { + return + } + return c.CreateBatch(ctx, CreateBatchRequest{ + InputFileID: file.ID, + Endpoint: request.Endpoint, + CompletionWindow: request.CompletionWindow, + Metadata: request.Metadata, + }) +} + +// RetrieveBatch — API call to Retrieve batch. +func (c *Client) RetrieveBatch( + ctx context.Context, + batchID string, +) (response BatchResponse, err error) { + urlSuffix := fmt.Sprintf("%s/%s", batchesSuffix, batchID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix)) + if err != nil { + return + } + err = c.sendRequest(req, &response) + return +} + +// CancelBatch — API call to Cancel batch. 
+func (c *Client) CancelBatch( + ctx context.Context, + batchID string, +) (response BatchResponse, err error) { + urlSuffix := fmt.Sprintf("%s/%s/cancel", batchesSuffix, batchID) + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix)) + if err != nil { + return + } + err = c.sendRequest(req, &response) + return +} + +type ListBatchResponse struct { + httpHeader + Object string `json:"object"` + Data []Batch `json:"data"` + FirstID string `json:"first_id"` + LastID string `json:"last_id"` + HasMore bool `json:"has_more"` +} + +// ListBatch API call to List batch. +func (c *Client) ListBatch(ctx context.Context, after *string, limit *int) (response ListBatchResponse, err error) { + urlValues := url.Values{} + if limit != nil { + urlValues.Add("limit", fmt.Sprintf("%d", *limit)) + } + if after != nil { + urlValues.Add("after", *after) + } + encodedValues := "" + if len(urlValues) > 0 { + encodedValues = "?" + urlValues.Encode() + } + + urlSuffix := fmt.Sprintf("%s%s", batchesSuffix, encodedValues) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/chat.go b/constraint/vendor/github.com/sashabaranov/go-openai/chat.go new file mode 100644 index 000000000..fcaf79cf7 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/chat.go @@ -0,0 +1,409 @@ +package openai + +import ( + "context" + "encoding/json" + "errors" + "net/http" +) + +// Chat message role defined by the OpenAI API. +const ( + ChatMessageRoleSystem = "system" + ChatMessageRoleUser = "user" + ChatMessageRoleAssistant = "assistant" + ChatMessageRoleFunction = "function" + ChatMessageRoleTool = "tool" +) + +const chatCompletionsSuffix = "/chat/completions" + +var ( + ErrChatCompletionInvalidModel = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll + ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream") //nolint:lll + ErrContentFieldsMisused = errors.New("can't use both Content and MultiContent properties simultaneously") +) + +type Hate struct { + Filtered bool `json:"filtered"` + Severity string `json:"severity,omitempty"` +} +type SelfHarm struct { + Filtered bool `json:"filtered"` + Severity string `json:"severity,omitempty"` +} +type Sexual struct { + Filtered bool `json:"filtered"` + Severity string `json:"severity,omitempty"` +} +type Violence struct { + Filtered bool `json:"filtered"` + Severity string `json:"severity,omitempty"` +} + +type JailBreak struct { + Filtered bool `json:"filtered"` + Detected bool `json:"detected"` +} + +type Profanity struct { + Filtered bool `json:"filtered"` + Detected bool `json:"detected"` +} + +type ContentFilterResults struct { + Hate Hate `json:"hate,omitempty"` + SelfHarm SelfHarm `json:"self_harm,omitempty"` + Sexual Sexual `json:"sexual,omitempty"` + Violence Violence `json:"violence,omitempty"` + JailBreak JailBreak `json:"jailbreak,omitempty"` + Profanity Profanity `json:"profanity,omitempty"` +} + +type PromptAnnotation struct { + PromptIndex int `json:"prompt_index,omitempty"` + ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"` +} + +type ImageURLDetail string + +const ( + ImageURLDetailHigh ImageURLDetail = "high" + ImageURLDetailLow ImageURLDetail = "low" + ImageURLDetailAuto ImageURLDetail = "auto" +) + 
+type ChatMessageImageURL struct { + URL string `json:"url,omitempty"` + Detail ImageURLDetail `json:"detail,omitempty"` +} + +type ChatMessagePartType string + +const ( + ChatMessagePartTypeText ChatMessagePartType = "text" + ChatMessagePartTypeImageURL ChatMessagePartType = "image_url" +) + +type ChatMessagePart struct { + Type ChatMessagePartType `json:"type,omitempty"` + Text string `json:"text,omitempty"` + ImageURL *ChatMessageImageURL `json:"image_url,omitempty"` +} + +type ChatCompletionMessage struct { + Role string `json:"role"` + Content string `json:"content"` + Refusal string `json:"refusal,omitempty"` + MultiContent []ChatMessagePart + + // This property isn't in the official documentation, but it's in + // the documentation for the official library for python: + // - https://github.com/openai/openai-python/blob/main/chatml.md + // - https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb + Name string `json:"name,omitempty"` + + FunctionCall *FunctionCall `json:"function_call,omitempty"` + + // For Role=assistant prompts this may be set to the tool calls generated by the model, such as function calls. + ToolCalls []ToolCall `json:"tool_calls,omitempty"` + + // For Role=tool prompts this should be set to the ID given in the assistant's prior request to call a tool. + ToolCallID string `json:"tool_call_id,omitempty"` +} + +func (m ChatCompletionMessage) MarshalJSON() ([]byte, error) { + if m.Content != "" && m.MultiContent != nil { + return nil, ErrContentFieldsMisused + } + if len(m.MultiContent) > 0 { + msg := struct { + Role string `json:"role"` + Content string `json:"-"` + Refusal string `json:"refusal,omitempty"` + MultiContent []ChatMessagePart `json:"content,omitempty"` + Name string `json:"name,omitempty"` + FunctionCall *FunctionCall `json:"function_call,omitempty"` + ToolCalls []ToolCall `json:"tool_calls,omitempty"` + ToolCallID string `json:"tool_call_id,omitempty"` + }(m) + return json.Marshal(msg) + } + + msg := struct { + Role string `json:"role"` + Content string `json:"content"` + Refusal string `json:"refusal,omitempty"` + MultiContent []ChatMessagePart `json:"-"` + Name string `json:"name,omitempty"` + FunctionCall *FunctionCall `json:"function_call,omitempty"` + ToolCalls []ToolCall `json:"tool_calls,omitempty"` + ToolCallID string `json:"tool_call_id,omitempty"` + }(m) + return json.Marshal(msg) +} + +func (m *ChatCompletionMessage) UnmarshalJSON(bs []byte) error { + msg := struct { + Role string `json:"role"` + Content string `json:"content"` + Refusal string `json:"refusal,omitempty"` + MultiContent []ChatMessagePart + Name string `json:"name,omitempty"` + FunctionCall *FunctionCall `json:"function_call,omitempty"` + ToolCalls []ToolCall `json:"tool_calls,omitempty"` + ToolCallID string `json:"tool_call_id,omitempty"` + }{} + + if err := json.Unmarshal(bs, &msg); err == nil { + *m = ChatCompletionMessage(msg) + return nil + } + multiMsg := struct { + Role string `json:"role"` + Content string + Refusal string `json:"refusal,omitempty"` + MultiContent []ChatMessagePart `json:"content"` + Name string `json:"name,omitempty"` + FunctionCall *FunctionCall `json:"function_call,omitempty"` + ToolCalls []ToolCall `json:"tool_calls,omitempty"` + ToolCallID string `json:"tool_call_id,omitempty"` + }{} + if err := json.Unmarshal(bs, &multiMsg); err != nil { + return err + } + *m = ChatCompletionMessage(multiMsg) + return nil +} + +type ToolCall struct { + // Index is not nil only in chat completion chunk object + Index 
*int `json:"index,omitempty"` + ID string `json:"id,omitempty"` + Type ToolType `json:"type"` + Function FunctionCall `json:"function"` +} + +type FunctionCall struct { + Name string `json:"name,omitempty"` + // call function with arguments in JSON format + Arguments string `json:"arguments,omitempty"` +} + +type ChatCompletionResponseFormatType string + +const ( + ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object" + ChatCompletionResponseFormatTypeJSONSchema ChatCompletionResponseFormatType = "json_schema" + ChatCompletionResponseFormatTypeText ChatCompletionResponseFormatType = "text" +) + +type ChatCompletionResponseFormat struct { + Type ChatCompletionResponseFormatType `json:"type,omitempty"` + JSONSchema *ChatCompletionResponseFormatJSONSchema `json:"json_schema,omitempty"` +} + +type ChatCompletionResponseFormatJSONSchema struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Schema json.Marshaler `json:"schema"` + Strict bool `json:"strict"` +} + +// ChatCompletionRequest represents a request structure for chat completion API. +type ChatCompletionRequest struct { + Model string `json:"model"` + Messages []ChatCompletionMessage `json:"messages"` + // MaxTokens The maximum number of tokens that can be generated in the chat completion. + // This value can be used to control costs for text generated via API. + // This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models. + // refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens + MaxTokens int `json:"max_tokens,omitempty"` + // MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion, + // including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning + MaxCompletionTokens int `json:"max_completion_tokens,omitempty"` + Temperature float32 `json:"temperature,omitempty"` + TopP float32 `json:"top_p,omitempty"` + N int `json:"n,omitempty"` + Stream bool `json:"stream,omitempty"` + Stop []string `json:"stop,omitempty"` + PresencePenalty float32 `json:"presence_penalty,omitempty"` + ResponseFormat *ChatCompletionResponseFormat `json:"response_format,omitempty"` + Seed *int `json:"seed,omitempty"` + FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` + // LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string. + // incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}` + // refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias + LogitBias map[string]int `json:"logit_bias,omitempty"` + // LogProbs indicates whether to return log probabilities of the output tokens or not. + // If true, returns the log probabilities of each output token returned in the content of message. + // This option is currently not available on the gpt-4-vision-preview model. + LogProbs bool `json:"logprobs,omitempty"` + // TopLogProbs is an integer between 0 and 5 specifying the number of most likely tokens to return at each + // token position, each with an associated log probability. + // logprobs must be set to true if this parameter is used. + TopLogProbs int `json:"top_logprobs,omitempty"` + User string `json:"user,omitempty"` + // Deprecated: use Tools instead. + Functions []FunctionDefinition `json:"functions,omitempty"` + // Deprecated: use ToolChoice instead. 
+ FunctionCall any `json:"function_call,omitempty"` + Tools []Tool `json:"tools,omitempty"` + // This can be either a string or an ToolChoice object. + ToolChoice any `json:"tool_choice,omitempty"` + // Options for streaming response. Only set this when you set stream: true. + StreamOptions *StreamOptions `json:"stream_options,omitempty"` + // Disable the default behavior of parallel tool calls by setting it: false. + ParallelToolCalls any `json:"parallel_tool_calls,omitempty"` + // Store can be set to true to store the output of this completion request for use in distillations and evals. + // https://platform.openai.com/docs/api-reference/chat/create#chat-create-store + Store bool `json:"store,omitempty"` + // Metadata to store with the completion. + Metadata map[string]string `json:"metadata,omitempty"` +} + +type StreamOptions struct { + // If set, an additional chunk will be streamed before the data: [DONE] message. + // The usage field on this chunk shows the token usage statistics for the entire request, + // and the choices field will always be an empty array. + // All other chunks will also include a usage field, but with a null value. + IncludeUsage bool `json:"include_usage,omitempty"` +} + +type ToolType string + +const ( + ToolTypeFunction ToolType = "function" +) + +type Tool struct { + Type ToolType `json:"type"` + Function *FunctionDefinition `json:"function,omitempty"` +} + +type ToolChoice struct { + Type ToolType `json:"type"` + Function ToolFunction `json:"function,omitempty"` +} + +type ToolFunction struct { + Name string `json:"name"` +} + +type FunctionDefinition struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Strict bool `json:"strict,omitempty"` + // Parameters is an object describing the function. + // You can pass json.RawMessage to describe the schema, + // or you can pass in a struct which serializes to the proper JSON schema. + // The jsonschema package is provided for convenience, but you should + // consider another specialized library if you require more complex schemas. + Parameters any `json:"parameters"` +} + +// Deprecated: use FunctionDefinition instead. +type FunctionDefine = FunctionDefinition + +type TopLogProbs struct { + Token string `json:"token"` + LogProb float64 `json:"logprob"` + Bytes []byte `json:"bytes,omitempty"` +} + +// LogProb represents the probability information for a token. +type LogProb struct { + Token string `json:"token"` + LogProb float64 `json:"logprob"` + Bytes []byte `json:"bytes,omitempty"` // Omitting the field if it is null + // TopLogProbs is a list of the most likely tokens and their log probability, at this token position. + // In rare cases, there may be fewer than the number of requested top_logprobs returned. + TopLogProbs []TopLogProbs `json:"top_logprobs"` +} + +// LogProbs is the top-level structure containing the log probability information. +type LogProbs struct { + // Content is a list of message content tokens with log probability information. 
+ Content []LogProb `json:"content"` +} + +type FinishReason string + +const ( + FinishReasonStop FinishReason = "stop" + FinishReasonLength FinishReason = "length" + FinishReasonFunctionCall FinishReason = "function_call" + FinishReasonToolCalls FinishReason = "tool_calls" + FinishReasonContentFilter FinishReason = "content_filter" + FinishReasonNull FinishReason = "null" +) + +func (r FinishReason) MarshalJSON() ([]byte, error) { + if r == FinishReasonNull || r == "" { + return []byte("null"), nil + } + return []byte(`"` + string(r) + `"`), nil // best effort to not break future API changes +} + +type ChatCompletionChoice struct { + Index int `json:"index"` + Message ChatCompletionMessage `json:"message"` + // FinishReason + // stop: API returned complete message, + // or a message terminated by one of the stop sequences provided via the stop parameter + // length: Incomplete model output due to max_tokens parameter or token limit + // function_call: The model decided to call a function + // content_filter: Omitted content due to a flag from our content filters + // null: API response still in progress or incomplete + FinishReason FinishReason `json:"finish_reason"` + LogProbs *LogProbs `json:"logprobs,omitempty"` + ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"` +} + +// ChatCompletionResponse represents a response structure for chat completion API. +type ChatCompletionResponse struct { + ID string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Model string `json:"model"` + Choices []ChatCompletionChoice `json:"choices"` + Usage Usage `json:"usage"` + SystemFingerprint string `json:"system_fingerprint"` + PromptFilterResults []PromptFilterResult `json:"prompt_filter_results,omitempty"` + + httpHeader +} + +// CreateChatCompletion — API call to Create a completion for the chat message. 
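+// Illustrative sketch of a non-streaming call (error handling trimmed; the
+// model and message content are placeholders, and client is assumed to come
+// from NewClient or NewClientWithConfig):
+//
+//	resp, err := client.CreateChatCompletion(ctx, ChatCompletionRequest{
+//		Model: GPT4oMini,
+//		Messages: []ChatCompletionMessage{
+//			{Role: ChatMessageRoleUser, Content: "Summarize the policy below."},
+//		},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	answer := resp.Choices[0].Message.Content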
+func (c *Client) CreateChatCompletion( + ctx context.Context, + request ChatCompletionRequest, +) (response ChatCompletionResponse, err error) { + if request.Stream { + err = ErrChatCompletionStreamNotSupported + return + } + + urlSuffix := chatCompletionsSuffix + if !checkEndpointSupportsModel(urlSuffix, request.Model) { + err = ErrChatCompletionInvalidModel + return + } + + if err = validateRequestForO1Models(request); err != nil { + return + } + + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix, withModel(request.Model)), + withBody(request), + ) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/chat_stream.go b/constraint/vendor/github.com/sashabaranov/go-openai/chat_stream.go new file mode 100644 index 000000000..58b2651c0 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/chat_stream.go @@ -0,0 +1,105 @@ +package openai + +import ( + "context" + "net/http" +) + +type ChatCompletionStreamChoiceDelta struct { + Content string `json:"content,omitempty"` + Role string `json:"role,omitempty"` + FunctionCall *FunctionCall `json:"function_call,omitempty"` + ToolCalls []ToolCall `json:"tool_calls,omitempty"` + Refusal string `json:"refusal,omitempty"` +} + +type ChatCompletionStreamChoiceLogprobs struct { + Content []ChatCompletionTokenLogprob `json:"content,omitempty"` + Refusal []ChatCompletionTokenLogprob `json:"refusal,omitempty"` +} + +type ChatCompletionTokenLogprob struct { + Token string `json:"token"` + Bytes []int64 `json:"bytes,omitempty"` + Logprob float64 `json:"logprob,omitempty"` + TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs"` +} + +type ChatCompletionTokenLogprobTopLogprob struct { + Token string `json:"token"` + Bytes []int64 `json:"bytes"` + Logprob float64 `json:"logprob"` +} + +type ChatCompletionStreamChoice struct { + Index int `json:"index"` + Delta ChatCompletionStreamChoiceDelta `json:"delta"` + Logprobs *ChatCompletionStreamChoiceLogprobs `json:"logprobs,omitempty"` + FinishReason FinishReason `json:"finish_reason"` + ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"` +} + +type PromptFilterResult struct { + Index int `json:"index"` + ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"` +} + +type ChatCompletionStreamResponse struct { + ID string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Model string `json:"model"` + Choices []ChatCompletionStreamChoice `json:"choices"` + SystemFingerprint string `json:"system_fingerprint"` + PromptAnnotations []PromptAnnotation `json:"prompt_annotations,omitempty"` + PromptFilterResults []PromptFilterResult `json:"prompt_filter_results,omitempty"` + // An optional field that will only be present when you set stream_options: {"include_usage": true} in your request. + // When present, it contains a null value except for the last chunk which contains the token usage statistics + // for the entire request. + Usage *Usage `json:"usage,omitempty"` +} + +// ChatCompletionStream +// Note: Perhaps it is more elegant to abstract Stream using generics. +type ChatCompletionStream struct { + *streamReader[ChatCompletionStreamResponse] +} + +// CreateChatCompletionStream — API call to create a chat completion w/ streaming +// support. It sets whether to stream back partial progress. 
If set, tokens will be +// sent as data-only server-sent events as they become available, with the +// stream terminated by a data: [DONE] message. +func (c *Client) CreateChatCompletionStream( + ctx context.Context, + request ChatCompletionRequest, +) (stream *ChatCompletionStream, err error) { + urlSuffix := chatCompletionsSuffix + if !checkEndpointSupportsModel(urlSuffix, request.Model) { + err = ErrChatCompletionInvalidModel + return + } + + request.Stream = true + if err = validateRequestForO1Models(request); err != nil { + return + } + + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix, withModel(request.Model)), + withBody(request), + ) + if err != nil { + return nil, err + } + + resp, err := sendRequestStream[ChatCompletionStreamResponse](c, req) + if err != nil { + return + } + stream = &ChatCompletionStream{ + streamReader: resp, + } + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/client.go b/constraint/vendor/github.com/sashabaranov/go-openai/client.go new file mode 100644 index 000000000..ed8595e0b --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/client.go @@ -0,0 +1,319 @@ +package openai + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + utils "github.com/sashabaranov/go-openai/internal" +) + +// Client is OpenAI GPT-3 API client. +type Client struct { + config ClientConfig + + requestBuilder utils.RequestBuilder + createFormBuilder func(io.Writer) utils.FormBuilder +} + +type Response interface { + SetHeader(http.Header) +} + +type httpHeader http.Header + +func (h *httpHeader) SetHeader(header http.Header) { + *h = httpHeader(header) +} + +func (h *httpHeader) Header() http.Header { + return http.Header(*h) +} + +func (h *httpHeader) GetRateLimitHeaders() RateLimitHeaders { + return newRateLimitHeaders(h.Header()) +} + +type RawResponse struct { + io.ReadCloser + + httpHeader +} + +// NewClient creates new OpenAI API client. +func NewClient(authToken string) *Client { + config := DefaultConfig(authToken) + return NewClientWithConfig(config) +} + +// NewClientWithConfig creates new OpenAI API client for specified config. +func NewClientWithConfig(config ClientConfig) *Client { + return &Client{ + config: config, + requestBuilder: utils.NewRequestBuilder(), + createFormBuilder: func(body io.Writer) utils.FormBuilder { + return utils.NewFormBuilder(body) + }, + } +} + +// NewOrgClient creates new OpenAI API client for specified Organization ID. +// +// Deprecated: Please use NewClientWithConfig. 
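+// Equivalent construction with NewClientWithConfig (sketch; the organization
+// ID is a placeholder):
+//
+//	config := DefaultConfig(authToken)
+//	config.OrgID = "org-..."
+//	client := NewClientWithConfig(config)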
+func NewOrgClient(authToken, org string) *Client { + config := DefaultConfig(authToken) + config.OrgID = org + return NewClientWithConfig(config) +} + +type requestOptions struct { + body any + header http.Header +} + +type requestOption func(*requestOptions) + +func withBody(body any) requestOption { + return func(args *requestOptions) { + args.body = body + } +} + +func withContentType(contentType string) requestOption { + return func(args *requestOptions) { + args.header.Set("Content-Type", contentType) + } +} + +func withBetaAssistantVersion(version string) requestOption { + return func(args *requestOptions) { + args.header.Set("OpenAI-Beta", fmt.Sprintf("assistants=%s", version)) + } +} + +func (c *Client) newRequest(ctx context.Context, method, url string, setters ...requestOption) (*http.Request, error) { + // Default Options + args := &requestOptions{ + body: nil, + header: make(http.Header), + } + for _, setter := range setters { + setter(args) + } + req, err := c.requestBuilder.Build(ctx, method, url, args.body, args.header) + if err != nil { + return nil, err + } + c.setCommonHeaders(req) + return req, nil +} + +func (c *Client) sendRequest(req *http.Request, v Response) error { + req.Header.Set("Accept", "application/json") + + // Check whether Content-Type is already set, Upload Files API requires + // Content-Type == multipart/form-data + contentType := req.Header.Get("Content-Type") + if contentType == "" { + req.Header.Set("Content-Type", "application/json") + } + + res, err := c.config.HTTPClient.Do(req) + if err != nil { + return err + } + + defer res.Body.Close() + + if v != nil { + v.SetHeader(res.Header) + } + + if isFailureStatusCode(res) { + return c.handleErrorResp(res) + } + + return decodeResponse(res.Body, v) +} + +func (c *Client) sendRequestRaw(req *http.Request) (response RawResponse, err error) { + resp, err := c.config.HTTPClient.Do(req) //nolint:bodyclose // body should be closed by outer function + if err != nil { + return + } + + if isFailureStatusCode(resp) { + err = c.handleErrorResp(resp) + return + } + + response.SetHeader(resp.Header) + response.ReadCloser = resp.Body + return +} + +func sendRequestStream[T streamable](client *Client, req *http.Request) (*streamReader[T], error) { + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "text/event-stream") + req.Header.Set("Cache-Control", "no-cache") + req.Header.Set("Connection", "keep-alive") + + resp, err := client.config.HTTPClient.Do(req) //nolint:bodyclose // body is closed in stream.Close() + if err != nil { + return new(streamReader[T]), err + } + if isFailureStatusCode(resp) { + return new(streamReader[T]), client.handleErrorResp(resp) + } + return &streamReader[T]{ + emptyMessagesLimit: client.config.EmptyMessagesLimit, + reader: bufio.NewReader(resp.Body), + response: resp, + errAccumulator: utils.NewErrorAccumulator(), + unmarshaler: &utils.JSONUnmarshaler{}, + httpHeader: httpHeader(resp.Header), + }, nil +} + +func (c *Client) setCommonHeaders(req *http.Request) { + // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#authentication + // Azure API Key authentication + if c.config.APIType == APITypeAzure || c.config.APIType == APITypeCloudflareAzure { + req.Header.Set(AzureAPIKeyHeader, c.config.authToken) + } else if c.config.authToken != "" { + // OpenAI or Azure AD authentication + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.config.authToken)) + } + if c.config.OrgID != "" { + req.Header.Set("OpenAI-Organization", 
c.config.OrgID) + } +} + +func isFailureStatusCode(resp *http.Response) bool { + return resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest +} + +func decodeResponse(body io.Reader, v any) error { + if v == nil { + return nil + } + + switch o := v.(type) { + case *string: + return decodeString(body, o) + case *audioTextResponse: + return decodeString(body, &o.Text) + default: + return json.NewDecoder(body).Decode(v) + } +} + +func decodeString(body io.Reader, output *string) error { + b, err := io.ReadAll(body) + if err != nil { + return err + } + *output = string(b) + return nil +} + +type fullURLOptions struct { + model string +} + +type fullURLOption func(*fullURLOptions) + +func withModel(model string) fullURLOption { + return func(args *fullURLOptions) { + args.model = model + } +} + +var azureDeploymentsEndpoints = []string{ + "/completions", + "/embeddings", + "/chat/completions", + "/audio/transcriptions", + "/audio/translations", + "/audio/speech", + "/images/generations", +} + +// fullURL returns full URL for request. +func (c *Client) fullURL(suffix string, setters ...fullURLOption) string { + baseURL := strings.TrimRight(c.config.BaseURL, "/") + args := fullURLOptions{} + for _, setter := range setters { + setter(&args) + } + + if c.config.APIType == APITypeAzure || c.config.APIType == APITypeAzureAD { + baseURL = c.baseURLWithAzureDeployment(baseURL, suffix, args.model) + } + + if c.config.APIVersion != "" { + suffix = c.suffixWithAPIVersion(suffix) + } + return fmt.Sprintf("%s%s", baseURL, suffix) +} + +func (c *Client) suffixWithAPIVersion(suffix string) string { + parsedSuffix, err := url.Parse(suffix) + if err != nil { + panic("failed to parse url suffix") + } + query := parsedSuffix.Query() + query.Add("api-version", c.config.APIVersion) + return fmt.Sprintf("%s?%s", parsedSuffix.Path, query.Encode()) +} + +func (c *Client) baseURLWithAzureDeployment(baseURL, suffix, model string) (newBaseURL string) { + baseURL = fmt.Sprintf("%s/%s", strings.TrimRight(baseURL, "/"), azureAPIPrefix) + if containsSubstr(azureDeploymentsEndpoints, suffix) { + azureDeploymentName := c.config.GetAzureDeploymentByModel(model) + if azureDeploymentName == "" { + azureDeploymentName = "UNKNOWN" + } + baseURL = fmt.Sprintf("%s/%s/%s", baseURL, azureDeploymentsPrefix, azureDeploymentName) + } + return baseURL +} + +func (c *Client) handleErrorResp(resp *http.Response) error { + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("error, reading response body: %w", err) + } + var errRes ErrorResponse + err = json.Unmarshal(body, &errRes) + if err != nil || errRes.Error == nil { + reqErr := &RequestError{ + HTTPStatus: resp.Status, + HTTPStatusCode: resp.StatusCode, + Err: err, + Body: body, + } + if errRes.Error != nil { + reqErr.Err = errRes.Error + } + return reqErr + } + + errRes.Error.HTTPStatus = resp.Status + errRes.Error.HTTPStatusCode = resp.StatusCode + return errRes.Error +} + +func containsSubstr(s []string, e string) bool { + for _, v := range s { + if strings.Contains(e, v) { + return true + } + } + return false +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/common.go b/constraint/vendor/github.com/sashabaranov/go-openai/common.go new file mode 100644 index 000000000..8cc7289c0 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/common.go @@ -0,0 +1,24 @@ +package openai + +// common.go defines common types used throughout the OpenAI API. 
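+//
+// Usage is embedded in most response types, so callers can report token
+// consumption after any request. Sketch, assuming resp is e.g. a
+// ChatCompletionResponse:
+//
+//	u := resp.Usage
+//	log.Printf("prompt=%d completion=%d total=%d",
+//		u.PromptTokens, u.CompletionTokens, u.TotalTokens)
+//	if u.CompletionTokensDetails != nil {
+//		log.Printf("reasoning tokens: %d", u.CompletionTokensDetails.ReasoningTokens)
+//	}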
+ +// Usage Represents the total token usage per request to OpenAI. +type Usage struct { + PromptTokens int `json:"prompt_tokens"` + CompletionTokens int `json:"completion_tokens"` + TotalTokens int `json:"total_tokens"` + PromptTokensDetails *PromptTokensDetails `json:"prompt_tokens_details"` + CompletionTokensDetails *CompletionTokensDetails `json:"completion_tokens_details"` +} + +// CompletionTokensDetails Breakdown of tokens used in a completion. +type CompletionTokensDetails struct { + AudioTokens int `json:"audio_tokens"` + ReasoningTokens int `json:"reasoning_tokens"` +} + +// PromptTokensDetails Breakdown of tokens used in the prompt. +type PromptTokensDetails struct { + AudioTokens int `json:"audio_tokens"` + CachedTokens int `json:"cached_tokens"` +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/completion.go b/constraint/vendor/github.com/sashabaranov/go-openai/completion.go new file mode 100644 index 000000000..f11566081 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/completion.go @@ -0,0 +1,336 @@ +package openai + +import ( + "context" + "errors" + "net/http" +) + +var ( + ErrO1MaxTokensDeprecated = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens") //nolint:lll + ErrCompletionUnsupportedModel = errors.New("this model is not supported with this method, please use CreateChatCompletion client method instead") //nolint:lll + ErrCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateCompletionStream") //nolint:lll + ErrCompletionRequestPromptTypeNotSupported = errors.New("the type of CompletionRequest.Prompt only supports string and []string") //nolint:lll +) + +var ( + ErrO1BetaLimitationsMessageTypes = errors.New("this model has beta-limitations, user and assistant messages only, system messages are not supported") //nolint:lll + ErrO1BetaLimitationsTools = errors.New("this model has beta-limitations, tools, function calling, and response format parameters are not supported") //nolint:lll + ErrO1BetaLimitationsLogprobs = errors.New("this model has beta-limitations, logprobs not supported") //nolint:lll + ErrO1BetaLimitationsOther = errors.New("this model has beta-limitations, temperature, top_p and n are fixed at 1, while presence_penalty and frequency_penalty are fixed at 0") //nolint:lll +) + +// GPT3 Defines the models provided by OpenAI to use when generating +// completions from OpenAI. +// GPT3 Models are designed for text-based tasks. For code-specific +// tasks, please refer to the Codex series of models. 
+const ( + O1Mini = "o1-mini" + O1Mini20240912 = "o1-mini-2024-09-12" + O1Preview = "o1-preview" + O1Preview20240912 = "o1-preview-2024-09-12" + GPT432K0613 = "gpt-4-32k-0613" + GPT432K0314 = "gpt-4-32k-0314" + GPT432K = "gpt-4-32k" + GPT40613 = "gpt-4-0613" + GPT40314 = "gpt-4-0314" + GPT4o = "gpt-4o" + GPT4o20240513 = "gpt-4o-2024-05-13" + GPT4o20240806 = "gpt-4o-2024-08-06" + GPT4o20241120 = "gpt-4o-2024-11-20" + GPT4oLatest = "chatgpt-4o-latest" + GPT4oMini = "gpt-4o-mini" + GPT4oMini20240718 = "gpt-4o-mini-2024-07-18" + GPT4Turbo = "gpt-4-turbo" + GPT4Turbo20240409 = "gpt-4-turbo-2024-04-09" + GPT4Turbo0125 = "gpt-4-0125-preview" + GPT4Turbo1106 = "gpt-4-1106-preview" + GPT4TurboPreview = "gpt-4-turbo-preview" + GPT4VisionPreview = "gpt-4-vision-preview" + GPT4 = "gpt-4" + GPT3Dot5Turbo0125 = "gpt-3.5-turbo-0125" + GPT3Dot5Turbo1106 = "gpt-3.5-turbo-1106" + GPT3Dot5Turbo0613 = "gpt-3.5-turbo-0613" + GPT3Dot5Turbo0301 = "gpt-3.5-turbo-0301" + GPT3Dot5Turbo16K = "gpt-3.5-turbo-16k" + GPT3Dot5Turbo16K0613 = "gpt-3.5-turbo-16k-0613" + GPT3Dot5Turbo = "gpt-3.5-turbo" + GPT3Dot5TurboInstruct = "gpt-3.5-turbo-instruct" + // Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead. + GPT3TextDavinci003 = "text-davinci-003" + // Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead. + GPT3TextDavinci002 = "text-davinci-002" + // Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead. + GPT3TextCurie001 = "text-curie-001" + // Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead. + GPT3TextBabbage001 = "text-babbage-001" + // Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead. + GPT3TextAda001 = "text-ada-001" + // Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead. + GPT3TextDavinci001 = "text-davinci-001" + // Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead. + GPT3DavinciInstructBeta = "davinci-instruct-beta" + // Deprecated: Model is shutdown. Use davinci-002 instead. + GPT3Davinci = "davinci" + GPT3Davinci002 = "davinci-002" + // Deprecated: Model is shutdown. Use gpt-3.5-turbo-instruct instead. + GPT3CurieInstructBeta = "curie-instruct-beta" + GPT3Curie = "curie" + GPT3Curie002 = "curie-002" + // Deprecated: Model is shutdown. Use babbage-002 instead. + GPT3Ada = "ada" + GPT3Ada002 = "ada-002" + // Deprecated: Model is shutdown. Use babbage-002 instead. + GPT3Babbage = "babbage" + GPT3Babbage002 = "babbage-002" +) + +// Codex Defines the models provided by OpenAI. +// These models are designed for code-specific tasks, and use +// a different tokenizer which optimizes for whitespace. +const ( + CodexCodeDavinci002 = "code-davinci-002" + CodexCodeCushman001 = "code-cushman-001" + CodexCodeDavinci001 = "code-davinci-001" +) + +// O1SeriesModels List of new Series of OpenAI models. +// Some old api attributes not supported. 
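+// Requests to these models should set MaxCompletionTokens; a non-zero
+// MaxTokens is rejected by validateRequestForO1Models below with
+// ErrO1MaxTokensDeprecated. Sketch (message content is a placeholder):
+//
+//	req := ChatCompletionRequest{
+//		Model:               O1Mini,
+//		MaxCompletionTokens: 2048,
+//		Messages: []ChatCompletionMessage{
+//			{Role: ChatMessageRoleUser, Content: "..."},
+//		},
+//	}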
+var O1SeriesModels = map[string]struct{}{ + O1Mini: {}, + O1Mini20240912: {}, + O1Preview: {}, + O1Preview20240912: {}, +} + +var disabledModelsForEndpoints = map[string]map[string]bool{ + "/completions": { + O1Mini: true, + O1Mini20240912: true, + O1Preview: true, + O1Preview20240912: true, + GPT3Dot5Turbo: true, + GPT3Dot5Turbo0301: true, + GPT3Dot5Turbo0613: true, + GPT3Dot5Turbo1106: true, + GPT3Dot5Turbo0125: true, + GPT3Dot5Turbo16K: true, + GPT3Dot5Turbo16K0613: true, + GPT4: true, + GPT4o: true, + GPT4o20240513: true, + GPT4o20240806: true, + GPT4o20241120: true, + GPT4oLatest: true, + GPT4oMini: true, + GPT4oMini20240718: true, + GPT4TurboPreview: true, + GPT4VisionPreview: true, + GPT4Turbo1106: true, + GPT4Turbo0125: true, + GPT4Turbo: true, + GPT4Turbo20240409: true, + GPT40314: true, + GPT40613: true, + GPT432K: true, + GPT432K0314: true, + GPT432K0613: true, + }, + chatCompletionsSuffix: { + CodexCodeDavinci002: true, + CodexCodeCushman001: true, + CodexCodeDavinci001: true, + GPT3TextDavinci003: true, + GPT3TextDavinci002: true, + GPT3TextCurie001: true, + GPT3TextBabbage001: true, + GPT3TextAda001: true, + GPT3TextDavinci001: true, + GPT3DavinciInstructBeta: true, + GPT3Davinci: true, + GPT3CurieInstructBeta: true, + GPT3Curie: true, + GPT3Ada: true, + GPT3Babbage: true, + }, +} + +func checkEndpointSupportsModel(endpoint, model string) bool { + return !disabledModelsForEndpoints[endpoint][model] +} + +func checkPromptType(prompt any) bool { + _, isString := prompt.(string) + _, isStringSlice := prompt.([]string) + if isString || isStringSlice { + return true + } + + // check if it is prompt is []string hidden under []any + slice, isSlice := prompt.([]any) + if !isSlice { + return false + } + + for _, item := range slice { + _, itemIsString := item.(string) + if !itemIsString { + return false + } + } + return true // all items in the slice are string, so it is []string +} + +var unsupportedToolsForO1Models = map[ToolType]struct{}{ + ToolTypeFunction: {}, +} + +var availableMessageRoleForO1Models = map[string]struct{}{ + ChatMessageRoleUser: {}, + ChatMessageRoleAssistant: {}, +} + +// validateRequestForO1Models checks for deprecated fields of OpenAI models. +func validateRequestForO1Models(request ChatCompletionRequest) error { + if _, found := O1SeriesModels[request.Model]; !found { + return nil + } + + if request.MaxTokens > 0 { + return ErrO1MaxTokensDeprecated + } + + // Logprobs: not supported. + if request.LogProbs { + return ErrO1BetaLimitationsLogprobs + } + + // Message types: user and assistant messages only, system messages are not supported. + for _, m := range request.Messages { + if _, found := availableMessageRoleForO1Models[m.Role]; !found { + return ErrO1BetaLimitationsMessageTypes + } + } + + // Tools: tools, function calling, and response format parameters are not supported + for _, t := range request.Tools { + if _, found := unsupportedToolsForO1Models[t.Type]; found { + return ErrO1BetaLimitationsTools + } + } + + // Other: temperature, top_p and n are fixed at 1, while presence_penalty and frequency_penalty are fixed at 0. 
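+	// These request fields are marshalled with omitempty, so a zero value means
+	// the caller left the field unset; only explicit non-default values are
+	// rejected below.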
+ if request.Temperature > 0 && request.Temperature != 1 { + return ErrO1BetaLimitationsOther + } + if request.TopP > 0 && request.TopP != 1 { + return ErrO1BetaLimitationsOther + } + if request.N > 0 && request.N != 1 { + return ErrO1BetaLimitationsOther + } + if request.PresencePenalty > 0 { + return ErrO1BetaLimitationsOther + } + if request.FrequencyPenalty > 0 { + return ErrO1BetaLimitationsOther + } + + return nil +} + +// CompletionRequest represents a request structure for completion API. +type CompletionRequest struct { + Model string `json:"model"` + Prompt any `json:"prompt,omitempty"` + BestOf int `json:"best_of,omitempty"` + Echo bool `json:"echo,omitempty"` + FrequencyPenalty float32 `json:"frequency_penalty,omitempty"` + // LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string. + // incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}` + // refs: https://platform.openai.com/docs/api-reference/completions/create#completions/create-logit_bias + LogitBias map[string]int `json:"logit_bias,omitempty"` + // Store can be set to true to store the output of this completion request for use in distillations and evals. + // https://platform.openai.com/docs/api-reference/chat/create#chat-create-store + Store bool `json:"store,omitempty"` + // Metadata to store with the completion. + Metadata map[string]string `json:"metadata,omitempty"` + LogProbs int `json:"logprobs,omitempty"` + MaxTokens int `json:"max_tokens,omitempty"` + N int `json:"n,omitempty"` + PresencePenalty float32 `json:"presence_penalty,omitempty"` + Seed *int `json:"seed,omitempty"` + Stop []string `json:"stop,omitempty"` + Stream bool `json:"stream,omitempty"` + Suffix string `json:"suffix,omitempty"` + Temperature float32 `json:"temperature,omitempty"` + TopP float32 `json:"top_p,omitempty"` + User string `json:"user,omitempty"` +} + +// CompletionChoice represents one of possible completions. +type CompletionChoice struct { + Text string `json:"text"` + Index int `json:"index"` + FinishReason string `json:"finish_reason"` + LogProbs LogprobResult `json:"logprobs"` +} + +// LogprobResult represents logprob result of Choice. +type LogprobResult struct { + Tokens []string `json:"tokens"` + TokenLogprobs []float32 `json:"token_logprobs"` + TopLogprobs []map[string]float32 `json:"top_logprobs"` + TextOffset []int `json:"text_offset"` +} + +// CompletionResponse represents a response structure for completion API. +type CompletionResponse struct { + ID string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Model string `json:"model"` + Choices []CompletionChoice `json:"choices"` + Usage Usage `json:"usage"` + + httpHeader +} + +// CreateCompletion — API call to create a completion. This is the main endpoint of the API. Returns new text as well +// as, if requested, the probabilities over each alternative token at each position. +// +// If using a fine-tuned model, simply provide the model's ID in the CompletionRequest object, +// and the server will use the model's parameters to generate the completion. 
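+// Illustrative sketch (error handling trimmed; the model, prompt, and token
+// limit are placeholders):
+//
+//	resp, err := client.CreateCompletion(ctx, CompletionRequest{
+//		Model:     GPT3Dot5TurboInstruct,
+//		Prompt:    "Write a short release note.",
+//		MaxTokens: 64,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	text := resp.Choices[0].Text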
+func (c *Client) CreateCompletion( + ctx context.Context, + request CompletionRequest, +) (response CompletionResponse, err error) { + if request.Stream { + err = ErrCompletionStreamNotSupported + return + } + + urlSuffix := "/completions" + if !checkEndpointSupportsModel(urlSuffix, request.Model) { + err = ErrCompletionUnsupportedModel + return + } + + if !checkPromptType(request.Prompt) { + err = ErrCompletionRequestPromptTypeNotSupported + return + } + + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix, withModel(request.Model)), + withBody(request), + ) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/config.go b/constraint/vendor/github.com/sashabaranov/go-openai/config.go new file mode 100644 index 000000000..8a9183558 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/config.go @@ -0,0 +1,89 @@ +package openai + +import ( + "net/http" + "regexp" +) + +const ( + openaiAPIURLv1 = "https://api.openai.com/v1" + defaultEmptyMessagesLimit uint = 300 + + azureAPIPrefix = "openai" + azureDeploymentsPrefix = "deployments" +) + +type APIType string + +const ( + APITypeOpenAI APIType = "OPEN_AI" + APITypeAzure APIType = "AZURE" + APITypeAzureAD APIType = "AZURE_AD" + APITypeCloudflareAzure APIType = "CLOUDFLARE_AZURE" +) + +const AzureAPIKeyHeader = "api-key" + +const defaultAssistantVersion = "v2" // upgrade to v2 to support vector store + +type HTTPDoer interface { + Do(req *http.Request) (*http.Response, error) +} + +// ClientConfig is a configuration of a client. +type ClientConfig struct { + authToken string + + BaseURL string + OrgID string + APIType APIType + APIVersion string // required when APIType is APITypeAzure or APITypeAzureAD + AssistantVersion string + AzureModelMapperFunc func(model string) string // replace model to azure deployment name func + HTTPClient HTTPDoer + + EmptyMessagesLimit uint +} + +func DefaultConfig(authToken string) ClientConfig { + return ClientConfig{ + authToken: authToken, + BaseURL: openaiAPIURLv1, + APIType: APITypeOpenAI, + AssistantVersion: defaultAssistantVersion, + OrgID: "", + + HTTPClient: &http.Client{}, + + EmptyMessagesLimit: defaultEmptyMessagesLimit, + } +} + +func DefaultAzureConfig(apiKey, baseURL string) ClientConfig { + return ClientConfig{ + authToken: apiKey, + BaseURL: baseURL, + OrgID: "", + APIType: APITypeAzure, + APIVersion: "2023-05-15", + AzureModelMapperFunc: func(model string) string { + return regexp.MustCompile(`[.:]`).ReplaceAllString(model, "") + }, + + HTTPClient: &http.Client{}, + + EmptyMessagesLimit: defaultEmptyMessagesLimit, + } +} + +func (ClientConfig) String() string { + return "" +} + +func (c ClientConfig) GetAzureDeploymentByModel(model string) string { + if c.AzureModelMapperFunc != nil { + return c.AzureModelMapperFunc(model) + } + + return model +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/edits.go b/constraint/vendor/github.com/sashabaranov/go-openai/edits.go new file mode 100644 index 000000000..fe8ecd0c1 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/edits.go @@ -0,0 +1,53 @@ +package openai + +import ( + "context" + "fmt" + "net/http" +) + +// EditsRequest represents a request structure for Edits API. 
+type EditsRequest struct { + Model *string `json:"model,omitempty"` + Input string `json:"input,omitempty"` + Instruction string `json:"instruction,omitempty"` + N int `json:"n,omitempty"` + Temperature float32 `json:"temperature,omitempty"` + TopP float32 `json:"top_p,omitempty"` +} + +// EditsChoice represents one of possible edits. +type EditsChoice struct { + Text string `json:"text"` + Index int `json:"index"` +} + +// EditsResponse represents a response structure for Edits API. +type EditsResponse struct { + Object string `json:"object"` + Created int64 `json:"created"` + Usage Usage `json:"usage"` + Choices []EditsChoice `json:"choices"` + + httpHeader +} + +// Edits Perform an API call to the Edits endpoint. +/* Deprecated: Users of the Edits API and its associated models (e.g., text-davinci-edit-001 or code-davinci-edit-001) +will need to migrate to GPT-3.5 Turbo by January 4, 2024. +You can use CreateChatCompletion or CreateChatCompletionStream instead. +*/ +func (c *Client) Edits(ctx context.Context, request EditsRequest) (response EditsResponse, err error) { + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL("/edits", withModel(fmt.Sprint(request.Model))), + withBody(request), + ) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/embeddings.go b/constraint/vendor/github.com/sashabaranov/go-openai/embeddings.go new file mode 100644 index 000000000..4a0e682da --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/embeddings.go @@ -0,0 +1,267 @@ +package openai + +import ( + "context" + "encoding/base64" + "encoding/binary" + "errors" + "math" + "net/http" +) + +var ErrVectorLengthMismatch = errors.New("vector length mismatch") + +// EmbeddingModel enumerates the models which can be used +// to generate Embedding vectors. +type EmbeddingModel string + +const ( + // Deprecated: The following block is shut down. Use text-embedding-ada-002 instead. + AdaSimilarity EmbeddingModel = "text-similarity-ada-001" + BabbageSimilarity EmbeddingModel = "text-similarity-babbage-001" + CurieSimilarity EmbeddingModel = "text-similarity-curie-001" + DavinciSimilarity EmbeddingModel = "text-similarity-davinci-001" + AdaSearchDocument EmbeddingModel = "text-search-ada-doc-001" + AdaSearchQuery EmbeddingModel = "text-search-ada-query-001" + BabbageSearchDocument EmbeddingModel = "text-search-babbage-doc-001" + BabbageSearchQuery EmbeddingModel = "text-search-babbage-query-001" + CurieSearchDocument EmbeddingModel = "text-search-curie-doc-001" + CurieSearchQuery EmbeddingModel = "text-search-curie-query-001" + DavinciSearchDocument EmbeddingModel = "text-search-davinci-doc-001" + DavinciSearchQuery EmbeddingModel = "text-search-davinci-query-001" + AdaCodeSearchCode EmbeddingModel = "code-search-ada-code-001" + AdaCodeSearchText EmbeddingModel = "code-search-ada-text-001" + BabbageCodeSearchCode EmbeddingModel = "code-search-babbage-code-001" + BabbageCodeSearchText EmbeddingModel = "code-search-babbage-text-001" + + AdaEmbeddingV2 EmbeddingModel = "text-embedding-ada-002" + SmallEmbedding3 EmbeddingModel = "text-embedding-3-small" + LargeEmbedding3 EmbeddingModel = "text-embedding-3-large" +) + +// Embedding is a special format of data representation that can be easily utilized by machine +// learning models and algorithms. The embedding is an information dense representation of the +// semantic meaning of a piece of text. 
Each embedding is a vector of floating point numbers, +// such that the distance between two embeddings in the vector space is correlated with semantic similarity +// between two inputs in the original format. For example, if two texts are similar, +// then their vector representations should also be similar. +type Embedding struct { + Object string `json:"object"` + Embedding []float32 `json:"embedding"` + Index int `json:"index"` +} + +// DotProduct calculates the dot product of the embedding vector with another +// embedding vector. Both vectors must have the same length; otherwise, an +// ErrVectorLengthMismatch is returned. The method returns the calculated dot +// product as a float32 value. +func (e *Embedding) DotProduct(other *Embedding) (float32, error) { + if len(e.Embedding) != len(other.Embedding) { + return 0, ErrVectorLengthMismatch + } + + var dotProduct float32 + for i := range e.Embedding { + dotProduct += e.Embedding[i] * other.Embedding[i] + } + + return dotProduct, nil +} + +// EmbeddingResponse is the response from a Create embeddings request. +type EmbeddingResponse struct { + Object string `json:"object"` + Data []Embedding `json:"data"` + Model EmbeddingModel `json:"model"` + Usage Usage `json:"usage"` + + httpHeader +} + +type base64String string + +func (b base64String) Decode() ([]float32, error) { + decodedData, err := base64.StdEncoding.DecodeString(string(b)) + if err != nil { + return nil, err + } + + const sizeOfFloat32 = 4 + floats := make([]float32, len(decodedData)/sizeOfFloat32) + for i := 0; i < len(floats); i++ { + floats[i] = math.Float32frombits(binary.LittleEndian.Uint32(decodedData[i*4 : (i+1)*4])) + } + + return floats, nil +} + +// Base64Embedding is a container for base64 encoded embeddings. +type Base64Embedding struct { + Object string `json:"object"` + Embedding base64String `json:"embedding"` + Index int `json:"index"` +} + +// EmbeddingResponseBase64 is the response from a Create embeddings request with base64 encoding format. +type EmbeddingResponseBase64 struct { + Object string `json:"object"` + Data []Base64Embedding `json:"data"` + Model EmbeddingModel `json:"model"` + Usage Usage `json:"usage"` + + httpHeader +} + +// ToEmbeddingResponse converts an embeddingResponseBase64 to an EmbeddingResponse. +func (r *EmbeddingResponseBase64) ToEmbeddingResponse() (EmbeddingResponse, error) { + data := make([]Embedding, len(r.Data)) + + for i, base64Embedding := range r.Data { + embedding, err := base64Embedding.Embedding.Decode() + if err != nil { + return EmbeddingResponse{}, err + } + + data[i] = Embedding{ + Object: base64Embedding.Object, + Embedding: embedding, + Index: base64Embedding.Index, + } + } + + return EmbeddingResponse{ + Object: r.Object, + Model: r.Model, + Data: data, + Usage: r.Usage, + }, nil +} + +type EmbeddingRequestConverter interface { + // Needs to be of type EmbeddingRequestStrings or EmbeddingRequestTokens + Convert() EmbeddingRequest +} + +// EmbeddingEncodingFormat is the format of the embeddings data. +// Currently, only "float" and "base64" are supported, however, "base64" is not officially documented. +// If not specified OpenAI will use "float". 
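+//
+// Most callers can leave EncodingFormat unset. Sketch of a request followed by
+// a similarity check with DotProduct (inputs and model are placeholders):
+//
+//	res, err := client.CreateEmbeddings(ctx, EmbeddingRequest{
+//		Input: []string{"constraint template", "policy violation"},
+//		Model: SmallEmbedding3,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	score, err := res.Data[0].DotProduct(&res.Data[1])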
+type EmbeddingEncodingFormat string + +const ( + EmbeddingEncodingFormatFloat EmbeddingEncodingFormat = "float" + EmbeddingEncodingFormatBase64 EmbeddingEncodingFormat = "base64" +) + +type EmbeddingRequest struct { + Input any `json:"input"` + Model EmbeddingModel `json:"model"` + User string `json:"user,omitempty"` + EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"` + // Dimensions The number of dimensions the resulting output embeddings should have. + // Only supported in text-embedding-3 and later models. + Dimensions int `json:"dimensions,omitempty"` +} + +func (r EmbeddingRequest) Convert() EmbeddingRequest { + return r +} + +// EmbeddingRequestStrings is the input to a create embeddings request with a slice of strings. +type EmbeddingRequestStrings struct { + // Input is a slice of strings for which you want to generate an Embedding vector. + // Each input must not exceed 8192 tokens in length. + // OpenAPI suggests replacing newlines (\n) in your input with a single space, as they + // have observed inferior results when newlines are present. + // E.g. + // "The food was delicious and the waiter..." + Input []string `json:"input"` + // ID of the model to use. You can use the List models API to see all of your available models, + // or see our Model overview for descriptions of them. + Model EmbeddingModel `json:"model"` + // A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. + User string `json:"user"` + // EmbeddingEncodingFormat is the format of the embeddings data. + // Currently, only "float" and "base64" are supported, however, "base64" is not officially documented. + // If not specified OpenAI will use "float". + EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"` + // Dimensions The number of dimensions the resulting output embeddings should have. + // Only supported in text-embedding-3 and later models. + Dimensions int `json:"dimensions,omitempty"` +} + +func (r EmbeddingRequestStrings) Convert() EmbeddingRequest { + return EmbeddingRequest{ + Input: r.Input, + Model: r.Model, + User: r.User, + EncodingFormat: r.EncodingFormat, + Dimensions: r.Dimensions, + } +} + +type EmbeddingRequestTokens struct { + // Input is a slice of slices of ints ([][]int) for which you want to generate an Embedding vector. + // Each input must not exceed 8192 tokens in length. + // OpenAPI suggests replacing newlines (\n) in your input with a single space, as they + // have observed inferior results when newlines are present. + // E.g. + // "The food was delicious and the waiter..." + Input [][]int `json:"input"` + // ID of the model to use. You can use the List models API to see all of your available models, + // or see our Model overview for descriptions of them. + Model EmbeddingModel `json:"model"` + // A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. + User string `json:"user"` + // EmbeddingEncodingFormat is the format of the embeddings data. + // Currently, only "float" and "base64" are supported, however, "base64" is not officially documented. + // If not specified OpenAI will use "float". + EncodingFormat EmbeddingEncodingFormat `json:"encoding_format,omitempty"` + // Dimensions The number of dimensions the resulting output embeddings should have. + // Only supported in text-embedding-3 and later models. 
+ Dimensions int `json:"dimensions,omitempty"` +} + +func (r EmbeddingRequestTokens) Convert() EmbeddingRequest { + return EmbeddingRequest{ + Input: r.Input, + Model: r.Model, + User: r.User, + EncodingFormat: r.EncodingFormat, + Dimensions: r.Dimensions, + } +} + +// CreateEmbeddings returns an EmbeddingResponse which will contain an Embedding for every item in |body.Input|. +// https://beta.openai.com/docs/api-reference/embeddings/create +// +// Body should be of type EmbeddingRequestStrings for embedding strings or EmbeddingRequestTokens +// for embedding groups of text already converted to tokens. +func (c *Client) CreateEmbeddings( + ctx context.Context, + conv EmbeddingRequestConverter, +) (res EmbeddingResponse, err error) { + baseReq := conv.Convert() + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL("/embeddings", withModel(string(baseReq.Model))), + withBody(baseReq), + ) + if err != nil { + return + } + + if baseReq.EncodingFormat != EmbeddingEncodingFormatBase64 { + err = c.sendRequest(req, &res) + return + } + + base64Response := &EmbeddingResponseBase64{} + err = c.sendRequest(req, base64Response) + if err != nil { + return + } + + res, err = base64Response.ToEmbeddingResponse() + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/engines.go b/constraint/vendor/github.com/sashabaranov/go-openai/engines.go new file mode 100644 index 000000000..5a0dba858 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/engines.go @@ -0,0 +1,52 @@ +package openai + +import ( + "context" + "fmt" + "net/http" +) + +// Engine struct represents engine from OpenAPI API. +type Engine struct { + ID string `json:"id"` + Object string `json:"object"` + Owner string `json:"owner"` + Ready bool `json:"ready"` + + httpHeader +} + +// EnginesList is a list of engines. +type EnginesList struct { + Engines []Engine `json:"data"` + + httpHeader +} + +// ListEngines Lists the currently available engines, and provides basic +// information about each option such as the owner and availability. +func (c *Client) ListEngines(ctx context.Context) (engines EnginesList, err error) { + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL("/engines")) + if err != nil { + return + } + + err = c.sendRequest(req, &engines) + return +} + +// GetEngine Retrieves an engine instance, providing basic information about +// the engine such as the owner and availability. +func (c *Client) GetEngine( + ctx context.Context, + engineID string, +) (engine Engine, err error) { + urlSuffix := fmt.Sprintf("/engines/%s", engineID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix)) + if err != nil { + return + } + + err = c.sendRequest(req, &engine) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/error.go b/constraint/vendor/github.com/sashabaranov/go-openai/error.go new file mode 100644 index 000000000..8a74bd52c --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/error.go @@ -0,0 +1,115 @@ +package openai + +import ( + "encoding/json" + "fmt" + "strings" +) + +// APIError provides error information returned by the OpenAI API. +// InnerError struct is only valid for Azure OpenAI Service. +type APIError struct { + Code any `json:"code,omitempty"` + Message string `json:"message"` + Param *string `json:"param,omitempty"` + Type string `json:"type"` + HTTPStatus string `json:"-"` + HTTPStatusCode int `json:"-"` + InnerError *InnerError `json:"innererror,omitempty"` +} + +// InnerError Azure Content filtering. 
Only valid for Azure OpenAI Service. +type InnerError struct { + Code string `json:"code,omitempty"` + ContentFilterResults ContentFilterResults `json:"content_filter_result,omitempty"` +} + +// RequestError provides information about generic request errors. +type RequestError struct { + HTTPStatus string + HTTPStatusCode int + Err error + Body []byte +} + +type ErrorResponse struct { + Error *APIError `json:"error,omitempty"` +} + +func (e *APIError) Error() string { + if e.HTTPStatusCode > 0 { + return fmt.Sprintf("error, status code: %d, status: %s, message: %s", e.HTTPStatusCode, e.HTTPStatus, e.Message) + } + + return e.Message +} + +func (e *APIError) UnmarshalJSON(data []byte) (err error) { + var rawMap map[string]json.RawMessage + err = json.Unmarshal(data, &rawMap) + if err != nil { + return + } + + err = json.Unmarshal(rawMap["message"], &e.Message) + if err != nil { + // If the parameter field of a function call is invalid as a JSON schema + // refs: https://github.com/sashabaranov/go-openai/issues/381 + var messages []string + err = json.Unmarshal(rawMap["message"], &messages) + if err != nil { + return + } + e.Message = strings.Join(messages, ", ") + } + + // optional fields for azure openai + // refs: https://github.com/sashabaranov/go-openai/issues/343 + if _, ok := rawMap["type"]; ok { + err = json.Unmarshal(rawMap["type"], &e.Type) + if err != nil { + return + } + } + + if _, ok := rawMap["innererror"]; ok { + err = json.Unmarshal(rawMap["innererror"], &e.InnerError) + if err != nil { + return + } + } + + // optional fields + if _, ok := rawMap["param"]; ok { + err = json.Unmarshal(rawMap["param"], &e.Param) + if err != nil { + return + } + } + + if _, ok := rawMap["code"]; !ok { + return nil + } + + // if the api returned a number, we need to force an integer + // since the json package defaults to float64 + var intCode int + err = json.Unmarshal(rawMap["code"], &intCode) + if err == nil { + e.Code = intCode + return nil + } + + return json.Unmarshal(rawMap["code"], &e.Code) +} + +func (e *RequestError) Error() string { + return fmt.Sprintf( + "error, status code: %d, status: %s, message: %s, body: %s", + e.HTTPStatusCode, e.HTTPStatus, e.Err, e.Body, + ) +} + +func (e *RequestError) Unwrap() error { + return e.Err +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/files.go b/constraint/vendor/github.com/sashabaranov/go-openai/files.go new file mode 100644 index 000000000..edc9f2a20 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/files.go @@ -0,0 +1,171 @@ +package openai + +import ( + "bytes" + "context" + "fmt" + "net/http" + "os" +) + +type FileRequest struct { + FileName string `json:"file"` + FilePath string `json:"-"` + Purpose string `json:"purpose"` +} + +// PurposeType represents the purpose of the file when uploading. +type PurposeType string + +const ( + PurposeFineTune PurposeType = "fine-tune" + PurposeFineTuneResults PurposeType = "fine-tune-results" + PurposeAssistants PurposeType = "assistants" + PurposeAssistantsOutput PurposeType = "assistants_output" + PurposeBatch PurposeType = "batch" +) + +// FileBytesRequest represents a file upload request. +type FileBytesRequest struct { + // the name of the uploaded file in OpenAI + Name string + // the bytes of the file + Bytes []byte + // the purpose of the file + Purpose PurposeType +} + +// File struct represents an OpenAPI file. 
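+//
+// Files are created with CreateFile (from a local path) or CreateFileBytes
+// (from memory). Sketch of an in-memory upload; the name, data, and purpose
+// are placeholders:
+//
+//	f, err := client.CreateFileBytes(ctx, FileBytesRequest{
+//		Name:    "training.jsonl",
+//		Bytes:   data,
+//		Purpose: PurposeFineTune,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(f.ID, f.Status)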
+type File struct { + Bytes int `json:"bytes"` + CreatedAt int64 `json:"created_at"` + ID string `json:"id"` + FileName string `json:"filename"` + Object string `json:"object"` + Status string `json:"status"` + Purpose string `json:"purpose"` + StatusDetails string `json:"status_details"` + + httpHeader +} + +// FilesList is a list of files that belong to the user or organization. +type FilesList struct { + Files []File `json:"data"` + + httpHeader +} + +// CreateFileBytes uploads bytes directly to OpenAI without requiring a local file. +func (c *Client) CreateFileBytes(ctx context.Context, request FileBytesRequest) (file File, err error) { + var b bytes.Buffer + reader := bytes.NewReader(request.Bytes) + builder := c.createFormBuilder(&b) + + err = builder.WriteField("purpose", string(request.Purpose)) + if err != nil { + return + } + + err = builder.CreateFormFileReader("file", reader, request.Name) + if err != nil { + return + } + + err = builder.Close() + if err != nil { + return + } + + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL("/files"), + withBody(&b), withContentType(builder.FormDataContentType())) + if err != nil { + return + } + + err = c.sendRequest(req, &file) + return +} + +// CreateFile uploads a jsonl file to GPT3 +// FilePath must be a local file path. +func (c *Client) CreateFile(ctx context.Context, request FileRequest) (file File, err error) { + var b bytes.Buffer + builder := c.createFormBuilder(&b) + + err = builder.WriteField("purpose", request.Purpose) + if err != nil { + return + } + + fileData, err := os.Open(request.FilePath) + if err != nil { + return + } + defer fileData.Close() + + err = builder.CreateFormFile("file", fileData) + if err != nil { + return + } + + err = builder.Close() + if err != nil { + return + } + + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL("/files"), + withBody(&b), withContentType(builder.FormDataContentType())) + if err != nil { + return + } + + err = c.sendRequest(req, &file) + return +} + +// DeleteFile deletes an existing file. +func (c *Client) DeleteFile(ctx context.Context, fileID string) (err error) { + req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL("/files/"+fileID)) + if err != nil { + return + } + + err = c.sendRequest(req, nil) + return +} + +// ListFiles Lists the currently available files, +// and provides basic information about each file such as the file name and purpose. +func (c *Client) ListFiles(ctx context.Context) (files FilesList, err error) { + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL("/files")) + if err != nil { + return + } + + err = c.sendRequest(req, &files) + return +} + +// GetFile Retrieves a file instance, providing basic information about the file +// such as the file name and purpose. 
+func (c *Client) GetFile(ctx context.Context, fileID string) (file File, err error) { + urlSuffix := fmt.Sprintf("/files/%s", fileID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix)) + if err != nil { + return + } + + err = c.sendRequest(req, &file) + return +} + +func (c *Client) GetFileContent(ctx context.Context, fileID string) (content RawResponse, err error) { + urlSuffix := fmt.Sprintf("/files/%s/content", fileID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix)) + if err != nil { + return + } + + return c.sendRequestRaw(req) +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/fine_tunes.go b/constraint/vendor/github.com/sashabaranov/go-openai/fine_tunes.go new file mode 100644 index 000000000..74b47bf3f --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/fine_tunes.go @@ -0,0 +1,178 @@ +package openai + +import ( + "context" + "fmt" + "net/http" +) + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +type FineTuneRequest struct { + TrainingFile string `json:"training_file"` + ValidationFile string `json:"validation_file,omitempty"` + Model string `json:"model,omitempty"` + Epochs int `json:"n_epochs,omitempty"` + BatchSize int `json:"batch_size,omitempty"` + LearningRateMultiplier float32 `json:"learning_rate_multiplier,omitempty"` + PromptLossRate float32 `json:"prompt_loss_rate,omitempty"` + ComputeClassificationMetrics bool `json:"compute_classification_metrics,omitempty"` + ClassificationClasses int `json:"classification_n_classes,omitempty"` + ClassificationPositiveClass string `json:"classification_positive_class,omitempty"` + ClassificationBetas []float32 `json:"classification_betas,omitempty"` + Suffix string `json:"suffix,omitempty"` +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +type FineTune struct { + ID string `json:"id"` + Object string `json:"object"` + Model string `json:"model"` + CreatedAt int64 `json:"created_at"` + FineTuneEventList []FineTuneEvent `json:"events,omitempty"` + FineTunedModel string `json:"fine_tuned_model"` + HyperParams FineTuneHyperParams `json:"hyperparams"` + OrganizationID string `json:"organization_id"` + ResultFiles []File `json:"result_files"` + Status string `json:"status"` + ValidationFiles []File `json:"validation_files"` + TrainingFiles []File `json:"training_files"` + UpdatedAt int64 `json:"updated_at"` + + httpHeader +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +type FineTuneEvent struct { + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + Level string `json:"level"` + Message string `json:"message"` +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. 
+type FineTuneHyperParams struct { + BatchSize int `json:"batch_size"` + LearningRateMultiplier float64 `json:"learning_rate_multiplier"` + Epochs int `json:"n_epochs"` + PromptLossWeight float64 `json:"prompt_loss_weight"` +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +type FineTuneList struct { + Object string `json:"object"` + Data []FineTune `json:"data"` + + httpHeader +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +type FineTuneEventList struct { + Object string `json:"object"` + Data []FineTuneEvent `json:"data"` + + httpHeader +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +type FineTuneDeleteResponse struct { + ID string `json:"id"` + Object string `json:"object"` + Deleted bool `json:"deleted"` + + httpHeader +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +func (c *Client) CreateFineTune(ctx context.Context, request FineTuneRequest) (response FineTune, err error) { + urlSuffix := "/fine-tunes" + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// CancelFineTune cancel a fine-tune job. +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +func (c *Client) CancelFineTune(ctx context.Context, fineTuneID string) (response FineTune, err error) { + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL("/fine-tunes/"+fineTuneID+"/cancel")) //nolint:lll //this method is deprecated + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +func (c *Client) ListFineTunes(ctx context.Context) (response FineTuneList, err error) { + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL("/fine-tunes")) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. 
+func (c *Client) GetFineTune(ctx context.Context, fineTuneID string) (response FineTune, err error) { + urlSuffix := fmt.Sprintf("/fine-tunes/%s", fineTuneID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +func (c *Client) DeleteFineTune(ctx context.Context, fineTuneID string) (response FineTuneDeleteResponse, err error) { + req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL("/fine-tunes/"+fineTuneID)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. +// This API will be officially deprecated on January 4th, 2024. +// OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go. +func (c *Client) ListFineTuneEvents(ctx context.Context, fineTuneID string) (response FineTuneEventList, err error) { + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL("/fine-tunes/"+fineTuneID+"/events")) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/fine_tuning_job.go b/constraint/vendor/github.com/sashabaranov/go-openai/fine_tuning_job.go new file mode 100644 index 000000000..5a9f54a92 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/fine_tuning_job.go @@ -0,0 +1,159 @@ +package openai + +import ( + "context" + "fmt" + "net/http" + "net/url" +) + +type FineTuningJob struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + FinishedAt int64 `json:"finished_at"` + Model string `json:"model"` + FineTunedModel string `json:"fine_tuned_model,omitempty"` + OrganizationID string `json:"organization_id"` + Status string `json:"status"` + Hyperparameters Hyperparameters `json:"hyperparameters"` + TrainingFile string `json:"training_file"` + ValidationFile string `json:"validation_file,omitempty"` + ResultFiles []string `json:"result_files"` + TrainedTokens int `json:"trained_tokens"` + + httpHeader +} + +type Hyperparameters struct { + Epochs any `json:"n_epochs,omitempty"` + LearningRateMultiplier any `json:"learning_rate_multiplier,omitempty"` + BatchSize any `json:"batch_size,omitempty"` +} + +type FineTuningJobRequest struct { + TrainingFile string `json:"training_file"` + ValidationFile string `json:"validation_file,omitempty"` + Model string `json:"model,omitempty"` + Hyperparameters *Hyperparameters `json:"hyperparameters,omitempty"` + Suffix string `json:"suffix,omitempty"` +} + +type FineTuningJobEventList struct { + Object string `json:"object"` + Data []FineTuneEvent `json:"data"` + HasMore bool `json:"has_more"` + + httpHeader +} + +type FineTuningJobEvent struct { + Object string `json:"object"` + ID string `json:"id"` + CreatedAt int `json:"created_at"` + Level string `json:"level"` + Message string `json:"message"` + Data any `json:"data"` + Type string `json:"type"` +} + +// CreateFineTuningJob create a fine tuning job. 
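+// Sketch: start a job from a previously uploaded training file and then poll
+// it (the file ID and model are placeholders):
+//
+//	job, err := client.CreateFineTuningJob(ctx, FineTuningJobRequest{
+//		TrainingFile: "file-abc123",
+//		Model:        GPT4oMini20240718,
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	job, err = client.RetrieveFineTuningJob(ctx, job.ID)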
+func (c *Client) CreateFineTuningJob( + ctx context.Context, + request FineTuningJobRequest, +) (response FineTuningJob, err error) { + urlSuffix := "/fine_tuning/jobs" + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// CancelFineTuningJob cancel a fine tuning job. +func (c *Client) CancelFineTuningJob(ctx context.Context, fineTuningJobID string) (response FineTuningJob, err error) { + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL("/fine_tuning/jobs/"+fineTuningJobID+"/cancel")) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// RetrieveFineTuningJob retrieve a fine tuning job. +func (c *Client) RetrieveFineTuningJob( + ctx context.Context, + fineTuningJobID string, +) (response FineTuningJob, err error) { + urlSuffix := fmt.Sprintf("/fine_tuning/jobs/%s", fineTuningJobID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +type listFineTuningJobEventsParameters struct { + after *string + limit *int +} + +type ListFineTuningJobEventsParameter func(*listFineTuningJobEventsParameters) + +func ListFineTuningJobEventsWithAfter(after string) ListFineTuningJobEventsParameter { + return func(args *listFineTuningJobEventsParameters) { + args.after = &after + } +} + +func ListFineTuningJobEventsWithLimit(limit int) ListFineTuningJobEventsParameter { + return func(args *listFineTuningJobEventsParameters) { + args.limit = &limit + } +} + +// ListFineTuningJobs list fine tuning jobs events. +func (c *Client) ListFineTuningJobEvents( + ctx context.Context, + fineTuningJobID string, + setters ...ListFineTuningJobEventsParameter, +) (response FineTuningJobEventList, err error) { + parameters := &listFineTuningJobEventsParameters{ + after: nil, + limit: nil, + } + + for _, setter := range setters { + setter(parameters) + } + + urlValues := url.Values{} + if parameters.after != nil { + urlValues.Add("after", *parameters.after) + } + if parameters.limit != nil { + urlValues.Add("limit", fmt.Sprintf("%d", *parameters.limit)) + } + + encodedValues := "" + if len(urlValues) > 0 { + encodedValues = "?" + urlValues.Encode() + } + + req, err := c.newRequest( + ctx, + http.MethodGet, + c.fullURL("/fine_tuning/jobs/"+fineTuningJobID+"/events"+encodedValues), + ) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/image.go b/constraint/vendor/github.com/sashabaranov/go-openai/image.go new file mode 100644 index 000000000..577d7db95 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/image.go @@ -0,0 +1,209 @@ +package openai + +import ( + "bytes" + "context" + "net/http" + "os" + "strconv" +) + +// Image sizes defined by the OpenAI API. +const ( + CreateImageSize256x256 = "256x256" + CreateImageSize512x512 = "512x512" + CreateImageSize1024x1024 = "1024x1024" + // dall-e-3 supported only. 
+ CreateImageSize1792x1024 = "1792x1024" + CreateImageSize1024x1792 = "1024x1792" +) + +const ( + CreateImageResponseFormatURL = "url" + CreateImageResponseFormatB64JSON = "b64_json" +) + +const ( + CreateImageModelDallE2 = "dall-e-2" + CreateImageModelDallE3 = "dall-e-3" +) + +const ( + CreateImageQualityHD = "hd" + CreateImageQualityStandard = "standard" +) + +const ( + CreateImageStyleVivid = "vivid" + CreateImageStyleNatural = "natural" +) + +// ImageRequest represents the request structure for the image API. +type ImageRequest struct { + Prompt string `json:"prompt,omitempty"` + Model string `json:"model,omitempty"` + N int `json:"n,omitempty"` + Quality string `json:"quality,omitempty"` + Size string `json:"size,omitempty"` + Style string `json:"style,omitempty"` + ResponseFormat string `json:"response_format,omitempty"` + User string `json:"user,omitempty"` +} + +// ImageResponse represents a response structure for image API. +type ImageResponse struct { + Created int64 `json:"created,omitempty"` + Data []ImageResponseDataInner `json:"data,omitempty"` + + httpHeader +} + +// ImageResponseDataInner represents a response data structure for image API. +type ImageResponseDataInner struct { + URL string `json:"url,omitempty"` + B64JSON string `json:"b64_json,omitempty"` + RevisedPrompt string `json:"revised_prompt,omitempty"` +} + +// CreateImage - API call to create an image. This is the main endpoint of the DALL-E API. +func (c *Client) CreateImage(ctx context.Context, request ImageRequest) (response ImageResponse, err error) { + urlSuffix := "/images/generations" + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix, withModel(request.Model)), + withBody(request), + ) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// ImageEditRequest represents the request structure for the image API. +type ImageEditRequest struct { + Image *os.File `json:"image,omitempty"` + Mask *os.File `json:"mask,omitempty"` + Prompt string `json:"prompt,omitempty"` + Model string `json:"model,omitempty"` + N int `json:"n,omitempty"` + Size string `json:"size,omitempty"` + ResponseFormat string `json:"response_format,omitempty"` +} + +// CreateEditImage - API call to create an image. This is the main endpoint of the DALL-E API. +func (c *Client) CreateEditImage(ctx context.Context, request ImageEditRequest) (response ImageResponse, err error) { + body := &bytes.Buffer{} + builder := c.createFormBuilder(body) + + // image + err = builder.CreateFormFile("image", request.Image) + if err != nil { + return + } + + // mask, it is optional + if request.Mask != nil { + err = builder.CreateFormFile("mask", request.Mask) + if err != nil { + return + } + } + + err = builder.WriteField("prompt", request.Prompt) + if err != nil { + return + } + + err = builder.WriteField("n", strconv.Itoa(request.N)) + if err != nil { + return + } + + err = builder.WriteField("size", request.Size) + if err != nil { + return + } + + err = builder.WriteField("response_format", request.ResponseFormat) + if err != nil { + return + } + + err = builder.Close() + if err != nil { + return + } + + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL("/images/edits", withModel(request.Model)), + withBody(body), + withContentType(builder.FormDataContentType()), + ) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// ImageVariRequest represents the request structure for the image API. 
+type ImageVariRequest struct { + Image *os.File `json:"image,omitempty"` + Model string `json:"model,omitempty"` + N int `json:"n,omitempty"` + Size string `json:"size,omitempty"` + ResponseFormat string `json:"response_format,omitempty"` +} + +// CreateVariImage - API call to create an image variation. This is the main endpoint of the DALL-E API. +// Use abbreviations(vari for variation) because ci-lint has a single-line length limit ... +func (c *Client) CreateVariImage(ctx context.Context, request ImageVariRequest) (response ImageResponse, err error) { + body := &bytes.Buffer{} + builder := c.createFormBuilder(body) + + // image + err = builder.CreateFormFile("image", request.Image) + if err != nil { + return + } + + err = builder.WriteField("n", strconv.Itoa(request.N)) + if err != nil { + return + } + + err = builder.WriteField("size", request.Size) + if err != nil { + return + } + + err = builder.WriteField("response_format", request.ResponseFormat) + if err != nil { + return + } + + err = builder.Close() + if err != nil { + return + } + + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL("/images/variations", withModel(request.Model)), + withBody(body), + withContentType(builder.FormDataContentType()), + ) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/internal/error_accumulator.go b/constraint/vendor/github.com/sashabaranov/go-openai/internal/error_accumulator.go new file mode 100644 index 000000000..3d3e805fe --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/internal/error_accumulator.go @@ -0,0 +1,44 @@ +package openai + +import ( + "bytes" + "fmt" + "io" +) + +type ErrorAccumulator interface { + Write(p []byte) error + Bytes() []byte +} + +type errorBuffer interface { + io.Writer + Len() int + Bytes() []byte +} + +type DefaultErrorAccumulator struct { + Buffer errorBuffer +} + +func NewErrorAccumulator() ErrorAccumulator { + return &DefaultErrorAccumulator{ + Buffer: &bytes.Buffer{}, + } +} + +func (e *DefaultErrorAccumulator) Write(p []byte) error { + _, err := e.Buffer.Write(p) + if err != nil { + return fmt.Errorf("error accumulator write error, %w", err) + } + return nil +} + +func (e *DefaultErrorAccumulator) Bytes() (errBytes []byte) { + if e.Buffer.Len() == 0 { + return + } + errBytes = e.Buffer.Bytes() + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/internal/form_builder.go b/constraint/vendor/github.com/sashabaranov/go-openai/internal/form_builder.go new file mode 100644 index 000000000..2224fad45 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/internal/form_builder.go @@ -0,0 +1,65 @@ +package openai + +import ( + "fmt" + "io" + "mime/multipart" + "os" + "path" +) + +type FormBuilder interface { + CreateFormFile(fieldname string, file *os.File) error + CreateFormFileReader(fieldname string, r io.Reader, filename string) error + WriteField(fieldname, value string) error + Close() error + FormDataContentType() string +} + +type DefaultFormBuilder struct { + writer *multipart.Writer +} + +func NewFormBuilder(body io.Writer) *DefaultFormBuilder { + return &DefaultFormBuilder{ + writer: multipart.NewWriter(body), + } +} + +func (fb *DefaultFormBuilder) CreateFormFile(fieldname string, file *os.File) error { + return fb.createFormFile(fieldname, file, file.Name()) +} + +func (fb *DefaultFormBuilder) CreateFormFileReader(fieldname string, r io.Reader, filename string) error { + return 
fb.createFormFile(fieldname, r, path.Base(filename)) +} + +func (fb *DefaultFormBuilder) createFormFile(fieldname string, r io.Reader, filename string) error { + if filename == "" { + return fmt.Errorf("filename cannot be empty") + } + + fieldWriter, err := fb.writer.CreateFormFile(fieldname, filename) + if err != nil { + return err + } + + _, err = io.Copy(fieldWriter, r) + if err != nil { + return err + } + + return nil +} + +func (fb *DefaultFormBuilder) WriteField(fieldname, value string) error { + return fb.writer.WriteField(fieldname, value) +} + +func (fb *DefaultFormBuilder) Close() error { + return fb.writer.Close() +} + +func (fb *DefaultFormBuilder) FormDataContentType() string { + return fb.writer.FormDataContentType() +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/internal/marshaller.go b/constraint/vendor/github.com/sashabaranov/go-openai/internal/marshaller.go new file mode 100644 index 000000000..223a4dc1c --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/internal/marshaller.go @@ -0,0 +1,15 @@ +package openai + +import ( + "encoding/json" +) + +type Marshaller interface { + Marshal(value any) ([]byte, error) +} + +type JSONMarshaller struct{} + +func (jm *JSONMarshaller) Marshal(value any) ([]byte, error) { + return json.Marshal(value) +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/internal/request_builder.go b/constraint/vendor/github.com/sashabaranov/go-openai/internal/request_builder.go new file mode 100644 index 000000000..5699f6b18 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/internal/request_builder.go @@ -0,0 +1,52 @@ +package openai + +import ( + "bytes" + "context" + "io" + "net/http" +) + +type RequestBuilder interface { + Build(ctx context.Context, method, url string, body any, header http.Header) (*http.Request, error) +} + +type HTTPRequestBuilder struct { + marshaller Marshaller +} + +func NewRequestBuilder() *HTTPRequestBuilder { + return &HTTPRequestBuilder{ + marshaller: &JSONMarshaller{}, + } +} + +func (b *HTTPRequestBuilder) Build( + ctx context.Context, + method string, + url string, + body any, + header http.Header, +) (req *http.Request, err error) { + var bodyReader io.Reader + if body != nil { + if v, ok := body.(io.Reader); ok { + bodyReader = v + } else { + var reqBytes []byte + reqBytes, err = b.marshaller.Marshal(body) + if err != nil { + return + } + bodyReader = bytes.NewBuffer(reqBytes) + } + } + req, err = http.NewRequestWithContext(ctx, method, url, bodyReader) + if err != nil { + return + } + if header != nil { + req.Header = header + } + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/internal/unmarshaler.go b/constraint/vendor/github.com/sashabaranov/go-openai/internal/unmarshaler.go new file mode 100644 index 000000000..882876022 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/internal/unmarshaler.go @@ -0,0 +1,15 @@ +package openai + +import ( + "encoding/json" +) + +type Unmarshaler interface { + Unmarshal(data []byte, v any) error +} + +type JSONUnmarshaler struct{} + +func (jm *JSONUnmarshaler) Unmarshal(data []byte, v any) error { + return json.Unmarshal(data, v) +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/messages.go b/constraint/vendor/github.com/sashabaranov/go-openai/messages.go new file mode 100644 index 000000000..902363938 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/messages.go @@ -0,0 +1,218 @@ +package openai + +import ( + 
"context" + "fmt" + "net/http" + "net/url" +) + +const ( + messagesSuffix = "messages" +) + +type Message struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int `json:"created_at"` + ThreadID string `json:"thread_id"` + Role string `json:"role"` + Content []MessageContent `json:"content"` + FileIds []string `json:"file_ids"` //nolint:revive //backwards-compatibility + AssistantID *string `json:"assistant_id,omitempty"` + RunID *string `json:"run_id,omitempty"` + Metadata map[string]any `json:"metadata"` + + httpHeader +} + +type MessagesList struct { + Messages []Message `json:"data"` + + Object string `json:"object"` + FirstID *string `json:"first_id"` + LastID *string `json:"last_id"` + HasMore bool `json:"has_more"` + + httpHeader +} + +type MessageContent struct { + Type string `json:"type"` + Text *MessageText `json:"text,omitempty"` + ImageFile *ImageFile `json:"image_file,omitempty"` +} +type MessageText struct { + Value string `json:"value"` + Annotations []any `json:"annotations"` +} + +type ImageFile struct { + FileID string `json:"file_id"` +} + +type MessageRequest struct { + Role string `json:"role"` + Content string `json:"content"` + FileIds []string `json:"file_ids,omitempty"` //nolint:revive // backwards-compatibility + Metadata map[string]any `json:"metadata,omitempty"` + Attachments []ThreadAttachment `json:"attachments,omitempty"` +} + +type MessageFile struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int `json:"created_at"` + MessageID string `json:"message_id"` + + httpHeader +} + +type MessageFilesList struct { + MessageFiles []MessageFile `json:"data"` + + httpHeader +} + +type MessageDeletionStatus struct { + ID string `json:"id"` + Object string `json:"object"` + Deleted bool `json:"deleted"` + + httpHeader +} + +// CreateMessage creates a new message. +func (c *Client) CreateMessage(ctx context.Context, threadID string, request MessageRequest) (msg Message, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/%s", threadID, messagesSuffix) + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &msg) + return +} + +// ListMessage fetches all messages in the thread. +func (c *Client) ListMessage(ctx context.Context, threadID string, + limit *int, + order *string, + after *string, + before *string, + runID *string, +) (messages MessagesList, err error) { + urlValues := url.Values{} + if limit != nil { + urlValues.Add("limit", fmt.Sprintf("%d", *limit)) + } + if order != nil { + urlValues.Add("order", *order) + } + if after != nil { + urlValues.Add("after", *after) + } + if before != nil { + urlValues.Add("before", *before) + } + if runID != nil { + urlValues.Add("run_id", *runID) + } + + encodedValues := "" + if len(urlValues) > 0 { + encodedValues = "?" + urlValues.Encode() + } + + urlSuffix := fmt.Sprintf("/threads/%s/%s%s", threadID, messagesSuffix, encodedValues) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &messages) + return +} + +// RetrieveMessage retrieves a Message. 
+func (c *Client) RetrieveMessage( + ctx context.Context, + threadID, messageID string, +) (msg Message, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/%s/%s", threadID, messagesSuffix, messageID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &msg) + return +} + +// ModifyMessage modifies a message. +func (c *Client) ModifyMessage( + ctx context.Context, + threadID, messageID string, + metadata map[string]string, +) (msg Message, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/%s/%s", threadID, messagesSuffix, messageID) + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), + withBody(map[string]any{"metadata": metadata}), withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &msg) + return +} + +// RetrieveMessageFile fetches a message file. +func (c *Client) RetrieveMessageFile( + ctx context.Context, + threadID, messageID, fileID string, +) (file MessageFile, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/%s/%s/files/%s", threadID, messagesSuffix, messageID, fileID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &file) + return +} + +// ListMessageFiles fetches all files attached to a message. +func (c *Client) ListMessageFiles( + ctx context.Context, + threadID, messageID string, +) (files MessageFilesList, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/%s/%s/files", threadID, messagesSuffix, messageID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &files) + return +} + +// DeleteMessage deletes a message.. +func (c *Client) DeleteMessage( + ctx context.Context, + threadID, messageID string, +) (status MessageDeletionStatus, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/%s/%s", threadID, messagesSuffix, messageID) + req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &status) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/models.go b/constraint/vendor/github.com/sashabaranov/go-openai/models.go new file mode 100644 index 000000000..d94f98836 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/models.go @@ -0,0 +1,90 @@ +package openai + +import ( + "context" + "fmt" + "net/http" +) + +// Model struct represents an OpenAPI model. +type Model struct { + CreatedAt int64 `json:"created"` + ID string `json:"id"` + Object string `json:"object"` + OwnedBy string `json:"owned_by"` + Permission []Permission `json:"permission"` + Root string `json:"root"` + Parent string `json:"parent"` + + httpHeader +} + +// Permission struct represents an OpenAPI permission. 
+type Permission struct { + CreatedAt int64 `json:"created"` + ID string `json:"id"` + Object string `json:"object"` + AllowCreateEngine bool `json:"allow_create_engine"` + AllowSampling bool `json:"allow_sampling"` + AllowLogprobs bool `json:"allow_logprobs"` + AllowSearchIndices bool `json:"allow_search_indices"` + AllowView bool `json:"allow_view"` + AllowFineTuning bool `json:"allow_fine_tuning"` + Organization string `json:"organization"` + Group interface{} `json:"group"` + IsBlocking bool `json:"is_blocking"` +} + +// FineTuneModelDeleteResponse represents the deletion status of a fine-tuned model. +type FineTuneModelDeleteResponse struct { + ID string `json:"id"` + Object string `json:"object"` + Deleted bool `json:"deleted"` + + httpHeader +} + +// ModelsList is a list of models, including those that belong to the user or organization. +type ModelsList struct { + Models []Model `json:"data"` + + httpHeader +} + +// ListModels Lists the currently available models, +// and provides basic information about each model such as the model id and parent. +func (c *Client) ListModels(ctx context.Context) (models ModelsList, err error) { + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL("/models")) + if err != nil { + return + } + + err = c.sendRequest(req, &models) + return +} + +// GetModel Retrieves a model instance, providing basic information about +// the model such as the owner and permissioning. +func (c *Client) GetModel(ctx context.Context, modelID string) (model Model, err error) { + urlSuffix := fmt.Sprintf("/models/%s", modelID) + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix)) + if err != nil { + return + } + + err = c.sendRequest(req, &model) + return +} + +// DeleteFineTuneModel Deletes a fine-tune model. You must have the Owner +// role in your organization to delete a model. +func (c *Client) DeleteFineTuneModel(ctx context.Context, modelID string) ( + response FineTuneModelDeleteResponse, err error) { + req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL("/models/"+modelID)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/moderation.go b/constraint/vendor/github.com/sashabaranov/go-openai/moderation.go new file mode 100644 index 000000000..a0e09c0ee --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/moderation.go @@ -0,0 +1,107 @@ +package openai + +import ( + "context" + "errors" + "net/http" +) + +// The moderation endpoint is a tool you can use to check whether content complies with OpenAI's usage policies. +// Developers can thus identify content that our usage policies prohibits and take action, for instance by filtering it. + +// The default is text-moderation-latest which will be automatically upgraded over time. +// This ensures you are always using our most accurate model. +// If you use text-moderation-stable, we will provide advanced notice before updating the model. +// Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest. +const ( + ModerationOmniLatest = "omni-moderation-latest" + ModerationOmni20240926 = "omni-moderation-2024-09-26" + ModerationTextStable = "text-moderation-stable" + ModerationTextLatest = "text-moderation-latest" + // Deprecated: use ModerationTextStable and ModerationTextLatest instead. 
+ ModerationText001 = "text-moderation-001" +) + +var ( + ErrModerationInvalidModel = errors.New("this model is not supported with moderation, please use text-moderation-stable or text-moderation-latest instead") //nolint:lll +) + +var validModerationModel = map[string]struct{}{ + ModerationOmniLatest: {}, + ModerationOmni20240926: {}, + ModerationTextStable: {}, + ModerationTextLatest: {}, +} + +// ModerationRequest represents a request structure for moderation API. +type ModerationRequest struct { + Input string `json:"input,omitempty"` + Model string `json:"model,omitempty"` +} + +// Result represents one of possible moderation results. +type Result struct { + Categories ResultCategories `json:"categories"` + CategoryScores ResultCategoryScores `json:"category_scores"` + Flagged bool `json:"flagged"` +} + +// ResultCategories represents Categories of Result. +type ResultCategories struct { + Hate bool `json:"hate"` + HateThreatening bool `json:"hate/threatening"` + Harassment bool `json:"harassment"` + HarassmentThreatening bool `json:"harassment/threatening"` + SelfHarm bool `json:"self-harm"` + SelfHarmIntent bool `json:"self-harm/intent"` + SelfHarmInstructions bool `json:"self-harm/instructions"` + Sexual bool `json:"sexual"` + SexualMinors bool `json:"sexual/minors"` + Violence bool `json:"violence"` + ViolenceGraphic bool `json:"violence/graphic"` +} + +// ResultCategoryScores represents CategoryScores of Result. +type ResultCategoryScores struct { + Hate float32 `json:"hate"` + HateThreatening float32 `json:"hate/threatening"` + Harassment float32 `json:"harassment"` + HarassmentThreatening float32 `json:"harassment/threatening"` + SelfHarm float32 `json:"self-harm"` + SelfHarmIntent float32 `json:"self-harm/intent"` + SelfHarmInstructions float32 `json:"self-harm/instructions"` + Sexual float32 `json:"sexual"` + SexualMinors float32 `json:"sexual/minors"` + Violence float32 `json:"violence"` + ViolenceGraphic float32 `json:"violence/graphic"` +} + +// ModerationResponse represents a response structure for moderation API. +type ModerationResponse struct { + ID string `json:"id"` + Model string `json:"model"` + Results []Result `json:"results"` + + httpHeader +} + +// Moderations — perform a moderation api call over a string. +// Input can be an array or slice but a string will reduce the complexity. +func (c *Client) Moderations(ctx context.Context, request ModerationRequest) (response ModerationResponse, err error) { + if _, ok := validModerationModel[request.Model]; len(request.Model) > 0 && !ok { + err = ErrModerationInvalidModel + return + } + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL("/moderations", withModel(request.Model)), + withBody(&request), + ) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/ratelimit.go b/constraint/vendor/github.com/sashabaranov/go-openai/ratelimit.go new file mode 100644 index 000000000..e8953f716 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/ratelimit.go @@ -0,0 +1,43 @@ +package openai + +import ( + "net/http" + "strconv" + "time" +) + +// RateLimitHeaders struct represents Openai rate limits headers. 
+type RateLimitHeaders struct { + LimitRequests int `json:"x-ratelimit-limit-requests"` + LimitTokens int `json:"x-ratelimit-limit-tokens"` + RemainingRequests int `json:"x-ratelimit-remaining-requests"` + RemainingTokens int `json:"x-ratelimit-remaining-tokens"` + ResetRequests ResetTime `json:"x-ratelimit-reset-requests"` + ResetTokens ResetTime `json:"x-ratelimit-reset-tokens"` +} + +type ResetTime string + +func (r ResetTime) String() string { + return string(r) +} + +func (r ResetTime) Time() time.Time { + d, _ := time.ParseDuration(string(r)) + return time.Now().Add(d) +} + +func newRateLimitHeaders(h http.Header) RateLimitHeaders { + limitReq, _ := strconv.Atoi(h.Get("x-ratelimit-limit-requests")) + limitTokens, _ := strconv.Atoi(h.Get("x-ratelimit-limit-tokens")) + remainingReq, _ := strconv.Atoi(h.Get("x-ratelimit-remaining-requests")) + remainingTokens, _ := strconv.Atoi(h.Get("x-ratelimit-remaining-tokens")) + return RateLimitHeaders{ + LimitRequests: limitReq, + LimitTokens: limitTokens, + RemainingRequests: remainingReq, + RemainingTokens: remainingTokens, + ResetRequests: ResetTime(h.Get("x-ratelimit-reset-requests")), + ResetTokens: ResetTime(h.Get("x-ratelimit-reset-tokens")), + } +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/run.go b/constraint/vendor/github.com/sashabaranov/go-openai/run.go new file mode 100644 index 000000000..9c51aaf8d --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/run.go @@ -0,0 +1,454 @@ +package openai + +import ( + "context" + "fmt" + "net/http" + "net/url" +) + +type Run struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + ThreadID string `json:"thread_id"` + AssistantID string `json:"assistant_id"` + Status RunStatus `json:"status"` + RequiredAction *RunRequiredAction `json:"required_action,omitempty"` + LastError *RunLastError `json:"last_error,omitempty"` + ExpiresAt int64 `json:"expires_at"` + StartedAt *int64 `json:"started_at,omitempty"` + CancelledAt *int64 `json:"cancelled_at,omitempty"` + FailedAt *int64 `json:"failed_at,omitempty"` + CompletedAt *int64 `json:"completed_at,omitempty"` + Model string `json:"model"` + Instructions string `json:"instructions,omitempty"` + Tools []Tool `json:"tools"` + FileIDS []string `json:"file_ids"` //nolint:revive // backwards-compatibility + Metadata map[string]any `json:"metadata"` + Usage Usage `json:"usage,omitempty"` + + Temperature *float32 `json:"temperature,omitempty"` + // The maximum number of prompt tokens that may be used over the course of the run. + // If the run exceeds the number of prompt tokens specified, the run will end with status 'incomplete'. + MaxPromptTokens int `json:"max_prompt_tokens,omitempty"` + // The maximum number of completion tokens that may be used over the course of the run. + // If the run exceeds the number of completion tokens specified, the run will end with status 'incomplete'. + MaxCompletionTokens int `json:"max_completion_tokens,omitempty"` + // ThreadTruncationStrategy defines the truncation strategy to use for the thread. 
+ TruncationStrategy *ThreadTruncationStrategy `json:"truncation_strategy,omitempty"` + + httpHeader +} + +type RunStatus string + +const ( + RunStatusQueued RunStatus = "queued" + RunStatusInProgress RunStatus = "in_progress" + RunStatusRequiresAction RunStatus = "requires_action" + RunStatusCancelling RunStatus = "cancelling" + RunStatusFailed RunStatus = "failed" + RunStatusCompleted RunStatus = "completed" + RunStatusIncomplete RunStatus = "incomplete" + RunStatusExpired RunStatus = "expired" + RunStatusCancelled RunStatus = "cancelled" +) + +type RunRequiredAction struct { + Type RequiredActionType `json:"type"` + SubmitToolOutputs *SubmitToolOutputs `json:"submit_tool_outputs,omitempty"` +} + +type RequiredActionType string + +const ( + RequiredActionTypeSubmitToolOutputs RequiredActionType = "submit_tool_outputs" +) + +type SubmitToolOutputs struct { + ToolCalls []ToolCall `json:"tool_calls"` +} + +type RunLastError struct { + Code RunError `json:"code"` + Message string `json:"message"` +} + +type RunError string + +const ( + RunErrorServerError RunError = "server_error" + RunErrorRateLimitExceeded RunError = "rate_limit_exceeded" +) + +type RunRequest struct { + AssistantID string `json:"assistant_id"` + Model string `json:"model,omitempty"` + Instructions string `json:"instructions,omitempty"` + AdditionalInstructions string `json:"additional_instructions,omitempty"` + AdditionalMessages []ThreadMessage `json:"additional_messages,omitempty"` + Tools []Tool `json:"tools,omitempty"` + Metadata map[string]any `json:"metadata,omitempty"` + + // Sampling temperature between 0 and 2. Higher values like 0.8 are more random. + // lower values are more focused and deterministic. + Temperature *float32 `json:"temperature,omitempty"` + TopP *float32 `json:"top_p,omitempty"` + + // The maximum number of prompt tokens that may be used over the course of the run. + // If the run exceeds the number of prompt tokens specified, the run will end with status 'incomplete'. + MaxPromptTokens int `json:"max_prompt_tokens,omitempty"` + + // The maximum number of completion tokens that may be used over the course of the run. + // If the run exceeds the number of completion tokens specified, the run will end with status 'incomplete'. + MaxCompletionTokens int `json:"max_completion_tokens,omitempty"` + + // ThreadTruncationStrategy defines the truncation strategy to use for the thread. + TruncationStrategy *ThreadTruncationStrategy `json:"truncation_strategy,omitempty"` + + // This can be either a string or a ToolChoice object. + ToolChoice any `json:"tool_choice,omitempty"` + // This can be either a string or a ResponseFormat object. + ResponseFormat any `json:"response_format,omitempty"` + // Disable the default behavior of parallel tool calls by setting it: false. + ParallelToolCalls any `json:"parallel_tool_calls,omitempty"` +} + +// ThreadTruncationStrategy defines the truncation strategy to use for the thread. +// https://platform.openai.com/docs/assistants/how-it-works/truncation-strategy. +type ThreadTruncationStrategy struct { + // default 'auto'. + Type TruncationStrategy `json:"type,omitempty"` + // this field should be set if the truncation strategy is set to LastMessages. + LastMessages *int `json:"last_messages,omitempty"` +} + +// TruncationStrategy defines the existing truncation strategies existing for thread management in an assistant. 
+type TruncationStrategy string + +const ( + // TruncationStrategyAuto messages in the middle of the thread will be dropped to fit the context length of the model. + TruncationStrategyAuto = TruncationStrategy("auto") + // TruncationStrategyLastMessages the thread will be truncated to the n most recent messages in the thread. + TruncationStrategyLastMessages = TruncationStrategy("last_messages") +) + +// ReponseFormat specifies the format the model must output. +// https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-response_format. +// Type can either be text or json_object. +type ReponseFormat struct { + Type string `json:"type"` +} + +type RunModifyRequest struct { + Metadata map[string]any `json:"metadata,omitempty"` +} + +// RunList is a list of runs. +type RunList struct { + Runs []Run `json:"data"` + + httpHeader +} + +type SubmitToolOutputsRequest struct { + ToolOutputs []ToolOutput `json:"tool_outputs"` +} + +type ToolOutput struct { + ToolCallID string `json:"tool_call_id"` + Output any `json:"output"` +} + +type CreateThreadAndRunRequest struct { + RunRequest + Thread ThreadRequest `json:"thread"` +} + +type RunStep struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + AssistantID string `json:"assistant_id"` + ThreadID string `json:"thread_id"` + RunID string `json:"run_id"` + Type RunStepType `json:"type"` + Status RunStepStatus `json:"status"` + StepDetails StepDetails `json:"step_details"` + LastError *RunLastError `json:"last_error,omitempty"` + ExpiredAt *int64 `json:"expired_at,omitempty"` + CancelledAt *int64 `json:"cancelled_at,omitempty"` + FailedAt *int64 `json:"failed_at,omitempty"` + CompletedAt *int64 `json:"completed_at,omitempty"` + Metadata map[string]any `json:"metadata"` + + httpHeader +} + +type RunStepStatus string + +const ( + RunStepStatusInProgress RunStepStatus = "in_progress" + RunStepStatusCancelling RunStepStatus = "cancelled" + RunStepStatusFailed RunStepStatus = "failed" + RunStepStatusCompleted RunStepStatus = "completed" + RunStepStatusExpired RunStepStatus = "expired" +) + +type RunStepType string + +const ( + RunStepTypeMessageCreation RunStepType = "message_creation" + RunStepTypeToolCalls RunStepType = "tool_calls" +) + +type StepDetails struct { + Type RunStepType `json:"type"` + MessageCreation *StepDetailsMessageCreation `json:"message_creation,omitempty"` + ToolCalls []ToolCall `json:"tool_calls,omitempty"` +} + +type StepDetailsMessageCreation struct { + MessageID string `json:"message_id"` +} + +// RunStepList is a list of steps. +type RunStepList struct { + RunSteps []RunStep `json:"data"` + + FirstID string `json:"first_id"` + LastID string `json:"last_id"` + HasMore bool `json:"has_more"` + + httpHeader +} + +type Pagination struct { + Limit *int + Order *string + After *string + Before *string +} + +// CreateRun creates a new run. +func (c *Client) CreateRun( + ctx context.Context, + threadID string, + request RunRequest, +) (response Run, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/runs", threadID) + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix), + withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// RetrieveRun retrieves a run. 
+func (c *Client) RetrieveRun( + ctx context.Context, + threadID string, + runID string, +) (response Run, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/runs/%s", threadID, runID) + req, err := c.newRequest( + ctx, + http.MethodGet, + c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// ModifyRun modifies a run. +func (c *Client) ModifyRun( + ctx context.Context, + threadID string, + runID string, + request RunModifyRequest, +) (response Run, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/runs/%s", threadID, runID) + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix), + withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// ListRuns lists runs. +func (c *Client) ListRuns( + ctx context.Context, + threadID string, + pagination Pagination, +) (response RunList, err error) { + urlValues := url.Values{} + if pagination.Limit != nil { + urlValues.Add("limit", fmt.Sprintf("%d", *pagination.Limit)) + } + if pagination.Order != nil { + urlValues.Add("order", *pagination.Order) + } + if pagination.After != nil { + urlValues.Add("after", *pagination.After) + } + if pagination.Before != nil { + urlValues.Add("before", *pagination.Before) + } + + encodedValues := "" + if len(urlValues) > 0 { + encodedValues = "?" + urlValues.Encode() + } + + urlSuffix := fmt.Sprintf("/threads/%s/runs%s", threadID, encodedValues) + req, err := c.newRequest( + ctx, + http.MethodGet, + c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// SubmitToolOutputs submits tool outputs. +func (c *Client) SubmitToolOutputs( + ctx context.Context, + threadID string, + runID string, + request SubmitToolOutputsRequest) (response Run, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/runs/%s/submit_tool_outputs", threadID, runID) + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix), + withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// CancelRun cancels a run. +func (c *Client) CancelRun( + ctx context.Context, + threadID string, + runID string) (response Run, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/runs/%s/cancel", threadID, runID) + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// CreateThreadAndRun submits tool outputs. +func (c *Client) CreateThreadAndRun( + ctx context.Context, + request CreateThreadAndRunRequest) (response Run, err error) { + urlSuffix := "/threads/runs" + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix), + withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// RetrieveRunStep retrieves a run step. 
+func (c *Client) RetrieveRunStep( + ctx context.Context, + threadID string, + runID string, + stepID string, +) (response RunStep, err error) { + urlSuffix := fmt.Sprintf("/threads/%s/runs/%s/steps/%s", threadID, runID, stepID) + req, err := c.newRequest( + ctx, + http.MethodGet, + c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// ListRunSteps lists run steps. +func (c *Client) ListRunSteps( + ctx context.Context, + threadID string, + runID string, + pagination Pagination, +) (response RunStepList, err error) { + urlValues := url.Values{} + if pagination.Limit != nil { + urlValues.Add("limit", fmt.Sprintf("%d", *pagination.Limit)) + } + if pagination.Order != nil { + urlValues.Add("order", *pagination.Order) + } + if pagination.After != nil { + urlValues.Add("after", *pagination.After) + } + if pagination.Before != nil { + urlValues.Add("before", *pagination.Before) + } + + encodedValues := "" + if len(urlValues) > 0 { + encodedValues = "?" + urlValues.Encode() + } + + urlSuffix := fmt.Sprintf("/threads/%s/runs/%s/steps%s", threadID, runID, encodedValues) + req, err := c.newRequest( + ctx, + http.MethodGet, + c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/speech.go b/constraint/vendor/github.com/sashabaranov/go-openai/speech.go new file mode 100644 index 000000000..20b52e334 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/speech.go @@ -0,0 +1,59 @@ +package openai + +import ( + "context" + "net/http" +) + +type SpeechModel string + +const ( + TTSModel1 SpeechModel = "tts-1" + TTSModel1HD SpeechModel = "tts-1-hd" + TTSModelCanary SpeechModel = "canary-tts" +) + +type SpeechVoice string + +const ( + VoiceAlloy SpeechVoice = "alloy" + VoiceEcho SpeechVoice = "echo" + VoiceFable SpeechVoice = "fable" + VoiceOnyx SpeechVoice = "onyx" + VoiceNova SpeechVoice = "nova" + VoiceShimmer SpeechVoice = "shimmer" +) + +type SpeechResponseFormat string + +const ( + SpeechResponseFormatMp3 SpeechResponseFormat = "mp3" + SpeechResponseFormatOpus SpeechResponseFormat = "opus" + SpeechResponseFormatAac SpeechResponseFormat = "aac" + SpeechResponseFormatFlac SpeechResponseFormat = "flac" + SpeechResponseFormatWav SpeechResponseFormat = "wav" + SpeechResponseFormatPcm SpeechResponseFormat = "pcm" +) + +type CreateSpeechRequest struct { + Model SpeechModel `json:"model"` + Input string `json:"input"` + Voice SpeechVoice `json:"voice"` + ResponseFormat SpeechResponseFormat `json:"response_format,omitempty"` // Optional, default to mp3 + Speed float64 `json:"speed,omitempty"` // Optional, default to 1.0 +} + +func (c *Client) CreateSpeech(ctx context.Context, request CreateSpeechRequest) (response RawResponse, err error) { + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL("/audio/speech", withModel(string(request.Model))), + withBody(request), + withContentType("application/json"), + ) + if err != nil { + return + } + + return c.sendRequestRaw(req) +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/stream.go b/constraint/vendor/github.com/sashabaranov/go-openai/stream.go new file mode 100644 index 000000000..a61c7c970 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/stream.go @@ -0,0 +1,55 @@ +package openai + +import ( + "context" + "errors" + 
"net/http" +) + +var ( + ErrTooManyEmptyStreamMessages = errors.New("stream has sent too many empty messages") +) + +type CompletionStream struct { + *streamReader[CompletionResponse] +} + +// CreateCompletionStream — API call to create a completion w/ streaming +// support. It sets whether to stream back partial progress. If set, tokens will be +// sent as data-only server-sent events as they become available, with the +// stream terminated by a data: [DONE] message. +func (c *Client) CreateCompletionStream( + ctx context.Context, + request CompletionRequest, +) (stream *CompletionStream, err error) { + urlSuffix := "/completions" + if !checkEndpointSupportsModel(urlSuffix, request.Model) { + err = ErrCompletionUnsupportedModel + return + } + + if !checkPromptType(request.Prompt) { + err = ErrCompletionRequestPromptTypeNotSupported + return + } + + request.Stream = true + req, err := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(urlSuffix, withModel(request.Model)), + withBody(request), + ) + if err != nil { + return nil, err + } + + resp, err := sendRequestStream[CompletionResponse](c, req) + if err != nil { + return + } + stream = &CompletionStream{ + streamReader: resp, + } + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/stream_reader.go b/constraint/vendor/github.com/sashabaranov/go-openai/stream_reader.go new file mode 100644 index 000000000..ecfa26807 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/stream_reader.go @@ -0,0 +1,118 @@ +package openai + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + + utils "github.com/sashabaranov/go-openai/internal" +) + +var ( + headerData = []byte("data: ") + errorPrefix = []byte(`data: {"error":`) +) + +type streamable interface { + ChatCompletionStreamResponse | CompletionResponse +} + +type streamReader[T streamable] struct { + emptyMessagesLimit uint + isFinished bool + + reader *bufio.Reader + response *http.Response + errAccumulator utils.ErrorAccumulator + unmarshaler utils.Unmarshaler + + httpHeader +} + +func (stream *streamReader[T]) Recv() (response T, err error) { + rawLine, err := stream.RecvRaw() + if err != nil { + return + } + + err = stream.unmarshaler.Unmarshal(rawLine, &response) + if err != nil { + return + } + return response, nil +} + +func (stream *streamReader[T]) RecvRaw() ([]byte, error) { + if stream.isFinished { + return nil, io.EOF + } + + return stream.processLines() +} + +//nolint:gocognit +func (stream *streamReader[T]) processLines() ([]byte, error) { + var ( + emptyMessagesCount uint + hasErrorPrefix bool + ) + + for { + rawLine, readErr := stream.reader.ReadBytes('\n') + if readErr != nil || hasErrorPrefix { + respErr := stream.unmarshalError() + if respErr != nil { + return nil, fmt.Errorf("error, %w", respErr.Error) + } + return nil, readErr + } + + noSpaceLine := bytes.TrimSpace(rawLine) + if bytes.HasPrefix(noSpaceLine, errorPrefix) { + hasErrorPrefix = true + } + if !bytes.HasPrefix(noSpaceLine, headerData) || hasErrorPrefix { + if hasErrorPrefix { + noSpaceLine = bytes.TrimPrefix(noSpaceLine, headerData) + } + writeErr := stream.errAccumulator.Write(noSpaceLine) + if writeErr != nil { + return nil, writeErr + } + emptyMessagesCount++ + if emptyMessagesCount > stream.emptyMessagesLimit { + return nil, ErrTooManyEmptyStreamMessages + } + + continue + } + + noPrefixLine := bytes.TrimPrefix(noSpaceLine, headerData) + if string(noPrefixLine) == "[DONE]" { + stream.isFinished = true + return nil, io.EOF + } + + return noPrefixLine, nil + } +} 
+ +func (stream *streamReader[T]) unmarshalError() (errResp *ErrorResponse) { + errBytes := stream.errAccumulator.Bytes() + if len(errBytes) == 0 { + return + } + + err := stream.unmarshaler.Unmarshal(errBytes, &errResp) + if err != nil { + errResp = nil + } + + return +} + +func (stream *streamReader[T]) Close() error { + return stream.response.Body.Close() +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/thread.go b/constraint/vendor/github.com/sashabaranov/go-openai/thread.go new file mode 100644 index 000000000..bc08e2bcb --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/thread.go @@ -0,0 +1,171 @@ +package openai + +import ( + "context" + "net/http" +) + +const ( + threadsSuffix = "/threads" +) + +type Thread struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + Metadata map[string]any `json:"metadata"` + ToolResources ToolResources `json:"tool_resources,omitempty"` + + httpHeader +} + +type ThreadRequest struct { + Messages []ThreadMessage `json:"messages,omitempty"` + Metadata map[string]any `json:"metadata,omitempty"` + ToolResources *ToolResourcesRequest `json:"tool_resources,omitempty"` +} + +type ToolResources struct { + CodeInterpreter *CodeInterpreterToolResources `json:"code_interpreter,omitempty"` + FileSearch *FileSearchToolResources `json:"file_search,omitempty"` +} + +type CodeInterpreterToolResources struct { + FileIDs []string `json:"file_ids,omitempty"` +} + +type FileSearchToolResources struct { + VectorStoreIDs []string `json:"vector_store_ids,omitempty"` +} + +type ToolResourcesRequest struct { + CodeInterpreter *CodeInterpreterToolResourcesRequest `json:"code_interpreter,omitempty"` + FileSearch *FileSearchToolResourcesRequest `json:"file_search,omitempty"` +} + +type CodeInterpreterToolResourcesRequest struct { + FileIDs []string `json:"file_ids,omitempty"` +} + +type FileSearchToolResourcesRequest struct { + VectorStoreIDs []string `json:"vector_store_ids,omitempty"` + VectorStores []VectorStoreToolResources `json:"vector_stores,omitempty"` +} + +type VectorStoreToolResources struct { + FileIDs []string `json:"file_ids,omitempty"` + ChunkingStrategy *ChunkingStrategy `json:"chunking_strategy,omitempty"` + Metadata map[string]any `json:"metadata,omitempty"` +} + +type ChunkingStrategy struct { + Type ChunkingStrategyType `json:"type"` + Static *StaticChunkingStrategy `json:"static,omitempty"` +} + +type StaticChunkingStrategy struct { + MaxChunkSizeTokens int `json:"max_chunk_size_tokens"` + ChunkOverlapTokens int `json:"chunk_overlap_tokens"` +} + +type ChunkingStrategyType string + +const ( + ChunkingStrategyTypeAuto ChunkingStrategyType = "auto" + ChunkingStrategyTypeStatic ChunkingStrategyType = "static" +) + +type ModifyThreadRequest struct { + Metadata map[string]any `json:"metadata"` + ToolResources *ToolResources `json:"tool_resources,omitempty"` +} + +type ThreadMessageRole string + +const ( + ThreadMessageRoleAssistant ThreadMessageRole = "assistant" + ThreadMessageRoleUser ThreadMessageRole = "user" +) + +type ThreadMessage struct { + Role ThreadMessageRole `json:"role"` + Content string `json:"content"` + FileIDs []string `json:"file_ids,omitempty"` + Attachments []ThreadAttachment `json:"attachments,omitempty"` + Metadata map[string]any `json:"metadata,omitempty"` +} + +type ThreadAttachment struct { + FileID string `json:"file_id"` + Tools []ThreadAttachmentTool `json:"tools"` +} + +type ThreadAttachmentTool struct { + Type string `json:"type"` +} + +type 
ThreadDeleteResponse struct { + ID string `json:"id"` + Object string `json:"object"` + Deleted bool `json:"deleted"` + + httpHeader +} + +// CreateThread creates a new thread. +func (c *Client) CreateThread(ctx context.Context, request ThreadRequest) (response Thread, err error) { + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(threadsSuffix), withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// RetrieveThread retrieves a thread. +func (c *Client) RetrieveThread(ctx context.Context, threadID string) (response Thread, err error) { + urlSuffix := threadsSuffix + "/" + threadID + req, err := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// ModifyThread modifies a thread. +func (c *Client) ModifyThread( + ctx context.Context, + threadID string, + request ModifyThreadRequest, +) (response Thread, err error) { + urlSuffix := threadsSuffix + "/" + threadID + req, err := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} + +// DeleteThread deletes a thread. +func (c *Client) DeleteThread( + ctx context.Context, + threadID string, +) (response ThreadDeleteResponse, err error) { + urlSuffix := threadsSuffix + "/" + threadID + req, err := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + if err != nil { + return + } + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sashabaranov/go-openai/vector_store.go b/constraint/vendor/github.com/sashabaranov/go-openai/vector_store.go new file mode 100644 index 000000000..682bb1cf9 --- /dev/null +++ b/constraint/vendor/github.com/sashabaranov/go-openai/vector_store.go @@ -0,0 +1,348 @@ +package openai + +import ( + "context" + "fmt" + "net/http" + "net/url" +) + +const ( + vectorStoresSuffix = "/vector_stores" + vectorStoresFilesSuffix = "/files" + vectorStoresFileBatchesSuffix = "/file_batches" +) + +type VectorStoreFileCount struct { + InProgress int `json:"in_progress"` + Completed int `json:"completed"` + Failed int `json:"failed"` + Cancelled int `json:"cancelled"` + Total int `json:"total"` +} + +type VectorStore struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + Name string `json:"name"` + UsageBytes int `json:"usage_bytes"` + FileCounts VectorStoreFileCount `json:"file_counts"` + Status string `json:"status"` + ExpiresAfter *VectorStoreExpires `json:"expires_after"` + ExpiresAt *int `json:"expires_at"` + Metadata map[string]any `json:"metadata"` + + httpHeader +} + +type VectorStoreExpires struct { + Anchor string `json:"anchor"` + Days int `json:"days"` +} + +// VectorStoreRequest provides the vector store request parameters. +type VectorStoreRequest struct { + Name string `json:"name,omitempty"` + FileIDs []string `json:"file_ids,omitempty"` + ExpiresAfter *VectorStoreExpires `json:"expires_after,omitempty"` + Metadata map[string]any `json:"metadata,omitempty"` +} + +// VectorStoresList is a list of vector store. 
+type VectorStoresList struct { + VectorStores []VectorStore `json:"data"` + LastID *string `json:"last_id"` + FirstID *string `json:"first_id"` + HasMore bool `json:"has_more"` + httpHeader +} + +type VectorStoreDeleteResponse struct { + ID string `json:"id"` + Object string `json:"object"` + Deleted bool `json:"deleted"` + + httpHeader +} + +type VectorStoreFile struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + VectorStoreID string `json:"vector_store_id"` + UsageBytes int `json:"usage_bytes"` + Status string `json:"status"` + + httpHeader +} + +type VectorStoreFileRequest struct { + FileID string `json:"file_id"` +} + +type VectorStoreFilesList struct { + VectorStoreFiles []VectorStoreFile `json:"data"` + FirstID *string `json:"first_id"` + LastID *string `json:"last_id"` + HasMore bool `json:"has_more"` + + httpHeader +} + +type VectorStoreFileBatch struct { + ID string `json:"id"` + Object string `json:"object"` + CreatedAt int64 `json:"created_at"` + VectorStoreID string `json:"vector_store_id"` + Status string `json:"status"` + FileCounts VectorStoreFileCount `json:"file_counts"` + + httpHeader +} + +type VectorStoreFileBatchRequest struct { + FileIDs []string `json:"file_ids"` +} + +// CreateVectorStore creates a new vector store. +func (c *Client) CreateVectorStore(ctx context.Context, request VectorStoreRequest) (response VectorStore, err error) { + req, _ := c.newRequest( + ctx, + http.MethodPost, + c.fullURL(vectorStoresSuffix), + withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion), + ) + + err = c.sendRequest(req, &response) + return +} + +// RetrieveVectorStore retrieves an vector store. +func (c *Client) RetrieveVectorStore( + ctx context.Context, + vectorStoreID string, +) (response VectorStore, err error) { + urlSuffix := fmt.Sprintf("%s/%s", vectorStoresSuffix, vectorStoreID) + req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} + +// ModifyVectorStore modifies a vector store. +func (c *Client) ModifyVectorStore( + ctx context.Context, + vectorStoreID string, + request VectorStoreRequest, +) (response VectorStore, err error) { + urlSuffix := fmt.Sprintf("%s/%s", vectorStoresSuffix, vectorStoreID) + req, _ := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} + +// DeleteVectorStore deletes an vector store. +func (c *Client) DeleteVectorStore( + ctx context.Context, + vectorStoreID string, +) (response VectorStoreDeleteResponse, err error) { + urlSuffix := fmt.Sprintf("%s/%s", vectorStoresSuffix, vectorStoreID) + req, _ := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} + +// ListVectorStores Lists the currently available vector store. 
+func (c *Client) ListVectorStores( + ctx context.Context, + pagination Pagination, +) (response VectorStoresList, err error) { + urlValues := url.Values{} + + if pagination.After != nil { + urlValues.Add("after", *pagination.After) + } + if pagination.Order != nil { + urlValues.Add("order", *pagination.Order) + } + if pagination.Limit != nil { + urlValues.Add("limit", fmt.Sprintf("%d", *pagination.Limit)) + } + if pagination.Before != nil { + urlValues.Add("before", *pagination.Before) + } + + encodedValues := "" + if len(urlValues) > 0 { + encodedValues = "?" + urlValues.Encode() + } + + urlSuffix := fmt.Sprintf("%s%s", vectorStoresSuffix, encodedValues) + req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} + +// CreateVectorStoreFile creates a new vector store file. +func (c *Client) CreateVectorStoreFile( + ctx context.Context, + vectorStoreID string, + request VectorStoreFileRequest, +) (response VectorStoreFile, err error) { + urlSuffix := fmt.Sprintf("%s/%s%s", vectorStoresSuffix, vectorStoreID, vectorStoresFilesSuffix) + req, _ := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), + withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} + +// RetrieveVectorStoreFile retrieves a vector store file. +func (c *Client) RetrieveVectorStoreFile( + ctx context.Context, + vectorStoreID string, + fileID string, +) (response VectorStoreFile, err error) { + urlSuffix := fmt.Sprintf("%s/%s%s/%s", vectorStoresSuffix, vectorStoreID, vectorStoresFilesSuffix, fileID) + req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} + +// DeleteVectorStoreFile deletes an existing file. +func (c *Client) DeleteVectorStoreFile( + ctx context.Context, + vectorStoreID string, + fileID string, +) (err error) { + urlSuffix := fmt.Sprintf("%s/%s%s/%s", vectorStoresSuffix, vectorStoreID, vectorStoresFilesSuffix, fileID) + req, _ := c.newRequest(ctx, http.MethodDelete, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, nil) + return +} + +// ListVectorStoreFiles Lists the currently available files for a vector store. +func (c *Client) ListVectorStoreFiles( + ctx context.Context, + vectorStoreID string, + pagination Pagination, +) (response VectorStoreFilesList, err error) { + urlValues := url.Values{} + if pagination.After != nil { + urlValues.Add("after", *pagination.After) + } + if pagination.Limit != nil { + urlValues.Add("limit", fmt.Sprintf("%d", *pagination.Limit)) + } + if pagination.Before != nil { + urlValues.Add("before", *pagination.Before) + } + if pagination.Order != nil { + urlValues.Add("order", *pagination.Order) + } + + encodedValues := "" + if len(urlValues) > 0 { + encodedValues = "?" + urlValues.Encode() + } + + urlSuffix := fmt.Sprintf("%s/%s%s%s", vectorStoresSuffix, vectorStoreID, vectorStoresFilesSuffix, encodedValues) + req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} + +// CreateVectorStoreFileBatch creates a new vector store file batch. 
+func (c *Client) CreateVectorStoreFileBatch( + ctx context.Context, + vectorStoreID string, + request VectorStoreFileBatchRequest, +) (response VectorStoreFileBatch, err error) { + urlSuffix := fmt.Sprintf("%s/%s%s", vectorStoresSuffix, vectorStoreID, vectorStoresFileBatchesSuffix) + req, _ := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), + withBody(request), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} + +// RetrieveVectorStoreFileBatch retrieves a vector store file batch. +func (c *Client) RetrieveVectorStoreFileBatch( + ctx context.Context, + vectorStoreID string, + batchID string, +) (response VectorStoreFileBatch, err error) { + urlSuffix := fmt.Sprintf("%s/%s%s/%s", vectorStoresSuffix, vectorStoreID, vectorStoresFileBatchesSuffix, batchID) + req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} + +// CancelVectorStoreFileBatch cancel a new vector store file batch. +func (c *Client) CancelVectorStoreFileBatch( + ctx context.Context, + vectorStoreID string, + batchID string, +) (response VectorStoreFileBatch, err error) { + urlSuffix := fmt.Sprintf("%s/%s%s/%s%s", vectorStoresSuffix, + vectorStoreID, vectorStoresFileBatchesSuffix, batchID, "/cancel") + req, _ := c.newRequest(ctx, http.MethodPost, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} + +// ListVectorStoreFiles Lists the currently available files for a vector store. +func (c *Client) ListVectorStoreFilesInBatch( + ctx context.Context, + vectorStoreID string, + batchID string, + pagination Pagination, +) (response VectorStoreFilesList, err error) { + urlValues := url.Values{} + if pagination.After != nil { + urlValues.Add("after", *pagination.After) + } + if pagination.Limit != nil { + urlValues.Add("limit", fmt.Sprintf("%d", *pagination.Limit)) + } + if pagination.Before != nil { + urlValues.Add("before", *pagination.Before) + } + if pagination.Order != nil { + urlValues.Add("order", *pagination.Order) + } + + encodedValues := "" + if len(urlValues) > 0 { + encodedValues = "?" + urlValues.Encode() + } + + urlSuffix := fmt.Sprintf("%s/%s%s/%s%s%s", vectorStoresSuffix, + vectorStoreID, vectorStoresFileBatchesSuffix, batchID, "/files", encodedValues) + req, _ := c.newRequest(ctx, http.MethodGet, c.fullURL(urlSuffix), + withBetaAssistantVersion(c.config.AssistantVersion)) + + err = c.sendRequest(req, &response) + return +} diff --git a/constraint/vendor/github.com/sethvargo/go-retry/LICENSE b/constraint/vendor/github.com/sethvargo/go-retry/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/constraint/vendor/github.com/sethvargo/go-retry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
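The vector store client methods vendored above (vector_store.go) carry no usage example in this change, so here is a minimal sketch of how they might be called. It assumes the usual go-openai client constructor; the API key, store name, and file IDs are placeholders, not values taken from this diff.

```go
// Illustrative only: exercising the vector store client methods vendored above.
// The API key, store name, and file IDs are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	ctx := context.Background()
	client := openai.NewClient("sk-placeholder")

	// Create a store seeded with one previously uploaded file.
	store, err := client.CreateVectorStore(ctx, openai.VectorStoreRequest{
		Name:    "docs",
		FileIDs: []string{"file-abc123"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Attach another file, then read back the store's counters.
	if _, err := client.CreateVectorStoreFile(ctx, store.ID, openai.VectorStoreFileRequest{
		FileID: "file-def456",
	}); err != nil {
		log.Fatal(err)
	}

	got, err := client.RetrieveVectorStore(ctx, store.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("vector store %s holds %d files\n", got.ID, got.FileCounts.Total)
}
```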
diff --git a/constraint/vendor/github.com/sethvargo/go-retry/README.md b/constraint/vendor/github.com/sethvargo/go-retry/README.md new file mode 100644 index 000000000..47731828f --- /dev/null +++ b/constraint/vendor/github.com/sethvargo/go-retry/README.md @@ -0,0 +1,185 @@ +# Retry + +[![GoDoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/mod/github.com/sethvargo/go-retry) + +Retry is a Go library for facilitating retry logic and backoff. It's highly +extensible with full control over how and when retries occur. You can also write +your own custom backoff functions by implementing the Backoff interface. + +## Features + +- **Extensible** - Inspired by Go's built-in HTTP package, this Go backoff and + retry library is extensible via middleware. You can write custom backoff + functions or use a provided filter. + +- **Independent** - No external dependencies besides the Go standard library, + meaning it won't bloat your project. + +- **Concurrent** - Unless otherwise specified, everything is safe for concurrent + use. + +- **Context-aware** - Use native Go contexts to control cancellation. + +## Usage + +Here is an example use for connecting to a database using Go's `database/sql` +package: + +```golang +package main + +import ( + "context" + "database/sql" + "log" + "time" + + "github.com/sethvargo/go-retry" +) + +func main() { + db, err := sql.Open("mysql", "...") + if err != nil { + log.Fatal(err) + } + + ctx := context.Background() + if err := retry.Fibonacci(ctx, 1*time.Second, func(ctx context.Context) error { + if err := db.PingContext(ctx); err != nil { + // This marks the error as retryable + return retry.RetryableError(err) + } + return nil + }); err != nil { + log.Fatal(err) + } +} +``` + +## Backoffs + +In addition to your own custom algorithms, there are built-in algorithms for +backoff in the library. + +### Constant + +A very rudimentary backoff, just returns a constant value. Here is an example: + +```text +1s -> 1s -> 1s -> 1s -> 1s -> 1s +``` + +Usage: + +```golang +NewConstant(1 * time.Second) +``` + +### Exponential + +Arguably the most common backoff, the next value is double the previous value. +Here is an example: + +```text +1s -> 2s -> 4s -> 8s -> 16s -> 32s -> 64s +``` + +Usage: + +```golang +NewExponential(1 * time.Second) +``` + +### Fibonacci + +The Fibonacci backoff uses the Fibonacci sequence to calculate the backoff. The +next value is the sum of the current value and the previous value. This means +retires happen quickly at first, but then gradually take slower, ideal for +network-type issues. Here is an example: + +```text +1s -> 1s -> 2s -> 3s -> 5s -> 8s -> 13s +``` + +Usage: + +```golang +NewFibonacci(1 * time.Second) +``` + +## Modifiers (Middleware) + +The built-in backoff algorithms never terminate and have no caps or limits - you +control their behavior with middleware. There's built-in middleware, but you can +also write custom middleware. + +### Jitter + +To reduce the changes of a thundering herd, add random jitter to the returned +value. + +```golang +b := NewFibonacci(1 * time.Second) + +// Return the next value, +/- 500ms +b = WithJitter(500*time.Millisecond, b) + +// Return the next value, +/- 5% of the result +b = WithJitterPercent(5, b) +``` + +### MaxRetries + +To terminate a retry, specify the maximum number of _retries_. Note this +is _retries_, not _attempts_. Attempts is retries + 1. 
+ +```golang +b := NewFibonacci(1 * time.Second) + +// Stop after 4 retries, when the 5th attempt has failed. In this example, the worst case elapsed +// time would be 1s + 1s + 2s + 3s = 7s. +b = WithMaxRetries(4, b) +``` + +### CappedDuration + +To ensure an individual calculated duration never exceeds a value, use a cap: + +```golang +b := NewFibonacci(1 * time.Second) + +// Ensure the maximum value is 2s. In this example, the sleep values would be +// 1s, 1s, 2s, 2s, 2s, 2s... +b = WithCappedDuration(2 * time.Second, b) +``` + +### WithMaxDuration + +For a best-effort limit on the total execution time, specify a max duration: + +```golang +b := NewFibonacci(1 * time.Second) + +// Ensure the maximum total retry time is 5s. +b = WithMaxDuration(5 * time.Second, b) +``` + +## Benchmarks + +Here are benchmarks against some other popular Go backoff and retry libraries. +You can run these benchmarks yourself via the `benchmark/` folder. Commas and +spacing fixed for clarity. + +```text +Benchmark/cenkalti-7 13,052,668 87.3 ns/op +Benchmark/lestrrat-7 902,044 1,355 ns/op +Benchmark/sethvargo-7 203,914,245 5.73 ns/op +``` + +## Notes and Caveats + +- Randomization uses `math/rand` seeded with the Unix timestamp instead of + `crypto/rand`. +- Ordering of addition of multiple modifiers will make a difference. + For example; ensure you add `CappedDuration` before `WithMaxDuration`, otherwise it may early out too early. + Another example is you could add `Jitter` before or after capping depending on your desired outcome. diff --git a/constraint/vendor/github.com/sethvargo/go-retry/backoff.go b/constraint/vendor/github.com/sethvargo/go-retry/backoff.go new file mode 100644 index 000000000..de3974fdb --- /dev/null +++ b/constraint/vendor/github.com/sethvargo/go-retry/backoff.go @@ -0,0 +1,134 @@ +package retry + +import ( + "sync" + "time" +) + +// Backoff is an interface that backs off. +type Backoff interface { + // Next returns the time duration to wait and whether to stop. + Next() (next time.Duration, stop bool) +} + +var _ Backoff = (BackoffFunc)(nil) + +// BackoffFunc is a backoff expressed as a function. +type BackoffFunc func() (time.Duration, bool) + +// Next implements Backoff. +func (b BackoffFunc) Next() (time.Duration, bool) { + return b() +} + +// WithJitter wraps a backoff function and adds the specified jitter. j can be +// interpreted as "+/- j". For example, if j were 5 seconds and the backoff +// returned 20s, the value could be between 15 and 25 seconds. The value can +// never be less than 0. +func WithJitter(j time.Duration, next Backoff) Backoff { + r := newLockedRandom(time.Now().UnixNano()) + + return BackoffFunc(func() (time.Duration, bool) { + val, stop := next.Next() + if stop { + return 0, true + } + + diff := time.Duration(r.Int63n(int64(j)*2) - int64(j)) + val = val + diff + if val < 0 { + val = 0 + } + return val, false + }) +} + +// WithJitterPercent wraps a backoff function and adds the specified jitter +// percentage. j can be interpreted as "+/- j%". For example, if j were 5 and +// the backoff returned 20s, the value could be between 19 and 21 seconds. The +// value can never be less than 0 or greater than 100. 
+func WithJitterPercent(j uint64, next Backoff) Backoff { + r := newLockedRandom(time.Now().UnixNano()) + + return BackoffFunc(func() (time.Duration, bool) { + val, stop := next.Next() + if stop { + return 0, true + } + + // Get a value between -j and j, the convert to a percentage + top := r.Int63n(int64(j)*2) - int64(j) + pct := 1 - float64(top)/100.0 + + val = time.Duration(float64(val) * pct) + if val < 0 { + val = 0 + } + return val, false + }) +} + +// WithMaxRetries executes the backoff function up until the maximum attempts. +func WithMaxRetries(max uint64, next Backoff) Backoff { + var l sync.Mutex + var attempt uint64 + + return BackoffFunc(func() (time.Duration, bool) { + l.Lock() + defer l.Unlock() + + if attempt >= max { + return 0, true + } + attempt++ + + val, stop := next.Next() + if stop { + return 0, true + } + + return val, false + }) +} + +// WithCappedDuration sets a maximum on the duration returned from the next +// backoff. This is NOT a total backoff time, but rather a cap on the maximum +// value a backoff can return. Without another middleware, the backoff will +// continue infinitely. +func WithCappedDuration(cap time.Duration, next Backoff) Backoff { + return BackoffFunc(func() (time.Duration, bool) { + val, stop := next.Next() + if stop { + return 0, true + } + + if val <= 0 || val > cap { + val = cap + } + return val, false + }) +} + +// WithMaxDuration sets a maximum on the total amount of time a backoff should +// execute. It's best-effort, and should not be used to guarantee an exact +// amount of time. +func WithMaxDuration(timeout time.Duration, next Backoff) Backoff { + start := time.Now() + + return BackoffFunc(func() (time.Duration, bool) { + diff := timeout - time.Since(start) + if diff <= 0 { + return 0, true + } + + val, stop := next.Next() + if stop { + return 0, true + } + + if val <= 0 || val > diff { + val = diff + } + return val, false + }) +} diff --git a/constraint/vendor/github.com/sethvargo/go-retry/backoff_constant.go b/constraint/vendor/github.com/sethvargo/go-retry/backoff_constant.go new file mode 100644 index 000000000..ef01fa0a0 --- /dev/null +++ b/constraint/vendor/github.com/sethvargo/go-retry/backoff_constant.go @@ -0,0 +1,25 @@ +package retry + +import ( + "context" + "time" +) + +// Constant is a wrapper around Retry that uses a constant backoff. It panics if +// the given base is less than zero. +func Constant(ctx context.Context, t time.Duration, f RetryFunc) error { + return Do(ctx, NewConstant(t), f) +} + +// NewConstant creates a new constant backoff using the value t. The wait time +// is the provided constant value. It panics if the given base is less than +// zero. +func NewConstant(t time.Duration) Backoff { + if t <= 0 { + panic("t must be greater than 0") + } + + return BackoffFunc(func() (time.Duration, bool) { + return t, false + }) +} diff --git a/constraint/vendor/github.com/sethvargo/go-retry/backoff_exponential.go b/constraint/vendor/github.com/sethvargo/go-retry/backoff_exponential.go new file mode 100644 index 000000000..a85b498e5 --- /dev/null +++ b/constraint/vendor/github.com/sethvargo/go-retry/backoff_exponential.go @@ -0,0 +1,47 @@ +package retry + +import ( + "context" + "math" + "sync/atomic" + "time" +) + +type exponentialBackoff struct { + base time.Duration + attempt uint64 +} + +// Exponential is a wrapper around Retry that uses an exponential backoff. See +// NewExponential. 
+func Exponential(ctx context.Context, base time.Duration, f RetryFunc) error { + return Do(ctx, NewExponential(base), f) +} + +// NewExponential creates a new exponential backoff using the starting value of +// base and doubling on each failure (1, 2, 4, 8, 16, 32, 64...), up to max. +// +// Once it overflows, the function constantly returns the maximum time.Duration +// for a 64-bit integer. +// +// It panics if the given base is less than zero. +func NewExponential(base time.Duration) Backoff { + if base <= 0 { + panic("base must be greater than 0") + } + + return &exponentialBackoff{ + base: base, + } +} + +// Next implements Backoff. It is safe for concurrent use. +func (b *exponentialBackoff) Next() (time.Duration, bool) { + next := b.base << (atomic.AddUint64(&b.attempt, 1) - 1) + if next <= 0 { + atomic.AddUint64(&b.attempt, ^uint64(0)) + next = math.MaxInt64 + } + + return next, false +} diff --git a/constraint/vendor/github.com/sethvargo/go-retry/backoff_fibonacci.go b/constraint/vendor/github.com/sethvargo/go-retry/backoff_fibonacci.go new file mode 100644 index 000000000..250a026ec --- /dev/null +++ b/constraint/vendor/github.com/sethvargo/go-retry/backoff_fibonacci.go @@ -0,0 +1,56 @@ +package retry + +import ( + "context" + "math" + "sync/atomic" + "time" + "unsafe" +) + +type state [2]time.Duration + +type fibonacciBackoff struct { + state unsafe.Pointer +} + +// Fibonacci is a wrapper around Retry that uses a Fibonacci backoff. See +// NewFibonacci. +func Fibonacci(ctx context.Context, base time.Duration, f RetryFunc) error { + return Do(ctx, NewFibonacci(base), f) +} + +// NewFibonacci creates a new Fibonacci backoff using the starting value of +// base. The wait time is the sum of the previous two wait times on each failed +// attempt (1, 1, 2, 3, 5, 8, 13...). +// +// Once it overflows, the function constantly returns the maximum time.Duration +// for a 64-bit integer. +// +// It panics if the given base is less than zero. +func NewFibonacci(base time.Duration) Backoff { + if base <= 0 { + panic("base must be greater than 0") + } + + return &fibonacciBackoff{ + state: unsafe.Pointer(&state{0, base}), + } +} + +// Next implements Backoff. It is safe for concurrent use. +func (b *fibonacciBackoff) Next() (time.Duration, bool) { + for { + curr := atomic.LoadPointer(&b.state) + currState := (*state)(curr) + next := currState[0] + currState[1] + + if next <= 0 { + return math.MaxInt64, false + } + + if atomic.CompareAndSwapPointer(&b.state, curr, unsafe.Pointer(&state{currState[1], next})) { + return next, false + } + } +} diff --git a/constraint/vendor/github.com/sethvargo/go-retry/rand.go b/constraint/vendor/github.com/sethvargo/go-retry/rand.go new file mode 100644 index 000000000..4799fb064 --- /dev/null +++ b/constraint/vendor/github.com/sethvargo/go-retry/rand.go @@ -0,0 +1,54 @@ +package retry + +import ( + "math/rand" + "sync" +) + +type lockedSource struct { + src *rand.Rand + mu sync.Mutex +} + +var _ rand.Source64 = (*lockedSource)(nil) + +func newLockedRandom(seed int64) *lockedSource { + return &lockedSource{src: rand.New(rand.NewSource(seed))} +} + +// Int63 mimics math/rand.(*Rand).Int63 with mutex locked. +func (r *lockedSource) Int63() int64 { + r.mu.Lock() + defer r.mu.Unlock() + return r.src.Int63() +} + +// Seed mimics math/rand.(*Rand).Seed with mutex locked. +func (r *lockedSource) Seed(seed int64) { + r.mu.Lock() + defer r.mu.Unlock() + r.src.Seed(seed) +} + +// Uint64 mimics math/rand.(*Rand).Uint64 with mutex locked. 
+func (r *lockedSource) Uint64() uint64 { + r.mu.Lock() + defer r.mu.Unlock() + return r.src.Uint64() +} + +// Int63n mimics math/rand.(*Rand).Int63n with mutex locked. +func (r *lockedSource) Int63n(n int64) int64 { + if n <= 0 { + panic("invalid argument to Int63n") + } + if n&(n-1) == 0 { // n is power of two, can mask + return r.Int63() & (n - 1) + } + max := int64((1 << 63) - 1 - (1<<63)%uint64(n)) + v := r.Int63() + for v > max { + v = r.Int63() + } + return v % n +} diff --git a/constraint/vendor/github.com/sethvargo/go-retry/retry.go b/constraint/vendor/github.com/sethvargo/go-retry/retry.go new file mode 100644 index 000000000..a3eeadc12 --- /dev/null +++ b/constraint/vendor/github.com/sethvargo/go-retry/retry.go @@ -0,0 +1,104 @@ +// Package retry provides helpers for retrying. +// +// This package defines flexible interfaces for retrying Go functions that may +// be flakey or eventually consistent. It abstracts the "backoff" (how long to +// wait between tries) and "retry" (execute the function again) mechanisms for +// maximum flexibility. Furthermore, everything is an interface, so you can +// define your own implementations. +// +// The package is modeled after Go's built-in HTTP package, making it easy to +// customize the built-in backoff with your own custom logic. Additionally, +// callers specify which errors are retryable by wrapping them. This is helpful +// with complex operations where only certain results should retry. +package retry + +import ( + "context" + "errors" + "time" +) + +// RetryFunc is a function passed to [Do]. +type RetryFunc func(ctx context.Context) error + +// RetryFuncValue is a function passed to [Do] which returns a value. +type RetryFuncValue[T any] func(ctx context.Context) (T, error) + +type retryableError struct { + err error +} + +// RetryableError marks an error as retryable. +func RetryableError(err error) error { + if err == nil { + return nil + } + return &retryableError{err} +} + +// Unwrap implements error wrapping. +func (e *retryableError) Unwrap() error { + return e.err +} + +// Error returns the error string. +func (e *retryableError) Error() string { + if e.err == nil { + return "retryable: " + } + return "retryable: " + e.err.Error() +} + +func DoValue[T any](ctx context.Context, b Backoff, f RetryFuncValue[T]) (T, error) { + var nilT T + + for { + // Return immediately if ctx is canceled + select { + case <-ctx.Done(): + return nilT, ctx.Err() + default: + } + + v, err := f(ctx) + if err == nil { + return v, nil + } + + // Not retryable + var rerr *retryableError + if !errors.As(err, &rerr) { + return nilT, err + } + + next, stop := b.Next() + if stop { + return nilT, rerr.Unwrap() + } + + // ctx.Done() has priority, so we test it alone first + select { + case <-ctx.Done(): + return nilT, ctx.Err() + default: + } + + t := time.NewTimer(next) + select { + case <-ctx.Done(): + t.Stop() + return nilT, ctx.Err() + case <-t.C: + continue + } + } +} + +// Do wraps a function with a backoff to retry. The provided context is the same +// context passed to the [RetryFunc]. 
+func Do(ctx context.Context, b Backoff, f RetryFunc) error { + _, err := DoValue(ctx, b, func(ctx context.Context) (*struct{}, error) { + return nil, f(ctx) + }) + return err +} diff --git a/constraint/vendor/github.com/stoewer/go-strcase/README.md b/constraint/vendor/github.com/stoewer/go-strcase/README.md index 0e8635d80..84a640e71 100644 --- a/constraint/vendor/github.com/stoewer/go-strcase/README.md +++ b/constraint/vendor/github.com/stoewer/go-strcase/README.md @@ -1,5 +1,5 @@ -[![CircleCI](https://circleci.com/gh/stoewer/go-strcase/tree/master.svg?style=svg)](https://circleci.com/gh/stoewer/go-strcase/tree/master) -[![codecov](https://codecov.io/gh/stoewer/go-strcase/branch/master/graph/badge.svg)](https://codecov.io/gh/stoewer/go-strcase) +[![GH Actions](https://github.com/stoewer/go-strcase/actions/workflows/lint-test.yml/badge.svg?branch=master)](https://github.com/stoewer/go-strcase/actions) +[![codecov](https://codecov.io/github/stoewer/go-strcase/branch/master/graph/badge.svg?token=c0UokYnop5)](https://codecov.io/github/stoewer/go-strcase) [![GoDoc](https://godoc.org/github.com/stoewer/go-strcase?status.svg)](https://pkg.go.dev/github.com/stoewer/go-strcase) --- diff --git a/constraint/vendor/github.com/stoewer/go-strcase/camel.go b/constraint/vendor/github.com/stoewer/go-strcase/camel.go index 5c233cc8f..ff9e66e0c 100644 --- a/constraint/vendor/github.com/stoewer/go-strcase/camel.go +++ b/constraint/vendor/github.com/stoewer/go-strcase/camel.go @@ -27,6 +27,9 @@ func camelCase(s string, upper bool) string { buffer = append(buffer, toUpper(curr)) } else if isLower(prev) { buffer = append(buffer, curr) + } else if isUpper(prev) && isUpper(curr) && isLower(next) { + // Assume a case like "R" for "XRequestId" + buffer = append(buffer, curr) } else { buffer = append(buffer, toLower(curr)) } diff --git a/constraint/vendor/github.com/walles/env/LICENSE b/constraint/vendor/github.com/walles/env/LICENSE new file mode 100644 index 000000000..71cdb232f --- /dev/null +++ b/constraint/vendor/github.com/walles/env/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Johan Walles + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
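A minimal sketch of the vendored go-retry package in use, combining the backoff constructor and middleware defined above. Following the README's ordering caveat, the per-attempt cap is applied before the total-duration limit; the error, sleep values, and attempt counts are illustrative.

```go
// Illustrative only: composing the vendored go-retry backoffs and middleware.
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/sethvargo/go-retry"
)

func main() {
	ctx := context.Background()

	b := retry.NewFibonacci(500 * time.Millisecond)
	b = retry.WithCappedDuration(2*time.Second, b) // no single sleep above 2s
	b = retry.WithMaxDuration(10*time.Second, b)   // best-effort overall budget
	b = retry.WithMaxRetries(5, b)                 // 5 retries, i.e. at most 6 attempts

	attempts := 0
	err := retry.Do(ctx, b, func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			// Only errors wrapped with RetryableError are retried.
			return retry.RetryableError(errors.New("transient failure"))
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("succeeded after %d attempts", attempts)
}
```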
diff --git a/constraint/vendor/github.com/walles/env/README.md b/constraint/vendor/github.com/walles/env/README.md new file mode 100644 index 000000000..0c0220625 --- /dev/null +++ b/constraint/vendor/github.com/walles/env/README.md @@ -0,0 +1,55 @@ +![CI Status](https://github.com/walles/env/actions/workflows/ci.yml/badge.svg?branch=main) + +Functions for parsing environment variable values into typed variables. + +# Examples + +Note that the resulting values are all typed. + +```go +import "github.com/walles/env" + +// Enabled will be of type bool +enabled := env.GetOr("ENABLED", strconv.ParseBool, false) + +// Duration will be of type time.Duration +duration, err := env.Get("TIMEOUT", time.ParseDuration) + +// Username will be of type string. If it's not set in the environment, +// then MustGet will panic. +username := env.MustGet("USERNAME", env.String) + +// LuckyNumbers will be of type []int +luckyNumbers, err := env.Get("LUCKY_NUMBERS", env.ListOf(strconv.Atoi, ",")) + +// FluffyNumber will be a 64 bit precision float +fluffyNumber, err := env.Get("FLOAT", env.WithBitSize(strconv.ParseFloat, 64)) + +// This will parse both hex and decimal into an uint64 +// +// Some allowed number formats: 0xC0de, 1234 +number, err := env.Get("HEX", env.WithBaseAndBitSize(strconv.ParseUint, 0, 64)) + +// Timestamp will be of type time.Time +timestamp, err := env.Get("TIMESTAMP", env.WithTimeSpec(time.Parse, time.RFC3339)) + +// UsersAndScores will be of type map[string]int. +// +// In this case, "Adam:50,Eva:60" will be parsed into { "Adam":50, "Eva":60 }. +usersAndScores, err := env.Get("USERS_AND_SCORES", Map(env.String, ":", strconv.Atoi, ",")) +``` + +# Installing + +To add to your `go.mod` file: + +``` +go get github.com/walles/env +``` + +# Alternatives + +If you like bindings based APIs better then these ones seem popular: + +* +* diff --git a/constraint/vendor/github.com/walles/env/env.go b/constraint/vendor/github.com/walles/env/env.go new file mode 100644 index 000000000..839b4aa5b --- /dev/null +++ b/constraint/vendor/github.com/walles/env/env.go @@ -0,0 +1,218 @@ +package env + +import ( + "fmt" + "os" + "strings" +) + +type parseError struct { + message string +} + +func (pe parseError) Error() string { + return pe.message +} + +// Get a value from environment and run it through the parse function. Return +// the result if there was one. +// +// If the parsing fails or if the variable was not set an error will be +// returned. +// +// Parse errors vs not-set errors can be told apart using env.IsParseError(). +// +// # Example Usage +// +// port, err := env.Get("PORT", strconv.Atoi) +func Get[V any](environmentVariableName string, parse func(string) (V, error)) (V, error) { + rawValue, found := os.LookupEnv(environmentVariableName) + if !found { + var noResult V + return noResult, fmt.Errorf("Environment variable not set: %s", environmentVariableName) + } + + parsedValue, err := parse(rawValue) + if err != nil { + var noResult V + return noResult, parseError{ + message: fmt.Sprintf("Parsing %s value: %v", environmentVariableName, err), + } + } + + return parsedValue, nil +} + +// Returns true if this was a parse error returned by env.Get(). False usually +// means an env.Get() error is because the variable was not set. +// +// False will also be returned if the error was nil (not an error), or if it's +// not from env.Get(). 
+func IsParseError(err error) bool { + _, isParseError := err.(parseError) + return isParseError +} + +// Get a value from environment and run it through the parse function. Return +// the result if there was one. +// +// If the parsing fails or if the variable was not set then the fallback value +// will be returned. +// +// # Example Usage +// +// port := env.GetOr("PORT", strconv.Atoi, 8080) +func GetOr[V any](environmentVariableName string, parse func(string) (V, error), fallback V) V { + rawValue, found := os.LookupEnv(environmentVariableName) + if !found { + return fallback + } + + parsedValue, err := parse(rawValue) + if err != nil { + return fallback + } + + return parsedValue +} + +// Get a value from environment and run it through the parse function. Return +// the result if there was one. +// +// If the parsing fails or if the variable was not set then this function will +// panic. +// +// # Example Usage +// +// port := env.MustGet("PORT", strconv.Atoi) +func MustGet[V any](environmentVariableName string, parse func(string) (V, error)) V { + parsedValue, err := Get(environmentVariableName, parse) + if err != nil { + panic(err) + } + + return parsedValue +} + +// Helper function for reading lists from environment variables. +// +// # Example Usage +// +// numbers, err := env.Get("NUMBERS", env.ListOf(strconv.Atoi, ",")) +func ListOf[V any](parse func(string) (V, error), separator string) func(string) ([]V, error) { + return func(stringWithSeparators string) ([]V, error) { + separatedString := strings.Split(stringWithSeparators, separator) + + var result []V + for index, part := range separatedString { + parsedValue, err := parse(part) + if err != nil { + return nil, fmt.Errorf("Element %d: %w", index+1, err) + } + + result = append(result, parsedValue) + } + + return result, nil + } +} + +// Helper function for reading maps from environment variables. +// +// # Example Usage +// +// This can be used to parse a string of the form "a:5,b:9,c:42": +// +// mapping, err := env.Get("MAPPING", env.Map(env.String, ":", strconv.Atoi, ",")) +func Map[K comparable, V any]( + parseKey func(string) (K, error), + keyValueSeparator string, + parseValue func(string) (V, error), + entriesSeparator string, +) func(string) (map[K]V, error) { + return func(stringWithMap string) (map[K]V, error) { + entries := strings.Split(stringWithMap, entriesSeparator) + + result := make(map[K]V) + var empty map[K]V + for index, entry := range entries { + rawKeyAndValue := strings.Split(entry, keyValueSeparator) + if len(rawKeyAndValue) != 2 { + return empty, fmt.Errorf(`Element %d doesn't have exactly one separator ("%s"): %s`, + index+1, + keyValueSeparator, + entry, + ) + } + + rawKey := rawKeyAndValue[0] + rawValue := rawKeyAndValue[1] + + parsedKey, err := parseKey(rawKey) + if err != nil { + return empty, fmt.Errorf("Key %d: %w", index+1, err) + } + + parsedValue, err := parseValue(rawValue) + if err != nil { + return empty, fmt.Errorf("Value %d: %w", index+1, err) + } + + result[parsedKey] = parsedValue + } + + return result, nil + } +} + +// Helper function for parsing floats and similar from environment variables. +// +// # Example Usage +// +// number, err := env.Get("FLOAT", env.WithBitSize(strconv.ParseFloat, 64)) +func WithBitSize[V any](parse func(string, int) (V, error), bitSize int) func(string) (V, error) { + return func(raw string) (V, error) { + return parse(raw, bitSize) + } +} + +// Helper function for parsing ints of different bases from environment +// variables. 
+// +// Pro tip: Passing base 0 with [strconv.ParseInt] and [strconv.ParseUint] +// will make them try to figure out the base by themselves. +// +// # Example Usage +// +// number, err := env.Get("HEX", env.WithBaseAndBitSize(strconv.ParseUint, 0, 64)) +// +// [strconv.ParseInt]: https://pkg.go.dev/strconv#ParseInt +// [strconv.ParseUint]: https://pkg.go.dev/strconv#ParseUint +func WithBaseAndBitSize[V any](parse func(string, int, int) (V, error), base, bitSize int) func(string) (V, error) { + return func(raw string) (V, error) { + return parse(raw, base, bitSize) + } +} + +// Helper function for parsing timestamps using [time.Parse] from environment +// variables. +// +// # Example Usage +// +// timestamp, err := Get("TEST", WithTimeSpec(time.Parse, time.RFC3339)) +// +// [time.Parse]: https://pkg.go.dev/time#Parse +func WithTimeSpec[V any](parse func(string, string) (V, error), layout string) func(string) (V, error) { + return func(raw string) (V, error) { + return parse(layout, raw) + } +} + +// Helper function for reading strings from the environment. +// +// # Example Usage +// +// userName, err := env.Get("USERNAME", env.String) +func String(input string) (string, error) { + return input, nil +} diff --git a/constraint/vendor/github.com/x448/float16/.travis.yml b/constraint/vendor/github.com/x448/float16/.travis.yml new file mode 100644 index 000000000..8902bdaaf --- /dev/null +++ b/constraint/vendor/github.com/x448/float16/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.11.x + +env: + - GO111MODULE=on + +script: + - go test -short -coverprofile=coverage.txt -covermode=count ./... + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/constraint/vendor/github.com/x448/float16/LICENSE b/constraint/vendor/github.com/x448/float16/LICENSE new file mode 100644 index 000000000..bf6e35785 --- /dev/null +++ b/constraint/vendor/github.com/x448/float16/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
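A minimal sketch of how the vendored env package's Get and IsParseError, shown above, might be combined to tell an unset variable apart from one that is set but malformed. The variable name and fallback value are illustrative.

```go
// Illustrative only: distinguishing "unset" from "set but malformed"
// with the vendored env package.
package main

import (
	"log"
	"time"

	"github.com/walles/env"
)

func main() {
	timeout, err := env.Get("TIMEOUT", time.ParseDuration)
	if err != nil {
		if env.IsParseError(err) {
			// TIMEOUT is present but not a valid duration: fail loudly.
			log.Fatalf("bad TIMEOUT: %v", err)
		}
		// TIMEOUT is simply unset: fall back to a default.
		timeout = 30 * time.Second
	}
	log.Printf("using timeout %v", timeout)
}
```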
+ diff --git a/constraint/vendor/github.com/x448/float16/README.md b/constraint/vendor/github.com/x448/float16/README.md new file mode 100644 index 000000000..b524b8135 --- /dev/null +++ b/constraint/vendor/github.com/x448/float16/README.md @@ -0,0 +1,133 @@ +# Float16 (Binary16) in Go/Golang +[![Build Status](https://travis-ci.org/x448/float16.svg?branch=master)](https://travis-ci.org/x448/float16) +[![codecov](https://codecov.io/gh/x448/float16/branch/master/graph/badge.svg?v=4)](https://codecov.io/gh/x448/float16) +[![Go Report Card](https://goreportcard.com/badge/github.com/x448/float16)](https://goreportcard.com/report/github.com/x448/float16) +[![Release](https://img.shields.io/github/release/x448/float16.svg?style=flat-square)](https://github.com/x448/float16/releases) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/x448/float16/master/LICENSE) + +`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16. + +IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result. + +All possible 4+ billion floating-point conversions with this library are verified to be correct. + +Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library. + +## Features +Current features include: + +* float16 to float32 conversions use lossless conversion. +* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven". +* conversions using pure Go take about 2.65 ns/op on a desktop amd64. +* unit tests provide 100% code coverage and check all possible 4+ billion conversions. +* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc. +* all functions in this library use zero allocs except String(). + +## Status +This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published. + +Current status: + +* core API is done and breaking API changes are unlikely. +* 100% of unit tests pass: + * short mode (`go test -short`) tests around 65765 conversions in 0.005s. + * normal mode (`go test`) tests all possible 4+ billion conversions in about 95s. +* 100% code coverage with both short mode and normal mode. +* tested on amd64 but it should work on all little-endian platforms supported by Go. + +Roadmap: + +* add functions for fast batch conversions leveraging SIMD when supported by hardware. +* speed up unit test when verifying all possible 4+ billion conversions. +* test on additional platforms. + +## Float16 to Float32 Conversion +Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct. + +Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions. + +## Float32 to Float16 Conversion +Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct. 
+ +Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32(). + +Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage. + +## Usage +Install with `go get github.com/x448/float16`. +``` +// Convert float32 to float16 +pi := float32(math.Pi) +pi16 := float16.Fromfloat32(pi) + +// Convert float16 to float32 +pi32 := pi16.Float32() + +// PrecisionFromfloat32() is faster than the overhead of calling a function. +// This example only converts if there's no data loss and input is not a subnormal. +if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact { + pi16 := float16.Fromfloat32(pi) +} +``` + +## Float16 Type and API +Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods. +``` +package float16 // import "github.com/x448/float16" + +// Exported types and consts +type Float16 uint16 +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +// Exported functions +Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding + with identical results to AMD and Intel F16C hardware. NaN inputs + are converted with quiet bit always set on, to be like F16C. + +FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit. + // The "ps" suffix means "preserve signaling". + // Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN. + +Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.) +NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number +Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign + +PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow + // (inline and < 1 ns/op) +// Exported methods +(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion +(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f +(f Float16) IsNaN() bool // true if f is not-a-number (NaN) +(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN) +(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf) +(f Float16) IsFinite() bool // true if f is not infinite or NaN +(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN. +(f Float16) Signbit() bool // true if f is negative or negative zero +(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface +``` +See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info. + +## Benchmarks +Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value. + +``` +All functions have zero allocations except float16.String(). + +FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16 +ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32 +Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16 + +PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc. +``` + +## System Requirements +* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions. 
+* Tested on amd64 but it should also work on all little-endian platforms supported by Go. + +## Special Thanks +Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16. + +## License +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Licensed under [MIT License](LICENSE) diff --git a/constraint/vendor/github.com/x448/float16/float16.go b/constraint/vendor/github.com/x448/float16/float16.go new file mode 100644 index 000000000..1a0e6dad0 --- /dev/null +++ b/constraint/vendor/github.com/x448/float16/float16.go @@ -0,0 +1,302 @@ +// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker +// +// Special thanks to Kathryn Long for her Rust implementation +// of float16 at github.com/starkat99/half-rs (MIT license) + +package float16 + +import ( + "math" + "strconv" +) + +// Float16 represents IEEE 754 half-precision floating-point numbers (binary16). +type Float16 uint16 + +// Precision indicates whether the conversion to Float16 is +// exact, subnormal without dropped bits, inexact, underflow, or overflow. +type Precision int + +const ( + + // PrecisionExact is for non-subnormals that don't drop bits during conversion. + // All of these can round-trip. Should always convert to float16. + PrecisionExact Precision = iota + + // PrecisionUnknown is for subnormals that don't drop bits during conversion but + // not all of these can round-trip so precision is unknown without more effort. + // Only 2046 of these can round-trip and the rest cannot round-trip. + PrecisionUnknown + + // PrecisionInexact is for dropped significand bits and cannot round-trip. + // Some of these are subnormals. Cannot round-trip float32->float16->float32. + PrecisionInexact + + // PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32. + PrecisionUnderflow + + // PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32. + PrecisionOverflow +) + +// PrecisionFromfloat32 returns Precision without performing +// the conversion. Conversions from both Infinity and NaN +// values will always report PrecisionExact even if NaN payload +// or NaN-Quiet-Bit is lost. This function is kept simple to +// allow inlining and run < 0.5 ns/op, to serve as a fast filter. +func PrecisionFromfloat32(f32 float32) Precision { + u32 := math.Float32bits(f32) + + if u32 == 0 || u32 == 0x80000000 { + // +- zero will always be exact conversion + return PrecisionExact + } + + const COEFMASK uint32 = 0x7fffff // 23 least significant bits + const EXPSHIFT uint32 = 23 + const EXPBIAS uint32 = 127 + const EXPMASK uint32 = uint32(0xff) << EXPSHIFT + const DROPMASK uint32 = COEFMASK >> 10 + + exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS) + coef := u32 & COEFMASK + + if exp == 128 { + // +- infinity or NaN + // apps may want to do extra checks for NaN separately + return PrecisionExact + } + + // https://en.wikipedia.org/wiki/Half-precision_floating-point_format says, + // "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24" + if exp < -24 { + return PrecisionUnderflow + } + if exp > 15 { + return PrecisionOverflow + } + if (coef & DROPMASK) != uint32(0) { + // these include subnormals and non-subnormals that dropped bits + return PrecisionInexact + } + + if exp < -14 { + // Subnormals. Caller may want to test these further. 
+ // There are 2046 subnormals that can successfully round-trip f32->f16->f32 + // and 20 of those 2046 have 32-bit input coef == 0. + // RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value" + // so some protocols and libraries will choose to handle subnormals differently + // when deciding to encode them to CBOR float32 vs float16. + return PrecisionUnknown + } + + return PrecisionExact +} + +// Frombits returns the float16 number corresponding to the IEEE 754 binary16 +// representation u16, with the sign bit of u16 and the result in the same bit +// position. Frombits(Bits(x)) == x. +func Frombits(u16 uint16) Float16 { + return Float16(u16) +} + +// Fromfloat32 returns a Float16 value converted from f32. Conversion uses +// IEEE default rounding (nearest int, with ties to even). +func Fromfloat32(f32 float32) Float16 { + return Float16(f32bitsToF16bits(math.Float32bits(f32))) +} + +// ErrInvalidNaNValue indicates a NaN was not received. +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +type float16Error string + +func (e float16Error) Error() string { return string(e) } + +// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both +// signaling and payload. Unlike Fromfloat32(), which can only return +// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN. +// If the result is infinity (sNaN with empty payload), then the +// lowest bit of payload is set to make the result a NaN. +// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN. +// This function was kept simple to be able to inline. +func FromNaN32ps(nan float32) (Float16, error) { + const SNAN = Float16(uint16(0x7c01)) // signalling NaN + + u32 := math.Float32bits(nan) + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if (exp != 0x7f800000) || (coef == 0) { + return SNAN, ErrInvalidNaNValue + } + + u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13)) + + if (u16 & 0x03ff) == 0 { + // result became infinity, make it NaN by setting lowest bit in payload + u16 = u16 | 0x0001 + } + + return Float16(u16), nil +} + +// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN). +// Returned NaN value 0x7e01 has all exponent bits = 1 with the +// first and last bits = 1 in the significand. This is consistent +// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00. +func NaN() Float16 { + return Float16(0x7e01) +} + +// Inf returns a Float16 with an infinity value with the specified sign. +// A sign >= returns positive infinity. +// A sign < 0 returns negative infinity. +func Inf(sign int) Float16 { + if sign >= 0 { + return Float16(0x7c00) + } + return Float16(0x8000 | 0x7c00) +} + +// Float32 returns a float32 converted from f (Float16). +// This is a lossless conversion. +func (f Float16) Float32() float32 { + u32 := f16bitsToF32bits(uint16(f)) + return math.Float32frombits(u32) +} + +// Bits returns the IEEE 754 binary16 representation of f, with the sign bit +// of f and the result in the same bit position. Bits(Frombits(x)) == x. +func (f Float16) Bits() uint16 { + return uint16(f) +} + +// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value. +func (f Float16) IsNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) +} + +// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16 +// “not-a-number” value. 
+func (f Float16) IsQuietNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0) +} + +// IsInf reports whether f is an infinity (inf). +// A sign > 0 reports whether f is positive inf. +// A sign < 0 reports whether f is negative inf. +// A sign == 0 reports whether f is either inf. +func (f Float16) IsInf(sign int) bool { + return ((f == 0x7c00) && sign >= 0) || + (f == 0xfc00 && sign <= 0) +} + +// IsFinite returns true if f is neither infinite nor NaN. +func (f Float16) IsFinite() bool { + return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00) +} + +// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN. +func (f Float16) IsNormal() bool { + exp := uint16(f) & uint16(0x7c00) + return (exp != uint16(0x7c00)) && (exp != 0) +} + +// Signbit reports whether f is negative or negative zero. +func (f Float16) Signbit() bool { + return (uint16(f) & uint16(0x8000)) != 0 +} + +// String satisfies the fmt.Stringer interface. +func (f Float16) String() string { + return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32) +} + +// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16. +func f16bitsToF32bits(in uint16) uint32 { + // All 65536 conversions with this were confirmed to be correct + // by Montgomery Edwards⁴⁴⁸ (github.com/x448). + + sign := uint32(in&0x8000) << 16 // sign for 32-bit + exp := uint32(in&0x7c00) >> 10 // exponenent for 16-bit + coef := uint32(in&0x03ff) << 13 // significand for 32-bit + + if exp == 0x1f { + if coef == 0 { + // infinity + return sign | 0x7f800000 | coef + } + // NaN + return sign | 0x7fc00000 | coef + } + + if exp == 0 { + if coef == 0 { + // zero + return sign + } + + // normalize subnormal numbers + exp++ + for coef&0x7f800000 == 0 { + coef <<= 1 + exp-- + } + coef &= 0x007fffff + } + + return sign | ((exp + (0x7f - 0xf)) << 23) | coef +} + +// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32. +// Conversion rounds to nearest integer with ties to even. +func f32bitsToF16bits(u32 uint32) uint16 { + // Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448). + // All 4294967296 conversions with this were confirmed to be correct by x448. + // Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license. 
+ + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if exp == 0x7f800000 { + // NaN or Infinity + nanBit := uint32(0) + if coef != 0 { + nanBit = uint32(0x0200) + } + return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13)) + } + + halfSign := sign >> 16 + + unbiasedExp := int32(exp>>23) - 127 + halfExp := unbiasedExp + 15 + + if halfExp >= 0x1f { + return uint16(halfSign | uint32(0x7c00)) + } + + if halfExp <= 0 { + if 14-halfExp > 24 { + return uint16(halfSign) + } + coef := coef | uint32(0x00800000) + halfCoef := coef >> uint32(14-halfExp) + roundBit := uint32(1) << uint32(13-halfExp) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + halfCoef++ + } + return uint16(halfSign | halfCoef) + } + + uHalfExp := uint32(halfExp) << 10 + halfCoef := coef >> 13 + roundBit := uint32(0x00001000) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + return uint16((halfSign | uHalfExp | halfCoef) + 1) + } + return uint16(halfSign | uHalfExp | halfCoef) +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/constraint/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md new file mode 100644 index 000000000..773c9b643 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing to go.opentelemetry.io/auto/sdk + +The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK. +It is designed to be: + +0. An OpenTelemetry compliant SDK +1. Instrumented by auto-instrumentation (serializable into OTLP JSON) +2. Lightweight +3. User-friendly + +These design choices are listed in the order of their importance. + +The primary design goal of this module is to be an OpenTelemetry SDK. +This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`. + +Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument. +The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP. +This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent. + +Outside of these first two goals, the intended use becomes relevant. +This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running. +Because of this, this package needs to not add unnecessary dependencies to that API. +Ideally, it adds none. +It also needs to operate efficiently. + +Finally, this module is designed to be user-friendly to Go development. +It hides complexity in order to provide simpler APIs when the previous goals can all still be met. diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/constraint/vendor/go.opentelemetry.io/auto/sdk/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
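Before the diff moves on to the remaining go.opentelemetry.io/auto/sdk files, here is a small, self-contained sketch (not part of the diff or of the vendored files) showing how the github.com/x448/float16 package introduced above is typically consumed. It relies only on identifiers visible in the vendored sources, namely Fromfloat32, Float32, Bits, PrecisionFromfloat32, and PrecisionExact, and on the import path documented in the vendored README.

```go
// Minimal sketch: exercising the float16 package added by this diff.
package main

import (
	"fmt"
	"math"

	"github.com/x448/float16"
)

func main() {
	f32 := float32(math.Pi)

	// Cheap pre-check: math.Pi does not fit exactly in binary16, so this
	// reports a lossy (inexact) conversion rather than PrecisionExact.
	if float16.PrecisionFromfloat32(f32) != float16.PrecisionExact {
		fmt.Println("conversion of", f32, "to float16 loses precision")
	}

	f16 := float16.Fromfloat32(f32) // float32 -> IEEE 754 binary16, ties-to-even rounding
	back := f16.Float32()           // binary16 -> float32, lossless

	fmt.Printf("binary16 bits: %#04x, round-tripped value: %v\n", f16.Bits(), back)
}
```

The package's own Usage snippet follows the same pattern: call PrecisionFromfloat32() as a fast filter, then Fromfloat32() for the actual conversion.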
diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/constraint/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 000000000..088d19a6c --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. + +* GitHub releases will be made for all releases. diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/doc.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/doc.go new file mode 100644 index 000000000..ad73d8cb9 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/doc.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package sdk provides an auto-instrumentable OpenTelemetry SDK. + +An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the +process running this SDK. In that case, all telemetry the SDK produces will be +processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. + +By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to +auto-instrument the SDK, the SDK will not generate any telemetry. +*/ +package sdk diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go new file mode 100644 index 000000000..af6ef171f --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) Attr { + return Attr{key, BytesValue(value)} +} + +// Slice returns an Attr for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) Attr { + return Attr{key, SliceValue(value...)} +} + +// Map returns an Attr for a map value. 
+// The passed slice must not be changed after it is passed. +func Map(key string, value ...Attr) Attr { + return Attr{key, MapValue(value...)} +} + +// Equal returns if a is equal to b. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go new file mode 100644 index 000000000..949e2165c --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package telemetry provides a lightweight representations of OpenTelemetry +telemetry that is compatible with the OTLP JSON protobuf encoding. +*/ +package telemetry diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go new file mode 100644 index 000000000..e854d7e84 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty returns false if id contains at least one non-zero byte. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs. +type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty returns true if the span ID contains at least one non-zero byte. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. 
+func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go new file mode 100644 index 000000000..29e629d66 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoInt64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedInt, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *i = protoInt64(parsedInt) + } else { + var parsedInt int64 + if err := json.Unmarshal(data, &parsedInt); err != nil { + return err + } + *i = protoInt64(parsedInt) + } + return nil +} + +// protoUint64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoUint64 uint64 + +// Int64 returns the protoUint64 as a uint64. +func (i *protoUint64) Uint64() uint64 { return uint64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go new file mode 100644 index 000000000..cecad8bae --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. 
+func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go new file mode 100644 index 000000000..b6f2e28d4 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go new file mode 100644 index 000000000..a13a6b733 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. 
+ TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. 
On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. 
+func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.StartTime = time.Unix(0, int64(val.Uint64())) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.EndTime = time.Unix(0, int64(val.Uint64())) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type SpanKind int32 + +const ( + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SpanKindInternal SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. 
+ SpanKindServer SpanKind = 2 + // Indicates that the span describes a request to some remote service. + SpanKindClient SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. 
+ SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 000000000..1217776ea --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. 
+ StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. + Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 000000000..69a348f0f --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. 
+func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) + } + + switch key { + case "scope": + err = decoder.Decode(&ss.Scope) + case "spans": + err = decoder.Decode(&ss.Spans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&ss.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go new file mode 100644 index 000000000..0dd01b063 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=ValueKind -trimprefix=ValueKind + +package telemetry + +import ( + "bytes" + "cmp" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "slices" + "strconv" + "unsafe" +) + +// A Value represents a structured value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. It holds the length + // for String, Bytes, Slice, Map. 
+ num uint64 + // any holds either the KindBool, KindInt64, KindFloat64, stringptr, + // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 + // then the value of Value is in num as described above. Otherwise, it + // contains the value wrapped in the appropriate type. + any any +} + +type ( + // sliceptr represents a value in Value.any for KindString Values. + stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. + bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. + sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *Attr +) + +// ValueKind is the kind of a [Value]. +type ValueKind int + +// ValueKind values. +const ( + ValueKindEmpty ValueKind = iota + ValueKindBool + ValueKindFloat64 + ValueKindInt64 + ValueKindString + ValueKindBytes + ValueKindSlice + ValueKindMap +) + +var valueKindStrings = []string{ + "Empty", + "Bool", + "Float64", + "Int64", + "String", + "Bytes", + "Slice", + "Map", +} + +func (k ValueKind) String() string { + if k >= 0 && int(k) < len(valueKindStrings) { + return valueKindStrings[k] + } + return "" +} + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + return Value{num: uint64(v), any: ValueKindInt64} +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). 
+ return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. +func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. 
+func (v Value) String() string { + switch v.Kind() { + case ValueKindString: + return v.asString() + case ValueKindInt64: + // Assumes v.num was a valid int64 (overflow not checked). + return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + case ValueKindFloat64: + return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) + case ValueKindBool: + return strconv.FormatBool(v.asBool()) + case ValueKindBytes: + return fmt.Sprint(v.asBytes()) + case ValueKindMap: + return fmt.Sprint(v.asMap()) + case ValueKindSlice: + return fmt.Sprint(v.asSlice()) + case ValueKindEmpty: + return "" + default: + // Try to handle this as gracefully as possible. + // + // Don't panic here. The goal here is to have developers find this + // first if a slog.Kind is not handled. It is + // preferable to have users open an issue asking why their attributes + // have a "unhandled: " prefix than say that their code is panicking. + return fmt.Sprintf("<unhandled: %s>", v.Kind()) + } +} + +// MarshalJSON encodes v into OTLP formatted JSON. +func (v *Value) MarshalJSON() ([]byte, error) { + switch v.Kind() { + case ValueKindString: + return json.Marshal(struct { + Value string `json:"stringValue"` + }{v.asString()}) + case ValueKindInt64: + return json.Marshal(struct { + Value string `json:"intValue"` + }{strconv.FormatInt(int64(v.num), 10)}) + case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. +func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...)
+ case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/limit.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/limit.go new file mode 100644 index 000000000..86babf1a8 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/limit.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "log/slog" + "os" + "strconv" +) + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 + // if that is not set, is used. + AttrValueLen int + // Events is the number of allowed events for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. + Events int + // EventAttrs is the number of allowed attributes for a span event. + // + // The is resolved from the environment variable value for the + // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. + EventAttrs int + // Links is the number of allowed Links for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. + Links int + // LinkAttrs is the number of allowed attributes for a span link. + // + // This is resolved from the environment variable value for the + // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. + LinkAttrs int +} + +func newSpanLimits() spanLimits { + return spanLimits{ + Attrs: firstEnv( + 128, + "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", + "OTEL_ATTRIBUTE_COUNT_LIMIT", + ), + AttrValueLen: firstEnv( + -1, // Unlimited. + "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", + "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", + ), + Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), + EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), + Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), + LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), + } +} + +// firstEnv returns the parsed integer value of the first matching environment +// variable from keys. The defaultVal is returned if the value is not an +// integer or no match is found. 
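// (Editor's illustration, not part of the vendored source.) For example, with
// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT unset and OTEL_ATTRIBUTE_COUNT_LIMIT=64,
//
//	firstEnv(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")
//
// returns 64; if neither variable holds a valid integer, the default 128 is
// returned, and a warning is logged for any value that fails to parse.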
+func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + slog.Warn( + "invalid limit environment variable", + "error", err, + "key", key, + "value", strV, + ) + } + + return defaultVal +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/span.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/span.go new file mode 100644 index 000000000..6ebea12a9 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "encoding/json" + "fmt" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type span struct { + noop.Span + + spanContext trace.SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *span) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. 
+ return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. 
+ i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *span) End(opts ...trace.SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *span) end(opts []trace.SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := trace.NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*span) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *span) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 000000000..cbcfabde3 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/constraint/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/constraint/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 000000000..dbc477a59 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
+ return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 6aae83bfd..b25641c55 100644 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { return nil, err } diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 214acaf58..a83a02627 100644 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,20 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics. -const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Outgoing request bytes total - clientResponseSize = "http.client.response.size" // Outgoing response bytes total - clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds -) - // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. 
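// (Editor's illustration, not part of the vendored source.) A typical filter
// drops high-volume endpoints from tracing, passed via the WithFilter option:
//
//	otelhttp.NewHandler(mux, "server", otelhttp.WithFilter(func(r *http.Request) bool {
//		return r.URL.Path != "/healthz" // assumption: health checks need no spans
//	}))
//
// where mux is assumed to be the caller's http.Handler.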
type Filter func(*http.Request) bool diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index f0a9bb9ef..a01bfafbe 100644 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -8,6 +8,8 @@ import ( "net/http" "net/http/httptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" @@ -33,8 +35,9 @@ type config struct { SpanNameFormatter func(string, *http.Request) string ClientTrace func(context.Context) *httptrace.ClientTrace - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue } // Option interface used for setting optional config properties. @@ -194,3 +197,11 @@ func WithServerName(server string) Option { c.ServerName = server }) } + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. +func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index d01bdccf4..3ea05d019 100644 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -9,11 +9,10 @@ import ( "github.com/felixge/httpsnoop" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -23,21 +22,18 @@ type middleware struct { operation string server string - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - readEvent bool - writeEvent bool - filters []Filter - spanNameFormatter func(string, *http.Request) string - publicEndpoint bool - publicEndpointFn func(*http.Request) bool - - traceSemconv semconv.HTTPServer - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + metricAttributesFn func(*http.Request) []attribute.KeyValue + + semconv semconv.HTTPServer } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -56,8 +52,6 @@ func NewHandler(handler http.Handler, operation string, opts 
...Option) http.Han func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { h := middleware{ operation: operation, - - traceSemconv: semconv.NewHTTPServer(), } defaultOpts := []Option{ @@ -67,7 +61,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han c := newConfig(append(defaultOpts, opts...)...) h.configure(c) - h.createMeasures() return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -78,7 +71,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han func (h *middleware) configure(c *config) { h.tracer = c.Tracer - h.meter = c.Meter h.propagators = c.Propagators h.spanStartOptions = c.SpanStartOptions h.readEvent = c.ReadEvent @@ -88,36 +80,8 @@ func (h *middleware) configure(c *config) { h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName -} - -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } -} - -func (h *middleware) createMeasures() { - var err error - h.requestBytesCounter, err = h.meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - h.responseBytesCounter, err = h.meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - h.serverLatencyMeasure, err = h.meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) + h.semconv = semconv.NewHTTPServer(c.Meter) + h.metricAttributesFn = c.MetricAttributesFn } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -134,7 +98,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...), + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), } opts = append(opts, h.spanStartOptions...) @@ -156,6 +120,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } + if startTime := StartTimeFromContext(ctx); !startTime.IsZero() { + opts = append(opts, trace.WithTimestamp(startTime)) + requestStartTime = startTime + } + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) defer span.End() @@ -166,14 +135,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
+ bw := request.NewBodyWrapper(r.Body, readRecordFunc) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw + r.Body = bw } writeRecordFunc := func(int64) {} @@ -183,13 +150,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - rww := &respWriterWrapper{ - ResponseWriter: w, - record: writeRecordFunc, - ctx: ctx, - props: h.propagators, - statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything - } + rww := request.NewRespWriterWrapper(w, writeRecordFunc) // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -217,35 +178,49 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http next.ServeHTTP(w, r.WithContext(ctx)) - span.SetStatus(semconv.ServerStatus(rww.statusCode)) - span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ - StatusCode: rww.statusCode, - ReadBytes: bw.read.Load(), - ReadError: bw.err, - WriteBytes: rww.written, - WriteError: rww.err, + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), })...) - // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) - if rww.statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) - h.responseBytesCounter.Add(ctx, rww.written, addOpts...) - // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.serverLatencyMeasure.Record(ctx, elapsedTime, o) + metricAttributes := semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...), + } + + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: metricAttributes, + MetricData: semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, + }) +} + +func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if h.metricAttributesFn != nil { + attributeForRequest = h.metricAttributesFn(r) + } + return attributeForRequest } // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. 
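// (Editor's illustration, not part of the vendored source.) For example, a
// caller might register a handler so its spans carry the route pattern rather
// than the raw URL path:
//
//	mux.Handle("/users/", otelhttp.WithRouteTag("/users/{id}", usersHandler))
//
// where mux and usersHandler are assumed to exist in the caller's code.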
func WithRouteTag(route string, h http.Handler) http.Handler { - attr := semconv.NewHTTPServer().Route(route) + attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 000000000..a945f5566 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Closes closes the io.ReadCloser. +func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error. +func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 000000000..fbc344cbd --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations. 
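// (Editor's illustration, not part of the vendored source.) The otelhttp
// handler in this change wraps the ResponseWriter it receives roughly as
//
//	rww := request.NewRespWriterWrapper(w, func(n int64) { /* record bytes written */ })
//
// and later reads rww.StatusCode(), rww.BytesWritten(), and rww.Error() to
// populate span attributes and metrics.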
+type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that is being handled by a +// parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. +func (w *RespWriterWrapper) Flush() { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. +func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// BytesWritten returns the HTTP status code that was sent. +func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error. +func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 3ec0ad00c..eaf4c3796 100644 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -1,18 +1,28 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconv/env.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "context" "fmt" "net/http" "os" "strings" + "sync" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" ) +// OTelSemConvStabilityOptIn is an environment variable. +// That can be set to "old" or "http/dup" to opt into the new HTTP semantic conventions. +const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" + type ResponseTelemetry struct { StatusCode int ReadBytes int64 @@ -23,6 +33,16 @@ type ResponseTelemetry struct { type HTTPServer struct { duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram + + // New metrics + requestBodySizeHistogram metric.Int64Histogram + responseBodySizeHistogram metric.Int64Histogram + requestDurationHistogram metric.Float64Histogram } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -43,9 +63,9 @@ type HTTPServer struct { // The req Host will be used to determine the server instead. func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) } - return oldHTTPServer{}.RequestTraceAttrs(server, req) + return OldHTTPServer{}.RequestTraceAttrs(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. @@ -53,25 +73,20 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) } - return oldHTTPServer{}.ResponseTraceAttrs(resp) + return OldHTTPServer{}.ResponseTraceAttrs(resp) } // Route returns the attribute for the route. func (s HTTPServer) Route(route string) attribute.KeyValue { - return oldHTTPServer{}.Route(route) -} - -func NewHTTPServer() HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE")) - return HTTPServer{duplicate: env == "http/dup"} + return OldHTTPServer{}.Route(route) } -// ServerStatus returns a span status code and message for an HTTP status code +// Status returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. 
-func ServerStatus(code int) (codes.Code, string) { +func (s HTTPServer) Status(code int) (codes.Code, string) { if code < 100 || code >= 600 { return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) } @@ -80,3 +95,196 @@ func ServerStatus(code int) (codes.Code, string) { } return codes.Unset, "" } + +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { + Req *http.Request + StatusCode int + AdditionalAttributes []attribute.KeyValue +} + +type MetricData struct { + RequestSize int64 + ElapsedTime float64 +} + +var ( + metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, + } + + metricRecordOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.RecordOption{} + }, + } +) + +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + if s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) + s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) + } + + if s.duplicate && s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { + attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) + s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) 
+ s.requestDurationHistogram.Record(ctx, md.ElapsedTime, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) + } +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := env == "http/dup" + server := HTTPServer{ + duplicate: duplicate, + } + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) + if duplicate { + server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) + } + return server +} + +type HTTPClient struct { + duplicate bool + + // old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram + + // new metrics + requestBodySize metric.Int64Histogram + requestDuration metric.Float64Histogram +} + +func NewHTTPClient(meter metric.Meter) HTTPClient { + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := env == "http/dup" + client := HTTPClient{ + duplicate: duplicate, + } + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + if duplicate { + client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) + } + + return client +} + +// RequestTraceAttrs returns attributes for an HTTP request made by a client. +func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + if c.duplicate { + return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) + } + return OldHTTPClient{}.RequestTraceAttrs(req) +} + +// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. +func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + if c.duplicate { + return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) 
+ } + + return OldHTTPClient{}.ResponseTraceAttrs(resp) +} + +func (c HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + if c.duplicate { + return CurrentHTTPClient{}.ErrorType(err) + } + + return attribute.KeyValue{} +} + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["old"] = MetricOpts{ + measurement: set, + addOptions: set, + } + + if c.duplicate { + attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["new"] = MetricOpts{ + measurement: set, + addOptions: set, + } + } + + return opts +} + +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { + if s.requestBytesCounter == nil || s.latencyMeasure == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) + + if s.duplicate { + s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + s.requestDuration.Record(ctx, md.ElapsedTime, opts["new"].MeasurementOption()) + } +} + +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { + if s.responseBytesCounter == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). 
+ return + } + + s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) +} diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go new file mode 100644 index 000000000..32630864b --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +// Generate semconv package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={}" --out=bench_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={}" --out=env.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={}" --out=env_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={}" --out=util.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={}" --out=util_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={}" --out=v1.20.0.go diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go new file mode 100644 index 000000000..8c3c62751 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -0,0 +1,519 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/httpconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "fmt" + "net/http" + "reflect" + "slices" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type CurrentHTTPServer struct{} + +// TraceRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. 
+func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconvNew.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconvNew.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) + } + } + + if useragent := req.UserAgent(); useragent != "" { + attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconvNew.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +// TraceResponse returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
+func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} + +func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription), + ) + handleErr(err) + + responseBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerResponseBodySizeName, + metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription), + ) + handleErr(err) + requestDurationHistogram, err := meter.Float64Histogram( + semconvNew.HTTPServerRequestDurationName, + metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription), + ) + handleErr(err) + + return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram +} + +func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconvNew.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconvNew.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +type CurrentHTTPClient struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
+func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. + + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconvNew.URLFull(u)) + + attrs = append(attrs, semconvNew.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconvNew.ErrorTypeOther + } + + return semconvNew.ErrorTypeKey.String(value) +} + +func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySize, err := meter.Int64Histogram( + semconvNew.HTTPClientRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription), + ) + handleErr(err) + + requestDuration, err := meter.Float64Histogram( + semconvNew.HTTPClientRequestDurationName, + metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription), + ) + handleErr(err) + + return requestBodySize, requestDuration +} + +func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 2 + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + num++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + semconvNew.ServerAddress(requestHost), + n.scheme(req.TLS != nil), + ) + + if port > 0 { + attributes = append(attributes, semconvNew.ServerPort(port)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e7f293761..558efd059 100644 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ 
b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/util.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -9,18 +12,19 @@ import ( "strconv" "strings" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -// splitHostPort splits a network address hostport of the form "host", +// SplitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. -func splitHostPort(hostport string) (host string, port int) { +func SplitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { @@ -49,7 +53,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Byte size checked 16 above. } func requiredHTTPPort(https bool, port int) int { // nolint:revive @@ -89,3 +93,19 @@ var methodLookup = map[string]attribute.KeyValue{ http.MethodPut: semconvNew.HTTPRequestMethodPut, http.MethodTrace: semconvNew.HTTPRequestMethodTrace, } + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} + +func standardizeHTTPMethod(method string) string { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return method +} diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c3e838aaa..57d1507b6 100644 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/v120.0.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -7,13 +10,16 @@ import ( "errors" "io" "net/http" + "slices" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -type oldHTTPServer struct{} +type OldHTTPServer struct{} // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -31,14 +37,14 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. 
-func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { return semconvutil.HTTPServerRequest(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { attributes := []attribute.KeyValue{} if resp.ReadBytes > 0 { @@ -63,7 +69,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (o oldHTTPServer) Route(route string) attribute.KeyValue { +func (o OldHTTPServer) Route(route string) attribute.KeyValue { return semconv.HTTPRoute(route) } @@ -72,3 +78,189 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue { func HTTPStatusCode(status int) attribute.KeyValue { return semconv.HTTPStatusCode(status) } + +// Server HTTP metrics. +const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type OldHTTPClient struct{} + +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} + +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.status_code int + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), + semconv.NetPeerName(requestHost), + ) + + if port > 0 { + attributes = append(attributes, semconv.NetPeerPort(port)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +// Client HTTP metrics. 
+const ( + clientRequestSize = "http.client.request.size" // Incoming request bytes total + clientResponseSize = "http.client.response.size" // Incoming response bytes total + clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds +) + +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + requestBytesCounter, err := meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + latencyMeasure, err := meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, latencyMeasure +} diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go deleted file mode 100644 index 0c5d4c460..000000000 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - -import ( - "net/http" - "strings" - - "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" -) - -type newHTTPServer struct{} - -// TraceRequest returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { - count := 3 // ServerAddress, Method, Scheme - - var host string - var p int - if server == "" { - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) - } - } - - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - count++ - } - - method, methodOriginal := n.method(req.Method) - if methodOriginal != (attribute.KeyValue{}) { - count++ - } - - scheme := n.scheme(req.TLS != nil) - - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. - count++ - if peerPort > 0 { - count++ - } - } - - useragent := req.UserAgent() - if useragent != "" { - count++ - } - - clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) - if clientIP != "" { - count++ - } - - if req.URL != nil && req.URL.Path != "" { - count++ - } - - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" && protoName != "http" { - count++ - } - if protoVersion != "" { - count++ - } - - attrs := make([]attribute.KeyValue, 0, count) - attrs = append(attrs, - semconvNew.ServerAddress(host), - method, - scheme, - ) - - if hostPort > 0 { - attrs = append(attrs, semconvNew.ServerPort(hostPort)) - } - if methodOriginal != (attribute.KeyValue{}) { - attrs = append(attrs, methodOriginal) - } - - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. - attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) - if peerPort > 0 { - attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) - } - } - - if useragent := req.UserAgent(); useragent != "" { - attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) - } - - if clientIP != "" { - attrs = append(attrs, semconvNew.ClientAddress(clientIP)) - } - - if req.URL != nil && req.URL.Path != "" { - attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) - } - - if protoName != "" && protoName != "http" { - attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - return attrs -} - -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { - if method == "" { - return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} - } - if attr, ok := methodLookup[method]; ok { - return attr, attribute.KeyValue{} - } - - orig := semconvNew.HTTPRequestMethodOriginal(method) - if attr, ok := methodLookup[strings.ToUpper(method)]; ok { - return attr, orig - } - return semconvNew.HTTPRequestMethodGet, orig -} - -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive - if https { - return semconvNew.URLScheme("https") - } - return semconvNew.URLScheme("http") -} - -// TraceResponse returns trace attributes for telemetry from an HTTP response. -// -// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
-func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - var count int - - if resp.ReadBytes > 0 { - count++ - } - if resp.WriteBytes > 0 { - count++ - } - if resp.StatusCode > 0 { - count++ - } - - attributes := make([]attribute.KeyValue, 0, count) - - if resp.ReadBytes > 0 { - attributes = append(attributes, - semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), - ) - } - if resp.WriteBytes > 0 { - attributes = append(attributes, - semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), - ) - } - if resp.StatusCode > 0 { - attributes = append(attributes, - semconvNew.HTTPResponseStatusCode(resp.StatusCode), - ) - } - - return attributes -} - -// Route returns the attribute for the route. -func (n newHTTPServer) Route(route string) attribute.KeyValue { - return semconvNew.HTTPRoute(route) -} diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index a9a9226b3..b80a1db61 100644 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. } func netProtocol(proto string) (name string, version string) { diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go new file mode 100644 index 000000000..9476ef01b --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "time" +) + +type startTimeContextKeyType int + +const startTimeContextKey startTimeContextKeyType = 0 + +// ContextWithStartTime returns a new context with the provided start time. The +// start time will be used for metrics and traces emitted by the +// instrumentation. Only one labeller can be injected into the context. +// Injecting it multiple times will override the previous calls. +func ContextWithStartTime(parent context.Context, start time.Time) context.Context { + return context.WithValue(parent, startTimeContextKey, start) +} + +// StartTimeFromContext retrieves a time.Time from the provided context if one +// is available. If no start time was found in the provided context, a new, +// zero start time is returned and the second return value is false. 
+func StartTimeFromContext(ctx context.Context) time.Time { + t, _ := ctx.Value(startTimeContextKey).(time.Time) + return t +} diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 0d3cb2e4a..44b86ad86 100644 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,13 +11,13 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) @@ -26,17 +26,15 @@ import ( type Transport struct { rt http.RoundTripper - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace - - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + + semconv semconv.HTTPClient } var _ http.RoundTripper = &Transport{} @@ -63,43 +61,19 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) t.applyConfig(c) - t.createMeasures() return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer - t.meter = c.Meter t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace -} - -func (t *Transport) createMeasures() { - var err error - t.requestBytesCounter, err = t.meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - t.responseBytesCounter, err = t.meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - t.latencyMeasure, err = t.meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) + t.semconv = semconv.NewHTTPClient(c.Meter) + t.metricAttributesFn = c.MetricAttributesFn } func defaultTransportFormatter(_ string, r *http.Request) string { @@ -143,54 +117,68 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. 
- // use a body wrapper to determine the request size - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - // noop to prevent nil panic. not using this record fun yet. - bw.record = func(int64) {} - r.Body = &bw + r.Body = bw } - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) if err != nil { - span.RecordError(err) + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + span.SetStatus(codes.Error, err.Error()) span.End() return res, err } // metrics - metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) - if res.StatusCode > 0 { - metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) + metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ + Req: r, + StatusCode: res.StatusCode, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + }) + // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, addOpts...) + t.semconv.RecordResponseSize(ctx, n, metricOpts) } // traces - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) res.Body = newWrappedBody(span, readRecordFunc, res.Body) // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - t.latencyMeasure.Record(ctx, elapsedTime, o) + t.semconv.RecordMetrics(ctx, semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, metricOpts) - return res, err + return res, nil +} + +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest } // newWrappedBody returns a new and appropriately scoped *wrappedBody as an diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index b0957f28c..386f09e1b 100644 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.53.0" + return "0.59.0" // This string is updated by the pre_release.sh script during release } diff --git a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index 948f8406c..000000000 --- a/constraint/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - -import ( - "context" - "io" - "net/http" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error. -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read atomic.Int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read.Add(n1) - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the first written statusCode. -// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. 
-type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -// WriteHeader persists initial statusCode for span attribution. -// All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. -// Blocking consecutive calls to WriteHeader alters expected behavior and will -// remove warning logs from net/http where developers will notice incorrect handler implementations. -func (w *respWriterWrapper) WriteHeader(statusCode int) { - if !w.wroteHeader { - w.wroteHeader = true - w.statusCode = statusCode - } - w.ResponseWriter.WriteHeader(statusCode) -} - -func (w *respWriterWrapper) Flush() { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - - if f, ok := w.ResponseWriter.(http.Flusher); ok { - f.Flush() - } -} diff --git a/constraint/vendor/go.opentelemetry.io/otel/.gitignore b/constraint/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664b..ae8577ef3 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/.gitignore +++ b/constraint/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/constraint/vendor/go.opentelemetry.io/otel/.golangci.yml b/constraint/vendor/go.opentelemetry.io/otel/.golangci.yml index 6d9c8b649..ce3f40b60 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/constraint/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,6 +9,8 @@ linters: disable-all: true # Specifically enable linters we want to use. enable: + - asasalint + - bodyclose - depguard - errcheck - errorlint @@ -20,13 +22,16 @@ linters: - govet - ineffassign - misspell + - perfsprint - revive - staticcheck - tenv + - testifylint - typecheck - unconvert - unused - unparam + - usestdlibvars issues: # Maximum issues count per one linter. @@ -58,16 +63,17 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive - # It's okay to not run gosec in a test. + # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec - # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + - perfsprint + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" linters: - gosec - # Igonoring gosec G402: TLS MinVersion too low + # Ignoring gosec G402: TLS MinVersion too low # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - text: "G402: TLS MinVersion too low." 
linters: @@ -92,6 +98,13 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -124,8 +137,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" @@ -153,6 +164,12 @@ linters-settings: locale: US ignore-words: - cancelled + perfsprint: + err-error: true + errorf: true + int-conversion: true + sprintf1: true + strconcat: true revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. @@ -300,3 +317,9 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error diff --git a/constraint/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/constraint/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c01e6998e..599d59cd1 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/constraint/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,199 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.34.0/0.56.0/0.10.0] 2025-01-17 + +### Changed + +- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167) + +### Fixed + +- Relax minimum Go version to 1.22.0 in various modules. (#6073) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlphttpgrpc` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143) + +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. 
(#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. 
(#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + +## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862) +- Add `WithExportBufferSize` option to log batch processor.(#5877) + +### Changed + +- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778) +- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791) +- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) +- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) +- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) +- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) +- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) + +### Deprecated + +- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) + +### Fixed + +- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. 
(#5819) +- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) + +## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 + +### Added + +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739) +- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773) +- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773) +- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) + +### Fixed + +- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) +- Fix panic on instruments creation when setting meter provider. (#5758) +- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) + +### Removed + +- Drop support for [Go 1.21]. (#5736, #5740, #5800) + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. 
(#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + ## [1.28.0/0.50.0/0.4.0] 2024-07-02 ### Added @@ -49,6 +242,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix stale timestamps reported by the last-value aggregation. 
(#5517) - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) ## [1.27.0/0.49.0/0.3.0] 2024-05-21 @@ -175,7 +369,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- ARM64 platform to the compatibility testing suite. (#4994) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -1836,7 +2030,7 @@ with major version 0. - Setting error status while recording error with Span from oteltest package. (#1729) - The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) @@ -2410,7 +2604,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-colector example to use the v0.5.0 collector. (#915) +- Update otel-collector example to use the v0.5.0 collector. (#915) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. @@ -3003,7 +3197,13 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD +[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 +[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 +[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 @@ -3086,6 +3286,9 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 diff --git a/constraint/vendor/go.opentelemetry.io/otel/CODEOWNERS b/constraint/vendor/go.opentelemetry.io/otel/CODEOWNERS index 202554933..945a07d2b 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/constraint/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,13 +5,13 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/constraint/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/constraint/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index b86572f58..22a2e9dbd 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/constraint/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -578,7 +578,10 @@ See also: The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the -absence of race conditions. +absence of race conditions. The top-level tests with this term will be run +many times in the `test-concurrent-safe` CI job to increase the chance of +catching concurrency issues. This does not apply to subtests when this term +is not in their root name. ### Internal packages @@ -626,13 +629,14 @@ should be canceled. 
## Approvers and Maintainers -### Approvers +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent -- [Chester Cheung](https://github.com/hanyuancheung), Tencent +### Approvers ### Maintainers -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Damien Mathieu](https://github.com/dmathieu), Elastic - [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk @@ -641,16 +645,18 @@ should be canceled. ### Emeritus -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Evan Torrie](https://github.com/evantorrie), Yahoo +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/constraint/vendor/go.opentelemetry.io/otel/Makefile b/constraint/vendor/go.opentelemetry.io/otel/Makefile index f33619f76..a7f6d8cc6 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/Makefile +++ b/constraint/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -145,12 +142,14 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. 
test-short: ARGS=-short test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 $(TEST_TARGETS): test test: $(OTEL_GO_MOD_DIRS:%=test/%) test/%: DIR=$* @@ -178,17 +177,14 @@ test-coverage: $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix @@ -239,6 +235,16 @@ govulncheck/%: $(GOVULNCHECK) codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ @@ -264,7 +270,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/constraint/vendor/go.opentelemetry.io/otel/README.md b/constraint/vendor/go.opentelemetry.io/otel/README.md index 5a8909317..d9a192076 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/README.md +++ b/constraint/vendor/go.opentelemetry.io/otel/README.md @@ -1,6 +1,6 @@ # OpenTelemetry-Go -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) @@ -47,20 +47,22 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. -| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -87,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. ### Export diff --git a/constraint/vendor/go.opentelemetry.io/otel/RELEASING.md b/constraint/vendor/go.opentelemetry.io/otel/RELEASING.md index 940f57f3d..4ebef4f9d 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/constraint/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). + - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. 
Push the changes to upstream and create a Pull Request on GitHub. @@ -110,17 +111,6 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. - ## Post-Release ### Contrib Repository @@ -140,6 +130,6 @@ Importantly, bump any package versions referenced to be the latest one you just Bump the dependencies in the following Go services: -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) +- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) +- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) diff --git a/constraint/vendor/go.opentelemetry.io/otel/VERSIONING.md b/constraint/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e362..b8cb605c1 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/constraint/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). 
diff --git a/constraint/vendor/go.opentelemetry.io/otel/attribute/set.go b/constraint/vendor/go.opentelemetry.io/otel/attribute/set.go index bff9c7fdb..6cbefcead 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/constraint/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [1]KeyValue(kvs) case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [2]KeyValue(kvs) case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [3]KeyValue(kvs) case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [4]KeyValue(kvs) case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [5]KeyValue(kvs) case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [6]KeyValue(kvs) case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [7]KeyValue(kvs) case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [8]KeyValue(kvs) case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [9]KeyValue(kvs) case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [10]KeyValue(kvs) default: return nil } diff --git a/constraint/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/constraint/vendor/go.opentelemetry.io/otel/baggage/baggage.go index c40c896cc..0e1fe2422 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/constraint/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -44,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. // +// The passed key must be valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) { // Notice: Consider using [NewKeyValuePropertyRaw] instead // that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. 
+// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as Property key. func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -115,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -203,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -230,6 +265,10 @@ type Member struct { // Notice: Consider using [NewMemberRaw] instead // that does not require percent-encoding of the value. func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) { // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric value are strongly recommended to be used as baggage key. 
func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } // Decode a percent-encoded value. - value, err := url.PathUnescape(val) + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(c) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { @@ -314,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { s += propertyDelimiter + m.properties.String() @@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. +// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -469,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is // replaced. 
// // If member is invalid according to the W3C Baggage specification, an error @@ -528,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) { } // Decode a percent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ '~': true, } +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool { return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { diff --git a/constraint/vendor/go.opentelemetry.io/otel/codes/codes.go b/constraint/vendor/go.opentelemetry.io/otel/codes/codes.go index df29d96a6..49a35b122 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/constraint/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} @@ -83,7 +84,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/constraint/vendor/go.opentelemetry.io/otel/doc.go b/constraint/vendor/go.opentelemetry.io/otel/doc.go index 441c59501..921f85961 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/doc.go +++ b/constraint/vendor/go.opentelemetry.io/otel/doc.go @@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. 
+ To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. */ diff --git a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index f6dd3decc..2e7690e43 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco return nil } return &commonpb.InstrumentationScope{ - Name: il.Name, - Version: il.Version, + Name: il.Name, + Version: il.Version, + Attributes: Iterator(il.Attributes.Iter()), } } diff --git a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index c3c69c5a0..bf27ef022 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -4,6 +4,8 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( + "math" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -95,16 +97,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { SpanId: sid[:], TraceState: sd.SpanContext().TraceState().String(), Status: status(sd.Status().Code, sd.Status().Description), - StartTimeUnixNano: uint64(sd.StartTime().UnixNano()), - EndTimeUnixNano: uint64(sd.EndTime().UnixNano()), + StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked. + EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked. Links: links(sd.Links()), Kind: spanKind(sd.SpanKind()), Name: sd.Name(), Attributes: KeyValues(sd.Attributes()), Events: spanEvents(sd.Events()), - DroppedAttributesCount: uint32(sd.DroppedAttributes()), - DroppedEventsCount: uint32(sd.DroppedEvents()), - DroppedLinksCount: uint32(sd.DroppedLinks()), + DroppedAttributesCount: clampUint32(sd.DroppedAttributes()), + DroppedEventsCount: clampUint32(sd.DroppedEvents()), + DroppedLinksCount: clampUint32(sd.DroppedLinks()), } if psid := sd.Parent().SpanID(); psid.IsValid() { @@ -115,6 +117,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { return s } +func clampUint32(v int) uint32 { + if v < 0 { + return 0 + } + if int64(v) > math.MaxUint32 { + return math.MaxUint32 + } + return uint32(v) // nolint: gosec // Overflow/Underflow checked. +} + // status transform a span code and message into an OTLP span status. 
func status(status codes.Code, message string) *tracepb.Status { var c tracepb.Status_StatusCode @@ -153,7 +165,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { TraceId: tid[:], SpanId: sid[:], Attributes: KeyValues(otLink.Attributes), - DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), + DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount), Flags: flags, }) } @@ -166,7 +178,7 @@ func buildSpanFlags(sc trace.SpanContext) uint32 { flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK } - return uint32(flags) + return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative } // spanEvents transforms span Events to an OTLP span events. @@ -180,9 +192,9 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { for i := 0; i < len(es); i++ { events[i] = &tracepb.Span_Event{ Name: es[i].Name, - TimeUnixNano: uint64(es[i].Time.UnixNano()), + TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. Attributes: KeyValues(es[i].Attributes), - DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), + DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount), } } return events diff --git a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 3993df927..8409b5f8f 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context } if c.metadata.Len() > 0 { - ctx = metadata.NewOutgoingContext(ctx, c.metadata) + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) } // Unify the client stopCtx with the parent. @@ -289,7 +294,7 @@ func (c *client) MarshalLog() interface{} { Type string Endpoint string }{ - Type: "otlphttpgrpc", + Type: "otlptracegrpc", Endpoint: c.endpoint, } } diff --git a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go index e783b57ac..b7bd429ff 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go +++ b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go @@ -12,9 +12,8 @@ The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. -The value must contain a host. -The value may additionally a port, a scheme, and a path. -The value accepts "http" and "https" scheme. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port, and a path. The value should not contain a query string or fragment. OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. 
diff --git a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 9513c0a57..4abf48d1f 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 8f84a7996..0a317d926 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -98,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). 
if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 14ad8c33b..f156ee667 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/constraint/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.28.0" + return "1.34.0" } diff --git a/constraint/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/constraint/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 822d84794..691d96c75 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/constraint/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice into with same elements as array. @@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. @@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice into with same elements as array. 
@@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/constraint/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/constraint/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 3a0cc42f6..ae92a4251 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/constraint/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git a/constraint/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/constraint/vendor/go.opentelemetry.io/otel/internal/global/meter.go index cfd1df9bf..a6acd8dca 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/constraint/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,8 +5,9 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" + "reflect" "sync" - "sync/atomic" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" @@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if 
p.meters == nil { @@ -76,7 +78,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me return val } - t := &meter{name: name, opts: opts} + t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} p.meters[key] = t return t } @@ -92,17 +94,29 @@ type meter struct { opts []metric.MeterOption mtx sync.Mutex - instruments []delegatedInstrument + instruments map[instID]delegatedInstrument registry list.List - delegate atomic.Value // metric.Meter + delegate metric.Meter } type delegatedInstrument interface { setDelegate(metric.Meter) } +// instID are the identifying properties of a instrument. +type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream. + unit string +} + // setDelegate configures m to delegate all Meter functionality to Meters // created by provider. // @@ -110,12 +124,12 @@ type delegatedInstrument interface { // // It is guaranteed by the caller that this happens only once. func (m *meter) setDelegate(provider metric.MeterProvider) { - meter := provider.Meter(m.name, m.opts...) - m.delegate.Store(meter) - m.mtx.Lock() defer m.mtx.Unlock() + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + for _, inst := range m.instruments { inst.setDelegate(meter) } @@ -133,169 +147,336 @@ func (m *meter) setDelegate(provider metric.MeterProvider) { } func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + + cfg := metric.NewInt64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } i := &siCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + + cfg := metric.NewInt64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } i := &siUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + + cfg := metric.NewInt64HistogramConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*siHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } i := &siHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) + } + + cfg := metric.NewInt64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } i := &siGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } i := &aiCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } i := &aiUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + + cfg := metric.NewInt64ObservableGaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*aiGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } i := &aiGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) + } + + cfg := metric.NewFloat64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } i := &sfCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + + cfg := metric.NewFloat64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } i := &sfUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + + cfg := metric.NewFloat64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } i := &sfHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + + cfg := metric.NewFloat64GaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*sfGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } i := &sfGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } i := &afCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } i := &afUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } i := &afGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } // RegisterCallback captures the function that will be called during Collect. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - insts = unwrapInstruments(insts) - return del.RegisterCallback(f, insts...) - } - m.mtx.Lock() defer m.mtx.Unlock() + if m.delegate != nil { + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) 
+ } + reg := &registration{instruments: insts, function: f} e := m.registry.PushBack(reg) reg.unreg = func() error { @@ -307,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -335,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. +func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) +} + +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -346,9 +575,10 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) + reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
if err != nil { GetErrorHandler().Handle(err) + return } c.unreg = reg.Unregister diff --git a/constraint/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/constraint/vendor/go.opentelemetry.io/otel/internal/global/trace.go index e31f442b4..8982aa0dc 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/constraint/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -25,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -87,6 +88,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -102,7 +104,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct{ name, version, schema string } +type il struct { + name string + version string + schema string + attrs attribute.Set +} // tracer is a placeholder for a trace.Tracer. // @@ -139,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this will point to false and mean a tracer will return +// a nonRecordingSpan by default. +var autoInstEnabled = new(bool) + +func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that pointer can be reliably determined, it + // should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...) + } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s diff --git a/constraint/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/constraint/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 3e7bb3b35..b2fe3e41d 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/constraint/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -20,11 +20,13 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { - return uint64(i) + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -36,9 +38,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). 
+ return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/constraint/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/constraint/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index cf23db778..f8435d8f2 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/constraint/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -213,7 +213,7 @@ type Float64Observer interface { } // Float64Callback is a function registered with a Meter that makes -// observations for a Float64Observerable instrument it is registered with. +// observations for a Float64Observable instrument it is registered with. // Calls to the Float64Observer record measurement values for the // Float64Observable. // diff --git a/constraint/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/constraint/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index c82ba5324..e079aaef1 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/constraint/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -212,7 +212,7 @@ type Int64Observer interface { } // Int64Callback is a function registered with a Meter that makes observations -// for an Int64Observerable instrument it is registered with. Calls to the +// for an Int64Observable instrument it is registered with. Calls to the // Int64Observer record measurement values for the Int64Observable. // // The function needs to complete in a finite amount of time and the deadline diff --git a/constraint/vendor/go.opentelemetry.io/otel/metric/instrument.go b/constraint/vendor/go.opentelemetry.io/otel/metric/instrument.go index ea52e4023..a535782e1 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/constraint/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) +// WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/constraint/vendor/go.opentelemetry.io/otel/metric/meter.go b/constraint/vendor/go.opentelemetry.io/otel/metric/meter.go index 6a7991e01..14e08c24a 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/constraint/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -52,6 +52,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational @@ -61,6 +62,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. 
The instrument is used to // synchronously record the distribution of int64 measurements during a @@ -70,6 +72,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous int64 measurements during a computational operation. @@ -78,6 +81,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -92,6 +96,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record int64 measurements once per @@ -106,6 +111,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -130,6 +136,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational @@ -139,6 +146,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a @@ -148,6 +156,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous float64 measurements during a computational operation. @@ -156,6 +165,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. 
The // instrument is used to asynchronously record increasing float64 @@ -170,6 +180,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -184,6 +195,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -242,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/constraint/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/constraint/vendor/go.opentelemetry.io/otel/metric/noop/README.md new file mode 100644 index 000000000..bb8969435 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/otel/metric/noop/README.md @@ -0,0 +1,3 @@ +# Metric Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop) diff --git a/constraint/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/constraint/vendor/go.opentelemetry.io/otel/metric/noop/noop.go new file mode 100644 index 000000000..ca6fcbdc0 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -0,0 +1,281 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package noop provides an implementation of the OpenTelemetry metric API that +// produces no telemetry and minimizes used computation resources. +// +// Using this package to implement the OpenTelemetry metric API will +// effectively disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry metric API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement. +package noop // import "go.opentelemetry.io/otel/metric/noop" + +import ( + "context" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. 
+ + _ metric.MeterProvider = MeterProvider{} + _ metric.Meter = Meter{} + _ metric.Observer = Observer{} + _ metric.Registration = Registration{} + _ metric.Int64Counter = Int64Counter{} + _ metric.Float64Counter = Float64Counter{} + _ metric.Int64UpDownCounter = Int64UpDownCounter{} + _ metric.Float64UpDownCounter = Float64UpDownCounter{} + _ metric.Int64Histogram = Int64Histogram{} + _ metric.Float64Histogram = Float64Histogram{} + _ metric.Int64Gauge = Int64Gauge{} + _ metric.Float64Gauge = Float64Gauge{} + _ metric.Int64ObservableCounter = Int64ObservableCounter{} + _ metric.Float64ObservableCounter = Float64ObservableCounter{} + _ metric.Int64ObservableGauge = Int64ObservableGauge{} + _ metric.Float64ObservableGauge = Float64ObservableGauge{} + _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{} + _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{} + _ metric.Int64Observer = Int64Observer{} + _ metric.Float64Observer = Float64Observer{} +) + +// MeterProvider is an OpenTelemetry No-Op MeterProvider. +type MeterProvider struct{ embedded.MeterProvider } + +// NewMeterProvider returns a MeterProvider that does not record any telemetry. +func NewMeterProvider() MeterProvider { + return MeterProvider{} +} + +// Meter returns an OpenTelemetry Meter that does not record any telemetry. +func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { + return Meter{} +} + +// Meter is an OpenTelemetry No-Op Meter. +type Meter struct{ embedded.Meter } + +// Int64Counter returns a Counter used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) { + return Int64Counter{}, nil +} + +// Int64UpDownCounter returns an UpDownCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + return Int64UpDownCounter{}, nil +} + +// Int64Histogram returns a Histogram used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + return Int64Histogram{}, nil +} + +// Int64Gauge returns a Gauge used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + return Int64Gauge{}, nil +} + +// Int64ObservableCounter returns an ObservableCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + return Int64ObservableCounter{}, nil +} + +// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to +// record int64 measurements that produces no telemetry. +func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + return Int64ObservableUpDownCounter{}, nil +} + +// Int64ObservableGauge returns an ObservableGauge used to record int64 +// measurements that produces no telemetry. +func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + return Int64ObservableGauge{}, nil +} + +// Float64Counter returns a Counter used to record int64 measurements that +// produces no telemetry. 
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) { + return Float64Counter{}, nil +} + +// Float64UpDownCounter returns an UpDownCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + return Float64UpDownCounter{}, nil +} + +// Float64Histogram returns a Histogram used to record int64 measurements that +// produces no telemetry. +func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + return Float64Histogram{}, nil +} + +// Float64Gauge returns a Gauge used to record float64 measurements that +// produces no telemetry. +func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + return Float64Gauge{}, nil +} + +// Float64ObservableCounter returns an ObservableCounter used to record int64 +// measurements that produces no telemetry. +func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + return Float64ObservableCounter{}, nil +} + +// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to +// record int64 measurements that produces no telemetry. +func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + return Float64ObservableUpDownCounter{}, nil +} + +// Float64ObservableGauge returns an ObservableGauge used to record int64 +// measurements that produces no telemetry. +func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + return Float64ObservableGauge{}, nil +} + +// RegisterCallback performs no operation. +func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) { + return Registration{}, nil +} + +// Observer acts as a recorder of measurements for multiple instruments in a +// Callback, it performing no operation. +type Observer struct{ embedded.Observer } + +// ObserveFloat64 performs no operation. +func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) { +} + +// ObserveInt64 performs no operation. +func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) { +} + +// Registration is the registration of a Callback with a No-Op Meter. +type Registration struct{ embedded.Registration } + +// Unregister unregisters the Callback the Registration represents with the +// No-Op Meter. This will always return nil because the No-Op Meter performs no +// operation, including hold any record of registrations. +func (Registration) Unregister() error { return nil } + +// Int64Counter is an OpenTelemetry Counter used to record int64 measurements. +// It produces no telemetry. +type Int64Counter struct{ embedded.Int64Counter } + +// Add performs no operation. +func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64Counter is an OpenTelemetry Counter used to record float64 +// measurements. It produces no telemetry. +type Float64Counter struct{ embedded.Float64Counter } + +// Add performs no operation. +func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64 +// measurements. It produces no telemetry. 
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } + +// Add performs no operation. +func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record +// float64 measurements. It produces no telemetry. +type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } + +// Add performs no operation. +func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64Histogram is an OpenTelemetry Histogram used to record int64 +// measurements. It produces no telemetry. +type Int64Histogram struct{ embedded.Int64Histogram } + +// Record performs no operation. +func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Histogram is an OpenTelemetry Histogram used to record float64 +// measurements. It produces no telemetry. +type Float64Histogram struct{ embedded.Float64Histogram } + +// Record performs no operation. +func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableCounter struct { + metric.Int64Observable + embedded.Int64ObservableCounter +} + +// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableCounter struct { + metric.Float64Observable + embedded.Float64ObservableCounter +} + +// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableGauge struct { + metric.Int64Observable + embedded.Int64ObservableGauge +} + +// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableGauge struct { + metric.Float64Observable + embedded.Float64ObservableGauge +} + +// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record int64 measurements. It produces no telemetry. +type Int64ObservableUpDownCounter struct { + metric.Int64Observable + embedded.Int64ObservableUpDownCounter +} + +// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record float64 measurements. It produces no telemetry. +type Float64ObservableUpDownCounter struct { + metric.Float64Observable + embedded.Float64ObservableUpDownCounter +} + +// Int64Observer is a recorder of int64 measurements that performs no operation. +type Int64Observer struct{ embedded.Int64Observer } + +// Observe performs no operation. +func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} + +// Float64Observer is a recorder of float64 measurements that performs no +// operation. +type Float64Observer struct{ embedded.Float64Observer } + +// Observe performs no operation. 
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} diff --git a/constraint/vendor/go.opentelemetry.io/otel/renovate.json b/constraint/vendor/go.opentelemetry.io/otel/renovate.json index 8c5ac55ca..4f80c898a 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/renovate.json +++ b/constraint/vendor/go.opentelemetry.io/otel/renovate.json @@ -15,10 +15,12 @@ "enabled": true }, { - "matchFileNames": ["internal/tools/**"], - "matchManagers": ["gomod"], - "matchDepTypes": ["indirect"], - "enabled": false + "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], + "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" } ] } diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go index f4d1857c4..f2cdf3c65 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -4,5 +4,6 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Library represents the instrumentation library. -// Deprecated: please use Scope instead. +// +// Deprecated: use [Scope] instead. type Library = Scope diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 728115045..34852a47b 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -3,6 +3,8 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" +import "go.opentelemetry.io/otel/attribute" + // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the @@ -12,4 +14,6 @@ type Scope struct { Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string + // Attributes of the telemetry emitted by the scope. + Attributes attribute.Set } diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index 95a61d61d..c02aeefdd 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" ) // ErrPartialResource is returned by a detector when complete source @@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. 
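For context on the go.opentelemetry.io/otel/metric/noop package vendored above: a minimal sketch of how a caller might use it to disable metrics entirely. The meter and instrument names are illustrative and not taken from this repository.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	// Install a MeterProvider that records nothing; every instrument it
	// hands out is a no-op.
	otel.SetMeterProvider(noop.NewMeterProvider())

	meter := otel.Meter("example")               // illustrative scope name
	counter, _ := meter.Int64Counter("requests") // a no-op instrument
	counter.Add(context.Background(), 1)         // does nothing, produces no telemetry
}
```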
func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( - r *Resource - errs detectErrs - err error + r *Resource + err error + e error ) for _, detector := range detectors { if detector == nil { continue } - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { + r, e = detector.Detect(ctx) + if e != nil { + err = errors.Join(err, e) + if !errors.Is(e, ErrPartialResource) { continue } } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) + r, e = Merge(res, r) + if e != nil { + err = errors.Join(err, e) } *res = *r } - if len(errs) == 0 { - return nil - } - if errors.Is(errs, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. - res.schemaURL = "" - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} + if err != nil { + if errors.Is(err, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] + err = fmt.Errorf("error detecting resource: %w", err) } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) + return err } diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 6ac1cdbf7..cf3c88e15 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,15 +20,13 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. + // resource.New() to explicitly disable them. telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. + // resource.New() to explicitly disable them. host struct{} stringDetector struct { diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 71386e2da..3677c83d7 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -10,17 +10,16 @@ import ( "golang.org/x/sys/windows/registry" ) -// implements hostIDReader +// implements hostIDReader. type hostIDReaderWindows struct{} -// read reads MachineGuid from the windows registry key: -// SOFTWARE\Microsoft\Cryptography +// read reads MachineGuid from the Windows registry key: +// SOFTWARE\Microsoft\Cryptography. 
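The rewritten detect loop above replaces the custom detectErrs slice with the standard library's errors.Join. A small standalone sketch, using a hypothetical sentinel error, of the semantics it relies on: joined errors still match via errors.Is.

```go
package main

import (
	"errors"
	"fmt"
)

var errPartial = errors.New("partial resource") // stand-in for ErrPartialResource

func main() {
	var err error
	// Accumulate failures the way detect now does, instead of collecting
	// them in a bespoke error slice type.
	err = errors.Join(err, errPartial)
	err = errors.Join(err, errors.New("merge failed"))

	fmt.Println(errors.Is(err, errPartial)) // true: errors.Is unwraps joined errors
	fmt.Println(err)                        // prints both messages, one per line
}
```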
func (*hostIDReaderWindows) read() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, registry.QUERY_VALUE|registry.WOW64_64KEY, ) - if err != nil { return "", err } diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go index 5e3d199d7..a6a5a53c0 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -17,7 +17,6 @@ import ( func platformOSDescription() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - if err != nil { return "", err } diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 1d399a75d..ccc97e1b6 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { // // It is up to the exporter to implement any type of retry logic if a batch is failing // to be exported, since it is specific to the protocol and backend being sent to. + clear(bsp.batch) // Erase elements to let GC collect objects bsp.batch = bsp.batch[:0] if err != nil { @@ -316,7 +317,11 @@ func (bsp *batchSpanProcessor) processQueue() { bsp.batchMutex.Unlock() if shouldExport { if !bsp.timer.Stop() { - <-bsp.timer.C + // Handle both GODEBUG=asynctimerchan=[0|1] properly. + select { + case <-bsp.timer.C: + default: + } } if err := bsp.exportSpans(ctx); err != nil { otel.Handle(err) diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go index 821c83faa..8c308dd60 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -12,25 +12,26 @@ import ( // evictedQueue is a FIFO queue with a configurable capacity. type evictedQueue[T any] struct { - queue []T - capacity int - droppedCount int - logDropped func() + queue []T + capacity int + droppedCount int + logDroppedMsg string + logDroppedOnce sync.Once } func newEvictedQueueEvent(capacity int) evictedQueue[Event] { // Do not pre-allocate queue, do this lazily. return evictedQueue[Event]{ - capacity: capacity, - logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), + capacity: capacity, + logDroppedMsg: "limit reached: dropping trace trace.Event", } } func newEvictedQueueLink(capacity int) evictedQueue[Link] { // Do not pre-allocate queue, do this lazily. return evictedQueue[Link]{ - capacity: capacity, - logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), + capacity: capacity, + logDroppedMsg: "limit reached: dropping trace trace.Link", } } @@ -53,6 +54,10 @@ func (eq *evictedQueue[T]) add(value T) { eq.queue = append(eq.queue, value) } +func (eq *evictedQueue[T]) logDropped() { + eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) }) +} + // copy returns a copy of the evictedQueue. 
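The batch span processor change above stops assuming a value is always pending on the timer channel after a failed Stop, which is no longer guaranteed under GODEBUG=asynctimerchan. A standalone sketch of that drain pattern; the function and variable names are illustrative.

```go
package main

import "time"

// drainAndReset stops t and clears any pending tick without blocking,
// then re-arms it. This mirrors the select/default drain used above.
func drainAndReset(t *time.Timer, d time.Duration) {
	if !t.Stop() {
		select {
		case <-t.C: // a tick was buffered; consume it
		default: // channel already empty; a bare receive would block forever
		}
	}
	t.Reset(d)
}

func main() {
	t := time.NewTimer(time.Millisecond)
	time.Sleep(5 * time.Millisecond) // let the timer fire
	drainAndReset(t, time.Second)
}
```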
func (eq *evictedQueue[T]) copy() []T { return slices.Clone(eq.queue) diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 14c2e5beb..185aa7c08 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name = defaultTracerName } is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } t, ok := func() (trace.Tracer, bool) { @@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. - global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) } return t } diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go index d2d1f7246..9b672a1d7 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -5,7 +5,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "errors" - "fmt" "os" "strconv" "strings" @@ -26,7 +25,7 @@ const ( type errUnsupportedSampler string func (e errUnsupportedSampler) Error() string { - return fmt.Sprintf("unsupported sampler: %s", string(e)) + return "unsupported sampler: " + string(e) } var ( @@ -39,7 +38,7 @@ type samplerArgParseError struct { } func (e samplerArgParseError) Error() string { - return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) + return "parsing sampler argument: " + e.parseErr.Error() } func (e samplerArgParseError) Unwrap() error { diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index 32f862790..d511d0f27 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -99,7 +99,7 @@ func (s snapshot) InstrumentationScope() instrumentation.Scope { // InstrumentationLibrary returns information about the instrumentation // library that created the span. -func (s snapshot) InstrumentationLibrary() instrumentation.Library { +func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility return s.instrumentationScope } diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index ac90f1a26..8f4fc3850 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -62,7 +62,7 @@ type ReadOnlySpan interface { // InstrumentationLibrary returns information about the instrumentation // library that created the span. // Deprecated: please use InstrumentationScope instead. 
- InstrumentationLibrary() instrumentation.Library + InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility // Resource returns information about the entity that produced the span. Resource() *resource.Resource // DroppedAttributes returns the number of attributes dropped by the span @@ -174,6 +174,17 @@ func (s *recordingSpan) IsRecording() bool { s.mu.Lock() defer s.mu.Unlock() + return s.isRecording() +} + +// isRecording returns if this span is being recorded. If this span has ended +// this will return false. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) isRecording() bool { + if s == nil { + return false + } return s.endTime.IsZero() } @@ -182,11 +193,15 @@ func (s *recordingSpan) IsRecording() bool { // included in the set status when the code is for an error. If this span is // not being recorded than this method does nothing. func (s *recordingSpan) SetStatus(code codes.Code, description string) { - if !s.IsRecording() { + if s == nil { return } + s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } if s.status.Code > code { return } @@ -210,12 +225,15 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) { // attributes the span is configured to have, the last added attributes will // be dropped. func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { - if !s.IsRecording() { + if s == nil || len(attributes) == 0 { return } s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } limit := s.tracer.provider.spanLimits.AttributeCountLimit if limit == 0 { @@ -233,7 +251,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { // Otherwise, add without deduplication. When attributes are read they // will be deduplicated, optimizing the operation. - s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes)) + s.attributes = slices.Grow(s.attributes, len(attributes)) for _, a := range attributes { if !a.Valid() { // Drop all invalid attributes. @@ -280,13 +298,17 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { // Do not set a capacity when creating this map. Benchmark testing has // showed this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int) - s.dedupeAttrsFromRecord(&exists) + exists := make(map[attribute.Key]int, len(s.attributes)) + s.dedupeAttrsFromRecord(exists) // Now that s.attributes is deduplicated, adding unique attributes up to // the capacity of s will not over allocate s.attributes. - sum := len(attrs) + len(s.attributes) - s.attributes = slices.Grow(s.attributes, min(sum, limit)) + + // max size = limit + maxCap := min(len(attrs)+len(s.attributes), limit) + if cap(s.attributes) < maxCap { + s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes)) + } for _, a := range attrs { if !a.Valid() { // Drop all invalid attributes. @@ -296,6 +318,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { if idx, ok := exists[a.Key]; ok { // Perform all updates before dropping, even when at capacity. 
+ a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes[idx] = a continue } @@ -324,54 +347,99 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { } switch attr.Value.Type() { case attribute.STRING: - if v := attr.Value.AsString(); len(v) > limit { - return attr.Key.String(safeTruncate(v, limit)) - } + v := attr.Value.AsString() + return attr.Key.String(truncate(limit, v)) case attribute.STRINGSLICE: v := attr.Value.AsStringSlice() for i := range v { - if len(v[i]) > limit { - v[i] = safeTruncate(v[i], limit) - } + v[i] = truncate(limit, v[i]) } return attr.Key.StringSlice(v) } return attr } -// safeTruncate truncates the string and guarantees valid UTF-8 is returned. -func safeTruncate(input string, limit int) string { - if trunc, ok := safeTruncateValidUTF8(input, limit); ok { - return trunc +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s } - trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) - return trunc -} -// safeTruncateValidUTF8 returns a copy of the input string safely truncated to -// limit. The truncation is ensured to occur at the bounds of complete UTF-8 -// characters. If invalid encoding of UTF-8 is encountered, input is returned -// with false, otherwise, the truncated input will be returned with true. -func safeTruncateValidUTF8(input string, limit int) (string, bool) { - for cnt := 0; cnt <= limit; { - r, size := utf8.DecodeRuneInString(input[cnt:]) - if r == utf8.RuneError { - return input, false + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue } - if cnt+size > limit { - return input[:cnt], true + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue } - cnt += size + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ } - return input, true + + return b.String() } // End ends the span. This method does nothing if the span is already ended or // is not being recorded. 
// -// The only SpanOption currently supported is WithTimestamp which will set the -// end time for a Span's life-cycle. +// The only SpanEndOption currently supported are [trace.WithTimestamp], and +// [trace.WithStackTrace]. // // If this method is called while panicking an error event is added to the // Span before ending it and the panic is continued. @@ -386,9 +454,10 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { // the span's duration in case some operation below takes a while. et := monotonicEndTime(s.startTime) - // Do relative expensive check now that we have an end time and see if we - // need to do any more processing. - if !s.IsRecording() { + // Lock the span now that we have an end time and see if we need to do any more processing. + s.mu.Lock() + if !s.isRecording() { + s.mu.Unlock() return } @@ -413,10 +482,11 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } if s.executionTracerTaskEnd != nil { + s.mu.Unlock() s.executionTracerTaskEnd() + s.mu.Lock() } - s.mu.Lock() // Setting endTime to non-zero marks the span as ended and not recording. if config.Timestamp().IsZero() { s.endTime = et @@ -450,7 +520,13 @@ func monotonicEndTime(start time.Time) time.Time { // does not change the Span status. If this span is not being recorded or err is nil // than this method does nothing. func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { - if s == nil || err == nil || !s.IsRecording() { + if s == nil || err == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { return } @@ -486,14 +562,23 @@ func recordStackTrace() string { } // AddEvent adds an event with the provided name and options. If this span is -// not being recorded than this method does nothing. +// not being recorded then this method does nothing. func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { - if !s.IsRecording() { + if s == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { return } s.addEvent(name, o...) } +// addEvent adds an event with the provided name and options. +// +// This method assumes s.mu.Lock is held by the caller. func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { c := trace.NewEventConfig(o...) e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} @@ -510,20 +595,21 @@ func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { e.Attributes = e.Attributes[:limit] } - s.mu.Lock() s.events.add(e) - s.mu.Unlock() } // SetName sets the name of this span. If this span is not being recorded than // this method does nothing. func (s *recordingSpan) SetName(name string) { - if !s.IsRecording() { + if s == nil { return } s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } s.name = name } @@ -579,29 +665,26 @@ func (s *recordingSpan) Attributes() []attribute.KeyValue { func (s *recordingSpan) dedupeAttrs() { // Do not set a capacity when creating this map. Benchmark testing has // showed this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int) - s.dedupeAttrsFromRecord(&exists) + exists := make(map[attribute.Key]int, len(s.attributes)) + s.dedupeAttrsFromRecord(exists) } // dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity // using record as the record of unique attribute keys to their index. // // This method assumes s.mu.Lock is held by the caller. 
-func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) { +func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) { // Use the fact that slices share the same backing array. unique := s.attributes[:0] for _, a := range s.attributes { - if idx, ok := (*record)[a.Key]; ok { + if idx, ok := record[a.Key]; ok { unique[idx] = a } else { unique = append(unique, a) - (*record)[a.Key] = len(unique) - 1 + record[a.Key] = len(unique) - 1 } } - // s.attributes have element types of attribute.KeyValue. These types are - // not pointers and they themselves do not contain pointer fields, - // therefore the duplicate values do not need to be zeroed for them to be - // garbage collected. + clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects. s.attributes = unique } @@ -642,7 +725,7 @@ func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { // InstrumentationLibrary returns the instrumentation.Library associated with // the Tracer that created this span. -func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { +func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility s.mu.Lock() defer s.mu.Unlock() return s.tracer.instrumentationScope @@ -657,7 +740,7 @@ func (s *recordingSpan) Resource() *resource.Resource { } func (s *recordingSpan) AddLink(link trace.Link) { - if !s.IsRecording() { + if s == nil { return } if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && @@ -665,6 +748,12 @@ func (s *recordingSpan) AddLink(link trace.Link) { return } + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { + return + } + l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} // Discard attributes over limit. @@ -678,9 +767,7 @@ func (s *recordingSpan) AddLink(link trace.Link) { l.Attributes = l.Attributes[:limit] } - s.mu.Lock() s.links.add(l) - s.mu.Unlock() } // DroppedAttributes returns the number of attributes dropped by the span @@ -755,12 +842,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan { } func (s *recordingSpan) addChild() { - if !s.IsRecording() { + if s == nil { return } + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { + return + } s.childSpanCount++ - s.mu.Unlock() } func (*recordingSpan) private() {} diff --git a/constraint/vendor/go.opentelemetry.io/otel/sdk/version.go b/constraint/vendor/go.opentelemetry.io/otel/sdk/version.go index 33d065a7c..6b4038510 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/constraint/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. 
func Version() string { - return "1.28.0" + return "1.34.0" } diff --git a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md deleted file mode 100644 index 0b6cbe960..000000000 --- a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.24.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0) diff --git a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go deleted file mode 100644 index 6e688345c..000000000 --- a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go +++ /dev/null @@ -1,4387 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Describes FaaS attributes. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: experimental - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSTriggerKey = attribute.Key("faas.trigger") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the identifies the class / type of - // event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'browser.mouse.click', 'device.app.lifecycle' - // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md). - // Notably, event names are namespaced to avoid collisions and provide a - // clean separation of semantics for events in separate domains like - // browser, mobile, and kubernetes. - EventNameKey = attribute.Key("event.name") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the identifies the class / type of -// event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The attributes described in this section are rather generic. They may be -// used in any Log Record they apply to. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. 
- // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Describes Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// A file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. 
-func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// Describes Database attributes -const ( - // PoolNameKey is the attribute Key conforming to the "pool.name" semantic - // conventions. It represents the name of the connection pool; unique - // within the instrumented application. In case the connection pool - // implementation doesn't provide a name, then the - // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) - // should be used - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myDataSource' - PoolNameKey = attribute.Key("pool.name") - - // StateKey is the attribute Key conforming to the "state" semantic - // conventions. It represents the state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'idle' - StateKey = attribute.Key("state") -) - -var ( - // idle - StateIdle = StateKey.String("idle") - // used - StateUsed = StateKey.String("used") -) - -// PoolName returns an attribute KeyValue conforming to the "pool.name" -// semantic conventions. It represents the name of the connection pool; unique -// within the instrumented application. In case the connection pool -// implementation doesn't provide a name, then the -// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) -// should be used -func PoolName(val string) attribute.KeyValue { - return PoolNameKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: experimental - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if the matched endpoint for the - // request had a rate-limiting policy.) - // Stability: experimental - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (if and only if the request was - // not handled.) 
- // Stability: experimental - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If and only if a route was - // successfully matched.) - // Stability: experimental - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. -func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // signalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. 
It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// Describes JVM buffer metric attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") -) - -// JvmBufferPoolName returns an attribute KeyValue conforming to the -// "jvm.buffer.pool.name" semantic conventions. It represents the name of the -// buffer pool. -func JvmBufferPoolName(val string) attribute.KeyValue { - return JvmBufferPoolNameKey.String(val) -} - -// Describes JVM memory metric attributes. -const ( - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// Describes System metric attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. 
It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU metric attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory metric attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging metric attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. 
It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem metric attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. 
It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network metric attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. It represents a stateless - // protocol MUST NOT set this attribute - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process metric attributes -const ( - // SystemProcessesStatusKey is the attribute Key conforming to the - // "system.processes.status" semantic conventions. It represents the - // process state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessesStatusKey = attribute.Key("system.processes.status") -) - -var ( - // running - SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") - // sleeping - SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") - // stopped - SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") - // defunct - SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") -) - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. 
It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// The attributes used to describe telemetry in the context of databases. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. 
how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. It represents the name of the - // primary Cassandra table that the operation is acting upon, including the - // keyspace name (if applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. It represents the - // represents the identifier of an Elasticsearch cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // represents the human-readable identifier of the node/instance to which a - // request was routed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") - - // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" - // semantic conventions. It represents an identifier (address, unique name, - // or any other identifier) of the database instance that is executing - // queries or mutations on the current connection. This is useful in cases - // where the database is running in a clustered environment and the - // instrumentation is able to record the node executing the query. The - // client may obtain this value in databases like MySQL using queries like - // `select @@hostname`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mysql-e26b99z.example.com' - DBInstanceIDKey = attribute.Key("db.instance.id") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the MongoDB - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") - - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") - - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") - - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") - - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - DBStatementKey = attribute.Key("db.statement") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBSystemKey = attribute.Key("db.system") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. It represents the username for accessing the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = 
DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -var ( - // Some other SQL database. Fallback only. See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = 
DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// DBCassandraTable returns an attribute KeyValue conforming to the -// "db.cassandra.table" semantic conventions. It represents the name of the -// primary Cassandra table that the operation is acting upon, including the -// keyspace name (if applicable). -func DBCassandraTable(val string) attribute.KeyValue { - return DBCassandraTableKey.String(val) -} - -// DBConnectionString returns an attribute KeyValue conforming to the -// "db.connection_string" semantic conventions. It represents the connection -// string used to connect to the database. It is recommended to remove embedded -// credentials. 
-func DBConnectionString(val string) attribute.KeyValue { - return DBConnectionStringKey.String(val) -} - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBContainer returns an attribute KeyValue conforming to the -// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB -// container name. -func DBCosmosDBContainer(val string) attribute.KeyValue { - return DBCosmosDBContainerKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// DBElasticsearchClusterName returns an attribute KeyValue conforming to -// the "db.elasticsearch.cluster.name" semantic conventions. It represents the -// represents the identifier of an Elasticsearch cluster. -func DBElasticsearchClusterName(val string) attribute.KeyValue { - return DBElasticsearchClusterNameKey.String(val) -} - -// DBElasticsearchNodeName returns an attribute KeyValue conforming to the -// "db.elasticsearch.node.name" semantic conventions. It represents the -// represents the human-readable identifier of the node/instance to which a -// request was routed. -func DBElasticsearchNodeName(val string) attribute.KeyValue { - return DBElasticsearchNodeNameKey.String(val) -} - -// DBInstanceID returns an attribute KeyValue conforming to the -// "db.instance.id" semantic conventions. It represents an identifier (address, -// unique name, or any other identifier) of the database instance that is -// executing queries or mutations on the current connection. This is useful in -// cases where the database is running in a clustered environment and the -// instrumentation is able to record the node executing the query. The client -// may obtain this value in databases like MySQL using queries like `select -// @@hostname`. -func DBInstanceID(val string) attribute.KeyValue { - return DBInstanceIDKey.String(val) -} - -// DBJDBCDriverClassname returns an attribute KeyValue conforming to the -// "db.jdbc.driver_classname" semantic conventions. It represents the -// fully-qualified class name of the [Java Database Connectivity -// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver -// used to connect. 
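// Illustrative sketch, not part of the vendored source being removed above: the
// Cosmos DB helpers follow the same constructor pattern, so recording a
// completed request on an existing span might look like this. Import
// path/version, container name, and numbers are assumptions.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed to match the removed vendored copy
	"go.opentelemetry.io/otel/trace"
)

func recordCosmosRequest(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemCosmosDB,              // db.system
		semconv.DBCosmosDBContainer("orders"), // hypothetical container name
		semconv.DBCosmosDBOperationTypeQuery,  // db.cosmosdb.operation_type
		semconv.DBCosmosDBStatusCode(200),     // db.cosmosdb.status_code
		semconv.DBCosmosDBRequestCharge(2.79), // request units consumed
	)
}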
-func DBJDBCDriverClassname(val string) attribute.KeyValue { - return DBJDBCDriverClassnameKey.String(val) -} - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the MongoDB -// collection being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// DBMSSQLInstanceName returns an attribute KeyValue conforming to the -// "db.mssql.instance_name" semantic conventions. It represents the Microsoft -// SQL Server [instance -// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) -// connecting to. This name is used to determine the port of a named instance. -func DBMSSQLInstanceName(val string) attribute.KeyValue { - return DBMSSQLInstanceNameKey.String(val) -} - -// DBName returns an attribute KeyValue conforming to the "db.name" semantic -// conventions. It represents the this attribute is used to report the name of -// the database being accessed. For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} - -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) -} - -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) -} - -// Describes deprecated HTTP attributes. -const ( - // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" - // semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorKey = attribute.Key("http.flavor") - - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'GET', 'POST', 'HEAD' - // Deprecated: use `http.request.method` instead. 
- HTTPMethodKey = attribute.Key("http.method") - - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.request.header.content-length` instead. - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.response.header.content-length` instead. - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'http', 'https' - // Deprecated: use `url.scheme` instead. - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 200 - // Deprecated: use `http.response.status_code` instead. - HTTPStatusCodeKey = attribute.Key("http.status_code") - - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.path` and `url.query` instead. - HTTPTargetKey = attribute.Key("http.target") - - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.full` instead. - HTTPURLKey = attribute.Key("http.url") - - // HTTPUserAgentKey is the attribute Key conforming to the - // "http.user_agent" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - // Deprecated: use `user_agent.original` instead. - HTTPUserAgentKey = attribute.Key("http.user_agent") -) - -var ( - // HTTP/1.0 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. -// -// Deprecated: use `http.request.method` instead. 
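// Illustrative sketch, not part of the vendored source being removed above: the
// deprecated http.* keys in this block are superseded by the request/response
// attributes defined later in the same file. A before/after comparison for the
// two most common ones; the import path/version is an assumption.
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed to match the removed vendored copy
)

// deprecatedAttrs uses the helpers this block marks as deprecated.
func deprecatedAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.HTTPMethod("GET"),   // http.method      -> http.request.method
		semconv.HTTPStatusCode(200), // http.status_code -> http.response.status_code
	}
}

// currentAttrs is the documented replacement.
func currentAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.HTTPRequestMethodGet,
		semconv.HTTPResponseStatusCode(200),
	}
}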
-func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. -// -// Deprecated: use `http.request.header.content-length` instead. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. -// -// Deprecated: use `http.response.header.content-length` instead. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. -// -// Deprecated: use `url.scheme` instead. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. -// -// Deprecated: use `http.response.status_code` instead. -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. -// -// Deprecated: use `url.path` and `url.query` instead. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. -// -// Deprecated: use `url.full` instead. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} - -// HTTPUserAgent returns an attribute KeyValue conforming to the -// "http.user_agent" semantic conventions. -// -// Deprecated: use `user_agent.original` instead. -func HTTPUserAgent(val string) attribute.KeyValue { - return HTTPUserAgentKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address`. - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port`. - NetHostPortKey = attribute.Key("net.host.port") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address` on client spans and `client.address` on - // server spans. - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port` on client spans and `client.port` on - // server spans. - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'amqp', 'http', 'mqtt' - // Deprecated: use `network.protocol.name`. - NetProtocolNameKey = attribute.Key("net.protocol.name") - - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '3.1.1' - // Deprecated: use `network.protocol.version`. - NetProtocolVersionKey = attribute.Key("net.protocol.version") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: use `network.local.address`. - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `network.local.port`. - NetSockHostPortKey = attribute.Key("net.sock.host.port") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '192.168.0.1' - // Deprecated: use `network.peer.address`. - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: no replacement at this time. - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 65531 - // Deprecated: use `network.peer.port`. - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport`. - NetTransportKey = attribute.Key("net.transport") -) - -var ( - // IPv4 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -var ( - // ip_tcp - // - // Deprecated: use `network.transport`. - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - // - // Deprecated: use `network.transport`. - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe - // - // Deprecated: use `network.transport`. 
- NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - // - // Deprecated: use `network.transport`. - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - // - // Deprecated: use `network.transport`. - NetTransportOther = NetTransportKey.String("other") -) - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. -// -// Deprecated: use `server.address`. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. -// -// Deprecated: use `server.port`. -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. -// -// Deprecated: use `server.address` on client spans and `client.address` on -// server spans. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. -// -// Deprecated: use `server.port` on client spans and `client.port` on server -// spans. -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. -// -// Deprecated: use `network.protocol.name`. -func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} - -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. -// -// Deprecated: use `network.protocol.version`. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. -// -// Deprecated: use `network.local.address`. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. -// -// Deprecated: use `network.local.port`. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. -// -// Deprecated: use `network.peer.address`. -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. -// -// Deprecated: no replacement at this time. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. -// -// Deprecated: use `network.peer.port`. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. 
packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// These attributes may be used for any disk related operation. -const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report an error. -const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" - // semantic conventions. It represents the describes a class of error the - // operation ended with. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'timeout', 'java.net.UnknownHostException', - // 'server_certificate_invalid', '500' - // Note: The `error.type` SHOULD be predictable and SHOULD have low - // cardinality. - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library - // SHOULD be low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query - // time when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT - // set `error.type`. 
- // - // If a specific domain defines its own set of error identifiers (such as - // HTTP or gRPC status codes), - // it's RECOMMENDED to: - // - // * Use a domain-specific attribute - // * Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -var ( - // A fallback error value to be used when the instrumentation doesn't define a custom value - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example for recording span - // exceptions](#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). 
The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// Semantic convention attributes in the HTTP namespace. -const ( - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of - // the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the hTTP - // request method. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - // Note: HTTP request method value SHOULD be "known" to the - // instrumentation. - // By default, this convention defines "known" methods as the ones listed - // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) - // and the PATCH method defined in - // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). - // - // If the HTTP request method is not known to instrumentation, it MUST set - // the `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. 
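// Illustrative sketch, not part of the vendored source being removed above: how
// error.type and the exception helpers are commonly combined on a span.
// ErrorTypeOther is the documented fallback when no domain-specific identifier
// applies. Import path/version and the step function are assumptions.
package example

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed to match the removed vendored copy
)

func doWork(ctx context.Context) error {
	ctx, span := otel.Tracer("example").Start(ctx, "do-work")
	defer span.End()

	if err := step(ctx); err != nil {
		span.RecordError(err)                      // records an "exception" event (exception.type/message)
		span.SetStatus(codes.Error, err.Error())
		span.SetAttributes(semconv.ErrorTypeOther) // fallback error.type; use a domain value when one exists
		return err
	}
	return nil
}

func step(context.Context) error { return errors.New("boom") } // hypothetical failing step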
If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated - // list of case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is - // not a list of known methods in addition to the defaults). - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods - // to be case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GeT', 'ACL', 'foo' - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the - // ordinal number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size - // of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status - // code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 200 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route, that is, the path - // template in the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. 
- // SHOULD include the [application - // root](/docs/http/http-spans.md#http-server-definitions) if there is one. - HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. 
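// Illustrative sketch, not part of the vendored source being removed above: the
// enum members are ready-made KeyValues, so server-side instrumentation mixes
// them with the helper functions, mapping unknown methods to _OTHER and keeping
// the original value as the notes require. Import path/version assumed; the
// switch is abbreviated to two known methods.
package example

import (
	"net/http"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed to match the removed vendored copy
)

func requestAttrs(r *http.Request, route string, status int) []attribute.KeyValue {
	method, known := semconv.HTTPRequestMethodOther, false
	switch r.Method {
	case http.MethodGet:
		method, known = semconv.HTTPRequestMethodGet, true
	case http.MethodPost:
		method, known = semconv.HTTPRequestMethodPost, true
	}
	attrs := []attribute.KeyValue{
		method,                                 // http.request.method
		semconv.HTTPRoute(route),               // e.g. "/users/{id}" from the router
		semconv.HTTPResponseStatusCode(status), // http.response.status_code
	}
	if !known {
		// Preserve the wire value alongside _OTHER.
		attrs = append(attrs, semconv.HTTPRequestMethodOriginal(r.Method))
	}
	return attrs
}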
-func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Attributes describing telemetry around messaging systems and messaging -// activities. -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client_id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client_id") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationPublishAnonymousKey is the attribute Key conforming - // to the "messaging.destination_publish.anonymous" semantic conventions. - // It represents a boolean that is true if the publish message destination - // is anonymous (could be unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") - - // MessagingDestinationPublishNameKey is the attribute Key conforming to - // the "messaging.destination_publish.name" semantic conventions. It - // represents the name of the original destination the message was - // published to - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: The name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker doesn't have such notion, the original destination name - // SHOULD uniquely identify the broker. - MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. 
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the - // size of the message body in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1439 - // Note: This can refer to both the compressed or uncompressed body size. - // If both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the conversation ID identifying the conversation to which the message - // belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents - // the size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2738 - // Note: This can refer to both the compressed or uncompressed size. If - // both sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents an identifier for - // the messaging system being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationPublish = MessagingOperationKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationCreate = MessagingOperationKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationReceive = MessagingOperationKey.String("receive") - // One or more messages are passed to a consumer. 
This operation refers to push-based scenarios, where consumer register callbacks which get called by messaging SDKs - MessagingOperationDeliver = MessagingOperationKey.String("deliver") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid") - // Azure Event Hubs - MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs") - // Azure Service Bus - MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client_id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. 
It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key. -func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. 
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. 
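Illustrative aside (not part of the removed vendored file or of the diff): a minimal sketch of how the messaging.* constructors and enum values deleted above are typically consumed through the OpenTelemetry trace API. The import path assumes the v1.24.0 semconv package this file appears to correspond to; the package, function, tracer, span, and topic names are hypothetical.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version; match the vendored package
	"go.opentelemetry.io/otel/trace"
)

// publishOrder starts a producer span and annotates it with the messaging
// attribute constructors and enum values defined in this package.
func publishOrder(ctx context.Context, key string) {
	tracer := otel.Tracer("example/messaging") // illustrative instrumentation name
	_, span := tracer.Start(ctx, "orders publish",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,               // messaging.system=kafka
			semconv.MessagingOperationPublish,          // messaging.operation=publish
			semconv.MessagingDestinationName("orders"), // illustrative topic name
			semconv.MessagingKafkaMessageKey(key),
		),
	)
	defer span.End()
	// ... hand the message to the producer client here ...
}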
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // version of the protocol specified in `network.protocol.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `network.protocol.version` refers to the version of the protocol - // used and might be different from the protocol client's version. If the - // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, - // this attribute should be set to `1.1`. 
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
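Illustrative aside (not part of the removed vendored file or of the diff): a minimal sketch, under the same version assumption as above, of deriving the network.* attributes from a plain net.Conn using the constructors defined in this package. The package and function names are hypothetical, and a TCP connection is assumed.

package example

import (
	"net"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version; match the vendored package
)

// connAttributes derives network.* attributes for an established TCP
// connection from its local and remote addresses.
func connAttributes(conn net.Conn) []attribute.KeyValue {
	attrs := []attribute.KeyValue{semconv.NetworkTransportTCP}

	if host, port, err := net.SplitHostPort(conn.RemoteAddr().String()); err == nil {
		attrs = append(attrs, semconv.NetworkPeerAddress(host))
		if p, err := strconv.Atoi(port); err == nil {
			attrs = append(attrs, semconv.NetworkPeerPort(p))
		}
	}
	if host, port, err := net.SplitHostPort(conn.LocalAddr().String()); err == nil {
		attrs = append(attrs, semconv.NetworkLocalAddress(host))
		if p, err := strconv.Atoi(port); err == nil {
			attrs = append(attrs, semconv.NetworkLocalPort(p))
		}
	}
	return attrs
}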
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port -// number of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the version -// of the protocol specified in `network.protocol.name`. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. 
Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = 
RPCSystemKey.String("connect_rpc") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// doesn't specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. 
- ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Semantic convention attributes in the TLS namespace. -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" - // semantic conventions. 
It represents the string indicating the - // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) - // used during the current connection. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', - // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' - // Note: The values allowed for `tls.cipher` MUST be one of the - // `Descriptions` of the [registered TLS Cipher - // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the client. This is - // usually mutually-exclusive of `client.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the client. This is usually mutually-exclusive of - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions. 
It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/Time - // indicating when client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. It represents the - // date/Time indicating when client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientServerNameKey is the attribute Key conforming to the - // "tls.client.server_name" semantic conventions. It represents the also - // called an SNI, this tells the server which hostname to which the client - // is attempting to connect to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry.io' - TLSClientServerNameKey = attribute.Key("tls.client.server_name") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the - // array of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the - // given cipher, when applicable - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'secp256r1' - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the - // "tls.established" semantic conventions. 
It represents the boolean flag - // indicating if the TLS negotiation was successful and transitioned to an - // encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the - // "tls.next_protocol" semantic conventions. It represents the string - // indicating the protocol being tunneled. Per the values in the [IANA - // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), - // this string should be lower case. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http/1.1' - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the - // "tls.protocol.name" semantic conventions. It represents the normalized - // lowercase protocol name parsed from original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the server. This is - // usually mutually-exclusive of `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually-exclusive of - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the server. 
For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/Time - // indicating when server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the - // date/Time indicating when server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. This is usually mutually-exclusive of `client.certificate` since -// that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions. 
It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/Time -// indicating when client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/Time -// indicating when client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the also called -// an SNI, this tells the server which hostname to which the client is -// attempting to connect to. -func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. 
It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. 
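Illustrative aside (not part of the removed vendored file or of the diff): a minimal sketch, under the same version assumption as above, of mapping a completed crypto/tls handshake onto the tls.* constructors defined in this package. The package and function names are hypothetical, and the cipher name from tls.CipherSuiteName is used as a best-effort value for tls.cipher.

package example

import (
	"crypto/tls"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version; match the vendored package
)

// tlsAttributes maps a connection's handshake state onto the tls.* attribute
// constructors defined in this package.
func tlsAttributes(state tls.ConnectionState) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.TLSEstablished(state.HandshakeComplete),
		semconv.TLSProtocolNameTLS,
		semconv.TLSCipher(tls.CipherSuiteName(state.CipherSuite)),
		semconv.TLSResumed(state.DidResume),
	}

	switch state.Version {
	case tls.VersionTLS12:
		attrs = append(attrs, semconv.TLSProtocolVersion("1.2"))
	case tls.VersionTLS13:
		attrs = append(attrs, semconv.TLSProtocolVersion("1.3"))
	}
	if state.NegotiatedProtocol != "" {
		attrs = append(attrs, semconv.TLSNextProtocol(state.NegotiatedProtocol))
	}
	if state.ServerName != "" { // on the server side this is the client's SNI
		attrs = append(attrs, semconv.TLSClientServerName(state.ServerName))
	}
	return attrs
}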
-func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password SHOULD be redacted and attribute's value SHOULD be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed) and SHOULD NOT be validated or modified except for - // sanitizing purposes. - URLFullKey = attribute.Key("url.full") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - URLPathKey = attribute.Key("url.path") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in query string SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") -) - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. 
It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - UserAgentOriginalKey = attribute.Key("user_agent.original") -) - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} diff --git a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go deleted file mode 100644 index d27e8a8f8..000000000 --- a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.24.0 -// version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" diff --git a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go deleted file mode 100644 index 6c019aafc..000000000 --- a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// This event represents an occurrence of a lifecycle transition on the iOS -// platform. -const ( - // IosStateKey is the attribute Key conforming to the "ios.state" semantic - // conventions. It represents the this attribute represents the state the - // application has transitioned into at the occurrence of the event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate - // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902), - // and from which the `OS terminology` column values are derived. - IosStateKey = attribute.Key("ios.state") -) - -var ( - // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive` - IosStateActive = IosStateKey.String("active") - // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` - IosStateInactive = IosStateKey.String("inactive") - // The app is now in the background. 
This value is associated with UIKit notification `applicationDidEnterBackground` - IosStateBackground = IosStateKey.String("background") - // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground` - IosStateForeground = IosStateKey.String("foreground") - // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` - IosStateTerminate = IosStateKey.String("terminate") -) - -// This event represents an occurrence of a lifecycle transition on the Android -// platform. -const ( - // AndroidStateKey is the attribute Key conforming to the "android.state" - // semantic conventions. It represents the this attribute represents the - // state the application has transitioned into at the occurrence of the - // event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The Android lifecycle states are defined in [Activity lifecycle - // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), - // and from which the `OS identifiers` are derived. - AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. 
String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// RPC received/sent message. -const ( - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessageTypeKey = attribute.Key("message.type") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received -// message. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. 
It represents the -// uncompressed size of the message in bytes. -func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} diff --git a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go deleted file mode 100644 index 7235bb51d..000000000 --- a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go deleted file mode 100644 index a6b953f62..000000000 --- a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go +++ /dev/null @@ -1,1071 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -const ( - - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsUsageName = "db.client.connections.usage" - DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute" - - // DBClientConnectionsIdleMax is the metric conforming to the - // "db.client.connections.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" - DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed" - - // DBClientConnectionsIdleMin is the metric conforming to the - // "db.client.connections.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMinName = "db.client.connections.idle.min" - DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed" - - // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsMaxName = "db.client.connections.max" - DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "The maximum number of open connections allowed" - - // DBClientConnectionsPendingRequests is the metric conforming to the - // "db.client.connections.pending_requests" semantic conventions. 
It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" - DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" - - // DBClientConnectionsTimeouts is the metric conforming to the - // "db.client.connections.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" - DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" - - // DBClientConnectionsCreateTime is the metric conforming to the - // "db.client.connections.create_time" semantic conventions. It represents the - // time it took to create a new connection. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsCreateTimeName = "db.client.connections.create_time" - DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection" - - // DBClientConnectionsWaitTime is the metric conforming to the - // "db.client.connections.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" - DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool" - - // DBClientConnectionsUseTime is the metric conforming to the - // "db.client.connections.use_time" semantic conventions. It represents the - // time between borrowing a connection and returning it to the pool. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsUseTimeName = "db.client.connections.use_time" - DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" - - // AspnetcoreRoutingMatchAttempts is the metric conforming to the - // "aspnetcore.routing.match_attempts" semantic conventions. It represents the - // number of requests that were attempted to be matched to an endpoint. - // Instrument: counter - // Unit: {match_attempt} - // Stability: Experimental - AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" - AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" - AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." - - // AspnetcoreDiagnosticsExceptions is the metric conforming to the - // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the - // number of exceptions caught by exception handling middleware. 
- // Instrument: counter - // Unit: {exception} - // Stability: Experimental - AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" - AspnetcoreDiagnosticsExceptionsUnit = "{exception}" - AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." - - // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the - // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It - // represents the number of requests that are currently active on the server - // that hold a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" - AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" - AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." - - // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the - // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It - // represents the duration of rate limiting lease held by requests on the - // server. - // Instrument: histogram - // Unit: s - // Stability: Experimental - AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" - AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" - AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." - - // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the - // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It - // represents the time the request spent in a queue waiting to acquire a rate - // limiting lease. - // Instrument: histogram - // Unit: s - // Stability: Experimental - AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" - AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" - AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the - // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It - // represents the number of requests that are currently queued, waiting to - // acquire a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" - AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" - AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingRequests is the metric conforming to the - // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the - // number of requests that tried to acquire a rate limiting lease. - // Instrument: counter - // Unit: {request} - // Stability: Experimental - AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" - AspnetcoreRateLimitingRequestsUnit = "{request}" - AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It represents the measures the time taken to perform a - // DNS lookup. 
- // Instrument: histogram - // Unit: s - // Stability: Experimental - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." - - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." - - // HTTPClientRequestTimeInQueue is the metric conforming to the - // "http.client.request.time_in_queue" semantic conventions. It represents the - // amount of time requests spent on a queue waiting for an available - // connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue" - HTTPClientRequestTimeInQueueUnit = "s" - HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection." - - // KestrelActiveConnections is the metric conforming to the - // "kestrel.active_connections" semantic conventions. It represents the number - // of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - KestrelActiveConnectionsName = "kestrel.active_connections" - KestrelActiveConnectionsUnit = "{connection}" - KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // KestrelConnectionDuration is the metric conforming to the - // "kestrel.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Experimental - KestrelConnectionDurationName = "kestrel.connection.duration" - KestrelConnectionDurationUnit = "s" - KestrelConnectionDurationDescription = "The duration of connections on the server." - - // KestrelRejectedConnections is the metric conforming to the - // "kestrel.rejected_connections" semantic conventions. It represents the - // number of connections rejected by the server. 
- // Instrument: counter - // Unit: {connection} - // Stability: Experimental - KestrelRejectedConnectionsName = "kestrel.rejected_connections" - KestrelRejectedConnectionsUnit = "{connection}" - KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." - - // KestrelQueuedConnections is the metric conforming to the - // "kestrel.queued_connections" semantic conventions. It represents the number - // of connections that are currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - KestrelQueuedConnectionsName = "kestrel.queued_connections" - KestrelQueuedConnectionsUnit = "{connection}" - KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." - - // KestrelQueuedRequests is the metric conforming to the - // "kestrel.queued_requests" semantic conventions. It represents the number of - // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are - // currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - KestrelQueuedRequestsName = "kestrel.queued_requests" - KestrelQueuedRequestsUnit = "{request}" - KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." - - // KestrelUpgradedConnections is the metric conforming to the - // "kestrel.upgraded_connections" semantic conventions. It represents the - // number of connections that are currently upgraded (WebSockets). . - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" - KestrelUpgradedConnectionsUnit = "{connection}" - KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." - - // KestrelTLSHandshakeDuration is the metric conforming to the - // "kestrel.tls_handshake.duration" semantic conventions. It represents the - // duration of TLS handshakes on the server. - // Instrument: histogram - // Unit: s - // Stability: Experimental - KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" - KestrelTLSHandshakeDurationUnit = "s" - KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." - - // KestrelActiveTLSHandshakes is the metric conforming to the - // "kestrel.active_tls_handshakes" semantic conventions. It represents the - // number of TLS handshakes that are currently in progress on the server. - // Instrument: updowncounter - // Unit: {handshake} - // Stability: Experimental - KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" - KestrelActiveTLSHandshakesUnit = "{handshake}" - KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." - - // SignalrServerConnectionDuration is the metric conforming to the - // "signalr.server.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Experimental - SignalrServerConnectionDurationName = "signalr.server.connection.duration" - SignalrServerConnectionDurationUnit = "s" - SignalrServerConnectionDurationDescription = "The duration of connections on the server." - - // SignalrServerActiveConnections is the metric conforming to the - // "signalr.server.active_connections" semantic conventions. 
It represents the - // number of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - SignalrServerActiveConnectionsName = "signalr.server.active_connections" - SignalrServerActiveConnectionsUnit = "{connection}" - SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" - // semantic conventions. It represents the measures the duration of the - // function's logic execution. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInvokeDurationName = "faas.invoke_duration" - FaaSInvokeDurationUnit = "s" - FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" - - // FaaSInitDuration is the metric conforming to the "faas.init_duration" - // semantic conventions. It represents the measures the duration of the - // function's initialization, such as a cold start. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInitDurationName = "faas.init_duration" - FaaSInitDurationUnit = "s" - FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" - - // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic - // conventions. It represents the number of invocation cold starts. - // Instrument: counter - // Unit: {coldstart} - // Stability: Experimental - FaaSColdstartsName = "faas.coldstarts" - FaaSColdstartsUnit = "{coldstart}" - FaaSColdstartsDescription = "Number of invocation cold starts" - - // FaaSErrors is the metric conforming to the "faas.errors" semantic - // conventions. It represents the number of invocation errors. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - FaaSErrorsName = "faas.errors" - FaaSErrorsUnit = "{error}" - FaaSErrorsDescription = "Number of invocation errors" - - // FaaSInvocations is the metric conforming to the "faas.invocations" semantic - // conventions. It represents the number of successful invocations. - // Instrument: counter - // Unit: {invocation} - // Stability: Experimental - FaaSInvocationsName = "faas.invocations" - FaaSInvocationsUnit = "{invocation}" - FaaSInvocationsDescription = "Number of successful invocations" - - // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic - // conventions. It represents the number of invocation timeouts. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - FaaSTimeoutsName = "faas.timeouts" - FaaSTimeoutsUnit = "{timeout}" - FaaSTimeoutsDescription = "Number of invocation timeouts" - - // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic - // conventions. It represents the distribution of max memory usage per - // invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSMemUsageName = "faas.mem_usage" - FaaSMemUsageUnit = "By" - FaaSMemUsageDescription = "Distribution of max memory usage per invocation" - - // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic - // conventions. It represents the distribution of CPU usage per invocation. 
- // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSCPUUsageName = "faas.cpu_usage" - FaaSCPUUsageUnit = "s" - FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" - - // FaaSNetIo is the metric conforming to the "faas.net_io" semantic - // conventions. It represents the distribution of net I/O usage per invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSNetIoName = "faas.net_io" - FaaSNetIoUnit = "By" - FaaSNetIoDescription = "Distribution of net I/O usage per invocation" - - // HTTPServerRequestDuration is the metric conforming to the - // "http.server.request.duration" semantic conventions. It represents the - // duration of HTTP server requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPServerRequestDurationName = "http.server.request.duration" - HTTPServerRequestDurationUnit = "s" - HTTPServerRequestDurationDescription = "Duration of HTTP server requests." - - // HTTPServerActiveRequests is the metric conforming to the - // "http.server.active_requests" semantic conventions. It represents the number - // of active HTTP server requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPServerActiveRequestsName = "http.server.active_requests" - HTTPServerActiveRequestsUnit = "{request}" - HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." - - // HTTPServerRequestBodySize is the metric conforming to the - // "http.server.request.body.size" semantic conventions. It represents the size - // of HTTP server request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerRequestBodySizeName = "http.server.request.body.size" - HTTPServerRequestBodySizeUnit = "By" - HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." - - // HTTPServerResponseBodySize is the metric conforming to the - // "http.server.response.body.size" semantic conventions. It represents the - // size of HTTP server response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerResponseBodySizeName = "http.server.response.body.size" - HTTPServerResponseBodySizeUnit = "By" - HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." - - // HTTPClientRequestDuration is the metric conforming to the - // "http.client.request.duration" semantic conventions. It represents the - // duration of HTTP client requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPClientRequestDurationName = "http.client.request.duration" - HTTPClientRequestDurationUnit = "s" - HTTPClientRequestDurationDescription = "Duration of HTTP client requests." - - // HTTPClientRequestBodySize is the metric conforming to the - // "http.client.request.body.size" semantic conventions. It represents the size - // of HTTP client request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientRequestBodySizeName = "http.client.request.body.size" - HTTPClientRequestBodySizeUnit = "By" - HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." - - // HTTPClientResponseBodySize is the metric conforming to the - // "http.client.response.body.size" semantic conventions. It represents the - // size of HTTP client response bodies. 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientResponseBodySizeName = "http.client.response.body.size" - HTTPClientResponseBodySizeUnit = "By" - HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." - - // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic - // conventions. It represents the measure of initial memory requested. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmMemoryInitName = "jvm.memory.init" - JvmMemoryInitUnit = "By" - JvmMemoryInitDescription = "Measure of initial memory requested." - - // JvmSystemCPUUtilization is the metric conforming to the - // "jvm.system.cpu.utilization" semantic conventions. It represents the recent - // CPU utilization for the whole system as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" - JvmSystemCPUUtilizationUnit = "1" - JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." - - // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" - // semantic conventions. It represents the average CPU load of the whole system - // for the last minute as reported by the JVM. - // Instrument: gauge - // Unit: {run_queue_item} - // Stability: Experimental - JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" - JvmSystemCPULoad1mUnit = "{run_queue_item}" - JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." - - // JvmBufferMemoryUsage is the metric conforming to the - // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of - // memory used by buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" - JvmBufferMemoryUsageUnit = "By" - JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." - - // JvmBufferMemoryLimit is the metric conforming to the - // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of - // total memory capacity of buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" - JvmBufferMemoryLimitUnit = "By" - JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." - - // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic - // conventions. It represents the number of buffers in the pool. - // Instrument: updowncounter - // Unit: {buffer} - // Stability: Experimental - JvmBufferCountName = "jvm.buffer.count" - JvmBufferCountUnit = "{buffer}" - JvmBufferCountDescription = "Number of buffers in the pool." - - // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic - // conventions. It represents the measure of memory used. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedName = "jvm.memory.used" - JvmMemoryUsedUnit = "By" - JvmMemoryUsedDescription = "Measure of memory used." - - // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" - // semantic conventions. It represents the measure of memory committed. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryCommittedName = "jvm.memory.committed" - JvmMemoryCommittedUnit = "By" - JvmMemoryCommittedDescription = "Measure of memory committed." 
- - // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic - // conventions. It represents the measure of max obtainable memory. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryLimitName = "jvm.memory.limit" - JvmMemoryLimitUnit = "By" - JvmMemoryLimitDescription = "Measure of max obtainable memory." - - // JvmMemoryUsedAfterLastGc is the metric conforming to the - // "jvm.memory.used_after_last_gc" semantic conventions. It represents the - // measure of memory used, as measured after the most recent garbage collection - // event on this pool. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" - JvmMemoryUsedAfterLastGcUnit = "By" - JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." - - // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic - // conventions. It represents the duration of JVM garbage collection actions. - // Instrument: histogram - // Unit: s - // Stability: Stable - JvmGcDurationName = "jvm.gc.duration" - JvmGcDurationUnit = "s" - JvmGcDurationDescription = "Duration of JVM garbage collection actions." - - // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic - // conventions. It represents the number of executing platform threads. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Stable - JvmThreadCountName = "jvm.thread.count" - JvmThreadCountUnit = "{thread}" - JvmThreadCountDescription = "Number of executing platform threads." - - // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic - // conventions. It represents the number of classes loaded since JVM start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassLoadedName = "jvm.class.loaded" - JvmClassLoadedUnit = "{class}" - JvmClassLoadedDescription = "Number of classes loaded since JVM start." - - // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" - // semantic conventions. It represents the number of classes unloaded since JVM - // start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassUnloadedName = "jvm.class.unloaded" - JvmClassUnloadedUnit = "{class}" - JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." - - // JvmClassCount is the metric conforming to the "jvm.class.count" semantic - // conventions. It represents the number of classes currently loaded. - // Instrument: updowncounter - // Unit: {class} - // Stability: Stable - JvmClassCountName = "jvm.class.count" - JvmClassCountUnit = "{class}" - JvmClassCountDescription = "Number of classes currently loaded." - - // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic - // conventions. It represents the number of processors available to the Java - // virtual machine. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Stable - JvmCPUCountName = "jvm.cpu.count" - JvmCPUCountUnit = "{cpu}" - JvmCPUCountDescription = "Number of processors available to the Java virtual machine." - - // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic - // conventions. It represents the cPU time used by the process as reported by - // the JVM. 
- // Instrument: counter - // Unit: s - // Stability: Stable - JvmCPUTimeName = "jvm.cpu.time" - JvmCPUTimeUnit = "s" - JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." - - // JvmCPURecentUtilization is the metric conforming to the - // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent - // CPU utilization for the process as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Stable - JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" - JvmCPURecentUtilizationUnit = "1" - JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." - - // MessagingPublishDuration is the metric conforming to the - // "messaging.publish.duration" semantic conventions. It represents the - // measures the duration of publish operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingPublishDurationName = "messaging.publish.duration" - MessagingPublishDurationUnit = "s" - MessagingPublishDurationDescription = "Measures the duration of publish operation." - - // MessagingReceiveDuration is the metric conforming to the - // "messaging.receive.duration" semantic conventions. It represents the - // measures the duration of receive operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingReceiveDurationName = "messaging.receive.duration" - MessagingReceiveDurationUnit = "s" - MessagingReceiveDurationDescription = "Measures the duration of receive operation." - - // MessagingDeliverDuration is the metric conforming to the - // "messaging.deliver.duration" semantic conventions. It represents the - // measures the duration of deliver operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingDeliverDurationName = "messaging.deliver.duration" - MessagingDeliverDurationUnit = "s" - MessagingDeliverDurationDescription = "Measures the duration of deliver operation." - - // MessagingPublishMessages is the metric conforming to the - // "messaging.publish.messages" semantic conventions. It represents the - // measures the number of published messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingPublishMessagesName = "messaging.publish.messages" - MessagingPublishMessagesUnit = "{message}" - MessagingPublishMessagesDescription = "Measures the number of published messages." - - // MessagingReceiveMessages is the metric conforming to the - // "messaging.receive.messages" semantic conventions. It represents the - // measures the number of received messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingReceiveMessagesName = "messaging.receive.messages" - MessagingReceiveMessagesUnit = "{message}" - MessagingReceiveMessagesDescription = "Measures the number of received messages." - - // MessagingDeliverMessages is the metric conforming to the - // "messaging.deliver.messages" semantic conventions. It represents the - // measures the number of delivered messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingDeliverMessagesName = "messaging.deliver.messages" - MessagingDeliverMessagesUnit = "{message}" - MessagingDeliverMessagesDescription = "Measures the number of delivered messages." - - // RPCServerDuration is the metric conforming to the "rpc.server.duration" - // semantic conventions. It represents the measures the duration of inbound - // RPC. 
- // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCServerDurationName = "rpc.server.duration" - RPCServerDurationUnit = "ms" - RPCServerDurationDescription = "Measures the duration of inbound RPC." - - // RPCServerRequestSize is the metric conforming to the - // "rpc.server.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerRequestSizeName = "rpc.server.request.size" - RPCServerRequestSizeUnit = "By" - RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCServerResponseSize is the metric conforming to the - // "rpc.server.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerResponseSizeName = "rpc.server.response.size" - RPCServerResponseSizeUnit = "By" - RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCServerRequestsPerRPC is the metric conforming to the - // "rpc.server.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" - RPCServerRequestsPerRPCUnit = "{count}" - RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCServerResponsesPerRPC is the metric conforming to the - // "rpc.server.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" - RPCServerResponsesPerRPCUnit = "{count}" - RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // RPCClientDuration is the metric conforming to the "rpc.client.duration" - // semantic conventions. It represents the measures the duration of outbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCClientDurationName = "rpc.client.duration" - RPCClientDurationUnit = "ms" - RPCClientDurationDescription = "Measures the duration of outbound RPC." - - // RPCClientRequestSize is the metric conforming to the - // "rpc.client.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientRequestSizeName = "rpc.client.request.size" - RPCClientRequestSizeUnit = "By" - RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCClientResponseSize is the metric conforming to the - // "rpc.client.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientResponseSizeName = "rpc.client.response.size" - RPCClientResponseSizeUnit = "By" - RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCClientRequestsPerRPC is the metric conforming to the - // "rpc.client.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. 
- // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" - RPCClientRequestsPerRPCUnit = "{count}" - RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCClientResponsesPerRPC is the metric conforming to the - // "rpc.client.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" - RPCClientResponsesPerRPCUnit = "{count}" - RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic - // conventions. It represents the seconds each logical CPU spent on each mode. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemCPUTimeName = "system.cpu.time" - SystemCPUTimeUnit = "s" - SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" - - // SystemCPUUtilization is the metric conforming to the - // "system.cpu.utilization" semantic conventions. It represents the difference - // in system.cpu.time since the last measurement, divided by the elapsed time - // and number of logical CPUs. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - SystemCPUUtilizationName = "system.cpu.utilization" - SystemCPUUtilizationUnit = "1" - SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" - - // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" - // semantic conventions. It represents the reports the current frequency of the - // CPU in Hz. - // Instrument: gauge - // Unit: {Hz} - // Stability: Experimental - SystemCPUFrequencyName = "system.cpu.frequency" - SystemCPUFrequencyUnit = "{Hz}" - SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" - - // SystemCPUPhysicalCount is the metric conforming to the - // "system.cpu.physical.count" semantic conventions. It represents the reports - // the number of actual physical processor cores on the hardware. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPUPhysicalCountName = "system.cpu.physical.count" - SystemCPUPhysicalCountUnit = "{cpu}" - SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" - - // SystemCPULogicalCount is the metric conforming to the - // "system.cpu.logical.count" semantic conventions. It represents the reports - // the number of logical (virtual) processor cores created by the operating - // system to manage multitasking. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPULogicalCountName = "system.cpu.logical.count" - SystemCPULogicalCountUnit = "{cpu}" - SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" - - // SystemMemoryUsage is the metric conforming to the "system.memory.usage" - // semantic conventions. It represents the reports memory in use by state. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryUsageName = "system.memory.usage" - SystemMemoryUsageUnit = "By" - SystemMemoryUsageDescription = "Reports memory in use by state." 
- - // SystemMemoryLimit is the metric conforming to the "system.memory.limit" - // semantic conventions. It represents the total memory available in the - // system. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryLimitName = "system.memory.limit" - SystemMemoryLimitUnit = "By" - SystemMemoryLimitDescription = "Total memory available in the system." - - // SystemMemoryUtilization is the metric conforming to the - // "system.memory.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemMemoryUtilizationName = "system.memory.utilization" - SystemMemoryUtilizationUnit = "1" - - // SystemPagingUsage is the metric conforming to the "system.paging.usage" - // semantic conventions. It represents the unix swap or windows pagefile usage. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemPagingUsageName = "system.paging.usage" - SystemPagingUsageUnit = "By" - SystemPagingUsageDescription = "Unix swap or windows pagefile usage" - - // SystemPagingUtilization is the metric conforming to the - // "system.paging.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingUtilizationName = "system.paging.utilization" - SystemPagingUtilizationUnit = "1" - - // SystemPagingFaults is the metric conforming to the "system.paging.faults" - // semantic conventions. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingFaultsName = "system.paging.faults" - SystemPagingFaultsUnit = "{fault}" - - // SystemPagingOperations is the metric conforming to the - // "system.paging.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingOperationsName = "system.paging.operations" - SystemPagingOperationsUnit = "{operation}" - - // SystemDiskIo is the metric conforming to the "system.disk.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskIoName = "system.disk.io" - SystemDiskIoUnit = "By" - - // SystemDiskOperations is the metric conforming to the - // "system.disk.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskOperationsName = "system.disk.operations" - SystemDiskOperationsUnit = "{operation}" - - // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" - // semantic conventions. It represents the time disk spent activated. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskIoTimeName = "system.disk.io_time" - SystemDiskIoTimeUnit = "s" - SystemDiskIoTimeDescription = "Time disk spent activated" - - // SystemDiskOperationTime is the metric conforming to the - // "system.disk.operation_time" semantic conventions. 
It represents the sum of - // the time each operation took to complete. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskOperationTimeName = "system.disk.operation_time" - SystemDiskOperationTimeUnit = "s" - SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" - - // SystemDiskMerged is the metric conforming to the "system.disk.merged" - // semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskMergedName = "system.disk.merged" - SystemDiskMergedUnit = "{operation}" - - // SystemFilesystemUsage is the metric conforming to the - // "system.filesystem.usage" semantic conventions. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUsageName = "system.filesystem.usage" - SystemFilesystemUsageUnit = "By" - - // SystemFilesystemUtilization is the metric conforming to the - // "system.filesystem.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUtilizationName = "system.filesystem.utilization" - SystemFilesystemUtilizationUnit = "1" - - // SystemNetworkDropped is the metric conforming to the - // "system.network.dropped" semantic conventions. It represents the count of - // packets that are dropped or discarded even though there was no error. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - SystemNetworkDroppedName = "system.network.dropped" - SystemNetworkDroppedUnit = "{packet}" - SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" - - // SystemNetworkPackets is the metric conforming to the - // "system.network.packets" semantic conventions. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkPacketsName = "system.network.packets" - SystemNetworkPacketsUnit = "{packet}" - - // SystemNetworkErrors is the metric conforming to the "system.network.errors" - // semantic conventions. It represents the count of network errors detected. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - SystemNetworkErrorsName = "system.network.errors" - SystemNetworkErrorsUnit = "{error}" - SystemNetworkErrorsDescription = "Count of network errors detected" - - // SystemNetworkIo is the metric conforming to the "system.network.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkIoName = "system.network.io" - SystemNetworkIoUnit = "By" - - // SystemNetworkConnections is the metric conforming to the - // "system.network.connections" semantic conventions. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
- SystemNetworkConnectionsName = "system.network.connections" - SystemNetworkConnectionsUnit = "{connection}" - - // SystemProcessesCount is the metric conforming to the - // "system.processes.count" semantic conventions. It represents the total - // number of processes in each state. - // Instrument: updowncounter - // Unit: {process} - // Stability: Experimental - SystemProcessesCountName = "system.processes.count" - SystemProcessesCountUnit = "{process}" - SystemProcessesCountDescription = "Total number of processes in each state" - - // SystemProcessesCreated is the metric conforming to the - // "system.processes.created" semantic conventions. It represents the total - // number of processes created over uptime of the host. - // Instrument: counter - // Unit: {process} - // Stability: Experimental - SystemProcessesCreatedName = "system.processes.created" - SystemProcessesCreatedUnit = "{process}" - SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host" - - // SystemLinuxMemoryAvailable is the metric conforming to the - // "system.linux.memory.available" semantic conventions. It represents an - // estimate of how much memory is available for starting new applications, - // without causing swapping. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemLinuxMemoryAvailableName = "system.linux.memory.available" - SystemLinuxMemoryAvailableUnit = "By" - SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" -) diff --git a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go deleted file mode 100644 index d66bbe9c2..000000000 --- a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go +++ /dev/null @@ -1,2545 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - 
CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assinged by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. 
-func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. 
- DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the - // "host.cpu.model.id" semantic conventions. It represents the model - // identifier. It provides more granular information about the CPU, - // distinguishing it from other CPUs within the same family. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', '9000/778/B180L' - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the - // "host.cpu.stepping" semantic conventions. It represents the stepping or - // core revisions. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'GenuineIntel' - // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor - // ID string in EBX, EDX and ECX registers. Writing these to memory in this - // order results in a 12-character string. - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID or host OS image ID. - // For Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image or host OS as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val int) attribute.KeyValue { - return HostCPUSteppingKey.Int(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. 
-func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the vM image ID or host -// OS image ID. For Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image or host OS as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" -// semantic conventions. It represents the available MAC addresses of the host, -// excluding loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Kubernetes resource attributes. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S doesn't have support for obtaining a cluster ID. 
If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. 
-func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// An OCI image manifest. -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. 
It represents the digest of - // the OCI image manifest. For container images specifically is the digest - // by which the container image is known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' - // Note: Follows [OCI Image Manifest - // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), - // and specifically the [Digest - // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). - // An example can be found in [Example Image - // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). - OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically is the digest by which -// the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. 
It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") -) - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). 
-func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
-func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. 
It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Resource used by Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Resources used by Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// Heroku dyno metadata -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. 
It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'staging', 'production' - // Note: `deployment.environment` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` - // resource attributes. - // This implies that resources carrying the following attribute - // combinations MUST be - // considered to be identifying the same service: - // - // * `service.name=frontend`, `deployment.environment=production` - // * `service.name=frontend`, `deployment.environment=staging`. - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) -// (aka deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// A serverless instance. -const ( - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. 
On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run (Services):** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") -) - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. 
-func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. 
It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
- // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. 
-func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. 
It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OTelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'io.opentelemetry.contrib.mongodb' - // Deprecated: use the `otel.scope.name` attribute. - OTelLibraryNameKey = attribute.Key("otel.library.name") - - // OTelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '1.0.0' - // Deprecated: use the `otel.scope.version` attribute. - OTelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OTelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. -// -// Deprecated: use the `otel.scope.name` attribute. -func OTelLibraryName(val string) attribute.KeyValue { - return OTelLibraryNameKey.String(val) -} - -// OTelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. -// -// Deprecated: use the `otel.scope.version` attribute. -func OTelLibraryVersion(val string) attribute.KeyValue { - return OTelLibraryVersionKey.String(val) -} diff --git a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go deleted file mode 100644 index fe80b1731..000000000 --- a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" diff --git a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go deleted file mode 100644 index c1718234e..000000000 --- a/constraint/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go +++ /dev/null @@ -1,1323 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. 
It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. 
It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. 
It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. 
It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. 
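[Editorial aside, not part of the vendored diff: the deleted constructors above are plain typed-attribute builders. A minimal sketch of how instrumentation code typically consumes them, assuming a module that still imports the semconv v1.24.0 package this change drops from the vendor tree; the tracer name and function below are hypothetical.]

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// annotateSpan attaches typed span attributes built with the generated
// semconv helpers shown in the deleted file (illustrative only).
func annotateSpan(ctx context.Context) {
	_, span := otel.Tracer("example/instrumentation").Start(ctx, "handleRequest")
	defer span.End()

	span.SetAttributes(
		semconv.PeerService("AuthTokenCache"), // remote service.name
		semconv.EnduserID("username"),         // authenticated end user
		semconv.CodeFunction("handleRequest"), // code unit emitting the span
		semconv.CodeLineNumber(42),
	)
}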
-func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// This semantic convention describes an instance of a function that runs -// without provisioning or managing of servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") -) - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // describes the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") -) - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
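[Editorial aside, not part of the vendored diff: a hedged sketch of the datasource-triggered FaaS conventions above in use, under the same assumption that semconv v1.24.0 is importable; function and tracer names are hypothetical.]

package example

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// recordStorageTrigger tags a span for a function invoked by an object-store
// write, using the faas.document.* helpers and enum member defined above.
func recordStorageTrigger(ctx context.Context, bucket, object string) {
	_, span := otel.Tracer("example/faas").Start(ctx, "storage-trigger")
	defer span.End()

	span.SetAttributes(
		semconv.FaaSDocumentCollection(bucket), // e.g. the S3 bucket name
		semconv.FaaSDocumentName(object),       // e.g. the object key
		semconv.FaaSDocumentOperationInsert,    // predefined enum KeyValue
		semconv.FaaSDocumentTime(time.Now().UTC().Format(time.RFC3339)),
	)
}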
-func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// The `aws` conventions apply to operations using the AWS SDK. They map -// request or response parameters in AWS SDK API calls to attributes on a Span. -// The conventions have been collected over time based on feedback from AWS -// users of tracing and will continue to evolve as new interesting conventions -// are found. -// Some descriptions are also provided for populating general OpenTelemetry -// semantic conventions based on these APIs. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
- // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") -) - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
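[Editorial aside, not part of the vendored diff: the DynamoDB attributes above map request parameters onto a span. A minimal sketch of annotating a Query call, again assuming semconv v1.24.0 is importable and using hypothetical names.]

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// traceDynamoQuery attaches the scalar request-parameter helpers (string,
// bool, int, float64) defined above to a span around a DynamoDB Query.
func traceDynamoQuery(ctx context.Context) {
	_, span := otel.Tracer("example/aws").Start(ctx, "DynamoDB.Query")
	defer span.End()

	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"), // variadic string-slice helper
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBProvisionedReadCapacity(1.0),
	)
}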
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. 
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the the -// number of items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// DynamoDB.UpdateTable -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // the `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. 
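[Editorial aside, not part of the vendored diff: the string-slice helpers above carry one JSON-serialized document per request item. A hedged sketch for an UpdateTable span, with hypothetical names and the same import assumption as the earlier sketches.]

package example

import (
	"context"
	"encoding/json"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// traceUpdateTable serializes each AttributeDefinitions entry to JSON and
// records the resulting strings with the variadic helper defined above.
func traceUpdateTable(ctx context.Context) error {
	_, span := otel.Tracer("example/aws").Start(ctx, "DynamoDB.UpdateTable")
	defer span.End()

	def, err := json.Marshal(map[string]string{
		"AttributeName": "id",
		"AttributeType": "S",
	})
	if err != nil {
		return err
	}

	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBAttributeDefinitions(string(def)),
	)
	return nil
}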
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// Attributes that exist for S3 request types. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} diff --git a/constraint/vendor/go.opentelemetry.io/otel/trace/config.go b/constraint/vendor/go.opentelemetry.io/otel/trace/config.go index 273d58e00..9c0b720a4 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/constraint/vendor/go.opentelemetry.io/otel/trace/config.go @@ -213,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these +// option is provided to a Span's start event. Otherwise, these // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event). // diff --git a/constraint/vendor/go.opentelemetry.io/otel/trace/context.go b/constraint/vendor/go.opentelemetry.io/otel/trace/context.go index 5650a174b..8c45a7107 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/constraint/vendor/go.opentelemetry.io/otel/trace/context.go @@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) } -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly // as a remote SpanContext and as the current Span. The Span implementation // that wraps rsc is non-recording and performs no operations other than to // return rsc as the SpanContext from the SpanContext method. diff --git a/constraint/vendor/go.opentelemetry.io/otel/trace/doc.go b/constraint/vendor/go.opentelemetry.io/otel/trace/doc.go index d661c5d10..cdbf41d6d 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/constraint/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -96,7 +96,7 @@ can embed the API interface directly. This option is not recommended. 
It will lead to publishing packages that contain runtime panics when users update to newer versions of -[go.opentelemetry.io/otel/trace], which may be done with a trasitive +[go.opentelemetry.io/otel/trace], which may be done with a transitive dependency. Finally, an author can embed another implementation in theirs. The embedded diff --git a/constraint/vendor/go.opentelemetry.io/otel/trace/provider.go b/constraint/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 000000000..ef85cb70c --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. 
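For reference, a hedged sketch of how instrumentation code typically obtains a Tracer from the globally registered provider described above; the scope name "example.com/mylib" and the version string are illustrative, not taken from this change:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// doWork acquires a named, versioned Tracer and starts a span with it.
func doWork(ctx context.Context) {
	tracer := otel.GetTracerProvider().Tracer(
		"example.com/mylib", // identifies the instrumenting package, not the code being traced
		trace.WithInstrumentationVersion("0.1.0"),
	)
	ctx, span := tracer.Start(ctx, "doWork")
	defer span.End()
	_ = ctx // pass ctx on so any child spans are parented correctly
}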
+ // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/constraint/vendor/go.opentelemetry.io/otel/trace/span.go b/constraint/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 000000000..d3aa476ee --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. 
+ SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. 
+func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/constraint/vendor/go.opentelemetry.io/otel/trace/trace.go b/constraint/vendor/go.opentelemetry.io/otel/trace/trace.go index 28877d4ab..d49adf671 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/constraint/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // AddLink adds a link. - // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. 
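The Span surface that moves into span.go above keeps its existing semantics; a minimal usage sketch (scope name, span name, and attribute key are illustrative) showing why RecordError and SetStatus are both needed, and why links are attached at Start via WithLinks rather than with a later AddLink:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func handle(ctx context.Context, linked trace.SpanContext, work func(context.Context) error) error {
	tracer := otel.Tracer("example.com/handler")

	// Links are supplied at creation time so samplers can see them.
	ctx, span := tracer.Start(ctx, "handle",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithLinks(trace.Link{SpanContext: linked}),
	)
	defer span.End()

	span.SetAttributes(attribute.String("example.tenant", "acme"))

	if err := work(ctx); err != nil {
		// RecordError only adds an exception event; SetStatus is still
		// required for the span itself to be marked as failed.
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		return err
	}
	span.SetStatus(codes.Ok, "")
	return nil
}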
- SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. 
A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. 
See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/constraint/vendor/go.opentelemetry.io/otel/trace/tracer.go b/constraint/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 000000000..77952d2a0 --- /dev/null +++ b/constraint/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. 
+ // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/constraint/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/constraint/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 20b5cf243..dc5e34cad 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/constraint/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always diff --git a/constraint/vendor/go.opentelemetry.io/otel/verify_examples.sh b/constraint/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index e57bf57fc..000000000 --- a/constraint/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) 
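Stepping back to the TraceState.Walk method added in tracestate.go above: it iterates list-members in order without allocating an intermediate slice. A small sketch (function name is illustrative):

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

// dumpTraceState prints every list-member of a TraceState.
func dumpTraceState(ts trace.TraceState) {
	ts.Walk(func(key, value string) bool {
		fmt.Printf("%s=%s\n", key, value)
		return true // return false to stop early
	})
}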
-done
-
-# Cleanup
-printf "Remove copied files.\n"
-rm -rf $DIR_TMP
diff --git a/constraint/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/constraint/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 000000000..c9b7cdbbf
--- /dev/null
+++ b/constraint/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+    echo "Error: The released sections of the changelog file have been modified."
+    diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+    rm -rf "$TEMP_DIR"
+    false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/constraint/vendor/go.opentelemetry.io/otel/version.go b/constraint/vendor/go.opentelemetry.io/otel/version.go
index ab2896052..eb22002d8 100644
--- a/constraint/vendor/go.opentelemetry.io/otel/version.go
+++ b/constraint/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"

 // Version is the current release version of OpenTelemetry in use.
func Version() string { - return "1.28.0" + return "1.34.0" } diff --git a/constraint/vendor/go.opentelemetry.io/otel/versions.yaml b/constraint/vendor/go.opentelemetry.io/otel/versions.yaml index 241cfc82a..ce4fe59b0 100644 --- a/constraint/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/constraint/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,19 +3,13 @@ module-sets: stable-v1: - version: v1.28.0 + version: v1.34.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -29,21 +23,20 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.50.0 + version: v0.56.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.4.0 + version: v0.10.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.8 + version: v0.0.12 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/constraint/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/constraint/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index d7099c35b..b342a0a94 100644 --- a/constraint/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/constraint/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -311,7 +311,8 @@ type ResourceSpans struct { // A list of ScopeSpans that originate from a resource. ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_spans" field which have their own schema_url field. @@ -384,7 +385,8 @@ type ScopeSpans struct { // A list of Spans that originate from an instrumentation scope. Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. 
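As the amended trace.pb.go comment notes, the schema version is simply the last path element of the Schema URL. A trivial sketch of extracting it (function name is illustrative):

package example

import "path"

// schemaVersion returns the final path element of a Schema URL,
// e.g. "https://opentelemetry.io/schemas/1.26.0" -> "1.26.0".
func schemaVersion(schemaURL string) string {
	return path.Base(schemaURL)
}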
To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all spans and span events in the "spans" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` diff --git a/constraint/vendor/golang.org/x/exp/LICENSE b/constraint/vendor/golang.org/x/exp/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/constraint/vendor/golang.org/x/exp/LICENSE +++ b/constraint/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/constraint/vendor/golang.org/x/exp/slices/slices.go b/constraint/vendor/golang.org/x/exp/slices/slices.go index 5e8158bba..46ceac343 100644 --- a/constraint/vendor/golang.org/x/exp/slices/slices.go +++ b/constraint/vendor/golang.org/x/exp/slices/slices.go @@ -209,25 +209,37 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { return s } +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } +} + // Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check + _ = s[i:j:len(s)] // bounds check - return append(s[:i], s[j:]...) + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s } // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. -// When DeleteFunc removes m elements, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage -// collected. +// DeleteFunc zeroes the elements between the new length and the original length. 
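The new zeroing behavior documented above is observable through any other view of the backing array; a small sketch (variable names are illustrative):

package example

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// Delete now zeroes the vacated tail of the backing array, so pointers
// held by removed elements no longer keep their referents alive.
func demoDeleteZeroing() {
	s := []*int{new(int), new(int), new(int), new(int)}
	backing := s[:4:4] // separate view of the same backing array

	s = slices.Delete(s, 1, 3) // len(s) == 2 afterwards

	// Indices 2 and 3 of the backing array were cleared by Delete.
	fmt.Println(backing[2] == nil, backing[3] == nil) // true true
}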
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i := IndexFunc(s, del) if i == -1 { @@ -240,11 +252,13 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { _ = s[i:j] // verify that i:j is a valid subslice @@ -272,6 +286,7 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { if i+len(v) != j { copy(r[i+len(v):], s[j:]) } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC return r } @@ -345,9 +360,7 @@ func Clone[S ~[]E, E any](s S) S { // This is like the uniq command found on Unix. // Compact modifies the contents of the slice s and returns the modified slice, // which may have a smaller length. -// When Compact discards m elements in total, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage collected. +// Compact zeroes the elements between the new length and the original length. func Compact[S ~[]E, E comparable](s S) S { if len(s) < 2 { return s @@ -361,11 +374,13 @@ func Compact[S ~[]E, E comparable](s S) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // CompactFunc is like [Compact] but uses an equality function to compare elements. // For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { if len(s) < 2 { return s @@ -379,6 +394,7 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } diff --git a/constraint/vendor/golang.org/x/exp/slices/sort.go b/constraint/vendor/golang.org/x/exp/slices/sort.go index b67897f76..f58bbc7ba 100644 --- a/constraint/vendor/golang.org/x/exp/slices/sort.go +++ b/constraint/vendor/golang.org/x/exp/slices/sort.go @@ -22,10 +22,12 @@ func Sort[S ~[]E, E constraints.Ordered](x S) { // SortFunc sorts the slice x in ascending order as determined by the cmp // function. This sort is not guaranteed to be stable. // cmp(a, b) should return a negative number when a < b, a positive number when -// a > b and zero when a == b. +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. // // SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +// To indicate 'uncomparable', return 0 from the function. func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) diff --git a/constraint/vendor/golang.org/x/net/html/doc.go b/constraint/vendor/golang.org/x/net/html/doc.go index 3a7e5ab17..885c4c593 100644 --- a/constraint/vendor/golang.org/x/net/html/doc.go +++ b/constraint/vendor/golang.org/x/net/html/doc.go @@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order: if err != nil { // ... 
} - var f func(*html.Node) - f = func(n *html.Node) { + for n := range doc.Descendants() { if n.Type == html.ElementNode && n.Data == "a" { // Do something with n... } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } } - f(doc) The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and diff --git a/constraint/vendor/golang.org/x/net/html/doctype.go b/constraint/vendor/golang.org/x/net/html/doctype.go index c484e5a94..bca3ae9a0 100644 --- a/constraint/vendor/golang.org/x/net/html/doctype.go +++ b/constraint/vendor/golang.org/x/net/html/doctype.go @@ -87,7 +87,7 @@ func parseDoctype(s string) (n *Node, quirks bool) { } } if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") { quirks = true } } diff --git a/constraint/vendor/golang.org/x/net/html/foreign.go b/constraint/vendor/golang.org/x/net/html/foreign.go index 9da9e9dc4..e8515d8e8 100644 --- a/constraint/vendor/golang.org/x/net/html/foreign.go +++ b/constraint/vendor/golang.org/x/net/html/foreign.go @@ -40,8 +40,7 @@ func htmlIntegrationPoint(n *Node) bool { if n.Data == "annotation-xml" { for _, a := range n.Attr { if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { + if strings.EqualFold(a.Val, "text/html") || strings.EqualFold(a.Val, "application/xhtml+xml") { return true } } diff --git a/constraint/vendor/golang.org/x/net/html/iter.go b/constraint/vendor/golang.org/x/net/html/iter.go new file mode 100644 index 000000000..54be8fd30 --- /dev/null +++ b/constraint/vendor/golang.org/x/net/html/iter.go @@ -0,0 +1,56 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package html + +import "iter" + +// Ancestors returns an iterator over the ancestors of n, starting with n.Parent. +// +// Mutating a Node or its parents while iterating may have unexpected results. +func (n *Node) Ancestors() iter.Seq[*Node] { + _ = n.Parent // eager nil check + + return func(yield func(*Node) bool) { + for p := n.Parent; p != nil && yield(p); p = p.Parent { + } + } +} + +// ChildNodes returns an iterator over the immediate children of n, +// starting with n.FirstChild. +// +// Mutating a Node or its children while iterating may have unexpected results. +func (n *Node) ChildNodes() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling { + } + } + +} + +// Descendants returns an iterator over all nodes recursively beneath +// n, excluding n itself. Nodes are visited in depth-first preorder. +// +// Mutating a Node or its descendants while iterating may have unexpected results. 
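Besides Descendants, used in the rewritten package example above, the new iter.go also adds ChildNodes and Ancestors. A short sketch of both (function names are illustrative; Go 1.23+ is required for range-over-func):

package example

import "golang.org/x/net/html"

// directTextChildren collects the text of the immediate children of n.
func directTextChildren(n *html.Node) []string {
	var out []string
	for c := range n.ChildNodes() {
		if c.Type == html.TextNode {
			out = append(out, c.Data)
		}
	}
	return out
}

// enclosingElement finds the nearest ancestor element with the given name.
func enclosingElement(n *html.Node, name string) *html.Node {
	for p := range n.Ancestors() {
		if p.Type == html.ElementNode && p.Data == name {
			return p
		}
	}
	return nil
}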
+func (n *Node) Descendants() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + n.descendants(yield) + } +} + +func (n *Node) descendants(yield func(*Node) bool) bool { + for c := range n.ChildNodes() { + if !yield(c) || !c.descendants(yield) { + return false + } + } + return true +} diff --git a/constraint/vendor/golang.org/x/net/html/node.go b/constraint/vendor/golang.org/x/net/html/node.go index 1350eef22..77741a195 100644 --- a/constraint/vendor/golang.org/x/net/html/node.go +++ b/constraint/vendor/golang.org/x/net/html/node.go @@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode} // that it looks like "a 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -150,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { diff --git a/constraint/vendor/golang.org/x/net/http2/server.go b/constraint/vendor/golang.org/x/net/http2/server.go index 617b4a476..b55547aec 100644 --- a/constraint/vendor/golang.org/x/net/http2/server.go +++ b/constraint/vendor/golang.org/x/net/http2/server.go @@ -306,7 +306,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -323,12 +323,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. 
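For comparison with the new TLSNextProto hand-off registered below, the long-standing public way to serve plaintext HTTP/2 is to pass accepted connections to ServeConn directly; a minimal sketch (listener and handler wiring is illustrative):

package example

import (
	"log"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

// serveH2C serves HTTP/2 over plaintext TCP by handing each accepted
// connection to ServeConn, the same entry point the hooks above use.
func serveH2C(l net.Listener, h http.Handler) {
	s := &http2.Server{}
	for {
		c, err := l.Accept()
		if err != nil {
			log.Print(err)
			return
		}
		go s.ServeConn(c, &http2.ServeConnOpts{Handler: h})
	}
}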
+ s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -913,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, conf.MaxReadFrameSize}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, - {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, - }, + write: settings, }) sc.unackedSettings++ @@ -1782,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2212,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2240,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2266,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2307,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ *url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { @@ -2880,6 +2916,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { return nil } +func (w 
*responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. + return nil +} + func (w *responseWriter) Flush() { w.FlushError() } diff --git a/constraint/vendor/golang.org/x/net/http2/transport.go b/constraint/vendor/golang.org/x/net/http2/transport.go index 0c5f64aa8..b2e2ed337 100644 --- a/constraint/vendor/golang.org/x/net/http2/transport.go +++ b/constraint/vendor/golang.org/x/net/http2/transport.go @@ -202,6 +202,20 @@ func (t *Transport) markNewGoroutine() { } } +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() + } + return time.Now() +} + +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) + } + return time.Since(when) +} + // newTimer creates a new time.Timer, or a synthetic timer in tests. func (t *Transport) newTimer(d time.Duration) timer { if t.transportTestHooks != nil { @@ -281,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -293,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. 
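The unencryptedTransport type declared just below is internal plumbing for the net/http integration; the public way to make plaintext HTTP/2 requests remains the AllowHTTP escape hatch checked in RoundTripOpt. A hedged sketch of an "h2c" client built that way:

package example

import (
	"context"
	"crypto/tls"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

// h2cClient returns a client that speaks HTTP/2 over plain TCP.
func h2cClient() *http.Client {
	return &http.Client{
		Transport: &http2.Transport{
			AllowHTTP: true,
			// The scheme is "http", so dial a plain TCP connection and
			// ignore the TLS config entirely.
			DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, network, addr)
			},
		},
	}
}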
+type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -324,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -335,25 +368,27 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + closedOnIdle bool // true if conn was closed for idleness + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -363,6 +398,25 @@ type ClientConn struct { initialStreamRecvWindowSize int32 readIdleTimeout time.Duration pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. 
+ // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool + + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -420,12 +474,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -530,6 +584,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -562,7 +618,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. + case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -573,7 +636,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -598,6 +661,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. 
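RoundTripOpt and OnlyCachedConn, referenced above, are public API; a small sketch of restricting a request to an already-pooled connection (function name is illustrative):

package example

import (
	"net/http"

	"golang.org/x/net/http2"
)

// tryCachedConn sends req only if a pooled HTTP/2 connection to its host
// already exists, returning ErrNoCachedConn otherwise so the caller can
// fall back to another path.
func tryCachedConn(t *http2.Transport, req *http.Request) (*http.Response, error) {
	res, err := t.RoundTripOpt(req, http2.RoundTripOpt{OnlyCachedConn: true})
	if err == http2.ErrNoCachedConn {
		return nil, err
	}
	return res, err
}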
Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -616,9 +695,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -752,11 +832,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, + seenSettingsChan: make(chan struct{}), wantSettingsAck: true, readIdleTimeout: conf.SendPingTimeout, pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), } var group synctestGroupInterface if t.transportTestHooks != nil { @@ -960,7 +1042,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -992,16 +1074,40 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. + maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. 
+func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -1014,7 +1120,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1052,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -1376,6 +1483,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1391,12 +1500,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre return err } + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu. // This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1578,6 +1706,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1602,16 +1731,44 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). + // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. 
+ // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + } + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1633,12 +1790,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. + return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -1910,7 +2072,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) func validateHeaders(hdrs http.Header) string { for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) } for _, v := range vv { @@ -1926,6 +2088,10 @@ func validateHeaders(hdrs http.Header) string { var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -1946,7 +2112,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -1983,7 +2149,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2180,10 +2346,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. 
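The hunks above change how the transport charges canceled requests against a connection's concurrency limit: a canceled stream keeps occupying a slot (via pendingResets) until the peer answers a PING bundled with the RST_STREAM, and rstStreamPingsBlocked throttles those PINGs between acknowledgements. The following standalone sketch illustrates that accounting idea only; it is not the vendored x/net/http2 code, and names such as connState, cancelStream, and onPingAck are illustrative.

// Sketch: charging canceled streams against the concurrency limit
// until a bundled PING is acknowledged. Illustrative only; not the
// vendored x/net/http2 implementation.
package main

import (
	"fmt"
	"sync"
)

type connState struct {
	mu                    sync.Mutex
	activeStreams         int
	reservedSlots         int
	pendingResets         int  // RST_STREAMs sent, PING ack not yet seen
	maxConcurrentStreams  int
	rstStreamPingsBlocked bool // hold back further reset-PINGs until the peer speaks again
}

// inUse mirrors the idea behind currentRequestCountLocked: active
// streams, reservations, and unacknowledged resets all occupy a slot.
func (c *connState) inUse() int {
	return c.activeStreams + c.reservedSlots + c.pendingResets
}

func (c *connState) canTakeNewRequest() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.inUse() < c.maxConcurrentStreams
}

// cancelStream models canceling an in-flight request: the stream is
// gone, but its slot is released only once a PING ack arrives.
// It reports whether a PING should be bundled with the RST_STREAM.
func (c *connState) cancelStream() (sendPing bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.activeStreams--
	if c.rstStreamPingsBlocked {
		return false
	}
	if c.pendingResets == 0 {
		sendPing = true // bundle exactly one PING with the RST_STREAM
	}
	c.pendingResets++
	return sendPing
}

// onPingAck models receiving the PING response: all pending resets are
// released at once, and further reset-PINGs are held back (the gRPC
// workaround) until the peer sends a HEADERS or DATA frame.
func (c *connState) onPingAck() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.pendingResets = 0
	c.rstStreamPingsBlocked = true
}

func main() {
	c := &connState{activeStreams: 3, maxConcurrentStreams: 3}
	c.cancelStream() // slot stays charged via pendingResets
	fmt.Println("can take new request:", c.canTakeNewRequest()) // false
	c.onPingAck()
	fmt.Println("can take new request:", c.canTakeNewRequest()) // true
}

The point of the design is that an unresponsive peer cannot be flooded with retries: until it answers the PING, every canceled request still counts toward maxConcurrentStreams.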
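The writeRequest hunk earlier also gates extended CONNECT (requests with a ":protocol" header) on the first SETTINGS frame: the request blocks on seenSettingsChan and then fails if the peer did not advertise SETTINGS_ENABLE_CONNECT_PROTOCOL. A minimal sketch of that gate, under the assumption of illustrative names (settingsGate, awaitExtendedConnect) rather than the vendored API:

// Sketch of waiting for the first SETTINGS frame before allowing an
// extended CONNECT (RFC 8441) request. Illustrative only.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errExtendedConnectNotSupported = errors.New("extended connect not supported by peer")

type settingsGate struct {
	seenSettings           chan struct{} // closed once the first SETTINGS frame is processed
	extendedConnectAllowed bool          // only read after seenSettings is closed
}

// awaitExtendedConnect blocks until the first SETTINGS frame has been
// seen (or the context is canceled), then reports whether the peer
// enabled the extended CONNECT protocol in that frame.
func (g *settingsGate) awaitExtendedConnect(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-g.seenSettings:
		if !g.extendedConnectAllowed {
			return errExtendedConnectNotSupported
		}
		return nil
	}
}

func main() {
	g := &settingsGate{seenSettings: make(chan struct{})}

	// Simulate the read loop processing the server's first SETTINGS
	// frame, which advertises support for extended CONNECT.
	go func() {
		time.Sleep(10 * time.Millisecond)
		g.extendedConnectAllowed = true
		close(g.seenSettings)
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := g.awaitExtendedConnect(ctx); err != nil {
		fmt.Println("extended CONNECT blocked:", err)
		return
	}
	fmt.Println("extended CONNECT allowed")
}

Requiring the setting in the first SETTINGS frame keeps behavior deterministic: a later frame enabling the feature would otherwise make the same request succeed or fail depending on timing.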
@@ -2243,7 +2409,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2267,6 +2432,27 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. + // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2324,7 +2510,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2370,13 +2556,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this @@ -2494,15 +2683,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. 
+ limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -2686,7 +2894,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2821,9 +3029,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. + rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2917,6 +3138,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -2934,6 +3170,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. 
cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -2942,7 +3179,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -2971,7 +3208,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3046,6 +3283,12 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3068,13 +3311,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } @@ -3228,7 +3478,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() diff --git a/constraint/vendor/golang.org/x/net/http2/unencrypted.go b/constraint/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 000000000..b2de21161 --- /dev/null +++ b/constraint/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. 
+func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/constraint/vendor/golang.org/x/oauth2/LICENSE b/constraint/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/constraint/vendor/golang.org/x/oauth2/LICENSE +++ b/constraint/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/constraint/vendor/golang.org/x/oauth2/README.md b/constraint/vendor/golang.org/x/oauth2/README.md index 781770c20..48dbb9d84 100644 --- a/constraint/vendor/golang.org/x/oauth2/README.md +++ b/constraint/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/constraint/vendor/golang.org/x/oauth2/token.go b/constraint/vendor/golang.org/x/oauth2/token.go index 5bbb33217..109997d77 100644 --- a/constraint/vendor/golang.org/x/oauth2/token.go +++ b/constraint/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. 
raw interface{} diff --git a/constraint/vendor/golang.org/x/sys/unix/ioctl_linux.go b/constraint/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680eab..7ca4fa12a 100644 --- a/constraint/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/constraint/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. +func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. 
+func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. diff --git a/constraint/vendor/golang.org/x/sys/unix/mkerrors.sh b/constraint/vendor/golang.org/x/sys/unix/mkerrors.sh index ac54ecaba..6ab02b6c3 100644 --- a/constraint/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/constraint/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -158,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . #if defined(__powerpc__) @@ -256,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -527,6 +538,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || diff --git a/constraint/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/constraint/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f2..be8c00207 100644 --- a/constraint/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/constraint/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/constraint/vendor/golang.org/x/sys/unix/syscall_linux.go b/constraint/vendor/golang.org/x/sys/unix/syscall_linux.go index f08abd434..230a94549 100644 --- a/constraint/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/constraint/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) //sys CloseRange(first uint, last uint, flags uint) (err error) diff --git a/constraint/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/constraint/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac1..7bf5c04bb 100644 --- a/constraint/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ 
b/constraint/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) @@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) { // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ func isSpecialPath(path []byte) (v bool) { var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} var i, j int for i = 0; i < len(special); i++ { @@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { //sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. 
+ if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux.go index de3b46248..6ebc48b3f 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -321,6 +321,9 @@ const ( AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 @@ -489,6 +492,7 @@ const ( BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 @@ -1166,6 +1170,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1799,6 +1804,8 @@ const ( LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1924,6 +1931,7 @@ const ( MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2625,6 +2633,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -2948,6 +2978,7 @@ const ( RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8aa6d77c0..c0d45e320 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -237,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + 
PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 @@ -283,6 +298,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -321,6 +338,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index da428f425..c731d24f0 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -237,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 @@ -284,6 +299,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -322,6 +339,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index bf45bfec7..680018a4a 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -234,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 
0x1f PTRACE_GETFDPIC_EXEC = 0x0 @@ -289,6 +304,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -327,6 +344,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 71c67162b..a63909f30 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -205,6 +206,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -240,6 +242,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f @@ -280,6 +296,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -318,6 +336,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9476628fa..9b0a2573f 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -238,6 +239,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 @@ -276,6 +291,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 
0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -314,6 +331,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9e85f3cf..958e6e064 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -234,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -320,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index a48b68a76..50c7f25bd 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -234,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -320,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 
SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ea00e8522..ced21d66d 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -234,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -320,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 91c646871..226c04419 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -234,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -320,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go 
b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 8cbf38d63..3122737cd 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -237,6 +238,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -337,6 +352,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -375,6 +392,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index a2df73419..eb5d3467e 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -237,6 +238,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -341,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -379,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 247913792..e921ebc60 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 
HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -237,6 +238,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -341,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -379,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index d265f146e..38ba81c55 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -234,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 @@ -273,6 +288,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -311,6 +328,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 3f2d64439..71f040097 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -234,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 
0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 @@ -345,6 +360,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -383,6 +400,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 5d8b727a1..c44a31332 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/constraint/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -239,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 @@ -336,6 +351,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c @@ -422,6 +439,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/constraint/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/constraint/vendor/golang.org/x/sys/unix/zsyscall_linux.go index af30da557..5cc1e8eb2 100644 --- a/constraint/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/constraint/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { diff 
--git a/constraint/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/constraint/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index d003c3d43..17c53bd9b 100644 --- a/constraint/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/constraint/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/constraint/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/constraint/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 0d45a941a..2392226a7 100644 --- a/constraint/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/constraint/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics 
struct { Locks uint32 Mtu uint32 diff --git a/constraint/vendor/golang.org/x/sys/unix/ztypes_linux.go b/constraint/vendor/golang.org/x/sys/unix/ztypes_linux.go index 3a69e4549..5537148dc 100644 --- a/constraint/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/constraint/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1752,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1796,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1825,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1857,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1925,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1977,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -2586,8 +2594,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + SOF_TIMESTAMPING_LAST = 0x20000 + SOF_TIMESTAMPING_MASK = 0x3ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3533,7 +3541,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3794,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2c + ETHTOOL_MSG_USER_MAX = 0x2d ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3834,7 +3842,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_MSG_KERNEL_MAX = 0x2e ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3842,7 +3850,7 @@ const ( ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -4023,11 +4031,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 
ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4110,6 +4118,107 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Clockid int32 + Rsv [2]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 @@ -4291,6 +4400,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { diff --git a/constraint/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/constraint/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af46..2e5d5a443 100644 --- a/constraint/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/constraint/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/constraint/vendor/golang.org/x/sys/windows/dll_windows.go b/constraint/vendor/golang.org/x/sys/windows/dll_windows.go index 4e613cf63..3ca814f54 100644 --- a/constraint/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/constraint/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. 
To safely load a system DLL, use [NewLazySystemDLL]. func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/constraint/vendor/golang.org/x/sys/windows/syscall_windows.go b/constraint/vendor/golang.org/x/sys/windows/syscall_windows.go index 5cee9a314..4a3254386 100644 --- a/constraint/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/constraint/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -168,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -725,20 +727,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -894,6 +888,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to 
return EAFNOSUPPORT. @@ -1685,13 +1684,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. diff --git a/constraint/vendor/golang.org/x/sys/windows/types_windows.go b/constraint/vendor/golang.org/x/sys/windows/types_windows.go index 7b97a154c..9d138de5f 100644 --- a/constraint/vendor/golang.org/x/sys/windows/types_windows.go +++ b/constraint/vendor/golang.org/x/sys/windows/types_windows.go @@ -176,6 +176,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. + PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 @@ -2203,6 +2204,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. +const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. +type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. 
+type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. +type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. 
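The types_windows.go hunk above vendors the new iphlpapi-related structures (MibIfRow2, MibUnicastIpAddressRow, MibIpInterfaceRow) and the MibIfEntryNormal / MIB_NOTIFICATION_TYPE constants, and the earlier syscall_windows.go hunk declares the matching bindings (GetIfEntry2Ex, GetUnicastIpAddressEntry, NotifyIpInterfaceChange, NotifyUnicastIpAddressChange, CancelMibChangeNotify2). Below is a minimal sketch of how downstream code might call GetIfEntry2Ex once this vendored update lands; only the signatures, types, and constants come from the diff itself, while the interface index, the program layout, and the build tag are illustrative assumptions.

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Assumption: interface index 1 exists on this host; in practice the index
	// (or InterfaceLuid) would come from e.g. GetAdaptersAddresses.
	var row windows.MibIfRow2
	row.InterfaceIndex = 1

	// GetIfEntry2Ex fills the row for the interface identified above.
	// MibIfEntryNormal requests the full row including statistics.
	if err := windows.GetIfEntry2Ex(windows.MibIfEntryNormal, &row); err != nil {
		fmt.Println("GetIfEntry2Ex:", err)
		return
	}

	// Alias is a NUL-terminated UTF-16 buffer; decode it for display.
	fmt.Printf("alias=%s mtu=%d oper-status=%d\n",
		windows.UTF16ToString(row.Alias[:]), row.Mtu, row.OperStatus)
}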
diff --git a/constraint/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/constraint/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 4c2e1bdc0..01c0716c2 100644 --- a/constraint/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/constraint/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -181,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -275,8 +280,10 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -1606,6 +1613,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1638,6 +1653,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) 
(errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func AddDllDirectory(path *uint16) (cookie uintptr, err error) { r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) @@ -2393,6 +2448,14 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er return } +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { @@ -2409,6 +2472,14 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 return } +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { diff --git a/constraint/vendor/golang.org/x/term/README.md b/constraint/vendor/golang.org/x/term/README.md index d03d0aefe..05ff623f9 100644 --- a/constraint/vendor/golang.org/x/term/README.md +++ b/constraint/vendor/golang.org/x/term/README.md @@ -4,16 +4,13 @@ This repository provides Go terminal and console support packages. -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/term`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/term`. - ## Report Issues / Send Patches This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/term. The main issue tracker for the term repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the +https://go.dev/issues. Prefix your issue with "x/term:" in the subject line, so it is easy to find. 
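The zsyscall_windows.go hunk a little further up also wires in GetNamedPipeClientProcessId and GetNamedPipeServerProcessId. The sketch below shows one plausible way a caller might query the server-side PID of a pipe it just created; the signatures are taken from the vendored code, but the pipe name, the CreateNamedPipe parameters, and the overall call pattern are illustrative assumptions rather than anything prescribed by this repository.

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Hypothetical pipe name, for illustration only.
	name, err := windows.UTF16PtrFromString(`\\.\pipe\frameworks-demo`)
	if err != nil {
		fmt.Println("UTF16PtrFromString:", err)
		return
	}
	h, err := windows.CreateNamedPipe(name,
		windows.PIPE_ACCESS_DUPLEX,
		windows.PIPE_TYPE_BYTE|windows.PIPE_READMODE_BYTE|windows.PIPE_WAIT,
		1, 4096, 4096, 0, nil)
	if err != nil {
		fmt.Println("CreateNamedPipe:", err)
		return
	}
	defer windows.CloseHandle(h)

	// New in this update: ask the OS which process owns the server end.
	// For a handle we created ourselves this should be our own PID.
	var pid uint32
	if err := windows.GetNamedPipeServerProcessId(h, &pid); err != nil {
		fmt.Println("GetNamedPipeServerProcessId:", err)
		return
	}
	fmt.Println("server pid:", pid, "current pid:", windows.GetCurrentProcessId())
}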
diff --git a/constraint/vendor/golang.org/x/text/width/kind_string.go b/constraint/vendor/golang.org/x/text/width/kind_string.go deleted file mode 100644 index dd3febd43..000000000 --- a/constraint/vendor/golang.org/x/text/width/kind_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=Kind"; DO NOT EDIT. - -package width - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[Neutral-0] - _ = x[EastAsianAmbiguous-1] - _ = x[EastAsianWide-2] - _ = x[EastAsianNarrow-3] - _ = x[EastAsianFullwidth-4] - _ = x[EastAsianHalfwidth-5] -} - -const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" - -var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} - -func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { - return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] -} diff --git a/constraint/vendor/golang.org/x/text/width/tables10.0.0.go b/constraint/vendor/golang.org/x/text/width/tables10.0.0.go deleted file mode 100644 index 07c1cb17a..000000000 --- a/constraint/vendor/golang.org/x/text/width/tables10.0.0.go +++ /dev/null @@ -1,1328 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.10 && !go1.13 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "10.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c59df54630d3dc4a. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 101 blocks, 6464 entries, 12928 bytes -// The third block is the zero block. 
-var widthValues = [6464]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, - 0xdb1: 
0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 
0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 0x4000, - // Block 
0x3f, offset 0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 0x10fd: 0x9049, 
0x10fe: 0x9049, 0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, 0x11e1: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 
0x4000, 0x1229: 0x4000, - 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 
0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 
0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 
0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bd: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, - 0x16fa: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1715: 0x4000, 0x1716: 0x4000, - 0x1724: 0x4000, - // Block 0x5d, offset 0x1740 - 0x177b: 0x4000, - 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 
0x1785: 0x4000, - 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, - 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, - 0x17eb: 0x4000, 0x17ec: 0x4000, - 0x17f4: 0x4000, 0x17f5: 0x4000, - 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, - // Block 0x60, offset 0x1800 - 0x1810: 0x4000, 0x1811: 0x4000, - 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, - 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, - 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, - 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, - 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, - 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, - 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, - 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, - // Block 0x61, offset 0x1840 - 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, - 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, - 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, - 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, - 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, - 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, - 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, - 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, - 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, - 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, - 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, - 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, - 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, - 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, - 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 
0x1929: 0x2000, - 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, - 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, - 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, - 0x193c: 0x2000, 0x193d: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. -var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 
0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 
0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, - 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, - 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. 
-// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. -var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - 
{0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 14936 bytes (14KiB) diff --git a/constraint/vendor/golang.org/x/text/width/tables11.0.0.go b/constraint/vendor/golang.org/x/text/width/tables11.0.0.go deleted file mode 100644 index 89288b3da..000000000 --- a/constraint/vendor/golang.org/x/text/width/tables11.0.0.go +++ /dev/null @@ -1,1340 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.13 && !go1.14 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "11.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
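// Illustrative sketch (not part of the vendored files in this diff): a
// hypothetical helper showing the xor scheme documented in the inverseData
// comment above. Each 4-byte entry stores an output length, the UTF-8 prefix
// of the mapped rune, and a final byte that is xor-ed with the last byte of
// the source rune's UTF-8 encoding, so many mappings share a single entry.
// The names decodeInverse and entry below are assumptions for illustration
// only; they do not exist in the x/text package.
package main

import "fmt"

// decodeInverse applies one inverseData-style entry to a source rune and
// returns the UTF-8 bytes of the mapped rune.
func decodeInverse(entry [4]byte, src rune) []byte {
	n := int(entry[0])       // entry[0] holds the output length in bytes
	out := make([]byte, n)
	copy(out, entry[1:1+n])  // copy the stored (xor-masked) UTF-8 bytes
	s := []byte(string(src)) // UTF-8 encoding of the source rune
	out[n-1] ^= s[len(s)-1]  // recover the last byte, e.g. 0xE0 ^ 0xA1 = 0x41
	return out
}

func main() {
	// The entry from the comment's worked example: {0x01, 0xE0, 0x00, 0x00}.
	entry := [4]byte{0x01, 0xE0, 0x00, 0x00}
	fmt.Printf("%s\n", decodeInverse(entry, '\uFF21')) // fullwidth A -> "A"
	fmt.Printf("%s\n", decodeInverse(entry, '\uFF22')) // fullwidth B -> "B"
}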
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c0f7712776e71cd4. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 101 blocks, 6464 entries, 12928 bytes -// The third block is the zero block. 
-// widthValues: 101 blocks, 6464 entries, 12928 bytes
-// The third block is the zero block.
-var widthValues = [6464]uint16{
	[... 6464 generated uint16 width-class entries across 101 blocks ...]
-}
-
-// widthIndex: 22 blocks, 1408 entries, 1408 bytes
-// Block 0 is the zero block.
-var widthIndex = [1408]uint8{
	[... 1408 generated uint8 first-level index entries across 22 blocks ...]
-}
-
-// inverseData contains 4-byte entries of the following format:
-//
-//	<length> <modified UTF-8 sequence> <0 padding>
-//
-// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
-// UTF-8 encoding of the original rune. Mappings often have the following
-// pattern:
-//
-//	Ａ -> A (U+FF21 -> U+0041)
-//	Ｂ -> B (U+FF22 -> U+0042)
-//	...
-//
-// By xor-ing the last byte the same entry can be shared by many mappings. This
-// reduces the total number of distinct entries by about two thirds.
-// The resulting entry for the aforementioned mappings is
-//
-//	{ 0x01, 0xE0, 0x00, 0x00 }
-//
-// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
-//
-//	E0 ^ A1 = 41.
-//
-// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
-//
-//	E0 ^ A2 = 42.
-//
-// Note that because of the xor-ing, the byte sequence stored in the entry is
-// not valid UTF-8.
-var inverseData = [150][4]byte{
-	{0x00, 0x00, 0x00, 0x00},
	[... remaining generated 4-byte inverseData entries ...]
0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 14936 bytes (14KiB) diff --git a/constraint/vendor/golang.org/x/text/width/tables12.0.0.go b/constraint/vendor/golang.org/x/text/width/tables12.0.0.go deleted file mode 100644 index 755ee9122..000000000 --- a/constraint/vendor/golang.org/x/text/width/tables12.0.0.go +++ /dev/null @@ -1,1360 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.14 && !go1.16 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "12.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. 
- } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14720 bytes (14.38 KiB). Checksum: 3f4f2516ded5489b. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 104 blocks, 6656 entries, 13312 bytes -// The third block is the zero block. -var widthValues = [6656]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 
0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 
0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 
0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 
0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 
0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 
0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 
0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 
0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 
0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 0x4000, - // Block 0x3f, offset 0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 
0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 
0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, - 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, - 0x1230: 0x4000, 0x1231: 0x4000, 0x1232: 0x4000, 0x1233: 0x4000, 0x1234: 0x4000, 0x1235: 0x4000, - 0x1236: 0x4000, 0x1237: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12d0: 0x4000, 0x12d1: 0x4000, - 0x12d2: 0x4000, - 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 
0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 
0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 
0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 
0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bd: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, - 0x16fa: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1715: 0x4000, 0x1716: 0x4000, - 0x1724: 0x4000, - // Block 0x5d, offset 0x1740 - 0x177b: 0x4000, - 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, - 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, - 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, 0x17d5: 0x4000, - 0x17eb: 0x4000, 0x17ec: 0x4000, - 0x17f4: 0x4000, 0x17f5: 0x4000, - 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, 0x17fa: 0x4000, - // Block 0x60, offset 0x1800 - 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, - 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, - 0x182a: 0x4000, 0x182b: 0x4000, - // Block 0x61, offset 0x1840 - 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, - 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, - 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, - 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, - 0x1870: 0x4000, 0x1871: 0x4000, 0x1872: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, - 0x1876: 0x4000, 0x1877: 0x4000, 0x1878: 0x4000, 0x1879: 0x4000, 0x187a: 0x4000, 0x187b: 0x4000, - 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, - 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, - 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 
0x4000, - 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, - 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, - 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, - 0x18b6: 0x4000, 0x18ba: 0x4000, 0x18bb: 0x4000, - 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, - 0x18c6: 0x4000, 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, - 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, - 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, - 0x18ea: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, - 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, - 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, - 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, - 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, - 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, - 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, - 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, - 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 0x1923: 0x4000, - 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, - 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, - 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, - 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, - 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, - // Block 0x65, offset 0x1940 - 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, - 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, - // Block 0x66, offset 0x1980 - 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, - 0x1990: 0x4000, 0x1991: 0x4000, - 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x2000, 0x19c1: 0x2000, 0x19c2: 0x2000, 0x19c3: 0x2000, 0x19c4: 0x2000, 0x19c5: 0x2000, - 0x19c6: 0x2000, 0x19c7: 0x2000, 0x19c8: 0x2000, 0x19c9: 0x2000, 0x19ca: 0x2000, 0x19cb: 0x2000, - 0x19cc: 0x2000, 0x19cd: 0x2000, 0x19ce: 0x2000, 0x19cf: 0x2000, 0x19d0: 0x2000, 0x19d1: 0x2000, - 0x19d2: 0x2000, 0x19d3: 0x2000, 0x19d4: 0x2000, 0x19d5: 0x2000, 0x19d6: 0x2000, 0x19d7: 0x2000, - 0x19d8: 0x2000, 0x19d9: 0x2000, 0x19da: 0x2000, 0x19db: 0x2000, 0x19dc: 0x2000, 0x19dd: 0x2000, - 0x19de: 0x2000, 0x19df: 0x2000, 0x19e0: 0x2000, 0x19e1: 0x2000, 0x19e2: 0x2000, 0x19e3: 0x2000, - 0x19e4: 0x2000, 0x19e5: 0x2000, 0x19e6: 0x2000, 0x19e7: 0x2000, 0x19e8: 0x2000, 0x19e9: 0x2000, - 0x19ea: 0x2000, 0x19eb: 0x2000, 0x19ec: 0x2000, 0x19ed: 0x2000, 
0x19ee: 0x2000, 0x19ef: 0x2000, - 0x19f0: 0x2000, 0x19f1: 0x2000, 0x19f2: 0x2000, 0x19f3: 0x2000, 0x19f4: 0x2000, 0x19f5: 0x2000, - 0x19f6: 0x2000, 0x19f7: 0x2000, 0x19f8: 0x2000, 0x19f9: 0x2000, 0x19fa: 0x2000, 0x19fb: 0x2000, - 0x19fc: 0x2000, 0x19fd: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. -var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 
0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 
0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, - 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, 0x41f: 0x5e, - 0x424: 0x5f, 0x425: 0x60, 0x426: 0x61, 0x427: 0x62, - 0x429: 0x63, 0x42a: 0x64, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x65, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. 
-// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. -var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - 
{0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 15320 bytes (14KiB) diff --git a/constraint/vendor/golang.org/x/text/width/tables13.0.0.go b/constraint/vendor/golang.org/x/text/width/tables13.0.0.go deleted file mode 100644 index 40c169edf..000000000 --- a/constraint/vendor/golang.org/x/text/width/tables13.0.0.go +++ /dev/null @@ -1,1361 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.16 && !go1.21 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "13.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14848 bytes (14.50 KiB). Checksum: 17e24343536472f6. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 105 blocks, 6720 entries, 13440 bytes -// The third block is the zero block. 
-var widthValues = [6720]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, 0xe3b: 0x4000, - 0xe3c: 0x4000, 0xe3d: 0x4000, 0xe3e: 0x4000, 0xe3f: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 
0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, 0xf3f: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, - // Block 0x3e, offset 0xf80 - 0xfa0: 0x4000, 0xfa1: 0x4000, 0xfa2: 0x4000, 0xfa3: 0x4000, - 0xfa4: 0x4000, 0xfa5: 0x4000, 0xfa6: 0x4000, 0xfa7: 0x4000, 0xfa8: 0x4000, 0xfa9: 0x4000, - 0xfaa: 0x4000, 0xfab: 0x4000, 0xfac: 0x4000, 0xfad: 0x4000, 0xfae: 0x4000, 0xfaf: 0x4000, - 0xfb0: 0x4000, 0xfb1: 0x4000, 0xfb2: 0x4000, 0xfb3: 0x4000, 0xfb4: 0x4000, 0xfb5: 0x4000, - 0xfb6: 0x4000, 0xfb7: 0x4000, 0xfb8: 0x4000, 0xfb9: 0x4000, 0xfba: 0x4000, 0xfbb: 0x4000, - 0xfbc: 0x4000, - // Block 0x3f, offset 0xfc0 - 0xfc0: 0x4000, 0xfc1: 0x4000, 0xfc2: 0x4000, 0xfc3: 0x4000, 0xfc4: 0x4000, 0xfc5: 0x4000, - 0xfc6: 0x4000, 0xfc7: 0x4000, 0xfc8: 0x4000, 0xfc9: 0x4000, 0xfca: 0x4000, 0xfcb: 0x4000, - 0xfcc: 0x4000, 0xfcd: 0x4000, 0xfce: 0x4000, 0xfcf: 0x4000, 0xfd0: 0x4000, 0xfd1: 0x4000, - 0xfd2: 0x4000, 0xfd3: 0x4000, 0xfd4: 0x4000, 0xfd5: 0x4000, 0xfd6: 0x4000, 0xfd7: 0x4000, - 0xfd8: 0x4000, 0xfd9: 0x4000, 0xfda: 
0x4000, 0xfdb: 0x4000, 0xfdc: 0x4000, 0xfdd: 0x4000, - 0xfde: 0x4000, 0xfdf: 0x4000, 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x2000, 0x1001: 0x2000, 0x1002: 0x2000, 0x1003: 0x2000, 0x1004: 0x2000, 0x1005: 0x2000, - 0x1006: 0x2000, 0x1007: 0x2000, 0x1008: 0x2000, 0x1009: 0x2000, 0x100a: 0x2000, 0x100b: 0x2000, - 0x100c: 0x2000, 0x100d: 0x2000, 0x100e: 0x2000, 0x100f: 0x2000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, - 0x1030: 0x4000, 0x1031: 0x4000, 0x1032: 0x4000, 0x1033: 0x4000, 0x1034: 0x4000, 0x1035: 0x4000, - 0x1036: 0x4000, 0x1037: 0x4000, 0x1038: 0x4000, 0x1039: 0x4000, 0x103a: 0x4000, 0x103b: 0x4000, - 0x103c: 0x4000, 0x103d: 0x4000, 0x103e: 0x4000, 0x103f: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x4000, 0x1041: 0x4000, 0x1042: 0x4000, 0x1043: 0x4000, 0x1044: 0x4000, 0x1045: 0x4000, - 0x1046: 0x4000, 0x1047: 0x4000, 0x1048: 0x4000, 0x1049: 0x4000, 0x104a: 0x4000, 0x104b: 0x4000, - 0x104c: 0x4000, 0x104d: 0x4000, 0x104e: 0x4000, 0x104f: 0x4000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, 0x105a: 0x4000, 0x105b: 0x4000, 0x105c: 0x4000, 0x105d: 0x4000, - 0x105e: 0x4000, 0x105f: 0x4000, 0x1060: 0x4000, 0x1061: 0x4000, 0x1062: 0x4000, 0x1063: 0x4000, - 0x1064: 0x4000, 0x1065: 0x4000, 0x1066: 0x4000, 0x1068: 0x4000, 0x1069: 0x4000, - 0x106a: 0x4000, 0x106b: 0x4000, - // Block 0x42, offset 0x1080 - 0x1081: 0x9012, 0x1082: 0x9012, 0x1083: 0x9012, 0x1084: 0x9012, 0x1085: 0x9012, - 0x1086: 0x9012, 0x1087: 0x9012, 0x1088: 0x9012, 0x1089: 0x9012, 0x108a: 0x9012, 0x108b: 0x9012, - 0x108c: 0x9012, 0x108d: 0x9012, 0x108e: 0x9012, 0x108f: 0x9012, 0x1090: 0x9012, 0x1091: 0x9012, - 0x1092: 0x9012, 0x1093: 0x9012, 0x1094: 0x9012, 0x1095: 0x9012, 0x1096: 0x9012, 0x1097: 0x9012, - 0x1098: 0x9012, 0x1099: 0x9012, 0x109a: 0x9012, 0x109b: 0x9012, 0x109c: 0x9012, 0x109d: 0x9012, - 0x109e: 0x9012, 0x109f: 0x9012, 0x10a0: 0x9049, 0x10a1: 0x9049, 0x10a2: 0x9049, 0x10a3: 0x9049, - 0x10a4: 0x9049, 0x10a5: 0x9049, 0x10a6: 0x9049, 0x10a7: 0x9049, 0x10a8: 0x9049, 0x10a9: 0x9049, - 0x10aa: 0x9049, 0x10ab: 0x9049, 0x10ac: 0x9049, 0x10ad: 0x9049, 0x10ae: 0x9049, 0x10af: 0x9049, - 0x10b0: 0x9049, 0x10b1: 0x9049, 0x10b2: 0x9049, 0x10b3: 0x9049, 0x10b4: 0x9049, 0x10b5: 0x9049, - 0x10b6: 0x9049, 0x10b7: 0x9049, 0x10b8: 0x9049, 0x10b9: 0x9049, 0x10ba: 0x9049, 0x10bb: 0x9049, - 0x10bc: 0x9049, 0x10bd: 0x9049, 0x10be: 0x9049, 0x10bf: 0x9049, - // Block 0x43, offset 0x10c0 - 0x10c0: 0x9049, 0x10c1: 0x9049, 0x10c2: 0x9049, 0x10c3: 0x9049, 0x10c4: 0x9049, 0x10c5: 0x9049, - 0x10c6: 0x9049, 0x10c7: 0x9049, 0x10c8: 0x9049, 0x10c9: 0x9049, 0x10ca: 0x9049, 0x10cb: 0x9049, - 0x10cc: 0x9049, 0x10cd: 0x9049, 0x10ce: 0x9049, 0x10cf: 0x9049, 0x10d0: 0x9049, 0x10d1: 0x9049, - 0x10d2: 0x9049, 0x10d3: 0x9049, 0x10d4: 0x9049, 0x10d5: 0x9049, 0x10d6: 0x9049, 0x10d7: 0x9049, - 0x10d8: 0x9049, 0x10d9: 0x9049, 0x10da: 0x9049, 0x10db: 0x9049, 0x10dc: 0x9049, 0x10dd: 0x9049, - 0x10de: 0x9049, 0x10df: 0x904a, 0x10e0: 0x904b, 0x10e1: 0xb04c, 0x10e2: 0xb04d, 0x10e3: 0xb04d, - 0x10e4: 0xb04e, 0x10e5: 0xb04f, 0x10e6: 0xb050, 0x10e7: 0xb051, 0x10e8: 0xb052, 0x10e9: 0xb053, - 0x10ea: 0xb054, 0x10eb: 0xb055, 0x10ec: 0xb056, 0x10ed: 0xb057, 0x10ee: 0xb058, 0x10ef: 0xb059, - 0x10f0: 0xb05a, 0x10f1: 0xb05b, 0x10f2: 0xb05c, 0x10f3: 0xb05d, 0x10f4: 0xb05e, 0x10f5: 
0xb05f, - 0x10f6: 0xb060, 0x10f7: 0xb061, 0x10f8: 0xb062, 0x10f9: 0xb063, 0x10fa: 0xb064, 0x10fb: 0xb065, - 0x10fc: 0xb052, 0x10fd: 0xb066, 0x10fe: 0xb067, 0x10ff: 0xb055, - // Block 0x44, offset 0x1100 - 0x1100: 0xb068, 0x1101: 0xb069, 0x1102: 0xb06a, 0x1103: 0xb06b, 0x1104: 0xb05a, 0x1105: 0xb056, - 0x1106: 0xb06c, 0x1107: 0xb06d, 0x1108: 0xb06b, 0x1109: 0xb06e, 0x110a: 0xb06b, 0x110b: 0xb06f, - 0x110c: 0xb06f, 0x110d: 0xb070, 0x110e: 0xb070, 0x110f: 0xb071, 0x1110: 0xb056, 0x1111: 0xb072, - 0x1112: 0xb073, 0x1113: 0xb072, 0x1114: 0xb074, 0x1115: 0xb073, 0x1116: 0xb075, 0x1117: 0xb075, - 0x1118: 0xb076, 0x1119: 0xb076, 0x111a: 0xb077, 0x111b: 0xb077, 0x111c: 0xb073, 0x111d: 0xb078, - 0x111e: 0xb079, 0x111f: 0xb067, 0x1120: 0xb07a, 0x1121: 0xb07b, 0x1122: 0xb07b, 0x1123: 0xb07b, - 0x1124: 0xb07b, 0x1125: 0xb07b, 0x1126: 0xb07b, 0x1127: 0xb07b, 0x1128: 0xb07b, 0x1129: 0xb07b, - 0x112a: 0xb07b, 0x112b: 0xb07b, 0x112c: 0xb07b, 0x112d: 0xb07b, 0x112e: 0xb07b, 0x112f: 0xb07b, - 0x1130: 0xb07c, 0x1131: 0xb07c, 0x1132: 0xb07c, 0x1133: 0xb07c, 0x1134: 0xb07c, 0x1135: 0xb07c, - 0x1136: 0xb07c, 0x1137: 0xb07c, 0x1138: 0xb07c, 0x1139: 0xb07c, 0x113a: 0xb07c, 0x113b: 0xb07c, - 0x113c: 0xb07c, 0x113d: 0xb07c, 0x113e: 0xb07c, - // Block 0x45, offset 0x1140 - 0x1142: 0xb07d, 0x1143: 0xb07e, 0x1144: 0xb07f, 0x1145: 0xb080, - 0x1146: 0xb07f, 0x1147: 0xb07e, 0x114a: 0xb081, 0x114b: 0xb082, - 0x114c: 0xb083, 0x114d: 0xb07f, 0x114e: 0xb080, 0x114f: 0xb07f, - 0x1152: 0xb084, 0x1153: 0xb085, 0x1154: 0xb084, 0x1155: 0xb086, 0x1156: 0xb084, 0x1157: 0xb087, - 0x115a: 0xb088, 0x115b: 0xb089, 0x115c: 0xb08a, - 0x1160: 0x908b, 0x1161: 0x908b, 0x1162: 0x908c, 0x1163: 0x908d, - 0x1164: 0x908b, 0x1165: 0x908e, 0x1166: 0x908f, 0x1168: 0xb090, 0x1169: 0xb091, - 0x116a: 0xb092, 0x116b: 0xb091, 0x116c: 0xb093, 0x116d: 0xb094, 0x116e: 0xb095, - 0x117d: 0x2000, - // Block 0x46, offset 0x1180 - 0x11a0: 0x4000, 0x11a1: 0x4000, 0x11a2: 0x4000, 0x11a3: 0x4000, - 0x11a4: 0x4000, - 0x11b0: 0x4000, 0x11b1: 0x4000, - // Block 0x47, offset 0x11c0 - 0x11c0: 0x4000, 0x11c1: 0x4000, 0x11c2: 0x4000, 0x11c3: 0x4000, 0x11c4: 0x4000, 0x11c5: 0x4000, - 0x11c6: 0x4000, 0x11c7: 0x4000, 0x11c8: 0x4000, 0x11c9: 0x4000, 0x11ca: 0x4000, 0x11cb: 0x4000, - 0x11cc: 0x4000, 0x11cd: 0x4000, 0x11ce: 0x4000, 0x11cf: 0x4000, 0x11d0: 0x4000, 0x11d1: 0x4000, - 0x11d2: 0x4000, 0x11d3: 0x4000, 0x11d4: 0x4000, 0x11d5: 0x4000, 0x11d6: 0x4000, 0x11d7: 0x4000, - 0x11d8: 0x4000, 0x11d9: 0x4000, 0x11da: 0x4000, 0x11db: 0x4000, 0x11dc: 0x4000, 0x11dd: 0x4000, - 0x11de: 0x4000, 0x11df: 0x4000, 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, - 0x11e4: 0x4000, 0x11e5: 0x4000, 0x11e6: 0x4000, 0x11e7: 0x4000, 0x11e8: 0x4000, 0x11e9: 0x4000, - 0x11ea: 0x4000, 0x11eb: 0x4000, 0x11ec: 0x4000, 0x11ed: 0x4000, 0x11ee: 0x4000, 0x11ef: 0x4000, - 0x11f0: 0x4000, 0x11f1: 0x4000, 0x11f2: 0x4000, 0x11f3: 0x4000, 0x11f4: 0x4000, 0x11f5: 0x4000, - 0x11f6: 0x4000, 0x11f7: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, - // Block 0x4a, offset 0x1280 
- 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, - 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, - 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, - 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, - 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, - 0x129e: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12d0: 0x4000, 0x12d1: 0x4000, - 0x12d2: 0x4000, - 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, - 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, - 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, - 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, - 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, - 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, - 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, - 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, - 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1344: 0x4000, - // Block 0x4e, offset 0x1380 - 0x138f: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, - 0x13d0: 0x2000, 0x13d1: 0x2000, - 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, - 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, - 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, - 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, - 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, - 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 
0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, - 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, - 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, - 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, - 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, - 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, - 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, - 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, - 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 
0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, - 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, - 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f4: 0x4000, - 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, - 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 
0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167d: 0x4000, 0x167e: 0x4000, 0x167f: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bf: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16c0: 0x4000, 0x16c1: 0x4000, 0x16c2: 0x4000, 0x16c3: 0x4000, 0x16c4: 0x4000, 0x16c5: 0x4000, - 0x16c6: 0x4000, 0x16c7: 0x4000, 0x16c8: 0x4000, 0x16c9: 0x4000, 0x16ca: 0x4000, 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16cf: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, 0x16e8: 0x4000, 0x16e9: 0x4000, - 0x16ea: 0x4000, 0x16eb: 0x4000, 0x16ec: 0x4000, 0x16ed: 0x4000, 0x16ee: 0x4000, 0x16ef: 0x4000, - 0x16f0: 0x4000, 0x16f1: 0x4000, 0x16f2: 0x4000, 0x16f3: 0x4000, 0x16f4: 0x4000, 0x16f5: 0x4000, - 0x16f6: 0x4000, 0x16f7: 0x4000, 0x16f8: 0x4000, 0x16f9: 0x4000, 0x16fa: 0x4000, 0x16fb: 0x4000, - 0x16fc: 0x4000, 0x16fd: 0x4000, - // Block 0x5c, offset 0x1700 - 0x170b: 0x4000, - 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x1710: 0x4000, 0x1711: 0x4000, - 0x1712: 0x4000, 0x1713: 0x4000, 0x1714: 0x4000, 0x1715: 0x4000, 0x1716: 0x4000, 0x1717: 0x4000, - 0x1718: 0x4000, 0x1719: 0x4000, 0x171a: 0x4000, 0x171b: 0x4000, 0x171c: 0x4000, 0x171d: 0x4000, - 0x171e: 0x4000, 0x171f: 0x4000, 0x1720: 0x4000, 0x1721: 0x4000, 0x1722: 0x4000, 0x1723: 0x4000, - 0x1724: 0x4000, 0x1725: 0x4000, 0x1726: 0x4000, 0x1727: 0x4000, - 0x173a: 0x4000, - // Block 0x5d, offset 0x1740 - 0x1755: 0x4000, 0x1756: 0x4000, - 0x1764: 0x4000, - // Block 
0x5e, offset 0x1780 - 0x17bb: 0x4000, - 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, 0x17bf: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, - 0x17cc: 0x4000, 0x17cd: 0x4000, 0x17ce: 0x4000, 0x17cf: 0x4000, - // Block 0x60, offset 0x1800 - 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, - 0x180c: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, - 0x1812: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, - 0x182b: 0x4000, 0x182c: 0x4000, - 0x1834: 0x4000, 0x1835: 0x4000, - 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, - 0x183c: 0x4000, - // Block 0x61, offset 0x1840 - 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, - 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, - 0x186a: 0x4000, 0x186b: 0x4000, - // Block 0x62, offset 0x1880 - 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, - 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, - 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, - 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, - 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, - 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, - 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, - 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, 0x18ba: 0x4000, - 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, - 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, - 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, - 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, - 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, - 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, - 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, - 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, - 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, 0x190b: 0x4000, - 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, - 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, - 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, - 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 
0x1923: 0x4000, - 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, - 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, - 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, - 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, - 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, - // Block 0x65, offset 0x1940 - 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, 0x1974: 0x4000, - 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, - // Block 0x66, offset 0x1980 - 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, 0x1983: 0x4000, 0x1984: 0x4000, 0x1985: 0x4000, - 0x1986: 0x4000, - 0x1990: 0x4000, 0x1991: 0x4000, - 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, 0x1996: 0x4000, 0x1997: 0x4000, - 0x1998: 0x4000, 0x1999: 0x4000, 0x199a: 0x4000, 0x199b: 0x4000, 0x199c: 0x4000, 0x199d: 0x4000, - 0x199e: 0x4000, 0x199f: 0x4000, 0x19a0: 0x4000, 0x19a1: 0x4000, 0x19a2: 0x4000, 0x19a3: 0x4000, - 0x19a4: 0x4000, 0x19a5: 0x4000, 0x19a6: 0x4000, 0x19a7: 0x4000, 0x19a8: 0x4000, - 0x19b0: 0x4000, 0x19b1: 0x4000, 0x19b2: 0x4000, 0x19b3: 0x4000, 0x19b4: 0x4000, 0x19b5: 0x4000, - 0x19b6: 0x4000, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x4000, 0x19c1: 0x4000, 0x19c2: 0x4000, - 0x19d0: 0x4000, 0x19d1: 0x4000, - 0x19d2: 0x4000, 0x19d3: 0x4000, 0x19d4: 0x4000, 0x19d5: 0x4000, 0x19d6: 0x4000, - // Block 0x68, offset 0x1a00 - 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x2000, 0x1a05: 0x2000, - 0x1a06: 0x2000, 0x1a07: 0x2000, 0x1a08: 0x2000, 0x1a09: 0x2000, 0x1a0a: 0x2000, 0x1a0b: 0x2000, - 0x1a0c: 0x2000, 0x1a0d: 0x2000, 0x1a0e: 0x2000, 0x1a0f: 0x2000, 0x1a10: 0x2000, 0x1a11: 0x2000, - 0x1a12: 0x2000, 0x1a13: 0x2000, 0x1a14: 0x2000, 0x1a15: 0x2000, 0x1a16: 0x2000, 0x1a17: 0x2000, - 0x1a18: 0x2000, 0x1a19: 0x2000, 0x1a1a: 0x2000, 0x1a1b: 0x2000, 0x1a1c: 0x2000, 0x1a1d: 0x2000, - 0x1a1e: 0x2000, 0x1a1f: 0x2000, 0x1a20: 0x2000, 0x1a21: 0x2000, 0x1a22: 0x2000, 0x1a23: 0x2000, - 0x1a24: 0x2000, 0x1a25: 0x2000, 0x1a26: 0x2000, 0x1a27: 0x2000, 0x1a28: 0x2000, 0x1a29: 0x2000, - 0x1a2a: 0x2000, 0x1a2b: 0x2000, 0x1a2c: 0x2000, 0x1a2d: 0x2000, 0x1a2e: 0x2000, 0x1a2f: 0x2000, - 0x1a30: 0x2000, 0x1a31: 0x2000, 0x1a32: 0x2000, 0x1a33: 0x2000, 0x1a34: 0x2000, 0x1a35: 0x2000, - 0x1a36: 0x2000, 0x1a37: 0x2000, 0x1a38: 0x2000, 0x1a39: 0x2000, 0x1a3a: 0x2000, 0x1a3b: 0x2000, - 0x1a3c: 0x2000, 0x1a3d: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. 
-var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x0e, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3a, 0x253: 0x3b, - 0x265: 0x3c, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3d, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3e, 0x339: 0x3f, 0x33c: 0x40, 0x33d: 0x41, 0x33e: 0x42, 0x33f: 0x43, - // Block 0xd, offset 0x340 - 0x37f: 0x44, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x45, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x0e, 0x3ac: 0x0e, 0x3ad: 0x0e, 0x3ae: 0x0e, 0x3af: 0x0e, - 0x3b0: 0x0e, 0x3b1: 0x0e, 0x3b2: 0x0e, 0x3b3: 0x46, 0x3b4: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 
0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, - 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, - // Block 0x10, offset 0x400 - 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, - 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, - 0x410: 0x56, 0x411: 0x57, 0x412: 0x0e, 0x413: 0x58, 0x414: 0x59, 0x415: 0x5a, 0x416: 0x5b, 0x417: 0x5c, - 0x418: 0x0e, 0x419: 0x5d, 0x41a: 0x0e, 0x41b: 0x5e, 0x41f: 0x5f, - 0x424: 0x60, 0x425: 0x61, 0x426: 0x0e, 0x427: 0x62, - 0x429: 0x63, 0x42a: 0x64, 0x42b: 0x65, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x66, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. -// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. 
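Reviewer note (not part of the vendored file): the xor-sharing scheme described in the comment above can be checked with a minimal standalone sketch. The entry value {0x01, 0xE0, 0x00, 0x00} and the U+FF21/U+FF22 mappings are taken from the comment's own example; the decode helper below is an illustration of the format, not the package's actual decoding code.

package main

import "fmt"

// decode applies an inverseData-style entry to the UTF-8 encoding of a source
// rune. entry[0] is the length of the mapped UTF-8 sequence; the last byte of
// the stored sequence is xor-ed with the last byte of the source encoding.
func decode(entry [4]byte, src []byte) []byte {
	n := int(entry[0])
	out := make([]byte, n)
	copy(out, entry[1:1+n])
	out[n-1] ^= src[len(src)-1] // undo the xor using the source's last byte
	return out
}

func main() {
	entry := [4]byte{0x01, 0xE0, 0x00, 0x00}             // shared entry from the comment above
	fmt.Printf("%s\n", decode(entry, []byte("\uFF21")))  // "A": 0xE0 ^ 0xA1 = 0x41
	fmt.Printf("%s\n", decode(entry, []byte("\uFF22")))  // "B": 0xE0 ^ 0xA2 = 0x42
}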
-var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 
0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 15448 bytes (15KiB) diff --git a/constraint/vendor/golang.org/x/text/width/tables15.0.0.go b/constraint/vendor/golang.org/x/text/width/tables15.0.0.go deleted file mode 100644 index 2b8528967..000000000 --- a/constraint/vendor/golang.org/x/text/width/tables15.0.0.go +++ /dev/null @@ -1,1367 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build go1.21 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "15.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14912 bytes (14.56 KiB). Checksum: 4468b6cd178303d2. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 105 blocks, 6720 entries, 13440 bytes -// The third block is the zero block. 
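Reviewer note (not part of the vendored file): the trie helpers above back the exported lookups and transformers of golang.org/x/text/width. A hedged usage sketch follows, assuming the package's documented public API (LookupRune, Properties.Kind, and the Narrow transformer); exact Kind string output depends on the package's stringer names.

package main

import (
	"fmt"

	"golang.org/x/text/width"
)

func main() {
	// Kind classification is driven by the generated width tables in this file.
	p := width.LookupRune('Ａ') // U+FF21 FULLWIDTH LATIN CAPITAL LETTER A
	fmt.Println(p.Kind())        // expected: EastAsianFullwidth

	// The Narrow transformer uses the inverse mappings (e.g. U+FF21 -> U+0041).
	fmt.Println(width.Narrow.String("Ｈｅｌｌｏ")) // expected: Hello
}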
-var widthValues = [6720]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, - 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, 0xe3b: 0x4000, - 0xe3c: 0x4000, 0xe3d: 0x4000, 0xe3e: 0x4000, 0xe3f: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 
0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, 0xf3f: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, - // Block 0x3e, offset 0xf80 - 0xfa0: 0x4000, 0xfa1: 0x4000, 0xfa2: 0x4000, 0xfa3: 0x4000, - 0xfa4: 0x4000, 0xfa5: 0x4000, 0xfa6: 0x4000, 0xfa7: 0x4000, 0xfa8: 0x4000, 0xfa9: 0x4000, - 0xfaa: 0x4000, 0xfab: 0x4000, 0xfac: 0x4000, 0xfad: 0x4000, 0xfae: 0x4000, 0xfaf: 0x4000, - 0xfb0: 0x4000, 0xfb1: 0x4000, 0xfb2: 0x4000, 0xfb3: 0x4000, 0xfb4: 0x4000, 0xfb5: 0x4000, - 0xfb6: 0x4000, 0xfb7: 0x4000, 0xfb8: 0x4000, 0xfb9: 0x4000, 0xfba: 0x4000, 0xfbb: 0x4000, - 0xfbc: 0x4000, - // Block 0x3f, offset 0xfc0 - 0xfc0: 0x4000, 0xfc1: 0x4000, 0xfc2: 0x4000, 0xfc3: 0x4000, 0xfc4: 0x4000, 0xfc5: 0x4000, - 0xfc6: 0x4000, 0xfc7: 0x4000, 0xfc8: 0x4000, 0xfc9: 0x4000, 0xfca: 0x4000, 0xfcb: 0x4000, - 0xfcc: 0x4000, 0xfcd: 0x4000, 0xfce: 0x4000, 0xfcf: 0x4000, 0xfd0: 0x4000, 0xfd1: 0x4000, - 0xfd2: 0x4000, 0xfd3: 0x4000, 0xfd4: 0x4000, 0xfd5: 0x4000, 0xfd6: 0x4000, 0xfd7: 0x4000, - 0xfd8: 0x4000, 0xfd9: 0x4000, 0xfda: 
0x4000, 0xfdb: 0x4000, 0xfdc: 0x4000, 0xfdd: 0x4000, - 0xfde: 0x4000, 0xfdf: 0x4000, 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x2000, 0x1001: 0x2000, 0x1002: 0x2000, 0x1003: 0x2000, 0x1004: 0x2000, 0x1005: 0x2000, - 0x1006: 0x2000, 0x1007: 0x2000, 0x1008: 0x2000, 0x1009: 0x2000, 0x100a: 0x2000, 0x100b: 0x2000, - 0x100c: 0x2000, 0x100d: 0x2000, 0x100e: 0x2000, 0x100f: 0x2000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, - 0x1030: 0x4000, 0x1031: 0x4000, 0x1032: 0x4000, 0x1033: 0x4000, 0x1034: 0x4000, 0x1035: 0x4000, - 0x1036: 0x4000, 0x1037: 0x4000, 0x1038: 0x4000, 0x1039: 0x4000, 0x103a: 0x4000, 0x103b: 0x4000, - 0x103c: 0x4000, 0x103d: 0x4000, 0x103e: 0x4000, 0x103f: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x4000, 0x1041: 0x4000, 0x1042: 0x4000, 0x1043: 0x4000, 0x1044: 0x4000, 0x1045: 0x4000, - 0x1046: 0x4000, 0x1047: 0x4000, 0x1048: 0x4000, 0x1049: 0x4000, 0x104a: 0x4000, 0x104b: 0x4000, - 0x104c: 0x4000, 0x104d: 0x4000, 0x104e: 0x4000, 0x104f: 0x4000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, 0x105a: 0x4000, 0x105b: 0x4000, 0x105c: 0x4000, 0x105d: 0x4000, - 0x105e: 0x4000, 0x105f: 0x4000, 0x1060: 0x4000, 0x1061: 0x4000, 0x1062: 0x4000, 0x1063: 0x4000, - 0x1064: 0x4000, 0x1065: 0x4000, 0x1066: 0x4000, 0x1068: 0x4000, 0x1069: 0x4000, - 0x106a: 0x4000, 0x106b: 0x4000, - // Block 0x42, offset 0x1080 - 0x1081: 0x9012, 0x1082: 0x9012, 0x1083: 0x9012, 0x1084: 0x9012, 0x1085: 0x9012, - 0x1086: 0x9012, 0x1087: 0x9012, 0x1088: 0x9012, 0x1089: 0x9012, 0x108a: 0x9012, 0x108b: 0x9012, - 0x108c: 0x9012, 0x108d: 0x9012, 0x108e: 0x9012, 0x108f: 0x9012, 0x1090: 0x9012, 0x1091: 0x9012, - 0x1092: 0x9012, 0x1093: 0x9012, 0x1094: 0x9012, 0x1095: 0x9012, 0x1096: 0x9012, 0x1097: 0x9012, - 0x1098: 0x9012, 0x1099: 0x9012, 0x109a: 0x9012, 0x109b: 0x9012, 0x109c: 0x9012, 0x109d: 0x9012, - 0x109e: 0x9012, 0x109f: 0x9012, 0x10a0: 0x9049, 0x10a1: 0x9049, 0x10a2: 0x9049, 0x10a3: 0x9049, - 0x10a4: 0x9049, 0x10a5: 0x9049, 0x10a6: 0x9049, 0x10a7: 0x9049, 0x10a8: 0x9049, 0x10a9: 0x9049, - 0x10aa: 0x9049, 0x10ab: 0x9049, 0x10ac: 0x9049, 0x10ad: 0x9049, 0x10ae: 0x9049, 0x10af: 0x9049, - 0x10b0: 0x9049, 0x10b1: 0x9049, 0x10b2: 0x9049, 0x10b3: 0x9049, 0x10b4: 0x9049, 0x10b5: 0x9049, - 0x10b6: 0x9049, 0x10b7: 0x9049, 0x10b8: 0x9049, 0x10b9: 0x9049, 0x10ba: 0x9049, 0x10bb: 0x9049, - 0x10bc: 0x9049, 0x10bd: 0x9049, 0x10be: 0x9049, 0x10bf: 0x9049, - // Block 0x43, offset 0x10c0 - 0x10c0: 0x9049, 0x10c1: 0x9049, 0x10c2: 0x9049, 0x10c3: 0x9049, 0x10c4: 0x9049, 0x10c5: 0x9049, - 0x10c6: 0x9049, 0x10c7: 0x9049, 0x10c8: 0x9049, 0x10c9: 0x9049, 0x10ca: 0x9049, 0x10cb: 0x9049, - 0x10cc: 0x9049, 0x10cd: 0x9049, 0x10ce: 0x9049, 0x10cf: 0x9049, 0x10d0: 0x9049, 0x10d1: 0x9049, - 0x10d2: 0x9049, 0x10d3: 0x9049, 0x10d4: 0x9049, 0x10d5: 0x9049, 0x10d6: 0x9049, 0x10d7: 0x9049, - 0x10d8: 0x9049, 0x10d9: 0x9049, 0x10da: 0x9049, 0x10db: 0x9049, 0x10dc: 0x9049, 0x10dd: 0x9049, - 0x10de: 0x9049, 0x10df: 0x904a, 0x10e0: 0x904b, 0x10e1: 0xb04c, 0x10e2: 0xb04d, 0x10e3: 0xb04d, - 0x10e4: 0xb04e, 0x10e5: 0xb04f, 0x10e6: 0xb050, 0x10e7: 0xb051, 0x10e8: 0xb052, 0x10e9: 0xb053, - 0x10ea: 0xb054, 0x10eb: 0xb055, 0x10ec: 0xb056, 0x10ed: 0xb057, 0x10ee: 0xb058, 0x10ef: 0xb059, - 0x10f0: 0xb05a, 0x10f1: 0xb05b, 0x10f2: 0xb05c, 0x10f3: 0xb05d, 0x10f4: 0xb05e, 0x10f5: 
0xb05f, - 0x10f6: 0xb060, 0x10f7: 0xb061, 0x10f8: 0xb062, 0x10f9: 0xb063, 0x10fa: 0xb064, 0x10fb: 0xb065, - 0x10fc: 0xb052, 0x10fd: 0xb066, 0x10fe: 0xb067, 0x10ff: 0xb055, - // Block 0x44, offset 0x1100 - 0x1100: 0xb068, 0x1101: 0xb069, 0x1102: 0xb06a, 0x1103: 0xb06b, 0x1104: 0xb05a, 0x1105: 0xb056, - 0x1106: 0xb06c, 0x1107: 0xb06d, 0x1108: 0xb06b, 0x1109: 0xb06e, 0x110a: 0xb06b, 0x110b: 0xb06f, - 0x110c: 0xb06f, 0x110d: 0xb070, 0x110e: 0xb070, 0x110f: 0xb071, 0x1110: 0xb056, 0x1111: 0xb072, - 0x1112: 0xb073, 0x1113: 0xb072, 0x1114: 0xb074, 0x1115: 0xb073, 0x1116: 0xb075, 0x1117: 0xb075, - 0x1118: 0xb076, 0x1119: 0xb076, 0x111a: 0xb077, 0x111b: 0xb077, 0x111c: 0xb073, 0x111d: 0xb078, - 0x111e: 0xb079, 0x111f: 0xb067, 0x1120: 0xb07a, 0x1121: 0xb07b, 0x1122: 0xb07b, 0x1123: 0xb07b, - 0x1124: 0xb07b, 0x1125: 0xb07b, 0x1126: 0xb07b, 0x1127: 0xb07b, 0x1128: 0xb07b, 0x1129: 0xb07b, - 0x112a: 0xb07b, 0x112b: 0xb07b, 0x112c: 0xb07b, 0x112d: 0xb07b, 0x112e: 0xb07b, 0x112f: 0xb07b, - 0x1130: 0xb07c, 0x1131: 0xb07c, 0x1132: 0xb07c, 0x1133: 0xb07c, 0x1134: 0xb07c, 0x1135: 0xb07c, - 0x1136: 0xb07c, 0x1137: 0xb07c, 0x1138: 0xb07c, 0x1139: 0xb07c, 0x113a: 0xb07c, 0x113b: 0xb07c, - 0x113c: 0xb07c, 0x113d: 0xb07c, 0x113e: 0xb07c, - // Block 0x45, offset 0x1140 - 0x1142: 0xb07d, 0x1143: 0xb07e, 0x1144: 0xb07f, 0x1145: 0xb080, - 0x1146: 0xb07f, 0x1147: 0xb07e, 0x114a: 0xb081, 0x114b: 0xb082, - 0x114c: 0xb083, 0x114d: 0xb07f, 0x114e: 0xb080, 0x114f: 0xb07f, - 0x1152: 0xb084, 0x1153: 0xb085, 0x1154: 0xb084, 0x1155: 0xb086, 0x1156: 0xb084, 0x1157: 0xb087, - 0x115a: 0xb088, 0x115b: 0xb089, 0x115c: 0xb08a, - 0x1160: 0x908b, 0x1161: 0x908b, 0x1162: 0x908c, 0x1163: 0x908d, - 0x1164: 0x908b, 0x1165: 0x908e, 0x1166: 0x908f, 0x1168: 0xb090, 0x1169: 0xb091, - 0x116a: 0xb092, 0x116b: 0xb091, 0x116c: 0xb093, 0x116d: 0xb094, 0x116e: 0xb095, - 0x117d: 0x2000, - // Block 0x46, offset 0x1180 - 0x11a0: 0x4000, 0x11a1: 0x4000, 0x11a2: 0x4000, 0x11a3: 0x4000, - 0x11a4: 0x4000, - 0x11b0: 0x4000, 0x11b1: 0x4000, - // Block 0x47, offset 0x11c0 - 0x11c0: 0x4000, 0x11c1: 0x4000, 0x11c2: 0x4000, 0x11c3: 0x4000, 0x11c4: 0x4000, 0x11c5: 0x4000, - 0x11c6: 0x4000, 0x11c7: 0x4000, 0x11c8: 0x4000, 0x11c9: 0x4000, 0x11ca: 0x4000, 0x11cb: 0x4000, - 0x11cc: 0x4000, 0x11cd: 0x4000, 0x11ce: 0x4000, 0x11cf: 0x4000, 0x11d0: 0x4000, 0x11d1: 0x4000, - 0x11d2: 0x4000, 0x11d3: 0x4000, 0x11d4: 0x4000, 0x11d5: 0x4000, 0x11d6: 0x4000, 0x11d7: 0x4000, - 0x11d8: 0x4000, 0x11d9: 0x4000, 0x11da: 0x4000, 0x11db: 0x4000, 0x11dc: 0x4000, 0x11dd: 0x4000, - 0x11de: 0x4000, 0x11df: 0x4000, 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, - 0x11e4: 0x4000, 0x11e5: 0x4000, 0x11e6: 0x4000, 0x11e7: 0x4000, 0x11e8: 0x4000, 0x11e9: 0x4000, - 0x11ea: 0x4000, 0x11eb: 0x4000, 0x11ec: 0x4000, 0x11ed: 0x4000, 0x11ee: 0x4000, 0x11ef: 0x4000, - 0x11f0: 0x4000, 0x11f1: 0x4000, 0x11f2: 0x4000, 0x11f3: 0x4000, 0x11f4: 0x4000, 0x11f5: 0x4000, - 0x11f6: 0x4000, 0x11f7: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, - // Block 0x4a, offset 0x1280 
- 0x12b0: 0x4000, 0x12b1: 0x4000, 0x12b2: 0x4000, 0x12b3: 0x4000, 0x12b5: 0x4000, - 0x12b6: 0x4000, 0x12b7: 0x4000, 0x12b8: 0x4000, 0x12b9: 0x4000, 0x12ba: 0x4000, 0x12bb: 0x4000, - 0x12bd: 0x4000, 0x12be: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12c0: 0x4000, 0x12c1: 0x4000, 0x12c2: 0x4000, 0x12c3: 0x4000, 0x12c4: 0x4000, 0x12c5: 0x4000, - 0x12c6: 0x4000, 0x12c7: 0x4000, 0x12c8: 0x4000, 0x12c9: 0x4000, 0x12ca: 0x4000, 0x12cb: 0x4000, - 0x12cc: 0x4000, 0x12cd: 0x4000, 0x12ce: 0x4000, 0x12cf: 0x4000, 0x12d0: 0x4000, 0x12d1: 0x4000, - 0x12d2: 0x4000, 0x12d3: 0x4000, 0x12d4: 0x4000, 0x12d5: 0x4000, 0x12d6: 0x4000, 0x12d7: 0x4000, - 0x12d8: 0x4000, 0x12d9: 0x4000, 0x12da: 0x4000, 0x12db: 0x4000, 0x12dc: 0x4000, 0x12dd: 0x4000, - 0x12de: 0x4000, 0x12df: 0x4000, 0x12e0: 0x4000, 0x12e1: 0x4000, 0x12e2: 0x4000, - 0x12f2: 0x4000, - // Block 0x4c, offset 0x1300 - 0x1310: 0x4000, 0x1311: 0x4000, - 0x1312: 0x4000, 0x1315: 0x4000, - 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, - 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, - 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, - 0x133c: 0x4000, 0x133d: 0x4000, 0x133e: 0x4000, 0x133f: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1340: 0x4000, 0x1341: 0x4000, 0x1342: 0x4000, 0x1343: 0x4000, 0x1344: 0x4000, 0x1345: 0x4000, - 0x1346: 0x4000, 0x1347: 0x4000, 0x1348: 0x4000, 0x1349: 0x4000, 0x134a: 0x4000, 0x134b: 0x4000, - 0x134c: 0x4000, 0x134d: 0x4000, 0x134e: 0x4000, 0x134f: 0x4000, 0x1350: 0x4000, 0x1351: 0x4000, - 0x1352: 0x4000, 0x1353: 0x4000, 0x1354: 0x4000, 0x1355: 0x4000, 0x1356: 0x4000, 0x1357: 0x4000, - 0x1358: 0x4000, 0x1359: 0x4000, 0x135a: 0x4000, 0x135b: 0x4000, 0x135c: 0x4000, 0x135d: 0x4000, - 0x135e: 0x4000, 0x135f: 0x4000, 0x1360: 0x4000, 0x1361: 0x4000, 0x1362: 0x4000, 0x1363: 0x4000, - 0x1364: 0x4000, 0x1365: 0x4000, 0x1366: 0x4000, 0x1367: 0x4000, 0x1368: 0x4000, 0x1369: 0x4000, - 0x136a: 0x4000, 0x136b: 0x4000, 0x136c: 0x4000, 0x136d: 0x4000, 0x136e: 0x4000, 0x136f: 0x4000, - 0x1370: 0x4000, 0x1371: 0x4000, 0x1372: 0x4000, 0x1373: 0x4000, 0x1374: 0x4000, 0x1375: 0x4000, - 0x1376: 0x4000, 0x1377: 0x4000, 0x1378: 0x4000, 0x1379: 0x4000, 0x137a: 0x4000, 0x137b: 0x4000, - // Block 0x4e, offset 0x1380 - 0x1384: 0x4000, - // Block 0x4f, offset 0x13c0 - 0x13cf: 0x4000, - // Block 0x50, offset 0x1400 - 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, - 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, - 0x1410: 0x2000, 0x1411: 0x2000, - 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, - 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, - 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, - 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, - 0x142a: 0x2000, 0x142b: 0x2000, 0x142c: 0x2000, 0x142d: 0x2000, - 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, - 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, - 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, - // Block 0x51, offset 0x1440 - 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, - 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 
0x2000, - 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x2000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x2000, - 0x1452: 0x2000, 0x1453: 0x2000, 0x1454: 0x2000, 0x1455: 0x2000, 0x1456: 0x2000, 0x1457: 0x2000, - 0x1458: 0x2000, 0x1459: 0x2000, 0x145a: 0x2000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, - 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, - 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, - 0x1470: 0x2000, 0x1471: 0x2000, 0x1472: 0x2000, 0x1473: 0x2000, 0x1474: 0x2000, 0x1475: 0x2000, - 0x1476: 0x2000, 0x1477: 0x2000, 0x1478: 0x2000, 0x1479: 0x2000, 0x147a: 0x2000, 0x147b: 0x2000, - 0x147c: 0x2000, 0x147d: 0x2000, 0x147e: 0x2000, 0x147f: 0x2000, - // Block 0x52, offset 0x1480 - 0x1480: 0x2000, 0x1481: 0x2000, 0x1482: 0x2000, 0x1483: 0x2000, 0x1484: 0x2000, 0x1485: 0x2000, - 0x1486: 0x2000, 0x1487: 0x2000, 0x1488: 0x2000, 0x1489: 0x2000, 0x148a: 0x2000, 0x148b: 0x2000, - 0x148c: 0x2000, 0x148d: 0x2000, 0x148e: 0x4000, 0x148f: 0x2000, 0x1490: 0x2000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x2000, 0x149c: 0x2000, 0x149d: 0x2000, - 0x149e: 0x2000, 0x149f: 0x2000, 0x14a0: 0x2000, 0x14a1: 0x2000, 0x14a2: 0x2000, 0x14a3: 0x2000, - 0x14a4: 0x2000, 0x14a5: 0x2000, 0x14a6: 0x2000, 0x14a7: 0x2000, 0x14a8: 0x2000, 0x14a9: 0x2000, - 0x14aa: 0x2000, 0x14ab: 0x2000, 0x14ac: 0x2000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, - 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, - 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, - 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, - 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, - 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, - 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, - 0x1510: 0x4000, 0x1511: 0x4000, - 0x1520: 0x4000, 0x1521: 0x4000, 0x1522: 0x4000, 0x1523: 0x4000, - 0x1524: 0x4000, 0x1525: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, - 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, - 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, - 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, - 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, - 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 
0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, - 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, - 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, - 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, - 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, - 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, - 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, - 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1634: 0x4000, - 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, - // Block 0x59, offset 0x1640 - 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, - 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, - 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, - 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, - 0x1676: 
0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, - 0x167c: 0x4000, 0x167d: 0x4000, 0x167e: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1680: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, - 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, - 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, - 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, - 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, - 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, - 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, - 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, - 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, - 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, - 0x16bc: 0x4000, 0x16bd: 0x4000, 0x16be: 0x4000, 0x16bf: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16c0: 0x4000, 0x16c1: 0x4000, 0x16c2: 0x4000, 0x16c3: 0x4000, 0x16c4: 0x4000, 0x16c5: 0x4000, - 0x16c6: 0x4000, 0x16c7: 0x4000, 0x16c8: 0x4000, 0x16c9: 0x4000, 0x16ca: 0x4000, 0x16cb: 0x4000, - 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16cf: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, - 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, - 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, - 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, - 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, 0x16e8: 0x4000, 0x16e9: 0x4000, - 0x16ea: 0x4000, 0x16eb: 0x4000, 0x16ec: 0x4000, 0x16ed: 0x4000, 0x16ee: 0x4000, 0x16ef: 0x4000, - 0x16f0: 0x4000, 0x16f1: 0x4000, 0x16f2: 0x4000, 0x16f3: 0x4000, 0x16f4: 0x4000, 0x16f5: 0x4000, - 0x16f6: 0x4000, 0x16f7: 0x4000, 0x16f8: 0x4000, 0x16f9: 0x4000, 0x16fa: 0x4000, 0x16fb: 0x4000, - 0x16fc: 0x4000, 0x16ff: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, - 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, - 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, 0x1710: 0x4000, 0x1711: 0x4000, - 0x1712: 0x4000, 0x1713: 0x4000, 0x1714: 0x4000, 0x1715: 0x4000, 0x1716: 0x4000, 0x1717: 0x4000, - 0x1718: 0x4000, 0x1719: 0x4000, 0x171a: 0x4000, 0x171b: 0x4000, 0x171c: 0x4000, 0x171d: 0x4000, - 0x171e: 0x4000, 0x171f: 0x4000, 0x1720: 0x4000, 0x1721: 0x4000, 0x1722: 0x4000, 0x1723: 0x4000, - 0x1724: 0x4000, 0x1725: 0x4000, 0x1726: 0x4000, 0x1727: 0x4000, 0x1728: 0x4000, 0x1729: 0x4000, - 0x172a: 0x4000, 0x172b: 0x4000, 0x172c: 0x4000, 0x172d: 0x4000, 0x172e: 0x4000, 0x172f: 0x4000, - 0x1730: 0x4000, 0x1731: 0x4000, 0x1732: 0x4000, 0x1733: 0x4000, 0x1734: 0x4000, 0x1735: 0x4000, - 0x1736: 0x4000, 0x1737: 0x4000, 0x1738: 0x4000, 0x1739: 0x4000, 0x173a: 0x4000, 0x173b: 0x4000, - 0x173c: 0x4000, 0x173d: 0x4000, - // Block 0x5d, offset 0x1740 - 0x174b: 0x4000, - 0x174c: 0x4000, 0x174d: 0x4000, 0x174e: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, - 0x1752: 0x4000, 0x1753: 0x4000, 0x1754: 0x4000, 0x1755: 0x4000, 0x1756: 0x4000, 0x1757: 0x4000, - 0x1758: 0x4000, 0x1759: 0x4000, 
0x175a: 0x4000, 0x175b: 0x4000, 0x175c: 0x4000, 0x175d: 0x4000, - 0x175e: 0x4000, 0x175f: 0x4000, 0x1760: 0x4000, 0x1761: 0x4000, 0x1762: 0x4000, 0x1763: 0x4000, - 0x1764: 0x4000, 0x1765: 0x4000, 0x1766: 0x4000, 0x1767: 0x4000, - 0x177a: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1795: 0x4000, 0x1796: 0x4000, - 0x17a4: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17fb: 0x4000, - 0x17fc: 0x4000, 0x17fd: 0x4000, 0x17fe: 0x4000, 0x17ff: 0x4000, - // Block 0x60, offset 0x1800 - 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, - 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, - 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, - // Block 0x61, offset 0x1840 - 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, - 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, - 0x1852: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, - 0x185c: 0x4000, 0x185d: 0x4000, - 0x185e: 0x4000, 0x185f: 0x4000, - 0x186b: 0x4000, 0x186c: 0x4000, - 0x1874: 0x4000, 0x1875: 0x4000, - 0x1876: 0x4000, 0x1877: 0x4000, 0x1878: 0x4000, 0x1879: 0x4000, 0x187a: 0x4000, 0x187b: 0x4000, - 0x187c: 0x4000, - // Block 0x62, offset 0x1880 - 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, - 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, - 0x18aa: 0x4000, 0x18ab: 0x4000, - 0x18b0: 0x4000, - // Block 0x63, offset 0x18c0 - 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, - 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, - 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, - 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, - 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, - 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, - 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, - 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, - 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, - // Block 0x64, offset 0x1900 - 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, - 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, 0x190b: 0x4000, - 0x190c: 0x4000, 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, - 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, - 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, - 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 0x1923: 0x4000, - 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, - 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, - 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, - 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, - 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, - // Block 0x65, offset 0x1940 - 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, 0x1974: 0x4000, 0x1975: 0x4000, - 0x1976: 0x4000, 
0x1977: 0x4000, 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, 0x197b: 0x4000, - 0x197c: 0x4000, - // Block 0x66, offset 0x1980 - 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, 0x1983: 0x4000, 0x1984: 0x4000, 0x1985: 0x4000, - 0x1986: 0x4000, 0x1987: 0x4000, 0x1988: 0x4000, - 0x1990: 0x4000, 0x1991: 0x4000, - 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, 0x1996: 0x4000, 0x1997: 0x4000, - 0x1998: 0x4000, 0x1999: 0x4000, 0x199a: 0x4000, 0x199b: 0x4000, 0x199c: 0x4000, 0x199d: 0x4000, - 0x199e: 0x4000, 0x199f: 0x4000, 0x19a0: 0x4000, 0x19a1: 0x4000, 0x19a2: 0x4000, 0x19a3: 0x4000, - 0x19a4: 0x4000, 0x19a5: 0x4000, 0x19a6: 0x4000, 0x19a7: 0x4000, 0x19a8: 0x4000, 0x19a9: 0x4000, - 0x19aa: 0x4000, 0x19ab: 0x4000, 0x19ac: 0x4000, 0x19ad: 0x4000, 0x19ae: 0x4000, 0x19af: 0x4000, - 0x19b0: 0x4000, 0x19b1: 0x4000, 0x19b2: 0x4000, 0x19b3: 0x4000, 0x19b4: 0x4000, 0x19b5: 0x4000, - 0x19b6: 0x4000, 0x19b7: 0x4000, 0x19b8: 0x4000, 0x19b9: 0x4000, 0x19ba: 0x4000, 0x19bb: 0x4000, - 0x19bc: 0x4000, 0x19bd: 0x4000, 0x19bf: 0x4000, - // Block 0x67, offset 0x19c0 - 0x19c0: 0x4000, 0x19c1: 0x4000, 0x19c2: 0x4000, 0x19c3: 0x4000, 0x19c4: 0x4000, 0x19c5: 0x4000, - 0x19ce: 0x4000, 0x19cf: 0x4000, 0x19d0: 0x4000, 0x19d1: 0x4000, - 0x19d2: 0x4000, 0x19d3: 0x4000, 0x19d4: 0x4000, 0x19d5: 0x4000, 0x19d6: 0x4000, 0x19d7: 0x4000, - 0x19d8: 0x4000, 0x19d9: 0x4000, 0x19da: 0x4000, 0x19db: 0x4000, - 0x19e0: 0x4000, 0x19e1: 0x4000, 0x19e2: 0x4000, 0x19e3: 0x4000, - 0x19e4: 0x4000, 0x19e5: 0x4000, 0x19e6: 0x4000, 0x19e7: 0x4000, 0x19e8: 0x4000, - 0x19f0: 0x4000, 0x19f1: 0x4000, 0x19f2: 0x4000, 0x19f3: 0x4000, 0x19f4: 0x4000, 0x19f5: 0x4000, - 0x19f6: 0x4000, 0x19f7: 0x4000, 0x19f8: 0x4000, - // Block 0x68, offset 0x1a00 - 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x2000, 0x1a05: 0x2000, - 0x1a06: 0x2000, 0x1a07: 0x2000, 0x1a08: 0x2000, 0x1a09: 0x2000, 0x1a0a: 0x2000, 0x1a0b: 0x2000, - 0x1a0c: 0x2000, 0x1a0d: 0x2000, 0x1a0e: 0x2000, 0x1a0f: 0x2000, 0x1a10: 0x2000, 0x1a11: 0x2000, - 0x1a12: 0x2000, 0x1a13: 0x2000, 0x1a14: 0x2000, 0x1a15: 0x2000, 0x1a16: 0x2000, 0x1a17: 0x2000, - 0x1a18: 0x2000, 0x1a19: 0x2000, 0x1a1a: 0x2000, 0x1a1b: 0x2000, 0x1a1c: 0x2000, 0x1a1d: 0x2000, - 0x1a1e: 0x2000, 0x1a1f: 0x2000, 0x1a20: 0x2000, 0x1a21: 0x2000, 0x1a22: 0x2000, 0x1a23: 0x2000, - 0x1a24: 0x2000, 0x1a25: 0x2000, 0x1a26: 0x2000, 0x1a27: 0x2000, 0x1a28: 0x2000, 0x1a29: 0x2000, - 0x1a2a: 0x2000, 0x1a2b: 0x2000, 0x1a2c: 0x2000, 0x1a2d: 0x2000, 0x1a2e: 0x2000, 0x1a2f: 0x2000, - 0x1a30: 0x2000, 0x1a31: 0x2000, 0x1a32: 0x2000, 0x1a33: 0x2000, 0x1a34: 0x2000, 0x1a35: 0x2000, - 0x1a36: 0x2000, 0x1a37: 0x2000, 0x1a38: 0x2000, 0x1a39: 0x2000, 0x1a3a: 0x2000, 0x1a3b: 0x2000, - 0x1a3c: 0x2000, 0x1a3d: 0x2000, -} - -// widthIndex: 23 blocks, 1472 entries, 1472 bytes -// Block 0 is the zero block. 
-var widthIndex = [1472]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x10, 0xf3: 0x13, 0xf4: 0x14, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x0e, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3a, 0x253: 0x3b, - 0x265: 0x3c, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3d, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3e, 0x339: 0x3f, 0x33c: 0x40, 0x33d: 0x41, 0x33e: 0x42, 0x33f: 0x43, - // Block 0xd, offset 0x340 - 0x37f: 0x44, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x45, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x0e, 0x3ac: 0x0e, 0x3ad: 0x0e, 0x3ae: 0x0e, 0x3af: 0x0e, - 0x3b0: 0x0e, 0x3b1: 0x0e, 0x3b2: 0x0e, 0x3b3: 0x46, 0x3b4: 0x47, - // Block 0xf, offset 0x3c0 - 0x3ff: 0x48, - // Block 0x10, offset 0x400 - 0x400: 0x0e, 
0x401: 0x0e, 0x402: 0x0e, 0x403: 0x0e, 0x404: 0x49, 0x405: 0x4a, 0x406: 0x0e, 0x407: 0x0e, - 0x408: 0x0e, 0x409: 0x0e, 0x40a: 0x0e, 0x40b: 0x4b, - // Block 0x11, offset 0x440 - 0x440: 0x4c, 0x443: 0x4d, 0x444: 0x4e, 0x445: 0x4f, 0x446: 0x50, - 0x448: 0x51, 0x449: 0x52, 0x44c: 0x53, 0x44d: 0x54, 0x44e: 0x55, 0x44f: 0x56, - 0x450: 0x57, 0x451: 0x58, 0x452: 0x0e, 0x453: 0x59, 0x454: 0x5a, 0x455: 0x5b, 0x456: 0x5c, 0x457: 0x5d, - 0x458: 0x0e, 0x459: 0x5e, 0x45a: 0x0e, 0x45b: 0x5f, 0x45f: 0x60, - 0x464: 0x61, 0x465: 0x62, 0x466: 0x0e, 0x467: 0x0e, - 0x469: 0x63, 0x46a: 0x64, 0x46b: 0x65, - // Block 0x12, offset 0x480 - 0x496: 0x0b, 0x497: 0x06, - 0x498: 0x0c, 0x49a: 0x0d, 0x49b: 0x0e, 0x49f: 0x0f, - 0x4a0: 0x06, 0x4a1: 0x06, 0x4a2: 0x06, 0x4a3: 0x06, 0x4a4: 0x06, 0x4a5: 0x06, 0x4a6: 0x06, 0x4a7: 0x06, - 0x4a8: 0x06, 0x4a9: 0x06, 0x4aa: 0x06, 0x4ab: 0x06, 0x4ac: 0x06, 0x4ad: 0x06, 0x4ae: 0x06, 0x4af: 0x06, - 0x4b0: 0x06, 0x4b1: 0x06, 0x4b2: 0x06, 0x4b3: 0x06, 0x4b4: 0x06, 0x4b5: 0x06, 0x4b6: 0x06, 0x4b7: 0x06, - 0x4b8: 0x06, 0x4b9: 0x06, 0x4ba: 0x06, 0x4bb: 0x06, 0x4bc: 0x06, 0x4bd: 0x06, 0x4be: 0x06, 0x4bf: 0x06, - // Block 0x13, offset 0x4c0 - 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x09, - // Block 0x14, offset 0x500 - 0x500: 0x08, 0x501: 0x08, 0x502: 0x08, 0x503: 0x08, 0x504: 0x08, 0x505: 0x08, 0x506: 0x08, 0x507: 0x08, - 0x508: 0x08, 0x509: 0x08, 0x50a: 0x08, 0x50b: 0x08, 0x50c: 0x08, 0x50d: 0x08, 0x50e: 0x08, 0x50f: 0x08, - 0x510: 0x08, 0x511: 0x08, 0x512: 0x08, 0x513: 0x08, 0x514: 0x08, 0x515: 0x08, 0x516: 0x08, 0x517: 0x08, - 0x518: 0x08, 0x519: 0x08, 0x51a: 0x08, 0x51b: 0x08, 0x51c: 0x08, 0x51d: 0x08, 0x51e: 0x08, 0x51f: 0x08, - 0x520: 0x08, 0x521: 0x08, 0x522: 0x08, 0x523: 0x08, 0x524: 0x08, 0x525: 0x08, 0x526: 0x08, 0x527: 0x08, - 0x528: 0x08, 0x529: 0x08, 0x52a: 0x08, 0x52b: 0x08, 0x52c: 0x08, 0x52d: 0x08, 0x52e: 0x08, 0x52f: 0x08, - 0x530: 0x08, 0x531: 0x08, 0x532: 0x08, 0x533: 0x08, 0x534: 0x08, 0x535: 0x08, 0x536: 0x08, 0x537: 0x08, - 0x538: 0x08, 0x539: 0x08, 0x53a: 0x08, 0x53b: 0x08, 0x53c: 0x08, 0x53d: 0x08, 0x53e: 0x08, 0x53f: 0x66, - // Block 0x15, offset 0x540 - 0x560: 0x11, - 0x570: 0x09, 0x571: 0x09, 0x572: 0x09, 0x573: 0x09, 0x574: 0x09, 0x575: 0x09, 0x576: 0x09, 0x577: 0x09, - 0x578: 0x09, 0x579: 0x09, 0x57a: 0x09, 0x57b: 0x09, 0x57c: 0x09, 0x57d: 0x09, 0x57e: 0x09, 0x57f: 0x12, - // Block 0x16, offset 0x580 - 0x580: 0x09, 0x581: 0x09, 0x582: 0x09, 0x583: 0x09, 0x584: 0x09, 0x585: 0x09, 0x586: 0x09, 0x587: 0x09, - 0x588: 0x09, 0x589: 0x09, 0x58a: 0x09, 0x58b: 0x09, 0x58c: 0x09, 0x58d: 0x09, 0x58e: 0x09, 0x58f: 0x12, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. -// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. 
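(Aside, not part of the vendored file or of this diff: the xor scheme described in the generated comment above can be shown with a short standalone Go sketch. The entry value {0x01, 0xE0, 0x00, 0x00} is the one quoted in that comment for the fullwidth Latin capitals; everything else in the snippet is illustrative only.)

// Sketch of the inverseData entry format described above: byte 0 is the
// length of the mapped UTF-8 sequence, bytes 1..n hold that sequence with
// its final byte stored xor-ed against the final byte of the original
// rune's UTF-8 encoding.
package main

import "fmt"

func main() {
	entry := [4]byte{0x01, 0xE0, 0x00, 0x00} // shared entry quoted in the comment above

	src := []byte("\uFF21")  // U+FF21 FULLWIDTH LATIN CAPITAL LETTER A = EF BC A1
	last := src[len(src)-1]  // 0xA1

	n := int(entry[0])       // mapped sequence is 1 byte long
	out := make([]byte, n)
	copy(out, entry[1:1+n])  // {0xE0}
	out[n-1] ^= last         // 0xE0 ^ 0xA1 = 0x41

	fmt.Printf("%q\n", string(out)) // prints "A"
}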
-var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 
0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 15512 bytes (15KiB) diff --git a/constraint/vendor/golang.org/x/text/width/tables9.0.0.go b/constraint/vendor/golang.org/x/text/width/tables9.0.0.go deleted file mode 100644 index d981330a9..000000000 --- a/constraint/vendor/golang.org/x/text/width/tables9.0.0.go +++ /dev/null @@ -1,1296 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -//go:build !go1.10 - -package width - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "9.0.0" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. 
-func (t *widthTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *widthTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return widthValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := widthIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = widthIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = widthIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *widthTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return widthValues[c0] - } - i := widthIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = widthIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = widthIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3. -type widthTrie struct{} - -func newWidthTrie(i int) *widthTrie { - return &widthTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. -func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { - switch { - default: - return uint16(widthValues[n<<6+uint32(b)]) - } -} - -// widthValues: 99 blocks, 6336 entries, 12672 bytes -// The third block is the zero block. 
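(For orientation before the widthValues table below: these generated tables back the lookup helpers of golang.org/x/text/width. A minimal usage sketch follows, assuming the package's public API — width.Narrow and width.LookupRune — which is not shown anywhere in this diff.)

package main

import (
	"fmt"

	"golang.org/x/text/width"
)

func main() {
	// Fold fullwidth forms to their narrow counterparts; internally this
	// walks the same kind of widthTrie/widthValues data shown in this diff.
	fmt.Println(width.Narrow.String("ＡＢＣ")) // "ABC"

	// Report the East Asian Width kind of a single rune.
	p := width.LookupRune('Ａ')
	fmt.Println(p.Kind()) // EastAsianFullwidth
}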
-var widthValues = [6336]uint16{ - // Block 0x0, offset 0x0 - 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, - 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, - 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, - 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, - 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, - 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, - // Block 0x1, offset 0x40 - 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, - 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, - 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, - 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, - 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, - 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, - 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, - 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, - 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, - 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, - 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, - 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, - 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, - 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, - 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, - 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, - // Block 0x4, offset 0x100 - 0x106: 0x2000, - 0x110: 0x2000, - 0x117: 0x2000, - 0x118: 0x2000, - 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, - 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, - 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, - 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, - 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, - 0x13c: 0x2000, 0x13e: 0x2000, - // Block 0x5, offset 0x140 - 0x141: 0x2000, - 0x151: 0x2000, - 0x153: 0x2000, - 0x15b: 0x2000, - 0x166: 0x2000, 0x167: 0x2000, - 0x16b: 0x2000, - 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, - 0x178: 0x2000, - 0x17f: 0x2000, - // Block 0x6, offset 0x180 - 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, - 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, - 0x18d: 0x2000, - 0x192: 0x2000, 0x193: 0x2000, - 0x1a6: 0x2000, 0x1a7: 0x2000, - 0x1ab: 0x2000, - // Block 0x7, offset 0x1c0 - 0x1ce: 0x2000, 0x1d0: 0x2000, - 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, - 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, - // Block 0x8, offset 0x200 - 0x211: 0x2000, - 0x221: 0x2000, - // Block 0x9, offset 0x240 - 0x244: 0x2000, - 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, - 0x24d: 0x2000, 0x250: 0x2000, - 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, - 0x25f: 0x2000, - // Block 0xa, offset 0x280 - 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, - 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, - 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, - 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, - 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, - 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, - 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, - 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, - 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, - 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, - 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, - 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, - 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, - 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, - 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, - 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, - 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, - 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, - // Block 0xc, offset 0x300 - 0x311: 0x2000, - 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, - 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, - 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, - 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, - 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, - 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, - 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, - // Block 0xd, offset 0x340 - 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, - 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, - // Block 0xe, offset 0x380 - 0x381: 0x2000, - 0x390: 0x2000, 0x391: 0x2000, - 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, - 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, - 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, - 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, - 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, - 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, - 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, - 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, - 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, - 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, - // Block 0x10, offset 0x400 - 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, - 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, - 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, - 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, - 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, - 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, - 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, - 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, - 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, - 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, - 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, - // Block 0x11, offset 0x440 - 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, - 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, - 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, - 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, - 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, - 0x45e: 0x4000, 0x45f: 0x4000, - // Block 0x12, offset 0x480 - 0x490: 0x2000, - 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, - 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, - 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, - 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, - 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, - 0x4bb: 0x2000, - 0x4be: 0x2000, - // Block 0x13, offset 0x4c0 - 0x4f4: 0x2000, - 0x4ff: 0x2000, - // Block 0x14, offset 0x500 - 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, - 0x529: 0xa009, - 0x52c: 0x2000, - // Block 0x15, offset 0x540 - 0x543: 0x2000, 0x545: 0x2000, - 0x549: 0x2000, - 0x553: 0x2000, 0x556: 0x2000, - 0x561: 0x2000, 0x562: 0x2000, - 0x566: 0x2000, - 0x56b: 0x2000, - // Block 0x16, offset 0x580 - 0x593: 0x2000, 0x594: 0x2000, - 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, - 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, - 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, - 0x5aa: 0x2000, 0x5ab: 0x2000, - 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, - 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, - // Block 0x17, offset 0x5c0 - 0x5c9: 0x2000, - 0x5d0: 0x200a, 0x5d1: 0x200b, - 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, - 0x5d8: 0x2000, 0x5d9: 0x2000, - 0x5f8: 0x2000, 0x5f9: 0x2000, - // Block 0x18, offset 0x600 - 0x612: 0x2000, 0x614: 0x2000, - 0x627: 0x2000, - // Block 0x19, offset 0x640 - 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, - 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, - 0x64f: 0x2000, 0x651: 0x2000, - 0x655: 0x2000, - 0x65a: 0x2000, 0x65d: 0x2000, - 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, - 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, - 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, - 0x674: 0x2000, 0x675: 0x2000, - 0x676: 0x2000, 0x677: 0x2000, - 0x67c: 0x2000, 0x67d: 0x2000, - // Block 0x1a, offset 0x680 - 0x688: 0x2000, - 0x68c: 0x2000, - 0x692: 0x2000, - 0x6a0: 0x2000, 0x6a1: 0x2000, - 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, - 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, - // 
Block 0x1b, offset 0x6c0 - 0x6c2: 0x2000, 0x6c3: 0x2000, - 0x6c6: 0x2000, 0x6c7: 0x2000, - 0x6d5: 0x2000, - 0x6d9: 0x2000, - 0x6e5: 0x2000, - 0x6ff: 0x2000, - // Block 0x1c, offset 0x700 - 0x712: 0x2000, - 0x71a: 0x4000, 0x71b: 0x4000, - 0x729: 0x4000, - 0x72a: 0x4000, - // Block 0x1d, offset 0x740 - 0x769: 0x4000, - 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, - 0x770: 0x4000, 0x773: 0x4000, - // Block 0x1e, offset 0x780 - 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, - 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, - 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, - 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, - 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, - 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, - 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, - 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, - 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, - 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, - 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, - 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, - 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, - 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, - 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, - 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, - // Block 0x20, offset 0x800 - 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, - 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, - 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, - 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, - 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, - 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, - 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, - 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, - 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, - 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, - 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, - // Block 0x21, offset 0x840 - 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, - 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, - 0x850: 0x2000, 0x851: 0x2000, - 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, - 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, - 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, - 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, - 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, - 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, - // Block 0x22, offset 0x880 - 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, - 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, - 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, - 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, - 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, - 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, - 0x8b2: 0x2000, 0x8b3: 0x2000, - 0x8b6: 0x2000, 0x8b7: 0x2000, - 0x8bc: 0x2000, 0x8bd: 0x2000, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x2000, 0x8c1: 0x2000, - 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, - 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, - 0x8e2: 0x2000, 0x8e3: 0x2000, - 0x8e4: 0x2000, 0x8e5: 0x2000, - 0x8ef: 0x2000, - 0x8fd: 0x4000, 0x8fe: 0x4000, - // Block 0x24, offset 0x900 - 0x905: 0x2000, - 0x906: 0x2000, 0x909: 0x2000, - 0x90e: 0x2000, 0x90f: 0x2000, - 0x914: 0x4000, 0x915: 0x4000, - 0x91c: 0x2000, - 0x91e: 0x2000, - // Block 0x25, offset 0x940 - 0x940: 0x2000, 0x942: 0x2000, - 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, - 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, - 0x952: 0x4000, 0x953: 0x4000, - 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, - 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, - 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, - 0x97f: 0x4000, - // Block 0x26, offset 0x980 - 0x993: 0x4000, - 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, - 0x9aa: 0x4000, 0x9ab: 0x4000, - 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, - // Block 0x27, offset 0x9c0 - 0x9c4: 0x4000, 0x9c5: 0x4000, - 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, - 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, - 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, - 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, - 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, - 0x9e8: 0x2000, 0x9e9: 0x2000, - 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, - 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, - 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, - 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, - // Block 0x28, offset 0xa00 - 0xa05: 0x4000, - 0xa0a: 0x4000, 0xa0b: 0x4000, - 0xa28: 0x4000, - 0xa3d: 0x2000, - // Block 0x29, offset 0xa40 - 0xa4c: 0x4000, 0xa4e: 0x4000, - 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, - 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, - 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, - // Block 0x2a, offset 0xa80 - 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, - 0xab0: 0x4000, - 0xabf: 0x4000, - // Block 0x2b, offset 0xac0 - 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, - 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, - // Block 0x2c, offset 0xb00 - 0xb05: 0x6010, - 0xb06: 0x6011, - // Block 0x2d, offset 0xb40 - 0xb5b: 0x4000, 0xb5c: 0x4000, - // Block 0x2e, offset 0xb80 - 0xb90: 0x4000, - 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, - 0xb98: 0x2000, 0xb99: 0x2000, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, - 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, - 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, - 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, - 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, - 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, - 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, - 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, - 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, - 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, - 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, - // Block 0x30, offset 0xc00 - 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, - 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, - 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, - 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, - 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, - 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, - 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, - 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, - 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, - // Block 0x31, offset 0xc40 - 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, - 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, - 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, - 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, - 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, - 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, - // Block 0x32, offset 0xc80 - 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, - 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, - 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, - 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, - 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, - 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, - 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, - 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, - 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, - 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, - 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, - // Block 0x33, offset 0xcc0 - 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, - 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, - 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, - 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, - 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, - 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, - 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, - 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, - 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, - 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, - 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, - // Block 0x34, offset 0xd00 - 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, - 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, - 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, - 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, - 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, - 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, - 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, - 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, - 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, - 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, - 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, - // Block 0x35, offset 0xd40 - 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, - 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, - 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, - 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, - 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, - 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, - 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, - 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, - 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, - 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, - 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, - // Block 0x36, offset 0xd80 - 0xd85: 0x4000, - 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, - 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, - 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, - 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, - 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, - 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, - 0xdb1: 0x403e, 0xdb2: 
0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, - 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, - 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, - 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, - 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, - 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, - 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, - 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, - 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, - 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, - 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, - 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, - 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, - // Block 0x38, offset 0xe00 - 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, - 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, - 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, - 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, - 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, - 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, - 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, - 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, - 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, - 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, - // Block 0x39, offset 0xe40 - 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, - 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, - 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, - 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, - 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, - 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, - 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, - 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, - 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, - // Block 0x3a, offset 0xe80 - 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, - 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, - 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, - 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, - 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, - 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, - 0xea4: 0x4000, 
0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, - 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, - 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, - 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, - 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, - // Block 0x3b, offset 0xec0 - 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, - 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, - 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, - 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, - 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, - 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, - 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, - 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, - 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, - 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, - 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, - // Block 0x3c, offset 0xf00 - 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, - 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, - 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, - 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, - 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, - 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, - 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, - 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, - 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, - 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, - 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, - // Block 0x3d, offset 0xf40 - 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, - 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, - 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, - 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, - 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, - 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, - 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, - 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, - 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, - 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, - 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, - // Block 0x3e, offset 0xf80 - 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, - 0xf86: 0x4000, - // Block 0x3f, offset 
0xfc0 - 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, - 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, - 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, - 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, - 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, - 0xffc: 0x4000, - // Block 0x40, offset 0x1000 - 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, - 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, - 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, - 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, - 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, - 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, - // Block 0x41, offset 0x1040 - 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, - 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, - 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, - 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, - 0x1058: 0x4000, 0x1059: 0x4000, - 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, - 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, - 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, - // Block 0x42, offset 0x1080 - 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, - 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, - 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, - 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, - 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, - 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, - 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, - 0x10aa: 0x4000, 0x10ab: 0x4000, - // Block 0x43, offset 0x10c0 - 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, - 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, - 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, - 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, - 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, - 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, - 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, - 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, - 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, - 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, - 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 
0x10ff: 0x9049, - // Block 0x44, offset 0x1100 - 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, - 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, - 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, - 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, - 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, - 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, - 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, - 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, - 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, - 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, - 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, - // Block 0x45, offset 0x1140 - 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, - 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, - 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, - 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, - 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, - 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, - 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, - 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, - 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, - 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, - 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, - // Block 0x46, offset 0x1180 - 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, - 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, - 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, - 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, - 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, - 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, - 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, - 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, - 0x11bd: 0x2000, - // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, - // Block 0x48, offset 0x1200 - 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, - 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, - 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, - 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, - 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, - 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, - 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, - 0x122a: 
0x4000, 0x122b: 0x4000, 0x122c: 0x4000, - // Block 0x49, offset 0x1240 - 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, - 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, - 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, - 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, - 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, - 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, - 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, - 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, - 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, - // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, - // Block 0x4b, offset 0x12c0 - 0x12c4: 0x4000, - // Block 0x4c, offset 0x1300 - 0x130f: 0x4000, - // Block 0x4d, offset 0x1340 - 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000, - 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000, - 0x1350: 0x2000, 0x1351: 0x2000, - 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000, - 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000, - 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000, - 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000, - 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000, - 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000, - 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000, - 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000, - // Block 0x4e, offset 0x1380 - 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000, - 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000, - 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000, - 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000, - 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000, - 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000, - 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000, - 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000, - 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000, - 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000, - 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000, - 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000, - 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, - 0x13de: 0x2000, 
0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, - 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, - // Block 0x50, offset 0x1400 - 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000, - 0x1410: 0x4000, 0x1411: 0x4000, - 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000, - 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000, - 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000, - 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000, - 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000, - 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000, - 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000, - // Block 0x51, offset 0x1440 - 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000, - 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000, - 0x1450: 0x4000, 0x1451: 0x4000, - // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000, - 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000, - 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000, - 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, - 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, - 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, - 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000, - 0x14cc: 0x4000, 0x14cd: 0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, - 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, - 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, - 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, - 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, - 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, - 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000, - // Block 0x54, offset 0x1500 - 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, - 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, - 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, - 0x1520: 0x4000, 0x1521: 
0x4000, 0x1522: 0x4000, 0x1523: 0x4000, - 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000, - 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, - 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, - 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, - // Block 0x55, offset 0x1540 - 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, - 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, - 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, - 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, - 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1574: 0x4000, - 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, - // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, - 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, - 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, - 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, - 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, - 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, - 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, - 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, - 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, - 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, - // Block 0x57, offset 0x15c0 - 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, - 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000, - 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000, - 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, - 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, - 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, - 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15ff: 0x4000, - // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, - 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, 
- 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, - 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, - 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, - 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, - 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, - 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, - 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, - 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, - // Block 0x59, offset 0x1640 - 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, - 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, - 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, - 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, - 0x167a: 0x4000, - // Block 0x5a, offset 0x1680 - 0x1695: 0x4000, 0x1696: 0x4000, - 0x16a4: 0x4000, - // Block 0x5b, offset 0x16c0 - 0x16fb: 0x4000, - 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000, - // Block 0x5c, offset 0x1700 - 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, - 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, - 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, - // Block 0x5d, offset 0x1740 - 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000, - 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, - 0x1752: 0x4000, - 0x176b: 0x4000, 0x176c: 0x4000, - 0x1774: 0x4000, 0x1775: 0x4000, - 0x1776: 0x4000, - // Block 0x5e, offset 0x1780 - 0x1790: 0x4000, 0x1791: 0x4000, - 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000, - 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 0x4000, - 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000, - 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000, - 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000, - 0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000, - 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, - // Block 0x5f, offset 0x17c0 - 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, - 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000, - 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000, - 0x17de: 0x4000, - // Block 0x60, offset 0x1800 - 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, - 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, - 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, - // Block 0x61, offset 0x1840 
- 0x1840: 0x4000, - // Block 0x62, offset 0x1880 - 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000, - 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000, - 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000, - 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000, - 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000, - 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000, - 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000, - 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000, - 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000, - 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000, - 0x18bc: 0x2000, 0x18bd: 0x2000, -} - -// widthIndex: 22 blocks, 1408 entries, 1408 bytes -// Block 0 is the zero block. -var widthIndex = [1408]uint8{ - // Block 0x0, offset 0x0 - // Block 0x1, offset 0x40 - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, - 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, - 0xd0: 0x0c, 0xd1: 0x0d, - 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, - 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, - 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, - // Block 0x4, offset 0x100 - 0x104: 0x0e, 0x105: 0x0f, - // Block 0x5, offset 0x140 - 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, - 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, - 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, - 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, - 0x166: 0x2a, - 0x16c: 0x2b, 0x16d: 0x2c, - 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, - // Block 0x6, offset 0x180 - 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, - 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, - 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, - 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, - 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, - 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, - 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, - 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, - 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, - 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, - 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 
0x1de: 0x0e, 0x1df: 0x0e, - 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, - 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, - 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, - 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, - // Block 0x8, offset 0x200 - 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, - 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, - 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, - 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, - 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, - 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, - 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, - 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, - // Block 0x9, offset 0x240 - 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, - 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, - 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, - 0x265: 0x3d, - 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, - 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, - // Block 0xa, offset 0x280 - 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, - 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, - 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, - 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, - 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, - 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, - 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, - 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, - 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, - 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, - 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, - // Block 0xc, offset 0x300 - 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, - 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, - 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, - 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 
0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, - 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, - 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, - 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, - // Block 0xd, offset 0x340 - 0x37f: 0x45, - // Block 0xe, offset 0x380 - 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, - 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, - 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, - 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, - 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, - 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x48, - // Block 0x10, offset 0x400 - 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d, - 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 0x40f: 0x53, - 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59, - 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b, - 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f, - // Block 0x11, offset 0x440 - 0x456: 0x0b, 0x457: 0x06, - 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, - 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, - 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, - 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, - 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, - // Block 0x12, offset 0x480 - 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, - 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, - 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, - 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, - 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, - 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, - 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60, - // Block 0x14, offset 0x500 - 0x520: 0x10, - 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, - 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, - // Block 0x15, offset 0x540 - 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, - 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, -} - -// inverseData contains 4-byte entries of the following format: -// -// <0 padding> -// -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 
encoding of the original rune. Mappings often have the following -// pattern: -// -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// -// { 0x01, 0xE0, 0x00, 0x00 } -// -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// -// E0 ^ A1 = 41. -// -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// -// E0 ^ A2 = 42. -// -// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8. -var inverseData = [150][4]byte{ - {0x00, 0x00, 0x00, 0x00}, - {0x03, 0xe3, 0x80, 0xa0}, - {0x03, 0xef, 0xbc, 0xa0}, - {0x03, 0xef, 0xbc, 0xe0}, - {0x03, 0xef, 0xbd, 0xe0}, - {0x03, 0xef, 0xbf, 0x02}, - {0x03, 0xef, 0xbf, 0x00}, - {0x03, 0xef, 0xbf, 0x0e}, - {0x03, 0xef, 0xbf, 0x0c}, - {0x03, 0xef, 0xbf, 0x0f}, - {0x03, 0xef, 0xbf, 0x39}, - {0x03, 0xef, 0xbf, 0x3b}, - {0x03, 0xef, 0xbf, 0x3f}, - {0x03, 0xef, 0xbf, 0x2a}, - {0x03, 0xef, 0xbf, 0x0d}, - {0x03, 0xef, 0xbf, 0x25}, - {0x03, 0xef, 0xbd, 0x1a}, - {0x03, 0xef, 0xbd, 0x26}, - {0x01, 0xa0, 0x00, 0x00}, - {0x03, 0xef, 0xbd, 0x25}, - {0x03, 0xef, 0xbd, 0x23}, - {0x03, 0xef, 0xbd, 0x2e}, - {0x03, 0xef, 0xbe, 0x07}, - {0x03, 0xef, 0xbe, 0x05}, - {0x03, 0xef, 0xbd, 0x06}, - {0x03, 0xef, 0xbd, 0x13}, - {0x03, 0xef, 0xbd, 0x0b}, - {0x03, 0xef, 0xbd, 0x16}, - {0x03, 0xef, 0xbd, 0x0c}, - {0x03, 0xef, 0xbd, 0x15}, - {0x03, 0xef, 0xbd, 0x0d}, - {0x03, 0xef, 0xbd, 0x1c}, - {0x03, 0xef, 0xbd, 0x02}, - {0x03, 0xef, 0xbd, 0x1f}, - {0x03, 0xef, 0xbd, 0x1d}, - {0x03, 0xef, 0xbd, 0x17}, - {0x03, 0xef, 0xbd, 0x08}, - {0x03, 0xef, 0xbd, 0x09}, - {0x03, 0xef, 0xbd, 0x0e}, - {0x03, 0xef, 0xbd, 0x04}, - {0x03, 0xef, 0xbd, 0x05}, - {0x03, 0xef, 0xbe, 0x3f}, - {0x03, 0xef, 0xbe, 0x00}, - {0x03, 0xef, 0xbd, 0x2c}, - {0x03, 0xef, 0xbe, 0x06}, - {0x03, 0xef, 0xbe, 0x0c}, - {0x03, 0xef, 0xbe, 0x0f}, - {0x03, 0xef, 0xbe, 0x0d}, - {0x03, 0xef, 0xbe, 0x0b}, - {0x03, 0xef, 0xbe, 0x19}, - {0x03, 0xef, 0xbe, 0x15}, - {0x03, 0xef, 0xbe, 0x11}, - {0x03, 0xef, 0xbe, 0x31}, - {0x03, 0xef, 0xbe, 0x33}, - {0x03, 0xef, 0xbd, 0x0f}, - {0x03, 0xef, 0xbe, 0x30}, - {0x03, 0xef, 0xbe, 0x3e}, - {0x03, 0xef, 0xbe, 0x32}, - {0x03, 0xef, 0xbe, 0x36}, - {0x03, 0xef, 0xbd, 0x14}, - {0x03, 0xef, 0xbe, 0x2e}, - {0x03, 0xef, 0xbd, 0x1e}, - {0x03, 0xef, 0xbe, 0x10}, - {0x03, 0xef, 0xbf, 0x13}, - {0x03, 0xef, 0xbf, 0x15}, - {0x03, 0xef, 0xbf, 0x17}, - {0x03, 0xef, 0xbf, 0x1f}, - {0x03, 0xef, 0xbf, 0x1d}, - {0x03, 0xef, 0xbf, 0x1b}, - {0x03, 0xef, 0xbf, 0x09}, - {0x03, 0xef, 0xbf, 0x0b}, - {0x03, 0xef, 0xbf, 0x37}, - {0x03, 0xef, 0xbe, 0x04}, - {0x01, 0xe0, 0x00, 0x00}, - {0x03, 0xe2, 0xa6, 0x1a}, - {0x03, 0xe2, 0xa6, 0x26}, - {0x03, 0xe3, 0x80, 0x23}, - {0x03, 0xe3, 0x80, 0x2e}, - {0x03, 0xe3, 0x80, 0x25}, - {0x03, 0xe3, 0x83, 0x1e}, - {0x03, 0xe3, 0x83, 0x14}, - {0x03, 0xe3, 0x82, 0x06}, - {0x03, 0xe3, 0x82, 0x0b}, - {0x03, 0xe3, 0x82, 0x0c}, - {0x03, 0xe3, 0x82, 0x0d}, - {0x03, 0xe3, 0x82, 0x02}, - {0x03, 0xe3, 0x83, 0x0f}, - {0x03, 0xe3, 0x83, 0x08}, - {0x03, 0xe3, 0x83, 0x09}, - {0x03, 0xe3, 0x83, 0x2c}, - {0x03, 0xe3, 0x83, 0x0c}, - {0x03, 0xe3, 0x82, 0x13}, - {0x03, 0xe3, 0x82, 0x16}, - {0x03, 0xe3, 0x82, 0x15}, - {0x03, 0xe3, 0x82, 0x1c}, - {0x03, 0xe3, 0x82, 0x1f}, - {0x03, 0xe3, 0x82, 0x1d}, - {0x03, 0xe3, 0x82, 0x1a}, - {0x03, 0xe3, 0x82, 0x17}, - {0x03, 0xe3, 0x82, 0x08}, - {0x03, 0xe3, 0x82, 0x09}, - {0x03, 0xe3, 0x82, 
0x0e}, - {0x03, 0xe3, 0x82, 0x04}, - {0x03, 0xe3, 0x82, 0x05}, - {0x03, 0xe3, 0x82, 0x3f}, - {0x03, 0xe3, 0x83, 0x00}, - {0x03, 0xe3, 0x83, 0x06}, - {0x03, 0xe3, 0x83, 0x05}, - {0x03, 0xe3, 0x83, 0x0d}, - {0x03, 0xe3, 0x83, 0x0b}, - {0x03, 0xe3, 0x83, 0x07}, - {0x03, 0xe3, 0x83, 0x19}, - {0x03, 0xe3, 0x83, 0x15}, - {0x03, 0xe3, 0x83, 0x11}, - {0x03, 0xe3, 0x83, 0x31}, - {0x03, 0xe3, 0x83, 0x33}, - {0x03, 0xe3, 0x83, 0x30}, - {0x03, 0xe3, 0x83, 0x3e}, - {0x03, 0xe3, 0x83, 0x32}, - {0x03, 0xe3, 0x83, 0x36}, - {0x03, 0xe3, 0x83, 0x2e}, - {0x03, 0xe3, 0x82, 0x07}, - {0x03, 0xe3, 0x85, 0x04}, - {0x03, 0xe3, 0x84, 0x10}, - {0x03, 0xe3, 0x85, 0x30}, - {0x03, 0xe3, 0x85, 0x0d}, - {0x03, 0xe3, 0x85, 0x13}, - {0x03, 0xe3, 0x85, 0x15}, - {0x03, 0xe3, 0x85, 0x17}, - {0x03, 0xe3, 0x85, 0x1f}, - {0x03, 0xe3, 0x85, 0x1d}, - {0x03, 0xe3, 0x85, 0x1b}, - {0x03, 0xe3, 0x85, 0x09}, - {0x03, 0xe3, 0x85, 0x0f}, - {0x03, 0xe3, 0x85, 0x0b}, - {0x03, 0xe3, 0x85, 0x37}, - {0x03, 0xe3, 0x85, 0x3b}, - {0x03, 0xe3, 0x85, 0x39}, - {0x03, 0xe3, 0x85, 0x3f}, - {0x02, 0xc2, 0x02, 0x00}, - {0x02, 0xc2, 0x0e, 0x00}, - {0x02, 0xc2, 0x0c, 0x00}, - {0x02, 0xc2, 0x00, 0x00}, - {0x03, 0xe2, 0x82, 0x0f}, - {0x03, 0xe2, 0x94, 0x2a}, - {0x03, 0xe2, 0x86, 0x39}, - {0x03, 0xe2, 0x86, 0x3b}, - {0x03, 0xe2, 0x86, 0x3f}, - {0x03, 0xe2, 0x96, 0x0d}, - {0x03, 0xe2, 0x97, 0x25}, -} - -// Total table size 14680 bytes (14KiB) diff --git a/constraint/vendor/golang.org/x/text/width/transform.go b/constraint/vendor/golang.org/x/text/width/transform.go deleted file mode 100644 index 0049f700a..000000000 --- a/constraint/vendor/golang.org/x/text/width/transform.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package width - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -type foldTransform struct { - transform.NopResetter -} - -func (foldTransform) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - if src[n] < utf8.RuneSelf { - // ASCII fast path. - for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { - } - continue - } - v, size := trie.lookup(src[n:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - err = transform.ErrShortSrc - } else { - n = len(src) - } - break - } - if elem(v)&tagNeedsFold != 0 { - err = transform.ErrEndOfSpan - break - } - n += size - } - return n, err -} - -func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - if src[nSrc] < utf8.RuneSelf { - // ASCII fast path. 
- start, end := nSrc, len(src) - if d := len(dst) - nDst; d < end-start { - end = nSrc + d - } - for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { - } - n := copy(dst[nDst:], src[start:nSrc]) - if nDst += n; nDst == len(dst) { - nSrc = start + n - if nSrc == len(src) { - return nDst, nSrc, nil - } - if src[nSrc] < utf8.RuneSelf { - return nDst, nSrc, transform.ErrShortDst - } - } - continue - } - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if elem(v)&tagNeedsFold == 0 { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} - -type narrowTransform struct { - transform.NopResetter -} - -func (narrowTransform) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - if src[n] < utf8.RuneSelf { - // ASCII fast path. - for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { - } - continue - } - v, size := trie.lookup(src[n:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - err = transform.ErrShortSrc - } else { - n = len(src) - } - break - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { - } else { - err = transform.ErrEndOfSpan - break - } - n += size - } - return n, err -} - -func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - if src[nSrc] < utf8.RuneSelf { - // ASCII fast path. - start, end := nSrc, len(src) - if d := len(dst) - nDst; d < end-start { - end = nSrc + d - } - for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { - } - n := copy(dst[nDst:], src[start:nSrc]) - if nDst += n; nDst == len(dst) { - nSrc = start + n - if nSrc == len(src) { - return nDst, nSrc, nil - } - if src[nSrc] < utf8.RuneSelf { - return nDst, nSrc, transform.ErrShortDst - } - } - continue - } - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} - -type wideTransform struct { - transform.NopResetter -} - -func (wideTransform) Span(src []byte, atEOF bool) (n int, err error) { - for n < len(src) { - // TODO: Consider ASCII fast path. Special-casing ASCII handling can - // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably - // not enough to warrant the extra code and complexity. 
- v, size := trie.lookup(src[n:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - err = transform.ErrShortSrc - } else { - n = len(src) - } - break - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { - } else { - err = transform.ErrEndOfSpan - break - } - n += size - } - return n, err -} - -func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - for nSrc < len(src) { - // TODO: Consider ASCII fast path. Special-casing ASCII handling can - // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably - // not enough to warrant the extra code and complexity. - v, size := trie.lookup(src[nSrc:]) - if size == 0 { // incomplete UTF-8 encoding - if !atEOF { - return nDst, nSrc, transform.ErrShortSrc - } - size = 1 // gobble 1 byte - } - if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { - if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { - return nDst, nSrc, transform.ErrShortDst - } - nDst += size - } else { - data := inverseData[byte(v)] - if len(dst)-nDst < int(data[0]) { - return nDst, nSrc, transform.ErrShortDst - } - i := 1 - for end := int(data[0]); i < end; i++ { - dst[nDst] = data[i] - nDst++ - } - dst[nDst] = data[i] ^ src[nSrc+size-1] - nDst++ - } - nSrc += size - } - return nDst, nSrc, nil -} diff --git a/constraint/vendor/golang.org/x/text/width/trieval.go b/constraint/vendor/golang.org/x/text/width/trieval.go deleted file mode 100644 index ca8e45fd1..000000000 --- a/constraint/vendor/golang.org/x/text/width/trieval.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -package width - -// elem is an entry of the width trie. The high byte is used to encode the type -// of the rune. The low byte is used to store the index to a mapping entry in -// the inverseData array. -type elem uint16 - -const ( - tagNeutral elem = iota << typeShift - tagAmbiguous - tagWide - tagNarrow - tagFullwidth - tagHalfwidth -) - -const ( - numTypeBits = 3 - typeShift = 16 - numTypeBits - - // tagNeedsFold is true for all fullwidth and halfwidth runes except for - // the Won sign U+20A9. - tagNeedsFold = 0x1000 - - // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide - // variant. - wonSign rune = 0x20A9 -) diff --git a/constraint/vendor/golang.org/x/text/width/width.go b/constraint/vendor/golang.org/x/text/width/width.go deleted file mode 100644 index 29c7509be..000000000 --- a/constraint/vendor/golang.org/x/text/width/width.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate stringer -type=Kind -//go:generate go run gen.go gen_common.go gen_trieval.go - -// Package width provides functionality for handling different widths in text. -// -// Wide characters behave like ideographs; they tend to allow line breaks after -// each character and remain upright in vertical text layout. Narrow characters -// are kept together in words or runs that are rotated sideways in vertical text -// layout. -// -// For more information, see https://unicode.org/reports/tr11/. -package width // import "golang.org/x/text/width" - -import ( - "unicode/utf8" - - "golang.org/x/text/transform" -) - -// TODO -// 1) Reduce table size by compressing blocks. -// 2) API proposition for computing display length -// (approximation, fixed pitch only). 
-// 3) Implement display length. - -// Kind indicates the type of width property as defined in https://unicode.org/reports/tr11/. -type Kind int - -const ( - // Neutral characters do not occur in legacy East Asian character sets. - Neutral Kind = iota - - // EastAsianAmbiguous characters that can be sometimes wide and sometimes - // narrow and require additional information not contained in the character - // code to further resolve their width. - EastAsianAmbiguous - - // EastAsianWide characters are wide in its usual form. They occur only in - // the context of East Asian typography. These runes may have explicit - // halfwidth counterparts. - EastAsianWide - - // EastAsianNarrow characters are narrow in its usual form. They often have - // fullwidth counterparts. - EastAsianNarrow - - // Note: there exist Narrow runes that do not have fullwidth or wide - // counterparts, despite what the definition says (e.g. U+27E6). - - // EastAsianFullwidth characters have a compatibility decompositions of type - // wide that map to a narrow counterpart. - EastAsianFullwidth - - // EastAsianHalfwidth characters have a compatibility decomposition of type - // narrow that map to a wide or ambiguous counterpart, plus U+20A9 ₩ WON - // SIGN. - EastAsianHalfwidth - - // Note: there exist runes that have a halfwidth counterparts but that are - // classified as Ambiguous, rather than wide (e.g. U+2190). -) - -// TODO: the generated tries need to return size 1 for invalid runes for the -// width to be computed correctly (each byte should render width 1) - -var trie = newWidthTrie(0) - -// Lookup reports the Properties of the first rune in b and the number of bytes -// of its UTF-8 encoding. -func Lookup(b []byte) (p Properties, size int) { - v, sz := trie.lookup(b) - return Properties{elem(v), b[sz-1]}, sz -} - -// LookupString reports the Properties of the first rune in s and the number of -// bytes of its UTF-8 encoding. -func LookupString(s string) (p Properties, size int) { - v, sz := trie.lookupString(s) - return Properties{elem(v), s[sz-1]}, sz -} - -// LookupRune reports the Properties of rune r. -func LookupRune(r rune) Properties { - var buf [4]byte - n := utf8.EncodeRune(buf[:], r) - v, _ := trie.lookup(buf[:n]) - last := byte(r) - if r >= utf8.RuneSelf { - last = 0x80 + byte(r&0x3f) - } - return Properties{elem(v), last} -} - -// Properties provides access to width properties of a rune. -type Properties struct { - elem elem - last byte -} - -func (e elem) kind() Kind { - return Kind(e >> typeShift) -} - -// Kind returns the Kind of a rune as defined in Unicode TR #11. -// See https://unicode.org/reports/tr11/ for more details. -func (p Properties) Kind() Kind { - return p.elem.kind() -} - -// Folded returns the folded variant of a rune or 0 if the rune is canonical. -func (p Properties) Folded() rune { - if p.elem&tagNeedsFold != 0 { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// Narrow returns the narrow variant of a rune or 0 if the rune is already -// narrow or doesn't have a narrow variant. -func (p Properties) Narrow() rune { - if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// Wide returns the wide variant of a rune or 0 if the rune is already -// wide or doesn't have a wide variant. 
-func (p Properties) Wide() rune { - if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) { - buf := inverseData[byte(p.elem)] - buf[buf[0]] ^= p.last - r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) - return r - } - return 0 -} - -// TODO for Properties: -// - Add Fullwidth/Halfwidth or Inverted methods for computing variants -// mapping. -// - Add width information (including information on non-spacing runes). - -// Transformer implements the transform.Transformer interface. -type Transformer struct { - t transform.SpanningTransformer -} - -// Reset implements the transform.Transformer interface. -func (t Transformer) Reset() { t.t.Reset() } - -// Transform implements the transform.Transformer interface. -func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - return t.t.Transform(dst, src, atEOF) -} - -// Span implements the transform.SpanningTransformer interface. -func (t Transformer) Span(src []byte, atEOF bool) (n int, err error) { - return t.t.Span(src, atEOF) -} - -// Bytes returns a new byte slice with the result of applying t to b. -func (t Transformer) Bytes(b []byte) []byte { - b, _, _ = transform.Bytes(t, b) - return b -} - -// String returns a string with the result of applying t to s. -func (t Transformer) String(s string) string { - s, _, _ = transform.String(t, s) - return s -} - -var ( - // Fold is a transform that maps all runes to their canonical width. - // - // Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm - // provide a more generic folding mechanism. - Fold Transformer = Transformer{foldTransform{}} - - // Widen is a transform that maps runes to their wide variant, if - // available. - Widen Transformer = Transformer{wideTransform{}} - - // Narrow is a transform that maps runes to their narrow variant, if - // available. - Narrow Transformer = Transformer{narrowTransform{}} -) - -// TODO: Consider the following options: -// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some -// generalized variant of this. -// - Consider a wide Won character to be the default width (or some generalized -// variant of this). -// - Filter the set of characters that gets converted (the preferred approach is -// to allow applying filters to transforms). diff --git a/constraint/vendor/golang.org/x/time/rate/rate.go b/constraint/vendor/golang.org/x/time/rate/rate.go index 8f6c7f493..93a798ab6 100644 --- a/constraint/vendor/golang.org/x/time/rate/rate.go +++ b/constraint/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. 
func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/constraint/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/constraint/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go index 6b867a46e..c90c6015d 100644 --- a/constraint/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go +++ b/constraint/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go @@ -1105,25 +1105,66 @@ func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { // messages `has(m.x)` is defined as 'defined, but not set`. For proto3, the // macro tests whether the property is set to its default. For map and struct // types, the macro tests whether the property `x` is defined on `m`. +// +// Comprehensions for the standard environment macros evaluation can be best +// visualized as the following pseudocode: +// +// ``` +// let `accu_var` = `accu_init` +// +// for (let `iter_var` in `iter_range`) { +// if (!`loop_condition`) { +// break +// } +// `accu_var` = `loop_step` +// } +// +// return `result` +// ``` +// +// Comprehensions for the optional V2 macros which support map-to-map +// translation differ slightly from the standard environment macros in that +// they expose both the key or index in addition to the value for each list +// or map entry: +// +// ``` +// let `accu_var` = `accu_init` +// +// for (let `iter_var`, `iter_var2` in `iter_range`) { +// if (!`loop_condition`) { +// break +// } +// `accu_var` = `loop_step` +// } +// +// return `result` +// ``` type Expr_Comprehension struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The name of the iteration variable. + // The name of the first iteration variable. + // When the iter_range is a list, this variable is the list element. + // When the iter_range is a map, this variable is the map entry key. IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` - // The range over which var iterates. + // The name of the second iteration variable, empty if not set. + // When the iter_range is a list, this variable is the integer index. + // When the iter_range is a map, this variable is the map entry value. + // This field is only set for comprehension v2 macros. + IterVar2 string `protobuf:"bytes,8,opt,name=iter_var2,json=iterVar2,proto3" json:"iter_var2,omitempty"` + // The range over which the comprehension iterates. IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` // The name of the variable used for accumulation of the result. AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` // The initial value of the accumulator. AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` - // An expression which can contain iter_var and accu_var. + // An expression which can contain iter_var, iter_var2, and accu_var. 
// // Returns false when the result has been computed and may be used as // a hint to short-circuit the remainder of the comprehension. LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` - // An expression which can contain iter_var and accu_var. + // An expression which can contain iter_var, iter_var2, and accu_var. // // Computes the next value of accu_var. LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` @@ -1172,6 +1213,13 @@ func (x *Expr_Comprehension) GetIterVar() string { return "" } +func (x *Expr_Comprehension) GetIterVar2() string { + if x != nil { + return x.IterVar2 + } + return "" +} + func (x *Expr_Comprehension) GetIterRange() *Expr { if x != nil { return x.IterRange @@ -1485,7 +1533,7 @@ var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{ 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xae, 0x0d, 0x0a, 0x04, 0x45, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xcb, 0x0d, 0x0a, 0x04, 0x45, 0x78, 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, @@ -1567,132 +1615,134 @@ var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{ 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xfd, - 0x02, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0x9a, + 0x03, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x3d, 0x0a, 0x0a, 0x69, - 0x74, 0x65, 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, - 0x09, 0x69, 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, - 0x63, 0x75, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, - 0x63, 0x75, 0x56, 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, - 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, - 0x69, 0x74, 0x12, 0x45, 
0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, - 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, - 0x70, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, + 0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x32, 0x12, 0x3d, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, - 0x6f, 0x70, 0x53, 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, - 0x0a, 0x09, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, - 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, - 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, - 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, - 0x00, 0x52, 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, - 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, - 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, - 0x18, 0x01, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, - 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x8c, 0x07, 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, - 0x0a, 0x0e, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, 0x74, + 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, 0x5f, + 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, 0x56, + 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, 0x12, + 0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, - 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, - 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 
0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, - 0x03, 0x0a, 0x09, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, - 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x73, + 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, - 0x65, 0x6e, 0x74, 0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, - 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, - 0x22, 0x6f, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, - 0x15, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, - 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, - 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, - 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x1a, 0x3c, 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x5d, 0x0a, 0x0f, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, 0x53, + 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, - 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, - 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x42, 0x6e, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, + 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, 0x48, + 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x8c, 0x07, 0x0a, + 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, 0x73, + 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, + 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, 0x61, + 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0a, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, 0x03, 0x0a, 0x09, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, 0x61, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x42, 0x0b, 0x53, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, + 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, + 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, + 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, + 0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, + 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x1a, 0x3c, + 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5d, 0x0a, 0x0f, + 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, 0x0a, 0x0e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0x6e, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x53, + 0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/constraint/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/constraint/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index e7d3805e3..f388426b0 100644 --- a/constraint/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/constraint/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -159,14 +159,14 @@ var file_google_api_httpbody_proto_rawDesc = []byte{ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, - 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41, + 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/constraint/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/constraint/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 3e5621827..3cd9a5bb8 100644 --- a/constraint/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/constraint/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -80,11 +80,12 @@ type ErrorInfo struct { Domain string 
`protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` // Additional structured details about this error. // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should + // ideally be lowerCamelCase. Also, they must be limited to 64 characters in // length. When identifying the current value of an exceeded limit, the units // should be contained in the key, not the value. For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // `{"instanceLimit": "100/request"}`, should be returned as, + // `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of // instances that can be created in a single (batch) request. Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -870,6 +871,16 @@ type BadRequest_FieldViolation struct { Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The reason of the field-level error. This is a constant value that + // identifies the proximate cause of the field-level error. It should + // uniquely identify the type of the FieldViolation within the scope of the + // google.rpc.ErrorInfo.domain. This should be at most 63 + // characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, + // which represents UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Provides a localized error message for field-level errors that is safe to + // return to the API consumer. + LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"` } func (x *BadRequest_FieldViolation) Reset() { @@ -918,6 +929,20 @@ func (x *BadRequest_FieldViolation) GetDescription() string { return "" } +func (x *BadRequest_FieldViolation) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage { + if x != nil { + return x.LocalizedMessage + } + return nil +} + // Describes a URL link. 
type Help_Link struct { state protoimpl.MessageState @@ -1026,51 +1051,57 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, - 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, - 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, - 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 
0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, - 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a, + 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 
0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, + 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, + 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, + 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, + 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1111,11 +1142,12 @@ var file_google_rpc_error_details_proto_depIdxs = []int32{ 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } diff --git a/constraint/vendor/google.golang.org/grpc/CONTRIBUTING.md b/constraint/vendor/google.golang.org/grpc/CONTRIBUTING.md index 0854d298e..d9bfa6e1e 100644 --- a/constraint/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/constraint/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! 
Please read the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly. is a great place to start. These issues are well-documented and usually can be resolved with a single pull request. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly. proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. + and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments diff --git a/constraint/vendor/google.golang.org/grpc/balancer/balancer.go b/constraint/vendor/google.golang.org/grpc/balancer/balancer.go index b181f386a..382ad6941 100644 --- a/constraint/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/constraint/vendor/google.golang.org/grpc/balancer/balancer.go @@ -73,17 +73,6 @@ func unregisterForTesting(name string) { delete(m, name) } -// connectedAddress returns the connected address for a SubConnState. The -// address is only valid if the state is READY. -func connectedAddress(scs SubConnState) resolver.Address { - return scs.connectedAddress -} - -// setConnectedAddress sets the connected address for a SubConnState. -func setConnectedAddress(scs *SubConnState, addr resolver.Address) { - scs.connectedAddress = addr -} - func init() { internal.BalancerUnregister = unregisterForTesting internal.ConnectedAddress = connectedAddress @@ -106,54 +95,6 @@ func Get(name string) Builder { return nil } -// A SubConn represents a single connection to a gRPC backend service. -// -// Each SubConn contains a list of addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger the -// connecting, Balancers must call Connect. If a connection re-enters IDLE, -// Balancers must call Connect again to trigger a new connection attempt.
-// -// gRPC will try to connect to the addresses in sequence, and stop trying the -// remainder once the first connection is successful. If an attempt to connect -// to all addresses encounters an error, the SubConn will enter -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. -// -// Once established, if a connection is lost, the SubConn will transition -// directly to IDLE. -// -// This interface is to be implemented by gRPC. Users should not need their own -// implementation of this interface. For situations like testing, any -// implementations should embed this interface. This allows gRPC to add new -// methods to this interface. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully closed, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which must - // be called when the Producer is no longer needed. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() -} - // NewSubConnOptions contains options to create new SubConn. type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -421,18 +362,6 @@ type ExitIdler interface { ExitIdle() } -// SubConnState describes the state of a SubConn. -type SubConnState struct { - // ConnectivityState is the connectivity state of the SubConn. - ConnectivityState connectivity.State - // ConnectionError is set if the ConnectivityState is TransientFailure, - // describing the reason the SubConn failed. Otherwise, it is nil. - ConnectionError error - // connectedAddr contains the connected address when ConnectivityState is - // Ready. Otherwise, it is indeterminate. - connectedAddress resolver.Address -} - // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { @@ -445,20 +374,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") - -// A ProducerBuilder is a simple constructor for a Producer. It is used by the -// SubConn to create producers when needed. -type ProducerBuilder interface { - // Build creates a Producer. The first parameter is always a - // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. 
Should also return a close function that will be called when all - // references to the Producer have been given up. - Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. -type Producer any diff --git a/constraint/vendor/google.golang.org/grpc/balancer/base/balancer.go b/constraint/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8..d5ed172ae 100644 --- a/constraint/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/constraint/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } // If resolver state contains no addresses, return an error so ClientConn - // will trigger re-resolve. Also records this as an resolver error, so when + // will trigger re-resolve. Also records this as a resolver error, so when // the overall state turns transient failure, the error message will have // the zero address information. if len(s.ResolverState.Addresses) == 0 { @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/constraint/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/constraint/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go new file mode 100644 index 000000000..7d66cb491 --- /dev/null +++ b/constraint/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -0,0 +1,35 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the pickfirst package. +package internal + +import ( + rand "math/rand/v2" + "time" +) + +var ( + // RandShuffle pseudo-randomizes the order of addresses. + RandShuffle = rand.Shuffle + // TimeAfterFunc allows mocking the timer for testing connection delay + // related functionality. 
+ TimeAfterFunc = func(d time.Duration, f func()) func() { + timer := time.AfterFunc(d, f) + return func() { timer.Stop() } + } +) diff --git a/constraint/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/constraint/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 5b592f48a..ea8899818 100644 --- a/constraint/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/constraint/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,21 +23,26 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" + rand "math/rand/v2" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + + _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { + if envconfig.NewPickFirstEnabled { + return + } balancer.Register(pickfirstBuilder{}) - internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } var logger = grpclog.Component("pick-first-lb") @@ -50,7 +55,7 @@ const ( type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +// Shuffler is an interface for shuffling an address list. type Shuffler interface { ShuffleAddressListForTesting(n int, swap func(i, j int)) } +// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n +// is the number of elements. swap swaps the elements with indexes i and j. func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { @@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each diff --git a/constraint/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/constraint/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go new file mode 100644 index 000000000..76fa5fea9 --- /dev/null +++ b/constraint/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -0,0 +1,932 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirstleaf contains the pick_first load balancing policy which +// will be the universal leaf policy after dualstack changes are implemented. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package pickfirstleaf + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "net/netip" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" + "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + if envconfig.NewPickFirstEnabled { + // Register as the default pick_first balancer. + Name = "pick_first" + } + balancer.Register(pickfirstBuilder{}) +} + +type ( + // enableHealthListenerKeyType is a unique key type used in resolver + // attributes to indicate whether the health listener usage is enabled. + enableHealthListenerKeyType struct{} + // managedByPickfirstKeyType is an attribute key type to inform Outlier + // Detection that the generic health listener is being used. + // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when + // implementing the dualstack design. This is a hack. Once Dualstack is + // completed, outlier detection will stop sending ejection updates through + // the connectivity listener. + managedByPickfirstKeyType struct{} +) + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + // Name is the name of the pick_first_leaf balancer. + // It is changed to "pick_first" in init() if this balancer is to be + // registered as the default pickfirst. + Name = "pick_first_leaf" + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "disconnection", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) +) + +const ( + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. 
+ connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. + ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 +) + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + target: bo.Target.String(), + metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder. + + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, + } + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (b pickfirstBuilder) Name() string { + return Name +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + +// IsManagedByPickfirst returns whether an address belongs to a SubConn +// managed by the pickfirst LB policy. +// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable +// outlier_detection via the with connectivity listener when using pick_first. +// Once Dualstack changes are complete, all SubConns will be created by +// pick_first and outlier detection will only use the health listener for +// ejection. This hack can then be removed. +func IsManagedByPickfirst(addr resolver.Address) bool { + return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. 
+ effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true) + sd := &scData{ + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, + } + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil +} + +type pickfirstBalancer struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. + mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. + state connectivity.State + // scData for active subonns mapped by address. + subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool +} + +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. +func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. + if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } + return + } + + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + b.cancelConnectionTimer() + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. 
+ b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + var newAddrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + newAddrs = append(newAddrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + newAddrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + newAddrs = append([]resolver.Address{}, newAddrs...) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + } + + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. + // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) + + prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) + prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready + b.addressList.updateAddrs(newAddrs) + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { + return nil + } + + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. 
+ if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.forceUpdateConcludedStateLocked(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.startFirstPassLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.startFirstPassLocked() + } + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.cancelConnectionTimer() + b.state = connectivity.Shutdown +} + +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle { + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.(*scData).connectionFailedInFirstPass = false + } + b.requestConnectionLocked() +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.(*scData).subConn.Shutdown() + } + b.subConns = resolver.NewAddressMap() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMap() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. 
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. +// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMap() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.(*scData).subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMap() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. 
+func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + scd := sd.(*scData) + switch scd.rawConnectivityState { + case connectivity.Idle: + scd.subConn.Connect() + b.scheduleNextConnectionLocked() + return + case connectivity.TransientFailure: + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + scd.connectionFailedInFirstPass = true + lastErr = scd.lastErr + continue + case connectivity.Connecting: + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) + return + + } + } + + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return + } + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { + return + } + if b.logger.V(2) { + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() + } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. + if !b.isActiveSCData(sd) { + return + } + if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown + return + } + + // Record a connection attempt when exiting CONNECTING. 
+ if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) + return + } + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + sd.effectiveState = newState.ConnectivityState + // READY SubConn interspliced in between CONNECTING and IDLE, need to + // account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) + b.addressList.reset() + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. 
+ if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + } + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. + case connectivity.Idle: + sd.subConn.Connect() + } +} + +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if !sd.connectionFailedInFirstPass { + return + } + } + b.firstPass = false + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.rawConnectivityState == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { + activeSD, found := b.subConns.Get(sd.addr) + return found && activeSD == sd +} + +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes + // this SubConn. 
+ if !b.isActiveSCData(sd) { + return + } + sd.effectiveState = state.ConnectivityState + switch state.ConnectivityState { + case connectivity.Ready: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + case connectivity.TransientFailure: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, + }) + case connectivity.Connecting: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + default: + b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) + } +} + +// updateBalancerState stores the state reported to the channel and calls +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate +// updates to the channel. +func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { + // In case of TransientFailures allow the picker to be updated to update + // the connectivity error, in all other cases don't send duplicate state + // updates. + if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { + return + } + b.forceUpdateConcludedStateLocked(newState) +} + +// forceUpdateConcludedStateLocked stores the state reported to the channel and +// calls ClientConn.UpdateState(). +// A separate function is defined to force update the ClientConn state since the +// channel doesn't correctly assume that LB policies start in CONNECTING and +// relies on LB policy to send an initial CONNECTING update. +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { + b.state = newState.ConnectivityState + b.cc.UpdateState(newState) +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + exitIdle func() +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.exitIdle() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. +// This type is not safe for concurrent access. +type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. +// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. 
+func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// hasNext returns whether incrementing the addressList will result in moving +// past the end of the list. If the list has already moved past the end, it +// returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. Here, we only consider +// fields that are meaningful to the SubConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} diff --git a/constraint/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/constraint/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 260255d31..80a42d225 100644 --- a/constraint/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/constraint/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "math/rand" + rand "math/rand/v2" "sync/atomic" "google.golang.org/grpc/balancer" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(rand.Intn(len(scs))), + next: uint32(rand.IntN(len(scs))), } } diff --git a/constraint/vendor/google.golang.org/grpc/balancer/subconn.go b/constraint/vendor/google.golang.org/grpc/balancer/subconn.go new file mode 100644 index 000000000..ea27c4fa7 --- /dev/null +++ b/constraint/vendor/google.golang.org/grpc/balancer/subconn.go @@ -0,0 +1,134 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import ( + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/resolver" +) + +// A SubConn represents a single connection to a gRPC backend service. +// +// All SubConns start in IDLE, and will not try to connect. To trigger a +// connection attempt, Balancers must call Connect. 
+// +// If the connection attempt fails, the SubConn will transition to +// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the +// connection attempt succeeds, it will transition to READY. +// +// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE. +// +// If a connection re-enters IDLE, Balancers must call Connect again to trigger +// a new connection attempt. +// +// Each SubConn contains a list of addresses. gRPC will try to connect to the +// addresses in sequence, and stop trying the remainder once the first +// connection is successful. However, this behavior is deprecated. SubConns +// should only use a single address. +// +// NOTICE: This interface is intended to be implemented by gRPC, or intercepted +// by custom load balancing poilices. Users should not need their own complete +// implementation of this interface -- they should always delegate to a SubConn +// returned by ClientConn.NewSubConn() by embedding it in their implementations. +// An embedded SubConn must never be nil, or runtime panics will occur. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully close, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. + // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() + // RegisterHealthListener registers a health listener that receives health + // updates for a Ready SubConn. Only one health listener can be registered + // at a time. A health listener should be registered each time the SubConn's + // connectivity state changes to READY. Registering a health listener when + // the connectivity state is not READY may result in undefined behaviour. + // This method must not be called synchronously while handling an update + // from a previously registered health listener. + RegisterHealthListener(func(SubConnState)) + // EnforceSubConnEmbedding is included to force implementers to embed + // another implementation of this interface, allowing gRPC to add methods + // without breaking users. + internal.EnforceSubConnEmbedding +} + +// A ProducerBuilder is a simple constructor for a Producer. 
It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address +} + +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer any diff --git a/constraint/vendor/google.golang.org/grpc/balancer_wrapper.go b/constraint/vendor/google.golang.org/grpc/balancer_wrapper.go index 6561b769e..c2688376a 100644 --- a/constraint/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/constraint/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -24,15 +24,25 @@ import ( "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) -var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) +var ( + setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // noOpRegisterHealthListenerFn is used when client side health checking is + // disabled. It sends a single READY update on the registered listener. + noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() { + listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + return func() {} + } +) // ccBalancerWrapper sits between the ClientConn and the Balancer. 
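// Editor's note: illustrative sketch only, not part of the vendored diff. It
// shows the SubConn.RegisterHealthListener contract documented above, as used
// by the pickfirst balancer earlier in this diff and implemented in the
// balancer_wrapper.go hunks below: a fresh health listener is registered on
// every transition to READY, and the channel is only told READY once the
// listener agrees. The healthAwareBalancer type and its wiring are assumptions
// made for this sketch.
package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

type healthAwareBalancer struct {
	cc balancer.ClientConn
}

type readyPicker struct{ sc balancer.SubConn }

func (p *readyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

// onSubConnState would be wired up as the SubConn's StateListener.
func (b *healthAwareBalancer) onSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
	if s.ConnectivityState != connectivity.Ready {
		return // connection-level handling elided in this sketch
	}
	// Per the SubConn contract, register the health listener each time the
	// SubConn reports READY; report READY to the channel only when the health
	// listener (or the no-op fallback) confirms it.
	sc.RegisterHealthListener(func(hs balancer.SubConnState) {
		if hs.ConnectivityState == connectivity.Ready {
			b.cc.UpdateState(balancer.State{
				ConnectivityState: connectivity.Ready,
				Picker:            &readyPicker{sc: sc},
			})
		}
	})
}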
// @@ -187,12 +197,13 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer), stateListener: opts.StateListener, + healthData: newHealthData(connectivity.Idle), } ac.acbw = acbw return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -252,12 +263,39 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + internal.EnforceSubConnEmbedding ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer + producersMu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer + + // Access to healthData is protected by healthMu. + healthMu sync.Mutex + // healthData is stored as a pointer to detect when the health listener is + // dropped or updated. This is required as closures can't be compared for + // equality. + healthData *healthData +} + +// healthData holds data related to health state reporting. +type healthData struct { + // connectivityState stores the most recent connectivity state delivered + // to the LB policy. This is stored to avoid sending updates when the + // SubConn has already exited connectivity state READY. + connectivityState connectivity.State + // closeHealthProducer stores function to close the ref counted health + // producer. The health producer is automatically closed when the SubConn + // state changes. + closeHealthProducer func() +} + +func newHealthData(s connectivity.State) *healthData { + return &healthData{ + connectivityState: s, + closeHealthProducer: func() {}, + } } // updateState is invoked by grpc to push a subConn state update to the @@ -267,6 +305,9 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if ctx.Err() != nil || acbw.ccb.balancer == nil { return } + // Invalidate all producers on any state change. + acbw.closeProducers() + // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. @@ -274,17 +315,25 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if s == connectivity.Ready { setConnectedAddress(&scs, curAddr) } + // Invalidate the health listener by updating the healthData. + acbw.healthMu.Lock() + // A race may occur if a health listener is registered soon after the + // connectivity state is set but before the stateListener is called. + // Two cases may arise: + // 1. The new state is not READY: RegisterHealthListener has checks to + // ensure no updates are sent when the connectivity state is not + // READY. + // 2. The new state is READY: This means that the old state wasn't Ready. + // The RegisterHealthListener API mentions that a health listener + // must not be registered when a SubConn is not ready to avoid such + // races. When this happens, the LB policy would get health updates + // on the old listener. When the LB policy registers a new listener + // on receiving the connectivity update, the health updates will be + // sent to the new health listener. 
+ acbw.healthData = newHealthData(scs.ConnectivityState) + acbw.healthMu.Unlock() + acbw.stateListener(scs) - acbw.ac.mu.Lock() - defer acbw.ac.mu.Unlock() - if s == connectivity.Ready { - // When changing states to READY, reset stateReadyChan. Wait until - // after we notify the LB policy's listener(s) in order to prevent - // ac.getTransport() from unblocking before the LB policy starts - // tracking the subchannel as READY. - close(acbw.ac.stateReadyChan) - acbw.ac.stateReadyChan = make(chan struct{}) - } }) } @@ -301,6 +350,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { + acbw.closeProducers() acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } @@ -308,9 +358,10 @@ func (acbw *acBalancerWrapper) Shutdown() { // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready") + } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } @@ -335,15 +386,15 @@ type refCountedProducer struct { } func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() // Look up existing producer from this builder. pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. @@ -353,13 +404,112 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( // and delete the refCountedProducer from the map if the total reference // count goes to zero. unref := func() { - acbw.mu.Lock() + acbw.producersMu.Lock() + // If closeProducers has already closed this producer instance, refs is + // set to 0, so the check after decrementing will never pass, and the + // producer will not be double-closed. pData.refs-- if pData.refs == 0 { defer pData.close() // Run outside the acbw mutex delete(acbw.producers, pb) } - acbw.mu.Unlock() + acbw.producersMu.Unlock() } return pData.producer, grpcsync.OnceFunc(unref) } + +func (acbw *acBalancerWrapper) closeProducers() { + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() + for pb, pData := range acbw.producers { + pData.refs = 0 + pData.close() + delete(acbw.producers, pb) + } +} + +// healthProducerRegisterFn is a type alias for the health producer's function +// for registering listeners. +type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func() + +// healthListenerRegFn returns a function to register a listener for health +// updates. If client side health checks are disabled, the registered listener +// will get a single READY (raw connectivity state) update. +// +// Client side health checking is enabled when all the following +// conditions are satisfied: +// 1. Health checking is not disabled using the dial option. +// 2. The health package is imported. +// 3. 
The health check config is present in the service config. +func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() { + if acbw.ccb.cc.dopts.disableHealthCheck { + return noOpRegisterHealthListenerFn + } + regHealthLisFn := internal.RegisterClientHealthCheckListener + if regHealthLisFn == nil { + // The health package is not imported. + return noOpRegisterHealthListenerFn + } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } + return func(ctx context.Context, listener func(balancer.SubConnState)) func() { + return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener) + } +} + +// RegisterHealthListener accepts a health listener from the LB policy. It sends +// updates to the health listener as long as the SubConn's connectivity state +// doesn't change and a new health listener is not registered. To invalidate +// the currently registered health listener, acbw updates the healthData. If a +// nil listener is registered, the active health listener is dropped. +func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + acbw.healthData.closeHealthProducer() + // listeners should not be registered when the connectivity state + // isn't Ready. This may happen when the balancer registers a listener + // after the connectivityState is updated, but before it is notified + // of the update. + if acbw.healthData.connectivityState != connectivity.Ready { + return + } + // Replace the health data to stop sending updates to any previously + // registered health listeners. + hd := newHealthData(connectivity.Ready) + acbw.healthData = hd + if listener == nil { + return + } + + registerFn := acbw.healthListenerRegFn() + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Don't send updates if a new listener is registered. + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + // Serialize the health updates from the health producer with + // other calls into the LB policy. + listenerWrapper := func(scs balancer.SubConnState) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + listener(scs) + }) + } + + hd.closeHealthProducer = registerFn(ctx, listenerWrapper) + }) +} diff --git a/constraint/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/constraint/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index fcd1cfe80..21dd72969 100644 --- a/constraint/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/constraint/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 +// protoc-gen-go v1.35.2 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -274,11 +274,9 @@ type GrpcLogEntry struct { func (x *GrpcLogEntry) Reset() { *x = GrpcLogEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcLogEntry) String() string { @@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {} func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -440,11 +438,9 @@ type ClientHeader struct { func (x *ClientHeader) Reset() { *x = ClientHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientHeader) String() string { @@ -455,7 +451,7 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -509,11 +505,9 @@ type ServerHeader struct { func (x *ServerHeader) Reset() { *x = ServerHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHeader) String() string { @@ -524,7 +518,7 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -565,11 +559,9 @@ type Trailer struct { func (x *Trailer) Reset() { *x = Trailer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Trailer) String() string { @@ -580,7 +572,7 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -638,11 +630,9 @@ type Message struct { func (x *Message) Reset() { *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Message) String() string { @@ -653,7 +643,7 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -713,11 +703,9 @@ type Metadata struct { func (x *Metadata) Reset() { *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Metadata) String() string { @@ -728,7 +716,7 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -762,11 +750,9 @@ type MetadataEntry struct { func (x *MetadataEntry) Reset() { *x = MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetadataEntry) String() string { @@ -777,7 +763,7 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -820,11 +806,9 @@ type Address struct { func (x *Address) Reset() { *x = Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -835,7 +819,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1015,7 +999,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1057,105 +1041,7 @@ func 
file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Trailer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Message); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/constraint/vendor/google.golang.org/grpc/clientconn.go b/constraint/vendor/google.golang.org/grpc/clientconn.go index 9c8850e3f..4f57b5543 100644 --- a/constraint/vendor/google.golang.org/grpc/clientconn.go +++ b/constraint/vendor/google.golang.org/grpc/clientconn.go @@ -775,10 +775,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } - var balCfg serviceconfig.LoadBalancingConfig - if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig - } + balCfg := cc.sc.lbConfig bw := cc.balancerWrapper cc.mu.Unlock() @@ -825,14 +822,13 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. 
} ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddresses(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateReadyChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -1141,10 +1137,15 @@ func (cc *ClientConn) Close() error { <-cc.resolverWrapper.serializer.Done() <-cc.balancerWrapper.serializer.Done() - + var wg sync.WaitGroup for ac := range conns { - ac.tearDown(ErrClientConnClosing) + wg.Add(1) + go func(ac *addrConn) { + defer wg.Done() + ac.tearDown(ErrClientConnClosing) + }(ac) } + wg.Wait() cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being @@ -1179,8 +1180,7 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateReadyChan chan struct{} // closed and recreated on every READY state change. + state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1251,6 +1251,8 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + // TODO: #7534 - Move re-resolution requests into the pick_first LB policy + // to ensure one resolution request per pass instead of per subconn failure. ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.mu.Lock() if acCtx.Err() != nil { @@ -1292,7 +1294,7 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// tryAllAddrs tries to create a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { @@ -1369,7 +1371,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) @@ -1443,7 +1445,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } - healthCheckFunc := ac.cc.dopts.healthCheckFunc + healthCheckFunc := internal.HealthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check function. // @@ -1475,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. 
go func() { - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") @@ -1504,29 +1506,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } -// getTransport waits until the addrconn is ready and returns the transport. -// If the context expires first, returns an appropriate status. If the -// addrConn is stopped first, returns an Unavailable status error. -func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { - for ctx.Err() == nil { - ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateReadyChan - ac.mu.Unlock() - if state == connectivity.Ready { - return t, nil - } - if state == connectivity.Shutdown { - return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") - } - - select { - case <-ctx.Done(): - case <-sc: - } - } - return nil, status.FromContextError(ctx.Err()).Err() -} - // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct diff --git a/constraint/vendor/google.golang.org/grpc/codec.go b/constraint/vendor/google.golang.org/grpc/codec.go index e840858b7..959c2f99d 100644 --- a/constraint/vendor/google.golang.org/grpc/codec.go +++ b/constraint/vendor/google.golang.org/grpc/codec.go @@ -71,7 +71,7 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { if err != nil { return nil, err } - return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil + return mem.BufferSlice{mem.SliceBuffer(data)}, nil } func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { diff --git a/constraint/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/constraint/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443..4c805c644 100644 --- a/constraint/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/constraint/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/constraint/vendor/google.golang.org/grpc/credentials/tls.go b/constraint/vendor/google.golang.org/grpc/credentials/tls.go index 411435854..bd5fe22b6 100644 --- a/constraint/vendor/google.golang.org/grpc/credentials/tls.go +++ b/constraint/vendor/google.golang.org/grpc/credentials/tls.go @@ -32,6 +32,8 @@ import ( "google.golang.org/grpc/internal/envconfig" ) +const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434" + var logger = grpclog.Component("credentials") // TLSInfo contains the auth information for a TLS authenticated connection. 
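// Editor's note: illustrative sketch only, not part of the vendored diff. The
// credentials/tls.go hunks below move the gRPC TLS defaults (TLS >= 1.2, "h2"
// appended to NextProtos, RFC 7540 forbidden cipher suites filtered out) into
// applyDefaults and also apply them to any config returned by
// GetConfigForClient. A server handing out per-client configs therefore no
// longer needs to set those fields itself. Certificate loading is assumed.
package example

import (
	"crypto/tls"

	"google.golang.org/grpc/credentials"
)

func serverCreds(cert tls.Certificate) credentials.TransportCredentials {
	base := &tls.Config{
		// Configs returned here now get the same defaults applied as the
		// base config passed to NewTLS.
		GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) {
			return &tls.Config{Certificates: []tls.Certificate{cert}}, nil
		},
	}
	return credentials.NewTLS(base)
}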
@@ -128,7 +130,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon if np == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) } @@ -158,7 +160,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) if cs.NegotiatedProtocol == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } else if logger.V(2) { logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") } @@ -200,25 +202,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{ // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + config := applyDefaults(c) + if config.GetConfigForClient != nil { + oldFn := config.GetConfigForClient + config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + cfgForClient, err := oldFn(hello) + if err != nil || cfgForClient == nil { + return cfgForClient, err + } + return applyDefaults(cfgForClient), nil + } + } + return &tlsCreds{config: config} +} + +func applyDefaults(c *tls.Config) *tls.Config { + config := credinternal.CloneTLSConfig(c) + config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos) // If the user did not configure a MinVersion and did not configure a // MaxVersion < 1.2, use MinVersion=1.2, which is required by // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 + if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) { + config.MinVersion = tls.VersionTLS12 } // If the user did not configure CipherSuites, use all "secure" cipher // suites reported by the TLS package, but remove some explicitly forbidden // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { + if config.CipherSuites == nil { for _, cs := range tls.CipherSuites() { if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + config.CipherSuites = append(config.CipherSuites, cs.ID) } } } - return tc + return config } // NewClientTLSFromCert constructs TLS credentials from the provided root diff --git a/constraint/vendor/google.golang.org/grpc/dialoptions.go b/constraint/vendor/google.golang.org/grpc/dialoptions.go index 27c1b9bb6..f3a045296 100644 --- a/constraint/vendor/google.golang.org/grpc/dialoptions.go +++ b/constraint/vendor/google.golang.org/grpc/dialoptions.go @@ -87,7 +87,6 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool - healthCheckFunc 
internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string @@ -429,6 +428,11 @@ func WithTimeout(d time.Duration) DialOption { // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. // +// Note that gRPC by default performs name resolution on the target passed to +// NewClient. To bypass name resolution and cause the target string to be +// passed directly to the dialer here instead, use the "passthrough" resolver +// by specifying it in the target string, e.g. "passthrough:target". +// // Note: All supported releases of Go (as of December 2023) override the OS // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive // with OS defaults for keepalive time and interval, use a net.Dialer that sets @@ -436,7 +440,7 @@ func WithTimeout(d time.Duration) DialOption { // option to true from the Control field. For a concrete example of how to do // this, see internal.NetDialerWithTCPKeepalive(). // -// For more information, please see [issue 23459] in the Go github repo. +// For more information, please see [issue 23459] in the Go GitHub repo. // // [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { @@ -445,10 +449,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } -func init() { - internal.WithHealthCheckFunc = withHealthCheckFunc -} - // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -660,16 +662,6 @@ func WithDisableHealthCheck() DialOption { }) } -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. -// -// For testing purpose only. 
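// Editor's note: illustrative sketch only, not part of the vendored diff. The
// new WithContextDialer doc note above points out that gRPC resolves the
// target by default; to hand the raw target string straight to a custom
// dialer, use the "passthrough" scheme as the note suggests. The socket path
// and target string here are placeholders.
package example

import (
	"context"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialUnix(socketPath string) (*grpc.ClientConn, error) {
	dialer := func(ctx context.Context, addr string) (net.Conn, error) {
		// addr is the unresolved target ("my-socket") because of the
		// passthrough scheme; this sketch ignores it and dials the socket.
		var d net.Dialer
		return d.DialContext(ctx, "unix", socketPath)
	}
	return grpc.NewClient(
		"passthrough:my-socket",
		grpc.WithContextDialer(dialer),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
}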
-func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ @@ -680,7 +672,6 @@ func defaultDialOptions() dialOptions { BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, - healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, diff --git a/constraint/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/constraint/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index 930140f57..ad75313a1 100644 --- a/constraint/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/constraint/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -20,10 +20,10 @@ package stats import ( "maps" - "testing" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" ) func init() { @@ -35,7 +35,7 @@ var logger = grpclog.Component("metrics-registry") // DefaultMetrics are the default metrics registered through global metrics // registry. This is written to at initialization time only, and is read only // after initialization. -var DefaultMetrics = NewMetrics() +var DefaultMetrics = stats.NewMetricSet() // MetricDescriptor is the data for a registered metric. type MetricDescriptor struct { @@ -43,7 +43,7 @@ type MetricDescriptor struct { // (including any per call metrics). See // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions // for metric naming conventions. - Name Metric + Name string // The description of this metric. Description string // The unit (e.g. entries, seconds) of this metric. @@ -155,27 +155,27 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels . } // registeredMetrics are the registered metric descriptor names. -var registeredMetrics = make(map[Metric]bool) +var registeredMetrics = make(map[string]bool) // metricsRegistry contains all of the registered metrics. // // This is written to only at init time, and read only after that. -var metricsRegistry = make(map[Metric]*MetricDescriptor) +var metricsRegistry = make(map[string]*MetricDescriptor) // DescriptorForMetric returns the MetricDescriptor from the global registry. // // Returns nil if MetricDescriptor not present. -func DescriptorForMetric(metric Metric) *MetricDescriptor { - return metricsRegistry[metric] +func DescriptorForMetric(metricName string) *MetricDescriptor { + return metricsRegistry[metricName] } -func registerMetric(name Metric, def bool) { - if registeredMetrics[name] { - logger.Fatalf("metric %v already registered", name) +func registerMetric(metricName string, def bool) { + if registeredMetrics[metricName] { + logger.Fatalf("metric %v already registered", metricName) } - registeredMetrics[name] = true + registeredMetrics[metricName] = true if def { - DefaultMetrics = DefaultMetrics.Add(name) + DefaultMetrics = DefaultMetrics.Add(metricName) } } @@ -250,21 +250,21 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { } // snapshotMetricsRegistryForTesting snapshots the global data of the metrics -// registry. Registers a cleanup function on the provided testing.T that sets -// the metrics registry to its original state. Only called in testing functions. 
-func snapshotMetricsRegistryForTesting(t *testing.T) { +// registry. Returns a cleanup function that sets the metrics registry to its +// original state. +func snapshotMetricsRegistryForTesting() func() { oldDefaultMetrics := DefaultMetrics oldRegisteredMetrics := registeredMetrics oldMetricsRegistry := metricsRegistry - registeredMetrics = make(map[Metric]bool) - metricsRegistry = make(map[Metric]*MetricDescriptor) + registeredMetrics = make(map[string]bool) + metricsRegistry = make(map[string]*MetricDescriptor) maps.Copy(registeredMetrics, registeredMetrics) maps.Copy(metricsRegistry, metricsRegistry) - t.Cleanup(func() { + return func() { DefaultMetrics = oldDefaultMetrics registeredMetrics = oldRegisteredMetrics metricsRegistry = oldMetricsRegistry - }) + } } diff --git a/constraint/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/constraint/vendor/google.golang.org/grpc/experimental/stats/metrics.go index 3221f7a63..ee1423605 100644 --- a/constraint/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/constraint/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -19,7 +19,7 @@ // Package stats contains experimental metrics/stats API's. package stats -import "maps" +import "google.golang.org/grpc/stats" // MetricsRecorder records on metrics derived from metric registry. type MetricsRecorder interface { @@ -40,75 +40,15 @@ type MetricsRecorder interface { RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) } -// Metric is an identifier for a metric. -type Metric string +// Metrics is an experimental legacy alias of the now-stable stats.MetricSet. +// Metrics will be deleted in a future release. +type Metrics = stats.MetricSet -// Metrics is a set of metrics to record. Once created, Metrics is immutable, -// however Add and Remove can make copies with specific metrics added or -// removed, respectively. -// -// Do not construct directly; use NewMetrics instead. -type Metrics struct { - // metrics are the set of metrics to initialize. - metrics map[Metric]bool -} +// Metric was replaced by direct usage of strings. +type Metric = string -// NewMetrics returns a Metrics containing Metrics. +// NewMetrics is an experimental legacy alias of the now-stable +// stats.NewMetricSet. NewMetrics will be deleted in a future release. func NewMetrics(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Metrics returns the metrics set. The returned map is read-only and must not -// be modified. -func (m *Metrics) Metrics() map[Metric]bool { - return m.metrics -} - -// Add adds the metrics to the metrics set and returns a new copy with the -// additional metrics. -func (m *Metrics) Add(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Join joins the metrics passed in with the metrics set, and returns a new copy -// with the merged metrics. -func (m *Metrics) Join(metrics *Metrics) *Metrics { - newMetrics := make(map[Metric]bool) - maps.Copy(newMetrics, m.metrics) - maps.Copy(newMetrics, metrics.metrics) - return &Metrics{ - metrics: newMetrics, - } -} - -// Remove removes the metrics from the metrics set and returns a new copy with -// the metrics removed. 
-func (m *Metrics) Remove(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - delete(newMetrics, metric) - } - return &Metrics{ - metrics: newMetrics, - } + return stats.NewMetricSet(metrics...) } diff --git a/constraint/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/constraint/vendor/google.golang.org/grpc/grpclog/internal/logger.go index 0d9a824ce..e524fdd40 100644 --- a/constraint/vendor/google.golang.org/grpc/grpclog/internal/logger.go +++ b/constraint/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -81,7 +81,7 @@ func (l *LoggerWrapper) Errorf(format string, args ...any) { } // V reports whether verbosity level l is at least the requested verbose level. -func (*LoggerWrapper) V(l int) bool { +func (*LoggerWrapper) V(int) bool { // Returns true for all verbose level. return true } diff --git a/constraint/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/constraint/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index 07df71e98..ed90060c3 100644 --- a/constraint/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go +++ b/constraint/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -101,6 +101,22 @@ var severityName = []string{ fatalLog: "FATAL", } +// sprintf is fmt.Sprintf. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintf = fmt.Sprintf + +// sprint is fmt.Sprint. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprint = fmt.Sprint + +// sprintln is fmt.Sprintln. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintln = fmt.Sprintln + +// exit is os.Exit. +// This var exists to make it possible to test functions calling os.Exit. +var exit = os.Exit + // loggerT is the default logger used by grpclog. type loggerT struct { m []*log.Logger @@ -111,7 +127,7 @@ type loggerT struct { func (g *loggerT) output(severity int, s string) { sevStr := severityName[severity] if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + g.m[severity].Output(2, sevStr+": "+s) return } // TODO: we can also include the logging component, but that needs more @@ -123,55 +139,79 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } +func (g *loggerT) printf(severity int, format string, args ...any) { + // Note the discard check is duplicated in each print func, rather than in + // output, to avoid the expensive Sprint calls. + // De-duplicating this by moving to output would be a significant performance regression! + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintf(format, args...)) +} + +func (g *loggerT) print(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprint(v...)) +} + +func (g *loggerT) println(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintln(v...)) +} + func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) + g.print(infoLog, args...) } func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) + g.println(infoLog, args...) 
} func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) + g.printf(infoLog, format, args...) } func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) + g.print(warningLog, args...) } func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) + g.println(warningLog, args...) } func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) + g.printf(warningLog, format, args...) } func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) + g.print(errorLog, args...) } func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) + g.println(errorLog, args...) } func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) + g.printf(errorLog, format, args...) } func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) + g.print(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) + g.println(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) + g.printf(fatalLog, format, args...) + exit(1) } func (g *loggerT) V(l int) bool { @@ -186,19 +226,42 @@ type LoggerV2Config struct { FormatJSON bool } +// combineLoggers returns a combined logger for both higher & lower severity logs, +// or only one if the other is io.Discard. +// +// This uses io.Discard instead of io.MultiWriter when all loggers +// are set to io.Discard. Both this package and the standard log package have +// significant optimizations for io.Discard, which io.MultiWriter lacks (as of +// this writing). +func combineLoggers(lower, higher io.Writer) io.Writer { + if lower == io.Discard { + return higher + } + if higher == io.Discard { + return lower + } + return io.MultiWriter(lower, higher) +} + // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. // The infoW, warningW, and errorW writers are used to write log messages of // different severity levels. func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { - var m []*log.Logger flag := log.LstdFlags if c.FormatJSON { flag = 0 } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) + + warningW = combineLoggers(infoW, warningW) + errorW = combineLoggers(errorW, warningW) + + fatalW := errorW + + m := []*log.Logger{ + log.New(infoW, "", flag), + log.New(warningW, "", flag), + log.New(errorW, "", flag), + log.New(fatalW, "", flag), + } return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} } diff --git a/constraint/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/constraint/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index e65cf0ea1..467de16bd 100644 --- a/constraint/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/constraint/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 +// protoc-gen-go v1.35.2 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -99,11 +99,9 @@ type HealthCheckRequest struct { func (x *HealthCheckRequest) Reset() { *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckRequest) String() string { @@ -114,7 +112,7 @@ func (*HealthCheckRequest) ProtoMessage() {} func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -146,11 +144,9 @@ type HealthCheckResponse struct { func (x *HealthCheckResponse) Reset() { *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckResponse) String() string { @@ -161,7 +157,7 @@ func (*HealthCheckResponse) ProtoMessage() {} func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -237,7 +233,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ +var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse @@ -260,32 +256,6 @@ func file_grpc_health_v1_health_proto_init() { if File_grpc_health_v1_health_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/constraint/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/constraint/vendor/google.golang.org/grpc/internal/backoff/backoff.go index b15cf482d..b6ae7f258 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/constraint/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,7 +25,7 @@ package backoff import ( "context" "errors" 
- "math/rand" + rand "math/rand/v2" "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git a/constraint/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/constraint/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 13821a926..85540f86a 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/constraint/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -33,6 +33,8 @@ type lbConfig struct { childConfig serviceconfig.LoadBalancingConfig } +// ChildName returns the name of the child balancer of the gracefulswitch +// Balancer. func ChildName(l serviceconfig.LoadBalancingConfig) string { return l.(*lbConfig).childBuilder.Name() } diff --git a/constraint/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/constraint/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a87..966932891 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/constraint/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/constraint/vendor/google.golang.org/grpc/internal/channelz/channel.go b/constraint/vendor/google.golang.org/grpc/internal/channelz/channel.go index d7e9e1d54..3ec662799 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/channelz/channel.go +++ b/constraint/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -43,6 +43,8 @@ type Channel struct { // Non-zero traceRefCount means the trace of this channel cannot be deleted. traceRefCount int32 + // ChannelMetrics holds connectivity state, target and call metrics for the + // channel within channelz. ChannelMetrics ChannelMetrics } @@ -50,6 +52,8 @@ type Channel struct { // nesting. func (c *Channel) channelzIdentifier() {} +// String returns a string representation of the Channel, including its parent +// entity and ID. func (c *Channel) String() string { if c.Parent == nil { return fmt.Sprintf("Channel #%d", c.ID) @@ -61,24 +65,31 @@ func (c *Channel) id() int64 { return c.ID } +// SubChans returns a copy of the map of sub-channels associated with the +// Channel. func (c *Channel) SubChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.subChans) } +// NestedChans returns a copy of the map of nested channels associated with the +// Channel. func (c *Channel) NestedChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.nestedChans) } +// Trace returns a copy of the Channel's trace data. func (c *Channel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() return c.trace.copy() } +// ChannelMetrics holds connectivity state, target and call metrics for the +// channel within channelz. type ChannelMetrics struct { // The current connectivity state of the channel. State atomic.Pointer[connectivity.State] @@ -136,12 +147,16 @@ func strFromPointer(s *string) string { return *s } +// String returns a string representation of the ChannelMetrics, including its +// state, target, and call metrics. 
func (c *ChannelMetrics) String() string { return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), ) } +// NewChannelMetricForTesting creates a new instance of ChannelMetrics with +// specified initial values for testing purposes. func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { c := &ChannelMetrics{} c.State.Store(&state) diff --git a/constraint/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/constraint/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index bb531225d..64c791953 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/constraint/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/constraint/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/constraint/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e150..078bb8123 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/constraint/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. EntriesPerPage = 50 curState int32 diff --git a/constraint/vendor/google.golang.org/grpc/internal/channelz/server.go b/constraint/vendor/google.golang.org/grpc/internal/channelz/server.go index cdfc49d6e..b5a824992 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/channelz/server.go +++ b/constraint/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se return sm } +// CopyFrom copies the metrics data from the provided ServerMetrics +// instance into the current instance. func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { sm.CallsStarted.Store(o.CallsStarted.Load()) sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) diff --git a/constraint/vendor/google.golang.org/grpc/internal/channelz/socket.go b/constraint/vendor/google.golang.org/grpc/internal/channelz/socket.go index fa64834b2..90103847c 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/channelz/socket.go +++ b/constraint/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct { RemoteFlowControlWindow int64 } +// SocketType represents the type of socket. type SocketType string +// SocketType can be one of these. const ( SocketTypeNormal = "NormalSocket" SocketTypeListen = "ListenSocket" ) +// Socket represents a socket within channelz which includes socket +// metrics and data related to socket activity and provides methods +// for managing and interacting with sockets. 
type Socket struct { Entity SocketType SocketType @@ -100,6 +105,8 @@ type Socket struct { Security credentials.ChannelzSecurityValue } +// String returns a string representation of the Socket, including its parent +// entity, socket type, and ID. func (ls *Socket) String() string { return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) } diff --git a/constraint/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/constraint/vendor/google.golang.org/grpc/internal/channelz/subchannel.go index 3b88e4cba..b20802e6e 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/channelz/subchannel.go +++ b/constraint/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 { return sc.ID } +// Sockets returns a copy of the sockets map associated with the SubChannel. func (sc *SubChannel) Sockets() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(sc.sockets) } +// Trace returns a copy of the ChannelTrace associated with the SubChannel. func (sc *SubChannel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() diff --git a/constraint/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/constraint/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index d1ed8df6a..0e6e18e18 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go +++ b/constraint/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -35,13 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(any) *SocketOptionData { return nil } diff --git a/constraint/vendor/google.golang.org/grpc/internal/channelz/trace.go b/constraint/vendor/google.golang.org/grpc/internal/channelz/trace.go index 36b867403..2bffe4777 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/constraint/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -79,13 +79,21 @@ type TraceEvent struct { Parent *TraceEvent } +// ChannelTrace provides tracing information for a channel. +// It tracks various events and metadata related to the channel's lifecycle +// and operations. type ChannelTrace struct { - cm *channelMap - clearCalled bool + cm *channelMap + clearCalled bool + // The time when the trace was created. CreationTime time.Time - EventNum int64 - mu sync.Mutex - Events []*traceEvent + // A counter for the number of events recorded in the + // trace. + EventNum int64 + mu sync.Mutex + // A slice of traceEvent pointers representing the events recorded for + // this channel. 
+ Events []*traceEvent } func (c *ChannelTrace) copy() *ChannelTrace { @@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{ RefNormalSocket: "NormalSocket", } +// String returns a string representation of the RefChannelType func (r RefChannelType) String() string { return refChannelTypeToString[r] } diff --git a/constraint/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/constraint/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 00abc7c2b..1e42b6fdc 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/constraint/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -45,11 +45,16 @@ var ( // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". - EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) // XDSFallbackSupport is the env variable that controls whether support for // xDS fallback is turned on. If this is unset or is false, only the first // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used + // instead of the exiting pickfirst implementation. This can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" + // to "true". + NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/constraint/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/constraint/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 29f234acb..9afeb444d 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/constraint/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -53,4 +53,10 @@ var ( // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + + // XDSDualstackEndpointsEnabled is true if gRPC should read the + // "additional addresses" in the xDS endpoint resource. + // TODO: https://github.com/grpc/grpc-go/issues/7866 - Control this using + // an env variable when all LB policies handle endpoints. + XDSDualstackEndpointsEnabled = false ) diff --git a/constraint/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/constraint/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 19b9d6392..8e8e86128 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/constraint/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,7 +53,7 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// TrySchedule tries to schedules the provided callback function f to be +// TrySchedule tries to schedule the provided callback function f to be // executed in the order it was added. This is a best-effort operation. If the // context passed to NewCallbackSerializer was canceled before this method is // called, the callback will not be scheduled. 
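Editor's note on the envconfig changes above (not part of the vendored diff): this bump flips GRPC_ENFORCE_ALPN_ENABLED and GRPC_EXPERIMENTAL_XDS_FALLBACK to default-true and adds GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST. These values are read once, when the grpc packages initialize, so they must already be in the process environment when the program starts; calling os.Setenv from main() is too late. The sketch below is illustrative only and the client binary name is hypothetical; it shows one way a Go supervisor could launch a client with the previous ALPN behaviour restored and the new pick_first policy opted in.

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Hypothetical client binary; replace with the real program under test.
	cmd := exec.Command("./my-grpc-client")
	cmd.Env = append(os.Environ(),
		// Opt out of ALPN enforcement (enabled by default in this version).
		"GRPC_ENFORCE_ALPN_ENABLED=false",
		// Opt in to the experimental new pick_first implementation.
		"GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST=true",
	)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("client exited with error: %v", err)
	}
}

Setting the same variables in a systemd unit, container spec, or shell works equally well; the only requirement is that they are visible before the grpc packages initialize.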
diff --git a/constraint/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/constraint/vendor/google.golang.org/grpc/internal/grpcutil/method.go index ec62b4775..683d1955c 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/constraint/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) { } // baseContentType is the base content-type for gRPC. This is a valid -// content-type on it's own, but can also include a content-subtype such as +// content-type on its own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. diff --git a/constraint/vendor/google.golang.org/grpc/internal/idle/idle.go b/constraint/vendor/google.golang.org/grpc/internal/idle/idle.go index fe49cb74c..2c13ee9da 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/constraint/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool { return true } +// EnterIdleModeForTesting instructs the channel to enter idle mode. func (m *Manager) EnterIdleModeForTesting() { m.tryEnterIdleMode() } @@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error { // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. + // here. The first one to get the lock would get the channel to exit idle. // - Channel is not in idle mode, and the user calls Connect which calls // m.ExitIdleMode. // @@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } +// Close stops the timer associated with the Manager, if it exists. func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) diff --git a/constraint/vendor/google.golang.org/grpc/internal/internal.go b/constraint/vendor/google.golang.org/grpc/internal/internal.go index 65f936a62..c17b98194 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/internal.go +++ b/constraint/vendor/google.golang.org/grpc/internal/internal.go @@ -29,10 +29,12 @@ import ( ) var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker + // RegisterClientHealthCheckListener is used to provide a listener for + // updates from the client-side health checking service. It returns a + // function that can be called to stop the health producer. + RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() // BalancerUnregister is exported by package balancer to unregister a balancer. BalancerUnregister func(name string) // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by @@ -149,6 +151,20 @@ var ( // other features, including the CSDS service. 
NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + // NewXDSResolverWithClientForTesting creates a new xDS resolver builder + // using the provided xDS client instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment // variable. @@ -183,7 +199,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -191,6 +207,8 @@ var ( // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + // ChannelzTurnOffForTesting disables the Channelz service for testing + // purposes. ChannelzTurnOffForTesting func() // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to @@ -203,11 +221,7 @@ var ( // UserSetDefaultScheme is set to true if the user has overridden the // default resolver scheme. - UserSetDefaultScheme bool = false - - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + UserSetDefaultScheme = false // ConnectedAddress returns the connected address for a SubConnState. The // address is only valid if the state is READY. @@ -217,10 +231,9 @@ var ( SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) // SnapshotMetricRegistryForTesting snapshots the global data of the metric - // registry. Registers a cleanup function on the provided testing.T that - // sets the metric registry to its original state. Only called in testing - // functions. - SnapshotMetricRegistryForTesting any // func(t *testing.T) + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() // SetDefaultBufferPoolForTesting updates the default buffer pool, for // testing purposes. @@ -236,7 +249,7 @@ var ( // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). +// its health back by calling setConnectivityState(). // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md @@ -258,3 +271,9 @@ const ( // It currently has an experimental suffix which would be removed once // end-to-end testing of the policy is completed. 
const RLSLoadBalancingPolicyName = "rls_experimental" + +// EnforceSubConnEmbedding is used to enforce proper SubConn implementation +// embedding. +type EnforceSubConnEmbedding interface { + enforceSubConnEmbedding() +} diff --git a/constraint/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/constraint/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 4552db16b..ba5c5a95d 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/constraint/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,8 +24,9 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "net" + "net/netip" "os" "strconv" "strings" @@ -122,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts } // IP address. - if ipAddr, ok := formatIP(host); ok { + if ipAddr, err := formatIP(host); err == nil { addr := []resolver.Address{{Addr: ipAddr + ":" + port}} cc.UpdateState(resolver.State{Addresses: addr}) return deadResolver{}, nil @@ -177,7 +178,7 @@ type dnsResolver struct { // finished. Otherwise, data race will be possible. [Race Example] in // dns_resolver_test we replace the real lookup functions with mocked ones to // facilitate testing. If Close() doesn't wait for watcher() goroutine - // finishes, race detector sometimes will warns lookup (READ the lookup + // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup @@ -237,7 +238,9 @@ func (d *dnsResolver) watcher() { } func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { - if !EnableSRVLookups { + // Skip this particular host to avoid timeouts with some versions of + // systemd-resolved. + if !EnableSRVLookups || d.host == "metadata.google.internal." { return nil, nil } var newAddrs []resolver.Address @@ -258,9 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) return nil, err } for _, a := range lbAddrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) @@ -320,9 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error } newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) @@ -349,19 +352,19 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// formatIP returns an error if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and error = nil. // If addr is an IPv6 address, return the addr enclosed in square brackets and -// ok = true. 
-func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false +// error = nil. +func formatIP(addr string) (string, error) { + ip, err := netip.ParseAddr(addr) + if err != nil { + return "", err } - if ip.To4() != nil { - return addr, true + if ip.Is4() { + return addr, nil } - return "[" + addr + "]", true + return "[" + addr + "]", nil } // parseTarget takes the user input target string and default port, returns @@ -377,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", internal.ErrMissingAddr } - if ip := net.ParseIP(target); ip != nil { + if _, err := netip.ParseAddr(target); err == nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } @@ -425,7 +428,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return rand.Intn(100)+1 <= *a + return rand.IntN(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/constraint/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/constraint/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index afac56572..b901c7bac 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/constraint/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -55,7 +55,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/constraint/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/constraint/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index be110d41f..79044657b 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/constraint/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -54,6 +54,8 @@ func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { } } +// RecordInt64Count records the measurement alongside labels on the int +// count associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -62,6 +64,8 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordFloat64Count records the measurement alongside labels on the float +// count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -70,6 +74,8 @@ func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHand } } +// RecordInt64Histo records the measurement alongside labels on the int +// histo associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -78,6 +84,8 @@ func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, } } +// RecordFloat64Histo records the measurement alongside labels on the float +// histo associated with the provided handle. 
func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -86,6 +94,8 @@ func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHand } } +// RecordInt64Gauge records the measurement alongside labels on the int +// gauge associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) diff --git a/constraint/vendor/google.golang.org/grpc/internal/status/status.go b/constraint/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc8205..1186f1e9a 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/status/status.go +++ b/constraint/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,17 +138,19 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. +// If the detail can be decoded, the proto message returned is of the same +// type that was given to WithDetails(). func (s *Status) Details() []any { if s == nil || s.s == nil { return nil @@ -160,7 +162,38 @@ func (s *Status) Details() []any { details = append(details, err) continue } - details = append(details, detail) + // The call to MessageV1Of is required to unwrap the proto message if + // it implemented only the MessageV1 API. The proto message would have + // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are + // added to a global registry used by any.UnmarshalNew(). + // MessageV1Of has the following behaviour: + // 1. If the given message is a wrapped MessageV1, it returns the + // unwrapped value. + // 2. If the given message already implements MessageV1, it returns it + // as is. + // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper. + // + // Since the Status.WithDetails() API only accepts MessageV1, calling + // MessageV1Of ensures we return the same type that was given to + // WithDetails: + // * If the give type implemented only MessageV1, the unwrapping from + // point 1 above will restore the type. + // * If the given type implemented both MessageV1 and MessageV2, point 2 + // above will ensure no wrapping is performed. + // * If the given type implemented only MessageV2 and was wrapped using + // MessageV1Of before passing to WithDetails(), it would be unwrapped + // in WithDetails by calling MessageV2Of(). Point 3 above will ensure + // that the type is wrapped in a MessageV1 wrapper again before + // returning. Note that protoc-gen-go doesn't generate code which + // implements ONLY MessageV2 at the time of writing. + // + // NOTE: Status details can also be added using the FromProto method. + // This could theoretically allow passing a Detail message that only + // implements the V2 API. In such a case the message will be wrapped in + // a MessageV1 wrapper when fetched using Details(). 
+ // Since protoc-gen-go generates only code that implements both V1 and + // V2 APIs for backward compatibility, this is not a concern. + details = append(details, protoadapt.MessageV1Of(detail)) } return details } diff --git a/constraint/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/constraint/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 999f52cd7..54c24c2ff 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/constraint/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -58,20 +58,20 @@ func GetRusage() *Rusage { // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux environments. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux environments. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/constraint/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/constraint/vendor/google.golang.org/grpc/internal/transport/client_stream.go new file mode 100644 index 000000000..8ed347c54 --- /dev/null +++ b/constraint/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync/atomic" + + "golang.org/x/net/http2" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ClientStream implements streaming functionality for a gRPC client. +type ClientStream struct { + *Stream // Embed for common stream functionality. + + ct *http2Client + done chan struct{} // closed at the end of stream to unblock writers. + doneFunc func() // invoked at the end of stream. + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). + headerValid bool + header metadata.MD // the received header metadata + noHeaders bool // set if the client never received headers (set only after the stream is done). 
+ + bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream + + status *status.Status // the status error received from the server +} + +// Read reads an n byte message from the input stream. +func (s *ClientStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.ct.incrMsgRecv() + } + return b, err +} + +// Close closes the stream and popagates err to any readers. +func (s *ClientStream) Close(err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.ct.write(s, hdr, data, opts) +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *ClientStream) BytesReceived() bool { + return s.bytesReceived.Load() +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *ClientStream) Unprocessed() bool { + return s.unprocessed.Load() +} + +func (s *ClientStream) waitOnHeader() { + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.Close(ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ClientStream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *ClientStream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. Acquires the key-value +// pairs of header metadata once it is available. It blocks until i) the +// metadata is ready or ii) there is no header metadata or iii) the stream is +// canceled/expired. +func (s *ClientStream) Header() (metadata.MD, error) { + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. +func (s *ClientStream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. 
+func (s *ClientStream) Status() *status.Status { + return s.status +} diff --git a/constraint/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/constraint/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index ea0633bbd..ef72fbb3a 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/constraint/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -1033,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/constraint/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/constraint/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index 97198c515..dfc0f224e 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/constraint/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 { func (f *trInFlow) onData(n uint32) uint32 { f.unacked += n - if f.unacked >= f.limit/4 { - w := f.unacked - f.unacked = 0 + if f.unacked < f.limit/4 { f.updateEffectiveWindowSize() - return w + return 0 } - f.updateEffectiveWindowSize() - return 0 + return f.reset() } func (f *trInFlow) reset() uint32 { diff --git a/constraint/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/constraint/vendor/google.golang.org/grpc/internal/transport/handler_server.go index e1cd86b2f..3dea23573 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/constraint/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -225,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error { } } -func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { +func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error { ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() @@ -289,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro // writePendingHeaders sets common and custom headers on the first // write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { +func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) { ht.writeCommonHeaders(s) ht.writeCustomHeaders(s) } // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). 
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -317,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { h := ht.rw.Header() s.hdrMu.Lock() @@ -333,7 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { // Always take a reference because otherwise there is no guarantee the data will // be available after this function returns. This is what callers to Write // expect. @@ -357,7 +357,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSl return nil } -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { +func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } @@ -385,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc if ht.timeoutSet { @@ -408,16 +408,18 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, + s := &ServerStream{ + Stream: &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + buf: newRecvBuffer(), + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + }, cancel: cancel, - buf: newRecvBuffer(), st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ @@ -471,11 +473,9 @@ func (ht *serverHandlerTransport) runStream() { } } -func (ht *serverHandlerTransport) IncrMsgSent() {} +func (ht *serverHandlerTransport) incrMsgRecv() {} -func (ht *serverHandlerTransport) IncrMsgRecv() {} - -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } @@ -498,5 +498,5 @@ func mapRecvMsgError(err error) error { if strings.Contains(err.Error(), "body closed by handler") { return status.Error(codes.Canceled, err.Error()) } - return connectionErrorf(true, err, err.Error()) + return connectionErrorf(true, err, "%s", err.Error()) } diff --git a/constraint/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/constraint/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f46194fdc..f323ab7f4 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/constraint/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -86,9 +86,9 @@ type http2Client struct { writerDone chan struct{} // sync point to enable testing. // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer + goAway chan struct{} + keepaliveDone chan struct{} // Closed when the keepalive goroutine exits. + framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. // Do not access controlBuf with mu held. @@ -123,7 +123,7 @@ type http2Client struct { mu sync.Mutex // guard the following variables nextID uint32 state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ClientStream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -199,10 +199,10 @@ func isTemporary(err error) bool { return true } -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { +func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -335,10 +335,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), + keepaliveDone: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, @@ -479,17 +480,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. - s := &Stream{ - ct: t, - done: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - headerChan: make(chan struct{}), - contentSubtype: callHdr.ContentSubtype, - doneFunc: callHdr.DoneFunc, + s := &ClientStream{ + Stream: &Stream{ + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + contentSubtype: callHdr.ContentSubtype, + }, + ct: t, + done: make(chan struct{}), + headerChan: make(chan struct{}), + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -505,7 +508,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { ctxDone: s.ctx.Done(), recv: s.buf, closeStream: func(err error) { - t.CloseStream(s, err) + s.Close(err) }, }, windowHandler: func(n int) { @@ -527,8 +530,9 @@ func (t *http2Client) getPeer() *peer.Peer { // to be the last frame loopy writes to the transport. func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { t.mu.Lock() - defer t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + maxStreamID := t.nextID - 2 + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil { return false, err } return false, g.closeConn @@ -595,12 +599,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for k, v := range callAuthData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } - if b := stats.OutgoingTags(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) - } - if b := stats.OutgoingTrace(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) - } if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string @@ -736,7 +734,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. 
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { ctx = peer.NewContext(ctx, t.getPeer()) // ServerName field of the resolver returned address takes precedence over @@ -761,7 +759,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return } // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. @@ -772,7 +770,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -906,21 +904,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return s, nil } -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { +func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls @@ -1008,6 +992,9 @@ func (t *http2Client) Close(err error) { // should unblock it so that the goroutine eventually exits. t.kpDormancyCond.Signal() } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + goAwayDebugMessage := t.goAwayDebugMessage t.mu.Unlock() // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the @@ -1025,11 +1012,13 @@ func (t *http2Client) Close(err error) { } t.cancel() t.conn.Close() + // Waits for the reader and keepalive goroutines to exit before returning to + // ensure all resources are cleaned up before Close can return. + <-t.readerDone + if t.keepaliveEnabled { + <-t.keepaliveDone + } channelz.RemoveEntry(t.channelz.ID) - // Append info about previous goaways if there were any, since this may be important - // for understanding the root cause for this connection to be closed. - _, goAwayDebugMessage := t.GetGoAwayReason() - var st *status.Status if len(goAwayDebugMessage) > 0 { st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) @@ -1078,7 +1067,7 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. 
-func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { reader := data.Reader() if opts.Last { @@ -1107,10 +1096,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *O _ = reader.Close() return err } + t.incrMsgSent() return nil } -func (t *http2Client) getStream(f http2.Frame) *Stream { +func (t *http2Client) getStream(f http2.Frame) *ClientStream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1120,7 +1110,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { +func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1129,7 +1119,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { +func (t *http2Client) updateWindow(s *ClientStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1235,7 +1225,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { @@ -1316,11 +1306,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(pingAck) } -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return + return nil } if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug @@ -1332,8 +1322,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) - return + return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id) } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first @@ -1350,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
if id > t.prevGoAwayID { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return + return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID) } default: t.setGoAwayReason(f) @@ -1375,15 +1363,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.prevGoAwayID = id if len(t.activeStreams) == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return + return connectionErrorf(true, nil, "received goaway and there are no active streams") } - streamsToClose := make([]*Stream, 0) + streamsToClose := make([]*ClientStream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) + stream.unprocessed.Store(true) streamsToClose = append(streamsToClose, stream) } } @@ -1393,6 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for _, stream := range streamsToClose { t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } + return nil } // setGoAwayReason sets the value of t.goAwayReason based @@ -1434,7 +1422,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } endStream := frame.StreamEnded() - atomic.StoreUint32(&s.bytesReceived, 1) + s.bytesReceived.Store(true) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { @@ -1628,7 +1616,13 @@ func (t *http2Client) readServerPreface() error { // network connection. If the server preface is not read successfully, an // error is pushed to errCh; otherwise errCh is closed with no error. func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) + var errClose error + defer func() { + close(t.readerDone) + if errClose != nil { + t.Close(errClose) + } + }() if err := t.readServerPreface(); err != nil { errCh <- err @@ -1667,11 +1661,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + errClose = connectionErrorf(true, err, "error reading from server: %v", err) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1685,7 +1678,7 @@ func (t *http2Client) reader(errCh chan<- error) { case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: - t.handleGoAway(frame) + errClose = t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: @@ -1696,15 +1689,15 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { + var err error + defer func() { + close(t.keepaliveDone) + if err != nil { + t.Close(err) + } + }() p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. 
outstandingPing := false @@ -1728,7 +1721,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout") return } t.mu.Lock() @@ -1770,7 +1763,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): @@ -1799,14 +1792,18 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Client) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) + } } -func (t *http2Client) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) + } } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/constraint/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/constraint/vendor/google.golang.org/grpc/internal/transport/http2_server.go index f5163f770..997b0a59b 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/constraint/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,7 +25,7 @@ import ( "fmt" "io" "math" - "math/rand" + rand "math/rand/v2" "net" "net/http" "strconv" @@ -111,7 +111,7 @@ type http2Server struct { // already initialized since draining is already underway. drainEvent *grpcsync.Event state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ServerStream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. @@ -256,7 +256,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ServerStream), stats: config.StatsHandlers, kp: kp, idle: time.Now(), @@ -359,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. 
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -385,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() - s := &Stream{ - id: streamID, + s := &ServerStream{ + Stream: &Stream{ + id: streamID, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + }, st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, headerWireLength: int(frame.Header().Length), } var ( @@ -537,12 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) - if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { - s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) - } - if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { - s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) - } } t.mu.Lock() if t.state != reachable { @@ -568,7 +564,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 405, + httpStatus: http.StatusMethodNotAllowed, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), @@ -589,7 +585,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 200, + httpStatus: http.StatusOK, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, @@ -634,7 +630,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -698,7 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } } -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -716,7 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { +func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -726,7 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. 
// Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { +func (t *http2Server) updateWindow(s *ServerStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -963,7 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -func (t *http2Server) streamContextErr(s *Stream) error { +func (t *http2Server) streamContextErr(s *ServerStream) error { select { case <-t.done: return ErrConnClosing @@ -973,7 +969,7 @@ func (t *http2Server) streamContextErr(s *Stream) error { } // WriteHeader sends the header metadata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1006,7 +1002,7 @@ func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } -func (t *http2Server) writeHeaderLocked(s *Stream) error { +func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1046,7 +1042,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { +func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1117,11 +1113,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { reader := data.Reader() if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { + if err := t.writeHeader(s, nil); err != nil { _ = reader.Close() return err } @@ -1147,6 +1143,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *O _ = reader.Close() return err } + t.incrMsgSent() return nil } @@ -1238,7 +1235,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: @@ -1276,7 +1273,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. 
-func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { +func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1297,7 +1294,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { +func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1321,7 +1318,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h } // closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { +func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1415,14 +1412,18 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { } } -func (t *http2Server) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) +func (t *http2Server) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) + } } -func (t *http2Server) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) +func (t *http2Server) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) + } } func (t *http2Server) getOutFlowWindow() int64 { @@ -1455,7 +1456,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := rand.Int63n(2*r) - r + j := rand.Int64N(2*r) - r return time.Duration(j) } diff --git a/constraint/vendor/google.golang.org/grpc/internal/transport/http_util.go b/constraint/vendor/google.golang.org/grpc/internal/transport/http_util.go index f609c6c66..3613d7b64 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/constraint/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -393,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/constraint/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/constraint/vendor/google.golang.org/grpc/internal/transport/server_stream.go new file mode 100644 index 000000000..a22a90151 --- /dev/null +++ b/constraint/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2024 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "errors" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ServerStream implements streaming functionality for a gRPC server. +type ServerStream struct { + *Stream // Embed for common stream functionality. + + st internalServerTransport + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. + clientAdvertisedCompressors string + headerWireLength int + + // hdrMu protects outgoing header and trailer metadata. + hdrMu sync.Mutex + header metadata.MD // the outgoing header metadata. Updated by WriteHeader. + headerSent atomic.Bool // atomically set when the headers are sent out. +} + +// Read reads an n byte message from the input stream. +func (s *ServerStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.st.incrMsgRecv() + } + return b, err +} + +// SendHeader sends the header metadata for the given stream. +func (s *ServerStream) SendHeader(md metadata.MD) error { + return s.st.writeHeader(s, md) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.st.write(s, hdr, data, opts) +} + +// WriteStatus sends the status of a stream to the client. WriteStatus is +// the final call made on a stream and always occurs. +func (s *ServerStream) WriteStatus(st *status.Status) error { + return s.st.writeStatus(s, st) +} + +// isHeaderSent indicates whether headers have been sent. +func (s *ServerStream) isHeaderSent() bool { + return s.headerSent.Load() +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. +func (s *ServerStream) updateHeaderSent() bool { + return s.headerSent.Swap(true) +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ServerStream) RecvCompress() string { + return s.recvCompress +} + +// SendCompress returns the send compressor name. +func (s *ServerStream) SendCompress() string { + return s.sendCompress +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *ServerStream) ContentSubtype() string { + return s.contentSubtype +} + +// SetSendCompress sets the compression algorithm to the stream. 
+func (s *ServerStream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *ServerStream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *ServerStream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Header returns the header metadata of the stream. It returns the out header +// after t.WriteHeader is called. It does not block and must not be called +// until after WriteHeader. +func (s *ServerStream) Header() (metadata.MD, error) { + // Return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. +func (s *ServerStream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// This should not be called in parallel to other data writes. +func (s *ServerStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. +// This should not be called parallel to other data writes. 
+func (s *ServerStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} diff --git a/constraint/vendor/google.golang.org/grpc/internal/transport/transport.go b/constraint/vendor/google.golang.org/grpc/internal/transport/transport.go index fdd6fa86c..2859b8775 100644 --- a/constraint/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/constraint/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -27,7 +27,6 @@ import ( "fmt" "io" "net" - "strings" "sync" "sync/atomic" "time" @@ -39,7 +38,6 @@ import ( "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -133,7 +131,7 @@ type recvBufferReader struct { err error } -func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } @@ -142,9 +140,9 @@ func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { return n, nil } if r.closeStream != nil { - n, r.err = r.readHeaderClient(header) + n, r.err = r.readMessageHeaderClient(header) } else { - n, r.err = r.readHeader(header) + n, r.err = r.readMessageHeader(header) } return n, r.err } @@ -174,12 +172,12 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { return buf, r.err } -func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -192,7 +190,7 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -213,9 +211,9 @@ func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -246,7 +244,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { if m.buffer != nil { @@ -288,14 +286,8 @@ const ( // Stream represents an RPC in the transport layer. 
type Stream struct { id uint32 - st ServerTransport // nil for client side Stream - ct ClientTransport // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream + ctx context.Context // the associated context of the stream + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer @@ -303,58 +295,17 @@ type Stream struct { fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - state streamState - // On client-side it is the status error received from the server. - // On server-side it is unused. - status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string -} - -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} -// updateHeaderSent updates headerSent and returns true -// if it was already set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 + trailer metadata.MD // the key-value map of trailer metadata. } func (s *Stream) swapState(st streamState) streamState { @@ -369,110 +320,12 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. 
- return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. It -// does not block and must not be called until after WriteHeader. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil - } - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. Client-side only. -func (s *Stream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - // Trailer returns the cached trailer metadata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. +// after the entire stream is done, it could return an empty MD. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype + return s.trailer.Copy() } // Context returns the context of the stream. 
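The hunks here split the old shared Stream type: client-only state and helpers move onto ClientStream and server-only ones onto ServerStream, both of which embed *Stream. A rough before/after sketch of the client-side call shape, assuming ClientStream carries Write and Close wrappers analogous to the ServerStream methods introduced in this patch (Close does appear in the http2_client.go hunks above):

// Old shape: the transport performed per-stream writes and teardown.
//	err := t.Write(s, hdr, data, &transport.Options{Last: true})
//	t.CloseStream(s, err)
//
// New shape: the same sequence is expressed against the stream itself, which
// forwards to its owning transport internally.
//	err := s.Write(hdr, data, &transport.WriteOptions{Last: true}) // s is a *transport.ClientStream
//	s.Close(err)                                                   // replaces t.CloseStream(s, err)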
@@ -480,81 +333,31 @@ func (s *Stream) Context() context.Context { return s.ctx } -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} - func (s *Stream) write(m recvMsg) { s.buf.put(m) } -func (s *Stream) ReadHeader(header []byte) (err error) { +// ReadMessageHeader reads data into the provided header slice from the stream. +// It first checks if there was an error during a previous read operation and +// returns it if present. It then requests a read operation for the length of +// the header. It continues to read from the stream until the entire header +// slice is filled or an error occurs. If an `io.EOF` error is encountered with +// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an +// unexpected end of the stream. The method returns any error encountered during +// the read process or nil if the header was successfully read. +func (s *Stream) ReadMessageHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return er } s.requestRead(len(header)) for len(header) != 0 { - n, err := s.trReader.ReadHeader(header) + n, err := s.trReader.ReadMessageHeader(header) header = header[n:] if len(header) == 0 { err = nil @@ -570,7 +373,7 @@ func (s *Stream) ReadHeader(header []byte) (err error) { } // Read reads n bytes from the wire for this stream. 
-func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { +func (s *Stream) read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return nil, er @@ -610,13 +413,13 @@ type transportReader struct { er error } -func (t *transportReader) ReadHeader(header []byte) (int, error) { - n, err := t.reader.ReadHeader(header) +func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { + n, err := t.reader.ReadMessageHeader(header) if err != nil { t.er = err return 0, err } - t.windowHandler(len(header)) + t.windowHandler(n) return n, nil } @@ -630,17 +433,6 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { return buf, nil } -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. -func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 -} - // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { @@ -716,15 +508,9 @@ type ConnectOptions struct { BufferPool mem.BufferPool } -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - -// Options provides additional hints and information for message +// WriteOptions provides additional hints and information for message // transmission. -type Options struct { +type WriteOptions struct { // Last indicates whether this write is the last piece for // this stream. Last bool @@ -773,18 +559,8 @@ type ClientTransport interface { // It does not block. GracefulClose() - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) + NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -804,12 +580,6 @@ type ClientTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport @@ -819,19 +589,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. 
- HandleStreams(context.Context, func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. - WriteStatus(s *Stream, st *status.Status) error + HandleStreams(context.Context, func(*ServerStream)) // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -843,12 +601,14 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) +} - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() +type internalServerTransport interface { + ServerTransport + writeHeader(s *ServerStream, md metadata.MD) error + write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error + writeStatus(s *ServerStream, st *status.Status) error + incrMsgRecv() } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/constraint/vendor/google.golang.org/grpc/keepalive/keepalive.go b/constraint/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7..eb42b19fb 100644 --- a/constraint/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/constraint/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. 
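The expanded comments above spell out how Time, Timeout, and PermitWithoutStream interact with the server's keepalive enforcement policy. A minimal sketch of setting them on a client, assuming a placeholder target and insecure transport credentials purely to keep the example self-contained:

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func dialWithKeepalive(target string) (*grpc.ClientConn, error) {
	kp := keepalive.ClientParameters{
		Time:                5 * time.Minute,  // stay at or above the server's default MinTime of 5 minutes
		Timeout:             20 * time.Second, // matches the documented default
		PermitWithoutStream: false,            // only ping while RPCs are active
	}
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(kp),
	)
}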
+ PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/constraint/vendor/google.golang.org/grpc/mem/buffer_slice.go b/constraint/vendor/google.golang.org/grpc/mem/buffer_slice.go index d7775cea6..65002e2cc 100644 --- a/constraint/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/constraint/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -19,10 +19,14 @@ package mem import ( - "compress/flate" "io" ) +const ( + // 32 KiB is what io.Copy uses. + readAllBufSize = 32 * 1024 +) + // BufferSlice offers a means to represent data that spans one or more Buffer // instances. A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -92,9 +96,11 @@ func (s BufferSlice) Materialize() []byte { } // MaterializeToBuffer functions like Materialize except that it writes the data -// to a single Buffer pulled from the given BufferPool. As a special case, if the -// input BufferSlice only actually has one Buffer, this function has nothing to -// do and simply returns said Buffer. +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. Freeing this +// buffer won't release it until the BufferSlice is itself released. func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { if len(s) == 1 { s[0].Ref() @@ -124,7 +130,8 @@ func (s BufferSlice) Reader() Reader { // Remaining(), which returns the number of unread bytes remaining in the slice. // Buffers will be freed as they are read. type Reader interface { - flate.Reader + io.Reader + io.ByteReader // Close frees the underlying BufferSlice and never returns an error. Subsequent // calls to Read will return (0, io.EOF). Close() error @@ -217,8 +224,58 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given -// buffer into a new Buffer pulled from the given pool and the Buffer is added to -// the given BufferSlice. +// buffer into a new Buffer pulled from the given pool and the Buffer is +// added to the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { return &writer{buffers: buffers, pool: pool} } + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +// +// Important: A failed call returns a non-nil error and may also return +// partially read buffers. It is the responsibility of the caller to free the +// BufferSlice returned, or its memory will not be reused. +func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { + var result BufferSlice + if wt, ok := r.(io.WriterTo); ok { + // This is more optimal since wt knows the size of chunks it wants to + // write and, hence, we can allocate buffers of an optimal size to fit + // them. E.g. might be a single big chunk, and we wouldn't chop it + // into pieces. + w := NewWriter(&result, pool) + _, err := wt.WriteTo(w) + return result, err + } +nextBuffer: + for { + buf := pool.Get(readAllBufSize) + // We asked for 32KiB but may have been given a bigger buffer. 
+ // Use all of it if that's the case. + *buf = (*buf)[:cap(*buf)] + usedCap := 0 + for { + n, err := r.Read((*buf)[usedCap:]) + usedCap += n + if err != nil { + if usedCap == 0 { + // Nothing in this buf, put it back + pool.Put(buf) + } else { + *buf = (*buf)[:usedCap] + result = append(result, NewBuffer(buf, pool)) + } + if err == io.EOF { + err = nil + } + return result, err + } + if len(*buf) == usedCap { + result = append(result, NewBuffer(buf, pool)) + continue nextBuffer + } + } + } +} diff --git a/constraint/vendor/google.golang.org/grpc/mem/buffers.go b/constraint/vendor/google.golang.org/grpc/mem/buffers.go index 975ceb718..ecbf0b9a7 100644 --- a/constraint/vendor/google.golang.org/grpc/mem/buffers.go +++ b/constraint/vendor/google.golang.org/grpc/mem/buffers.go @@ -65,6 +65,9 @@ var ( refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} ) +// IsBelowBufferPoolingThreshold returns true if the given size is less than or +// equal to the threshold for buffer pooling. This is used to determine whether +// to pool buffers or allocate them directly. func IsBelowBufferPoolingThreshold(size int) bool { return size <= bufferPoolingThreshold } @@ -89,7 +92,11 @@ func newBuffer() *buffer { // // Note that the backing array of the given data is not copied. func NewBuffer(data *[]byte, pool BufferPool) Buffer { - if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + // Use the buffer's capacity instead of the length, otherwise buffers may + // not be reused under certain conditions. For example, if a large buffer + // is acquired from the pool, but fewer bytes than the buffering threshold + // are written to it, the buffer will not be returned to the pool. + if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) { return (SliceBuffer)(*data) } b := newBuffer() @@ -194,19 +201,19 @@ func (b *buffer) read(buf []byte) (int, Buffer) { return n, b } -// String returns a string representation of the buffer. May be used for -// debugging purposes. func (b *buffer) String() string { return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) } +// ReadUnsafe reads bytes from the given Buffer into the provided slice. +// It does not perform safety checks. func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { return buf.read(dst) } // SplitUnsafe modifies the receiver to point to the first n bytes while it -// returns a new reference to the remaining bytes. The returned Buffer functions -// just like a normal reference acquired using Ref(). +// returns a new reference to the remaining bytes. The returned Buffer +// functions just like a normal reference acquired using Ref(). func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { return buf.split(n) } @@ -224,20 +231,29 @@ func (e emptyBuffer) Len() int { return 0 } -func (e emptyBuffer) split(n int) (left, right Buffer) { +func (e emptyBuffer) split(int) (left, right Buffer) { return e, e } -func (e emptyBuffer) read(buf []byte) (int, Buffer) { +func (e emptyBuffer) read([]byte) (int, Buffer) { return 0, e } +// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides +// methods for reading, splitting, and managing the byte slice. type SliceBuffer []byte +// ReadOnlyData returns the byte slice. func (s SliceBuffer) ReadOnlyData() []byte { return s } -func (s SliceBuffer) Ref() {} -func (s SliceBuffer) Free() {} -func (s SliceBuffer) Len() int { return len(s) } + +// Ref is a noop implementation of Ref. 
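The new mem.ReadAll above hands back pooled buffers that the caller is responsible for freeing, even when it also returns an error. A short usage sketch under that assumption; readAllPooled and its io.Reader argument are illustrative names:

import (
	"io"

	"google.golang.org/grpc/mem"
)

func readAllPooled(src io.Reader) ([]byte, error) {
	data, err := mem.ReadAll(src, mem.DefaultBufferPool())
	// Partially read buffers may be returned alongside a non-nil error, so
	// Free must run on every path for the pool to reclaim them.
	defer data.Free()
	if err != nil {
		return nil, err
	}
	// Materialize copies the chunks into one contiguous slice; callers that can
	// consume a BufferSlice directly avoid this copy.
	return data.Materialize(), nil
}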
+func (s SliceBuffer) Ref() {} + +// Free is a noop implementation of Free. +func (s SliceBuffer) Free() {} + +// Len is a noop implementation of Len. +func (s SliceBuffer) Len() int { return len(s) } func (s SliceBuffer) split(n int) (left, right Buffer) { return s[:n], s[n:] diff --git a/constraint/vendor/google.golang.org/grpc/preloader.go b/constraint/vendor/google.golang.org/grpc/preloader.go index e87a17f36..ee0ff969a 100644 --- a/constraint/vendor/google.golang.org/grpc/preloader.go +++ b/constraint/vendor/google.golang.org/grpc/preloader.go @@ -62,7 +62,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { materializedData := data.Materialize() data.Free() - p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} // TODO: it should be possible to grab the bufferPool from the underlying // stream implementation with a type cast to its actual type (such as @@ -76,7 +76,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if p.pf.isCompressed() { materializedCompData := compData.Materialize() compData.Free() - compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} } p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) diff --git a/constraint/vendor/google.golang.org/grpc/resolver/resolver.go b/constraint/vendor/google.golang.org/grpc/resolver/resolver.go index 202854511..8eb1cf3bc 100644 --- a/constraint/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/constraint/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,6 +22,7 @@ package resolver import ( "context" + "errors" "fmt" "net" "net/url" @@ -237,8 +238,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. + // error. The ClientConn then forwards this error to the load balancing + // policy. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -330,3 +331,20 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. OverrideAuthority(Target) string } + +// ValidateEndpoints validates endpoints from a petiole policy's perspective. +// Petiole policies should call this before calling into their children. See +// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md) +// for details. 
+func ValidateEndpoints(endpoints []Endpoint) error { + if len(endpoints) == 0 { + return errors.New("endpoints list is empty") + } + + for _, endpoint := range endpoints { + for range endpoint.Addresses { + return nil + } + } + return errors.New("endpoints list contains no addresses") +} diff --git a/constraint/vendor/google.golang.org/grpc/rpc_util.go b/constraint/vendor/google.golang.org/grpc/rpc_util.go index db8865ec3..9fac2b08b 100644 --- a/constraint/vendor/google.golang.org/grpc/rpc_util.go +++ b/constraint/vendor/google.golang.org/grpc/rpc_util.go @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,8 +264,8 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } @@ -304,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. The error passed to the callback is the status of the @@ -339,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -363,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -387,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. 
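Illustrative usage note for resolver.ValidateEndpoints above (not part of the vendored change): per gRFC A61, petiole load-balancing policies are expected to validate endpoints before building children. A hedged sketch of that check inside a hypothetical balancer's resolver-update path:

package main

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// handleResolverUpdate sketches what a petiole policy would do inside
// UpdateClientConnState before creating one child per endpoint.
func handleResolverUpdate(state balancer.ClientConnState) error {
	if err := resolver.ValidateEndpoints(state.ResolverState.Endpoints); err != nil {
		// Reject empty endpoint lists (or endpoints without addresses) up
		// front rather than failing inside a child policy.
		return balancer.ErrBadResolverState
	}
	// ... build/update child balancers here ...
	return nil
}

func main() {
	// With no endpoints the sketch reports a bad resolver state.
	_ = handleResolverUpdate(balancer.ClientConnState{})
}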
@@ -410,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -438,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -475,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -514,7 +514,7 @@ func (o ForceCodecCallOption) before(c *callInfo) error { c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} // ForceCodecV2 returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -554,7 +554,7 @@ func (o ForceCodecV2CallOption) before(c *callInfo) error { return nil } -func (o ForceCodecV2CallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -579,7 +579,7 @@ func (o CustomCodecCallOption) before(c *callInfo) error { c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -607,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -622,7 +622,7 @@ func (pf payloadFormat) isCompressed() bool { } type streamReader interface { - ReadHeader(header []byte) error + ReadMessageHeader(header []byte) error Read(n int) (mem.BufferSlice, error) } @@ -656,7 +656,7 @@ type parser struct { // that the underlying streamReader must not return an incompatible // error. 
func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { - err := p.r.ReadHeader(p.header[:]) + err := p.r.ReadMessageHeader(p.header[:]) if err != nil { return 0, nil, err } @@ -664,9 +664,6 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) - if length == 0 { - return pf, nil, nil - } if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } @@ -791,9 +788,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool if !haveCompressor { if isServer { return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } else { - return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -818,7 +814,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -842,7 +838,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei var uncompressedBuf []byte uncompressedBuf, err = dc.Do(compressed.Reader()) if err == nil { - out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)} } size = len(uncompressedBuf) } else { @@ -878,30 +874,7 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return nil, 0, err } - // TODO: Can/should this still be preserved with the new BufferSlice API? Are - // there any actual benefits to allocating a single large buffer instead of - // multiple smaller ones? - //if sizer, ok := compressor.(interface { - // DecompressedSize(compressedBytes []byte) int - //}); ok { - // if size := sizer.DecompressedSize(d); size >= 0 { - // if size > maxReceiveMessageSize { - // return nil, size, nil - // } - // // size is used as an estimate to size the buffer, but we - // // will read more data if available. - // // +MinRead so ReadFrom will not reallocate if size is correct. - // // - // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // // we can also utilize the recv buffer pool here. 
- // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - // return buf.Bytes(), int(bytesRead), err - // } - //} - - var out mem.BufferSlice - _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool) if err != nil { out.Free() return nil, 0, err @@ -909,10 +882,14 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return out, out.Len(), nil } +type recvCompressor interface { + RecvCompress() string +} + // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/constraint/vendor/google.golang.org/grpc/server.go b/constraint/vendor/google.golang.org/grpc/server.go index 457d27338..9d5b2884d 100644 --- a/constraint/vendor/google.golang.org/grpc/server.go +++ b/constraint/vendor/google.golang.org/grpc/server.go @@ -87,12 +87,13 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +// MethodHandler is a function type that processes a unary RPC method call. +type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string - Handler methodHandler + Handler MethodHandler } // ServiceDesc represents an RPC service's specification. @@ -621,8 +622,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorker blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// serverWorker blocks on a *transport.ServerStream channel forever and waits +// for data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). 
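Illustrative usage note for the newly exported grpc.MethodHandler above (not part of the vendored change): this is the signature generated code plugs into grpc.ServiceDesc. A hedged sketch of a hand-written unary registration; the Echo service, its types, and its names are invented, and the interceptor/codec plumbing that generated code adds is elided:

package main

import (
	"context"

	"google.golang.org/grpc"
)

// echoRequest, echoService and echoServer are hypothetical stand-ins for
// generated protobuf types and service interfaces.
type echoRequest struct{ Msg string }

type echoService interface {
	Echo(context.Context, *echoRequest) (*echoRequest, error)
}

type echoServer struct{}

func (s *echoServer) Echo(_ context.Context, req *echoRequest) (*echoRequest, error) {
	return req, nil
}

// echoHandler has the exported grpc.MethodHandler signature; interceptor
// chaining, which generated code would include, is omitted.
func echoHandler(srv any, ctx context.Context, dec func(any) error, _ grpc.UnaryServerInterceptor) (any, error) {
	in := new(echoRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	return srv.(echoService).Echo(ctx, in)
}

var echoServiceDesc = grpc.ServiceDesc{
	ServiceName: "example.Echo",
	HandlerType: (*echoService)(nil),
	Methods: []grpc.MethodDesc{
		{MethodName: "Echo", Handler: echoHandler},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "echo.proto",
}

func main() {
	s := grpc.NewServer()
	s.RegisterService(&echoServiceDesc, &echoServer{})
}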
// @@ -1020,7 +1021,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { + st.HandleStreams(ctx, func(stream *transport.ServerStream) { s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1136,7 +1137,7 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1165,7 +1166,7 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, if payloadLen > s.opts.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } - err = t.Write(stream, hdr, payload, opts) + err = stream.Write(hdr, payload, opts) if err == nil { if len(s.opts.statsHandlers) != 0 { for _, sh := range s.opts.statsHandlers { @@ -1212,7 +1213,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1320,7 +1321,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(stream, st) + stream.WriteStatus(st) return st.Err() } } @@ -1354,15 +1355,21 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { - if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + if e := stream.WriteStatus(status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } - if channelz.IsOn() { - t.IncrMsgRecv() + freed := false + dataFree := func() { + if !freed { + d.Free() + freed = true + } } + defer dataFree() df := func(v any) error { + defer dataFree() if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1403,7 +1410,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } - if e := t.WriteStatus(stream, appStatus); e != nil { + if e := stream.WriteStatus(appStatus); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write 
status: %v", e) } if len(binlogs) != 0 { @@ -1430,20 +1437,20 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.Options{Last: true} + opts := &transport.WriteOptions{Last: true} // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } if sts, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, sts); e != nil { + if e := stream.WriteStatus(sts); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1483,9 +1490,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, sm) } } - if channelz.IsOn() { - t.IncrMsgSent() - } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1501,7 +1505,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, st) } } - return t.WriteStatus(stream, statusOK) + return stream.WriteStatus(statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1540,7 +1544,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1560,7 +1564,6 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, - t: t, s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), @@ -1647,7 +1650,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ss.decomp = encoding.GetCompressor(rc) if ss.decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) return st.Err() } } @@ -1716,7 +1719,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - t.WriteStatus(ss.s, appStatus) + ss.s.WriteStatus(appStatus) // TODO: Should we log an error from WriteStatus here and below? 
return appErr } @@ -1734,10 +1737,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - return t.WriteStatus(ss.s, statusOK) + return ss.s.WriteStatus(statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1767,7 +1770,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1782,17 +1785,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) + // FromIncomingContext is expensive: skip if there are no statsHandlers + if len(s.opts.statsHandlers) > 0 { + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1801,17 +1807,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, t, stream, srv, md, ti) + s.processUnaryRPC(ctx, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + s.processStreamingRPC(ctx, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1824,7 +1830,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -2099,7 +2105,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2121,7 +2127,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/constraint/vendor/google.golang.org/grpc/service_config.go b/constraint/vendor/google.golang.org/grpc/service_config.go index 2671c5ef6..8d451e07c 100644 --- a/constraint/vendor/google.golang.org/grpc/service_config.go +++ b/constraint/vendor/google.golang.org/grpc/service_config.go @@ -168,6 +168,7 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } + func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -267,18 +268,21 @@ func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } +func isValidRetryPolicy(jrp *jsonRetryPolicy) bool { + return jrp.MaxAttempts > 1 && + jrp.InitialBackoff > 0 && + jrp.MaxBackoff > 0 && + jrp.BackoffMultiplier > 0 && + len(jrp.RetryableStatusCodes) > 0 +} + func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } - if jrp.MaxAttempts <= 1 || - jrp.InitialBackoff <= 0 || - jrp.MaxBackoff <= 0 || - jrp.BackoffMultiplier <= 0 || - len(jrp.RetryableStatusCodes) == 0 { - logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) - return nil, nil + if !isValidRetryPolicy(jrp) { + return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp) } if jrp.MaxAttempts < maxAttempts { @@ -297,7 +301,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } -func min(a, b *int) *int { +func minPointers(a, b *int) *int { if *a < *b { return a } @@ -309,7 +313,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) + return minPointers(mcMax, doptMax) } if mcMax != nil { return mcMax diff --git a/constraint/vendor/google.golang.org/grpc/stats/metrics.go b/constraint/vendor/google.golang.org/grpc/stats/metrics.go new file mode 100644 index 000000000..641c8e979 --- /dev/null +++ b/constraint/vendor/google.golang.org/grpc/stats/metrics.go @@ -0,0 +1,81 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import "maps" + +// MetricSet is a set of metrics to record. Once created, MetricSet is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetricSet instead. +type MetricSet struct { + // metrics are the set of metrics to initialize. + metrics map[string]bool +} + +// NewMetricSet returns a MetricSet containing metricNames. +func NewMetricSet(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *MetricSet) Metrics() map[string]bool { + return m.metrics +} + +// Add adds the metricNames to the metrics set and returns a new copy with the +// additional metrics. +func (m *MetricSet) Add(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *MetricSet) Join(metrics *MetricSet) *MetricSet { + newMetrics := make(map[string]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &MetricSet{metrics: newMetrics} +} + +// Remove removes the metricNames from the metrics set and returns a new copy +// with the metrics removed. +func (m *MetricSet) Remove(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + delete(newMetrics, metric) + } + return &MetricSet{metrics: newMetrics} +} diff --git a/constraint/vendor/google.golang.org/grpc/stats/stats.go b/constraint/vendor/google.golang.org/grpc/stats/stats.go index 71195c494..6f20d2d54 100644 --- a/constraint/vendor/google.golang.org/grpc/stats/stats.go +++ b/constraint/vendor/google.golang.org/grpc/stats/stats.go @@ -260,84 +260,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-tags-bin` header in the metadata instead. func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b)) } // Tags returns the tags from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. 
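Illustrative usage note for the new stats.MetricSet type above (not part of the vendored change): the set is immutable, and Add/Remove/Join return fresh copies. A small sketch with placeholder metric names:

package main

import (
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	// Start from a base set of (placeholder) metric names.
	base := stats.NewMetricSet("grpc.client.attempt.duration", "grpc.client.call.duration")

	// Each operation returns a new set; base itself never changes.
	withRetries := base.Add("grpc.client.attempt.retries")
	trimmed := withRetries.Remove("grpc.client.call.duration")
	merged := base.Join(stats.NewMetricSet("grpc.server.call.duration"))

	fmt.Println(len(base.Metrics()), len(withRetries.Metrics()), len(trimmed.Metrics()), len(merged.Metrics()))
	// Expected: 2 3 2 3
}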
New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-tags-bin` header from metadata instead. func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. -func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. -func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-trace-bin` header in the metadata instead. func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b)) } // Trace returns the trace from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-trace-bin` header from metadata instead. func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. 
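Illustrative usage note for the deprecations above (not part of the vendored change): stats.SetTrace/Trace and SetTags/Tags are now thin wrappers over the grpc-trace-bin and grpc-tags-bin metadata keys. A hedged sketch of the metadata-based equivalent that new code is steered toward, with the server side simulated in-process:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Client side: attach the binary trace header to the outgoing context,
	// which is all stats.SetTrace does now.
	ctx := metadata.AppendToOutgoingContext(context.Background(),
		"grpc-trace-bin", string([]byte{0x01, 0x02}))

	// Server side, simulated here: a real handler already receives an
	// incoming context populated by the transport.
	md, _ := metadata.FromOutgoingContext(ctx)
	incoming := metadata.NewIncomingContext(context.Background(), md)

	// stats.Trace now returns the last value of this header.
	if vals := metadata.ValueFromIncomingContext(incoming, "grpc-trace-bin"); len(vals) > 0 {
		fmt.Printf("trace bytes: %v\n", []byte(vals[len(vals)-1]))
	}
}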
-func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } diff --git a/constraint/vendor/google.golang.org/grpc/stream.go b/constraint/vendor/google.golang.org/grpc/stream.go index bb2b2a216..54adbbced 100644 --- a/constraint/vendor/google.golang.org/grpc/stream.go +++ b/constraint/vendor/google.golang.org/grpc/stream.go @@ -23,7 +23,7 @@ import ( "errors" "io" "math" - "math/rand" + rand "math/rand/v2" "strconv" "sync" "time" @@ -113,7 +113,9 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. + // the stream may be discovered using RecvMsg. For unary or server-streaming + // RPCs (StreamDesc.ClientStreams is false), a nil error is returned + // unconditionally. // // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or @@ -216,7 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) } @@ -584,7 +586,7 @@ type csAttempt struct { ctx context.Context cs *clientStream t transport.ClientTransport - s *transport.Stream + s *transport.ClientStream p *parser pickResult balancer.PickResult @@ -706,11 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.InitialBackoff) * fact - if max := float64(rp.MaxBackoff); cur > max { - cur = max - } - dur = time.Duration(rand.Int63n(int64(cur))) + cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff)) + // Apply jitter by multiplying with a random factor between 0.8 and 1.2 + cur *= 0.8 + 0.4*rand.Float64() + dur = time.Duration(int64(cur)) cs.numRetriesSincePushback++ } @@ -991,7 +992,7 @@ func (cs *clientStream) CloseSend() error { } cs.sentLast = true op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + a.s.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1083,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } a.mu.Unlock() } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. 
finish is not called; RecvMsg() @@ -1097,9 +1098,6 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } } - if channelz.IsOn() { - a.t.IncrMsgSent() - } return nil } @@ -1153,9 +1151,6 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { Length: payInfo.uncompressedBytes.Len(), }) } - if channelz.IsOn() { - a.t.IncrMsgRecv() - } if cs.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1183,7 +1178,7 @@ func (a *csAttempt) finish(err error) { } var tr metadata.MD if a.s != nil { - a.t.CloseStream(a.s, err) + a.s.Close(err) tr = a.s.Trailer() } @@ -1340,7 +1335,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.Stream + s *transport.ClientStream ac *addrConn callHdr *transport.CallHdr cancel context.CancelFunc @@ -1380,7 +1375,7 @@ func (as *addrConnStream) CloseSend() error { } as.sentLast = true - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + as.s.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1430,7 +1425,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1440,9 +1435,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return io.EOF } - if channelz.IsOn() { - as.t.IncrMsgSent() - } return nil } @@ -1480,9 +1472,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { return toRPCErr(err) } - if channelz.IsOn() { - as.t.IncrMsgRecv() - } if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1510,7 +1499,7 @@ func (as *addrConnStream) finish(err error) { err = nil } if as.s != nil { - as.t.CloseStream(as.s, err) + as.s.Close(err) } if err != nil { @@ -1577,8 +1566,7 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { ctx context.Context - t transport.ServerTransport - s *transport.Stream + s *transport.ServerStream p *parser codec baseCodec @@ -1628,7 +1616,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { return status.Error(codes.Internal, err.Error()) } - err = ss.t.WriteHeader(ss.s, md) + err = ss.s.SendHeader(md) if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() sh := &binarylog.ServerHeader{ @@ -1668,7 +1656,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). 
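Illustrative note on the retry backoff change above (not part of the vendored change): shouldRetry now caps the exponential backoff with the built-in min and applies multiplicative jitter in [0.8, 1.2) via math/rand/v2, instead of drawing a uniform duration in [0, cur). A standalone sketch of that computation with made-up policy values:

package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

// retryBackoff mirrors the scheme above: exponential growth capped at
// maxBackoff, then scaled by a random factor in [0.8, 1.2).
func retryBackoff(initial, maxBackoff time.Duration, multiplier float64, retries int) time.Duration {
	cur := min(float64(initial)*math.Pow(multiplier, float64(retries)), float64(maxBackoff))
	cur *= 0.8 + 0.4*rand.Float64()
	return time.Duration(int64(cur))
}

func main() {
	// Hypothetical retry policy: 100ms initial backoff, 5s cap, 2x multiplier.
	for retries := 0; retries < 5; retries++ {
		fmt.Println(retryBackoff(100*time.Millisecond, 5*time.Second, 2.0, retries))
	}
}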
// @@ -1676,9 +1664,6 @@ func (ss *serverStream) SendMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgSent() - } }() // Server handler could have set new compressor by calling SetSendCompressor. @@ -1710,7 +1695,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { if payloadLen > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize) } - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil { return toRPCErr(err) } @@ -1756,7 +1741,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1764,9 +1749,6 @@ func (ss *serverStream) RecvMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } }() var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { @@ -1784,7 +1766,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { return err } if err == io.ErrUnexpectedEOF { - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error()) } return toRPCErr(err) } diff --git a/constraint/vendor/google.golang.org/grpc/stream_interfaces.go b/constraint/vendor/google.golang.org/grpc/stream_interfaces.go index 8b813529c..0037fee0b 100644 --- a/constraint/vendor/google.golang.org/grpc/stream_interfaces.go +++ b/constraint/vendor/google.golang.org/grpc/stream_interfaces.go @@ -22,15 +22,35 @@ package grpc // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } // ServerStreamingServer represents the server side of a server-streaming (one // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. 
Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface { // message stream and the type of the unary response message. It is used in // generated code. type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } @@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface { // requests, one response) RPC. It is generic over both the type of the request // message stream and the type of the unary response message. It is used in // generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface { // request message stream and the type of the response message stream. It is // used in generated code. type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. 
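Illustrative usage note for the expanded stream-interface docs above (not part of the vendored change): the ClientStreamingClient contract is Send repeatedly, then exactly one CloseAndRecv. A hedged sketch of that pattern; Num and Sum stand in for generated protobuf messages:

package main

import (
	"fmt"
	"io"

	"google.golang.org/grpc"
)

// Num and Sum are hypothetical stand-ins for generated protobuf messages.
type Num struct{ V int64 }
type Sum struct{ Total int64 }

// sumNumbers drives any grpc.ClientStreamingClient[Num, Sum], such as the
// stream a generated client method would return.
func sumNumbers(stream grpc.ClientStreamingClient[Num, Sum], vals []int64) (int64, error) {
	for _, v := range vals {
		if err := stream.Send(&Num{V: v}); err != nil {
			if err == io.EOF {
				// The stream already terminated; CloseAndRecv below reports
				// the real status.
				break
			}
			return 0, err
		}
	}
	resp, err := stream.CloseAndRecv()
	if err != nil {
		return 0, err
	}
	return resp.Total, nil
}

func main() {
	// A real stream comes from a generated client; this only shows the shape.
	fmt.Println("sumNumbers expects a grpc.ClientStreamingClient[Num, Sum]")
}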
Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. ClientStream } @@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface { // (many requests, many responses) RPC. It is generic over both the type of the // request message stream and the type of the response message stream. It is // used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } diff --git a/constraint/vendor/google.golang.org/grpc/version.go b/constraint/vendor/google.golang.org/grpc/version.go index 1ffec6e2c..0e03fa4d4 100644 --- a/constraint/vendor/google.golang.org/grpc/version.go +++ b/constraint/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.66.0" +const Version = "1.70.0" diff --git a/constraint/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/constraint/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index bb2966e3b..cffdfda96 100644 --- a/constraint/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/constraint/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -192,7 +192,7 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro fd = fieldDescs.ByTextName(name) } } - if flags.ProtoLegacy { + if flags.ProtoLegacyWeak { if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { fd = nil // reset since the weak reference is not linked in } @@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. 
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) } func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { diff --git a/constraint/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/constraint/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 29846df22..0e72d8537 100644 --- a/constraint/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/constraint/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { + if fd.HasPresence() { if m.skipNull { continue } diff --git a/constraint/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/constraint/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 4b177c820..e9fe10394 100644 --- a/constraint/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/constraint/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa switch tok.Kind() { case json.ObjectClose: if !found { - return d.newError(tok.Pos(), `missing "value" field`) + // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type, + // for compatibility with other proto runtimes that have interpreted the spec differently. + if m.Descriptor().FullName() != genid.Empty_message_fullname { + return d.newError(tok.Pos(), `missing "value" field`) + } } return nil diff --git a/constraint/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/constraint/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 24bc98ac4..d972a3d98 100644 --- a/constraint/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/constraint/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -185,7 +185,7 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacy { + if flags.ProtoLegacyWeak { if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { fd = nil // reset since the weak reference is not linked in } diff --git a/constraint/vendor/google.golang.org/protobuf/internal/descopts/options.go b/constraint/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c8..024ffebd3 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. 
In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/constraint/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/constraint/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index ff6a38360..5a57ef6f3 100644 Binary files a/constraint/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/constraint/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/constraint/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/constraint/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 029a6a12d..bf1aba0e8 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -5,9 +5,14 @@ // Package editionssupport defines constants for editions that are supported. package editionssupport -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" +import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 Maximum = descriptorpb.Edition_EDITION_2023 + + // MaximumKnown is the maximum edition that is known to Go Protobuf, but not + // declared as supported. In other words: end users cannot use it, but + // testprotos inside Go Protobuf can. + MaximumKnown = descriptorpb.Edition_EDITION_2024 ) diff --git a/constraint/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/constraint/vendor/google.golang.org/protobuf/internal/errors/is_go112.go deleted file mode 100644 index fbcd34920..000000000 --- a/constraint/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package errors - -import "reflect" - -// Is is a copy of Go 1.13's errors.Is for use with older Go versions. 
-func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - if err = unwrap(err); err == nil { - return false - } - } -} - -func unwrap(err error) error { - u, ok := err.(interface { - Unwrap() error - }) - if !ok { - return nil - } - return u.Unwrap() -} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/constraint/vendor/google.golang.org/protobuf/internal/errors/is_go113.go deleted file mode 100644 index 5e72f1cde..000000000 --- a/constraint/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package errors - -import "errors" - -// Is is errors.Is. -func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40b..378b826fa 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -32,6 +32,7 @@ const ( EditionProto2 Edition = 998 EditionProto3 Edition = 999 Edition2023 Edition = 1000 + Edition2024 Edition = 1001 EditionUnsupported Edition = 100000 ) @@ -77,31 +78,48 @@ type ( Locations SourceLocations } + // EditionFeatures is a frequently-instantiated struct, so please take care + // to minimize padding when adding new fields to this struct (add them in + // the right place/order). EditionFeatures struct { + // StripEnumPrefix determines if the plugin generates enum value + // constants as-is, with their prefix stripped, or both variants. + StripEnumPrefix int + // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED // https://protobuf.dev/editions/features/#field_presence IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED // https://protobuf.dev/editions/features/#repeated_field_encoding IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY // https://protobuf.dev/editions/features/#utf8_validation IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED // https://protobuf.dev/editions/features/#message_encoding IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the // UnmarshalJSON([]byte) error method for enums. GenerateLegacyUnmarshalJSON bool + // APILevel controls which API (Open, Hybrid or Opaque) should be used + // for generated code (.pb.go files). 
+ APILevel int } ) @@ -258,6 +276,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +370,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +445,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +486,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b0..d2f549497 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8d..67a51b327 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/constraint/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/constraint/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356b..10132c9b3 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -32,6 +32,14 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeVarint(b) b = b[m:] parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + case 
genid.GoFeatures_ApiLevel_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.APILevel = int(v) + case genid.GoFeatures_StripEnumPrefix_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.StripEnumPrefix = int(v) default: panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) } @@ -68,7 +76,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/constraint/vendor/google.golang.org/protobuf/internal/flags/flags.go b/constraint/vendor/google.golang.org/protobuf/internal/flags/flags.go index 58372dd34..5cb3ee70f 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -22,3 +22,8 @@ const ProtoLegacy = protoLegacy // extension fields at unmarshal time, but defers creating the message // structure until the extension is first accessed. const LazyUnmarshalExtensions = ProtoLegacy + +// ProtoLegacyWeak specifies whether to enable support for weak fields. +// This flag was split out of ProtoLegacy in preparation for removing +// support for weak fields (independent of the other protolegacy features). +const ProtoLegacyWeak = ProtoLegacy diff --git a/constraint/vendor/google.golang.org/protobuf/internal/genid/doc.go b/constraint/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd0121..d9b9d916a 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/constraint/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/constraint/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b4..f5ee7f5c2 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,59 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. 
const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_name protoreflect.Name = "api_level" + GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_fullname protoreflect.FullName = "pb.GoFeatures.api_level" + GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 + GoFeatures_ApiLevel_field_number protoreflect.FieldNumber = 2 + GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for pb.GoFeatures.APILevel. +const ( + GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel" + GoFeatures_APILevel_enum_name = "APILevel" +) + +// Enum values for pb.GoFeatures.APILevel. +const ( + GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0 + GoFeatures_API_OPEN_enum_value = 1 + GoFeatures_API_HYBRID_enum_value = 2 + GoFeatures_API_OPAQUE_enum_value = 3 +) + +// Full and short names for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix" + GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix" +) + +// Enum values for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3 +) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 ) diff --git a/constraint/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/constraint/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02ff..bef5a25fb 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/constraint/vendor/google.golang.org/protobuf/internal/genid/name.go b/constraint/vendor/google.golang.org/protobuf/internal/genid/name.go new file mode 100644 index 000000000..224f33930 --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/genid/name.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
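These pb.GoFeatures field and extension numbers are what the hand-rolled varint parsing in editions.go (earlier in this diff) consumes. As a standalone sketch of that wire-level round trip using only the public protowire helpers (field number 1 and the boolean value are illustrative, not the internal feature set):

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protowire"
)

func main() {
    // Encode field 1 as a varint, the same wire shape unmarshalGoFeature
    // consumes for legacy_unmarshal_json_enum.
    var b []byte
    b = protowire.AppendTag(b, 1, protowire.VarintType)
    b = protowire.AppendVarint(b, protowire.EncodeBool(true))

    // Decode it back with the matching Consume* helpers.
    num, typ, n := protowire.ConsumeTag(b)
    if n < 0 {
        panic(protowire.ParseError(n))
    }
    b = b[n:]
    if typ == protowire.VarintType {
        v, m := protowire.ConsumeVarint(b)
        if m < 0 {
            panic(protowire.ParseError(m))
        }
        fmt.Printf("field %d = %v\n", num, protowire.DecodeBool(v))
    }
}
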
+ +package genid + +const ( + NoUnkeyedLiteral_goname = "noUnkeyedLiteral" + NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral" + + BuilderSuffix_goname = "_builder" +) diff --git a/constraint/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/constraint/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b85..9404270de 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go new file mode 100644 index 000000000..6075d6f69 --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go @@ -0,0 +1,128 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "strconv" + "sync/atomic" + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (Export) UnmarshalField(msg any, fieldNum int32) { + UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum)) +} + +// Present checks the presence set for a certain field number (zero +// based, ordered by appearance in original proto file). part is +// a pointer to the correct element in the bitmask array, num is the +// field number unaltered. Example (field number 70 -> part = +// &m.XXX_presence[1], num = 70) +func (Export) Present(part *uint32, num uint32) bool { + // This hook will read an unprotected shadow presence set if + // we're unning under the race detector + raceDetectHookPresent(part, num) + return atomic.LoadUint32(part)&(1<<(num%32)) > 0 +} + +// SetPresent adds a field to the presence set. part is a pointer to +// the relevant element in the array and num is the field number +// unaltered. size is the number of fields in the protocol +// buffer. +func (Export) SetPresent(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) { + return + } + } +} + +// SetPresentNonAtomic is like SetPresent, but operates non-atomically. +// It is meant for use by builder methods, where the message is known not +// to be accessible yet by other goroutines. +func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + *part |= 1 << (num % 32) +} + +// ClearPresence removes a field from the presence set. part is a +// pointer to the relevant element in the presence array and num is +// the field number unaltered. 
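Present and SetPresent above (and ClearPresent just below) share one lock-free pattern: an atomic load of a 32-bit presence word plus a compare-and-swap retry loop that flips a single bit. The same pattern in isolation, with illustrative names and zero-based bit indices rather than the internal presence layout:

package main

import (
    "fmt"
    "sync/atomic"
)

// setBit follows the SetPresent pattern: atomically load the 32-bit word,
// then CAS the new bit in, retrying if another goroutine raced us.
func setBit(part *uint32, num uint32) {
    for {
        old := atomic.LoadUint32(part)
        if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
            return
        }
    }
}

// hasBit follows the Present pattern: a single atomic load plus a mask test.
func hasBit(part *uint32, num uint32) bool {
    return atomic.LoadUint32(part)&(1<<(num%32)) != 0
}

func main() {
    presence := make([]uint32, 2) // room for 64 presence bits
    const bit = 37                // lands in word 1, bit 5
    setBit(&presence[bit/32], bit)
    fmt.Println(hasBit(&presence[bit/32], bit), hasBit(&presence[0], 3)) // true false
}
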
+func (Export) ClearPresent(part *uint32, num uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookClearPresent(part, num) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) { + return + } + } +} + +// interfaceToPointer takes a pointer to an empty interface whose value is a +// pointer type, and converts it into a "pointer" that points to the same +// target +func interfaceToPointer(i *any) pointer { + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +func (p pointer) atomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) atomicSetPointer(q pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), q.p) +} + +// AtomicCheckPointerIsNil takes an interface (which is a pointer to a +// pointer) and returns true if the pointed-to pointer is nil (using an +// atomic load). This function is inlineable and, on x86, just becomes a +// simple load and compare. +func (Export) AtomicCheckPointerIsNil(ptr any) bool { + return interfaceToPointer(&ptr).atomicGetPointer().IsNil() +} + +// AtomicSetPointer takes two interfaces (first is a pointer to a pointer, +// second is a pointer) and atomically sets the second pointer into location +// referenced by first pointer. Unfortunately, atomicSetPointer() does not inline +// (even on x86), so this does not become a simple store on x86. +func (Export) AtomicSetPointer(dstPtr, valPtr any) { + interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr)) +} + +// AtomicLoadPointer loads the pointer at the location pointed at by src, +// and stores that pointer value into the location pointed at by dst. +func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) +} + +// AtomicInitializePointer makes ptr and dst point to the same value. +// +// If *ptr is a nil pointer, it sets *ptr = *dst. +// +// If *ptr is a non-nil pointer, it sets *dst = *ptr. +func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) { + if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) + } +} + +// MessageFieldStringOf returns the field formatted as a string, +// either as the field name if resolvable otherwise as a decimal string. +func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string { + fd := md.Fields().ByNumber(n) + if fd != nil { + return string(fd.Name()) + } + return strconv.Itoa(int(n)) +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/bitmap.go new file mode 100644 index 000000000..ea276547c --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/bitmap.go @@ -0,0 +1,34 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package impl + +// There is no additional data as we're not running under race detector. +type RaceDetectHookData struct{} + +// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away. 
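AtomicInitializePointer above has install-once semantics: if the target is still nil, publish the new pointer; otherwise read back whatever another goroutine already installed. A sketch of the same contract using the generic atomic.Pointer type instead of raw unsafe.Pointer (names are illustrative, not the internal API):

package main

import (
    "fmt"
    "sync/atomic"
)

type node struct{ n int }

// initOnce mirrors the AtomicInitializePointer contract: if p is still nil,
// install def and return it; otherwise return the already-installed value.
func initOnce(p *atomic.Pointer[node], def *node) *node {
    if p.CompareAndSwap(nil, def) {
        return def
    }
    return p.Load()
}

func main() {
    var p atomic.Pointer[node]
    a := initOnce(&p, &node{n: 1})
    b := initOnce(&p, &node{n: 2}) // second caller sees the first value
    fmt.Println(a.n, b.n)          // 1 1
}
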
+func (presence) raceDetectHookPresent(num uint32) {} +func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {} +func (presence) raceDetectHookClearPresent(num uint32) {} +func (presence) raceDetectHookAllocAndCopy(src presence) {} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) {} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookClearPresent(field *uint32, num uint32) {} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go new file mode 100644 index 000000000..e9a27583a --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go @@ -0,0 +1,126 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package impl + +// When running under race detector, we add a presence map of bytes, that we can access +// in the hook functions so that we trigger the race detection whenever we have concurrent +// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent +// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations. +type RaceDetectHookData struct { + shadowPresence *[]byte +} + +// Hooks for presence bitmap operations that allocate, read and write the shadowPresence +// using non-atomic operations. +func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) { + sp := make([]byte, size) + atomicStoreShadowPresence(&data.shadowPresence, &sp) +} + +func (p presence) raceDetectHookPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +func (p presence) raceDetectHookClearPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + + } +} + +// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at lazy and copies +// shadowPresence bytes from src to lazy. 
+func (p presence) raceDetectHookAllocAndCopy(q presence) { + sData := q.toRaceDetectData() + dData := p.toRaceDetectData() + if sData == nil { + return + } + srcSp := atomicLoadShadowPresence(&sData.shadowPresence) + if srcSp == nil { + atomicStoreShadowPresence(&dData.shadowPresence, nil) + return + } + n := len(*srcSp) + dSlice := make([]byte, n) + atomicStoreShadowPresence(&dData.shadowPresence, &dSlice) + for i := 0; i < n; i++ { + dSlice[i] = (*srcSp)[i] + } +} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookClearPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + } +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index f29e6a8fa..fe2c719ce 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { } return nil } + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + if mi.extensionOffset.IsValid() { e := p.Apply(mi.extensionOffset).Extensions() if err := mi.isInitExtensions(e); err != nil { @@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { if !f.isRequired && f.funcs.isInit == nil { continue } + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit != nil { + f.mi.init() + if f.mi.needsInitCheck { + if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() { + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + if !lazy.AllowedPartial() { + // Nothing to see here, it was checked on unmarshal + continue + } + mi.lazyUnmarshal(p, f.num) + } + if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil { + return err + } + } + } + continue + } + fptr := p.Apply(f.offset) if f.isPointer && fptr.Elem().IsNil() { if f.isRequired { diff --git 
a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a20..0d5b546e0 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. func (f *ExtensionField) Value() protoreflect.Value { diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e44..7c1f66c8c 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go new file mode 100644 index 000000000..76818ea25 --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go @@ -0,0 +1,264 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
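The new opaque field coders that begin here (appendOpaqueMessage and friends, below) pre-compute a submessage's size, emit it as the length prefix, then verify that the bytes actually written match, surfacing errors.MismatchedSizeCalculation otherwise. The same framing and cross-check, sketched with the public protowire/proto API against a well-known wrapper type (field number 1 is illustrative):

package main

import (
    "fmt"

    "google.golang.org/protobuf/encoding/protowire"
    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    msg := wrapperspb.String("hello")

    // Write tag, then the pre-computed size, then the body, and confirm the
    // measured length matches the calculation.
    calculated := proto.Size(msg)
    b := protowire.AppendTag(nil, 1, protowire.BytesType)
    b = protowire.AppendVarint(b, uint64(calculated))
    before := len(b)
    b, err := proto.MarshalOptions{}.MarshalAppend(b, msg)
    if err != nil {
        panic(err)
    }
    if measured := len(b) - before; measured != calculated {
        panic(fmt.Sprintf("size mismatch: calculated %d, wrote %d", calculated, measured))
    }
    fmt.Println("framed bytes:", len(b))
}
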
+ +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + mi := getMessageInfo(ft) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessage, + marshal: appendOpaqueMessage, + unmarshal: consumeOpaqueMessage, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroup, + marshal: appendOpaqueGroup, + unmarshal: consumeOpaqueGroup, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize +} + +func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + mp := p.AtomicGetPointer() + calculatedSize := f.mi.sizePointer(mp, opts) + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, mp, opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err +} + +func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error { + mp := p.AtomicGetPointer() + if mp.IsNil() { + return nil + } + return f.mi.checkInitializedPointer(mp) +} + +func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstmp := dst.AtomicGetPointer() + if dstmp.IsNil() { + dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts) +} + +func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts) +} + +func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts) + b = protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } 
+ o, e := f.mi.unmarshalPointer(b, mp, f.num, opts) + return o, e +} + +func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft)) + } + mt := ft.Elem().Elem() // *[]*T -> *T + mi := getMessageInfo(mt) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessageSlice, + marshal: appendOpaqueMessageSlice, + unmarshal: consumeOpaqueMessageSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroupSlice, + marshal: appendOpaqueGroupSlice, + unmarshal: consumeOpaqueGroupSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } + } + return b, nil +} + +func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error { + sp := p.AtomicGetPointer() + if sp.IsNil() { + return nil + } + s := sp.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + ds := dst.AtomicGetPointer() + if ds.IsNil() { + ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + for _, sp := range src.AtomicGetPointer().PointerSlice() { + dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + f.mi.mergePointer(dm, sp, opts) + ds.AppendPointerSlice(dm) + } +} + +func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func 
appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + out, err = f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + return out, err +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index fb35f0bae..229c69801 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO return 0 } n := 0 - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) @@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o if opts.Deterministic() { return appendMapDeterministic(b, mapv, mapi, f, opts) } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { var err error b = protowire.AppendVarint(b, f.wiretag) @@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { if !mi.needsInitCheck { return nil } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := pointerOfValue(iter.Value()) if err := mi.checkInitializedPointer(val); err != nil { @@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { } } } else { - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := mapi.conv.valConv.PBValueOf(iter.Value()) if err := mapi.valFuncs.isInit(val); err != nil { @@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), iter.Value()) } @@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) } @@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { val := reflect.New(f.ft.Elem().Elem()) if f.mi != nil { diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go deleted file mode 100644 index 
4b15493f2..000000000 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -package impl - -import "reflect" - -type mapIter struct { - v reflect.Value - keys []reflect.Value -} - -// mapRange provides a less-efficient equivalent to -// the Go 1.12 reflect.Value.MapRange method. -func mapRange(v reflect.Value) *mapIter { - return &mapIter{v: v} -} - -func (i *mapIter) Next() bool { - if i.keys == nil { - i.keys = i.v.MapKeys() - } else { - i.keys = i.keys[1:] - } - return len(i.keys) > 0 -} - -func (i *mapIter) Key() reflect.Value { - return i.keys[0] -} - -func (i *mapIter) Value() reflect.Value { - return i.v.MapIndex(i.keys[0]) -} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go deleted file mode 100644 index 0b31b66ea..000000000 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.12 -// +build go1.12 - -package impl - -import "reflect" - -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb73..111d95833 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -32,6 +32,10 @@ type coderMessageInfo struct { needsInitCheck bool isMessageSet bool numRequiredFields uint8 + + lazyOffset offset + presenceOffset offset + presenceSize presenceSize } type coderFieldInfo struct { @@ -45,12 +49,19 @@ type coderFieldInfo struct { tagsize int // size of the varint-encoded tag isPointer bool // true if IsNil may be called on the struct field isRequired bool // true if field is required + + isLazy bool + presenceIndex uint32 } +const noPresence = 0xffffffff + func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.sizecacheOffset = invalidOffset mi.unknownOffset = invalidOffset mi.extensionOffset = invalidOffset + mi.lazyOffset = invalidOffset + mi.presenceOffset = si.presenceOffset if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { mi.sizecacheOffset = si.sizecacheOffset @@ -107,12 +118,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { }, } case isOneof: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) case fd.IsWeak(): fieldOffset = si.weakOffset funcs = makeWeakMessageFieldCoder(fd) default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &preallocFields[i] @@ -127,6 +138,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { validation: newFieldValidationInfo(mi, si, fd, ft), isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), isRequired: fd.Cardinality() == protoreflect.Required, + + presenceIndex: noPresence, } mi.orderedCoderFields = 
append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf @@ -189,6 +202,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go new file mode 100644 index 000000000..f81d7d0db --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -0,0 +1,156 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) { + mi.sizecacheOffset = si.sizecacheOffset + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + mi.extensionOffset = si.extensionOffset + mi.lazyOffset = si.lazyOffset + mi.presenceOffset = si.presenceOffset + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + fieldOffset = offsetOf(fs) + case fd.IsWeak(): + fieldOffset = si.weakOffset + funcs = makeWeakMessageFieldCoder(fd) + case fd.Message() != nil && !fd.IsMap(): + fieldOffset = offsetOf(fs) + if fd.IsList() { + childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft) + } else { + childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft) + } + default: + fieldOffset = offsetOf(fs) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si.structInfo, fd, ft), + isPointer: (fd.Cardinality() == protoreflect.Repeated || + fd.Kind() == protoreflect.MessageKind || + fd.Kind() == protoreflect.GroupKind), + isRequired: fd.Cardinality() == protoreflect.Required, + presenceIndex: noPresence, + } + + // TODO: Use presence for all fields. + // + // In some cases, such as maps, presence means only "might be set" rather + // than "is definitely set", but every field should have a presence bit to + // permit us to skip over definitely-unset fields at marshal time. 
+ + var hasPresence bool + hasPresence, cf.isLazy = usePresenceForField(si, fd) + + if hasPresence { + cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) + } + + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si.structInfo) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num + }) + + var maxDense protoreflect.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) > len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return order.LegacyFieldOrder(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577bd..000000000 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e23..077712c2c 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/convert.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55a..f72ddd882 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 304244a65..e4580b3ac 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { return v } func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { - iter := mapRange(ms.v) + iter := ms.v.MapRange() for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() v := ms.valConv.PBValueOf(iter.Value()) diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/decode.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/decode.go index cda0520c2..e0dd21fa5 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions { AllowPartial: true, DiscardUnknown: o.DiscardUnknown(), Resolver: o.resolver, + + NoLazyDecoding: o.NoLazyDecoding(), } } @@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&protoiface.UnmarshalDiscardUnknown != 0 } -func (o unmarshalOptions) IsDefault() bool { - return o.flags == 0 && o.resolver == protoregistry.GlobalTypes +func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 } +func (o unmarshalOptions) Validated() bool { return o.flags&protoiface.UnmarshalValidated != 0 } +func (o unmarshalOptions) NoLazyDecoding() bool { + return o.flags&protoiface.UnmarshalNoLazyDecoding != 0 +} + +func (o unmarshalOptions) CanBeLazy() bool { + if o.resolver != protoregistry.GlobalTypes { + return false + } + // We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set + return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0 } var lazyUnmarshalOptions = unmarshalOptions{ resolver: protoregistry.GlobalTypes, - depth: protowire.DefaultRecursionLimit, + + flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated, + + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } + + lazyDecoding := LazyEnabled() // default + if opts.NoLazyDecoding() { + lazyDecoding = false // explicitly disabled + } + if mi.lazyOffset.IsValid() && lazyDecoding { + return mi.unmarshalPointerLazy(b, p, groupTag, opts) + } + return mi.unmarshalPointerEager(b, p, groupTag, opts) +} + +// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy. +// The corresponding function for Lazy is in google_lazy.go. +func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true var requiredMask uint64 var exts *map[int32]ExtensionField + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + start := len(b) for len(b) > 0 { // Parse the tag (field number and wire type). 
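The decode.go hunks above split unmarshalPointer into a lazy and an eager path and plumb an explicit NoLazyDecoding flag through to the exported proto.UnmarshalOptions. A caller-side sketch of forcing eager decoding (the structpb message is illustrative; NoLazyDecoding is the exported field the Options() mapping above populates):

package main

import (
    "fmt"

    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/structpb"
)

func main() {
    src, err := structpb.NewStruct(map[string]any{"replicas": 3})
    if err != nil {
        panic(err)
    }
    wire, err := proto.Marshal(src)
    if err != nil {
        panic(err)
    }

    var dst structpb.Struct
    err = proto.UnmarshalOptions{
        DiscardUnknown: true,
        NoLazyDecoding: true, // force the eager path even for lazy-capable messages
    }.Unmarshal(wire, &dst)
    if err != nil {
        panic(err)
    }
    fmt.Println(dst.GetFields()["replicas"].GetNumberValue()) // 3
}
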
@@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if f.funcs.isInit != nil && !o.initialized { initialized = false } + + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: // Possible extension. if exts == nil && mi.extensionOffset.IsValid() { @@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, errUnknown } if flags.LazyUnmarshalExtensions { - if opts.IsDefault() && x.canLazy(xt) { + if opts.CanBeLazy() && x.canLazy(xt) { out, valid := skipExtension(b, xi, num, wtyp, opts) switch valid { case ValidationValid: @@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp if n < 0 { return out, ValidationUnknown } + + if opts.Validated() { + out.initialized = true + out.n = n + return out, ValidationValid + } + out, st := xi.validation.mi.validate(v, 0, opts) out.n = n return out, st diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/encode.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd21224..b2e212291 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,8 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/internal/protolazy" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -71,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int e := p.Apply(mi.extensionOffset).Extensions() size += mi.sizeExtensions(e, opts) } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.size == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + + if f.isLazy && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + size += (*lazy).SizeField(uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + size += f.funcs.size(fptr, f, opts) + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -134,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, err } } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.marshal == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + if f.isLazy { + // Be careful, this field needs to be read atomically, like for a get + if f.isPointer && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + b, _ = (*lazy).AppendField(b, uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + continue + } else if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = 
f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -163,6 +233,14 @@ func fullyLazyExtensions(opts marshalOptions) bool { return opts.flags&piface.MarshalDeterministic == 0 } +// lazyFields returns true if we should attempt to keep fields lazy over size and marshal. +func lazyFields(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // fields to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/equal.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 000000000..9f6c32a7d --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. 
+ emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have, need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Cant be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalImpl instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalImpl will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. +func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) 
+ y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/lazy.go new file mode 100644 index 000000000..e8fb6c35b --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -0,0 +1,433 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math/bits" + "os" + "reflect" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/protolazy" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +var enableLazy int32 = func() int32 { + if os.Getenv("GOPROTODEBUG") == "nolazy" { + return 0 + } + return 1 +}() + +// EnableLazyUnmarshal enables lazy unmarshaling. +func EnableLazyUnmarshal(enable bool) { + if enable { + atomic.StoreInt32(&enableLazy, 1) + return + } + atomic.StoreInt32(&enableLazy, 0) +} + +// LazyEnabled reports whether lazy unmarshalling is currently enabled. +func LazyEnabled() bool { + return atomic.LoadInt32(&enableLazy) != 0 +} + +// UnmarshalField unmarshals a field in a message. +func UnmarshalField(m interface{}, num protowire.Number) { + switch m := m.(type) { + case *messageState: + m.messageInfo().lazyUnmarshal(m.pointer(), num) + case *messageReflectWrapper: + m.messageInfo().lazyUnmarshal(m.pointer(), num) + default: + panic(fmt.Sprintf("unsupported wrapper type %T", m)) + } +} + +func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) { + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + if f == nil { + panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num)) + } + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num)) + if !found && multipleEntries == nil { + panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num)) + } + // The actual pointer in the message can not be set until the whole struct is filled in, otherwise we will have races. + // Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil. + fp := pointerOfValue(reflect.New(f.ft)) + if multipleEntries != nil { + for _, entry := range multipleEntries { + mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags()) + } + } else { + mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags()) + } + p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem()) +} + +func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error { + opts := lazyUnmarshalOptions + opts.flags |= flags + for len(b) > 0 { + // Parse the tag (field number and wire type). 
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return errors.New("invalid wire data") + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return errors.New("invalid wire data") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + if num == f.num { + o, err := f.funcs.unmarshal(b, p, wtyp, f, opts) + if err == nil { + b = b[o.n:] + continue + } + if err != errUnknown { + return err + } + } + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return errors.New("invalid wire data") + } + b = b[n:] + } + return nil +} + +func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + fmi := f.validation.mi + if fmi == nil { + fd := mi.Desc.Fields().ByNumber(f.num) + if fd == nil || !fd.IsWeak() { + return out, ValidationUnknown + } + messageName := fd.Message().FullName() + messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + if err != nil { + return out, ValidationUnknown + } + var ok bool + fmi, ok = messageType.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + } + fmi.init() + switch f.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationWrongWireType + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationInvalid + } + out, st := fmi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationWrongWireType + } + out, st := fmi.validate(b, f.num, opts) + return out, st + default: + return out, ValidationUnknown + } +} + +// unmarshalPointerLazy is similar to unmarshalPointerEager, but it +// specifically handles lazy unmarshalling. it expects lazyOffset and +// presenceOffset to both be valid. +func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true + var requiredMask uint64 + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + var lazyIndex []protolazy.IndexEntry + var lastNum protowire.Number + outOfOrder := false + lazyDecode := false + presence = p.Apply(mi.presenceOffset).PresenceInfo() + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + if !presence.AnyPresent(mi.presenceSize) { + if opts.CanBeLazy() { + // If the message contains existing data, we need to merge into it. + // Lazy unmarshaling doesn't merge, so only enable it when the + // message is empty (has no presence bitmap). + lazyDecode = true + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + (*lazy).SetUnmarshalFlags(opts.flags) + if !opts.AliasBuffer() { + // Make a copy of the buffer for lazy unmarshaling. + // Set the AliasBuffer flag so recursive unmarshal + // operations reuse the copy. + b = append([]byte{}, b...) + opts.flags |= piface.UnmarshalAliasBuffer + } + (*lazy).SetBuffer(b) + } + } + // Track special handling of lazy fields. + // + // In the common case, all fields are lazyValidateOnly (and lazyFields remains nil). + // In the event that validation for a field fails, this map tracks handling of the field. 
+ type lazyAction uint8 + const ( + lazyValidateOnly lazyAction = iota // validate the field only + lazyUnmarshalNow // eagerly unmarshal the field + lazyUnmarshalLater // unmarshal the field after the message is fully processed + ) + var lazyFields map[*coderFieldInfo]lazyAction + var exts *map[int32]ExtensionField + start := len(b) + pos := 0 + for len(b) > 0 { + // Parse the tag (field number and wire type). + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errors.New("invalid field number") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errors.New("mismatching end group marker") + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + discardUnknown := false + Field: + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + if f.isLazy && lazyDecode { + switch { + case lazyFields == nil || lazyFields[f] == lazyValidateOnly: + // Attempt to validate this field and leave it for later lazy unmarshaling. + o, valid := mi.skipField(b, f, wtyp, opts) + switch valid { + case ValidationValid: + // Skip over the valid field and continue. + err = nil + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + requiredMask |= f.validation.requiredBit + if !o.initialized { + initialized = false + } + n = o.n + break Field + case ValidationInvalid: + return out, errors.New("invalid proto wire format") + case ValidationWrongWireType: + break Field + case ValidationUnknown: + if lazyFields == nil { + lazyFields = make(map[*coderFieldInfo]lazyAction) + } + if presence.Present(f.presenceIndex) { + // We were unable to determine if the field is valid or not, + // and we've already skipped over at least one instance of this + // field. Clear the presence bit (so if we stop decoding early, + // we don't leave a partially-initialized field around) and flag + // the field for unmarshaling before we return. + presence.ClearPresent(f.presenceIndex) + lazyFields[f] = lazyUnmarshalLater + discardUnknown = true + break Field + } else { + // We were unable to determine if the field is valid or not, + // but this is the first time we've seen it. Flag it as needing + // eager unmarshaling and fall through to the eager unmarshal case below. + lazyFields[f] = lazyUnmarshalNow + } + } + case lazyFields[f] == lazyUnmarshalLater: + // This field will be unmarshaled in a separate pass below. + // Skip over it here. + discardUnknown = true + break Field + default: + // Eagerly unmarshal the field. 
+ } + } + if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) { + if p.Apply(f.offset).AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(p, f.num) + } + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) + } + } + b = b[n:] + end := start - len(b) + if lazyDecode && f != nil && f.isLazy { + if num != lastNum { + lazyIndex = append(lazyIndex, protolazy.IndexEntry{ + FieldNum: uint32(num), + Start: uint32(pos), + End: uint32(end), + }) + } else { + i := len(lazyIndex) - 1 + lazyIndex[i].End = uint32(end) + lazyIndex[i].MultipleContiguous = true + } + } + if num < lastNum { + outOfOrder = true + } + pos = end + lastNum = num + } + if groupTag != 0 { + return out, errors.New("missing end group marker") + } + if lazyFields != nil { + // Some fields failed validation, and now need to be unmarshaled. 
+ for f, action := range lazyFields { + if action != lazyUnmarshalLater { + continue + } + initialized = false + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil { + return out, err + } + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + } + if lazyDecode { + if outOfOrder { + sort.Slice(lazyIndex, func(i, j int) bool { + return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum || + (lazyIndex[i].FieldNum == lazyIndex[j].FieldNum && + lazyIndex[i].Start < lazyIndex[j].Start) + }) + } + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + + (*lazy).SetIndex(lazyIndex) + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee6..b6849d669 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/merge.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/merge.go index 7e65f64f2..8ffdce67d 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { if src.IsNil() { return } + + var presenceSrc presence + var presenceDst presence + if mi.presenceOffset.IsValid() { + presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo() + presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo() + } + for _, f := range mi.orderedCoderFields { if f.funcs.merge == nil { continue } sfptr := src.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presenceSrc.Present(f.presenceIndex) { + continue + } + dfptr := dst.Apply(f.offset) + if f.isLazy { + if sfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(src, f.num) + } + if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(dst, f.num) + } + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + continue + } + if f.isPointer && sfptr.Elem().IsNil() { continue } diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/message.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d45..d1f79b422 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying 
message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. @@ -79,6 +79,9 @@ func (mi *MessageInfo) initOnce() { if mi.initDone == 1 { return } + if opaqueInitHook(mi) { + return + } t := mi.GoReflectType if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { @@ -133,6 +136,9 @@ type structInfo struct { extensionOffset offset extensionType reflect.Type + lazyOffset offset + presenceOffset offset + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField oneofsByName map[protoreflect.Name]reflect.StructField oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber @@ -145,6 +151,8 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, + lazyOffset: invalidOffset, + presenceOffset: invalidOffset, fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, oneofsByName: map[protoreflect.Name]reflect.StructField{}, @@ -157,24 +165,28 @@ fieldLoop: switch f := t.Field(i); f.Name { case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { - si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f, mi.Exporter) + si.weakOffset = offsetOf(f) si.weakType = f.Type } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { - si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownOffset = offsetOf(f) si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { - si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionOffset = offsetOf(f) si.extensionType = f.Type } + case "lazyFields", "XXX_lazyUnmarshalInfo": + si.lazyOffset = offsetOf(f) + case "XXX_presence": + si.presenceOffset = offsetOf(f) default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go new file mode 100644 index 000000000..d8dcd7886 --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -0,0 +1,632 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "reflect" + "strings" + "sync/atomic" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type opaqueStructInfo struct { + structInfo +} + +// isOpaque determines whether a protobuf message type is on the Opaque API. It +// checks whether the type is a Go struct that protoc-gen-go would generate. +// +// This function only detects newly generated messages from the v2 +// implementation of protoc-gen-go. 
It is unable to classify generated messages +// that are too old or those that are generated by a different generator +// such as protoc-gen-gogo. +func isOpaque(t reflect.Type) bool { + // The current detection mechanism is to simply check the first field + // for a struct tag with the "protogen" key. + if t.Kind() == reflect.Struct && t.NumField() > 0 { + pgt := t.Field(0).Tag.Get("protogen") + return strings.HasPrefix(pgt, "opaque.") + } + return false +} + +func opaqueInitHook(mi *MessageInfo) bool { + mt := mi.GoReflectType.Elem() + si := opaqueStructInfo{ + structInfo: mi.makeStructInfo(mt), + } + + if !isOpaque(mt) { + return false + } + + defer atomic.StoreUint32(&mi.initDone, 1) + + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} + fds := mi.Desc.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + var fi fieldInfo + usePresence, _ := usePresenceForField(si, fd) + + switch { + case fd.IsWeak(): + // Weak fields are no different for opaque. + fi = fieldInfoForWeakMessage(fd, si.weakOffset) + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneofs are no different for opaque. + fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = mi.fieldInfoForMapOpaque(si, fd, fs) + case fd.IsList() && fd.Message() == nil && usePresence: + fi = mi.fieldInfoForScalarListOpaque(si, fd, fs) + case fd.IsList() && fd.Message() == nil: + // Proto3 lists without presence can use same access methods as open + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsList() && usePresence: + fi = mi.fieldInfoForMessageListOpaque(si, fd, fs) + case fd.IsList(): + // Proto3 opaque messages that does not need presence bitmap. 
+ // Different representation than open struct, but same logic + fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs) + case fd.Message() != nil && usePresence: + fi = mi.fieldInfoForMessageOpaque(si, fd, fs) + case fd.Message() != nil: + // Proto3 messages without presence can use same access methods as open + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = mi.fieldInfoForScalarOpaque(si, fd, fs) + } + mi.fields[fd.Number()] = &fi + } + mi.oneofs = map[protoreflect.Name]*oneofInfo{} + for i := 0; i < mi.Desc.Oneofs().Len(); i++ { + od := mi.Desc.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter) + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } + + mi.makeExtensionFieldsFunc(mt, si.structInfo) + mi.makeUnknownFieldsFunc(mt, si.structInfo) + mi.makeOpaqueCoderMethods(mt, si) + mi.makeFieldTypes(si.structInfo) + + return true +} + +func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fd := od.Fields().Get(0) + index, _ := presenceIndex(mi.Desc, fd) + oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } + if !mi.present(p, index) { + return 0 + } + return od.Fields().Get(0).Number() + } + return oi + } + // Dispatch to non-opaque oneof implementation for non-synthetic oneofs. + return makeOneofInfo(od, si, x) +} + +func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid type: got %v, want map kind", ft)) + } + fieldOffset := offsetOf(fs) + conv := NewConverter(ft, fd) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + // Don't bother checking presence bits, since we need to + // look at the map length even if the presence bit is set. 
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("invalid value: setting map field to read-only value")) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(pv) + }, + mutable: func(p pointer) protoreflect.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } + mi.setPresent(p, index) + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) protoreflect.Value { + mi.setPresent(p, index) + return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type)) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + fieldNumber := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + if !mi.present(p, index) { + return false + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + // Lazily unmarshal this field. 
+ mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } + rv := sp.AsValueOf(fs.Type.Elem()) + return rv.Elem().Len() > 0 + }, + clear: func(p pointer) { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + rv := sp.AsValueOf(fs.Type.Elem()) + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + if !mi.present(p, index) { + return conv.Zero() + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } + rv := sp.AsValueOf(fs.Type.Elem()) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + rv := sp.AsValueOf(fs.Type.Elem()) + val := conv.GoValueOf(v) + if val.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } else { + rv.Elem().Set(val.Elem()) + } + }, + mutable: func(p pointer) protoreflect.Value { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + if mi.present(p, index) { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } else { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + } + rv := sp.AsValueOf(fs.Type.Elem()) + return conv.PBValueOf(rv) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + return false + } + rv := sp.AsValueOf(fs.Type.Elem()) + return rv.Elem().Len() > 0 + }, + clear: func(p pointer) { + sp := p.Apply(fieldOffset).AtomicGetPointer() + if !sp.IsNil() { + rv := sp.AsValueOf(fs.Type.Elem()) + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + return conv.Zero() + } + rv := sp.AsValueOf(fs.Type.Elem()) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(reflect.New(fs.Type.Elem())) + } + val := conv.GoValueOf(v) + if val.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } else { + rv.Elem().Set(val.Elem()) + } + }, + mutable: func(p pointer) protoreflect.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(reflect.New(fs.Type.Elem())) + } + return conv.PBValueOf(rv) + }, + newField: func() 
protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() { + nullable = true + } + deref := false + if nullable && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + deref = true + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + var getter func(p pointer) protoreflect.Value + if !nullable { + getter = getterForDirectScalar(fd, fs, conv, fieldOffset) + } else { + getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset) + } + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + if nullable { + return mi.present(p, index) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + if nullable { + mi.clearPresent(p, index) + } + // This is only valuable for bytes and strings, but we do it unconditionally. + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: getter, + // TODO: Implement unsafe fast path for set? + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if deref { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + + rv.Set(conv.GoValueOf(v)) + if nullable && rv.Kind() == reflect.Slice && rv.IsNil() { + rv.Set(emptyBytes) + } + if nullable { + mi.setPresent(p, index) + } + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + fieldNumber := fd.Number() + elemType := fs.Type.Elem() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + return mi.present(p, index) + }, + clear: func(p pointer) { + mi.clearPresent(p, index) + p.Apply(fieldOffset).AtomicSetNilPointer() + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + fp := p.Apply(fieldOffset) + mp := fp.AtomicGetPointer() + if mp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + mp = fp.AtomicGetPointer() + } + rv := mp.AsValueOf(elemType) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + val := pointerOfValue(conv.GoValueOf(v)) + if val.IsNil() { + panic("invalid nil pointer") + } + p.Apply(fieldOffset).AtomicSetPointer(val) + mi.setPresent(p, index) + }, + mutable: func(p pointer) protoreflect.Value { + fp := p.Apply(fieldOffset) + mp := fp.AtomicGetPointer() + if mp.IsNil() { + if mi.present(p, index) { + // Lazily unmarshal this field. 
+ mi.lazyUnmarshal(p, fieldNumber) + mp = fp.AtomicGetPointer() + } else { + mp = pointerOfValue(conv.GoValueOf(conv.New())) + fp.AtomicSetPointer(mp) + mi.setPresent(p, index) + } + } + return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem())) + }, + newMessage: func() protoreflect.Message { + return conv.New().Message() + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +// A presenceList wraps a List, updating presence bits as necessary when the +// list contents change. +type presenceList struct { + pvalueList + setPresence func(bool) +} +type pvalueList interface { + protoreflect.List + //Unwrapper +} + +func (list presenceList) Append(v protoreflect.Value) { + list.pvalueList.Append(v) + list.setPresence(true) +} +func (list presenceList) Truncate(i int) { + list.pvalueList.Truncate(i) + list.setPresence(i > 0) +} + +// presenceIndex returns the index to pass to presence functions. +// +// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields. +func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) { + found := false + var index, numIndices uint32 + for i := 0; i < md.Fields().Len(); i++ { + f := md.Fields().Get(i) + if f == fd { + found = true + index = numIndices + } + if f.ContainingOneof() == nil || isLastOneofField(f) { + numIndices++ + } + } + if !found { + panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName())) + } + return index, presenceSize(numIndices) +} + +func isLastOneofField(fd protoreflect.FieldDescriptor) bool { + fields := fd.ContainingOneof().Fields() + return fields.Get(fields.Len()-1) == fd +} + +func (mi *MessageInfo) setPresent(p pointer, index uint32) { + p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize) +} + +func (mi *MessageInfo) clearPresent(p pointer, index uint32) { + p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index) +} + +func (mi *MessageInfo) present(p pointer, index uint32) bool { + return p.Apply(mi.presenceOffset).PresenceInfo().Present(index) +} + +// usePresenceForField implements the somewhat intricate logic of when +// the presence bitmap is used for a field. The main logic is that a +// field that is optional or that can be lazy will use the presence +// bit, but for proto2, also maps have a presence bit. It also records +// if the field can ever be lazy, which is true if we have a +// lazyOffset and the field is a message or a slice of messages. A +// field that is lazy will always need a presence bit. Oneofs are not +// lazy and do not use presence, unless they are a synthetic oneof, +// which is a proto3 optional field. For proto3 optionals, we use the +// presence and they can also be lazy when applicable (a message). +func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy() + + // Non-oneof scalar fields with explicit field presence use the presence array. 
+ usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic()) + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + return false, false + case fd.IsWeak(): + return false, false + case fd.IsMap(): + return false, false + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + return hasLazyField, hasLazyField + default: + return usesPresenceArray || (hasLazyField && fd.HasPresence()), false + } +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go new file mode 100644 index 000000000..a69825699 --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go @@ -0,0 +1,132 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if fd.Kind() == protoreflect.EnumKind { + // Enums for nullable opaque types. + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bool() + return protoreflect.ValueOfBool(*x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32() + return protoreflect.ValueOfInt32(*x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32() + return protoreflect.ValueOfUint32(*x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64() + return protoreflect.ValueOfInt64(*x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64() + return protoreflect.ValueOfUint64(*x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32() + return protoreflect.ValueOfFloat32(*x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64() + return protoreflect.ValueOfFloat64(*x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + if len(**x) 
== 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(**x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfString(**x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index ecb4623d7..31c19b54f 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -205,6 +205,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { case fd.IsList(): if fd.Enum() != nil || fd.Message() != nil { ft = fs.Type.Elem() + + if ft.Kind() == reflect.Slice { + ft = ft.Elem() + } + } isMessage = fd.Message() != nil case fd.Enum() != nil: diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 986322b19..3cd1fbc21 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -76,7 +76,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, isMessage := fd.Message() != nil // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ // NOTE: The logic below intentionally assumes that oneof fields are // well-formatted. That is, the oneof interface never contains a @@ -152,7 +152,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -205,7 +205,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(reflect.PtrTo(ft), fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -256,6 +256,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, ft := fs.Type nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + var getter func(p pointer) protoreflect.Value if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { // This never occurs for generated message types. 
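// ---------------------------------------------------------------------------
// Editorial note (illustrative sketch, not part of the vendored diff): the
// fieldInfoForScalar hunk below stops building a generic reflect-based get
// closure and instead selects a kind-specialised getter once, when the field
// info is constructed (getterForNullableScalar / getterForDirectScalar in the
// generated *_gen.go files further down). The real getters go further and read
// through typed pointer offsets; this sketch only illustrates the
// decide-once-then-reuse-the-closure aspect, with hypothetical names.
package getterpick

import "reflect"

type getter func(v reflect.Value) any

// makeGetter resolves the per-kind branch a single time so the hot path just
// invokes the chosen closure instead of re-deciding the kind on every access.
func makeGetter(kind reflect.Kind) getter {
	switch kind {
	case reflect.Int32, reflect.Int64:
		return func(v reflect.Value) any { return v.Int() }
	case reflect.String:
		return func(v reflect.Value) any { return v.String() }
	case reflect.Bool:
		return func(v reflect.Value) any { return v.Bool() }
	default:
		return func(v reflect.Value) any { return v.Interface() }
	}
}
// ---------------------------------------------------------------------------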
@@ -268,19 +269,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + + // Generate specialized getter functions to avoid going through reflect.Value + if nullable { + getter = getterForNullableScalar(fd, fs, conv, fieldOffset) + } else { + getter = getterForDirectScalar(fd, fs, conv, fieldOffset) + } - // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { if p.IsNil() { return false } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable { - return !rv.IsNil() + return !p.Apply(fieldOffset).Elem().IsNil() } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() switch rv.Kind() { case reflect.Bool: return rv.Bool() @@ -300,21 +307,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) protoreflect.Value { - if p.IsNil() { - return conv.Zero() - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if nullable { - if rv.IsNil() { - return conv.Zero() - } - if rv.Kind() == reflect.Ptr { - rv = rv.Elem() - } - } - return conv.PBValueOf(rv) - }, + get: getter, + // TODO: Implement unsafe fast path for set? set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable && rv.Kind() == reflect.Ptr { @@ -339,7 +333,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacy { + if !flags.ProtoLegacyWeak { panic("no support for proto1 weak fields") } @@ -416,7 +410,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -425,7 +419,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() != reflect.Ptr { - return !isZero(rv) + return !rv.IsZero() } return !rv.IsNil() }, @@ -472,7 +466,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -485,7 +479,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } } else { fs := si.oneofsByName[od.Name()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -503,41 +497,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } return oi } - -// isZero is identical to reflect.Value.IsZero. -// TODO: Remove this when Go1.13 is the minimally supported Go version. 
-func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - c := v.Complex() - return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return v.IsNil() - case reflect.String: - return v.Len() == 0 - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - default: - panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) - } -} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go new file mode 100644 index 000000000..af5e063a1 --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go @@ -0,0 +1,273 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if fd.Kind() == protoreflect.EnumKind { + elemType := fs.Type.Elem() + // Enums for nullable types. 
+ return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType) + if rv.IsNil() { + return conv.Zero() + } + return conv.PBValueOf(rv.Elem()) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).BoolPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfBool(**x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfInt32(**x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfUint32(**x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfInt64(**x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfUint64(**x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfFloat32(**x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfFloat64(**x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + if len(**x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(**x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfString(**x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + if len(*x) == 0 { + return conv.Zero() + } + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} + +func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if fd.Kind() == protoreflect.EnumKind { + // Enums for non nullable types. 
+ return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bool() + return protoreflect.ValueOfBool(*x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32() + return protoreflect.ValueOfInt32(*x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32() + return protoreflect.ValueOfUint32(*x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64() + return protoreflect.ValueOfInt64(*x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64() + return protoreflect.ValueOfUint64(*x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32() + return protoreflect.ValueOfFloat32(*x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64() + return protoreflect.ValueOfFloat64(*x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).String() + if len(*x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).String() + return protoreflect.ValueOfString(*x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a2..000000000 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. 
-func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
-// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() *map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
- in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d8..6bed45e35 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,15 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( "reflect" "sync/atomic" "unsafe" + + "google.golang.org/protobuf/internal/protolazy" ) const UnsafeEnabled = true @@ -23,7 +22,7 @@ type Pointer unsafe.Pointer type offset uintptr // offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { +func offsetOf(f reflect.StructField) offset { return offset(f.Offset) } @@ -114,6 +113,13 @@ func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } +func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo { + return (**protolazy.XXX_lazyUnmarshalInfo)(p.p) +} + +func (p pointer) PresenceInfo() presence { + return presence{P: p.p} +} func (p pointer) Elem() pointer { return pointer{p: *(*unsafe.Pointer)(p.p)} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go new file mode 100644 index 000000000..38aa7b7dc --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go @@ -0,0 +1,42 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
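// Editorial sketch (not part of the vendored patch): the AtomicSetPointerIfNil
// helper added in pointer_unsafe_opaque.go below is the usual compare-and-swap
// "first writer wins" initialization on a raw unsafe.Pointer. The same pattern
// with the typed sync/atomic API, using a hypothetical `state` type:

package main

import (
	"fmt"
	"sync/atomic"
)

type state struct{ n int }

var cached atomic.Pointer[state]

// getOrInit publishes v only if nothing is published yet; otherwise it
// returns whatever another goroutine already won with.
func getOrInit(v *state) *state {
	if cached.CompareAndSwap(nil, v) {
		return v
	}
	return cached.Load()
}

func main() {
	a, b := &state{n: 1}, &state{n: 2}
	fmt.Println(getOrInit(a).n, getOrInit(b).n) // 1 1
}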
+ +package impl + +import ( + "sync/atomic" + "unsafe" +) + +func (p pointer) AtomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) AtomicSetPointer(v pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), v.p) +} + +func (p pointer) AtomicSetNilPointer() { + atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil)) +} + +func (p pointer) AtomicSetPointerIfNil(v pointer) pointer { + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) { + return v + } + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +type atomicV1MessageInfo struct{ p Pointer } + +func (mi *atomicV1MessageInfo) Get() Pointer { + return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p))) +} + +func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer { + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) { + return p + } + return mi.Get() +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/presence.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/presence.go new file mode 100644 index 000000000..914cb1ded --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/presence.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync/atomic" + "unsafe" +) + +// presenceSize represents the size of a presence set, which should be the largest index of the set+1 +type presenceSize uint32 + +// presence is the internal representation of the bitmap array in a generated protobuf +type presence struct { + // This is a pointer to the beginning of an array of uint32 + P unsafe.Pointer +} + +func (p presence) toElem(num uint32) (ret *uint32) { + const ( + bitsPerByte = 8 + siz = unsafe.Sizeof(*ret) + ) + // p.P points to an array of uint32, num is the bit in this array that the + // caller wants to check/manipulate. Calculate the index in the array that + // contains this specific bit. E.g.: 76 / 32 = 2 (integer division). + offset := uintptr(num) / (siz * bitsPerByte) * siz + return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset)) +} + +// Present checks for the presence of a specific field number in a presence set. +func (p presence) Present(num uint32) bool { + if p.P == nil { + return false + } + return Export{}.Present(p.toElem(num), num) +} + +// SetPresent adds presence for a specific field number in a presence set. +func (p presence) SetPresent(num uint32, size presenceSize) { + Export{}.SetPresent(p.toElem(num), num, uint32(size)) +} + +// SetPresentUnatomic adds presence for a specific field number in a presence set without using +// atomic operations. Only to be called during unmarshaling. +func (p presence) SetPresentUnatomic(num uint32, size presenceSize) { + Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size)) +} + +// ClearPresent removes presence for a specific field number in a presence set. +func (p presence) ClearPresent(num uint32) { + Export{}.ClearPresent(p.toElem(num), num) +} + +// LoadPresenceCache (together with PresentInCache) allows for a +// cached version of checking for presence without re-reading the word +// for every field. It is optimized for efficiency and assumes no +// simltaneous mutation of the presence set (or at least does not have +// a problem with simultaneous mutation giving inconsistent results). 
+func (p presence) LoadPresenceCache() (current uint32) { + if p.P == nil { + return 0 + } + return atomic.LoadUint32((*uint32)(p.P)) +} + +// PresentInCache reads presence from a cached word in the presence +// bitmap. It caches up a new word if the bit is outside the +// word. This is for really fast iteration through bitmaps in cases +// where we either know that the bitmap will not be altered, or we +// don't care about inconsistencies caused by simultaneous writes. +func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool { + if num/32 != *cachedElement { + o := uintptr(num/32) * unsafe.Sizeof(uint32(0)) + q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o)) + *current = atomic.LoadUint32(q) + *cachedElement = num / 32 + } + return (*current & (1 << (num % 32))) > 0 +} + +// AnyPresent checks if any field is marked as present in the bitmap. +func (p presence) AnyPresent(size presenceSize) bool { + n := uintptr((size + 31) / 32) + for j := uintptr(0); j < n; j++ { + o := j * unsafe.Sizeof(uint32(0)) + q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o)) + b := atomic.LoadUint32(q) + if b > 0 { + return true + } + } + return false +} + +// toRaceDetectData finds the preceding RaceDetectHookData in a +// message by using pointer arithmetic. As the type of the presence +// set (bitmap) varies with the number of fields in the protobuf, we +// can not have a struct type containing the array and the +// RaceDetectHookData. instead the RaceDetectHookData is placed +// immediately before the bitmap array, and we find it by walking +// backwards in the struct. +// +// This method is only called from the race-detect version of the code, +// so RaceDetectHookData is never an empty struct. +func (p presence) toRaceDetectData() *RaceDetectHookData { + var template struct { + d RaceDetectHookData + a [1]uint32 + } + o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o)) +} + +func atomicLoadShadowPresence(p **[]byte) *[]byte { + return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreShadowPresence(p **[]byte, v *[]byte) { + atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v)) +} + +// findPointerToRaceDetectData finds the preceding RaceDetectHookData +// in a message by using pointer arithmetic. For the methods called +// directy from generated code, we don't have a pointer to the +// beginning of the presence set, but a pointer inside the array. As +// we know the index of the bit we're manipulating (num), we can +// calculate which element of the array ptr is pointing to. With that +// information we find the preceding RaceDetectHookData and can +// manipulate the shadow bitmap. +// +// This method is only called from the race-detect version of the +// code, so RaceDetectHookData is never an empty struct. 
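// Editorial sketch (not part of the vendored patch): the presence helpers above
// treat the presence set as a flat array of uint32 words, so field-presence bit
// num lives in word num/32 at bit num%32 (toElem turns that into a byte offset).
// The same arithmetic on an ordinary slice:

package main

import "fmt"

func main() {
	bitmap := make([]uint32, 4) // room for 128 presence bits

	set := func(num uint32) { bitmap[num/32] |= 1 << (num % 32) }
	has := func(num uint32) bool { return bitmap[num/32]&(1<<(num%32)) != 0 }

	set(76)                      // 76/32 = 2, bit 76%32 = 12, matching the example in toElem
	fmt.Println(has(76), has(5)) // true false
}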
+func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData { + var template struct { + d RaceDetectHookData + a [1]uint32 + } + o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0)) + return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o)) +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/impl/validate.go b/constraint/vendor/google.golang.org/protobuf/internal/impl/validate.go index a24e6bbd7..b534a3d6d 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -37,6 +37,10 @@ const ( // ValidationValid indicates that unmarshaling the message will succeed. ValidationValid + + // ValidationWrongWireType indicates that a validated field does not have + // the expected wire type. + ValidationWrongWireType ) func (v ValidationStatus) String() string { @@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage + + if ft.Kind() == reflect.Ptr { + // Repeated opaque message fields are *[]*T. + ft = ft.Elem() + } + if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } case protoreflect.GroupKind: vi.typ = validationTypeGroup + + if ft.Kind() == reflect.Ptr { + // Repeated opaque message fields are *[]*T. + ft = ft.Elem() + } + if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } diff --git a/constraint/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/constraint/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go new file mode 100644 index 000000000..82e5cab4a --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go @@ -0,0 +1,364 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Helper code for parsing a protocol buffer + +package protolazy + +import ( + "errors" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" +) + +// BufferReader is a structure encapsulating a protobuf and a current position +type BufferReader struct { + Buf []byte + Pos int +} + +// NewBufferReader creates a new BufferRead from a protobuf +func NewBufferReader(buf []byte) BufferReader { + return BufferReader{Buf: buf, Pos: 0} +} + +var errOutOfBounds = errors.New("protobuf decoding: out of bounds") +var errOverflow = errors.New("proto: integer overflow") + +func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) { + i := b.Pos + l := len(b.Buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + v := b.Buf[i] + i++ + x |= (uint64(v) & 0x7F) << shift + if v < 0x80 { + b.Pos = i + return + } + } + + // The number is too large to represent in a 64-bit value. 
+ err = errOverflow + return +} + +// decodeVarint decodes a varint at the current position +func (b *BufferReader) DecodeVarint() (x uint64, err error) { + i := b.Pos + buf := b.Buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + b.Pos++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return b.DecodeVarintSlow() + } + + var v uint64 + // we already checked the first byte + x = uint64(buf[i]) & 127 + i++ + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 28 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 35 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 42 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 49 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 56 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 63 + if v < 128 { + goto done + } + + return 0, errOverflow + +done: + b.Pos = i + return +} + +// decodeVarint32 decodes a varint32 at the current position +func (b *BufferReader) DecodeVarint32() (x uint32, err error) { + i := b.Pos + buf := b.Buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + b.Pos++ + return uint32(buf[i]), nil + } else if len(buf)-i < 5 { + v, err := b.DecodeVarintSlow() + return uint32(v), err + } + + var v uint32 + // we already checked the first byte + x = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 28 + if v < 128 { + goto done + } + + return 0, errOverflow + +done: + b.Pos = i + return +} + +// skipValue skips a value in the protobuf, based on the specified tag +func (b *BufferReader) SkipValue(tag uint32) (err error) { + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + err = b.SkipVarint() + case protowire.Fixed64Type: + err = b.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = b.DecodeVarint32() + if err == nil { + err = b.Skip(int(n)) + } + case protowire.StartGroupType: + err = b.SkipGroup(tag) + case protowire.Fixed32Type: + err = b.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + return +} + +// skipGroup skips a group with the specified tag. 
It executes efficiently using a tag stack +func (b *BufferReader) SkipGroup(tag uint32) (err error) { + tagStack := make([]uint32, 0, 16) + tagStack = append(tagStack, tag) + var n uint32 + for len(tagStack) > 0 { + tag, err = b.DecodeVarint32() + if err != nil { + return err + } + switch protowire.Type(tag & 0x7) { + case protowire.VarintType: + err = b.SkipVarint() + case protowire.Fixed64Type: + err = b.Skip(8) + case protowire.BytesType: + n, err = b.DecodeVarint32() + if err == nil { + err = b.Skip(int(n)) + } + case protowire.StartGroupType: + tagStack = append(tagStack, tag) + case protowire.Fixed32Type: + err = b.SkipFixed32() + case protowire.EndGroupType: + if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) { + tagStack = tagStack[:len(tagStack)-1] + } else { + err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d", + protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos) + } + } + if err != nil { + return err + } + } + return nil +} + +// skipVarint effiently skips a varint +func (b *BufferReader) SkipVarint() (err error) { + i := b.Pos + + if len(b.Buf)-i < 10 { + // Use DecodeVarintSlow() to check for buffer overflow, but ignore result + if _, err := b.DecodeVarintSlow(); err != nil { + return err + } + return nil + } + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + return errOverflow + +out: + b.Pos = i + 1 + return nil +} + +// skip skips the specified number of bytes +func (b *BufferReader) Skip(n int) (err error) { + if len(b.Buf) < b.Pos+n { + return io.ErrUnexpectedEOF + } + b.Pos += n + return +} + +// skipFixed64 skips a fixed64 +func (b *BufferReader) SkipFixed64() (err error) { + return b.Skip(8) +} + +// skipFixed32 skips a fixed32 +func (b *BufferReader) SkipFixed32() (err error) { + return b.Skip(4) +} + +// skipBytes skips a set of bytes +func (b *BufferReader) SkipBytes() (err error) { + n, err := b.DecodeVarint32() + if err != nil { + return err + } + return b.Skip(int(n)) +} + +// Done returns whether we are at the end of the protobuf +func (b *BufferReader) Done() bool { + return b.Pos == len(b.Buf) +} + +// Remaining returns how many bytes remain +func (b *BufferReader) Remaining() int { + return len(b.Buf) - b.Pos +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/constraint/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go new file mode 100644 index 000000000..ff4d4834b --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go @@ -0,0 +1,359 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protolazy contains internal data structures for lazy message decoding. 
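// Editorial sketch (not part of the vendored patch): BufferReader.DecodeVarint
// above hand-unrolls standard base-128 varint decoding (7 payload bits per
// byte, high bit set on all but the last byte). The same wire format via the
// public protowire package:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	b := protowire.AppendVarint(nil, 300) // 300 encodes as [0xac 0x02]
	v, n := protowire.ConsumeVarint(b)
	fmt.Printf("% x -> %d (consumed %d bytes)\n", b, v, n) // ac 02 -> 300 (consumed 2 bytes)
}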
+package protolazy + +import ( + "fmt" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// IndexEntry is the structure for an index of the fields in a message of a +// proto (not descending to sub-messages) +type IndexEntry struct { + FieldNum uint32 + // first byte of this tag/field + Start uint32 + // first byte after a contiguous sequence of bytes for this tag/field, which could + // include a single encoding of the field, or multiple encodings for the field + End uint32 + // True if this protobuf segment includes multiple encodings of the field + MultipleContiguous bool +} + +// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message +// +// Deprecated: Do not use. This will be deleted in the near future. +type XXX_lazyUnmarshalInfo struct { + // Index of fields and their positions in the protobuf for this + // message. Make index be a pointer to a slice so it can be updated + // atomically. The index pointer is only set once (lazily when/if + // the index is first needed), and must always be SET and LOADED + // ATOMICALLY. + index *[]IndexEntry + // The protobuf associated with this lazily decoded message. It is + // only set during proto.Unmarshal(). It doesn't need to be set and + // loaded atomically, since any simultaneous set (Unmarshal) and read + // (during a get) would already be a race in the app code. + Protobuf []byte + // The flags present when Unmarshal was originally called for this particular message + unmarshalFlags piface.UnmarshalInputFlags +} + +// The Buffer and SetBuffer methods let v2/internal/impl interact with +// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle. + +// Buffer returns the lazy unmarshal buffer. +// +// Deprecated: Do not use. This will be deleted in the near future. +func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte { + return lazy.Protobuf +} + +// SetBuffer sets the lazy unmarshal buffer. +// +// Deprecated: Do not use. This will be deleted in the near future. +func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) { + lazy.Protobuf = b +} + +// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags. +// The flags should reflect how Unmarshal was called. +func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) { + lazy.unmarshalFlags = f +} + +// UnmarshalFlags returns the original unmarshalInputFlags. +func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags { + return lazy.unmarshalFlags +} + +// AllowedPartial returns true if the user originally unmarshalled this message with +// AllowPartial set to true +func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool { + return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0 +} + +func protoFieldNumber(tag uint32) uint32 { + return tag >> 3 +} + +// buildIndex builds an index of the specified protobuf, return the index +// array and an error. 
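// Editorial sketch (not part of the vendored patch): buildIndex below walks the
// wire format and records, per top-level field, the byte range of its encoding.
// A simplified stand-alone analogue with the public protowire package (it keeps
// one entry per occurrence and skips the merging and sorting the real code does):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

type entry struct {
	FieldNum   protowire.Number
	Start, End int
}

func main() {
	// Field 1 = varint 150, field 2 = bytes "hi".
	buf := protowire.AppendTag(nil, 1, protowire.VarintType)
	buf = protowire.AppendVarint(buf, 150)
	buf = protowire.AppendTag(buf, 2, protowire.BytesType)
	buf = protowire.AppendBytes(buf, []byte("hi"))

	var index []entry
	for pos := 0; pos < len(buf); {
		num, _, n := protowire.ConsumeField(buf[pos:])
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		index = append(index, entry{num, pos, pos + n})
		pos += n
	}
	fmt.Println(index) // [{1 0 3} {2 3 7}]
}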
+func buildIndex(buf []byte) ([]IndexEntry, error) { + index := make([]IndexEntry, 0, 16) + var lastProtoFieldNum uint32 + var outOfOrder bool + + var r BufferReader = NewBufferReader(buf) + + for !r.Done() { + var tag uint32 + var err error + var curPos = r.Pos + // INLINED: tag, err = r.DecodeVarint32() + { + i := r.Pos + buf := r.Buf + + if i >= len(buf) { + return nil, errOutOfBounds + } else if buf[i] < 0x80 { + r.Pos++ + tag = uint32(buf[i]) + } else if r.Remaining() < 5 { + var v uint64 + v, err = r.DecodeVarintSlow() + tag = uint32(v) + } else { + var v uint32 + // we already checked the first byte + tag = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 28 + if v < 128 { + goto done + } + + return nil, errOutOfBounds + + done: + r.Pos = i + } + } + // DONE: tag, err = r.DecodeVarint32() + + fieldNum := protoFieldNumber(tag) + if fieldNum < lastProtoFieldNum { + outOfOrder = true + } + + // Skip the current value -- will skip over an entire group as well. + // INLINED: err = r.SkipValue(tag) + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + // INLINED: err = r.SkipVarint() + i := r.Pos + + if len(r.Buf)-i < 10 { + // Use DecodeVarintSlow() to skip while + // checking for buffer overflow, but ignore result + _, err = r.DecodeVarintSlow() + goto out2 + } + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + return nil, errOverflow + out: + r.Pos = i + 1 + // DONE: err = r.SkipVarint() + case protowire.Fixed64Type: + err = r.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = r.DecodeVarint32() + if err == nil { + err = r.Skip(int(n)) + } + case protowire.StartGroupType: + err = r.SkipGroup(tag) + case protowire.Fixed32Type: + err = r.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + // DONE: err = r.SkipValue(tag) + + out2: + if err != nil { + return nil, err + } + if fieldNum != lastProtoFieldNum { + index = append(index, IndexEntry{FieldNum: fieldNum, + Start: uint32(curPos), + End: uint32(r.Pos)}, + ) + } else { + index[len(index)-1].End = uint32(r.Pos) + index[len(index)-1].MultipleContiguous = true + } + lastProtoFieldNum = fieldNum + } + if outOfOrder { + sort.Slice(index, func(i, j int) bool { + return index[i].FieldNum < index[j].FieldNum || + (index[i].FieldNum == index[j].FieldNum && + index[i].Start < index[j].Start) + }) + } + return index, nil +} + +func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) { + start, end, found, _, multipleEntries := lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + size += int(entry.End - entry.Start) + } + return size + } + if !found { + return 0 + } + return int(end - start) +} + +func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) { + start, end, found, _, multipleEntries := 
lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + b = append(b, lazy.Protobuf[entry.Start:entry.End]...) + } + return b, true + } + if !found { + return nil, false + } + b = append(b, lazy.Protobuf[start:end]...) + return b, true +} + +func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) { + atomicStoreIndex(&lazy.index, &index) +} + +// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information +// (including protobuf), returns startOffset/endOffset/found. +func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) { + if lazy.Protobuf == nil { + // There is no backing protobuf for this message -- it was made from a builder + return 0, 0, false, false, nil + } + index := atomicLoadIndex(&lazy.index) + if index == nil { + r, err := buildIndex(lazy.Protobuf) + if err != nil { + panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err)) + } + // lazy.index is a pointer to the slice returned by BuildIndex + index = &r + atomicStoreIndex(&lazy.index, index) + } + return lookupField(index, fieldNum) +} + +// lookupField returns the offset at which the indicated field starts using +// the index, offset immediately after field ends (including all instances of +// a repeated field), and bools indicating if field was found and if there +// are multiple encodings of the field in the byte range. +// +// To hande the uncommon case where there are repeated encodings for the same +// field which are not consecutive in the protobuf (so we need to returns +// multiple start/end offsets), we also return a slice multipleEntries. If +// multipleEntries is non-nil, then multiple entries were found, and the +// values in the slice should be used, rather than start/end/found. +func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) { + // The pointer indexp to the index was already loaded atomically. + // The slice is uniquely associated with the pointer, so it doesn't + // need to be loaded atomically. + index := *indexp + for i, entry := range index { + if fieldNum == entry.FieldNum { + if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum { + // Handle the uncommon case where there are + // repeated entries for the same field which + // are not contiguous in the protobuf. + multiple := make([]IndexEntry, 1, 2) + multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous} + i++ + for i < len(index) && index[i].FieldNum == fieldNum { + multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous}) + i++ + } + return 0, 0, false, false, multiple + + } + return entry.Start, entry.End, true, entry.MultipleContiguous, nil + } + if fieldNum < entry.FieldNum { + return 0, 0, false, false, nil + } + } + return 0, 0, false, false, nil +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/constraint/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go new file mode 100644 index 000000000..dc2a64ca6 --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protolazy + +import ( + "sync/atomic" + "unsafe" +) + +func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry { + return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f3338..000000000 --- a/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd09..832a7988f 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2ba..1ffddf687 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/constraint/vendor/google.golang.org/protobuf/internal/version/version.go b/constraint/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f686..f5c06280f 100644 --- a/constraint/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/constraint/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 36 + Patch = 3 PreRelease = "" ) diff --git a/constraint/vendor/google.golang.org/protobuf/proto/decode.go b/constraint/vendor/google.golang.org/protobuf/proto/decode.go index d75a6534c..e28d7acb3 100644 --- a/constraint/vendor/google.golang.org/protobuf/proto/decode.go +++ b/constraint/vendor/google.golang.org/protobuf/proto/decode.go @@ -47,6 +47,12 @@ type UnmarshalOptions struct { // RecursionLimit limits how deeply messages may be nested. // If zero, a default limit is applied. 
RecursionLimit int + + // + // NoLazyDecoding turns off lazy decoding, which otherwise is enabled by + // default. Lazy decoding only affects submessages (annotated with [lazy = + // true] in the .proto file) within messages that use the Opaque API. + NoLazyDecoding bool } // Unmarshal parses the wire-format message in b and places the result in m. @@ -104,6 +110,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } + + if !allowPartial { + // This does not affect how current unmarshal functions work, it just allows them + // to record this for lazy the decoding case. + in.Flags |= protoiface.UnmarshalCheckRequired + } + if o.NoLazyDecoding { + in.Flags |= protoiface.UnmarshalNoLazyDecoding + } + out, err = methods.Unmarshal(in) } else { o.RecursionLimit-- @@ -156,7 +172,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacy { + } else if flags.ProtoLegacyWeak { if fd.IsWeak() && fd.Message().IsPlaceholder() { err = errUnknown // weak referent is not linked in } diff --git a/constraint/vendor/google.golang.org/protobuf/proto/encode.go b/constraint/vendor/google.golang.org/protobuf/proto/encode.go index 1f847bcc3..f0473c586 100644 --- a/constraint/vendor/google.golang.org/protobuf/proto/encode.go +++ b/constraint/vendor/google.golang.org/protobuf/proto/encode.go @@ -63,7 +63,8 @@ type MarshalOptions struct { // options (except for UseCachedSize itself). // // 2. The message and all its submessages have not changed in any - // way since the Size call. + // way since the Size call. For lazily decoded messages, accessing + // a message results in decoding the message, which is a change. // // If either of these invariants is violated, // the results are undefined and may include panics or corrupted output. diff --git a/constraint/vendor/google.golang.org/protobuf/proto/equal.go b/constraint/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b03..c36d4a9cd 100644 --- a/constraint/vendor/google.golang.org/protobuf/proto/equal.go +++ b/constraint/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/constraint/vendor/google.golang.org/protobuf/proto/extension.go b/constraint/vendor/google.golang.org/protobuf/proto/extension.go index d248f2928..78445d116 100644 --- a/constraint/vendor/google.golang.org/protobuf/proto/extension.go +++ b/constraint/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. 
+// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. 
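// Editorial sketch (not part of the vendored patch): the NoLazyDecoding option
// added to UnmarshalOptions above turns off the lazy decoding that is otherwise
// the default for [lazy = true] submessages of Opaque API messages. A usage
// sketch with a well-known message (for which the flag is simply a no-op):

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	data, err := proto.Marshal(durationpb.New(90 * time.Second))
	if err != nil {
		panic(err)
	}
	d := &durationpb.Duration{}
	// Force eager decoding of any lazily-decodable submessages.
	if err := (proto.UnmarshalOptions{NoLazyDecoding: true}).Unmarshal(data, d); err != nil {
		panic(err)
	}
	fmt.Println(d.GetSeconds()) // 90
}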
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/constraint/vendor/google.golang.org/protobuf/proto/size.go b/constraint/vendor/google.golang.org/protobuf/proto/size.go index 052fb5ae3..c8675806c 100644 --- a/constraint/vendor/google.golang.org/protobuf/proto/size.go +++ b/constraint/vendor/google.golang.org/protobuf/proto/size.go @@ -12,11 +12,19 @@ import ( ) // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. func Size(m Message) int { return MarshalOptions{}.Size(m) } // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. func (o MarshalOptions) Size(m Message) int { // Treat a nil message interface as an empty message; nothing to output. if m == nil { diff --git a/constraint/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/constraint/vendor/google.golang.org/protobuf/proto/wrapperopaque.go new file mode 100644 index 000000000..267fd0f1f --- /dev/null +++ b/constraint/vendor/google.golang.org/protobuf/proto/wrapperopaque.go @@ -0,0 +1,80 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// ValueOrNil returns nil if has is false, or a pointer to a new variable +// containing the value returned by the specified getter. +// +// This function is similar to the wrappers (proto.Int32(), proto.String(), +// etc.), but is generic (works for any field type) and works with the hasser +// and getter of a field, as opposed to a value. +// +// This is convenient when populating builder fields. +// +// Example: +// +// hop := attr.GetDirectHop() +// injectedRoute := ripb.InjectedRoute_builder{ +// Prefixes: route.GetPrefixes(), +// NextHop: proto.ValueOrNil(hop.HasAddress(), hop.GetAddress), +// } +func ValueOrNil[T any](has bool, getter func() T) *T { + if !has { + return nil + } + v := getter() + return &v +} + +// ValueOrDefault returns the protobuf message val if val is not nil, otherwise +// it returns a pointer to an empty val message. +// +// This function allows for translating code from the old Open Struct API to the +// new Opaque API. +// +// The old Open Struct API represented oneof fields with a wrapper struct: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{ +// // The Avatar oneof will be set, with an empty SignedImage. +// Avatar: &accountpb.Profile_SignedImage{signedImg}, +// } +// +// The new Opaque API treats oneof fields like regular fields, there are no more +// wrapper structs: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{} +// profile.SetSignedImage(signedImg) +// +// For convenience, the Opaque API also offers Builders, which allow for a +// direct translation of struct initialization. However, because Builders use +// nilness to represent field presence (but there is no non-nil wrapper struct +// anymore), Builders cannot distinguish between an unset oneof and a set oneof +// with nil message. 
The above code would need to be translated with help of the +// ValueOrDefault function to retain the same behavior: +// +// var signedImg *accountpb.SignedImage +// return &accountpb.Profile_builder{ +// SignedImage: proto.ValueOrDefault(signedImg), +// }.Build() +func ValueOrDefault[T interface { + *P + Message +}, P any](val T) T { + if val == nil { + return T(new(P)) + } + return val +} + +// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of +// type []byte. +func ValueOrDefaultBytes(val []byte) []byte { + if val == nil { + return []byte{} + } + return val +} diff --git a/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 8fbecb4f5..69a050509 100644 --- a/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,8 @@ package protodesc import ( + "strings" + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" @@ -102,13 +104,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { - return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) - } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + // Allow cmd/protoc-gen-go/testdata to use any edition for easier + // testing of upcoming edition features. 
+ if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } + } f.L1.Package = protoreflect.FullName(fd.GetPackage()) if !f.L1.Package.IsValid() && f.L1.Package != "" { return nil, errors.New("invalid package: %q", f.L1.Package) diff --git a/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 856175542..ebcb4a8ab 100644 --- a/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() + f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() } @@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git a/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 6de31c2eb..5eaf65217 100644 --- a/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -149,7 +149,7 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacy { + if f.IsWeak() && !flags.ProtoLegacyWeak { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { diff --git a/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 804830eda..f55b03695 100644 --- a/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/constraint/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" + "google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} @@ -43,6 +43,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { return descriptorpb.Edition_EDITION_PROTO3 case filedesc.Edition2023: return descriptorpb.Edition_EDITION_2023 + case filedesc.Edition2024: + return descriptorpb.Edition_EDITION_2024 default: panic(fmt.Sprintf("unknown value for edition: %v", ed)) } @@ -123,10 +125,27 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - if goFeatures, ok := proto.GetExtension(child, 
gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { - if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { - parentFS.GenerateLegacyUnmarshalJSON = *luje - } + // We must not use proto.GetExtension(child, gofeaturespb.E_Go) + // because that only works for messages we generated, but not for + // dynamicpb messages. See golang/protobuf#1669. + goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) + if !goFeatures.IsValid() { + return parentFS + } + // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. + gf := goFeatures.Message() + fields := gf.Descriptor().Fields() + + if fd := fields.ByName("legacy_unmarshal_json_enum"); gf.Has(fd) { + parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() + } + + if fd := fields.ByName("strip_enum_prefix"); gf.Has(fd) { + parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) + } + + if fd := fields.ByName("api_level"); gf.Has(fd) { + parentFS.APILevel = int(gf.Get(fd).Enum()) } return parentFS diff --git a/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6eb..742cb518c 100644 --- a/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index a7b0d06ff..a4b78acef 100644 --- a/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // google.golang.org/protobuf/runtime/protoiface.Methods. + // [google.golang.org/protobuf/runtime/protoiface.Methods]. // Consult the protoiface package documentation for details. ProtoMethods() *methods } diff --git a/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2af..000000000 --- a/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. 
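The rewritten mergeEditionFeatures block above reads the pb.go feature extension purely through protoreflect field descriptors, so the same path also works when the extension value is a dynamicpb message (golang/protobuf#1669). A rough sketch of that access pattern, assuming the generated gofeaturespb.GoFeatures still exposes its LegacyUnmarshalJsonEnum field:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	// Populate the pb.go extension on a FeatureSet, as a .proto file with
	// `option features.(pb.go).legacy_unmarshal_json_enum = true;` would.
	fs := &descriptorpb.FeatureSet{}
	proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{
		LegacyUnmarshalJsonEnum: proto.Bool(true),
	})

	// Read it back through reflection only: no type assertion on the concrete
	// *gofeaturespb.GoFeatures, so the code does not break for dynamicpb.
	v := fs.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor())
	gf := v.Message()
	if fld := gf.Descriptor().Fields().ByName("legacy_unmarshal_json_enum"); gf.Has(fld) {
		fmt.Println(gf.Get(fld).Bool()) // true
	}
}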
-// This uses a distinct field for each type. This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ead..0015fcb35 100644 --- a/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d386990..479527b58 100644 --- a/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/constraint/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/constraint/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/constraint/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d8..28e9e9f03 100644 --- a/constraint/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/constraint/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -119,6 +122,22 @@ type UnmarshalInputFlags = uint8 const ( UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota + + // UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer. + // The unmarshaller must not modify the contents of the buffer. + UnmarshalAliasBuffer + + // UnmarshalValidated indicates that validation has already been + // performed on the input buffer. + UnmarshalValidated + + // UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are + // initialized. + UnmarshalCheckRequired + + // UnmarshalNoLazyDecoding is set if this unmarshal operation should not use + // lazy decoding, even when otherwise available. + UnmarshalNoLazyDecoding ) // UnmarshalOutputFlags are output from the Unmarshal method. 
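protoiface.Methods gains an optional Equal fast path (its EqualInput/EqualOutput types follow in the next hunk), and the extra Unmarshal*Flags are plumbing for buffer aliasing, pre-validated input, required-field checks, and lazy decoding rather than switches ordinary callers flip. Through the public API the behavior is unchanged; a small sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	a := &descriptorpb.FileDescriptorProto{Name: proto.String("a.proto")}
	b := proto.Clone(a).(*descriptorpb.FileDescriptorProto)

	// proto.Equal stays the public entry point; when generated code exposes
	// the new protoiface Equal method it can serve as a fast path instead of
	// a field-by-field reflective comparison.
	fmt.Println(proto.Equal(a, b)) // true

	b.Package = proto.String("example")
	fmt.Println(proto.Equal(a, b)) // false
}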
@@ -166,3 +185,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. +type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/constraint/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/constraint/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go index 4a1ab7fb3..93df1b569 100644 --- a/constraint/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go +++ b/constraint/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/filetype" "google.golang.org/protobuf/internal/impl" + "google.golang.org/protobuf/internal/protolazy" ) // UnsafeEnabled specifies whether package unsafe can be used. @@ -39,6 +40,9 @@ type ( ExtensionFieldV1 = impl.ExtensionField Pointer = impl.Pointer + + LazyUnmarshalInfo = *protolazy.XXX_lazyUnmarshalInfo + RaceDetectHookData = impl.RaceDetectHookData ) var X impl.Export diff --git a/constraint/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/constraint/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb075..a551e7ae9 100644 --- a/constraint/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/constraint/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -69,7 +69,7 @@ const ( Edition_EDITION_2023 Edition = 1000 Edition_EDITION_2024 Edition = 1001 // Placeholder editions for testing feature resolution. These should not be - // used or relyed on outside of tests. + // used or relied on outside of tests. Edition_EDITION_1_TEST_ONLY Edition = 1 Edition_EDITION_2_TEST_ONLY Edition = 2 Edition_EDITION_99997_TEST_ONLY Edition = 99997 @@ -577,8 +577,6 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { } // If set to RETENTION_SOURCE, the option will be omitted from the binary. -// Note: as of January 2023, support for this is in progress and does not yet -// have an effect (b/264593489). type FieldOptions_OptionRetention int32 const ( @@ -640,8 +638,7 @@ func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { // This indicates the types of entities that the field may apply to when used // as an option. If it is unset, then the field may be freely used as an -// option on any kind of entity. Note: as of January 2023, support for this is -// in progress and does not yet have an effect (b/264593489). +// option on any kind of entity. type FieldOptions_OptionTargetType int32 const ( @@ -1208,20 +1205,18 @@ func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. 
type FileDescriptorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1227,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1256,12 +1251,9 @@ func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { // Describes a complete .proto file. type FileDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. // Names of files imported by this file. Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` // Indexes of the public imported files in the dependency list above. @@ -1286,16 +1278,16 @@ type FileDescriptorProto struct { // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` // The edition of the proto file. 
- Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1298,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1414,10 +1406,7 @@ func (x *FileDescriptorProto) GetEdition() Edition { // Describes a message type. type DescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` @@ -1429,16 +1418,16 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1438,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1535,11 +1524,7 @@ func (x *DescriptorProto) GetReservedName() []string { } type ExtensionRangeOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` // For external users: DO NOT USE. We are in the process of open sourcing @@ -1551,7 +1536,10 @@ type ExtensionRangeOptions struct { // The verification state of the range. // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. - Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for ExtensionRangeOptions fields. @@ -1561,11 +1549,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1562,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1621,10 +1607,7 @@ func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_Verifica // Describes a field within a message. type FieldDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` @@ -1676,15 +1659,15 @@ type FieldDescriptorProto struct { // Proto2 optional fields do not set this flag, because they already indicate // optional with `LABEL_OPTIONAL`. 
Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1678,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1789,21 +1772,18 @@ func (x *FieldDescriptorProto) GetProto3Optional() bool { // Describes a oneof. type OneofDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1794,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1845,10 +1825,7 @@ func (x *OneofDescriptorProto) GetOptions() *OneofOptions { // Describes an enum type. type EnumDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` @@ -1858,16 +1835,16 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1855,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1930,22 +1907,19 @@ func (x *EnumDescriptorProto) GetReservedName() []string { // Describes a value within an enum. type EnumValueDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1930,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1994,22 +1968,19 @@ func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. 
type ServiceDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +1991,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2058,11 +2029,8 @@ func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { // Describes a method of a service. type MethodDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Input and output type names. These are resolved in the same way as // FieldDescriptorProto.type_name, but must refer to a message type. InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` @@ -2072,6 +2040,8 @@ type MethodDescriptorProto struct { ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MethodDescriptorProto fields. 
@@ -2082,11 +2052,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2065,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2155,11 +2123,7 @@ func (x *MethodDescriptorProto) GetServerStreaming() bool { } type FileOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Sets the Java package where classes generated from this .proto will be // placed. By default, the proto package is used, but this is often // inappropriate because proto packages do not normally start with backwards @@ -2251,6 +2215,9 @@ type FileOptions struct { // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for FileOptions fields. @@ -2267,11 +2234,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2247,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2446,11 +2411,7 @@ func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { } type MessageOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set true to use the old proto1 MessageSet wire format for extensions. // This is provided for backwards-compatibility with the MessageSet wire // format. You should not use this for any other reason: It's less @@ -2523,6 +2484,9 @@ type MessageOptions struct { Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MessageOptions fields. @@ -2534,11 +2498,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2511,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2615,17 +2577,14 @@ func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { } type FieldOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` + // NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead. // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific // options below. This option is only implemented to support use of // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of - // type "bytes" in the open source release -- sorry, we'll try to include - // other types in a future version! + // type "bytes" in the open source release. + // TODO: make ctype actually deprecated. Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly @@ -2692,6 +2651,9 @@ type FieldOptions struct { FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for FieldOptions fields. 
@@ -2707,11 +2669,9 @@ const ( func (x *FieldOptions) Reset() { *x = FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2682,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2836,24 +2796,21 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { } type OneofOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2821,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2894,11 +2851,7 @@ func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { } type EnumOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set this option to true to allow mapping different tag names to the same // value. AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` @@ -2920,6 +2873,9 @@ type EnumOptions struct { Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for EnumOptions fields. 
@@ -2929,11 +2885,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2898,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2996,11 +2950,7 @@ func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { } type EnumValueOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Is this enum value deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum value, or it will be completely ignored; in the very least, @@ -3016,6 +2966,9 @@ type EnumValueOptions struct { FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for EnumValueOptions fields. @@ -3026,11 +2979,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +2992,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3092,11 +3043,7 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { } type ServiceOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? @@ -3106,6 +3053,9 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for ServiceOptions fields. @@ -3115,11 +3065,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3078,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3167,11 +3115,7 @@ func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { } type MethodOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Is this method deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, @@ -3182,6 +3126,9 @@ type MethodOptions struct { Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MethodOptions fields. @@ -3192,11 +3139,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3152,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3257,11 +3202,8 @@ func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions // in them. 
type UninterpretedOption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` @@ -3270,15 +3212,15 @@ type UninterpretedOption struct { DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3231,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3360,26 +3302,23 @@ func (x *UninterpretedOption) GetAggregateValue() string { // be designed and implemented to handle this, hopefully before we ever hit a // conflict here. 
type FeatureSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3329,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3452,10 +3391,7 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { // feature resolution. The resolution with this object becomes a simple search // for the closest matching edition, followed by proto merges. type FeatureSetDefaults struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` // The minimum supported edition (inclusive) when this was constructed. // Editions before this will not have defaults. @@ -3463,15 +3399,15 @@ type FeatureSetDefaults struct { // The maximum known edition (inclusive) when this was constructed. Editions // after this will not have reliable defaults. 
MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3418,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3521,10 +3457,7 @@ func (x *FeatureSetDefaults) GetMaximumEdition() Edition { // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. type SourceCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A Location identifies a piece of source code in a .proto file which // corresponds to a particular definition. This information is intended // to be useful to IDEs, code indexers, documentation generators, and similar @@ -3573,16 +3506,17 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3527,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3619,22 +3553,19 @@ func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { // file. A GeneratedCodeInfo message is associated with only one generated // source file, but may contain references to different source .proto files. type GeneratedCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // An Annotation connects some span of text in generated code to an element // of its generating .proto file. 
- Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3576,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3668,22 +3599,19 @@ func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { } type DescriptorProto_ExtensionRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3622,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3734,21 +3662,18 @@ func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { // fields or extension ranges in the same message. Reserved ranges may // not overlap. type DescriptorProto_ReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. 
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + sizeCache protoimpl.SizeCache } func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3684,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3789,10 +3714,7 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 { } type ExtensionRangeOptions_Declaration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The extension number declared within the extension range. Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` // The fully-qualified name of the extension field. There must be a leading @@ -3808,16 +3730,16 @@ type ExtensionRangeOptions_Declaration struct { Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` // If true, indicates that the extension must be defined as repeated. // Otherwise the extension must be defined as optional. - Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` + Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3750,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3885,21 +3807,18 @@ func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { // is inclusive such that it can appropriately represent the entire int32 // domain. type EnumDescriptorProto_EnumReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. 
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. + sizeCache protoimpl.SizeCache } func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3829,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3940,21 +3859,18 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { } type FieldOptions_EditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. + sizeCache protoimpl.SizeCache } func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3881,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3996,10 +3912,7 @@ func (x *FieldOptions_EditionDefault) GetValue() string { // Information about the support window of a feature. type FieldOptions_FeatureSupport struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The edition that this feature was first available in. In editions // earlier than this one, the default assigned to EDITION_LEGACY will be // used, and proto files will not be able to override it. @@ -4014,15 +3927,15 @@ type FieldOptions_FeatureSupport struct { // this one, the last default assigned will be used, and proto files will // not be able to override it. 
EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) String() string { @@ -4033,7 +3946,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4082,21 +3995,18 @@ func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents // "foo.(bar.baz).moo". type UninterpretedOption_NamePart struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` unknownFields protoimpl.UnknownFields - - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + sizeCache protoimpl.SizeCache } func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4017,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4141,24 +4051,21 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { // the defaults at the closest matching edition ordered at or before it should // be used. This field must be in strict ascending order by edition. type FeatureSetDefaults_FeatureSetEditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` // Defaults of features that can be overridden in this edition. 
OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` // Defaults of features that can't be overridden in this edition. FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4076,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4206,10 +4113,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *Featur } type SourceCodeInfo_Location struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Identifies which part of the FileDescriptorProto was defined at this // location. // @@ -4301,15 +4205,15 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4224,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4371,10 +4275,7 @@ func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { } type GeneratedCodeInfo_Annotation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Identifies the element in the original source .proto file. This field // is formatted the same as SourceCodeInfo.Location.path. 
Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -4386,17 +4287,17 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified object. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). - End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4308,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4463,494 +4364,474 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 
0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, - 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, - 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, - 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, - 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 
0x6c, 0x64, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, - 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, - 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, + 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, + 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, + 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, - 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 
0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, - 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, - 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 
0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, - 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, - 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, + 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, + 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 
0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, + 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, + 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, + 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 
0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, + 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, + 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, + 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, + 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, + 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, - 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 
0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, + 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, + 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, + 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, + 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, + 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, + 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, + 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, - 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, - 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, - 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, - 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, - 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, - 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, - 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, - 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x49, 0x4e, 
0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, - 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, - 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, - 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, - 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, - 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, - 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 
0x3e, 0x0a, 0x06, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, + 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, + 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, + 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, + 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, + 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 
0x14, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, + 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, + 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, + 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, + 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, + 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, + 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 
0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, + 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, + 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 
0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, - 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, - 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, - 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, - 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, - 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, - 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, - 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 
0x64, 0x18, 0x17, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, - 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, - 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, - 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, - 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, - 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, - 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, - 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, - 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 
0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, - 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, - 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, - 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, - 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, - 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, + 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, + 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, + 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, + 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, + 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, + 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 
0x65, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, - 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, + 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, - 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, - 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, - 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, - 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, - 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, - 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, - 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x0a, 0x11, 
0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, - 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, - 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, - 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, - 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, - 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, + 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, - 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, + 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 
0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, + 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, + 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, + 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, + 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, + 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 
0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, + 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, + 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, + 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, + 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, + 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, @@ -4959,274 +4840,296 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, - 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 
0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, + 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 
0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, - 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, + 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, + 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 
0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, + 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 
0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, + 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 
0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, - 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, - 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, - 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, - 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, - 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, - 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, - 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, - 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 
0x61, 0x74, - 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, - 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, - 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, - 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, - 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, - 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, - 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, - 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, - 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, - 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 
0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, - 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, - 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, - 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, - 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, - 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, - 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, - 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, - 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, - 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, - 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, - 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, - 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, - 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, - 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, - 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 
0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, - 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, - 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, - 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, - 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, - 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, - 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, - 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, - 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, - 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, - 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, - 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, - 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, - 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, + 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, + 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, + 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, + 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, + 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, + 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, + 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, + 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, + 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, - 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 
0x66, - 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, - 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, - 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, - 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, - 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, - 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, - 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x12, 
0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, - 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, - 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, - 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, - 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, - 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, - 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, - 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, - 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, - 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 
0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, + 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, + 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, + 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, + 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, + 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, + 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, + 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, + 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, + 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, + 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 
0xe8, + 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, + 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, + 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, + 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 
0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, + 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, + 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, + 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7, + 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 
0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, + 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, + 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, + 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, + 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, + 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, + 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, + 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5385,424 +5288,6 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/constraint/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/constraint/vendor/google.golang.org/protobuf/types/dynamicpb/types.go index c432817bb..8e759fc9f 100644 --- a/constraint/vendor/google.golang.org/protobuf/types/dynamicpb/types.go +++ b/constraint/vendor/google.golang.org/protobuf/types/dynamicpb/types.go @@ -28,11 +28,7 @@ type extField struct { type Types struct { // atomicExtFiles is used with sync/atomic and hence must be the first word // of the struct to guarantee 64-bit alignment. - // - // TODO(stapelberg): once we only support Go 1.19 and newer, switch this - // field to be of type atomic.Uint64 to guarantee alignment on - // stack-allocated values, too. - atomicExtFiles uint64 + atomicExtFiles atomic.Uint64 extMu sync.Mutex files *protoregistry.Files @@ -90,7 +86,7 @@ func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.Ex func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { // Construct the extension number map lazily, since not every user will need it. // Update the map if new files are added to the registry. 
- if atomic.LoadUint64(&t.atomicExtFiles) != uint64(t.files.NumFiles()) { + if t.atomicExtFiles.Load() != uint64(t.files.NumFiles()) { t.updateExtensions() } xd := t.extensionsByMessage[extField{message, field}] @@ -133,10 +129,10 @@ func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { func (t *Types) updateExtensions() { t.extMu.Lock() defer t.extMu.Unlock() - if atomic.LoadUint64(&t.atomicExtFiles) == uint64(t.files.NumFiles()) { + if t.atomicExtFiles.Load() == uint64(t.files.NumFiles()) { return } - defer atomic.StoreUint64(&t.atomicExtFiles, uint64(t.files.NumFiles())) + defer t.atomicExtFiles.Store(uint64(t.files.NumFiles())) t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { t.registerExtensions(fd.Extensions()) t.registerExtensionsInMessages(fd.Messages()) diff --git a/constraint/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/constraint/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index a2ca940c5..e0b72eaf9 100644 --- a/constraint/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/constraint/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -18,22 +18,150 @@ import ( sync "sync" ) -type GoFeatures struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type GoFeatures_APILevel int32 + +const ( + // API_LEVEL_UNSPECIFIED results in selecting the OPEN API, + // but needs to be a separate value to distinguish between + // an explicitly set api level or a missing api level. + GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0 + GoFeatures_API_OPEN GoFeatures_APILevel = 1 + GoFeatures_API_HYBRID GoFeatures_APILevel = 2 + GoFeatures_API_OPAQUE GoFeatures_APILevel = 3 +) + +// Enum value maps for GoFeatures_APILevel. +var ( + GoFeatures_APILevel_name = map[int32]string{ + 0: "API_LEVEL_UNSPECIFIED", + 1: "API_OPEN", + 2: "API_HYBRID", + 3: "API_OPAQUE", + } + GoFeatures_APILevel_value = map[string]int32{ + "API_LEVEL_UNSPECIFIED": 0, + "API_OPEN": 1, + "API_HYBRID": 2, + "API_OPAQUE": 3, + } +) + +func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel { + p := new(GoFeatures_APILevel) + *p = x + return p +} + +func (x GoFeatures_APILevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor() +} + +func (GoFeatures_APILevel) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[0] +} + +func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_APILevel(num) + return nil +} + +// Deprecated: Use GoFeatures_APILevel.Descriptor instead. 
+func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0} +} + +type GoFeatures_StripEnumPrefix int32 + +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3 +) + +// Enum value maps for GoFeatures_StripEnumPrefix. +var ( + GoFeatures_StripEnumPrefix_name = map[int32]string{ + 0: "STRIP_ENUM_PREFIX_UNSPECIFIED", + 1: "STRIP_ENUM_PREFIX_KEEP", + 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH", + 3: "STRIP_ENUM_PREFIX_STRIP", + } + GoFeatures_StripEnumPrefix_value = map[string]int32{ + "STRIP_ENUM_PREFIX_UNSPECIFIED": 0, + "STRIP_ENUM_PREFIX_KEEP": 1, + "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2, + "STRIP_ENUM_PREFIX_STRIP": 3, + } +) + +func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix { + p := new(GoFeatures_StripEnumPrefix) + *p = x + return p +} + +func (x GoFeatures_StripEnumPrefix) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor() +} + +func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[1] +} + +func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_StripEnumPrefix(num) + return nil +} +// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead. +func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1} +} + +type GoFeatures struct { + state protoimpl.MessageState `protogen:"open.v1"` // Whether or not to generate the deprecated UnmarshalJSON method for enums. + // Can only be true for proto using the Open Struct api. LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` + // One of OPEN, HYBRID or OPAQUE. 
+ ApiLevel *GoFeatures_APILevel `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"` + StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -44,7 +172,7 @@ func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -66,6 +194,20 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { return false } +func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel { + if x != nil && x.ApiLevel != nil { + return *x.ApiLevel + } + return GoFeatures_API_LEVEL_UNSPECIFIED +} + +func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix { + if x != nil && x.StripEnumPrefix != nil { + return *x.StripEnumPrefix + } + return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED +} + var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FeatureSet)(nil), @@ -90,7 +232,7 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, @@ -103,14 +245,44 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 
0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, + 0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, + 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, + 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, + 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, + 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, + 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, + 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, + 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, + 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, + 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, + 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, + 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, + 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, } var ( @@ -125,19 +297,24 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { return file_google_protobuf_go_features_proto_rawDescData } +var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_protobuf_go_features_proto_goTypes = []any{ - (*GoFeatures)(nil), // 0: pb.GoFeatures - (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet + (GoFeatures_APILevel)(0), // 0: pb.GoFeatures.APILevel + (GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix + (*GoFeatures)(nil), // 2: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet } var file_google_protobuf_go_features_proto_depIdxs = []int32{ - 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet - 0, // 1: pb.go:type_name -> pb.GoFeatures - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel + 1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix + 3, // 2: pb.go:extendee -> google.protobuf.FeatureSet + 2, // 3: pb.go:type_name -> pb.GoFeatures + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_protobuf_go_features_proto_init() } @@ -145,32 +322,19 @@ func file_google_protobuf_go_features_proto_init() { if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, - NumEnums: 0, + NumEnums: 2, NumMessages: 1, NumExtensions: 1, NumServices: 0, }, GoTypes: file_google_protobuf_go_features_proto_goTypes, DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + EnumInfos: file_google_protobuf_go_features_proto_enumTypes, MessageInfos: file_google_protobuf_go_features_proto_msgTypes, ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() diff --git a/constraint/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/constraint/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d3..191552cce 100644 --- a/constraint/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/constraint/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -210,10 +210,7 @@ import ( // "value": "1.212s" // } type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields 
protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A URL/resource name that uniquely identifies the type of the serialized // protocol buffer message. This string must contain at least // one "/" character. The last segment of the URL's path must represent @@ -244,7 +241,9 @@ type Any struct { // used with implementation specific semantics. TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New marshals src into a new Any instance. @@ -368,11 +367,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +380,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,20 +458,6 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/constraint/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/constraint/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd91..34d76e6cd 100644 --- a/constraint/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/constraint/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -141,10 +141,7 @@ import ( // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". type Duration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years @@ -155,7 +152,9 @@ type Duration struct { // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New constructs a new Duration from the provided time.Duration. 
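For reference, a minimal sketch (not part of the vendored files, assuming the google.golang.org/protobuf version vendored by this change) of how the durationpb helpers documented in the hunk above round-trip a time.Duration; the 3s + 1ns value mirrors the "3.000000001s" JSON example in the regenerated comment:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// New splits a time.Duration into the Seconds/Nanos pair described above.
	d := durationpb.New(3*time.Second + time.Nanosecond)
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 3 1

	// AsDuration converts back to a native time.Duration.
	fmt.Println(d.AsDuration()) // 3.000000001s
}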
@@ -245,11 +244,9 @@ func (x *Duration) check() uint { func (x *Duration) Reset() { *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duration) String() string { @@ -260,7 +257,7 @@ func (*Duration) ProtoMessage() {} func (x *Duration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,20 +336,6 @@ func file_google_protobuf_duration_proto_init() { if File_google_protobuf_duration_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/constraint/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/constraint/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index d87b4fb82..7fcdd382c 100644 --- a/constraint/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/constraint/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -48,18 +48,16 @@ import ( // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); // } type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_empty_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -70,7 +68,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_empty_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,20 +129,6 @@ func file_google_protobuf_empty_proto_init() { if File_google_protobuf_empty_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/constraint/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/constraint/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index ac1e91bb6..e5d7da38c 100644 --- a/constraint/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/constraint/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ 
-284,12 +284,11 @@ import ( // request should verify the included field paths, and return an // `INVALID_ARGUMENT` error if any path is unmappable. type FieldMask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The set of field mask paths. - Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New constructs a field mask from a list of paths and verifies that @@ -467,11 +466,9 @@ func rangeFields(path string, f func(field string) bool) bool { func (x *FieldMask) Reset() { *x = FieldMask{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldMask) String() string { @@ -482,7 +479,7 @@ func (*FieldMask) ProtoMessage() {} func (x *FieldMask) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -553,20 +550,6 @@ func file_google_protobuf_field_mask_proto_init() { if File_google_protobuf_field_mask_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FieldMask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/constraint/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/constraint/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d45361cbc..f2c53ea33 100644 --- a/constraint/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/constraint/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -120,6 +120,7 @@ package structpb import ( base64 "encoding/base64" + json "encoding/json" protojson "google.golang.org/protobuf/encoding/protojson" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -186,12 +187,11 @@ func (NullValue) EnumDescriptor() ([]byte, []int) { // // The JSON representation for `Struct` is JSON object. type Struct struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Unordered map of dynamically typed values. - Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewStruct constructs a Struct from a general-purpose Go map. 
@@ -233,11 +233,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error { func (x *Struct) Reset() { *x = Struct{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Struct) String() string { @@ -248,7 +246,7 @@ func (*Struct) ProtoMessage() {} func (x *Struct) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -277,13 +275,10 @@ func (x *Struct) GetFields() map[string]*Value { // // The JSON representation for `Value` is JSON value. type Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The kind of value. // - // Types that are assignable to Kind: + // Types that are valid to be assigned to Kind: // // *Value_NullValue // *Value_NumberValue @@ -291,24 +286,27 @@ type Value struct { // *Value_BoolValue // *Value_StructValue // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` + Kind isValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewValue constructs a Value from a general-purpose Go interface. // -// ╔════════════════════════╤════════════════════════════════════════════╗ -// ║ Go type │ Conversion ║ -// ╠════════════════════════╪════════════════════════════════════════════╣ -// ║ nil │ stored as NullValue ║ -// ║ bool │ stored as BoolValue ║ -// ║ int, int32, int64 │ stored as NumberValue ║ -// ║ uint, uint32, uint64 │ stored as NumberValue ║ -// ║ float32, float64 │ stored as NumberValue ║ -// ║ string │ stored as StringValue; must be valid UTF-8 ║ -// ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]any │ stored as StructValue ║ -// ║ []any │ stored as ListValue ║ -// ╚════════════════════════╧════════════════════════════════════════════╝ +// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ json.Number │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. 
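A short usage sketch of the NewValue conversion rules tabled above (assuming the vendored google.golang.org/protobuf from this change); json.Number is the newly supported input type, and, as the comment notes, large integers may lose precision because all numbers are stored as float64:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// json.Number is converted to a NumberValue (float64) by this version.
	v, err := structpb.NewValue(json.Number("3.14"))
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetNumberValue()) // 3.14

	// Ordinary Go maps and slices become StructValue and ListValue.
	s, err := structpb.NewStruct(map[string]any{
		"name":    "example",
		"retries": 3,
		"tags":    []any{"a", "b"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(s.GetFields()["retries"].GetNumberValue()) // 3
}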
@@ -320,12 +318,20 @@ func NewValue(v any) (*Value, error) { return NewBoolValue(v), nil case int: return NewNumberValue(float64(v)), nil + case int8: + return NewNumberValue(float64(v)), nil + case int16: + return NewNumberValue(float64(v)), nil case int32: return NewNumberValue(float64(v)), nil case int64: return NewNumberValue(float64(v)), nil case uint: return NewNumberValue(float64(v)), nil + case uint8: + return NewNumberValue(float64(v)), nil + case uint16: + return NewNumberValue(float64(v)), nil case uint32: return NewNumberValue(float64(v)), nil case uint64: @@ -334,6 +340,12 @@ func NewValue(v any) (*Value, error) { return NewNumberValue(float64(v)), nil case float64: return NewNumberValue(float64(v)), nil + case json.Number: + n, err := v.Float64() + if err != nil { + return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err) + } + return NewNumberValue(n), nil case string: if !utf8.ValidString(v) { return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) @@ -441,11 +453,9 @@ func (x *Value) UnmarshalJSON(b []byte) error { func (x *Value) Reset() { *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Value) String() string { @@ -456,7 +466,7 @@ func (*Value) ProtoMessage() {} func (x *Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -471,51 +481,63 @@ func (*Value) Descriptor() ([]byte, []int) { return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} } -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind +func (x *Value) GetKind() isValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *Value) GetNullValue() NullValue { - if x, ok := x.GetKind().(*Value_NullValue); ok { - return x.NullValue + if x != nil { + if x, ok := x.Kind.(*Value_NullValue); ok { + return x.NullValue + } } return NullValue_NULL_VALUE } func (x *Value) GetNumberValue() float64 { - if x, ok := x.GetKind().(*Value_NumberValue); ok { - return x.NumberValue + if x != nil { + if x, ok := x.Kind.(*Value_NumberValue); ok { + return x.NumberValue + } } return 0 } func (x *Value) GetStringValue() string { - if x, ok := x.GetKind().(*Value_StringValue); ok { - return x.StringValue + if x != nil { + if x, ok := x.Kind.(*Value_StringValue); ok { + return x.StringValue + } } return "" } func (x *Value) GetBoolValue() bool { - if x, ok := x.GetKind().(*Value_BoolValue); ok { - return x.BoolValue + if x != nil { + if x, ok := x.Kind.(*Value_BoolValue); ok { + return x.BoolValue + } } return false } func (x *Value) GetStructValue() *Struct { - if x, ok := x.GetKind().(*Value_StructValue); ok { - return x.StructValue + if x != nil { + if x, ok := x.Kind.(*Value_StructValue); ok { + return x.StructValue + } } return nil } func (x *Value) GetListValue() *ListValue { - if x, ok := x.GetKind().(*Value_ListValue); ok { - return x.ListValue + if x != nil { + if x, ok := x.Kind.(*Value_ListValue); ok { + return x.ListValue + } } return nil } @@ -570,12 +592,11 @@ func (*Value_ListValue) isValue_Kind() {} // // The JSON representation for `ListValue` 
is JSON array. type ListValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Repeated field of dynamically typed values. - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewList constructs a ListValue from a general-purpose Go slice. @@ -613,11 +634,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error { func (x *ListValue) Reset() { *x = ListValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListValue) String() string { @@ -628,7 +647,7 @@ func (*ListValue) ProtoMessage() {} func (x *ListValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -742,44 +761,6 @@ func file_google_protobuf_struct_proto_init() { if File_google_protobuf_struct_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Struct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), diff --git a/constraint/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/constraint/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645b..9550109aa 100644 --- a/constraint/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/constraint/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -170,10 +170,7 @@ import ( // http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() // ) to obtain a formatter capable of generating timestamps in this format. type Timestamp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. @@ -182,7 +179,9 @@ type Timestamp struct { // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Now constructs a new Timestamp from the current time. @@ -254,11 +253,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +266,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -348,20 +345,6 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/constraint/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/constraint/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index e473f826a..15b424ec1 100644 --- a/constraint/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/constraint/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -54,12 +54,11 @@ import ( // // The JSON representation for `DoubleValue` is JSON number. type DoubleValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The double value. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Double stores v in a new DoubleValue and returns a pointer to it. @@ -69,11 +68,9 @@ func Double(v float64) *DoubleValue { func (x *DoubleValue) Reset() { *x = DoubleValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DoubleValue) String() string { @@ -84,7 +81,7 @@ func (*DoubleValue) ProtoMessage() {} func (x *DoubleValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -110,12 +107,11 @@ func (x *DoubleValue) GetValue() float64 { // // The JSON representation for `FloatValue` is JSON number. 
type FloatValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The float value. - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Float stores v in a new FloatValue and returns a pointer to it. @@ -125,11 +121,9 @@ func Float(v float32) *FloatValue { func (x *FloatValue) Reset() { *x = FloatValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FloatValue) String() string { @@ -140,7 +134,7 @@ func (*FloatValue) ProtoMessage() {} func (x *FloatValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -166,12 +160,11 @@ func (x *FloatValue) GetValue() float32 { // // The JSON representation for `Int64Value` is JSON string. type Int64Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The int64 value. - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Int64 stores v in a new Int64Value and returns a pointer to it. @@ -181,11 +174,9 @@ func Int64(v int64) *Int64Value { func (x *Int64Value) Reset() { *x = Int64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int64Value) String() string { @@ -196,7 +187,7 @@ func (*Int64Value) ProtoMessage() {} func (x *Int64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -222,12 +213,11 @@ func (x *Int64Value) GetValue() int64 { // // The JSON representation for `UInt64Value` is JSON string. type UInt64Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The uint64 value. - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // UInt64 stores v in a new UInt64Value and returns a pointer to it. 
@@ -237,11 +227,9 @@ func UInt64(v uint64) *UInt64Value { func (x *UInt64Value) Reset() { *x = UInt64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt64Value) String() string { @@ -252,7 +240,7 @@ func (*UInt64Value) ProtoMessage() {} func (x *UInt64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -278,12 +266,11 @@ func (x *UInt64Value) GetValue() uint64 { // // The JSON representation for `Int32Value` is JSON number. type Int32Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The int32 value. - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Int32 stores v in a new Int32Value and returns a pointer to it. @@ -293,11 +280,9 @@ func Int32(v int32) *Int32Value { func (x *Int32Value) Reset() { *x = Int32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int32Value) String() string { @@ -308,7 +293,7 @@ func (*Int32Value) ProtoMessage() {} func (x *Int32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -334,12 +319,11 @@ func (x *Int32Value) GetValue() int32 { // // The JSON representation for `UInt32Value` is JSON number. type UInt32Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The uint32 value. - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // UInt32 stores v in a new UInt32Value and returns a pointer to it. 
@@ -349,11 +333,9 @@ func UInt32(v uint32) *UInt32Value { func (x *UInt32Value) Reset() { *x = UInt32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt32Value) String() string { @@ -364,7 +346,7 @@ func (*UInt32Value) ProtoMessage() {} func (x *UInt32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -390,12 +372,11 @@ func (x *UInt32Value) GetValue() uint32 { // // The JSON representation for `BoolValue` is JSON `true` and `false`. type BoolValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The bool value. - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Bool stores v in a new BoolValue and returns a pointer to it. @@ -405,11 +386,9 @@ func Bool(v bool) *BoolValue { func (x *BoolValue) Reset() { *x = BoolValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BoolValue) String() string { @@ -420,7 +399,7 @@ func (*BoolValue) ProtoMessage() {} func (x *BoolValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -446,12 +425,11 @@ func (x *BoolValue) GetValue() bool { // // The JSON representation for `StringValue` is JSON string. type StringValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The string value. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // String stores v in a new StringValue and returns a pointer to it. 
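A minimal sketch (assuming the vendored module version) of the wrapperspb constructors regenerated in this file; each helper boxes a scalar in a wrapper message so that presence can be distinguished from the zero value:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Each constructor stores the scalar in a new wrapper message.
	s := wrapperspb.String("hello")
	n := wrapperspb.Int64(42)
	b := wrapperspb.Bool(false) // still "present", unlike a plain false field

	fmt.Println(s.GetValue(), n.GetValue(), b.GetValue())
}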
@@ -461,11 +439,9 @@ func String(v string) *StringValue { func (x *StringValue) Reset() { *x = StringValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringValue) String() string { @@ -476,7 +452,7 @@ func (*StringValue) ProtoMessage() {} func (x *StringValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -502,12 +478,11 @@ func (x *StringValue) GetValue() string { // // The JSON representation for `BytesValue` is JSON string. type BytesValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The bytes value. - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Bytes stores v in a new BytesValue and returns a pointer to it. @@ -517,11 +492,9 @@ func Bytes(v []byte) *BytesValue { func (x *BytesValue) Reset() { *x = BytesValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BytesValue) String() string { @@ -532,7 +505,7 @@ func (*BytesValue) ProtoMessage() {} func (x *BytesValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -629,116 +602,6 @@ func file_google_protobuf_wrappers_proto_init() { if File_google_protobuf_wrappers_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*DoubleValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FloatValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Int64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*UInt64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Int32Value); i { - case 0: - return &v.state - 
case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*UInt32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*BoolValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*StringValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*BytesValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/constraint/vendor/github.com/evanphx/json-patch/.gitignore b/constraint/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore similarity index 100% rename from constraint/vendor/github.com/evanphx/json-patch/.gitignore rename to constraint/vendor/gopkg.in/evanphx/json-patch.v4/.gitignore diff --git a/constraint/vendor/github.com/evanphx/json-patch/LICENSE b/constraint/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE similarity index 100% rename from constraint/vendor/github.com/evanphx/json-patch/LICENSE rename to constraint/vendor/gopkg.in/evanphx/json-patch.v4/LICENSE diff --git a/constraint/vendor/github.com/evanphx/json-patch/README.md b/constraint/vendor/gopkg.in/evanphx/json-patch.v4/README.md similarity index 100% rename from constraint/vendor/github.com/evanphx/json-patch/README.md rename to constraint/vendor/gopkg.in/evanphx/json-patch.v4/README.md diff --git a/constraint/vendor/github.com/evanphx/json-patch/errors.go b/constraint/vendor/gopkg.in/evanphx/json-patch.v4/errors.go similarity index 100% rename from constraint/vendor/github.com/evanphx/json-patch/errors.go rename to constraint/vendor/gopkg.in/evanphx/json-patch.v4/errors.go diff --git a/constraint/vendor/github.com/evanphx/json-patch/merge.go b/constraint/vendor/gopkg.in/evanphx/json-patch.v4/merge.go similarity index 100% rename from constraint/vendor/github.com/evanphx/json-patch/merge.go rename to constraint/vendor/gopkg.in/evanphx/json-patch.v4/merge.go diff --git a/constraint/vendor/github.com/evanphx/json-patch/patch.go b/constraint/vendor/gopkg.in/evanphx/json-patch.v4/patch.go similarity index 100% rename from constraint/vendor/github.com/evanphx/json-patch/patch.go rename to constraint/vendor/gopkg.in/evanphx/json-patch.v4/patch.go diff --git a/constraint/vendor/gopkg.in/yaml.v2/.travis.yml b/constraint/vendor/gopkg.in/yaml.v2/.travis.yml deleted file mode 100644 index 7348c50c0..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "1.14.x" - - "tip" - -go_import_path: gopkg.in/yaml.v2 diff --git a/constraint/vendor/gopkg.in/yaml.v2/LICENSE b/constraint/vendor/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- 
a/constraint/vendor/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/constraint/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/constraint/vendor/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fbf6..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/constraint/vendor/gopkg.in/yaml.v2/NOTICE b/constraint/vendor/gopkg.in/yaml.v2/NOTICE deleted file mode 100644 index 866d74a7a..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/constraint/vendor/gopkg.in/yaml.v2/README.md b/constraint/vendor/gopkg.in/yaml.v2/README.md deleted file mode 100644 index b50c6e877..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. 
- -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/constraint/vendor/gopkg.in/yaml.v2/apic.go b/constraint/vendor/gopkg.in/yaml.v2/apic.go deleted file mode 100644 index acf71402c..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/apic.go +++ /dev/null @@ -1,744 +0,0 @@ -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. 
-func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -var disableLineWrapping = false - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } - if disableLineWrapping { - emitter.best_width = -1 - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. 
-func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -///* -// * Create ALIAS. 
-// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. 
-// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. 
-// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/constraint/vendor/gopkg.in/yaml.v2/decode.go b/constraint/vendor/gopkg.in/yaml.v2/decode.go deleted file mode 100644 index 129bc2a97..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/decode.go +++ /dev/null @@ -1,815 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - // For an alias node, alias holds the resolved alias. - alias *node - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
- -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node - doneInit bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. -func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. -func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. 
- return nil - default: - panic("attempted to parse unknown event: " + p.event.typ.String()) - } -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - n.children = append(n.children, p.parse()) - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - n.alias = p.doc.anchors[n.value] - if n.alias == nil { - failf("unknown anchor '%s' referenced", n.value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. - -type decoder struct { - doc *node - aliases map[*node]bool - mapType reflect.Type - terrors []string - strict bool - - decodeCount int - aliasCount int - aliasDepth int -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[*node]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. 
-// -// If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or - // ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - - // 4,000,000 decode operations is ~5MB of dense object declarations, or - // ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). - return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == yaml_BINARY_TAG { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. - text = []byte(n.value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - return true - } - if resolved != nil { - out.SetString(n.value) - return true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else if tag == yaml_TIMESTAMP_TAG { - // It looks like a timestamp but for backward compatibility - // reasons we set it as a string, so that code that unmarshals - // timestamp-like values into interface{} will continue to - // see a string and not a time.Time. - // TODO(v3) Drop this. 
- out.Set(reflect.ValueOf(n.value)) - } else { - out.Set(reflect.ValueOf(resolved)) - } - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? - elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - return true - } - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - d.setMapIndex(n.children[i+1], out, k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { - if d.strict && out.MapIndex(k) != zeroValue { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) - return - } - out.SetMapIndex(k, v) -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - var doneFields []bool - if d.strict { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.strict { - if doneFields[info.Id] { - d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.children[i+1], value) - d.setMapIndex(n.children[i+1], inlineMap, name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - if n.alias != nil && n.alias.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - if ni.alias != nil && ni.alias.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/emitterc.go b/constraint/vendor/gopkg.in/yaml.v2/emitterc.go deleted file mode 100644 index a1c2cc526..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. 
-func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? 
- tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. -func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. 
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. 
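The flow-sequence and flow-mapping states deleted above are what the public gopkg.in/yaml.v2 API reaches through the ",flow" struct-tag option; block style is the default. A small usage sketch (the Service type is illustrative, not from this repository):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Service struct {
	Name  string            `yaml:"name"`
	Ports []int             `yaml:"ports,flow"` // emitted by the flow-sequence states: [80, 443]
	Env   map[string]string `yaml:"env"`        // emitted by the block-mapping states
}

func main() {
	out, err := yaml.Marshal(Service{
		Name:  "web",
		Ports: []int{80, 443},
		Env:   map[string]string{"MODE": "prod"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected (approximately):
	// name: web
	// ports: [80, 443]
	// env:
	//   MODE: prod
}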
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. 
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. -func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. 
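yaml_emitter_emit_block_mapping_key above falls back to the explicit "? key" / ": value" form whenever yaml_emitter_check_simple_key rejects the key, for example multi-line keys or keys longer than the 128-byte limit. A hedged illustration through the public API; the exact rendering may vary, but oversized keys typically get the explicit form:

package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v2"
)

func main() {
	longKey := strings.Repeat("k", 200) // exceeds the 128-byte simple-key limit checked above
	out, err := yaml.Marshal(map[string]string{longKey: "value"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Typically rendered with the explicit key indicator:
	// ? kkkk...k
	// : value
}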
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. 
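yaml_emitter_emit_sequence_start and yaml_emitter_emit_mapping_start above switch to flow style not only inside an existing flow collection or in canonical mode, but also when the collection is empty, which is why empty Go slices and maps marshal as [] and {} even in block context. A small illustration (field names are made up for the example):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Spec struct {
	Items  []string          `yaml:"items"`
	Labels map[string]string `yaml:"labels"`
}

func main() {
	out, err := yaml.Marshal(Spec{Items: []string{}, Labels: map[string]string{}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected (approximately):
	// items: []
	// labels: {}
}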
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. 
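yaml_emitter_select_scalar_style above is what ultimately decides between plain, quoted, and block scalars; together with the encoder's stringv (deleted later in this diff) it quotes strings that would otherwise resolve to another type and prefers the literal block style for multi-line values. A rough illustration (output shown approximately; the encoder sorts map keys):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"plain":     "hello",
		"ambiguous": "true",          // a plain "true" would resolve to a bool, so it is quoted
		"multiline": "first\nsecond", // newlines select the literal block style
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected (approximately):
	// ambiguous: "true"
	// multiline: |-
	//   first
	//   second
	// plain: hello
}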
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. 
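yaml_emitter_analyze_tag above shortens a full tag against the registered %TAG directives; with only the YAML 1.1 defaults in place that is how tag:yaml.org,2002:str becomes !!str. A minimal standalone sketch of that lookup, using plain strings instead of the []byte fields from the deleted code:

package main

import (
	"fmt"
	"strings"
)

type tagDirective struct{ handle, prefix string }

// The defaults mirrored by default_tag_directives in the deleted file.
var directives = []tagDirective{
	{handle: "!", prefix: "!"},
	{handle: "!!", prefix: "tag:yaml.org,2002:"},
}

// shorten splits a full tag into handle + suffix, or falls back to the verbatim !<...> form.
func shorten(tag string) string {
	for _, d := range directives {
		if strings.HasPrefix(tag, d.prefix) {
			return d.handle + strings.TrimPrefix(tag, d.prefix)
		}
	}
	return "!<" + tag + ">"
}

func main() {
	fmt.Println(shorten("tag:yaml.org,2002:str"))  // !!str
	fmt.Println(shorten("!local"))                 // !local
	fmt.Println(shorten("tag:example.com,2024:x")) // !<tag:example.com,2024:x>
}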
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
- preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. -func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. 
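The flags computed by yaml_emitter_analyze_scalar above (leading/trailing spaces, break-space runs, special characters) are what disable the plain and block styles, so strings with significant whitespace at the edges come back quoted rather than plain. For instance (hedged; the exact quoting style may differ between versions):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"padded":   " leading space",
		"trailing": "trailing space ",
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected (approximately):
	// padded: ' leading space'
	// trailing: 'trailing space '
}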
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = 
false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if 
!put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/encode.go b/constraint/vendor/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index 0ee738e11..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,390 +0,0 @@ -package yaml - -import ( - 
"encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// jsonNumber is the interface of the encoding/json.Number datatype. -// Repeating the interface here avoids a dependency on encoding/json, and also -// supports other libraries like jsoniter, which use a similar datatype with -// the same interface. Detecting this interface is useful when dealing with -// structures containing json.Number, which is a string under the hood. The -// encoder should prefer the use of Int64(), Float64() and string(), in that -// order, when encoding this type. -type jsonNumber interface { - Float64() (float64, error) - Int64() (int64, error) - String() string -} - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - // doneInit holds whether the initial stream_start_event has been - // emitted. - doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch m := iface.(type) { - case jsonNumber: - integer, err := m.Int64() - if err == nil { - // In this case the json.Number is a valid int64 - in = reflect.ValueOf(integer) - break - } - float, err := m.Float64() - if err == nil { - // In this case the json.Number is a valid float64 - in = reflect.ValueOf(float) - break - } - // fallback case - no number could be obtained - in = reflect.ValueOf(m.String()) - case time.Time, *time.Time: - // Although time.Time implements TextMarshaler, - // we don't want to treat it as a string for YAML - // purposes because YAML has special support for - // timestamps. 
- case Marshaler: - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - case encoding.TextMarshaler: - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.Type() == ptrTimeType { - e.timev(tag, in.Elem()) - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - if in.Type() == timeType { - e.timev(tag, in) - } else { - e.structv(tag, in) - } - case reflect.Slice, reflect.Array: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = yaml_BINARY_TAG - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. - switch { - case strings.Contains(s, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/parserc.go b/constraint/vendor/gopkg.in/yaml.v2/parserc.go deleted file mode 100644 index 81d05dfe5..000000000 --- 
a/constraint/vendor/gopkg.in/yaml.v2/parserc.go +++ /dev/null @@ -1,1095 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
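The parser grammar and state machine deleted above are exercised through yaml.Unmarshal and yaml.NewDecoder; the decoder walks the same implicit_document/explicit_document productions for multi-document streams and resolves anchors and aliases. A small usage sketch (the stream content is illustrative):

package main

import (
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v2"
)

const stream = `
defaults: &d
  retries: 3
job: *d
---
name: second document
`

func main() {
	dec := yaml.NewDecoder(strings.NewReader(stream))
	for {
		var doc map[string]interface{}
		if err := dec.Decode(&doc); err != nil {
			if err == io.EOF {
				break // STREAM-END reached
			}
			panic(err)
		}
		fmt.Println(doc) // the alias *d is resolved to the anchored mapping
	}
}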
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/readerc.go b/constraint/vendor/gopkg.in/yaml.v2/readerc.go deleted file mode 100644 index 7c1f5fac3..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/readerc.go +++ /dev/null @@ -1,412 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. 
-const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. 
- if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. 
- switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. 
- parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. - for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/resolve.go b/constraint/vendor/gopkg.in/yaml.v2/resolve.go deleted file mode 100644 index 4120e0c91..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/resolve.go +++ /dev/null @@ -1,258 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' 
// Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - case yaml_FLOAT_TAG: - if rtag == yaml_INT_TAG { - switch v := out.(type) { - case int64: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - case int: - rtag = yaml_FLOAT_TAG - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. 
- if tag == "" || tag == yaml_TIMESTAMP_TAG { - t, ok := parseTimestamp(in) - if ok { - return yaml_TIMESTAMP_TAG, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - } - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - return yaml_STR_TAG, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. 
- i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/scannerc.go b/constraint/vendor/gopkg.in/yaml.v2/scannerc.go deleted file mode 100644 index 0b9bb6030..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/scannerc.go +++ /dev/null @@ -1,2711 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? 
a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? 
complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . 
LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - if parser.tokens_head != len(parser.tokens) { - // If queue is non-empty, check if any potential simple key may - // occupy the head position. - head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] - if !ok { - break - } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { - return false - } else if !valid { - break - } - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. 
- if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? 
- if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { - if !simple_key.possible { - return false, true - } - - // The 1.2 specification says: - // - // "If the ? indicator is omitted, parsing needs to see past the - // implicit key to recognize it as such. To limit the amount of - // lookahead required, the “:” indicator must appear at most 1024 - // Unicode characters beyond the start of the key. In addition, the key - // is restricted to a single line." - // - if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { - // Check if the potential simple key to be removed is required. - if simple_key.required { - return false, yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - return false, true - } - return true, true -} - -// Check if a simple key may start at the current position and add it if -// needed. 
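// A standalone sketch of the lookahead limit enforced by
// yaml_simple_key_is_valid above: an implicit key must stay on one line and
// its ':' must appear within 1024 characters of where the key started.
// mark and simpleKeyStillPossible are hypothetical names for illustration.
package main

import "fmt"

type mark struct{ index, line int }

// simpleKeyStillPossible reports whether a ':' found at cur can still
// terminate a potential simple key that started at key.
func simpleKeyStillPossible(key, cur mark) bool {
	return key.line == cur.line && cur.index <= key.index+1024
}

func main() {
	start := mark{index: 10, line: 3}
	fmt.Println(simpleKeyStillPossible(start, mark{index: 500, line: 3}))  // true
	fmt.Println(simpleKeyStillPossible(start, mark{index: 500, line: 4}))  // false: crossed a line break
	fmt.Println(simpleKeyStillPossible(start, mark{index: 2000, line: 3})) // false: ':' more than 1024 chars away
}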
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - } - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) - } - return true -} - -// max_flow_level limits the flow_level -const max_flow_level = 10000 - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ - possible: false, - required: false, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - }) - - // Increase the flow level. - parser.flow_level++ - if parser.flow_level > max_flow_level { - return yaml_parser_set_scanner_error(parser, - "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_flow_level)) - } - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - last := len(parser.simple_keys) - 1 - delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) - parser.simple_keys = parser.simple_keys[:last] - } - return true -} - -// max_indents limits the indents stack size -const max_indents = 10000 - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - if len(parser.indents) > max_indents { - return yaml_parser_set_scanner_error(parser, - "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_indents)) - } - - // Create a token and insert it into the queue. 
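// A standalone sketch of the indentation-stack discipline behind
// yaml_parser_roll_indent and yaml_parser_unroll_indent: rolling to a deeper
// column pushes the previous level and yields one *-START token, while
// unrolling pops every level deeper than the current column and yields one
// BLOCK-END per pop. indenter, roll, and unroll are hypothetical names.
package main

import "fmt"

type indenter struct {
	indent  int
	indents []int
}

func (in *indenter) roll(column int, start string) []string {
	if in.indent >= column {
		return nil
	}
	in.indents = append(in.indents, in.indent)
	in.indent = column
	return []string{start}
}

func (in *indenter) unroll(column int) []string {
	var toks []string
	for in.indent > column {
		toks = append(toks, "BLOCK-END")
		in.indent = in.indents[len(in.indents)-1]
		in.indents = in.indents[:len(in.indents)-1]
	}
	return toks
}

func main() {
	in := &indenter{indent: -1}
	fmt.Println(in.roll(0, "BLOCK-MAPPING-START"))  // a top-level "key:"
	fmt.Println(in.roll(2, "BLOCK-SEQUENCE-START")) // an indented "- item"
	fmt.Println(in.unroll(-1))                      // end of document: [BLOCK-END BLOCK-END]
}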
- token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the indentation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - parser.simple_keys_by_tok = make(map[int]int) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. 
- start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. 
- if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { - return false - - } else if valid { - - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - delete(parser.simple_keys_by_tok, simple_key.token_number) - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. 
-// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. 
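// A standalone sketch of the "%YAML <major>.<minor>" parsing above, including
// the two-digit-per-component limit (max_number_length). parseVersion and
// parseComponent are hypothetical helpers, not yaml.v2 API.
package main

import (
	"errors"
	"fmt"
	"strings"
)

func parseComponent(s string) (int8, error) {
	if len(s) == 0 {
		return 0, errors.New("did not find expected version number")
	}
	if len(s) > 2 { // mirrors max_number_length
		return 0, errors.New("found extremely long version number")
	}
	var v int8
	for _, r := range s {
		if r < '0' || r > '9' {
			return 0, errors.New("did not find expected digit")
		}
		v = v*10 + int8(r-'0')
	}
	return v, nil
}

func parseVersion(s string) (major, minor int8, err error) {
	parts := strings.SplitN(s, ".", 2)
	if len(parts) != 2 {
		return 0, 0, errors.New("did not find expected digit or '.' character")
	}
	if major, err = parseComponent(parts[0]); err != nil {
		return 0, 0, err
	}
	if minor, err = parseComponent(parts[1]); err != nil {
		return 0, 0, err
	}
	return major, minor, nil
}

func main() {
	fmt.Println(parseVersion("1.2"))   // 1 2 <nil>
	fmt.Println(parseVersion("1.123")) // rejected: component longer than two digits
}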
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. 
- if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. 
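// A standalone sketch of the '%XX' URI escapes handled just below by
// yaml_parser_scan_uri_escapes: each escape contributes one raw octet, so a
// multi-byte UTF-8 character arrives as several consecutive escapes. This
// sketch skips the leading/continuation-octet validation the real scanner
// performs; decodeURIEscapes is a hypothetical helper, not yaml.v2 API.
package main

import (
	"encoding/hex"
	"errors"
	"fmt"
)

func decodeURIEscapes(s string) ([]byte, error) {
	var out []byte
	for i := 0; i < len(s); {
		if s[i] != '%' {
			out = append(out, s[i])
			i++
			continue
		}
		if i+3 > len(s) {
			return nil, errors.New("did not find URI escaped octet")
		}
		b, err := hex.DecodeString(s[i+1 : i+3])
		if err != nil {
			return nil, errors.New("did not find URI escaped octet")
		}
		out = append(out, b[0])
		i += 3
	}
	return out, nil
}

func main() {
	decoded, err := decodeURIEscapes("tag:example.com,2002:caf%C3%A9")
	fmt.Println(string(decoded), err) // tag:example.com,2002:café <nil>
}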
- // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. 
- if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. 
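// The observable effect of the chomping and folding logic above, shown through
// the package's public API. A small demo assuming gopkg.in/yaml.v2 is
// importable; the expected values follow the YAML block-scalar rules ('|'
// clips to one trailing newline, '|+' keeps trailing blank lines, '|-' strips
// them, and '>' folds single line breaks into spaces).
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	src := []byte("clip: |\n  text\n\nkeep: |+\n  text\n\nstrip: |-\n  text\nfolded: >\n  one\n  two\n")
	var m map[string]string
	if err := yaml.Unmarshal(src, &m); err != nil {
		panic(err)
	}
	// Expected: clip="text\n", keep="text\n\n", strip="text", folded="one two\n".
	fmt.Printf("clip=%q keep=%q strip=%q folded=%q\n",
		m["clip"], m["keep"], m["strip"], m["folded"])
}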
- s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' 
&& - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. - if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. 
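// Aside on the encoding step that follows: the manual byte arithmetic below is
// the standard UTF-8 encoding of a code point, and surrogate halves plus
// values above U+10FFFF are rejected for the same reason unicode/utf8 treats
// them as invalid runes. In ordinary Go code the equivalent is utf8.EncodeRune.
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	for _, value := range []rune{0x41, 0x7FF, 0xFFFD, 0x1F600} {
		buf := make([]byte, utf8.UTFMax)
		n := utf8.EncodeRune(buf, value)
		fmt.Printf("U+%04X -> % X (valid=%v)\n", value, buf[:n], utf8.ValidRune(value))
	}
}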
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' 
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/sorter.go b/constraint/vendor/gopkg.in/yaml.v2/sorter.go deleted file mode 100644 index 4c45e660a..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/sorter.go +++ /dev/null @@ -1,113 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/writerc.go b/constraint/vendor/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index a2dde608c..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,26 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. 
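The deleted sorter.go above gives yaml.v2 its "natural" map-key ordering: when two keys diverge at a digit, the embedded numbers are compared numerically, so "item2" sorts before "item10". A simplified, self-contained sketch of that ordering (not the library's exact comparator; the helper names are invented here):

package main

import (
	"fmt"
	"sort"
	"strconv"
	"unicode"
)

// digitRun returns the leading run of digits in r.
func digitRun(r []rune) string {
	j := 0
	for j < len(r) && unicode.IsDigit(r[j]) {
		j++
	}
	return string(r[:j])
}

// naturalLess is a simplified stand-in for the deleted keyList.Less: skip the
// common prefix, compare digit runs numerically, fall back to plain ordering.
func naturalLess(a, b string) bool {
	ar, br := []rune(a), []rune(b)
	i := 0
	for i < len(ar) && i < len(br) && ar[i] == br[i] {
		i++
	}
	if i < len(ar) && i < len(br) && unicode.IsDigit(ar[i]) && unicode.IsDigit(br[i]) {
		an, _ := strconv.Atoi(digitRun(ar[i:]))
		bn, _ := strconv.Atoi(digitRun(br[i:]))
		if an != bn {
			return an < bn
		}
	}
	return a < b
}

func main() {
	keys := []string{"item10", "item2", "item1"}
	sort.Slice(keys, func(i, j int) bool { return naturalLess(keys[i], keys[j]) })
	fmt.Println(keys) // [item1 item2 item10]
}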
-func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/yaml.go b/constraint/vendor/gopkg.in/yaml.v2/yaml.go deleted file mode 100644 index 30813884c..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/yaml.go +++ /dev/null @@ -1,478 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. -type Unmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. 
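The Unmarshal documentation above describes partial decoding with a *yaml.TypeError and, via UnmarshalStrict, rejection of unknown keys. A minimal sketch of both behaviours, assuming gopkg.in/yaml.v2 is imported as a regular module dependency rather than from the removed vendor copy (the Config type is illustrative only):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Config is an illustrative type; its tags follow the format described in the
// deleted documentation above.
type Config struct {
	Name string `yaml:"name"`
	Port int    `yaml:"port"`
}

func main() {
	data := []byte("name: demo\nport: not-a-number\nextra: true\n")

	var lenient Config
	// Unmarshal keeps decoding past type mismatches and reports them all in a
	// *yaml.TypeError; Name is still populated here.
	if err := yaml.Unmarshal(data, &lenient); err != nil {
		fmt.Printf("lenient: %v (name=%q)\n", err, lenient.Name)
	}

	var strict Config
	// UnmarshalStrict additionally rejects the unknown "extra" key.
	if err := yaml.UnmarshalStrict(data, &strict); err != nil {
		fmt.Println("strict:", err)
	}
}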
-// -func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members, or mapping -// keys that are duplicates, will result in -// an error. -func UnmarshalStrict(in []byte, out interface{}) (err error) { - return unmarshal(in, out, true) -} - -// A Decoder reads and decodes YAML values from an input stream. -type Decoder struct { - strict bool - parser *parser -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// SetStrict sets whether strict decoding behaviour is enabled when -// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. -func (dec *Decoder) SetStrict(strict bool) { - dec.strict = strict -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder(dec.strict) - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { - defer handleErr(&err) - d := newDecoder(strict) - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. 
-// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. -// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. 
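The Encoder and Decoder removed above stream multiple YAML documents, separating them with "---" and signalling end-of-stream with io.EOF. A rough round-trip sketch under the same assumption that gopkg.in/yaml.v2 is available as a module:

package main

import (
	"bytes"
	"fmt"
	"io"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var buf bytes.Buffer

	// Encode two documents; the second is preceded by a "---" separator.
	enc := yaml.NewEncoder(&buf)
	for _, doc := range []map[string]int{{"a": 1}, {"b": 2}} {
		if err := enc.Encode(doc); err != nil {
			panic(err)
		}
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())

	// Decode them back; Decode returns io.EOF once the stream is exhausted.
	dec := yaml.NewDecoder(&buf)
	for {
		var m map[string]int
		err := dec.Decode(&m)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(m)
	}
}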
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. 
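The deleted getStructInfo above is what parses the omitempty, flow and inline tag options described in the Marshal documentation. A small sketch of how those options surface through the public API (the Meta and Doc types are invented for illustration; the expected output is indicated in comments, not guaranteed verbatim):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Meta and Doc are invented for illustration.
type Meta struct {
	Kind string `yaml:"kind"`
}

type Doc struct {
	Meta   `yaml:",inline"`                 // Meta's fields appear in the outer mapping
	Name   string   `yaml:"name,omitempty"` // omitted while empty
	Labels []string `yaml:"labels,flow"`    // emitted in flow style: [a, b]
}

func main() {
	out, err := yaml.Marshal(Doc{Meta: Meta{Kind: "Demo"}, Labels: []string{"a", "b"}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected shape:
	// kind: Demo
	// labels: [a, b]
}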
-type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} - -// FutureLineWrap globally disables line wrapping when encoding long strings. -// This is a temporary and thus deprecated method introduced to faciliate -// migration towards v3, which offers more control of line lengths on -// individual encodings, and has a default matching the behavior introduced -// by this function. -// -// The default formatting of v2 was erroneously changed in v2.3.0 and reverted -// in v2.4.0, at which point this function was introduced to help migration. -func FutureLineWrap() { - disableLineWrapping = true -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/yamlh.go b/constraint/vendor/gopkg.in/yaml.v2/yamlh.go deleted file mode 100644 index f6a9c8e34..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/yamlh.go +++ /dev/null @@ -1,739 +0,0 @@ -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. 
-} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. 
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. 
-) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. 
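The yaml_*_TAG constants above drive implicit tag resolution; through the public API this is visible as the Go types an untyped Unmarshal produces. A rough sketch, again assuming gopkg.in/yaml.v2 as a module:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v interface{}
	src := []byte("count: 3\nok: true\npi: 3.14\nname: demo\n")
	if err := yaml.Unmarshal(src, &v); err != nil {
		panic(err)
	}

	// Untyped mappings decode as map[interface{}]interface{}; each scalar is
	// resolved through the implicit !!int, !!bool, !!float and !!str tags.
	m := v.(map[interface{}]interface{})
	for _, k := range []string{"count", "ok", "pi", "name"} {
		fmt.Printf("%s: %T\n", k, m[k])
	}
	// count: int, ok: bool, pi: float64, name: string
}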
-type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. -) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return 
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
- yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. 
- multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/constraint/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/constraint/vendor/gopkg.in/yaml.v2/yamlprivateh.go deleted file mode 100644 index 8110ce3c3..000000000 --- a/constraint/vendor/gopkg.in/yaml.v2/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. 
-func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/constraint/vendor/k8s.io/api/admission/v1/doc.go b/constraint/vendor/k8s.io/api/admission/v1/doc.go index cbc6bb59d..e7df9f629 100644 --- a/constraint/vendor/k8s.io/api/admission/v1/doc.go +++ b/constraint/vendor/k8s.io/api/admission/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=false - +// +k8s:prerelease-lifecycle-gen=true // +groupName=admission.k8s.io package v1 // import "k8s.io/api/admission/v1" diff --git a/constraint/vendor/k8s.io/api/admission/v1/generated.proto b/constraint/vendor/k8s.io/api/admission/v1/generated.proto index 941deb4fb..9648aa58f 100644 --- a/constraint/vendor/k8s.io/api/admission/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/admission/v1/generated.proto @@ -38,10 +38,10 @@ message AdmissionRequest { optional string uid = 1; // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; // Resource is the fully-qualified resource being requested (for example, v1.pods) - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; // SubResource is the subresource being requested, if any (for example, "status" or "scale") // +optional @@ -58,7 +58,7 @@ message AdmissionRequest { // // See documentation for the "matchPolicy" field in the webhook configuration type for more details. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. @@ -71,7 +71,7 @@ message AdmissionRequest { // // See documentation for the "matchPolicy" field in the webhook configuration type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. @@ -93,15 +93,15 @@ message AdmissionRequest { optional string operation = 7; // UserInfo is information about the requesting user - optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 8; // Object is the object from the incoming request. // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; // OldObject is the existing object. Only populated for DELETE and UPDATE requests. // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; // DryRun indicates that modifications will definitely not be persisted for this request. // Defaults to false. @@ -114,7 +114,7 @@ message AdmissionRequest { // Operation might be a CREATE, in which case the Options will a // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; } // AdmissionResponse describes an admission response. 
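The +k8s:prerelease-lifecycle-gen=true marker added above is what produces the new zz_generated.prerelease-lifecycle.go file later in this diff, whose APILifecycleIntroduced method reports the release the API type first appeared in. A minimal usage sketch:

package main

import (
	"fmt"

	admissionv1 "k8s.io/api/admission/v1"
)

func main() {
	// The generated method reports the Kubernetes release the type was
	// introduced in; for admission/v1 AdmissionReview that is 1.19.
	var review admissionv1.AdmissionReview
	major, minor := review.APILifecycleIntroduced()
	fmt.Printf("AdmissionReview introduced in v%d.%d\n", major, minor)
}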
@@ -129,7 +129,7 @@ message AdmissionResponse { // Result contains extra details into why an admission request was denied. // This field IS NOT consulted in any way if "Allowed" is "true". // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. // +optional diff --git a/constraint/vendor/k8s.io/api/admission/v1/types.go b/constraint/vendor/k8s.io/api/admission/v1/types.go index 556fd1ad5..2def92da5 100644 --- a/constraint/vendor/k8s.io/api/admission/v1/types.go +++ b/constraint/vendor/k8s.io/api/admission/v1/types.go @@ -24,6 +24,7 @@ import ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // AdmissionReview describes an admission review request/response. type AdmissionReview struct { diff --git a/constraint/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..ac81d993c --- /dev/null +++ b/constraint/vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,28 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *AdmissionReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/constraint/vendor/k8s.io/api/admission/v1beta1/doc.go b/constraint/vendor/k8s.io/api/admission/v1beta1/doc.go deleted file mode 100644 index a5669022a..000000000 --- a/constraint/vendor/k8s.io/api/admission/v1beta1/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=false -// +k8s:prerelease-lifecycle-gen=true - -// +groupName=admission.k8s.io - -package v1beta1 // import "k8s.io/api/admission/v1beta1" diff --git a/constraint/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/constraint/vendor/k8s.io/api/admission/v1beta1/generated.pb.go deleted file mode 100644 index 22147cbe9..000000000 --- a/constraint/vendor/k8s.io/api/admission/v1beta1/generated.pb.go +++ /dev/null @@ -1,1782 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/api/admission/v1beta1/generated.proto - -package v1beta1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} } -func (*AdmissionRequest) ProtoMessage() {} -func (*AdmissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d8f147b43c61e73e, []int{0} -} -func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AdmissionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdmissionRequest.Merge(m, src) -} -func (m *AdmissionRequest) XXX_Size() int { - return m.Size() -} -func (m *AdmissionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AdmissionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo - -func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} } -func (*AdmissionResponse) ProtoMessage() {} -func (*AdmissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d8f147b43c61e73e, []int{1} -} -func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AdmissionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdmissionResponse.Merge(m, src) -} -func (m *AdmissionResponse) XXX_Size() int { - return m.Size() -} -func (m *AdmissionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AdmissionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo - -func (m *AdmissionReview) Reset() { *m = AdmissionReview{} } -func (*AdmissionReview) ProtoMessage() {} -func (*AdmissionReview) Descriptor() ([]byte, []int) { - return fileDescriptor_d8f147b43c61e73e, []int{2} -} -func (m *AdmissionReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AdmissionReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_AdmissionReview.Merge(m, src) -} -func (m *AdmissionReview) XXX_Size() int { - return m.Size() -} -func (m *AdmissionReview) XXX_DiscardUnknown() { - xxx_messageInfo_AdmissionReview.DiscardUnknown(m) -} - -var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo - -func init() { - proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1beta1.AdmissionRequest") - proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse.AuditAnnotationsEntry") - proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview") -} - -func init() { - proto.RegisterFile("k8s.io/api/admission/v1beta1/generated.proto", fileDescriptor_d8f147b43c61e73e) -} - -var fileDescriptor_d8f147b43c61e73e = []byte{ - // 911 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0xb4, 0xb2, 0xaa, 0xb5, 0xc9, 0x01, - 0x19, 0xa9, 0x9d, 0x25, 0x11, 0x54, 
0x51, 0xc5, 0x25, 0x4b, 0x22, 0x14, 0x90, 0x9a, 0x68, 0x5a, - 0x43, 0xe1, 0x80, 0x34, 0xb6, 0xa7, 0xf6, 0x60, 0x7b, 0x66, 0xd9, 0x99, 0x4d, 0xf0, 0x8d, 0x3b, - 0x17, 0xbe, 0x01, 0x5f, 0x80, 0x6f, 0xc1, 0x25, 0xc7, 0x1e, 0x7b, 0xb2, 0x88, 0xf9, 0x16, 0x39, - 0xa1, 0x99, 0x9d, 0xf5, 0x3a, 0x4e, 0x52, 0xfa, 0xef, 0x94, 0x7d, 0x7f, 0x7e, 0xbf, 0xf7, 0xf2, - 0x7b, 0xfb, 0xde, 0x1a, 0xdc, 0x1f, 0xef, 0x4a, 0xc4, 0x44, 0x40, 0x22, 0x16, 0x90, 0xc1, 0x94, - 0x49, 0xc9, 0x04, 0x0f, 0x4e, 0xb6, 0x7b, 0x54, 0x91, 0xed, 0x60, 0x48, 0x39, 0x8d, 0x89, 0xa2, - 0x03, 0x14, 0xc5, 0x42, 0x09, 0x78, 0x2f, 0xcd, 0x46, 0x24, 0x62, 0x68, 0x99, 0x8d, 0x6c, 0x76, - 0xf3, 0xc1, 0x90, 0xa9, 0x51, 0xd2, 0x43, 0x7d, 0x31, 0x0d, 0x86, 0x62, 0x28, 0x02, 0x03, 0xea, - 0x25, 0xcf, 0x8d, 0x65, 0x0c, 0xf3, 0x94, 0x92, 0x35, 0x2f, 0x95, 0x4e, 0xd4, 0x88, 0x72, 0xc5, - 0xfa, 0x44, 0xa5, 0xf5, 0xd7, 0x4b, 0x37, 0x3f, 0xcf, 0xb3, 0xa7, 0xa4, 0x3f, 0x62, 0x9c, 0xc6, - 0xb3, 0x20, 0x1a, 0x0f, 0xb5, 0x43, 0x06, 0x53, 0xaa, 0xc8, 0x75, 0xa8, 0xe0, 0x26, 0x54, 0x9c, - 0x70, 0xc5, 0xa6, 0xf4, 0x0a, 0xe0, 0xe1, 0xff, 0x01, 0x64, 0x7f, 0x44, 0xa7, 0x64, 0x1d, 0xb7, - 0xf5, 0xa7, 0x0b, 0x1a, 0x7b, 0x99, 0x22, 0x98, 0xfe, 0x92, 0x50, 0xa9, 0x60, 0x08, 0x8a, 0x09, - 0x1b, 0x78, 0x4e, 0xdb, 0xe9, 0xb8, 0xe1, 0x67, 0x67, 0xf3, 0x56, 0x61, 0x31, 0x6f, 0x15, 0xbb, - 0x87, 0xfb, 0x17, 0xf3, 0xd6, 0xc7, 0x37, 0x15, 0x52, 0xb3, 0x88, 0x4a, 0xd4, 0x3d, 0xdc, 0xc7, - 0x1a, 0x0c, 0x9f, 0x81, 0xd2, 0x98, 0xf1, 0x81, 0x77, 0xab, 0xed, 0x74, 0x6a, 0x3b, 0x0f, 0x51, - 0x3e, 0x81, 0x25, 0x0c, 0x45, 0xe3, 0xa1, 0x76, 0x48, 0xa4, 0x65, 0x40, 0x27, 0xdb, 0xe8, 0xeb, - 0x58, 0x24, 0xd1, 0x77, 0x34, 0xd6, 0xcd, 0x7c, 0xcb, 0xf8, 0x20, 0xdc, 0xb4, 0xc5, 0x4b, 0xda, - 0xc2, 0x86, 0x11, 0x8e, 0x40, 0x35, 0xa6, 0x52, 0x24, 0x71, 0x9f, 0x7a, 0x45, 0xc3, 0xfe, 0xe8, - 0xcd, 0xd9, 0xb1, 0x65, 0x08, 0x1b, 0xb6, 0x42, 0x35, 0xf3, 0xe0, 0x25, 0x3b, 0xfc, 0x02, 0xd4, - 0x64, 0xd2, 0xcb, 0x02, 0x5e, 0xc9, 0xe8, 0x71, 0xd7, 0x02, 0x6a, 0x4f, 0xf2, 0x10, 0x5e, 0xcd, - 0x83, 0x0c, 0xd4, 0xe2, 0x54, 0x49, 0xdd, 0xb5, 0xf7, 0xc1, 0x3b, 0x29, 0x50, 0xd7, 0xa5, 0x70, - 0x4e, 0x87, 0x57, 0xb9, 0xe1, 0x0c, 0xd4, 0xad, 0xb9, 0xec, 0xf2, 0xf6, 0x3b, 0x4b, 0x72, 0x77, - 0x31, 0x6f, 0xd5, 0xf1, 0x65, 0x5a, 0xbc, 0x5e, 0x07, 0x7e, 0x03, 0xa0, 0x75, 0xad, 0x08, 0xe1, - 0xd5, 0x8d, 0x46, 0x4d, 0xab, 0x11, 0xc4, 0x57, 0x32, 0xf0, 0x35, 0x28, 0xd8, 0x06, 0x25, 0x4e, - 0xa6, 0xd4, 0xdb, 0x30, 0xe8, 0xe5, 0xd0, 0x1f, 0x93, 0x29, 0xc5, 0x26, 0x02, 0x03, 0xe0, 0xea, - 0xbf, 0x32, 0x22, 0x7d, 0xea, 0x95, 0x4d, 0xda, 0x1d, 0x9b, 0xe6, 0x3e, 0xce, 0x02, 0x38, 0xcf, - 0x81, 0x5f, 0x02, 0x57, 0x44, 0xfa, 0x55, 0x67, 0x82, 0x7b, 0x15, 0x03, 0xf0, 0x33, 0xc0, 0x51, - 0x16, 0xb8, 0x58, 0x35, 0x70, 0x0e, 0x80, 0x4f, 0x41, 0x35, 0x91, 0x34, 0x3e, 0xe4, 0xcf, 0x85, - 0x57, 0x35, 0x82, 0x7e, 0x82, 0x56, 0x6f, 0xc8, 0xa5, 0xb5, 0xd7, 0x42, 0x76, 0x6d, 0x76, 0xfe, - 0x3e, 0x65, 0x1e, 0xbc, 0x64, 0x82, 0x5d, 0x50, 0x16, 0xbd, 0x9f, 0x69, 0x5f, 0x79, 0xae, 0xe1, - 0x7c, 0x70, 0xe3, 0x90, 0xec, 0xd6, 0x22, 0x4c, 0x4e, 0x0f, 0x7e, 0x55, 0x94, 0xeb, 0xf9, 0x84, - 0xb7, 0x2d, 0x75, 0xf9, 0xc8, 0x90, 0x60, 0x4b, 0x06, 0x7f, 0x02, 0xae, 0x98, 0x0c, 0x52, 0xa7, - 0x07, 0xde, 0x86, 0x79, 0x29, 0xe5, 0x51, 0xc6, 0x83, 0x73, 0x4a, 0xb8, 0x05, 0xca, 0x83, 0x78, - 0x86, 0x13, 0xee, 0xd5, 0xda, 0x4e, 0xa7, 0x1a, 0x02, 0xdd, 0xc3, 0xbe, 0xf1, 0x60, 0x1b, 0x81, - 0xcf, 0x40, 0x45, 0x44, 0x5a, 0x0c, 0xe9, 0x6d, 0xbe, 0x4d, 0x07, 0x75, 0xdb, 0x41, 0xe5, 0x28, - 0x65, 0xc1, 0x19, 0xdd, 0xd6, 0x5f, 0x25, 0x70, 0x67, 0xe5, 
0x42, 0xc9, 0x48, 0x70, 0x49, 0xdf, - 0xcb, 0x89, 0xfa, 0x14, 0x54, 0xc8, 0x64, 0x22, 0x4e, 0x69, 0x7a, 0xa5, 0xaa, 0x79, 0x13, 0x7b, - 0xa9, 0x1b, 0x67, 0x71, 0x78, 0x0c, 0xca, 0x52, 0x11, 0x95, 0x48, 0x7b, 0x71, 0xee, 0xbf, 0xde, - 0x7a, 0x3d, 0x31, 0x98, 0x54, 0x30, 0x4c, 0x65, 0x32, 0x51, 0xd8, 0xf2, 0xc0, 0x16, 0xd8, 0x88, - 0x88, 0xea, 0x8f, 0xcc, 0x55, 0xd9, 0x0c, 0xdd, 0xc5, 0xbc, 0xb5, 0x71, 0xac, 0x1d, 0x38, 0xf5, - 0xc3, 0x5d, 0xe0, 0x9a, 0x87, 0xa7, 0xb3, 0x28, 0x5b, 0x8c, 0xa6, 0x1e, 0xd1, 0x71, 0xe6, 0xbc, - 0x58, 0x35, 0x70, 0x9e, 0x0c, 0x7f, 0x77, 0x40, 0x83, 0x24, 0x03, 0xa6, 0xf6, 0x38, 0x17, 0x8a, - 0xa4, 0x53, 0x29, 0xb7, 0x8b, 0x9d, 0xda, 0xce, 0x01, 0x7a, 0xd5, 0x97, 0x10, 0x5d, 0xd1, 0x19, - 0xed, 0xad, 0xf1, 0x1c, 0x70, 0x15, 0xcf, 0x42, 0xcf, 0x0a, 0xd5, 0x58, 0x0f, 0xe3, 0x2b, 0x85, - 0x61, 0x07, 0x54, 0x4f, 0x49, 0xcc, 0x19, 0x1f, 0x4a, 0xaf, 0xd2, 0x2e, 0xea, 0xfd, 0xd6, 0xeb, - 0xf1, 0xbd, 0xf5, 0xe1, 0x65, 0xb4, 0xf9, 0x15, 0xf8, 0xe8, 0xda, 0x72, 0xb0, 0x01, 0x8a, 0x63, - 0x3a, 0x4b, 0x87, 0x8d, 0xf5, 0x23, 0xfc, 0x10, 0x6c, 0x9c, 0x90, 0x49, 0x42, 0xcd, 0xe0, 0x5c, - 0x9c, 0x1a, 0x8f, 0x6e, 0xed, 0x3a, 0x5b, 0x7f, 0x3b, 0xa0, 0xbe, 0xf2, 0x6f, 0x9c, 0x30, 0x7a, - 0x0a, 0xbb, 0xa0, 0x62, 0x8f, 0x8e, 0xe1, 0xa8, 0xed, 0xa0, 0xd7, 0x96, 0xc1, 0xa0, 0xc2, 0x9a, - 0x7e, 0x29, 0xb2, 0x8b, 0x98, 0x71, 0xc1, 0x1f, 0xcc, 0x87, 0xc8, 0xe8, 0x64, 0x3f, 0x73, 0xc1, - 0x1b, 0xca, 0x9b, 0x4a, 0x91, 0x59, 0x78, 0x49, 0x17, 0x86, 0x67, 0xe7, 0x7e, 0xe1, 0xc5, 0xb9, - 0x5f, 0x78, 0x79, 0xee, 0x17, 0x7e, 0x5b, 0xf8, 0xce, 0xd9, 0xc2, 0x77, 0x5e, 0x2c, 0x7c, 0xe7, - 0xe5, 0xc2, 0x77, 0xfe, 0x59, 0xf8, 0xce, 0x1f, 0xff, 0xfa, 0x85, 0x1f, 0xef, 0xbd, 0xea, 0x47, - 0xd0, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0x6e, 0x31, 0x41, 0x23, 0x09, 0x00, 0x00, -} - -func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.RequestSubResource) - copy(dAtA[i:], m.RequestSubResource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource))) - i-- - dAtA[i] = 0x7a - if m.RequestResource != nil { - { - size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - if m.RequestKind != nil { - { - size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - if m.DryRun != nil { - i-- - if *m.DryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - } - { - size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x4a - { - size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - i -= len(m.Operation) - copy(dAtA[i:], m.Operation) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i-- - dAtA[i] = 0x3a - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x32 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x2a - i -= len(m.SubResource) - copy(dAtA[i:], m.SubResource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource))) - i-- - dAtA[i] = 0x22 - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Warnings) > 0 { - for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Warnings[iNdEx]) - copy(dAtA[i:], m.Warnings[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warnings[iNdEx]))) - i-- - dAtA[i] = 0x3a - } - } - if len(m.AuditAnnotations) > 0 { - keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) - for k := range m.AuditAnnotations { - keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) - for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { - v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForAuditAnnotations[iNdEx]) - copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 - } - } - if m.PatchType != nil { - i -= len(*m.PatchType) - copy(dAtA[i:], *m.PatchType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType))) - i-- - dAtA[i] = 0x2a - } - if m.Patch != nil { - i -= len(m.Patch) - copy(dAtA[i:], m.Patch) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) - i-- - dAtA[i] = 0x22 - } - if m.Result != nil { - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i-- - if m.Allowed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i 
= encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AdmissionReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Response != nil { - { - size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Request != nil { - { - size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AdmissionRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Kind.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubResource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operation) - n += 1 + l + sovGenerated(uint64(l)) - l = m.UserInfo.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.OldObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.DryRun != nil { - n += 2 - } - l = m.Options.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.RequestKind != nil { - l = m.RequestKind.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RequestResource != nil { - l = m.RequestResource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.RequestSubResource) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AdmissionResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Patch != nil { - l = len(m.Patch) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PatchType != nil { - l = len(*m.PatchType) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.AuditAnnotations) > 0 { - for k, v := range m.AuditAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Warnings) > 0 { - for _, s := range m.Warnings { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *AdmissionReview) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Request != nil { - l = m.Request.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Response != nil { - l = m.Response.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - 
return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AdmissionRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionRequest{`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`, - `SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, - `UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`, - `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `DryRun:` + valueToStringGenerated(this.DryRun) + `,`, - `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`, - `RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`, - `RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`, - `}`, - }, "") - return s -} -func (this *AdmissionResponse) String() string { - if this == nil { - return "nil" - } - keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) - for k := range this.AuditAnnotations { - keysForAuditAnnotations = append(keysForAuditAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) - mapStringForAuditAnnotations := "map[string]string{" - for _, k := range keysForAuditAnnotations { - mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) - } - mapStringForAuditAnnotations += "}" - s := strings.Join([]string{`&AdmissionResponse{`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`, - `Patch:` + valueToStringGenerated(this.Patch) + `,`, - `PatchType:` + valueToStringGenerated(this.PatchType) + `,`, - `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, - `Warnings:` + fmt.Sprintf("%v", this.Warnings) + `,`, - `}`, - }, "") - return s -} -func (this *AdmissionReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionReview{`, - `Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`, - `Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AdmissionRequest) Unmarshal(dAtA []byte) 
error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubResource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operation = Operation(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.DryRun = &b - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequestKind == nil { - m.RequestKind = &v1.GroupVersionKind{} - } - if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequestResource == nil { - m.RequestResource = &v1.GroupVersionResource{} - } - if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequestSubResource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AdmissionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Allowed = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &v1.Status{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - 
return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...) - if m.Patch == nil { - m.Patch = []byte{} - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PatchType(dAtA[iNdEx:postIndex]) - m.PatchType = &s - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuditAnnotations == nil { - m.AuditAnnotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.AuditAnnotations[mapkey] = mapvalue - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Warnings = append(m.Warnings, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AdmissionReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Request == nil { - m.Request = &AdmissionRequest{} - } - if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Response == nil { - m.Response = &AdmissionResponse{} - } - if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/constraint/vendor/k8s.io/api/admission/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/admission/v1beta1/generated.proto deleted file mode 100644 index ff0fa46d2..000000000 --- a/constraint/vendor/k8s.io/api/admission/v1beta1/generated.proto +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.api.admission.v1beta1; - -import "k8s.io/api/authentication/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/api/admission/v1beta1"; - -// AdmissionRequest describes the admission.Attributes for the admission request. -message AdmissionRequest { - // UID is an identifier for the individual request/response. 
It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. - // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. - optional string uid = 1; - - // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2; - - // Resource is the fully-qualified resource being requested (for example, v1.pods) - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3; - - // SubResource is the subresource being requested, if any (for example, "status" or "scale") - // +optional - optional string subResource = 4; - - // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). - // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. - // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), - // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type for more details. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13; - - // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). - // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. - // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), - // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14; - - // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") - // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. - // See documentation for the "matchPolicy" field in the webhook configuration type. - // +optional - optional string requestSubResource = 15; - - // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this field will contain an empty string. 
- // +optional - optional string name = 5; - - // Namespace is the namespace associated with the request (if any). - // +optional - optional string namespace = 6; - - // Operation is the operation being performed. This may be different than the operation - // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. - optional string operation = 7; - - // UserInfo is information about the requesting user - optional k8s.io.api.authentication.v1.UserInfo userInfo = 8; - - // Object is the object from the incoming request. - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9; - - // OldObject is the existing object. Only populated for DELETE and UPDATE requests. - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10; - - // DryRun indicates that modifications will definitely not be persisted for this request. - // Defaults to false. - // +optional - optional bool dryRun = 11; - - // Options is the operation option structure of the operation being performed. - // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be - // different than the options the caller provided. e.g. for a patch request the performed - // Operation might be a CREATE, in which case the Options will a - // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12; -} - -// AdmissionResponse describes an admission response. -message AdmissionResponse { - // UID is an identifier for the individual request/response. - // This should be copied over from the corresponding AdmissionRequest. - optional string uid = 1; - - // Allowed indicates whether or not the admission request was permitted. - optional bool allowed = 2; - - // Result contains extra details into why an admission request was denied. - // This field IS NOT consulted in any way if "Allowed" is "true". - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3; - - // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. - // +optional - optional bytes patch = 4; - - // The type of Patch. Currently we only allow "JSONPatch". - // +optional - optional string patchType = 5; - - // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). - // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with - // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by - // the admission webhook to add additional context to the audit log for this request. - // +optional - map auditAnnotations = 6; - - // warnings is a list of warning messages to return to the requesting API client. - // Warning messages describe a problem the client making the API request should correct or be aware of. - // Limit warnings to 120 characters if possible. - // Warnings over 256 characters and large numbers of warnings may be truncated. - // +optional - repeated string warnings = 7; -} - -// AdmissionReview describes an admission review request/response. -message AdmissionReview { - // Request describes the attributes for the admission request. - // +optional - optional AdmissionRequest request = 1; - - // Response describes the attributes for the admission response. 
- // +optional - optional AdmissionResponse response = 2; -} - diff --git a/constraint/vendor/k8s.io/api/admission/v1beta1/register.go b/constraint/vendor/k8s.io/api/admission/v1beta1/register.go deleted file mode 100644 index 1c53e755d..000000000 --- a/constraint/vendor/k8s.io/api/admission/v1beta1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name for this API. -const GroupName = "admission.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. -// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. -var ( - // SchemeBuilder points to a list of functions added to Scheme. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme. - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &AdmissionReview{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/constraint/vendor/k8s.io/api/admission/v1beta1/types.go b/constraint/vendor/k8s.io/api/admission/v1beta1/types.go deleted file mode 100644 index 00c619d99..000000000 --- a/constraint/vendor/k8s.io/api/admission/v1beta1/types.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - authenticationv1 "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.9 -// +k8s:prerelease-lifecycle-gen:deprecated=1.19 -// This API is never server served. 
It is used for outbound requests from apiservers. This will ensure it never gets served accidentally -// and having the generator against this group will protect future APIs which may be served. -// +k8s:prerelease-lifecycle-gen:replacement=admission.k8s.io,v1,AdmissionReview - -// AdmissionReview describes an admission review request/response. -type AdmissionReview struct { - metav1.TypeMeta `json:",inline"` - // Request describes the attributes for the admission request. - // +optional - Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"` - // Response describes the attributes for the admission response. - // +optional - Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"` -} - -// AdmissionRequest describes the admission.Attributes for the admission request. -type AdmissionRequest struct { - // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are - // otherwise identical (parallel requests, requests when earlier requests did not modify etc) - // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. - // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging. - UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` - // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale) - Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Resource is the fully-qualified resource being requested (for example, v1.pods) - Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"` - // SubResource is the subresource being requested, if any (for example, "status" or "scale") - // +optional - SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"` - - // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). - // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed. - // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for), - // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type for more details. - // +optional - RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"` - // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). - // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed. 
- // - // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of - // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`, - // an API request to apps/v1beta1 deployments would be converted and sent to the webhook - // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for), - // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request). - // - // See documentation for the "matchPolicy" field in the webhook configuration type. - // +optional - RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"` - // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale") - // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed. - // See documentation for the "matchPolicy" field in the webhook configuration type. - // +optional - RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"` - - // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and - // rely on the server to generate the name. If that is the case, this field will contain an empty string. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` - // Namespace is the namespace associated with the request (if any). - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"` - // Operation is the operation being performed. This may be different than the operation - // requested. e.g. a patch can result in either a CREATE or UPDATE Operation. - Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"` - // UserInfo is information about the requesting user - UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"` - // Object is the object from the incoming request. - // +optional - Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"` - // OldObject is the existing object. Only populated for DELETE and UPDATE requests. - // +optional - OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"` - // DryRun indicates that modifications will definitely not be persisted for this request. - // Defaults to false. - // +optional - DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"` - // Options is the operation option structure of the operation being performed. - // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be - // different than the options the caller provided. e.g. for a patch request the performed - // Operation might be a CREATE, in which case the Options will a - // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`. - // +optional - Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"` -} - -// AdmissionResponse describes an admission response. -type AdmissionResponse struct { - // UID is an identifier for the individual request/response. - // This should be copied over from the corresponding AdmissionRequest. 
- UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"` - - // Allowed indicates whether or not the admission request was permitted. - Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` - - // Result contains extra details into why an admission request was denied. - // This field IS NOT consulted in any way if "Allowed" is "true". - // +optional - Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` - - // The patch body. Currently we only support "JSONPatch" which implements RFC 6902. - // +optional - Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"` - - // The type of Patch. Currently we only allow "JSONPatch". - // +optional - PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"` - - // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). - // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with - // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by - // the admission webhook to add additional context to the audit log for this request. - // +optional - AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"` - - // warnings is a list of warning messages to return to the requesting API client. - // Warning messages describe a problem the client making the API request should correct or be aware of. - // Limit warnings to 120 characters if possible. - // Warnings over 256 characters and large numbers of warnings may be truncated. - // +optional - Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"` -} - -// PatchType is the type of patch being used to represent the mutated object -type PatchType string - -// PatchType constants. -const ( - PatchTypeJSONPatch PatchType = "JSONPatch" -) - -// Operation is the type of resource operation being checked for admission control -type Operation string - -// Operation constants -const ( - Create Operation = "CREATE" - Update Operation = "UPDATE" - Delete Operation = "DELETE" - Connect Operation = "CONNECT" -) diff --git a/constraint/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 82598ed57..000000000 --- a/constraint/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) 
if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-codegen.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AdmissionRequest = map[string]string{ - "": "AdmissionRequest describes the admission.Attributes for the admission request.", - "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.", - "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)", - "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)", - "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")", - "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.", - "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.", - "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.", - "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. 
If that is the case, this field will contain an empty string.", - "namespace": "Namespace is the namespace associated with the request (if any).", - "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.", - "userInfo": "UserInfo is information about the requesting user", - "object": "Object is the object from the incoming request.", - "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.", - "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.", - "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.", -} - -func (AdmissionRequest) SwaggerDoc() map[string]string { - return map_AdmissionRequest -} - -var map_AdmissionResponse = map[string]string{ - "": "AdmissionResponse describes an admission response.", - "uid": "UID is an identifier for the individual request/response. This should be copied over from the corresponding AdmissionRequest.", - "allowed": "Allowed indicates whether or not the admission request was permitted.", - "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".", - "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.", - "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".", - "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.", - "warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.", -} - -func (AdmissionResponse) SwaggerDoc() map[string]string { - return map_AdmissionResponse -} - -var map_AdmissionReview = map[string]string{ - "": "AdmissionReview describes an admission review request/response.", - "request": "Request describes the attributes for the admission request.", - "response": "Response describes the attributes for the admission response.", -} - -func (AdmissionReview) SwaggerDoc() map[string]string { - return map_AdmissionReview -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/constraint/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 8234b322f..000000000 --- a/constraint/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,142 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) { - *out = *in - out.Kind = in.Kind - out.Resource = in.Resource - if in.RequestKind != nil { - in, out := &in.RequestKind, &out.RequestKind - *out = new(v1.GroupVersionKind) - **out = **in - } - if in.RequestResource != nil { - in, out := &in.RequestResource, &out.RequestResource - *out = new(v1.GroupVersionResource) - **out = **in - } - in.UserInfo.DeepCopyInto(&out.UserInfo) - in.Object.DeepCopyInto(&out.Object) - in.OldObject.DeepCopyInto(&out.OldObject) - if in.DryRun != nil { - in, out := &in.DryRun, &out.DryRun - *out = new(bool) - **out = **in - } - in.Options.DeepCopyInto(&out.Options) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest. -func (in *AdmissionRequest) DeepCopy() *AdmissionRequest { - if in == nil { - return nil - } - out := new(AdmissionRequest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) { - *out = *in - if in.Result != nil { - in, out := &in.Result, &out.Result - *out = new(v1.Status) - (*in).DeepCopyInto(*out) - } - if in.Patch != nil { - in, out := &in.Patch, &out.Patch - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.PatchType != nil { - in, out := &in.PatchType, &out.PatchType - *out = new(PatchType) - **out = **in - } - if in.AuditAnnotations != nil { - in, out := &in.AuditAnnotations, &out.AuditAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Warnings != nil { - in, out := &in.Warnings, &out.Warnings - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse. -func (in *AdmissionResponse) DeepCopy() *AdmissionResponse { - if in == nil { - return nil - } - out := new(AdmissionResponse) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Request != nil { - in, out := &in.Request, &out.Request - *out = new(AdmissionRequest) - (*in).DeepCopyInto(*out) - } - if in.Response != nil { - in, out := &in.Response, &out.Response - *out = new(AdmissionResponse) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview. 
-func (in *AdmissionReview) DeepCopy() *AdmissionReview { - if in == nil { - return nil - } - out := new(AdmissionReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AdmissionReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/constraint/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go deleted file mode 100644 index f96e8a443..000000000 --- a/constraint/vendor/k8s.io/api/admission/v1beta1/zz_generated.prerelease-lifecycle.go +++ /dev/null @@ -1,50 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. - -package v1beta1 - -import ( - schema "k8s.io/apimachinery/pkg/runtime/schema" -) - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *AdmissionReview) APILifecycleIntroduced() (major, minor int) { - return 1, 9 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *AdmissionReview) APILifecycleDeprecated() (major, minor int) { - return 1, 19 -} - -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *AdmissionReview) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "admission.k8s.io", Version: "v1", Kind: "AdmissionReview"} -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *AdmissionReview) APILifecycleRemoved() (major, minor int) { - return 1, 22 -} diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1/doc.go b/constraint/vendor/k8s.io/api/admissionregistration/v1/doc.go index c3940f090..ca0086188 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1/doc.go +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=admissionregistration.k8s.io // Package v1 is the v1 version of the API. diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/constraint/vendor/k8s.io/api/admissionregistration/v1/generated.proto index 44589007a..e856e9eaf 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1/generated.proto @@ -156,7 +156,7 @@ message MatchResources { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; // ObjectSelector decides whether to run the validation based on if the // object has matching labels. objectSelector is evaluated against both @@ -170,7 +170,7 @@ message MatchResources { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. // The policy cares about an operation if it matches _any_ Rule. @@ -290,7 +290,7 @@ message MutatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -304,7 +304,7 @@ message MutatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; // SideEffects states whether this webhook has side effects. // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). @@ -374,7 +374,7 @@ message MutatingWebhook { message MutatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional @@ -390,7 +390,7 @@ message MutatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of MutatingWebhookConfiguration. repeated MutatingWebhookConfiguration items = 2; @@ -463,7 +463,7 @@ message ParamRef { // mutually exclusive properties. If one is set, the other must be unset. 
// // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; // `parameterNotFoundAction` controls the behavior of the binding when the resource // exists, and name or selector is valid, but there are no parameters @@ -570,16 +570,11 @@ message TypeChecking { repeated ExpressionWarning expressionWarnings = 1; } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 // ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; @@ -606,7 +601,7 @@ message ValidatingAdmissionPolicy { message ValidatingAdmissionPolicyBinding { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. optional ValidatingAdmissionPolicyBindingSpec spec = 2; @@ -617,7 +612,7 @@ message ValidatingAdmissionPolicyBindingList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of PolicyBinding. repeated ValidatingAdmissionPolicyBinding items = 2; @@ -688,14 +683,12 @@ message ValidatingAdmissionPolicyBindingSpec { repeated string validationActions = 4; } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 // ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. message ValidatingAdmissionPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingAdmissionPolicy. repeated ValidatingAdmissionPolicy items = 2; @@ -800,7 +793,7 @@ message ValidatingAdmissionPolicyStatus { // +optional // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // ValidatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -891,7 +884,7 @@ message ValidatingWebhook { // // Default to the empty LabelSelector, which matches everything. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -905,7 +898,7 @@ message ValidatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; // SideEffects states whether this webhook has side effects. // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). @@ -957,7 +950,7 @@ message ValidatingWebhook { message ValidatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional @@ -973,7 +966,7 @@ message ValidatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingWebhookConfiguration. repeated ValidatingWebhookConfiguration items = 2; diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1/types.go b/constraint/vendor/k8s.io/api/admissionregistration/v1/types.go index 0510712b2..4efeb2674 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1/types.go +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1/types.go @@ -137,6 +137,7 @@ const ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.30 + // ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it. type ValidatingAdmissionPolicy struct { metav1.TypeMeta `json:",inline"` @@ -195,6 +196,7 @@ type ExpressionWarning struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.30 + // ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy. type ValidatingAdmissionPolicyList struct { metav1.TypeMeta `json:",inline"` @@ -203,7 +205,7 @@ type ValidatingAdmissionPolicyList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingAdmissionPolicy. - Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. @@ -464,7 +466,7 @@ type ValidatingAdmissionPolicyBindingList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of PolicyBinding. 
- Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. @@ -713,6 +715,7 @@ type NamedRuleWithOperations struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. type ValidatingWebhookConfiguration struct { @@ -730,6 +733,7 @@ type ValidatingWebhookConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. type ValidatingWebhookConfigurationList struct { @@ -745,6 +749,7 @@ type ValidatingWebhookConfigurationList struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. type MutatingWebhookConfiguration struct { @@ -762,6 +767,7 @@ type MutatingWebhookConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. type MutatingWebhookConfigurationList struct { diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..0862bb1f2 --- /dev/null +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *MutatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go index 385c60e0d..98066211d 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=admissionregistration.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. 
diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index 111cc7287..993ff6f20 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -25,6 +25,7 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + k8s_io_api_admissionregistration_v1 "k8s.io/api/admissionregistration/v1" k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,10 +46,38 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *ApplyConfiguration) Reset() { *m = ApplyConfiguration{} } +func (*ApplyConfiguration) ProtoMessage() {} +func (*ApplyConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{0} +} +func (m *ApplyConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ApplyConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ApplyConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyConfiguration.Merge(m, src) +} +func (m *ApplyConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ApplyConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyConfiguration proto.InternalMessageInfo + func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} } func (*AuditAnnotation) ProtoMessage() {} func (*AuditAnnotation) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{0} + return fileDescriptor_2c49182728ae0af5, []int{1} } func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -76,7 +105,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} } func (*ExpressionWarning) ProtoMessage() {} func (*ExpressionWarning) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{1} + return fileDescriptor_2c49182728ae0af5, []int{2} } func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -101,10 +130,38 @@ func (m *ExpressionWarning) XXX_DiscardUnknown() { var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo +func (m *JSONPatch) Reset() { *m = JSONPatch{} } +func (*JSONPatch) ProtoMessage() {} +func (*JSONPatch) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{3} +} +func (m *JSONPatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JSONPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JSONPatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_JSONPatch.Merge(m, src) +} +func (m *JSONPatch) XXX_Size() int { + return m.Size() +} +func (m *JSONPatch) XXX_DiscardUnknown() { + xxx_messageInfo_JSONPatch.DiscardUnknown(m) +} + +var xxx_messageInfo_JSONPatch proto.InternalMessageInfo + func (m *MatchCondition) Reset() { *m = MatchCondition{} } func (*MatchCondition) ProtoMessage() {} func (*MatchCondition) Descriptor() ([]byte, []int) { - return 
fileDescriptor_2c49182728ae0af5, []int{2} + return fileDescriptor_2c49182728ae0af5, []int{4} } func (m *MatchCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +189,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo func (m *MatchResources) Reset() { *m = MatchResources{} } func (*MatchResources) ProtoMessage() {} func (*MatchResources) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{3} + return fileDescriptor_2c49182728ae0af5, []int{5} } func (m *MatchResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -157,10 +214,206 @@ func (m *MatchResources) XXX_DiscardUnknown() { var xxx_messageInfo_MatchResources proto.InternalMessageInfo +func (m *MutatingAdmissionPolicy) Reset() { *m = MutatingAdmissionPolicy{} } +func (*MutatingAdmissionPolicy) ProtoMessage() {} +func (*MutatingAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{6} +} +func (m *MutatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicy.Merge(m, src) +} +func (m *MutatingAdmissionPolicy) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicy proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyBinding) Reset() { *m = MutatingAdmissionPolicyBinding{} } +func (*MutatingAdmissionPolicyBinding) ProtoMessage() {} +func (*MutatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{7} +} +func (m *MutatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyBinding.Merge(m, src) +} +func (m *MutatingAdmissionPolicyBinding) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyBinding) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyBinding proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyBindingList) Reset() { *m = MutatingAdmissionPolicyBindingList{} } +func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {} +func (*MutatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{8} +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyBindingList.Merge(m, src) +} +func (m *MutatingAdmissionPolicyBindingList) XXX_Size() int { + return m.Size() +} 
+func (m *MutatingAdmissionPolicyBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyBindingList proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyBindingSpec) Reset() { *m = MutatingAdmissionPolicyBindingSpec{} } +func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {} +func (*MutatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{9} +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.Merge(m, src) +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyBindingSpec proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicyList) Reset() { *m = MutatingAdmissionPolicyList{} } +func (*MutatingAdmissionPolicyList) ProtoMessage() {} +func (*MutatingAdmissionPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{10} +} +func (m *MutatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicyList.Merge(m, src) +} +func (m *MutatingAdmissionPolicyList) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicyList proto.InternalMessageInfo + +func (m *MutatingAdmissionPolicySpec) Reset() { *m = MutatingAdmissionPolicySpec{} } +func (*MutatingAdmissionPolicySpec) ProtoMessage() {} +func (*MutatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{11} +} +func (m *MutatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingAdmissionPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingAdmissionPolicySpec.Merge(m, src) +} +func (m *MutatingAdmissionPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *MutatingAdmissionPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingAdmissionPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingAdmissionPolicySpec proto.InternalMessageInfo + +func (m *Mutation) Reset() { *m = Mutation{} } +func (*Mutation) ProtoMessage() {} +func (*Mutation) Descriptor() ([]byte, []int) { + return fileDescriptor_2c49182728ae0af5, []int{12} +} +func (m *Mutation) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Mutation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mutation.Merge(m, src) +} +func (m *Mutation) XXX_Size() int { + return m.Size() +} +func (m *Mutation) XXX_DiscardUnknown() { + xxx_messageInfo_Mutation.DiscardUnknown(m) +} + +var xxx_messageInfo_Mutation proto.InternalMessageInfo + func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} } func (*NamedRuleWithOperations) ProtoMessage() {} func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{4} + return fileDescriptor_2c49182728ae0af5, []int{13} } func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +441,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo func (m *ParamKind) Reset() { *m = ParamKind{} } func (*ParamKind) ProtoMessage() {} func (*ParamKind) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{5} + return fileDescriptor_2c49182728ae0af5, []int{14} } func (m *ParamKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +469,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo func (m *ParamRef) Reset() { *m = ParamRef{} } func (*ParamRef) ProtoMessage() {} func (*ParamRef) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{6} + return fileDescriptor_2c49182728ae0af5, []int{15} } func (m *ParamRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +497,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo func (m *TypeChecking) Reset() { *m = TypeChecking{} } func (*TypeChecking) ProtoMessage() {} func (*TypeChecking) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{7} + return fileDescriptor_2c49182728ae0af5, []int{16} } func (m *TypeChecking) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -272,7 +525,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} } func (*ValidatingAdmissionPolicy) ProtoMessage() {} func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{8} + return fileDescriptor_2c49182728ae0af5, []int{17} } func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -300,7 +553,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} } func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {} func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{9} + return fileDescriptor_2c49182728ae0af5, []int{18} } func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -328,7 +581,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} } func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{10} + return fileDescriptor_2c49182728ae0af5, []int{19} } func (m 
*ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -356,7 +609,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} } func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {} func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{11} + return fileDescriptor_2c49182728ae0af5, []int{20} } func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -384,7 +637,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} } func (*ValidatingAdmissionPolicyList) ProtoMessage() {} func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{12} + return fileDescriptor_2c49182728ae0af5, []int{21} } func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -412,7 +665,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} } func (*ValidatingAdmissionPolicySpec) ProtoMessage() {} func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{13} + return fileDescriptor_2c49182728ae0af5, []int{22} } func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +693,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} } func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {} func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{14} + return fileDescriptor_2c49182728ae0af5, []int{23} } func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -468,7 +721,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo func (m *Validation) Reset() { *m = Validation{} } func (*Validation) ProtoMessage() {} func (*Validation) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{15} + return fileDescriptor_2c49182728ae0af5, []int{24} } func (m *Validation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -496,7 +749,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo func (m *Variable) Reset() { *m = Variable{} } func (*Variable) ProtoMessage() {} func (*Variable) Descriptor() ([]byte, []int) { - return fileDescriptor_2c49182728ae0af5, []int{16} + return fileDescriptor_2c49182728ae0af5, []int{25} } func (m *Variable) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,10 +775,19 @@ func (m *Variable) XXX_DiscardUnknown() { var xxx_messageInfo_Variable proto.InternalMessageInfo func init() { + proto.RegisterType((*ApplyConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.ApplyConfiguration") proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1alpha1.AuditAnnotation") proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExpressionWarning") + proto.RegisterType((*JSONPatch)(nil), "k8s.io.api.admissionregistration.v1alpha1.JSONPatch") 
proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchCondition") proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchResources") + proto.RegisterType((*MutatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy") + proto.RegisterType((*MutatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding") + proto.RegisterType((*MutatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingList") + proto.RegisterType((*MutatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec") + proto.RegisterType((*MutatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyList") + proto.RegisterType((*MutatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec") + proto.RegisterType((*Mutation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Mutation") proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.NamedRuleWithOperations") proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamKind") proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamRef") @@ -546,101 +808,147 @@ func init() { } var fileDescriptor_2c49182728ae0af5 = []byte{ - // 1498 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0xc5, - 0x17, 0xcf, 0xc6, 0x6e, 0x12, 0x8f, 0x73, 0xb1, 0xe7, 0xdf, 0x2a, 0x6e, 0xfe, 0xd4, 0x1b, 0xad, - 0x2a, 0xd4, 0x48, 0xb0, 0x26, 0x69, 0xa1, 0xb4, 0x42, 0x42, 0xd9, 0xde, 0xe8, 0x25, 0x17, 0x4d, - 0x51, 0x22, 0x21, 0x90, 0x98, 0xec, 0x4e, 0xec, 0x69, 0xbc, 0x17, 0x76, 0xd6, 0xa1, 0x11, 0x48, - 0x54, 0xe2, 0x05, 0xde, 0x78, 0xe0, 0x85, 0x57, 0x3e, 0x02, 0xdf, 0x80, 0xb7, 0x3e, 0xf6, 0xb1, - 0x3c, 0x60, 0x51, 0xf3, 0xc2, 0x27, 0x00, 0x29, 0x2f, 0xa0, 0x99, 0x9d, 0xbd, 0xda, 0x26, 0x76, - 0x09, 0xbc, 0x79, 0xce, 0x9c, 0xf3, 0xfb, 0xcd, 0x39, 0x73, 0xce, 0xd9, 0x33, 0x06, 0xd7, 0x0e, - 0xde, 0x66, 0x3a, 0x75, 0x1b, 0xd8, 0xa3, 0x0d, 0x6c, 0xd9, 0x94, 0x31, 0xea, 0x3a, 0x3e, 0x69, - 0x52, 0x16, 0xf8, 0x38, 0xa0, 0xae, 0xd3, 0x38, 0x5c, 0xc5, 0x6d, 0xaf, 0x85, 0x57, 0x1b, 0x4d, - 0xe2, 0x10, 0x1f, 0x07, 0xc4, 0xd2, 0x3d, 0xdf, 0x0d, 0x5c, 0xb8, 0x12, 0x9a, 0xea, 0xd8, 0xa3, - 0xfa, 0x40, 0x53, 0x3d, 0x32, 0x5d, 0x7a, 0xbd, 0x49, 0x83, 0x56, 0x67, 0x4f, 0x37, 0x5d, 0xbb, - 0xd1, 0x74, 0x9b, 0x6e, 0x43, 0x20, 0xec, 0x75, 0xf6, 0xc5, 0x4a, 0x2c, 0xc4, 0xaf, 0x10, 0x79, - 0xe9, 0xf2, 0x08, 0x87, 0xca, 0x1f, 0x67, 0xe9, 0x4a, 0x62, 0x64, 0x63, 0xb3, 0x45, 0x1d, 0xe2, - 0x1f, 0x35, 0xbc, 0x83, 0x26, 0x17, 0xb0, 0x86, 0x4d, 0x02, 0x3c, 0xc8, 0xaa, 0x31, 0xcc, 0xca, - 0xef, 0x38, 0x01, 0xb5, 0x49, 0x9f, 0xc1, 0x5b, 0x27, 0x19, 0x30, 0xb3, 0x45, 0x6c, 0x9c, 0xb7, - 0xd3, 0x18, 0x58, 0x58, 0xef, 0x58, 0x34, 0x58, 0x77, 0x1c, 0x37, 0x10, 0x4e, 0xc0, 0x0b, 0xa0, - 0x70, 0x40, 0x8e, 0x6a, 0xca, 0xb2, 0x72, 0xa9, 0x64, 0x94, 0x9f, 0x76, 0xd5, 0x89, 0x5e, 0x57, - 0x2d, 0xdc, 0x27, 0x47, 0x88, 0xcb, 0xe1, 0x3a, 0x58, 0x38, 0xc4, 0xed, 0x0e, 0xb9, 0xf5, 0xd8, - 0xf3, 0x89, 0x08, 0x41, 0x6d, 0x52, 0xa8, 0x2e, 0x4a, 0xd5, 0x85, 0x9d, 0xec, 0x36, 0xca, 0xeb, - 0x6b, 0x6d, 0x50, 0x4d, 0x56, 0xbb, 0xd8, 0x77, 0xa8, 0xd3, 0x84, 0xaf, 0x81, 0x99, 0x7d, 0x4a, - 0xda, 0x16, 0x22, 0xfb, 0x12, 0xb0, 0x22, 
0x01, 0x67, 0x6e, 0x4b, 0x39, 0x8a, 0x35, 0xe0, 0x0a, - 0x98, 0xfe, 0x34, 0x34, 0xac, 0x15, 0x84, 0xf2, 0x82, 0x54, 0x9e, 0x96, 0x78, 0x28, 0xda, 0xd7, - 0xf6, 0xc1, 0xfc, 0x06, 0x0e, 0xcc, 0xd6, 0x0d, 0xd7, 0xb1, 0xa8, 0xf0, 0x70, 0x19, 0x14, 0x1d, - 0x6c, 0x13, 0xe9, 0xe2, 0xac, 0xb4, 0x2c, 0x6e, 0x62, 0x9b, 0x20, 0xb1, 0x03, 0xd7, 0x00, 0x20, - 0x79, 0xff, 0xa0, 0xd4, 0x03, 0x29, 0xd7, 0x52, 0x5a, 0xda, 0x4f, 0x45, 0x49, 0x84, 0x08, 0x73, - 0x3b, 0xbe, 0x49, 0x18, 0x7c, 0x0c, 0xaa, 0x1c, 0x8e, 0x79, 0xd8, 0x24, 0x0f, 0x49, 0x9b, 0x98, - 0x81, 0xeb, 0x0b, 0xd6, 0xf2, 0xda, 0x65, 0x3d, 0xc9, 0xd3, 0xf8, 0xc6, 0x74, 0xef, 0xa0, 0xc9, - 0x05, 0x4c, 0xe7, 0x89, 0xa1, 0x1f, 0xae, 0xea, 0x0f, 0xf0, 0x1e, 0x69, 0x47, 0xa6, 0xc6, 0xb9, - 0x5e, 0x57, 0xad, 0x6e, 0xe6, 0x11, 0x51, 0x3f, 0x09, 0x74, 0xc1, 0xbc, 0xbb, 0xf7, 0x88, 0x98, - 0x41, 0x4c, 0x3b, 0xf9, 0xf2, 0xb4, 0xb0, 0xd7, 0x55, 0xe7, 0xb7, 0x32, 0x70, 0x28, 0x07, 0x0f, - 0xbf, 0x00, 0x73, 0xbe, 0xf4, 0x1b, 0x75, 0xda, 0x84, 0xd5, 0x0a, 0xcb, 0x85, 0x4b, 0xe5, 0x35, - 0x43, 0x1f, 0xb9, 0x1c, 0x75, 0xee, 0x98, 0xc5, 0x8d, 0x77, 0x69, 0xd0, 0xda, 0xf2, 0x48, 0xb8, - 0xcf, 0x8c, 0x73, 0x32, 0xf0, 0x73, 0x28, 0x4d, 0x80, 0xb2, 0x7c, 0xf0, 0x5b, 0x05, 0x9c, 0x25, - 0x8f, 0xcd, 0x76, 0xc7, 0x22, 0x19, 0xbd, 0x5a, 0xf1, 0xd4, 0x0e, 0xf2, 0x8a, 0x3c, 0xc8, 0xd9, - 0x5b, 0x03, 0x78, 0xd0, 0x40, 0x76, 0x78, 0x13, 0x94, 0x6d, 0x9e, 0x14, 0xdb, 0x6e, 0x9b, 0x9a, - 0x47, 0xb5, 0x69, 0x91, 0x4a, 0x5a, 0xaf, 0xab, 0x96, 0x37, 0x12, 0xf1, 0x71, 0x57, 0x5d, 0x48, - 0x2d, 0xdf, 0x3f, 0xf2, 0x08, 0x4a, 0x9b, 0x69, 0xcf, 0x15, 0xb0, 0x38, 0xe4, 0x54, 0xf0, 0x6a, - 0x12, 0x79, 0x91, 0x1a, 0x35, 0x65, 0xb9, 0x70, 0xa9, 0x64, 0x54, 0xd3, 0x11, 0x13, 0x1b, 0x28, - 0xab, 0x07, 0xbf, 0x54, 0x00, 0xf4, 0xfb, 0xf0, 0x64, 0xa2, 0x5c, 0x1d, 0x25, 0x5e, 0xfa, 0x80, - 0x20, 0x2d, 0xc9, 0x20, 0xc1, 0xfe, 0x3d, 0x34, 0x80, 0x4e, 0xc3, 0xa0, 0xb4, 0x8d, 0x7d, 0x6c, - 0xdf, 0xa7, 0x8e, 0xc5, 0xeb, 0x0e, 0x7b, 0x74, 0x87, 0xf8, 0xa2, 0xee, 0x94, 0x6c, 0xdd, 0xad, - 0x6f, 0xdf, 0x95, 0x3b, 0x28, 0xa5, 0xc5, 0xab, 0xf9, 0x80, 0x3a, 0x96, 0xac, 0xd2, 0xb8, 0x9a, - 0x39, 0x1e, 0x12, 0x3b, 0xda, 0x0f, 0x93, 0x60, 0x46, 0x70, 0xf0, 0xce, 0x71, 0x72, 0xf1, 0x37, - 0x40, 0x29, 0x2e, 0x28, 0x89, 0x5a, 0x95, 0x6a, 0xa5, 0xb8, 0xf8, 0x50, 0xa2, 0x03, 0x3f, 0x02, - 0x33, 0x2c, 0x2a, 0xb3, 0xc2, 0xcb, 0x97, 0xd9, 0x2c, 0xef, 0x75, 0x71, 0x81, 0xc5, 0x90, 0x30, - 0x00, 0x8b, 0x1e, 0x3f, 0x3d, 0x09, 0x88, 0xbf, 0xe9, 0x06, 0xb7, 0xdd, 0x8e, 0x63, 0xad, 0x9b, - 0x3c, 0x7a, 0xb5, 0xa2, 0x38, 0xdd, 0xf5, 0x5e, 0x57, 0x5d, 0xdc, 0x1e, 0xac, 0x72, 0xdc, 0x55, - 0xff, 0x3f, 0x64, 0x4b, 0xa4, 0xd9, 0x30, 0x68, 0xed, 0x3b, 0x05, 0xcc, 0x72, 0x8d, 0x1b, 0x2d, - 0x62, 0x1e, 0xf0, 0x06, 0xfd, 0x95, 0x02, 0x20, 0xc9, 0xb7, 0xed, 0x30, 0xdb, 0xca, 0x6b, 0xef, - 0x8c, 0x51, 0x5e, 0x7d, 0xbd, 0x3f, 0xc9, 0x99, 0xbe, 0x2d, 0x86, 0x06, 0x70, 0x6a, 0x3f, 0x4f, - 0x82, 0xf3, 0x3b, 0xb8, 0x4d, 0x2d, 0x1c, 0x50, 0xa7, 0xb9, 0x1e, 0xd1, 0x85, 0xc5, 0x02, 0x3f, - 0x06, 0x33, 0x3c, 0xc0, 0x16, 0x0e, 0xb0, 0x6c, 0xb6, 0x6f, 0x8c, 0x76, 0x1d, 0x61, 0x8b, 0xdb, - 0x20, 0x01, 0x4e, 0x92, 0x2e, 0x91, 0xa1, 0x18, 0x15, 0x3e, 0x02, 0x45, 0xe6, 0x11, 0x53, 0x96, - 0xca, 0x7b, 0x63, 0xf8, 0x3e, 0xf4, 0xd4, 0x0f, 0x3d, 0x62, 0x26, 0xd9, 0xc8, 0x57, 0x48, 0x70, - 0x40, 0x1f, 0x4c, 0xb1, 0x00, 0x07, 0x1d, 0x26, 0x53, 0xeb, 0xde, 0xa9, 0xb0, 0x09, 0x44, 0x63, - 0x5e, 0xf2, 0x4d, 0x85, 0x6b, 0x24, 0x99, 0xb4, 0x3f, 0x14, 0xb0, 0x3c, 0xd4, 0xd6, 0xa0, 0x8e, - 0xc5, 0xf3, 0xe1, 0xdf, 0x0f, 0xf3, 0x27, 0x99, 0x30, 0x6f, 0x9d, 
0x86, 0xe3, 0xf2, 0xf0, 0xc3, - 0xa2, 0xad, 0xfd, 0xae, 0x80, 0x8b, 0x27, 0x19, 0x3f, 0xa0, 0x2c, 0x80, 0x1f, 0xf6, 0x79, 0xaf, - 0x8f, 0x58, 0xf3, 0x94, 0x85, 0xbe, 0xc7, 0xe3, 0x4d, 0x24, 0x49, 0x79, 0xee, 0x81, 0x33, 0x34, - 0x20, 0x36, 0x6f, 0xc6, 0xbc, 0xba, 0xee, 0x9f, 0xa2, 0xeb, 0xc6, 0x9c, 0xe4, 0x3d, 0x73, 0x97, - 0x33, 0xa0, 0x90, 0x48, 0xfb, 0xba, 0x70, 0xb2, 0xe3, 0x3c, 0x4e, 0xbc, 0x45, 0x7b, 0x42, 0xb8, - 0x99, 0x74, 0xd1, 0xf8, 0x1a, 0xb7, 0xe3, 0x1d, 0x94, 0xd2, 0xe2, 0x0d, 0xd2, 0x93, 0xfd, 0x77, - 0xc0, 0x1c, 0x72, 0x92, 0x47, 0x51, 0xeb, 0x0e, 0x1b, 0x64, 0xb4, 0x42, 0x31, 0x24, 0xec, 0x80, - 0x79, 0x3b, 0x33, 0x78, 0xc9, 0x52, 0xb9, 0x36, 0x06, 0x49, 0x76, 0x72, 0x0b, 0x47, 0x9e, 0xac, - 0x0c, 0xe5, 0x48, 0xe0, 0x2e, 0xa8, 0x1e, 0xca, 0x88, 0xb9, 0x4e, 0xd8, 0x35, 0xc3, 0x69, 0xa3, - 0x64, 0xac, 0xf0, 0x41, 0x6d, 0x27, 0xbf, 0x79, 0xdc, 0x55, 0x2b, 0x79, 0x21, 0xea, 0xc7, 0xd0, - 0x7e, 0x53, 0xc0, 0x85, 0xa1, 0x77, 0xf1, 0x1f, 0x64, 0x1f, 0xcd, 0x66, 0xdf, 0xcd, 0x53, 0xc9, - 0xbe, 0xc1, 0x69, 0xf7, 0xfd, 0xd4, 0xdf, 0xb8, 0x2a, 0xf2, 0x0d, 0x83, 0x92, 0x17, 0xcd, 0x07, - 0xd2, 0xd7, 0x2b, 0xe3, 0x26, 0x0f, 0xb7, 0x35, 0xe6, 0xf8, 0xf7, 0x3b, 0x5e, 0xa2, 0x04, 0x15, - 0x7e, 0x06, 0x2a, 0xb6, 0x7c, 0x21, 0x70, 0x00, 0xea, 0x04, 0xd1, 0x14, 0xf4, 0x0f, 0x32, 0xe8, - 0x6c, 0xaf, 0xab, 0x56, 0x36, 0x72, 0xb0, 0xa8, 0x8f, 0x08, 0xb6, 0x41, 0x39, 0xc9, 0x80, 0x68, - 0x6c, 0x7e, 0xf3, 0x25, 0x42, 0xee, 0x3a, 0xc6, 0xff, 0x64, 0x8c, 0xcb, 0x89, 0x8c, 0xa1, 0x34, - 0x3c, 0x7c, 0x00, 0xe6, 0xf6, 0x31, 0x6d, 0x77, 0x7c, 0x22, 0x07, 0xd2, 0x70, 0x82, 0x78, 0x95, - 0x0f, 0x8b, 0xb7, 0xd3, 0x1b, 0xc7, 0x5d, 0xb5, 0x9a, 0x11, 0x88, 0x69, 0x21, 0x6b, 0x0c, 0x9f, - 0x28, 0xa0, 0x82, 0xb3, 0xcf, 0x47, 0x56, 0x3b, 0x23, 0x3c, 0xb8, 0x3e, 0x86, 0x07, 0xb9, 0x17, - 0xa8, 0x51, 0x93, 0x6e, 0x54, 0x72, 0x1b, 0x0c, 0xf5, 0xb1, 0xc1, 0xcf, 0xc1, 0x82, 0x9d, 0x79, - 0xdd, 0xb1, 0xda, 0x94, 0x38, 0xc0, 0xd8, 0x57, 0x17, 0x23, 0x24, 0x2f, 0xd9, 0xac, 0x9c, 0xa1, - 0x3c, 0x15, 0xb4, 0x40, 0xe9, 0x10, 0xfb, 0x14, 0xef, 0xf1, 0x87, 0xc6, 0xb4, 0xe0, 0xbd, 0x3c, - 0xd6, 0xd5, 0x85, 0xb6, 0xc9, 0x7c, 0x19, 0x49, 0x18, 0x4a, 0x80, 0xb5, 0x1f, 0x27, 0x81, 0x7a, - 0xc2, 0xa7, 0x1c, 0xde, 0x03, 0xd0, 0xdd, 0x63, 0xc4, 0x3f, 0x24, 0xd6, 0x9d, 0xf0, 0x8d, 0x1f, - 0x4d, 0xd0, 0x85, 0x64, 0xbc, 0xda, 0xea, 0xd3, 0x40, 0x03, 0xac, 0xa0, 0x0d, 0x66, 0x83, 0xd4, - 0xe4, 0x37, 0xce, 0x8b, 0x40, 0x3a, 0x96, 0x1e, 0x1c, 0x8d, 0x4a, 0xaf, 0xab, 0x66, 0x46, 0x49, - 0x94, 0x81, 0x87, 0x26, 0x00, 0x66, 0x72, 0x7b, 0x61, 0x01, 0x34, 0x46, 0x6b, 0x67, 0xc9, 0x9d, - 0xc5, 0x9f, 0xa0, 0xd4, 0x75, 0xa5, 0x60, 0xb5, 0x3f, 0x15, 0x00, 0x92, 0xaa, 0x80, 0x17, 0x41, - 0xea, 0x19, 0x2f, 0xbf, 0x62, 0x45, 0x0e, 0x81, 0x52, 0x72, 0xb8, 0x02, 0xa6, 0x6d, 0xc2, 0x18, - 0x6e, 0x46, 0xef, 0x80, 0xf8, 0x5f, 0x86, 0x8d, 0x50, 0x8c, 0xa2, 0x7d, 0xb8, 0x0b, 0xa6, 0x7c, - 0x82, 0x99, 0xeb, 0xc8, 0xff, 0x23, 0xde, 0xe5, 0x63, 0x15, 0x12, 0x92, 0xe3, 0xae, 0xba, 0x3a, - 0xca, 0xbf, 0x40, 0xba, 0x9c, 0xc2, 0x84, 0x11, 0x92, 0x70, 0xf0, 0x0e, 0xa8, 0x4a, 0x8e, 0xd4, - 0x81, 0xc3, 0xaa, 0x3d, 0x2f, 0x4f, 0x53, 0xdd, 0xc8, 0x2b, 0xa0, 0x7e, 0x1b, 0xed, 0x1e, 0x98, - 0x89, 0xb2, 0x0b, 0xd6, 0x40, 0x31, 0xf5, 0xf9, 0x0e, 0x1d, 0x17, 0x92, 0x5c, 0x60, 0x26, 0x07, - 0x07, 0xc6, 0xd8, 0x7a, 0xfa, 0xa2, 0x3e, 0xf1, 0xec, 0x45, 0x7d, 0xe2, 0xf9, 0x8b, 0xfa, 0xc4, - 0x93, 0x5e, 0x5d, 0x79, 0xda, 0xab, 0x2b, 0xcf, 0x7a, 0x75, 0xe5, 0x79, 0xaf, 0xae, 0xfc, 0xd2, - 0xab, 0x2b, 0xdf, 0xfc, 0x5a, 0x9f, 0xf8, 0x60, 0x65, 0xe4, 0x7f, 0xf1, 0xfe, 0x0a, 0x00, 
0x00, - 0xff, 0xff, 0x22, 0xbd, 0xc5, 0xc7, 0xf1, 0x13, 0x00, 0x00, + // 1783 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xdd, 0x6f, 0x1b, 0x4b, + 0x15, 0xcf, 0xda, 0xce, 0x87, 0xc7, 0xf9, 0xf2, 0xd0, 0x12, 0x37, 0xa5, 0xde, 0x68, 0x55, 0xa1, + 0x46, 0x82, 0x35, 0x49, 0x0b, 0xa5, 0x55, 0x51, 0x95, 0x6d, 0x9b, 0xb6, 0x69, 0x9d, 0x44, 0x53, + 0x94, 0x20, 0x04, 0x12, 0x93, 0xf5, 0xc4, 0xde, 0xc6, 0xfb, 0xc1, 0xce, 0x3a, 0x34, 0x02, 0x89, + 0x4a, 0x08, 0x09, 0xde, 0x78, 0xe0, 0x85, 0x37, 0xc4, 0x1f, 0xc0, 0x03, 0xfc, 0x05, 0xbc, 0xf5, + 0xb1, 0x8f, 0xe5, 0x81, 0x15, 0x35, 0x20, 0xf1, 0x0c, 0xd2, 0xbd, 0x52, 0x5e, 0xee, 0xd5, 0xcc, + 0xce, 0x7e, 0x79, 0xed, 0xc6, 0x4e, 0xd3, 0xf4, 0xe1, 0xde, 0x37, 0xcf, 0xf9, 0xf8, 0x9d, 0x39, + 0x67, 0xce, 0x9c, 0x39, 0xc7, 0x0b, 0x6e, 0x1d, 0x7c, 0x97, 0xaa, 0x86, 0x5d, 0xc3, 0x8e, 0x51, + 0xc3, 0x0d, 0xd3, 0xa0, 0xd4, 0xb0, 0x2d, 0x97, 0x34, 0x0d, 0xea, 0xb9, 0xd8, 0x33, 0x6c, 0xab, + 0x76, 0xb8, 0x82, 0xdb, 0x4e, 0x0b, 0xaf, 0xd4, 0x9a, 0xc4, 0x22, 0x2e, 0xf6, 0x48, 0x43, 0x75, + 0x5c, 0xdb, 0xb3, 0xe1, 0x72, 0xa0, 0xaa, 0x62, 0xc7, 0x50, 0xfb, 0xaa, 0xaa, 0xa1, 0xea, 0xe2, + 0x37, 0x9b, 0x86, 0xd7, 0xea, 0xec, 0xa9, 0xba, 0x6d, 0xd6, 0x9a, 0x76, 0xd3, 0xae, 0x71, 0x84, + 0xbd, 0xce, 0x3e, 0x5f, 0xf1, 0x05, 0xff, 0x15, 0x20, 0x2f, 0x5e, 0x1f, 0x62, 0x53, 0xbd, 0xdb, + 0x59, 0xbc, 0x11, 0x2b, 0x99, 0x58, 0x6f, 0x19, 0x16, 0x71, 0x8f, 0x6a, 0xce, 0x41, 0x93, 0x11, + 0x68, 0xcd, 0x24, 0x1e, 0xee, 0xa7, 0x55, 0x1b, 0xa4, 0xe5, 0x76, 0x2c, 0xcf, 0x30, 0x49, 0x46, + 0xe1, 0x3b, 0x27, 0x29, 0x50, 0xbd, 0x45, 0x4c, 0xdc, 0xab, 0xa7, 0x3c, 0x02, 0x70, 0xcd, 0x71, + 0xda, 0x47, 0xf7, 0x6c, 0x6b, 0xdf, 0x68, 0x76, 0x02, 0x3f, 0xe0, 0x2a, 0x00, 0xe4, 0x85, 0xe3, + 0x12, 0xee, 0x61, 0x45, 0x5a, 0x92, 0xae, 0x15, 0x35, 0xf8, 0xca, 0x97, 0xc7, 0xba, 0xbe, 0x0c, + 0x1e, 0x44, 0x1c, 0x94, 0x90, 0x52, 0x28, 0x98, 0x5b, 0xeb, 0x34, 0x0c, 0x6f, 0xcd, 0xb2, 0x6c, + 0x2f, 0x80, 0xb9, 0x02, 0xf2, 0x07, 0xe4, 0x48, 0xe8, 0x97, 0x84, 0x7e, 0xfe, 0x09, 0x39, 0x42, + 0x8c, 0x0e, 0xd7, 0xc0, 0xdc, 0x21, 0x6e, 0x77, 0x48, 0x0c, 0x58, 0xc9, 0x71, 0xd1, 0x05, 0x21, + 0x3a, 0xb7, 0x93, 0x66, 0xa3, 0x5e, 0x79, 0xa5, 0x0d, 0xca, 0xf1, 0x6a, 0x17, 0xbb, 0x96, 0x61, + 0x35, 0xe1, 0x37, 0xc0, 0xd4, 0xbe, 0x41, 0xda, 0x0d, 0x44, 0xf6, 0x05, 0xe0, 0xbc, 0x00, 0x9c, + 0x5a, 0x17, 0x74, 0x14, 0x49, 0xc0, 0x65, 0x30, 0xf9, 0xb3, 0x40, 0xb1, 0x92, 0xe7, 0xc2, 0x73, + 0x42, 0x78, 0x52, 0xe0, 0xa1, 0x90, 0xaf, 0xdc, 0x05, 0xc5, 0x8d, 0x67, 0x5b, 0x9b, 0xdb, 0xd8, + 0xd3, 0x5b, 0xa7, 0x8a, 0xd1, 0x3e, 0x98, 0xad, 0x33, 0xe5, 0x7b, 0xb6, 0xd5, 0x30, 0x78, 0x88, + 0x96, 0x40, 0xc1, 0xc2, 0x26, 0x11, 0xfa, 0xd3, 0x42, 0xbf, 0xb0, 0x89, 0x4d, 0x82, 0x38, 0xa7, + 0xc7, 0x4e, 0x6e, 0x28, 0x3b, 0x7f, 0x2f, 0x08, 0x43, 0x88, 0x50, 0xbb, 0xe3, 0xea, 0x84, 0xc2, + 0x17, 0xa0, 0xcc, 0xe0, 0xa8, 0x83, 0x75, 0xf2, 0x8c, 0xb4, 0x89, 0xee, 0xd9, 0x2e, 0xb7, 0x5a, + 0x5a, 0xbd, 0xae, 0xc6, 0x57, 0x26, 0x4a, 0x1e, 0xd5, 0x39, 0x68, 0x32, 0x02, 0x55, 0x59, 0x8e, + 0xaa, 0x87, 0x2b, 0xea, 0x53, 0xbc, 0x47, 0xda, 0xa1, 0xaa, 0x76, 0xb1, 0xeb, 0xcb, 0xe5, 0xcd, + 0x5e, 0x44, 0x94, 0x35, 0x02, 0x6d, 0x30, 0x6b, 0xef, 0x3d, 0x27, 0xba, 0x17, 0x99, 0xcd, 0x9d, + 0xde, 0x2c, 0xec, 0xfa, 0xf2, 0xec, 0x56, 0x0a, 0x0e, 0xf5, 0xc0, 0xc3, 0x5f, 0x82, 0x19, 0x57, + 0xf8, 0x8d, 0x3a, 0x6d, 0x42, 0x2b, 0xf9, 0xa5, 0xfc, 0xb5, 0xd2, 0xaa, 0xa6, 0x0e, 0x5d, 0x19, + 0x54, 0xe6, 0x58, 0x83, 0x29, 0xef, 0x1a, 0x5e, 0x6b, 0xcb, 0x21, 0x01, 0x9f, 0x6a, 0x17, 0x45, + 0xe0, 
0x67, 0x50, 0xd2, 0x00, 0x4a, 0xdb, 0x83, 0xbf, 0x97, 0xc0, 0x05, 0xf2, 0x42, 0x6f, 0x77, + 0x1a, 0x24, 0x25, 0x57, 0x29, 0x9c, 0xd9, 0x46, 0xbe, 0x26, 0x36, 0x72, 0xe1, 0x41, 0x1f, 0x3b, + 0xa8, 0xaf, 0x75, 0x78, 0x1f, 0x94, 0x4c, 0x96, 0x14, 0xdb, 0x76, 0xdb, 0xd0, 0x8f, 0x2a, 0x93, + 0x3c, 0x95, 0x94, 0xae, 0x2f, 0x97, 0xea, 0x31, 0xf9, 0xd8, 0x97, 0xe7, 0x12, 0xcb, 0xef, 0x1f, + 0x39, 0x04, 0x25, 0xd5, 0x94, 0xff, 0x48, 0x60, 0xa1, 0xde, 0x61, 0x37, 0xdc, 0x6a, 0xae, 0x85, + 0x9b, 0x0f, 0x78, 0xf0, 0x27, 0x60, 0x8a, 0x1d, 0x5b, 0x03, 0x7b, 0x58, 0xe4, 0xd6, 0xb7, 0x86, + 0x3b, 0xe4, 0xe0, 0x44, 0xeb, 0xc4, 0xc3, 0x71, 0x6e, 0xc7, 0x34, 0x14, 0xa1, 0xc2, 0x16, 0x28, + 0x50, 0x87, 0xe8, 0x22, 0x85, 0xd6, 0x47, 0x88, 0xe4, 0x80, 0x3d, 0x3f, 0x73, 0x88, 0x1e, 0xdf, + 0x3b, 0xb6, 0x42, 0xdc, 0x82, 0xf2, 0x7f, 0x09, 0x54, 0x07, 0xe8, 0x68, 0x86, 0xd5, 0x60, 0x85, + 0xe6, 0xc3, 0xbb, 0x6b, 0xa7, 0xdc, 0xad, 0xbf, 0xbf, 0xbb, 0x62, 0xeb, 0x03, 0xbd, 0xfe, 0x9f, + 0x04, 0x94, 0x77, 0xab, 0x3e, 0x35, 0xa8, 0x07, 0x7f, 0x94, 0xf1, 0x5c, 0x1d, 0xf2, 0x36, 0x1b, + 0x34, 0xf0, 0x3b, 0x2a, 0xc9, 0x21, 0x25, 0xe1, 0xb5, 0x05, 0xc6, 0x0d, 0x8f, 0x98, 0xb4, 0x92, + 0xe3, 0xf7, 0xe5, 0xf1, 0x99, 0xb9, 0xad, 0xcd, 0x08, 0xab, 0xe3, 0x8f, 0x19, 0x3e, 0x0a, 0xcc, + 0x28, 0x7f, 0xce, 0x9d, 0xe4, 0x34, 0x8b, 0x10, 0xab, 0xc4, 0x0e, 0x27, 0x6e, 0xc6, 0x15, 0x3b, + 0x3a, 0xbe, 0xed, 0x88, 0x83, 0x12, 0x52, 0xf0, 0xc7, 0x60, 0xca, 0xc1, 0x2e, 0x36, 0xc3, 0xb7, + 0x28, 0x5d, 0xf6, 0x4e, 0xf2, 0x66, 0x5b, 0xa8, 0x6a, 0xd3, 0x2c, 0x52, 0xe1, 0x0a, 0x45, 0x90, + 0xb0, 0x03, 0x66, 0xcd, 0x54, 0x9d, 0xe7, 0x6f, 0x58, 0x69, 0xf5, 0xd6, 0x28, 0x21, 0x4b, 0x01, + 0x04, 0x15, 0x36, 0x4d, 0x43, 0x3d, 0x46, 0x94, 0x7f, 0x4b, 0xe0, 0xf2, 0x80, 0x80, 0x9d, 0x43, + 0x7a, 0x34, 0xd3, 0xe9, 0xa1, 0x9d, 0x41, 0x7a, 0xf4, 0xcf, 0x8b, 0x3f, 0x4e, 0x0c, 0x74, 0x93, + 0x27, 0x04, 0x06, 0x45, 0x7e, 0x12, 0x4f, 0x0c, 0xab, 0x21, 0xfc, 0xbc, 0x31, 0xea, 0xe9, 0x32, + 0x5d, 0x6d, 0xa6, 0xeb, 0xcb, 0xc5, 0x68, 0x89, 0x62, 0x54, 0xf8, 0x73, 0x30, 0x6f, 0x8a, 0x8e, + 0x81, 0x01, 0x18, 0x96, 0x47, 0x45, 0x1e, 0xbd, 0xc7, 0x11, 0x5f, 0xe8, 0xfa, 0xf2, 0x7c, 0xbd, + 0x07, 0x16, 0x65, 0x0c, 0xc1, 0x06, 0x28, 0x1e, 0x62, 0xd7, 0xc0, 0x7b, 0xf1, 0x23, 0x3a, 0x4a, + 0xf6, 0xee, 0x08, 0x5d, 0xad, 0x2c, 0xa2, 0x5b, 0x0c, 0x29, 0x14, 0xc5, 0xc0, 0xcc, 0x8a, 0xd9, + 0x09, 0x3a, 0xc6, 0xf0, 0x85, 0xbc, 0x3e, 0xf2, 0x91, 0xda, 0x56, 0x6c, 0x25, 0xa4, 0x50, 0x14, + 0x03, 0xc3, 0xa7, 0x60, 0x66, 0x1f, 0x1b, 0xed, 0x8e, 0x4b, 0xc4, 0xf3, 0x37, 0xce, 0xef, 0xef, + 0xd7, 0xd9, 0x63, 0xbe, 0x9e, 0x64, 0x1c, 0xfb, 0x72, 0x39, 0x45, 0xe0, 0x4f, 0x60, 0x5a, 0x19, + 0xfe, 0x02, 0xcc, 0x99, 0xa9, 0x46, 0x8e, 0x56, 0x26, 0xf8, 0xce, 0x47, 0x3e, 0x95, 0x08, 0x21, + 0xee, 0x7a, 0xd3, 0x74, 0x8a, 0x7a, 0x4d, 0xc1, 0xdf, 0x48, 0x00, 0xba, 0xc4, 0xb0, 0x0e, 0x6d, + 0x9d, 0x43, 0xa6, 0x1e, 0xf4, 0x1f, 0x08, 0x18, 0x88, 0x32, 0x12, 0xc7, 0xbe, 0x7c, 0x7b, 0x88, + 0x19, 0x46, 0xcd, 0x6a, 0xf2, 0x18, 0xf4, 0xb1, 0xa9, 0xfc, 0x35, 0x07, 0xa6, 0xc2, 0x78, 0xc3, + 0x3b, 0xec, 0x3e, 0x78, 0x7a, 0x8b, 0x49, 0x8b, 0x4e, 0xb5, 0x1a, 0x1e, 0xca, 0x76, 0xc8, 0x38, + 0x4e, 0x2e, 0x50, 0xac, 0x00, 0x7f, 0x2d, 0x01, 0x88, 0x33, 0xb3, 0x88, 0x28, 0x68, 0xdf, 0x1b, + 0x21, 0xae, 0xd9, 0x81, 0x46, 0xfb, 0x2a, 0x0b, 0x48, 0x96, 0x8e, 0xfa, 0x18, 0x64, 0xb7, 0xfa, + 0x39, 0xb5, 0x2d, 0xbe, 0xc7, 0x4a, 0x61, 0xe4, 0x5b, 0x1d, 0x4d, 0x08, 0xc1, 0xad, 0x8e, 0x96, + 0x28, 0x46, 0x55, 0xde, 0x48, 0x60, 0x61, 0x40, 0x67, 0x07, 0x6f, 0xc6, 0xdd, 0x2b, 0x6f, 0xaf, + 0x2b, 0xd2, 0x52, 0xfe, 0x5a, 
0x51, 0x2b, 0x27, 0xbb, 0x4e, 0xce, 0x40, 0x69, 0x39, 0xf8, 0x2b, + 0x96, 0x15, 0x19, 0x3c, 0x51, 0x2d, 0x6e, 0x0e, 0xe3, 0x81, 0xda, 0xa7, 0xd1, 0x5c, 0x8c, 0xd2, + 0x29, 0xc3, 0x43, 0x7d, 0xcc, 0x29, 0x18, 0xc4, 0x85, 0x8c, 0xbd, 0x98, 0xd8, 0x31, 0x76, 0x88, + 0xdb, 0x6f, 0x46, 0x5a, 0xdb, 0x7e, 0x2c, 0x38, 0x28, 0x21, 0xc5, 0x26, 0xa2, 0x03, 0x56, 0x4f, + 0x73, 0xe9, 0x89, 0x88, 0x17, 0x46, 0xce, 0x51, 0xfe, 0x92, 0x03, 0xd1, 0x5b, 0x38, 0xc4, 0x00, + 0x55, 0x03, 0xc5, 0x68, 0x28, 0x11, 0xa8, 0x51, 0xa9, 0x88, 0x06, 0x18, 0x14, 0xcb, 0xb0, 0x37, + 0x9b, 0x86, 0xa3, 0x4a, 0xfe, 0xf4, 0xa3, 0x0a, 0x7f, 0xb3, 0xa3, 0x21, 0x25, 0x82, 0x84, 0x1e, + 0x58, 0xe0, 0xf5, 0x9d, 0x78, 0xc4, 0xdd, 0xb4, 0xbd, 0x75, 0xbb, 0x63, 0x35, 0xd6, 0x74, 0x9e, + 0xeb, 0x05, 0xbe, 0xbb, 0xdb, 0x5d, 0x5f, 0x5e, 0xd8, 0xee, 0x2f, 0x72, 0xec, 0xcb, 0x97, 0x07, + 0xb0, 0xf8, 0x7d, 0x1a, 0x04, 0xad, 0xfc, 0x41, 0x02, 0xd3, 0x4c, 0xe2, 0x5e, 0x8b, 0xe8, 0x07, + 0xac, 0x79, 0x65, 0x45, 0x84, 0xf4, 0xce, 0xce, 0x41, 0xb6, 0x95, 0x56, 0xef, 0x8c, 0x90, 0xf0, + 0x99, 0x01, 0x3c, 0xce, 0x99, 0x0c, 0x8b, 0xa2, 0x3e, 0x36, 0x95, 0x7f, 0xe4, 0xc0, 0xa5, 0x1d, + 0xdc, 0x36, 0x1a, 0x1f, 0x69, 0xa8, 0x78, 0x9e, 0xea, 0xb2, 0x1f, 0x8d, 0xf4, 0xc4, 0x0d, 0xd8, + 0xf5, 0xa0, 0x06, 0x1b, 0xba, 0x60, 0x82, 0x7a, 0xd8, 0xeb, 0x84, 0x9d, 0xda, 0xc6, 0x99, 0x58, + 0xe3, 0x88, 0xda, 0xac, 0xb0, 0x37, 0x11, 0xac, 0x91, 0xb0, 0xa4, 0x7c, 0x2a, 0x81, 0xa5, 0x81, + 0xba, 0xe7, 0x37, 0xcc, 0xfc, 0x34, 0x15, 0xe6, 0xad, 0xb3, 0x70, 0xfc, 0xa4, 0x71, 0xe6, 0x13, + 0x09, 0x5c, 0x3d, 0x49, 0xf9, 0x1c, 0x3a, 0x56, 0x27, 0xdd, 0xb1, 0x3e, 0x39, 0x43, 0xd7, 0x07, + 0xb4, 0xae, 0xbf, 0xcd, 0x9f, 0xec, 0xf8, 0x97, 0x43, 0x4d, 0xea, 0x1f, 0xb2, 0x5d, 0x50, 0x3e, + 0x14, 0x11, 0xb3, 0xad, 0xa0, 0x6a, 0x06, 0xfd, 0x68, 0x51, 0x5b, 0xee, 0xfa, 0x72, 0x79, 0xa7, + 0x97, 0x79, 0xec, 0xcb, 0xf3, 0xbd, 0x44, 0x94, 0xc5, 0x50, 0xfe, 0x2b, 0x81, 0x2b, 0x03, 0xcf, + 0xe2, 0x1c, 0xb2, 0xcf, 0x48, 0x67, 0xdf, 0xfd, 0x33, 0xc9, 0xbe, 0xfe, 0x69, 0xf7, 0xa7, 0x89, + 0x77, 0xb8, 0xfa, 0x85, 0x98, 0x99, 0xda, 0xa0, 0x14, 0x67, 0x40, 0x38, 0x35, 0x7d, 0xfb, 0x14, + 0x21, 0xb7, 0x2d, 0xed, 0x2b, 0x22, 0xc6, 0xa5, 0x98, 0x46, 0x51, 0x12, 0x3e, 0x3b, 0xd5, 0x14, + 0xde, 0x67, 0xaa, 0x79, 0x29, 0x81, 0x79, 0x9c, 0xfe, 0x0f, 0x9f, 0x56, 0xc6, 0xb9, 0x07, 0xb7, + 0x47, 0xe9, 0xbf, 0xd3, 0x10, 0x5a, 0x45, 0xb8, 0x31, 0xdf, 0xc3, 0xa0, 0x28, 0x63, 0xed, 0x23, + 0x0f, 0x56, 0xa9, 0x81, 0x77, 0xf2, 0x03, 0x0d, 0xbc, 0xca, 0xdf, 0x72, 0x40, 0x3e, 0xe1, 0x29, + 0x87, 0x1b, 0x00, 0xda, 0x7b, 0x94, 0xb8, 0x87, 0xa4, 0xf1, 0x30, 0xf8, 0x64, 0x13, 0x76, 0xd0, + 0xf9, 0xb8, 0xbd, 0xda, 0xca, 0x48, 0xa0, 0x3e, 0x5a, 0xd0, 0x04, 0xd3, 0x5e, 0xa2, 0xf3, 0x1b, + 0x65, 0x22, 0x10, 0x8e, 0x25, 0x1b, 0x47, 0x6d, 0xbe, 0xeb, 0xcb, 0xa9, 0x56, 0x12, 0xa5, 0xe0, + 0xa1, 0x0e, 0x80, 0x1e, 0x9f, 0x5e, 0x70, 0x01, 0x6a, 0xc3, 0x95, 0xb3, 0xf8, 0xcc, 0xa2, 0x27, + 0x28, 0x71, 0x5c, 0x09, 0x58, 0xe5, 0x33, 0x09, 0x80, 0xf8, 0x56, 0xc0, 0xab, 0x20, 0xf1, 0x29, + 0x44, 0xbc, 0x62, 0x05, 0x06, 0x81, 0x12, 0x74, 0xb8, 0x0c, 0x26, 0x4d, 0x42, 0x29, 0x6e, 0x86, + 0x73, 0x40, 0xf4, 0xa9, 0xa7, 0x1e, 0x90, 0x51, 0xc8, 0x87, 0xbb, 0x60, 0xc2, 0x25, 0x98, 0x8a, + 0xf9, 0xb3, 0xa8, 0xdd, 0x65, 0x6d, 0x15, 0xe2, 0x94, 0x63, 0x5f, 0x5e, 0x19, 0xe6, 0xa3, 0x9e, + 0x2a, 0xba, 0x30, 0xae, 0x84, 0x04, 0x1c, 0x7c, 0x08, 0xca, 0xc2, 0x46, 0x62, 0xc3, 0xc1, 0xad, + 0xbd, 0x24, 0x76, 0x53, 0xae, 0xf7, 0x0a, 0xa0, 0xac, 0x8e, 0xb2, 0x01, 0xa6, 0xc2, 0xec, 0x82, + 0x15, 0x50, 0x48, 0x3c, 0xdf, 0x81, 0xe3, 0x9c, 0xd2, 
0x13, 0x98, 0x5c, 0xff, 0xc0, 0x68, 0x5b, + 0xaf, 0xde, 0x56, 0xc7, 0x5e, 0xbf, 0xad, 0x8e, 0xbd, 0x79, 0x5b, 0x1d, 0x7b, 0xd9, 0xad, 0x4a, + 0xaf, 0xba, 0x55, 0xe9, 0x75, 0xb7, 0x2a, 0xbd, 0xe9, 0x56, 0xa5, 0x7f, 0x76, 0xab, 0xd2, 0xef, + 0xfe, 0x55, 0x1d, 0xfb, 0xe1, 0xf2, 0xd0, 0x1f, 0x65, 0x3f, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xac, + 0xc8, 0x8c, 0x78, 0xc0, 0x1d, 0x00, 0x00, +} + +func (m *ApplyConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplyConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ApplyConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) { @@ -709,6 +1017,34 @@ func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *JSONPatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JSONPatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JSONPatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *MatchCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -824,7 +1160,7 @@ func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -834,18 +1170,18 @@ func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { - size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -854,19 +1190,20 @@ func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) } i-- dAtA[i] = 0x12 - if len(m.ResourceNames) > 0 { - for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ResourceNames[iNdEx]) - copy(dAtA[i:], m.ResourceNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) - i-- - dAtA[i] = 0xa + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ParamKind) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -876,187 +1213,12 @@ func (m *ParamKind) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ParamRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ParameterNotFoundAction != nil { - i -= len(*m.ParameterNotFoundAction) - copy(dAtA[i:], *m.ParameterNotFoundAction) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) - i-- - dAtA[i] = 0x22 - } - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *TypeChecking) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ExpressionWarnings) > 0 { - for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := 
len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1084,7 +1246,7 @@ func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1094,12 +1256,12 @@ func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1131,7 +1293,7 @@ func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1141,25 +1303,16 @@ func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ValidationActions) > 0 { - for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ValidationActions[iNdEx]) - copy(dAtA[i:], m.ValidationActions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } if m.MatchResources != nil { { size, err := 
m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) @@ -1192,7 +1345,7 @@ func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1202,12 +1355,12 @@ func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1239,7 +1392,7 @@ func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { +func (m *MutatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1249,30 +1402,21 @@ func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MutatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Variables) > 0 { - for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } + i -= len(m.ReinvocationPolicy) + copy(dAtA[i:], m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x3a if len(m.MatchConditions) > 0 { for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { { @@ -1287,10 +1431,17 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, dAtA[i] = 0x32 } } - if len(m.AuditAnnotations) > 0 { - for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x2a + } + if len(m.Mutations) > 0 { + for iNdEx := len(m.Mutations) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Mutations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1298,20 +1449,13 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x22 } } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 - } - if len(m.Validations) > 0 { - for 
iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1349,7 +1493,7 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { +func (m *Mutation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1359,33 +1503,31 @@ func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *Mutation) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Mutation) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.JSONPatch != nil { + { + size, err := m.JSONPatch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if m.TypeChecking != nil { + if m.ApplyConfiguration != nil { { - size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ApplyConfiguration.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1393,15 +1535,17 @@ func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i -= len(m.PatchType) + copy(dAtA[i:], m.PatchType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PatchType))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x12 return len(dAtA) - i, nil } -func (m *Validation) Marshal() (dAtA []byte, err error) { +func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1411,42 +1555,72 @@ func (m *Validation) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Validation) MarshalTo(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.MessageExpression) - copy(dAtA[i:], m.MessageExpression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + { + size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x22 - if m.Reason != nil { - i -= len(*m.Reason) - copy(dAtA[i:], *m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) - i-- - 
dAtA[i] = 0x1a + dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + return len(dAtA) - i, nil +} + +func (m *ParamKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) i-- dAtA[i] = 0x12 - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Variable) Marshal() (dAtA []byte, err error) { +func (m *ParamRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1456,19 +1630,38 @@ func (m *Variable) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Variable) MarshalTo(dAtA []byte) (int, error) { +func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Expression) - copy(dAtA[i:], m.Expression) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + if m.ParameterNotFoundAction != nil { + i -= len(*m.ParameterNotFoundAction) + copy(dAtA[i:], *m.ParameterNotFoundAction) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction))) + i-- + dAtA[i] = 0x22 + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) i-- dAtA[i] = 0x12 i -= len(m.Name) @@ -1479,606 +1672,2773 @@ func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *TypeChecking) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *AuditAnnotation) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ValueExpression) - n += 1 + l + sovGenerated(uint64(l)) - return n + +func (m *TypeChecking) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ExpressionWarning) Size() (n int) { - if m == nil { - return 0 - } +func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.FieldRef) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Warning) - n += 1 + l + sovGenerated(uint64(l)) - return n + if len(m.ExpressionWarnings) > 0 { + for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *MatchCondition) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *MatchResources) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ResourceRules) > 0 { - for _, e := range m.ResourceRules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if len(m.ExcludeResourceRules) > 0 { - for _, e := range m.ExcludeResourceRules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NamedRuleWithOperations) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = m.RuleWithOperations.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *ParamKind) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ParamRef) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ParameterNotFoundAction != nil { - l = len(*m.ParameterNotFoundAction) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *TypeChecking) Size() (n int) { - if m == nil { - return 0 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - var l int - _ = l - if len(m.ExpressionWarnings) > 0 { - for _, e := range m.ExpressionWarnings { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicy) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.PolicyName) - n += 1 + l + sovGenerated(uint64(l)) - if m.ParamRef != nil { - l = m.ParamRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ValidationActions) > 0 { + for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ValidationActions[iNdEx]) + copy(dAtA[i:], m.ValidationActions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } if m.MatchResources != nil { - l = m.MatchResources.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - if len(m.ValidationActions) > 0 { - for _, s := range m.ValidationActions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + { + size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return n + i -= len(m.PolicyName) + copy(dAtA[i:], m.PolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyList) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicySpec) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - if m.ParamKind != nil { - l = m.ParamKind.Size() - n += 1 + l + sovGenerated(uint64(l)) + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Variables) > 0 { + for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - if m.MatchConstraints != nil { - l = m.MatchConstraints.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.MatchConditions) > 0 { + for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } } - if len(m.Validations) > 0 { - for _, e := range m.Validations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.AuditAnnotations) > 0 { + for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } } if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 } - if len(m.AuditAnnotations) > 0 { - for _, e := range m.AuditAnnotations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Validations) > 0 { + for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if len(m.MatchConditions) > 0 { - for _, e := range m.MatchConditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MatchConstraints != nil { + { + size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.Variables) > 0 { - for _, e := range m.Variables { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ParamKind != nil { + { + size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { - if m == nil { - return 0 +func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if m.TypeChecking != nil { - l = m.TypeChecking.Size() - n += 1 + l + sovGenerated(uint64(l)) - } if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x1a } } - return n + if m.TypeChecking != nil { + { + size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *Validation) Size() (n int) { - if m == nil { - return 0 +func (m *Validation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Validation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.MessageExpression) + copy(dAtA[i:], m.MessageExpression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression))) + i-- + dAtA[i] = 0x22 if m.Reason != nil { - l = len(*m.Reason) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(*m.Reason) + copy(dAtA[i:], *m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason))) + i-- + dAtA[i] = 0x1a } - l = len(m.MessageExpression) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Variable) Size() (n int) { - if m == nil { - return 0 +func (m *Variable) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Variable) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Expression) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AuditAnnotation) String() string { - if this == nil { - return "nil" +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - s := strings.Join([]string{`&AuditAnnotation{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, - `}`, - }, "") - return s + dAtA[offset] = uint8(v) + return base } -func (this *ExpressionWarning) String() string { - if this == nil { - return "nil" +func (m *ApplyConfiguration) 
Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ExpressionWarning{`, - `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, - `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *MatchCondition) String() string { - if this == nil { - return "nil" + +func (m *AuditAnnotation) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&MatchCondition{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ValueExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *MatchResources) String() string { - if this == nil { - return "nil" - } - repeatedStringForResourceRules := "[]NamedRuleWithOperations{" - for _, f := range this.ResourceRules { - repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceRules += "}" - repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" - for _, f := range this.ExcludeResourceRules { - repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + +func (m *ExpressionWarning) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForExcludeResourceRules += "}" - s := strings.Join([]string{`&MatchResources{`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ResourceRules:` + repeatedStringForResourceRules + `,`, - `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.FieldRef) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Warning) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *NamedRuleWithOperations) String() string { - if this == nil { - return "nil" + +func (m *JSONPatch) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NamedRuleWithOperations{`, - `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, - `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ParamKind) String() string { - if this == nil { - return "nil" + +func (m *MatchCondition) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ParamKind{`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ParamRef) String() string { - if this == nil { - return "nil" + +func (m *MatchResources) Size() (n int) { + if m == nil { + return 0 } - s := 
strings.Join([]string{`&ParamRef{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, - `}`, - }, "") - return s -} -func (this *TypeChecking) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForExpressionWarnings := "[]ExpressionWarning{" - for _, f := range this.ExpressionWarnings { - repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForExpressionWarnings += "}" - s := strings.Join([]string{`&TypeChecking{`, - `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, - `}`, - }, "") - return s + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ExcludeResourceRules) > 0 { + for _, e := range m.ExcludeResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *ValidatingAdmissionPolicy) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ValidatingAdmissionPolicyBinding) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ValidatingAdmissionPolicyBindingList) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", 
"ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s + return n } -func (this *ValidatingAdmissionPolicyBindingSpec) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicyBindingSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, - `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, - `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, - `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, - `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *ValidatingAdmissionPolicyList) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForItems := "[]ValidatingAdmissionPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s + return n } -func (this *ValidatingAdmissionPolicySpec) String() string { - if this == nil { - return "nil" + +func (m *MutatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForValidations := "[]Validation{" - for _, f := range this.Validations { - repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForValidations += "}" - repeatedStringForAuditAnnotations := "[]AuditAnnotation{" - for _, f := range this.AuditAnnotations { - repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForAuditAnnotations += "}" - repeatedStringForMatchConditions := "[]MatchCondition{" - for _, f := range this.MatchConditions { - repeatedStringForMatchConditions += 
strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForMatchConditions += "}" - repeatedStringForVariables := "[]Variable{" - for _, f := range this.Variables { - repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + if len(m.Mutations) > 0 { + for _, e := range m.Mutations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForVariables += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, - `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, - `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, - `Validations:` + repeatedStringForValidations + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, - `MatchConditions:` + repeatedStringForMatchConditions + `,`, - `Variables:` + repeatedStringForVariables + `,`, - `}`, - }, "") - return s + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *ValidatingAdmissionPolicyStatus) String() string { - if this == nil { - return "nil" + +func (m *Mutation) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + var l int + _ = l + l = len(m.PatchType) + n += 1 + l + sovGenerated(uint64(l)) + if m.ApplyConfiguration != nil { + l = m.ApplyConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.JSONPatch != nil { + l = m.JSONPatch.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NamedRuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RuleWithOperations.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamKind) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ParamRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ParameterNotFoundAction != nil { + l = len(*m.ParameterNotFoundAction) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypeChecking) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExpressionWarnings) > 0 { + for _, e := range m.ExpressionWarnings { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PolicyName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ParamRef != nil { + l = m.ParamRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchResources != nil { + l = m.MatchResources.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ValidationActions) > 0 { + for _, s := range m.ValidationActions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParamKind != nil { + l = m.ParamKind.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MatchConstraints != nil { + l = m.MatchConstraints.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Validations) > 0 { + for _, e := range m.Validations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuditAnnotations) > 0 { + for _, e := range m.AuditAnnotations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchConditions) > 0 { + for _, e := range m.MatchConditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Variables) > 0 { + for _, e := range m.Variables { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingAdmissionPolicyStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if m.TypeChecking != nil { + l = m.TypeChecking.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Validation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.Reason != nil { + l = len(*m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.MessageExpression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Variable) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x 
uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ApplyConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ApplyConfiguration{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *AuditAnnotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditAnnotation{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`, + `}`, + }, "") + return s +} +func (this *ExpressionWarning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExpressionWarning{`, + `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`, + `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`, + `}`, + }, "") + return s +} +func (this *JSONPatch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JSONPatch{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MatchCondition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *MatchResources) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{" + for _, f := range this.ExcludeResourceRules { + repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForExcludeResourceRules += "}" + s := strings.Join([]string{`&MatchResources{`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "MutatingAdmissionPolicySpec", "MutatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), 
"MutatingAdmissionPolicyBindingSpec", "MutatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicyBinding", "MutatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingAdmissionPolicy", "MutatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *MutatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + repeatedStringForMutations := "[]Mutation{" + for _, f := range this.Mutations { + repeatedStringForMutations += strings.Replace(strings.Replace(f.String(), "Mutation", "Mutation", 1), `&`, ``, 1) + "," + } + repeatedStringForMutations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + s := strings.Join([]string{`&MutatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `Mutations:` + repeatedStringForMutations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `ReinvocationPolicy:` + fmt.Sprintf("%v", this.ReinvocationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *Mutation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mutation{`, + `PatchType:` + 
fmt.Sprintf("%v", this.PatchType) + `,`, + `ApplyConfiguration:` + strings.Replace(this.ApplyConfiguration.String(), "ApplyConfiguration", "ApplyConfiguration", 1) + `,`, + `JSONPatch:` + strings.Replace(this.JSONPatch.String(), "JSONPatch", "JSONPatch", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedRuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedRuleWithOperations{`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ParamKind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamKind{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ParamRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParamRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`, + `}`, + }, "") + return s +} +func (this *TypeChecking) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpressionWarnings := "[]ExpressionWarning{" + for _, f := range this.ExpressionWarnings { + repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + "," + } + repeatedStringForExpressionWarnings += "}" + s := strings.Join([]string{`&TypeChecking{`, + `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyBindingSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`, + `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`, + `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`, + `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`, + `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingAdmissionPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForValidations := "[]Validation{" + for _, f := range this.Validations { + repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + "," + } + repeatedStringForValidations += "}" + repeatedStringForAuditAnnotations := "[]AuditAnnotation{" + for _, f := range this.AuditAnnotations { + repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + "," + } + repeatedStringForAuditAnnotations += "}" + repeatedStringForMatchConditions := "[]MatchCondition{" + for _, f := range this.MatchConditions { + repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchConditions += "}" + repeatedStringForVariables := "[]Variable{" + for _, f := range this.Variables { + repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + "," + } + repeatedStringForVariables += "}" + s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`, + `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`, + `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`, + `Validations:` + repeatedStringForValidations + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`, + `MatchConditions:` + repeatedStringForMatchConditions + `,`, + `Variables:` + repeatedStringForVariables + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingAdmissionPolicyStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s 
:= strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Validation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Validation{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + valueToStringGenerated(this.Reason) + `,`, + `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, + `}`, + }, "") + return s +} +func (this *Variable) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Variable{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ApplyConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditAnnotation: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warning = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JSONPatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JSONPatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JSONPatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MatchResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) + if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingAdmissionPolicyBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicyBindingSpec: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParamRef == nil { + m.ParamRef = &ParamRef{} + } + if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchResources == nil { + m.MatchResources = &MatchResources{} + } + if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *Validation) String() string { - if this == nil { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - s := strings.Join([]string{`&Validation{`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + valueToStringGenerated(this.Reason) + `,`, - `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`, - `}`, - }, "") - return s + return nil } -func (this *Variable) String() string { - if this == nil { - return "nil" +func (m *MutatingAdmissionPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingAdmissionPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingAdmissionPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&Variable{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { +func (m *MutatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2101,17 +4461,17 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingAdmissionPolicySpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2121,29 +4481,69 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + if m.ParamKind == nil { + m.ParamKind = &ParamKind{} + } + if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchConstraints == nil { + m.MatchConstraints = &MatchResources{} + } + if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2153,79 +4553,31 @@ func (m *AuditAnnotation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ValueExpression = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + m.Variables = append(m.Variables, Variable{}) + if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
ExpressionWarning: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Mutations", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2235,27 +4587,29 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.FieldRef = string(dAtA[iNdEx:postIndex]) + m.Mutations = append(m.Mutations, Mutation{}) + if err := m.Mutations[len(m.Mutations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2283,63 +4637,14 @@ func (m *ExpressionWarning) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Warning = string(dAtA[iNdEx:postIndex]) + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MatchCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2349,27 +4654,29 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return 
ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.MatchConditions = append(m.MatchConditions, MatchCondition{}) + if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2397,7 +4704,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Expression = string(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = k8s_io_api_admissionregistration_v1.ReinvocationPolicyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -2420,7 +4727,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *MatchResources) Unmarshal(dAtA []byte) error { +func (m *Mutation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2443,53 +4750,17 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MatchResources: wiretype end group for non-group") + return fmt.Errorf("proto: Mutation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Mutation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2499,31 +4770,27 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.PatchType = PatchType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field ResourceRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ApplyConfiguration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2550,14 +4817,16 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{}) - if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ApplyConfiguration == nil { + m.ApplyConfiguration = &ApplyConfiguration{} + } + if err := m.ApplyConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JSONPatch", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2584,43 +4853,12 @@ func (m *MatchResources) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{}) - if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.JSONPatch == nil { + m.JSONPatch = &JSONPatch{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.JSONPatch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s iNdEx = postIndex default: iNdEx = preIndex diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index db02dd929..88344ce87 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -29,6 +29,51 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/admissionregistration/v1alpha1"; +// ApplyConfiguration defines the desired configuration values of an object. +message ApplyConfiguration { + // expression will be evaluated by CEL to create an apply configuration. + // ref: https://github.com/google/cel-spec + // + // Apply configurations are declared in CEL using object initialization. For example, this CEL expression + // returns an apply configuration to set a single field: + // + // Object{ + // spec: Object.spec{ + // serviceAccountName: "example" + // } + // } + // + // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of + // values not included in the apply configuration. 
+ // + // CEL expressions have access to the object types needed to create apply configurations: + // + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + optional string expression = 1; +} + // AuditAnnotation describes how to produce an audit annotation for an API request. message AuditAnnotation { // key specifies the audit annotation key. The audit annotation keys of @@ -79,6 +124,75 @@ message ExpressionWarning { optional string warning = 3; } +// JSONPatch defines a JSON Patch. +message JSONPatch { + // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). + // ref: https://github.com/google/cel-spec + // + // expression must return an array of JSONPatch values. + // + // For example, this CEL expression returns a JSON patch to conditionally modify a value: + // + // [ + // JSONPatch{op: "test", path: "/spec/example", value: "Red"}, + // JSONPatch{op: "replace", path: "/spec/example", value: "Green"} + // ] + // + // To define an object for the patch value, use Object types. For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/spec/selector", + // value: Object.spec.selector{matchLabels: {"environment": "test"}} + // } + // ] + // + // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"), + // value: "test" + // }, + // ] + // + // CEL expressions have access to the types needed to create JSON patches and objects: + // + // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'. + // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string, + // integer, array, map or object. 
If set, the 'path' and 'from' fields must be set to a + // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL + // function may be used to escape path keys containing '/' and '~'. + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) + // as well as: + // + // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively). + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + optional string expression = 1; +} + message MatchCondition { // Name is an identifier for this match condition, used for strategic merging of MatchConditions, // as well as providing an identifier for logging purposes. A good name should be descriptive of @@ -156,7 +270,7 @@ message MatchResources { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; // ObjectSelector decides whether to run the validation based on if the // object has matching labels. objectSelector is evaluated against both @@ -170,7 +284,7 @@ message MatchResources { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. // The policy cares about an operation if it matches _any_ Rule. @@ -202,6 +316,193 @@ message MatchResources { optional string matchPolicy = 7; } +// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain. 
+message MutatingAdmissionPolicy { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the MutatingAdmissionPolicy. + optional MutatingAdmissionPolicySpec spec = 2; +} + +// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. +// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators +// configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget). +// +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +message MutatingAdmissionPolicyBinding { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the MutatingAdmissionPolicyBinding. + optional MutatingAdmissionPolicyBindingSpec spec = 2; +} + +// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding. +message MutatingAdmissionPolicyBindingList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of PolicyBinding. + repeated MutatingAdmissionPolicyBinding items = 2; +} + +// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding. +message MutatingAdmissionPolicyBindingSpec { + // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + optional string policyName = 1; + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + optional ParamRef paramRef = 2; + + // matchResources limits what resources match this binding and may be mutated by it. + // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and + // matchConditions before the resource may be mutated. + // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints + // and matchConditions must match for the resource to be mutated. + // Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. 
+ // Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // +optional + optional MatchResources matchResources = 3; +} + +// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy. +message MutatingAdmissionPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingAdmissionPolicy. + repeated MutatingAdmissionPolicy items = 2; +} + +// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy. +message MutatingAdmissionPolicySpec { + // paramKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null. + // +optional + optional ParamKind paramKind = 1; + + // matchConstraints specifies what resources this policy is designed to validate. + // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // Required. + optional MatchResources matchConstraints = 2; + + // variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except matchConditions because matchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, variables must be sorted by the order of first appearance and acyclic. + // +listType=atomic + // +optional + repeated Variable variables = 3; + + // mutations contain operations to perform on matching objects. + // mutations may not be empty; a minimum of one mutation is required. + // mutations are evaluated in order, and are reinvoked according to + // the reinvocationPolicy. + // The mutations of a policy are invoked for each binding of this policy + // and reinvocation of mutations occurs on a per binding basis. + // + // +listType=atomic + // +optional + repeated Mutation mutations = 4; + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if paramKind refers to a non-existent Kind. + // A binding is invalid if paramRef.name refers to a non-existent resource. 
+ // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + optional string failurePolicy = 5; + + // matchConditions is a list of conditions that must be met for a request to be validated. + // Match conditions filter requests that have already been matched by the matchConstraints. + // An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + repeated MatchCondition matchConditions = 6; + + // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding + // as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: These mutations will not be called more than once per binding in a single admission evaluation. + // + // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of + // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only + // reinvoked when mutations change the object after this mutation is invoked. + // Required. + optional string reinvocationPolicy = 7; +} + +// Mutation specifies the CEL expression which is used to apply the Mutation. +message Mutation { + // patchType indicates the patch strategy used. + // Allowed values are "ApplyConfiguration" and "JSONPatch". + // Required. + // + // +unionDiscriminator + optional string patchType = 2; + + // applyConfiguration defines the desired configuration values of an object. + // The configuration is applied to the admission object using + // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). + // A CEL expression is used to create apply configuration. + optional ApplyConfiguration applyConfiguration = 3; + + // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. + // A CEL expression is used to create the JSON patch. + optional JSONPatch jsonPatch = 4; +} + // NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames. // +structType=atomic message NamedRuleWithOperations { @@ -211,7 +512,7 @@ message NamedRuleWithOperations { repeated string resourceNames = 1; // RuleWithOperations is a tuple of Operations and Resources. - optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; + optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; } // ParamKind is a tuple of Group Kind and Version. @@ -267,7 +568,7 @@ message ParamRef { // mutually exclusive properties. If one is set, the other must be unset. 
// // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; // `parameterNotFoundAction` controls the behavior of the binding when the resource // exists, and name or selector is valid, but there are no parameters @@ -295,7 +596,7 @@ message TypeChecking { message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; @@ -322,7 +623,7 @@ message ValidatingAdmissionPolicy { message ValidatingAdmissionPolicyBinding { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. optional ValidatingAdmissionPolicyBindingSpec spec = 2; @@ -333,7 +634,7 @@ message ValidatingAdmissionPolicyBindingList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of PolicyBinding. repeated ValidatingAdmissionPolicyBinding items = 2; @@ -409,7 +710,7 @@ message ValidatingAdmissionPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingAdmissionPolicy. repeated ValidatingAdmissionPolicy items = 2; @@ -514,7 +815,7 @@ message ValidatingAdmissionPolicyStatus { // +optional // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // Validation specifies the CEL expression which is used to apply the validation. 
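For orientation, here is a minimal sketch of how the new v1alpha1 mutating-admission types vendored above could be constructed from Go. It is illustrative only: the policy name, CEL apply-configuration expression, and resource rule are invented for demonstration and are not part of this change; only the type, field, and constant names (MutatingAdmissionPolicySpec, Mutation, PatchTypeApplyConfiguration, NeverReinvocationPolicy, and so on) come from the generated types in this diff.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	"k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// FailurePolicy is a *FailurePolicyType in the generated types; "Ignore" is the other allowed value.
	failFast := v1alpha1.FailurePolicyType("Fail")

	policy := v1alpha1.MutatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "add-environment-label"},
		Spec: v1alpha1.MutatingAdmissionPolicySpec{
			// matchConstraints is required; this hypothetical rule targets Deployments on CREATE and UPDATE.
			MatchConstraints: &v1alpha1.MatchResources{
				ResourceRules: []v1alpha1.NamedRuleWithOperations{{
					RuleWithOperations: admissionregistrationv1.RuleWithOperations{
						Operations: []admissionregistrationv1.OperationType{
							admissionregistrationv1.Create,
							admissionregistrationv1.Update,
						},
						Rule: admissionregistrationv1.Rule{
							APIGroups:   []string{"apps"},
							APIVersions: []string{"v1"},
							Resources:   []string{"deployments"},
						},
					},
				}},
			},
			// One mutation using the ApplyConfiguration patch type; the CEL expression is a made-up example
			// following the object-initializer form documented on the ApplyConfiguration message above.
			Mutations: []v1alpha1.Mutation{{
				PatchType: v1alpha1.PatchTypeApplyConfiguration,
				ApplyConfiguration: &v1alpha1.ApplyConfiguration{
					Expression: `Object{
						metadata: Object.metadata{
							labels: {"environment": "test"}
						}
					}`,
				},
			}},
			FailurePolicy:      &failFast,
			ReinvocationPolicy: v1alpha1.NeverReinvocationPolicy,
		},
	}

	fmt.Printf("policy %q mutates %v\n", policy.Name, policy.Spec.MatchConstraints.ResourceRules[0].Resources)
}

Note that FailurePolicy and ApplyConfiguration are pointer fields in the generated Go types, so literal values have to be taken by address, while ReinvocationPolicy reuses the admissionregistration/v1 ReinvocationPolicyType alias shown in the types.go hunk below.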
diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go index d4c2fbe80..eead376cc 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go @@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ValidatingAdmissionPolicyList{}, &ValidatingAdmissionPolicyBinding{}, &ValidatingAdmissionPolicyBindingList{}, + &MutatingAdmissionPolicy{}, + &MutatingAdmissionPolicyList{}, + &MutatingAdmissionPolicyBinding{}, + &MutatingAdmissionPolicyBindingList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index bd6b17e15..ee50fbe2d 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -142,7 +142,7 @@ type ValidatingAdmissionPolicyList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingAdmissionPolicy. - Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. @@ -404,7 +404,7 @@ type ValidatingAdmissionPolicyBindingList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of PolicyBinding. - Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. @@ -663,3 +663,346 @@ const ( Delete OperationType = v1.Delete Connect OperationType = v1.Connect ) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain. +type MutatingAdmissionPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the MutatingAdmissionPolicy. + Spec MutatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy. +type MutatingAdmissionPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingAdmissionPolicy. 
+ Items []MutatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy. +type MutatingAdmissionPolicySpec struct { + // paramKind specifies the kind of resources used to parameterize this policy. + // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. + // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. + // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null. + // +optional + ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"` + + // matchConstraints specifies what resources this policy is designed to validate. + // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. + // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API + // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // Required. + MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"` + + // variables contain definitions of variables that can be used in composition of other expressions. + // Each variable is defined as a named CEL expression. + // The variables defined here will be available under `variables` in other expressions of the policy + // except matchConditions because matchConditions are evaluated before the rest of the policy. + // + // The expression of a variable can refer to other variables defined earlier in the list but not those after. + // Thus, variables must be sorted by the order of first appearance and acyclic. + // +listType=atomic + // +optional + Variables []Variable `json:"variables,omitempty" protobuf:"bytes,3,rep,name=variables"` + + // mutations contain operations to perform on matching objects. + // mutations may not be empty; a minimum of one mutation is required. + // mutations are evaluated in order, and are reinvoked according to + // the reinvocationPolicy. + // The mutations of a policy are invoked for each binding of this policy + // and reinvocation of mutations occurs on a per binding basis. + // + // +listType=atomic + // +optional + Mutations []Mutation `json:"mutations,omitempty" protobuf:"bytes,4,rep,name=mutations"` + + // failurePolicy defines how to handle failures for the admission policy. Failures can + // occur from CEL expression parse errors, type check errors, runtime errors and invalid + // or mis-configured policy definitions or bindings. + // + // A policy is invalid if paramKind refers to a non-existent Kind. + // A binding is invalid if paramRef.name refers to a non-existent resource. + // + // failurePolicy does not define how validations that evaluate to false are handled. + // + // Allowed values are Ignore or Fail. Defaults to Fail. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,5,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchConditions is a list of conditions that must be met for a request to be validated. 
+ // Match conditions filter requests that have already been matched by the matchConstraints. + // An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // If a parameter object is provided, it can be accessed via the `params` handle in the same + // manner as validation expressions. + // + // The exact matching logic is (in order): + // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped. + // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated. + // 3. If any matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Fail, reject the request + // - If failurePolicy=Ignore, the policy is skipped + // + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +optional + MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"` + + // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding + // as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: These mutations will not be called more than once per binding in a single admission evaluation. + // + // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of + // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only + // reinvoked when mutations change the object after this mutation is invoked. + // Required. + ReinvocationPolicy ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,7,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` +} + +// Mutation specifies the CEL expression which is used to apply the Mutation. +type Mutation struct { + // patchType indicates the patch strategy used. + // Allowed values are "ApplyConfiguration" and "JSONPatch". + // Required. + // + // +unionDiscriminator + PatchType PatchType `json:"patchType" protobuf:"bytes,2,opt,name=patchType,casttype=PatchType"` + + // applyConfiguration defines the desired configuration values of an object. + // The configuration is applied to the admission object using + // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). + // A CEL expression is used to create apply configuration. + ApplyConfiguration *ApplyConfiguration `json:"applyConfiguration,omitempty" protobuf:"bytes,3,opt,name=applyConfiguration"` + + // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. + // A CEL expression is used to create the JSON patch. + JSONPatch *JSONPatch `json:"jsonPatch,omitempty" protobuf:"bytes,4,opt,name=jsonPatch"` +} + +// PatchType specifies the type of patch operation for a mutation. +// +enum +type PatchType string + +const ( + // ApplyConfiguration indicates that the mutation is using apply configuration to mutate the object. + PatchTypeApplyConfiguration PatchType = "ApplyConfiguration" + // JSONPatch indicates that the object is mutated through JSON Patch. + PatchTypeJSONPatch PatchType = "JSONPatch" +) + +// ApplyConfiguration defines the desired configuration values of an object. +type ApplyConfiguration struct { + // expression will be evaluated by CEL to create an apply configuration. 
+ // ref: https://github.com/google/cel-spec + // + // Apply configurations are declared in CEL using object initialization. For example, this CEL expression + // returns an apply configuration to set a single field: + // + // Object{ + // spec: Object.spec{ + // serviceAccountName: "example" + // } + // } + // + // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of + // values not included in the apply configuration. + // + // CEL expressions have access to the object types needed to create apply configurations: + // + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the + // object. No other metadata properties are accessible. + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"` +} + +// JSONPatch defines a JSON Patch. +type JSONPatch struct { + // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). + // ref: https://github.com/google/cel-spec + // + // expression must return an array of JSONPatch values. + // + // For example, this CEL expression returns a JSON patch to conditionally modify a value: + // + // [ + // JSONPatch{op: "test", path: "/spec/example", value: "Red"}, + // JSONPatch{op: "replace", path: "/spec/example", value: "Green"} + // ] + // + // To define an object for the patch value, use Object types. For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/spec/selector", + // value: Object.spec.selector{matchLabels: {"environment": "test"}} + // } + // ] + // + // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". 
For example: + // + // [ + // JSONPatch{ + // op: "add", + // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"), + // value: "test" + // }, + // ] + // + // CEL expressions have access to the types needed to create JSON patches and objects: + // + // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'. + // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string, + // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a + // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL + // function may be used to escape path keys containing '/' and '~'. + // - 'Object' - CEL type of the resource object. + // - 'Object.' - CEL type of object field (such as 'Object.spec') + // - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers') + // + // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables: + // + // - 'object' - The object from the incoming request. The value is null for DELETE requests. + // - 'oldObject' - The existing object. The value is null for CREATE requests. + // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). + // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. + // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. + // - 'variables' - Map of composited variables, from its name to its lazily evaluated value. + // For example, a variable named 'foo' can be accessed as 'variables.foo'. + // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. + // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz + // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the + // request resource. + // + // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) + // as well as: + // + // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively). + // + // + // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. + // Required. + Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"` +} + +// ReinvocationPolicyType specifies what type of policy the admission mutation uses. +// +enum +type ReinvocationPolicyType = v1.ReinvocationPolicyType + +const ( + // NeverReinvocationPolicy indicates that the mutation must not be called more than once in a + // single admission evaluation. + NeverReinvocationPolicy ReinvocationPolicyType = v1.NeverReinvocationPolicy + // IfNeededReinvocationPolicy indicates that the mutation may be called at least one + // additional time as part of the admission evaluation if the object being admitted is + // modified by other admission plugins after the initial mutation call. 
+ IfNeededReinvocationPolicy ReinvocationPolicyType = v1.IfNeededReinvocationPolicy +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. +// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators +// configure policies for clusters. +// +// For a given admission request, each binding will cause its policy to be +// evaluated N times, where N is 1 for policies/bindings that don't use +// params, otherwise N is the number of parameters selected by the binding. +// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget). +// +// Adding/removing policies, bindings, or params can not affect whether a +// given (policy, binding, param) combination is within its own CEL budget. +type MutatingAdmissionPolicyBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Specification of the desired behavior of the MutatingAdmissionPolicyBinding. + Spec MutatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding. +type MutatingAdmissionPolicyBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of PolicyBinding. + Items []MutatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding. +type MutatingAdmissionPolicyBindingSpec struct { + // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. + // If the referenced resource does not exist, this binding is considered invalid and will be ignored + // Required. + PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"` + + // paramRef specifies the parameter resource used to configure the admission control policy. + // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. + // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. + // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param. + // +optional + ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"` + + // matchResources limits what resources match this binding and may be mutated by it. + // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and + // matchConditions before the resource may be mutated. 
+ // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints + // and matchConditions must match for the resource to be mutated. + // Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. + // Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. + // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. + // '*' matches CREATE, UPDATE and CONNECT. + // +optional + MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"` +} diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go index dcf46b324..32222a81b 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -27,6 +27,15 @@ package v1alpha1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ApplyConfiguration = map[string]string{ + "": "ApplyConfiguration defines the desired configuration values of an object.", + "expression": "expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec\n\nApply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field:\n\n\tObject{\n\t spec: Object.spec{\n\t serviceAccountName: \"example\"\n\t }\n\t}\n\nApply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration.\n\nCEL expressions have access to the object types needed to create apply configurations:\n\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. 
No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.", +} + +func (ApplyConfiguration) SwaggerDoc() map[string]string { + return map_ApplyConfiguration +} + var map_AuditAnnotation = map[string]string{ "": "AuditAnnotation describes how to produce an audit annotation for an API request.", "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", @@ -47,6 +56,15 @@ func (ExpressionWarning) SwaggerDoc() map[string]string { return map_ExpressionWarning } +var map_JSONPatch = map[string]string{ + "": "JSONPatch defines a JSON Patch.", + "expression": "expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec\n\nexpression must return an array of JSONPatch values.\n\nFor example, this CEL expression returns a JSON patch to conditionally modify a value:\n\n\t [\n\t JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"},\n\t JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"}\n\t ]\n\nTo define an object for the patch value, use Object types. For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/spec/selector\",\n\t value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}}\n\t }\n\t ]\n\nTo use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"),\n\t value: \"test\"\n\t },\n\t ]\n\nCEL expressions have access to the types needed to create JSON patches and objects:\n\n- 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.\n See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,\n integer, array, map or object. If set, the 'path' and 'from' fields must be set to a\n [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL\n function may be used to escape path keys containing '/' and '~'.\n- 'Object' - CEL type of the resource object. - 'Object.' - CEL type of object field (such as 'Object.spec') - 'Object.....` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. 
- 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nCEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as:\n\n- 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively).\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.", +} + +func (JSONPatch) SwaggerDoc() map[string]string { + return map_JSONPatch +} + var map_MatchResources = map[string]string{ "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", @@ -60,6 +78,83 @@ func (MatchResources) SwaggerDoc() map[string]string { return map_MatchResources } +var map_MutatingAdmissionPolicy = map[string]string{ + "": "MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the MutatingAdmissionPolicy.", +} + +func (MutatingAdmissionPolicy) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicy +} + +var map_MutatingAdmissionPolicyBinding = map[string]string{ + "": "MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. 
MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding. Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).\n\nAdding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "Specification of the desired behavior of the MutatingAdmissionPolicyBinding.", +} + +func (MutatingAdmissionPolicyBinding) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyBinding +} + +var map_MutatingAdmissionPolicyBindingList = map[string]string{ + "": "MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of PolicyBinding.", +} + +func (MutatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyBindingList +} + +var map_MutatingAdmissionPolicyBindingSpec = map[string]string{ + "": "MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.", + "policyName": "policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "matchResources": "matchResources limits what resources match this binding and may be mutated by it. Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and matchConditions before the resource may be mutated. When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints and matchConditions must match for the resource to be mutated. Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT.", +} + +func (MutatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyBindingSpec +} + +var map_MutatingAdmissionPolicyList = map[string]string{ + "": "MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingAdmissionPolicy.", +} + +func (MutatingAdmissionPolicyList) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicyList +} + +var map_MutatingAdmissionPolicySpec = map[string]string{ + "": "MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.", + "paramKind": "paramKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.", + "matchConstraints": "matchConstraints specifies what resources this policy is designed to validate. The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT. Required.", + "variables": "variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.", + "mutations": "mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis.", + "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "matchConditions": "matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "reinvocationPolicy": "reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: These mutations will not be called more than once per binding in a single admission evaluation.\n\nIfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.", +} + +func (MutatingAdmissionPolicySpec) SwaggerDoc() map[string]string { + return map_MutatingAdmissionPolicySpec +} + +var map_Mutation = map[string]string{ + "": "Mutation specifies the CEL expression which is used to apply the Mutation.", + "patchType": "patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.", + "applyConfiguration": "applyConfiguration defines the desired configuration values of an object. The configuration is applied to the admission object using [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). A CEL expression is used to create apply configuration.", + "jsonPatch": "jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. A CEL expression is used to create the JSON patch.", +} + +func (Mutation) SwaggerDoc() map[string]string { + return map_Mutation +} + var map_NamedRuleWithOperations = map[string]string{ "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index 24cd0e4e9..97c159c74 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -26,6 +26,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration. +func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration { + if in == nil { + return nil + } + out := new(ApplyConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) { *out = *in @@ -58,6 +74,22 @@ func (in *ExpressionWarning) DeepCopy() *ExpressionWarning { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
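// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the vendored change): the new
// MutatingAdmissionPolicy / MutatingAdmissionPolicyBinding types and the CEL
// mutation docs above are easier to picture with a concrete object. The sketch
// below shows how the pieces might fit together. Field and constant names that
// do not appear in this diff (PatchType, PatchTypeApplyConfiguration,
// Expression, Fail, the ReinvocationPolicy field) are assumed from the
// documented "patchType"/"expression" keys and allowed values; treat them as
// placeholders rather than a confirmed API surface.
//
//	package example
//
//	import (
//		admv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//	)
//
//	// newExamplePolicyAndBinding builds a minimal policy that sets
//	// spec.serviceAccountName via an ApplyConfiguration CEL expression (the
//	// expression is taken verbatim from the generated docs above), plus the
//	// binding that selects the policy by name.
//	func newExamplePolicyAndBinding() (*admv1alpha1.MutatingAdmissionPolicy, *admv1alpha1.MutatingAdmissionPolicyBinding) {
//		failurePolicy := admv1alpha1.Fail // assumed FailurePolicyType constant
//		policy := &admv1alpha1.MutatingAdmissionPolicy{
//			ObjectMeta: metav1.ObjectMeta{Name: "set-service-account"},
//			Spec: admv1alpha1.MutatingAdmissionPolicySpec{
//				// Real policies must populate resourceRules in matchConstraints;
//				// left empty here to keep the sketch short.
//				MatchConstraints:   &admv1alpha1.MatchResources{},
//				FailurePolicy:      &failurePolicy,
//				ReinvocationPolicy: admv1alpha1.IfNeededReinvocationPolicy,
//				Mutations: []admv1alpha1.Mutation{{
//					PatchType: admv1alpha1.PatchTypeApplyConfiguration, // assumed constant name
//					ApplyConfiguration: &admv1alpha1.ApplyConfiguration{
//						Expression: `Object{spec: Object.spec{serviceAccountName: "example"}}`,
//					},
//				}},
//			},
//		}
//		binding := &admv1alpha1.MutatingAdmissionPolicyBinding{
//			ObjectMeta: metav1.ObjectMeta{Name: "set-service-account-binding"},
//			Spec: admv1alpha1.MutatingAdmissionPolicyBindingSpec{
//				PolicyName: "set-service-account",
//				// matchResources is optional on the binding; when unset, only the
//				// policy's matchConstraints/matchConditions restrict matching.
//			},
//		}
//		return policy, binding
//	}
// ---------------------------------------------------------------------------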
+func (in *JSONPatch) DeepCopyInto(out *JSONPatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch. +func (in *JSONPatch) DeepCopy() *JSONPatch { + if in == nil { + return nil + } + out := new(JSONPatch) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MatchCondition) DeepCopyInto(out *MatchCondition) { *out = *in @@ -119,6 +151,226 @@ func (in *MatchResources) DeepCopy() *MatchResources { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy. +func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding. +func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingAdmissionPolicyBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList. +func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
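// Editor's note (illustrative, not part of the generated file): the value of
// these generated helpers is that pointer-valued fields are re-allocated, so a
// copy can be mutated without aliasing the original. A minimal sketch, assuming
// the v1alpha1 package scope of this file:
//
//	orig := &MutatingAdmissionPolicyBinding{
//		Spec: MutatingAdmissionPolicyBindingSpec{
//			PolicyName:     "set-service-account",
//			MatchResources: &MatchResources{},
//		},
//	}
//	cp := orig.DeepCopy()
//	// The copy owns its own MatchResources allocation, so the pointers differ.
//	_ = cp.Spec.MatchResources != orig.Spec.MatchResources // true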
+func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) { + *out = *in + if in.ParamRef != nil { + in, out := &in.ParamRef, &out.ParamRef + *out = new(ParamRef) + (*in).DeepCopyInto(*out) + } + if in.MatchResources != nil { + in, out := &in.MatchResources, &out.MatchResources + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec. +func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingAdmissionPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList. +func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) { + *out = *in + if in.ParamKind != nil { + in, out := &in.ParamKind, &out.ParamKind + *out = new(ParamKind) + **out = **in + } + if in.MatchConstraints != nil { + in, out := &in.MatchConstraints, &out.MatchConstraints + *out = new(MatchResources) + (*in).DeepCopyInto(*out) + } + if in.Variables != nil { + in, out := &in.Variables, &out.Variables + *out = make([]Variable, len(*in)) + copy(*out, *in) + } + if in.Mutations != nil { + in, out := &in.Mutations, &out.Mutations + *out = make([]Mutation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]MatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec. +func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec { + if in == nil { + return nil + } + out := new(MutatingAdmissionPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *Mutation) DeepCopyInto(out *Mutation) { + *out = *in + if in.ApplyConfiguration != nil { + in, out := &in.ApplyConfiguration, &out.ApplyConfiguration + *out = new(ApplyConfiguration) + **out = **in + } + if in.JSONPatch != nil { + in, out := &in.JSONPatch, &out.JSONPatch + *out = new(JSONPatch) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation. +func (in *Mutation) DeepCopy() *Mutation { + if in == nil { + return nil + } + out := new(Mutation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) { *out = *in diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..91c813d5f --- /dev/null +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,166 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *MutatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *MutatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *MutatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *MutatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
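// Editor's note (illustrative, not part of the generated file): for the
// MutatingAdmissionPolicy types above these hooks return introduced=1.32,
// deprecated=1.35 (introduced plus three minors) and removed=1.38 (three more).
// A caller could gate behaviour on that window, for example:
//
//	// servedInRelease reports whether (major, minor) falls inside the
//	// [introduced, removed) window returned by the generated hooks.
//	func servedInRelease(p *MutatingAdmissionPolicy, major, minor int) bool {
//		im, imin := p.APILifecycleIntroduced() // 1, 32
//		rm, rmin := p.APILifecycleRemoved()    // 1, 38
//		afterIntroduced := major > im || (major == im && minor >= imin)
//		beforeRemoved := major < rm || (major == rm && minor < rmin)
//		return afterIntroduced && beforeRemoved
//	}
//
//	// servedInRelease(p, 1, 36) == true  (deprecated but still inside the window)
//	// servedInRelease(p, 1, 38) == false (at the removal release)
//
// Whether an alpha API is actually served also depends on cluster feature
// gates; the hooks only encode the generator's lifecycle metadata.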
+func (in *ValidatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 26 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ValidatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) { + return 1, 29 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ValidatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) { + return 1, 32 +} diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 91479acc2..30f99f64d 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -157,7 +157,7 @@ message MatchResources { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1; // ObjectSelector decides whether to run the validation based on if the // object has matching labels. objectSelector is evaluated against both @@ -171,7 +171,7 @@ message MatchResources { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2; // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. // The policy cares about an operation if it matches _any_ Rule. @@ -223,7 +223,7 @@ message MutatingWebhook { // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. // +listType=atomic - repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; + repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - // allowed values are Ignore or Fail. Defaults to Ignore. @@ -291,7 +291,7 @@ message MutatingWebhook { // // Default to the empty LabelSelector, which matches everything. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -305,7 +305,7 @@ message MutatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun @@ -379,7 +379,7 @@ message MutatingWebhook { message MutatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional @@ -395,7 +395,7 @@ message MutatingWebhookConfigurationList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of MutatingWebhookConfiguration. repeated MutatingWebhookConfiguration items = 2; @@ -410,7 +410,7 @@ message NamedRuleWithOperations { repeated string resourceNames = 1; // RuleWithOperations is a tuple of Operations and Resources. - optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; + optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2; } // ParamKind is a tuple of Group Kind and Version. @@ -468,7 +468,7 @@ message ParamRef { // mutually exclusive properties. If one is set, the other must be unset. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; // `parameterNotFoundAction` controls the behavior of the binding when the resource // exists, and name or selector is valid, but there are no parameters @@ -523,7 +523,7 @@ message TypeChecking { message ValidatingAdmissionPolicy { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicy. optional ValidatingAdmissionPolicySpec spec = 2; @@ -550,7 +550,7 @@ message ValidatingAdmissionPolicy { message ValidatingAdmissionPolicyBinding { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding. 
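// Editor's note on the recurring change in these .proto hunks: the only edit is
// a leading dot added to cross-package type references, e.g.
//
//	optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
//
// In protobuf syntax a leading dot anchors the name at the root of the proto
// namespace (fully-qualified lookup) instead of relative resolution, so it
// refers to the same message as before; the change appears to be purely a
// code-generator formatting update with no wire or API impact.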
optional ValidatingAdmissionPolicyBindingSpec spec = 2; @@ -561,7 +561,7 @@ message ValidatingAdmissionPolicyBindingList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of PolicyBinding. repeated ValidatingAdmissionPolicyBinding items = 2; @@ -639,7 +639,7 @@ message ValidatingAdmissionPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingAdmissionPolicy. repeated ValidatingAdmissionPolicy items = 2; @@ -744,7 +744,7 @@ message ValidatingAdmissionPolicyStatus { // +optional // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3; } // ValidatingWebhook describes an admission webhook and the resources and operations it applies to. @@ -767,7 +767,7 @@ message ValidatingWebhook { // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. // +listType=atomic - repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; + repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3; // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - // allowed values are Ignore or Fail. Defaults to Ignore. @@ -835,7 +835,7 @@ message ValidatingWebhook { // // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; // ObjectSelector decides whether to run the webhook based on if the // object has matching labels. objectSelector is evaluated against both @@ -849,7 +849,7 @@ message ValidatingWebhook { // users may skip the admission webhook by setting the labels. // Default to the empty LabelSelector, which matches everything. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; // SideEffects states whether this webhook has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun @@ -906,7 +906,7 @@ message ValidatingWebhook { message ValidatingWebhookConfiguration { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Webhooks is a list of webhooks and the affected resources and operations. // +optional @@ -922,7 +922,7 @@ message ValidatingWebhookConfigurationList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ValidatingWebhookConfiguration. repeated ValidatingWebhookConfiguration items = 2; diff --git a/constraint/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/constraint/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index cf1e29a6c..0f5903123 100644 --- a/constraint/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/constraint/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -158,7 +158,7 @@ type ValidatingAdmissionPolicyList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingAdmissionPolicy. - Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy. @@ -419,7 +419,7 @@ type ValidatingAdmissionPolicyBindingList struct { // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of PolicyBinding. - Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"` } // ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding. diff --git a/constraint/vendor/k8s.io/api/apidiscovery/v2/doc.go b/constraint/vendor/k8s.io/api/apidiscovery/v2/doc.go index d47aa8597..4f3ad5f13 100644 --- a/constraint/vendor/k8s.io/api/apidiscovery/v2/doc.go +++ b/constraint/vendor/k8s.io/api/apidiscovery/v2/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=apidiscovery.k8s.io package v2 // import "k8s.io/api/apidiscovery/v2" diff --git a/constraint/vendor/k8s.io/api/apidiscovery/v2/generated.proto b/constraint/vendor/k8s.io/api/apidiscovery/v2/generated.proto index fa56318a6..62f2d7f2c 100644 --- a/constraint/vendor/k8s.io/api/apidiscovery/v2/generated.proto +++ b/constraint/vendor/k8s.io/api/apidiscovery/v2/generated.proto @@ -38,7 +38,7 @@ message APIGroupDiscovery { // name is allowed to be "" to represent the legacy, ungroupified resources. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // versions are the versions supported in this group. They are sorted in descending order of preference, // with the preferred version being the first entry. @@ -55,7 +55,7 @@ message APIGroupDiscoveryList { // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of groups for discovery. The groups are listed in priority order. 
repeated APIGroupDiscovery items = 2; @@ -72,7 +72,7 @@ message APIResourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // scope indicates the scope of a resource, either Cluster or Namespaced optional string scope = 3; @@ -112,7 +112,7 @@ message APISubresourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // Some subresources do not return normal resources, these will have null or empty return types. - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // acceptedTypes describes the kinds that this endpoint accepts. // Subresources may accept the standard content types or define @@ -122,7 +122,7 @@ message APISubresourceDiscovery { // +listMapKey=group // +listMapKey=version // +listMapKey=kind - repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; // verbs is a list of supported API operation types (this includes // but is not limited to get, list, watch, create, update, patch, diff --git a/constraint/vendor/k8s.io/api/apidiscovery/v2/types.go b/constraint/vendor/k8s.io/api/apidiscovery/v2/types.go index f0e31bcde..449679b61 100644 --- a/constraint/vendor/k8s.io/api/apidiscovery/v2/types.go +++ b/constraint/vendor/k8s.io/api/apidiscovery/v2/types.go @@ -21,6 +21,7 @@ import ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 // APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery. // This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated @@ -37,6 +38,7 @@ type APIGroupDiscoveryList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.30 // APIGroupDiscovery holds information about which resources are being served for all version of the API Group. // It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version. diff --git a/constraint/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..b7132c647 --- /dev/null +++ b/constraint/vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscovery) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIGroupDiscoveryList) APILifecycleIntroduced() (major, minor int) { + return 1, 30 +} diff --git a/constraint/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto b/constraint/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto index a09af750b..e9ae88072 100644 --- a/constraint/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto @@ -38,7 +38,7 @@ message APIGroupDiscovery { // name is allowed to be "" to represent the legacy, ungroupified resources. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // versions are the versions supported in this group. They are sorted in descending order of preference, // with the preferred version being the first entry. @@ -55,7 +55,7 @@ message APIGroupDiscoveryList { // ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of groups for discovery. The groups are listed in priority order. repeated APIGroupDiscovery items = 2; @@ -72,7 +72,7 @@ message APIResourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // scope indicates the scope of a resource, either Cluster or Namespaced optional string scope = 3; @@ -112,7 +112,7 @@ message APISubresourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. 
// Some subresources do not return normal resources, these will have null or empty return types. - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // acceptedTypes describes the kinds that this endpoint accepts. // Subresources may accept the standard content types or define @@ -122,7 +122,7 @@ message APISubresourceDiscovery { // +listMapKey=group // +listMapKey=version // +listMapKey=kind - repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3; // verbs is a list of supported API operation types (this includes // but is not limited to get, list, watch, create, update, patch, diff --git a/constraint/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/constraint/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto index ef4429048..8a7786072 100644 --- a/constraint/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto +++ b/constraint/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto @@ -52,7 +52,7 @@ message ServerStorageVersion { // Storage version of a specific resource. message StorageVersion { // The name is .. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is an empty spec. It is here to comply with Kubernetes API style. optional StorageVersionSpec spec = 2; @@ -77,7 +77,7 @@ message StorageVersionCondition { optional int64 observedGeneration = 3; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // The reason for the condition's last transition. // +required @@ -93,7 +93,7 @@ message StorageVersionList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items holds a list of StorageVersion repeated StorageVersion items = 2; diff --git a/constraint/vendor/k8s.io/api/apps/v1/doc.go b/constraint/vendor/k8s.io/api/apps/v1/doc.go index 61dc97bde..d189e860f 100644 --- a/constraint/vendor/k8s.io/api/apps/v1/doc.go +++ b/constraint/vendor/k8s.io/api/apps/v1/doc.go @@ -17,5 +17,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/apps/v1" diff --git a/constraint/vendor/k8s.io/api/apps/v1/generated.proto b/constraint/vendor/k8s.io/api/apps/v1/generated.proto index 900141686..388e638f4 100644 --- a/constraint/vendor/k8s.io/api/apps/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/apps/v1/generated.proto @@ -43,10 +43,10 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data is the serialized representation of the state. 
- optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // Revision indicates the revision of the state represented by Data. optional int64 revision = 3; @@ -56,7 +56,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -67,7 +67,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -93,7 +93,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -109,7 +109,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -121,7 +121,7 @@ message DaemonSetSpec { // Must match in order to be controlled. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node @@ -129,7 +129,7 @@ message DaemonSetSpec { // selector is specified). // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -225,7 +225,7 @@ message Deployment { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -245,10 +245,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. 
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -261,7 +261,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -277,11 +277,11 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // It must match the pod template's labels. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -376,7 +376,7 @@ message ReplicaSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -402,7 +402,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -418,7 +418,7 @@ message ReplicaSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -444,13 +444,13 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. @@ -501,7 +501,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -522,7 +522,7 @@ message RollingUpdateDaemonSet { // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -538,7 +538,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -552,7 +552,7 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -572,7 +572,7 @@ message RollingUpdateStatefulSetStrategy { // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it // will be counted towards MaxUnavailable. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // StatefulSet represents a set of pods with consistent identities. @@ -586,7 +586,7 @@ message StatefulSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -608,7 +608,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -624,7 +624,7 @@ message StatefulSetList { // Standard list's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of stateful sets. repeated StatefulSet items = 2; @@ -675,7 +675,7 @@ message StatefulSetSpec { // selector is a label query over pods that should match the replica count. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -684,7 +684,7 @@ message StatefulSetSpec { // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -695,7 +695,7 @@ message StatefulSetSpec { // TODO: Define the behavior if a claim already exists with the same name. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -737,15 +737,13 @@ message StatefulSetSpec { // volume claims are created as needed and retained until manually deleted. This // policy allows the lifecycle to be altered, for example by deleting persistent // volume claims when their stateful set is deleted, or when their pod is scaled - // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, - // which is alpha. +optional + // down. + // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. 
// +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/constraint/vendor/k8s.io/api/apps/v1/types.go b/constraint/vendor/k8s.io/api/apps/v1/types.go index 96ff62098..a68690b44 100644 --- a/constraint/vendor/k8s.io/api/apps/v1/types.go +++ b/constraint/vendor/k8s.io/api/apps/v1/types.go @@ -37,6 +37,7 @@ const ( // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: @@ -141,7 +142,7 @@ const ( // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will not be deleted. RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" - // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // DeletePersistentVolumeClaimRetentionPolicyType specifies that // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will be deleted in the scenario specified in // StatefulSetPersistentVolumeClaimRetentionPolicy. @@ -254,15 +255,13 @@ type StatefulSetSpec struct { // volume claims are created as needed and retained until manually deleted. This // policy allows the lifecycle to be altered, for example by deleting persistent // volume claims when their stateful set is deleted, or when their pod is scaled - // down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, - // which is alpha. +optional + // down. + // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } @@ -335,6 +334,7 @@ type StatefulSetCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // StatefulSetList is a collection of StatefulSets. type StatefulSetList struct { @@ -353,6 +353,7 @@ type StatefulSetList struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { @@ -555,6 +556,7 @@ type DeploymentCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DeploymentList is a list of Deployments. 
type DeploymentList struct { @@ -747,6 +749,7 @@ type DaemonSetCondition struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { @@ -778,6 +781,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // DaemonSetList is a collection of daemon sets. type DaemonSetList struct { @@ -796,6 +800,7 @@ type DaemonSetList struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ReplicaSet ensures that a specified number of pod replicas are running at any given time. type ReplicaSet struct { @@ -823,6 +828,7 @@ type ReplicaSet struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ReplicaSetList is a collection of ReplicaSets. type ReplicaSetList struct { @@ -925,6 +931,7 @@ type ReplicaSetCondition struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ControllerRevision implements an immutable snapshot of state data. Clients // are responsible for serializing and deserializing the objects that contain @@ -950,6 +957,7 @@ type ControllerRevision struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.9 // ControllerRevisionList is a resource containing a list of ControllerRevision objects. type ControllerRevisionList struct { diff --git a/constraint/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 6676da064..341ecdadb 100644 --- a/constraint/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -354,8 +354,8 @@ var map_StatefulSetSpec = map[string]string{ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. 
This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. +optional", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/constraint/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..34a036b62 --- /dev/null +++ b/constraint/vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,82 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ControllerRevision) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ControllerRevisionList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DaemonSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *DaemonSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Deployment) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicaSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicaSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StatefulSet) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StatefulSetList) APILifecycleIntroduced() (major, minor int) { + return 1, 9 +} diff --git a/constraint/vendor/k8s.io/api/apps/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/apps/v1beta1/generated.proto index 896562225..46d7bfdf9 100644 --- a/constraint/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -45,10 +45,10 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // revision indicates the revision of the state represented by Data. 
optional int64 revision = 3; @@ -58,7 +58,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -70,7 +70,7 @@ message ControllerRevisionList { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -90,10 +90,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -106,7 +106,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -136,11 +136,11 @@ message DeploymentSpec { // selector is the label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -254,7 +254,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -268,7 +268,7 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -287,14 +287,14 @@ message RollingUpdateStatefulSetStrategy { // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it // will be counted towards MaxUnavailable. 
// +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -342,7 +342,7 @@ message ScaleStatus { // map to the same storage identity. message StatefulSet { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -364,7 +364,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -378,7 +378,7 @@ message StatefulSetCondition { // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated StatefulSet items = 2; } @@ -429,7 +429,7 @@ message StatefulSetSpec { // If empty, defaulted to labels on the pod template. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -437,7 +437,7 @@ message StatefulSetSpec { // of the StatefulSet. Each pod will be named with the format // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -448,7 +448,7 @@ message StatefulSetSpec { // TODO: Define the behavior if a claim already exists with the same name. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -486,16 +486,13 @@ message StatefulSetSpec { optional int32 minReadySeconds = 9; // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from - // the StatefulSet VolumeClaimTemplates. This requires the - // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // the StatefulSet VolumeClaimTemplates. 
// +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/constraint/vendor/k8s.io/api/apps/v1beta1/types.go b/constraint/vendor/k8s.io/api/apps/v1beta1/types.go index bdf9f93a9..bc4851957 100644 --- a/constraint/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/constraint/vendor/k8s.io/api/apps/v1beta1/types.go @@ -181,11 +181,11 @@ const ( // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will not be deleted. RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" - // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // DeletePersistentVolumeClaimRetentionPolicyType specifies that // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will be deleted in the scenario specified in // StatefulSetPersistentVolumeClaimRetentionPolicy. - RetentionPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" + DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" ) // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs @@ -290,16 +290,13 @@ type StatefulSetSpec struct { MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from - // the StatefulSet VolumeClaimTemplates. This requires the - // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // the StatefulSet VolumeClaimTemplates. // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. 
// +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } diff --git a/constraint/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index a62e9869d..1381d75dc 100644 --- a/constraint/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -258,8 +258,8 @@ var map_StatefulSetSpec = map[string]string{ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/constraint/vendor/k8s.io/api/apps/v1beta2/generated.proto b/constraint/vendor/k8s.io/api/apps/v1beta2/generated.proto index 3ae8a8009..c08a4c78b 100644 --- a/constraint/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/constraint/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -45,10 +45,10 @@ message ControllerRevision { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; // Revision indicates the revision of the state represented by Data. 
optional int64 revision = 3; @@ -58,7 +58,7 @@ message ControllerRevision { message ControllerRevisionList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ControllerRevisions repeated ControllerRevision items = 2; @@ -71,7 +71,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -97,7 +97,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -113,7 +113,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -125,7 +125,7 @@ message DaemonSetSpec { // Must match in order to be controlled. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node @@ -133,7 +133,7 @@ message DaemonSetSpec { // selector is specified). // The only allowed template.spec.restartPolicy value is "Always". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -230,7 +230,7 @@ message DaemonSetUpdateStrategy { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -250,10 +250,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. 
optional string reason = 4; @@ -266,7 +266,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -282,11 +282,11 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // It must match the pod template's labels. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -382,7 +382,7 @@ message ReplicaSet { // be the same as the Pod(s) that the ReplicaSet manages. // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -408,7 +408,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -424,7 +424,7 @@ message ReplicaSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -450,13 +450,13 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. @@ -507,7 +507,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. 
// +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -528,7 +528,7 @@ message RollingUpdateDaemonSet { // so resource intensive daemonsets should take into account that they may // cause evictions during disruption. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -544,7 +544,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods. @@ -558,7 +558,7 @@ message RollingUpdateDeployment { // new ReplicaSet can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -578,14 +578,14 @@ message RollingUpdateStatefulSetStrategy { // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it // will be counted towards MaxUnavailable. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -634,7 +634,7 @@ message ScaleStatus { // map to the same storage identity. message StatefulSet { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired identities of pods in this set. // +optional @@ -656,7 +656,7 @@ message StatefulSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -670,7 +670,7 @@ message StatefulSetCondition { // StatefulSetList is a collection of StatefulSets. 
message StatefulSetList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated StatefulSet items = 2; } @@ -720,7 +720,7 @@ message StatefulSetSpec { // selector is a label query over pods that should match the replica count. // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -729,7 +729,7 @@ message StatefulSetSpec { // -. For example, a pod in a StatefulSet named // "web" with index number "3" would be named "web-3". // The only allowed template.spec.restartPolicy value is "Always". - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // volumeClaimTemplates is a list of claims that pods are allowed to reference. // The StatefulSet controller is responsible for mapping network identities to @@ -740,7 +740,7 @@ message StatefulSetSpec { // TODO: Define the behavior if a claim already exists with the same name. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; // serviceName is the name of the service that governs this StatefulSet. // This service must exist before the StatefulSet, and is responsible for @@ -778,16 +778,13 @@ message StatefulSetSpec { optional int32 minReadySeconds = 9; // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from - // the StatefulSet VolumeClaimTemplates. This requires the - // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // the StatefulSet VolumeClaimTemplates. // +optional optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10; // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional optional StatefulSetOrdinals ordinals = 11; } diff --git a/constraint/vendor/k8s.io/api/apps/v1beta2/types.go b/constraint/vendor/k8s.io/api/apps/v1beta2/types.go index 6981c2a17..c2624a941 100644 --- a/constraint/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/constraint/vendor/k8s.io/api/apps/v1beta2/types.go @@ -191,11 +191,11 @@ const ( // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will not be deleted. RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain" - // RetentionPersistentVolumeClaimRetentionPolicyType specifies that + // DeletePersistentVolumeClaimRetentionPolicyType specifies that // PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates // will be deleted in the scenario specified in // StatefulSetPersistentVolumeClaimRetentionPolicy. 
- RetentionPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" + DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete" ) // StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs @@ -300,16 +300,13 @@ type StatefulSetSpec struct { MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from - // the StatefulSet VolumeClaimTemplates. This requires the - // StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha. + // the StatefulSet VolumeClaimTemplates. // +optional PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"` // ordinals controls the numbering of replica indices in a StatefulSet. The // default ordinals behavior assigns a "0" index to the first replica and - // increments the index by one for each additional replica requested. Using - // the ordinals field requires the StatefulSetStartOrdinal feature gate to be - // enabled, which is beta. + // increments the index by one for each additional replica requested. // +optional Ordinals *StatefulSetOrdinals `json:"ordinals,omitempty" protobuf:"bytes,11,opt,name=ordinals"` } diff --git a/constraint/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index d7e920991..beec4b755 100644 --- a/constraint/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -382,8 +382,8 @@ var map_StatefulSetSpec = map[string]string{ "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", - "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", + "ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. 
The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.", } func (StatefulSetSpec) SwaggerDoc() map[string]string { diff --git a/constraint/vendor/k8s.io/api/authentication/v1/doc.go b/constraint/vendor/k8s.io/api/authentication/v1/doc.go index 1614265bd..3bdc89bad 100644 --- a/constraint/vendor/k8s.io/api/authentication/v1/doc.go +++ b/constraint/vendor/k8s.io/api/authentication/v1/doc.go @@ -18,5 +18,6 @@ limitations under the License. // +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/authentication/v1" diff --git a/constraint/vendor/k8s.io/api/authentication/v1/generated.proto b/constraint/vendor/k8s.io/api/authentication/v1/generated.proto index 1fe2f4f2c..ae9763576 100644 --- a/constraint/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/authentication/v1/generated.proto @@ -63,7 +63,7 @@ message SelfSubjectReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Status is filled in by the server with the user attributes. optional SelfSubjectReviewStatus status = 2; @@ -81,7 +81,7 @@ message TokenRequest { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenRequestSpec spec = 2; @@ -123,7 +123,7 @@ message TokenRequestStatus { optional string token = 1; // ExpirationTimestamp is the time of expiration of the returned token. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; } // TokenReview attempts to authenticate a token to a known user. @@ -133,7 +133,7 @@ message TokenReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenReviewSpec spec = 2; diff --git a/constraint/vendor/k8s.io/api/authentication/v1/types.go b/constraint/vendor/k8s.io/api/authentication/v1/types.go index 4f4400e30..2dc0707c4 100644 --- a/constraint/vendor/k8s.io/api/authentication/v1/types.go +++ b/constraint/vendor/k8s.io/api/authentication/v1/types.go @@ -45,6 +45,7 @@ const ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // TokenReview attempts to authenticate a token to a known user. 
// Note: TokenReview requests may be cached by the webhook token authenticator @@ -134,6 +135,7 @@ func (t ExtraValue) String() string { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.10 // TokenRequest requests a token for a given service account. type TokenRequest struct { @@ -206,6 +208,7 @@ type BoundObjectReference struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.28 // SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. // When using impersonation, users will receive the user info of the user being impersonated. If impersonation or diff --git a/constraint/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..b612bdec4 --- /dev/null +++ b/constraint/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) { + return 1, 28 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *TokenRequest) APILifecycleIntroduced() (major, minor int) { + return 1, 10 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *TokenReview) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} diff --git a/constraint/vendor/k8s.io/api/authentication/v1alpha1/generated.proto b/constraint/vendor/k8s.io/api/authentication/v1alpha1/generated.proto index 51d925244..4585e5cdd 100644 --- a/constraint/vendor/k8s.io/api/authentication/v1alpha1/generated.proto +++ b/constraint/vendor/k8s.io/api/authentication/v1alpha1/generated.proto @@ -36,7 +36,7 @@ message SelfSubjectReview { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Status is filled in by the server with the user attributes. optional SelfSubjectReviewStatus status = 2; @@ -46,6 +46,6 @@ message SelfSubjectReview { message SelfSubjectReviewStatus { // User attributes of the user making this request. // +optional - optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 1; } diff --git a/constraint/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/authentication/v1beta1/generated.proto index 61658245d..d0f6fe440 100644 --- a/constraint/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -45,7 +45,7 @@ message SelfSubjectReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Status is filled in by the server with the user attributes. optional SelfSubjectReviewStatus status = 2; @@ -55,7 +55,7 @@ message SelfSubjectReview { message SelfSubjectReviewStatus { // User attributes of the user making this request. // +optional - optional k8s.io.api.authentication.v1.UserInfo userInfo = 1; + optional .k8s.io.api.authentication.v1.UserInfo userInfo = 1; } // TokenReview attempts to authenticate a token to a known user. @@ -65,7 +65,7 @@ message TokenReview { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional TokenReviewSpec spec = 2; diff --git a/constraint/vendor/k8s.io/api/authorization/v1/doc.go b/constraint/vendor/k8s.io/api/authorization/v1/doc.go index cf100e6b7..77e5a19c4 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1/doc.go +++ b/constraint/vendor/k8s.io/api/authorization/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=authorization.k8s.io package v1 // import "k8s.io/api/authorization/v1" diff --git a/constraint/vendor/k8s.io/api/authorization/v1/generated.pb.go b/constraint/vendor/k8s.io/api/authorization/v1/generated.pb.go index dfa109b42..aed9a3a47 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/constraint/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -26,6 +26,7 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -72,10 +73,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() { var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *FieldSelectorAttributes) Reset() { *m = FieldSelectorAttributes{} } +func (*FieldSelectorAttributes) ProtoMessage() {} +func (*FieldSelectorAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_aafd0e5e70cec678, []int{1} +} +func (m *FieldSelectorAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldSelectorAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldSelectorAttributes.Merge(m, src) +} +func (m *FieldSelectorAttributes) XXX_Size() int { + return m.Size() +} +func (m *FieldSelectorAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_FieldSelectorAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldSelectorAttributes proto.InternalMessageInfo + +func (m *LabelSelectorAttributes) Reset() { *m = LabelSelectorAttributes{} } +func (*LabelSelectorAttributes) ProtoMessage() {} +func (*LabelSelectorAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_aafd0e5e70cec678, []int{2} +} +func (m *LabelSelectorAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelectorAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelectorAttributes.Merge(m, src) +} +func (m *LabelSelectorAttributes) XXX_Size() int { + return m.Size() +} +func (m *LabelSelectorAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelectorAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelectorAttributes proto.InternalMessageInfo + func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{1} + return fileDescriptor_aafd0e5e70cec678, []int{3} } func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -103,7 +160,7 @@ var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } func (*NonResourceAttributes) ProtoMessage() {} func (*NonResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{2} + return fileDescriptor_aafd0e5e70cec678, []int{4} } func (m *NonResourceAttributes) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -131,7 +188,7 @@ var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } func (*NonResourceRule) ProtoMessage() {} func (*NonResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{3} + return fileDescriptor_aafd0e5e70cec678, []int{5} } func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +216,7 @@ var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } func (*ResourceAttributes) ProtoMessage() {} func (*ResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{4} + return fileDescriptor_aafd0e5e70cec678, []int{6} } func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +244,7 @@ var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo func (m *ResourceRule) Reset() { *m = ResourceRule{} } func (*ResourceRule) ProtoMessage() {} func (*ResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{5} + return fileDescriptor_aafd0e5e70cec678, []int{7} } func (m *ResourceRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +272,7 @@ var xxx_messageInfo_ResourceRule proto.InternalMessageInfo func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } func (*SelfSubjectAccessReview) ProtoMessage() {} func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{6} + return fileDescriptor_aafd0e5e70cec678, []int{8} } func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -243,7 +300,7 @@ var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{7} + return fileDescriptor_aafd0e5e70cec678, []int{9} } func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -271,7 +328,7 @@ var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } func (*SelfSubjectRulesReview) ProtoMessage() {} func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{8} + return fileDescriptor_aafd0e5e70cec678, []int{10} } func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -299,7 +356,7 @@ var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{9} + return fileDescriptor_aafd0e5e70cec678, []int{11} } func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -327,7 +384,7 @@ var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } func (*SubjectAccessReview) ProtoMessage() {} func (*SubjectAccessReview) Descriptor() ([]byte, []int) { - return 
fileDescriptor_aafd0e5e70cec678, []int{10} + return fileDescriptor_aafd0e5e70cec678, []int{12} } func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,7 +412,7 @@ var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{11} + return fileDescriptor_aafd0e5e70cec678, []int{13} } func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,7 +440,7 @@ var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{12} + return fileDescriptor_aafd0e5e70cec678, []int{14} } func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -411,7 +468,7 @@ var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_aafd0e5e70cec678, []int{13} + return fileDescriptor_aafd0e5e70cec678, []int{15} } func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,6 +495,8 @@ var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") + proto.RegisterType((*FieldSelectorAttributes)(nil), "k8s.io.api.authorization.v1.FieldSelectorAttributes") + proto.RegisterType((*LabelSelectorAttributes)(nil), "k8s.io.api.authorization.v1.LabelSelectorAttributes") proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.LocalSubjectAccessReview") proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1.NonResourceAttributes") proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1.NonResourceRule") @@ -459,78 +518,85 @@ func init() { } var fileDescriptor_aafd0e5e70cec678 = []byte{ - // 1126 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xfa, 0x4f, 0x6a, 0x3f, 0x37, 0x24, 0x9d, 0x28, 0xcd, 0x36, 0x11, 0x76, 0xb4, 0x48, - 0x90, 0xaa, 0x65, 0x97, 0x58, 0x6d, 0x13, 0x55, 0xaa, 0x90, 0xad, 0x46, 0x28, 0x52, 0x5b, 0xaa, - 0x89, 0x12, 0x89, 0x22, 0x10, 0xe3, 0xf5, 0xc4, 0x5e, 0x62, 0xef, 0x2e, 0x3b, 0xbb, 0x0e, 0xe1, - 0x54, 0x89, 0x2f, 0xc0, 0x91, 0x03, 0x07, 0xbe, 0x01, 0x17, 0x24, 0x6e, 0x1c, 0x38, 0xa0, 0x1c, - 0x7b, 0x2c, 0x12, 0xb2, 0xc8, 0x72, 0xe6, 0x3b, 0xa0, 0x99, 0x1d, 0x7b, 0xd7, 0xc9, 0xda, 0x8d, - 0x39, 0xd0, 0x4b, 0x6f, 0xde, 0xf7, 0xfb, 0xbd, 0x37, 0x6f, 0xde, 0xbf, 0x79, 0x86, 0x5b, 0x47, - 0xdb, 0x4c, 0xb7, 0x1c, 0x83, 0xb8, 0x96, 0x41, 0x02, 0xbf, 0xe3, 0x78, 0xd6, 0x37, 0xc4, 0xb7, - 0x1c, 0xdb, 0xe8, 0x6f, 0x1a, 0x6d, 0x6a, 0x53, 0x8f, 0xf8, 0xb4, 0xa5, 0xbb, 0x9e, 0xe3, 0x3b, - 0x68, 0x2d, 0x22, 0xeb, 0xc4, 0xb5, 0xf4, 0x31, 0xb2, 0xde, 0xdf, 0x5c, 0x7d, 0xbf, 0x6d, 0xf9, - 0x9d, 0xa0, 0xa9, 0x9b, 0x4e, 0xcf, 0x68, 0x3b, 0x6d, 0xc7, 0x10, 0x3a, 0xcd, 0xe0, 0x50, 0x7c, - 0x89, 0x0f, 0xf1, 
0x2b, 0xb2, 0xb5, 0x7a, 0x27, 0x3e, 0xb8, 0x47, 0xcc, 0x8e, 0x65, 0x53, 0xef, - 0xc4, 0x70, 0x8f, 0xda, 0x5c, 0xc0, 0x8c, 0x1e, 0xf5, 0x49, 0x8a, 0x07, 0xab, 0xc6, 0x24, 0x2d, - 0x2f, 0xb0, 0x7d, 0xab, 0x47, 0x2f, 0x28, 0xdc, 0x7b, 0x95, 0x02, 0x33, 0x3b, 0xb4, 0x47, 0xce, - 0xeb, 0x69, 0x5b, 0x00, 0x3b, 0x5f, 0xfb, 0x1e, 0x39, 0x20, 0xdd, 0x80, 0xa2, 0x2a, 0x14, 0x2c, - 0x9f, 0xf6, 0x98, 0xaa, 0xac, 0xe7, 0x36, 0x4a, 0x8d, 0x52, 0x38, 0xa8, 0x16, 0x76, 0xb9, 0x00, - 0x47, 0xf2, 0xfb, 0xc5, 0xef, 0x7f, 0xac, 0x66, 0x9e, 0xff, 0xb9, 0x9e, 0xd1, 0x7e, 0xce, 0x82, - 0xfa, 0xc8, 0x31, 0x49, 0x77, 0x2f, 0x68, 0x7e, 0x49, 0x4d, 0xbf, 0x6e, 0x9a, 0x94, 0x31, 0x4c, - 0xfb, 0x16, 0x3d, 0x46, 0x5f, 0x40, 0x91, 0xdf, 0xac, 0x45, 0x7c, 0xa2, 0x2a, 0xeb, 0xca, 0x46, - 0xb9, 0xf6, 0x81, 0x1e, 0xc7, 0x74, 0xe4, 0xa0, 0xee, 0x1e, 0xb5, 0xb9, 0x80, 0xe9, 0x9c, 0xad, - 0xf7, 0x37, 0xf5, 0x8f, 0x85, 0xad, 0xc7, 0xd4, 0x27, 0x0d, 0x74, 0x3a, 0xa8, 0x66, 0xc2, 0x41, - 0x15, 0x62, 0x19, 0x1e, 0x59, 0x45, 0x07, 0x90, 0x67, 0x2e, 0x35, 0xd5, 0xac, 0xb0, 0x7e, 0x47, - 0x9f, 0x92, 0x31, 0x3d, 0xc5, 0xc3, 0x3d, 0x97, 0x9a, 0x8d, 0xab, 0xf2, 0x84, 0x3c, 0xff, 0xc2, - 0xc2, 0x1e, 0xfa, 0x1c, 0xe6, 0x98, 0x4f, 0xfc, 0x80, 0xa9, 0x39, 0x61, 0xf9, 0xde, 0xcc, 0x96, - 0x85, 0x76, 0xe3, 0x2d, 0x69, 0x7b, 0x2e, 0xfa, 0xc6, 0xd2, 0xaa, 0xf6, 0x29, 0x2c, 0x3f, 0x71, - 0x6c, 0x4c, 0x99, 0x13, 0x78, 0x26, 0xad, 0xfb, 0xbe, 0x67, 0x35, 0x03, 0x9f, 0x32, 0xb4, 0x0e, - 0x79, 0x97, 0xf8, 0x1d, 0x11, 0xae, 0x52, 0xec, 0xda, 0x53, 0xe2, 0x77, 0xb0, 0x40, 0x38, 0xa3, - 0x4f, 0xbd, 0xa6, 0xb8, 0x72, 0x82, 0x71, 0x40, 0xbd, 0x26, 0x16, 0x88, 0xf6, 0x15, 0x2c, 0x24, - 0x8c, 0xe3, 0xa0, 0x2b, 0x32, 0xca, 0xa1, 0xb1, 0x8c, 0x72, 0x0d, 0x86, 0x23, 0x39, 0x7a, 0x00, - 0x0b, 0x76, 0xac, 0xb3, 0x8f, 0x1f, 0x31, 0x35, 0x2b, 0xa8, 0x4b, 0xe1, 0xa0, 0x9a, 0x34, 0xc7, - 0x21, 0x7c, 0x9e, 0xab, 0xfd, 0x9a, 0x05, 0x94, 0x72, 0x1b, 0x03, 0x4a, 0x36, 0xe9, 0x51, 0xe6, - 0x12, 0x93, 0xca, 0x2b, 0x5d, 0x93, 0x0e, 0x97, 0x9e, 0x0c, 0x01, 0x1c, 0x73, 0x5e, 0x7d, 0x39, - 0xf4, 0x0e, 0x14, 0xda, 0x9e, 0x13, 0xb8, 0x22, 0x31, 0xa5, 0xc6, 0xbc, 0xa4, 0x14, 0x3e, 0xe2, - 0x42, 0x1c, 0x61, 0xe8, 0x26, 0x5c, 0xe9, 0x53, 0x8f, 0x59, 0x8e, 0xad, 0xe6, 0x05, 0x6d, 0x41, - 0xd2, 0xae, 0x1c, 0x44, 0x62, 0x3c, 0xc4, 0xd1, 0x6d, 0x28, 0x7a, 0xd2, 0x71, 0xb5, 0x20, 0xb8, - 0x8b, 0x92, 0x5b, 0x1c, 0x45, 0x70, 0xc4, 0x40, 0x77, 0xa1, 0xcc, 0x82, 0xe6, 0x48, 0x61, 0x4e, - 0x28, 0x2c, 0x49, 0x85, 0xf2, 0x5e, 0x0c, 0xe1, 0x24, 0x8f, 0x5f, 0x8b, 0xdf, 0x51, 0xbd, 0x32, - 0x7e, 0x2d, 0x1e, 0x02, 0x2c, 0x10, 0xed, 0x37, 0x05, 0xae, 0xce, 0x96, 0xb1, 0x5b, 0x50, 0x22, - 0xae, 0x25, 0xae, 0x3d, 0xcc, 0xd5, 0x3c, 0x8f, 0x6b, 0xfd, 0xe9, 0x6e, 0x24, 0xc4, 0x31, 0xce, - 0xc9, 0x43, 0x67, 0x78, 0x49, 0x8f, 0xc8, 0xc3, 0x23, 0x19, 0x8e, 0x71, 0xb4, 0x05, 0xf3, 0xc3, - 0x0f, 0x91, 0x24, 0x35, 0x2f, 0x14, 0xae, 0x85, 0x83, 0xea, 0x3c, 0x4e, 0x02, 0x78, 0x9c, 0xa7, - 0xfd, 0x92, 0x85, 0x95, 0x3d, 0xda, 0x3d, 0x7c, 0x3d, 0xb3, 0xe0, 0xd9, 0xd8, 0x2c, 0xd8, 0x9e, - 0xde, 0xb1, 0xe9, 0x5e, 0xbe, 0xb6, 0x79, 0xf0, 0x43, 0x16, 0xd6, 0xa6, 0xf8, 0x84, 0x8e, 0x01, - 0x79, 0x17, 0xda, 0x4b, 0xc6, 0xd1, 0x98, 0xea, 0xcb, 0xc5, 0xae, 0x6c, 0x5c, 0x0f, 0x07, 0xd5, - 0x94, 0x6e, 0xc5, 0x29, 0x47, 0xa0, 0x6f, 0x15, 0x58, 0xb6, 0xd3, 0x26, 0x95, 0x0c, 0x73, 0x6d, - 0xea, 0xe1, 0xa9, 0x33, 0xae, 0x71, 0x23, 0x1c, 0x54, 0xd3, 0xc7, 0x1f, 0x4e, 0x3f, 0x8b, 0xbf, - 0x32, 0xd7, 0x13, 0xe1, 0xe1, 0x0d, 0xf2, 0xff, 0xd5, 0xd5, 0x27, 0x63, 0x75, 0xb5, 0x75, 0xd9, - 0xba, 0x4a, 0x38, 0x39, 0xb1, 0xac, 0x3e, 
0x3b, 0x57, 0x56, 0x77, 0x2f, 0x53, 0x56, 0x49, 0xc3, - 0xd3, 0xab, 0xea, 0x31, 0xac, 0x4e, 0x76, 0x68, 0xe6, 0xe1, 0xac, 0xfd, 0x94, 0x85, 0xa5, 0x37, - 0xcf, 0xfc, 0x2c, 0x6d, 0xfd, 0x7b, 0x1e, 0x56, 0xde, 0xb4, 0xf4, 0xa4, 0x45, 0x27, 0x60, 0xd4, - 0x93, 0xcf, 0xf8, 0x28, 0x39, 0xfb, 0x8c, 0x7a, 0x58, 0x20, 0x48, 0x83, 0xb9, 0x76, 0xf4, 0xba, - 0x45, 0xef, 0x0f, 0xf0, 0x00, 0xcb, 0xa7, 0x4d, 0x22, 0xa8, 0x05, 0x05, 0xca, 0xf7, 0x56, 0xb5, - 0xb0, 0x9e, 0xdb, 0x28, 0xd7, 0x3e, 0xfc, 0x2f, 0x95, 0xa1, 0x8b, 0xcd, 0x77, 0xc7, 0xf6, 0xbd, - 0x93, 0x78, 0x9d, 0x10, 0x32, 0x1c, 0x19, 0x47, 0x6f, 0x43, 0x2e, 0xb0, 0x5a, 0xf2, 0xb5, 0x2f, - 0x4b, 0x4a, 0x6e, 0x7f, 0xf7, 0x21, 0xe6, 0xf2, 0x55, 0x22, 0x97, 0x67, 0x61, 0x02, 0x2d, 0x42, - 0xee, 0x88, 0x9e, 0x44, 0x0d, 0x85, 0xf9, 0x4f, 0xf4, 0x00, 0x0a, 0x7d, 0xbe, 0x57, 0xcb, 0xf8, - 0xbe, 0x37, 0xd5, 0xc9, 0x78, 0x0d, 0xc7, 0x91, 0xd6, 0xfd, 0xec, 0xb6, 0xa2, 0xfd, 0xa1, 0xc0, - 0x8d, 0x89, 0xe5, 0xc7, 0xd7, 0x1d, 0xd2, 0xed, 0x3a, 0xc7, 0xb4, 0x25, 0x8e, 0x2d, 0xc6, 0xeb, - 0x4e, 0x3d, 0x12, 0xe3, 0x21, 0x8e, 0xde, 0x85, 0xb9, 0x16, 0xb5, 0x2d, 0xda, 0x12, 0x8b, 0x51, - 0x31, 0xae, 0xdc, 0x87, 0x42, 0x8a, 0x25, 0xca, 0x79, 0x1e, 0x25, 0xcc, 0xb1, 0xe5, 0x2a, 0x36, - 0xe2, 0x61, 0x21, 0xc5, 0x12, 0x45, 0x75, 0x58, 0xa0, 0xdc, 0x4d, 0xe1, 0xff, 0x8e, 0xe7, 0x39, - 0xc3, 0x8c, 0xae, 0x48, 0x85, 0x85, 0x9d, 0x71, 0x18, 0x9f, 0xe7, 0x6b, 0xff, 0x64, 0x41, 0x9d, - 0x34, 0xda, 0xd0, 0x61, 0xbc, 0x8b, 0x08, 0x50, 0xac, 0x43, 0xe5, 0xda, 0xcd, 0x4b, 0x35, 0x08, - 0xd7, 0x68, 0x2c, 0x4b, 0x47, 0xe6, 0x93, 0xd2, 0xc4, 0xea, 0x22, 0x3e, 0x91, 0x07, 0x8b, 0xf6, - 0xf8, 0xce, 0x1c, 0x2d, 0x55, 0xe5, 0xda, 0xed, 0xcb, 0xb6, 0x83, 0x38, 0x4d, 0x95, 0xa7, 0x2d, - 0x9e, 0x03, 0x18, 0xbe, 0x60, 0x1f, 0xd5, 0x00, 0x2c, 0xdb, 0x74, 0x7a, 0x6e, 0x97, 0xfa, 0x54, - 0x84, 0xad, 0x18, 0xcf, 0xc1, 0xdd, 0x11, 0x82, 0x13, 0xac, 0xb4, 0x78, 0xe7, 0x67, 0x8b, 0x77, - 0xa3, 0x7e, 0x7a, 0x56, 0xc9, 0xbc, 0x38, 0xab, 0x64, 0x5e, 0x9e, 0x55, 0x32, 0xcf, 0xc3, 0x8a, - 0x72, 0x1a, 0x56, 0x94, 0x17, 0x61, 0x45, 0x79, 0x19, 0x56, 0x94, 0xbf, 0xc2, 0x8a, 0xf2, 0xdd, - 0xdf, 0x95, 0xcc, 0xb3, 0xb5, 0x29, 0xff, 0x94, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x45, 0x6f, - 0xe0, 0x61, 0x47, 0x0f, 0x00, 0x00, + // 1247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xfa, 0x47, 0x62, 0x8f, 0xe3, 0x6f, 0xd2, 0xc9, 0x37, 0xcd, 0x36, 0x11, 0x76, 0x64, + 0x24, 0x48, 0xd5, 0xb2, 0x26, 0x51, 0xdb, 0x44, 0x95, 0x0a, 0xf2, 0xaa, 0x01, 0x45, 0x4a, 0x4b, + 0x35, 0x51, 0x22, 0x51, 0x04, 0x62, 0xbc, 0x9e, 0xd8, 0x4b, 0xec, 0xdd, 0xed, 0xcc, 0xac, 0xd3, + 0x70, 0xaa, 0xc4, 0x3f, 0xc0, 0x91, 0x43, 0x0f, 0xfc, 0x07, 0x5c, 0x90, 0xb8, 0x73, 0x40, 0x11, + 0xa7, 0x1e, 0x8b, 0x84, 0x2c, 0x62, 0xce, 0xfc, 0x0f, 0x68, 0x66, 0xc7, 0xde, 0xdd, 0xc4, 0x76, + 0x6d, 0x0e, 0x94, 0x43, 0x6f, 0x9e, 0xf7, 0x79, 0xbf, 0xe7, 0xbd, 0xb7, 0x6f, 0x0c, 0x6e, 0x1c, + 0x6f, 0x33, 0xc3, 0x76, 0x2b, 0xd8, 0xb3, 0x2b, 0xd8, 0xe7, 0x4d, 0x97, 0xda, 0x5f, 0x63, 0x6e, + 0xbb, 0x4e, 0xa5, 0xb3, 0x51, 0x69, 0x10, 0x87, 0x50, 0xcc, 0x49, 0xdd, 0xf0, 0xa8, 0xcb, 0x5d, + 0xb8, 0x1a, 0x30, 0x1b, 0xd8, 0xb3, 0x8d, 0x18, 0xb3, 0xd1, 0xd9, 0x58, 0x79, 0xaf, 0x61, 0xf3, + 0xa6, 0x5f, 0x33, 0x2c, 0xb7, 0x5d, 0x69, 0xb8, 0x0d, 0xb7, 0x22, 0x65, 0x6a, 0xfe, 0x91, 0x3c, + 0xc9, 0x83, 0xfc, 0x15, 0xe8, 0x5a, 0xb9, 0x15, 0x1a, 0x6e, 0x63, 0xab, 0x69, 0x3b, 0x84, 0x9e, + 0x56, 0xbc, 0xe3, 0x86, 0x20, 0xb0, 0x4a, 0x9b, 0x70, 0x3c, 0xc4, 0x83, 0x95, 
0xca, 0x28, 0x29, + 0xea, 0x3b, 0xdc, 0x6e, 0x93, 0x4b, 0x02, 0x77, 0x5e, 0x25, 0xc0, 0xac, 0x26, 0x69, 0xe3, 0x8b, + 0x72, 0xe5, 0x2d, 0x00, 0x76, 0x9e, 0x72, 0x8a, 0x0f, 0x71, 0xcb, 0x27, 0xb0, 0x04, 0x32, 0x36, + 0x27, 0x6d, 0xa6, 0x6b, 0x6b, 0xa9, 0xf5, 0x9c, 0x99, 0xeb, 0x75, 0x4b, 0x99, 0x5d, 0x41, 0x40, + 0x01, 0xfd, 0x6e, 0xf6, 0xbb, 0xef, 0x4b, 0x89, 0x67, 0xbf, 0xaf, 0x25, 0xca, 0xbf, 0x6a, 0x60, + 0xf9, 0x23, 0x9b, 0xb4, 0xea, 0xfb, 0xa4, 0x45, 0x2c, 0xee, 0xd2, 0x2a, 0xe7, 0xd4, 0xae, 0xf9, + 0x9c, 0x30, 0x78, 0x1b, 0xe4, 0x29, 0x3e, 0xe9, 0x03, 0xba, 0xb6, 0xa6, 0xad, 0xe7, 0xcc, 0xc5, + 0xb3, 0x6e, 0x29, 0xd1, 0xeb, 0x96, 0xf2, 0x28, 0x84, 0x50, 0x94, 0x0f, 0x3e, 0x05, 0x73, 0x94, + 0x3c, 0xf1, 0x6d, 0x4a, 0xda, 0xc4, 0xe1, 0x4c, 0x4f, 0xae, 0xa5, 0xd6, 0xf3, 0x9b, 0x1f, 0x18, + 0xe1, 0x6d, 0x0c, 0x42, 0x33, 0xbc, 0xe3, 0x86, 0x20, 0x30, 0x43, 0x64, 0xd0, 0xe8, 0x6c, 0x18, + 0x31, 0x5f, 0x50, 0xa8, 0xc6, 0xfc, 0xbf, 0xb2, 0x3b, 0x17, 0x21, 0x32, 0x14, 0xb3, 0x24, 0x83, + 0xd9, 0xc3, 0x35, 0xd2, 0xfa, 0x8f, 0x04, 0x13, 0xf3, 0x65, 0xda, 0x60, 0x7e, 0x4c, 0x02, 0x7d, + 0xcf, 0xb5, 0x70, 0x6b, 0xdf, 0xaf, 0x7d, 0x45, 0x2c, 0x5e, 0xb5, 0x2c, 0xc2, 0x18, 0x22, 0x1d, + 0x9b, 0x9c, 0xc0, 0x2f, 0x41, 0x56, 0x18, 0xa9, 0x63, 0x8e, 0x65, 0x28, 0xf9, 0xcd, 0xf7, 0x27, + 0x73, 0xe9, 0x13, 0xa9, 0xeb, 0x01, 0xe1, 0xd8, 0x84, 0xca, 0x09, 0x10, 0xd2, 0xd0, 0x40, 0x2b, + 0x3c, 0x04, 0x69, 0xe6, 0x11, 0x4b, 0x4f, 0x4a, 0xed, 0xb7, 0x8c, 0x31, 0xbd, 0x64, 0x0c, 0xf1, + 0x70, 0xdf, 0x23, 0x96, 0x39, 0xa7, 0x2c, 0xa4, 0xc5, 0x09, 0x49, 0x7d, 0xf0, 0x0b, 0x30, 0xc3, + 0x38, 0xe6, 0x3e, 0xd3, 0x53, 0x52, 0xf3, 0x9d, 0xa9, 0x35, 0x4b, 0x69, 0xf3, 0x7f, 0x4a, 0xf7, + 0x4c, 0x70, 0x46, 0x4a, 0x6b, 0xf9, 0x33, 0xb0, 0xf4, 0xd0, 0x75, 0x10, 0x61, 0xae, 0x4f, 0x2d, + 0x12, 0x29, 0x80, 0x35, 0x90, 0xf6, 0x30, 0x6f, 0xaa, 0x9b, 0x1f, 0xb8, 0xf6, 0x08, 0xf3, 0x26, + 0x92, 0x88, 0xe0, 0xe8, 0x10, 0x5a, 0x93, 0x21, 0x47, 0x38, 0x0e, 0x09, 0xad, 0x21, 0x89, 0x94, + 0x9f, 0x80, 0xf9, 0x88, 0x72, 0xe4, 0xb7, 0x64, 0xaf, 0x09, 0x28, 0xd6, 0x6b, 0x42, 0x82, 0xa1, + 0x80, 0x0e, 0xef, 0x81, 0x79, 0x27, 0x94, 0x39, 0x40, 0x7b, 0x41, 0x11, 0xe5, 0xcc, 0xc5, 0x5e, + 0xb7, 0x14, 0x55, 0x27, 0x20, 0x74, 0x91, 0xb7, 0xfc, 0x3c, 0x0d, 0xe0, 0x90, 0x68, 0x2a, 0x20, + 0xe7, 0xe0, 0x36, 0x61, 0x1e, 0xb6, 0x88, 0x0a, 0xe9, 0x8a, 0x72, 0x38, 0xf7, 0xb0, 0x0f, 0xa0, + 0x90, 0xe7, 0xd5, 0xc1, 0xc1, 0xb7, 0x41, 0xa6, 0x41, 0x5d, 0xdf, 0x93, 0x17, 0x93, 0x33, 0x0b, + 0x8a, 0x25, 0xf3, 0xb1, 0x20, 0xa2, 0x00, 0x83, 0xd7, 0xc1, 0x6c, 0x87, 0x50, 0x66, 0xbb, 0x8e, + 0x9e, 0x96, 0x6c, 0xf3, 0x8a, 0x6d, 0xf6, 0x30, 0x20, 0xa3, 0x3e, 0x0e, 0x6f, 0x82, 0x2c, 0x55, + 0x8e, 0xeb, 0x19, 0xc9, 0xbb, 0xa0, 0x78, 0xb3, 0x83, 0x0c, 0x0e, 0x38, 0x44, 0x7f, 0x32, 0xbf, + 0x36, 0x10, 0x98, 0x89, 0xf7, 0xe7, 0x7e, 0x08, 0xa1, 0x28, 0x9f, 0x08, 0x4b, 0xc4, 0xa8, 0xcf, + 0xc6, 0xc3, 0x12, 0x29, 0x40, 0x12, 0x81, 0x6d, 0x50, 0x38, 0x8a, 0x0e, 0x15, 0x3d, 0x3b, 0x41, + 0x45, 0x8f, 0x18, 0x89, 0xe6, 0x95, 0x5e, 0xb7, 0x54, 0x88, 0xcf, 0xa8, 0xb8, 0x76, 0x61, 0xae, + 0x15, 0x6d, 0x7b, 0x3d, 0x37, 0x81, 0xb9, 0x11, 0x43, 0x2b, 0x30, 0x17, 0x9f, 0x22, 0x71, 0xed, + 0xe5, 0x9f, 0x35, 0x30, 0x37, 0x5d, 0x3d, 0xde, 0x00, 0x39, 0xec, 0xd9, 0xf2, 0x52, 0xfb, 0x95, + 0x58, 0x10, 0x55, 0x53, 0x7d, 0xb4, 0x1b, 0x10, 0x51, 0x88, 0x0b, 0xe6, 0x7e, 0xaa, 0x45, 0xc3, + 0x0e, 0x98, 0xfb, 0x26, 0x19, 0x0a, 0x71, 0xb8, 0x05, 0x0a, 0xfd, 0x83, 0x2c, 0x41, 0x3d, 0x2d, + 0x05, 0x64, 0x10, 0x28, 0x0a, 0xa0, 0x38, 0x5f, 0xf9, 0xa7, 0x24, 0x58, 0xde, 0x27, 0xad, 0xa3, + 0xd7, 
0x33, 0xe9, 0x1e, 0xc7, 0x26, 0xdd, 0xf6, 0xf8, 0x79, 0x34, 0xdc, 0xcb, 0xd7, 0x36, 0xed, + 0x9e, 0x27, 0xc1, 0xea, 0x18, 0x9f, 0xe0, 0x09, 0x80, 0xf4, 0xd2, 0xf0, 0x50, 0x79, 0xac, 0x8c, + 0xf5, 0xe5, 0xf2, 0xcc, 0x31, 0xaf, 0xf6, 0xba, 0xa5, 0x21, 0xb3, 0x08, 0x0d, 0x31, 0x01, 0xbf, + 0xd1, 0xc0, 0x92, 0x33, 0x6c, 0x0e, 0xab, 0x34, 0x6f, 0x8e, 0x35, 0x3e, 0x74, 0x82, 0x9b, 0xd7, + 0x7a, 0xdd, 0xd2, 0xf0, 0xe1, 0x8e, 0x86, 0xdb, 0x12, 0xdf, 0xd0, 0xab, 0x91, 0xf4, 0x88, 0x06, + 0xf9, 0xf7, 0xea, 0xea, 0xd3, 0x58, 0x5d, 0x6d, 0x4d, 0x5a, 0x57, 0x11, 0x27, 0x47, 0x96, 0xd5, + 0xe7, 0x17, 0xca, 0xea, 0xf6, 0x24, 0x65, 0x15, 0x55, 0x3c, 0xbe, 0xaa, 0x1e, 0x80, 0x95, 0xd1, + 0x0e, 0x4d, 0xfd, 0xe9, 0x29, 0xff, 0x90, 0x04, 0x8b, 0x6f, 0x96, 0x98, 0x69, 0xda, 0xfa, 0x97, + 0x34, 0x58, 0x7e, 0xd3, 0xd2, 0xa3, 0xd6, 0x38, 0x9f, 0x11, 0xaa, 0x96, 0x94, 0xc1, 0xe5, 0x1c, + 0x30, 0x42, 0x91, 0x44, 0x60, 0x19, 0xcc, 0x34, 0x82, 0xaf, 0x5b, 0xf0, 0xfd, 0x01, 0x22, 0xc1, + 0xea, 0xd3, 0xa6, 0x10, 0x58, 0x07, 0x19, 0x22, 0xde, 0x4b, 0x7a, 0x46, 0xee, 0xf3, 0x1f, 0xfe, + 0x93, 0xca, 0x30, 0xe4, 0x8b, 0x6b, 0xc7, 0xe1, 0xf4, 0x34, 0x5c, 0x96, 0x24, 0x0d, 0x05, 0xca, + 0xe1, 0x5b, 0x20, 0xe5, 0xdb, 0x75, 0xb5, 0xcb, 0xe4, 0x15, 0x4b, 0xea, 0x60, 0xf7, 0x3e, 0x12, + 0xf4, 0x15, 0xac, 0x1e, 0x6d, 0x52, 0x05, 0x5c, 0x00, 0xa9, 0x63, 0x72, 0x1a, 0x34, 0x14, 0x12, + 0x3f, 0xe1, 0x3d, 0x90, 0xe9, 0x88, 0xf7, 0x9c, 0xca, 0xef, 0xbb, 0x63, 0x9d, 0x0c, 0x9f, 0x7f, + 0x28, 0x90, 0xba, 0x9b, 0xdc, 0xd6, 0xca, 0xbf, 0x69, 0xe0, 0xda, 0xc8, 0xf2, 0x13, 0xcb, 0x1c, + 0x6e, 0xb5, 0xdc, 0x13, 0x52, 0x97, 0x66, 0xb3, 0xe1, 0x32, 0x57, 0x0d, 0xc8, 0xa8, 0x8f, 0xc3, + 0x77, 0xc0, 0x4c, 0x9d, 0x38, 0x36, 0xa9, 0xcb, 0xb5, 0x2f, 0x1b, 0x56, 0xee, 0x7d, 0x49, 0x45, + 0x0a, 0x15, 0x7c, 0x94, 0x60, 0xe6, 0x3a, 0x6a, 0xd1, 0x1c, 0xf0, 0x21, 0x49, 0x45, 0x0a, 0x85, + 0x55, 0x30, 0x4f, 0x84, 0x9b, 0xd2, 0xff, 0x1d, 0x4a, 0xdd, 0xfe, 0x8d, 0x2e, 0x2b, 0x81, 0xf9, + 0x9d, 0x38, 0x8c, 0x2e, 0xf2, 0x97, 0xff, 0x4a, 0x02, 0x7d, 0xd4, 0x68, 0x83, 0x47, 0xe1, 0x2e, + 0x22, 0x41, 0xb9, 0x0e, 0xe5, 0x37, 0xaf, 0x4f, 0xd4, 0x20, 0x42, 0xc2, 0x5c, 0x52, 0x8e, 0x14, + 0xa2, 0xd4, 0xc8, 0xea, 0x22, 0x8f, 0x90, 0x82, 0x05, 0x27, 0xfe, 0x22, 0xe8, 0xbf, 0x11, 0x6f, + 0x4e, 0xda, 0x0e, 0xd2, 0x9a, 0xae, 0xac, 0x2d, 0x5c, 0x00, 0x18, 0xba, 0xa4, 0x1f, 0x6e, 0x02, + 0x60, 0x3b, 0x96, 0xdb, 0xf6, 0x5a, 0x84, 0x13, 0x99, 0xb6, 0x6c, 0x38, 0x07, 0x77, 0x07, 0x08, + 0x8a, 0x70, 0x0d, 0xcb, 0x77, 0x7a, 0xba, 0x7c, 0x9b, 0xd5, 0xb3, 0xf3, 0x62, 0xe2, 0xc5, 0x79, + 0x31, 0xf1, 0xf2, 0xbc, 0x98, 0x78, 0xd6, 0x2b, 0x6a, 0x67, 0xbd, 0xa2, 0xf6, 0xa2, 0x57, 0xd4, + 0x5e, 0xf6, 0x8a, 0xda, 0x1f, 0xbd, 0xa2, 0xf6, 0xed, 0x9f, 0xc5, 0xc4, 0xe3, 0xd5, 0x31, 0xff, + 0xd0, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x8c, 0x77, 0x0f, 0xbf, 0x11, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -565,6 +631,90 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FieldSelectorAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldSelectorAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldSelectorAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requirements) > 0 { + for iNdEx 
:= len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.RawSelector) + copy(dAtA[i:], m.RawSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RawSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LabelSelectorAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelectorAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelectorAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requirements) > 0 { + for iNdEx := len(m.Requirements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requirements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.RawSelector) + copy(dAtA[i:], m.RawSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RawSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -712,6 +862,30 @@ func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.FieldSelector != nil { + { + size, err := m.FieldSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -1277,6 +1451,40 @@ func (m ExtraValue) Size() (n int) { return n } +func (m *FieldSelectorAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RawSelector) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requirements) > 0 { + for _, e := range m.Requirements { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RawSelector) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requirements) > 0 { + for _, e := range m.Requirements { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *LocalSubjectAccessReview) Size() (n int) { if m == nil { return 0 @@ -1346,6 +1554,14 @@ func (m *ResourceAttributes) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + if m.FieldSelector != nil { + l = m.FieldSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1536,6 +1752,38 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this 
*FieldSelectorAttributes) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequirements := "[]FieldSelectorRequirement{" + for _, f := range this.Requirements { + repeatedStringForRequirements += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRequirements += "}" + s := strings.Join([]string{`&FieldSelectorAttributes{`, + `RawSelector:` + fmt.Sprintf("%v", this.RawSelector) + `,`, + `Requirements:` + repeatedStringForRequirements + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelectorAttributes) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequirements := "[]LabelSelectorRequirement{" + for _, f := range this.Requirements { + repeatedStringForRequirements += fmt.Sprintf("%v", f) + "," + } + repeatedStringForRequirements += "}" + s := strings.Join([]string{`&LabelSelectorAttributes{`, + `RawSelector:` + fmt.Sprintf("%v", this.RawSelector) + `,`, + `Requirements:` + repeatedStringForRequirements + `,`, + `}`, + }, "") + return s +} func (this *LocalSubjectAccessReview) String() string { if this == nil { return "nil" @@ -1582,6 +1830,8 @@ func (this *ResourceAttributes) String() string { `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `FieldSelector:` + strings.Replace(this.FieldSelector.String(), "FieldSelectorAttributes", "FieldSelectorAttributes", 1) + `,`, + `LabelSelector:` + strings.Replace(this.LabelSelector.String(), "LabelSelectorAttributes", "LabelSelectorAttributes", 1) + `,`, `}`, }, "") return s @@ -1807,6 +2057,238 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } return nil } +func (m *FieldSelectorAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldSelectorAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldSelectorAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RawSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requirements = append(m.Requirements, v1.FieldSelectorRequirement{}) + if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RawSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requirements = append(m.Requirements, v1.LabelSelectorRequirement{}) + if err := m.Requirements[len(m.Requirements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2437,6 +2919,78 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field FieldSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldSelector == nil { + m.FieldSelector = &FieldSelectorAttributes{} + } + if err := m.FieldSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &LabelSelectorAttributes{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/constraint/vendor/k8s.io/api/authorization/v1/generated.proto b/constraint/vendor/k8s.io/api/authorization/v1/generated.proto index 83283d0bd..37b05b855 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/authorization/v1/generated.proto @@ -37,6 +37,60 @@ message ExtraValue { repeated string items = 1; } +// FieldSelectorAttributes indicates a field limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +message FieldSelectorAttributes { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + optional string rawSelector = 1; + + // requirements is the parsed interpretation of a field selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. 
+ // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement requirements = 2; +} + +// LabelSelectorAttributes indicates a label limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +message LabelSelectorAttributes { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + optional string rawSelector = 1; + + // requirements is the parsed interpretation of a label selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement requirements = 2; +} + // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions // checking. @@ -44,7 +98,7 @@ message LocalSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. @@ -111,6 +165,20 @@ message ResourceAttributes { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional optional string name = 7; + + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + optional FieldSelectorAttributes fieldSelector = 8; + + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. 
+ // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + optional LabelSelectorAttributes labelSelector = 9; } // ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, @@ -145,7 +213,7 @@ message SelfSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. user and groups must be empty optional SelfSubjectAccessReviewSpec spec = 2; @@ -177,7 +245,7 @@ message SelfSubjectRulesReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. optional SelfSubjectRulesReviewSpec spec = 2; @@ -198,7 +266,7 @@ message SubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional SubjectAccessReviewSpec spec = 2; diff --git a/constraint/vendor/k8s.io/api/authorization/v1/types.go b/constraint/vendor/k8s.io/api/authorization/v1/types.go index 3b42956f8..36f5fa410 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1/types.go +++ b/constraint/vendor/k8s.io/api/authorization/v1/types.go @@ -26,6 +26,7 @@ import ( // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // SubjectAccessReview checks whether or not a user or group can perform an action. type SubjectAccessReview struct { @@ -47,6 +48,7 @@ type SubjectAccessReview struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a // spec.namespace means "in all namespaces". Self is a special case, because users should always be able @@ -69,6 +71,7 @@ type SelfSubjectAccessReview struct { // +genclient // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. // Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions @@ -115,6 +118,72 @@ type ResourceAttributes struct { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` + // fieldSelector describes the limitation on access based on field. 
It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + FieldSelector *FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"` + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. + // + // This field is alpha-level. To use this field, you must enable the + // `AuthorizeWithSelectors` feature gate (disabled by default). + // +optional + LabelSelector *LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"` +} + +// LabelSelectorAttributes indicates a label limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. +// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +type LabelSelectorAttributes struct { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + RawSelector string `json:"rawSelector,omitempty" protobuf:"bytes,1,opt,name=rawSelector"` + + // requirements is the parsed interpretation of a label selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + Requirements []metav1.LabelSelectorRequirement `json:"requirements,omitempty" protobuf:"bytes,2,rep,name=requirements"` +} + +// FieldSelectorAttributes indicates a field limited access. +// Webhook authors are encouraged to +// * ensure rawSelector and requirements are not both set +// * consider the requirements field if set +// * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. +// For the *SubjectAccessReview endpoints of the kube-apiserver: +// * If rawSelector is empty and requirements are empty, the request is not limited. +// * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. 
+// * If rawSelector is empty and requirements are present, the requirements should be honored +// * If rawSelector is present and requirements are present, the request is invalid. +type FieldSelectorAttributes struct { + // rawSelector is the serialization of a field selector that would be included in a query parameter. + // Webhook implementations are encouraged to ignore rawSelector. + // The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present. + // +optional + RawSelector string `json:"rawSelector,omitempty" protobuf:"bytes,1,opt,name=rawSelector"` + + // requirements is the parsed interpretation of a field selector. + // All requirements must be met for a resource instance to match the selector. + // Webhook implementations should handle requirements, but how to handle them is up to the webhook. + // Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements + // are not understood. + // +optional + // +listType=atomic + Requirements []metav1.FieldSelectorRequirement `json:"requirements,omitempty" protobuf:"bytes,2,rep,name=requirements"` } // NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface @@ -198,6 +267,7 @@ type SubjectAccessReviewStatus struct { // +genclient:nonNamespaced // +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. // The returned list of actions may be incomplete depending on the server's authorization mode, diff --git a/constraint/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 93229485c..dc6b8a89e 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -27,6 +27,26 @@ package v1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_FieldSelectorAttributes = map[string]string{ + "": "FieldSelectorAttributes indicates a field limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "rawSelector": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. 
The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "requirements": "requirements is the parsed interpretation of a field selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", +} + +func (FieldSelectorAttributes) SwaggerDoc() map[string]string { + return map_FieldSelectorAttributes +} + +var map_LabelSelectorAttributes = map[string]string{ + "": "LabelSelectorAttributes indicates a label limited access. Webhook authors are encouraged to * ensure rawSelector and requirements are not both set * consider the requirements field if set * not try to parse or consider the rawSelector field if set. This is to avoid another CVE-2022-2880 (i.e. getting different systems to agree on how exactly to parse a query is not something we want), see https://www.oxeye.io/resources/golang-parameter-smuggling-attack for more details. For the *SubjectAccessReview endpoints of the kube-apiserver: * If rawSelector is empty and requirements are empty, the request is not limited. * If rawSelector is present and requirements are empty, the rawSelector will be parsed and limited if the parsing succeeds. * If rawSelector is empty and requirements are present, the requirements should be honored * If rawSelector is present and requirements are present, the request is invalid.", + "rawSelector": "rawSelector is the serialization of a field selector that would be included in a query parameter. Webhook implementations are encouraged to ignore rawSelector. The kube-apiserver's *SubjectAccessReview will parse the rawSelector as long as the requirements are not present.", + "requirements": "requirements is the parsed interpretation of a label selector. All requirements must be met for a resource instance to match the selector. Webhook implementations should handle requirements, but how to handle them is up to the webhook. Since requirements can only limit the request, it is safe to authorize as unlimited request if the requirements are not understood.", +} + +func (LabelSelectorAttributes) SwaggerDoc() map[string]string { + return map_LabelSelectorAttributes +} + var map_LocalSubjectAccessReview = map[string]string{ "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -59,14 +79,16 @@ func (NonResourceRule) SwaggerDoc() map[string]string { } var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. 
\"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. \"*\" means all.", - "resource": "Resource is one of the existing resource types. \"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", + "labelSelector": "labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it.\n\nThis field is alpha-level. To use this field, you must enable the `AuthorizeWithSelectors` feature gate (disabled by default).", } func (ResourceAttributes) SwaggerDoc() map[string]string { diff --git a/constraint/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go index f1d49eb38..7f040f5c5 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -45,6 +46,52 @@ func (in ExtraValue) DeepCopy() ExtraValue { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldSelectorAttributes) DeepCopyInto(out *FieldSelectorAttributes) { + *out = *in + if in.Requirements != nil { + in, out := &in.Requirements, &out.Requirements + *out = make([]metav1.FieldSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorAttributes. +func (in *FieldSelectorAttributes) DeepCopy() *FieldSelectorAttributes { + if in == nil { + return nil + } + out := new(FieldSelectorAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LabelSelectorAttributes) DeepCopyInto(out *LabelSelectorAttributes) { + *out = *in + if in.Requirements != nil { + in, out := &in.Requirements, &out.Requirements + *out = make([]metav1.LabelSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorAttributes. +func (in *LabelSelectorAttributes) DeepCopy() *LabelSelectorAttributes { + if in == nil { + return nil + } + out := new(LabelSelectorAttributes) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in @@ -118,6 +165,16 @@ func (in *NonResourceRule) DeepCopy() *NonResourceRule { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { *out = *in + if in.FieldSelector != nil { + in, out := &in.FieldSelector, &out.FieldSelector + *out = new(FieldSelectorAttributes) + (*in).DeepCopyInto(*out) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(LabelSelectorAttributes) + (*in).DeepCopyInto(*out) + } return } @@ -201,7 +258,7 @@ func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReview if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes @@ -299,7 +356,7 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes diff --git a/constraint/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..b0c0475b4 --- /dev/null +++ b/constraint/vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *LocalSubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SelfSubjectRulesReview) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SubjectAccessReview) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} diff --git a/constraint/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/constraint/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 28642ba63..5007d1b49 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/constraint/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -26,6 +26,7 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/authorization/v1" math "math" math_bits "math/bits" @@ -459,78 +460,82 @@ func init() { } var fileDescriptor_8eab727787743457 = []byte{ - // 1130 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xf7, 0xfa, 0x47, 0x62, 0x3f, 0x37, 0xdf, 0xa4, 0x13, 0xa5, 0xd9, 0xe6, 0x2b, 0x6c, 0xcb, - 0x48, 0x28, 0x88, 0xb2, 0xdb, 0x44, 0x85, 0x94, 0x40, 0x0f, 0xb1, 0x12, 0x50, 0xa4, 0xb6, 0x54, - 0x13, 0x25, 0x07, 0x2a, 0x01, 0xe3, 0xf5, 0xc4, 0x5e, 0x62, 0xef, 0x2e, 0x3b, 0xb3, 0x0e, 0x41, - 0x1c, 0x7a, 0xe4, 0xc8, 0x91, 0x23, 0x27, 0xfe, 0x07, 0x2e, 0x48, 0x70, 0xca, 0xb1, 0xc7, 0x20, - 0x21, 0x8b, 0x2c, 0x7f, 0x04, 0x57, 0x34, 0xb3, 0x63, 0xef, 0x3a, 0xd9, 0xc4, 0x49, 0x0e, 0xf4, - 0xd2, 0x9b, 0xe7, 0x7d, 0x3e, 0xef, 0xcd, 0x7b, 0x6f, 0xde, 0x7b, 0xfb, 0x0c, 0xf7, 0x0f, 0x1e, - 0x32, 0xc3, 0x76, 0x4d, 0xe2, 0xd9, 0x26, 0x09, 0x78, 0xc7, 0xf5, 0xed, 0x6f, 0x09, 0xb7, 0x5d, - 0xc7, 0xec, 0xaf, 0x34, 0x29, 0x27, 0x2b, 0x66, 0x9b, 0x3a, 0xd4, 0x27, 0x9c, 0xb6, 0x0c, 0xcf, - 0x77, 0xb9, 0x8b, 0x6a, 0x91, 0x86, 0x41, 0x3c, 0xdb, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xd2, 0xbb, - 0x6d, 0x9b, 0x77, 0x82, 0xa6, 0x61, 0xb9, 0x3d, 0xb3, 0xed, 0xb6, 0x5d, 0x53, 0x2a, 0x36, 0x83, - 0x7d, 0x79, 0x92, 0x07, 0xf9, 0x2b, 0x32, 0xb8, 0xf4, 0x20, 0x76, 0xa1, 0x47, 0xac, 0x8e, 0xed, - 0x50, 0xff, 0xc8, 0xf4, 0x0e, 0xda, 0x42, 0xc0, 0xcc, 0x1e, 0xe5, 0xc4, 0xec, 0x9f, 0x73, 0x63, - 0xc9, 0xbc, 0x48, 0xcb, 0x0f, 0x1c, 0x6e, 0xf7, 0xe8, 0x39, 0x85, 0xf7, 0x27, 0x29, 0x30, 0xab, - 0x43, 0x7b, 0xe4, 0xac, 0x5e, 0x7d, 0x0d, 0x60, 0xeb, 0x1b, 0xee, 0x93, 0x3d, 0xd2, 0x0d, 0x28, - 0xaa, 0x42, 0xc1, 0xe6, 0xb4, 0xc7, 0x74, 0xad, 0x96, 0x5b, 0x2e, 0x35, 0x4a, 0xe1, 0xa0, 0x5a, - 0xd8, 0x16, 0x02, 0x1c, 0xc9, 0xd7, 0x8b, 0x3f, 0xfe, 0x54, 0xcd, 0xbc, 0xf8, 0xb3, 0x96, 0xa9, - 0xff, 0x9a, 0x05, 0xfd, 0xb1, 0x6b, 0x91, 0xee, 0x4e, 
0xd0, 0xfc, 0x8a, 0x5a, 0x7c, 0xc3, 0xb2, - 0x28, 0x63, 0x98, 0xf6, 0x6d, 0x7a, 0x88, 0xbe, 0x84, 0xa2, 0x88, 0xac, 0x45, 0x38, 0xd1, 0xb5, - 0x9a, 0xb6, 0x5c, 0x5e, 0xbd, 0x6f, 0xc4, 0x89, 0x1d, 0x39, 0x68, 0x78, 0x07, 0x6d, 0x21, 0x60, - 0x86, 0x60, 0x1b, 0xfd, 0x15, 0xe3, 0x53, 0x69, 0xeb, 0x09, 0xe5, 0xa4, 0x81, 0x8e, 0x07, 0xd5, - 0x4c, 0x38, 0xa8, 0x42, 0x2c, 0xc3, 0x23, 0xab, 0xe8, 0x39, 0xe4, 0x99, 0x47, 0x2d, 0x3d, 0x2b, - 0xad, 0x7f, 0x60, 0x4c, 0x7a, 0x36, 0x23, 0xc5, 0xcd, 0x1d, 0x8f, 0x5a, 0x8d, 0x5b, 0xea, 0x9a, - 0xbc, 0x38, 0x61, 0x69, 0x14, 0x59, 0x30, 0xc5, 0x38, 0xe1, 0x01, 0xd3, 0x73, 0xd2, 0xfc, 0x87, - 0x37, 0x33, 0x2f, 0x4d, 0x34, 0xfe, 0xa7, 0x2e, 0x98, 0x8a, 0xce, 0x58, 0x99, 0xae, 0x3f, 0x87, - 0x85, 0xa7, 0xae, 0x83, 0x29, 0x73, 0x03, 0xdf, 0xa2, 0x1b, 0x9c, 0xfb, 0x76, 0x33, 0xe0, 0x94, - 0xa1, 0x1a, 0xe4, 0x3d, 0xc2, 0x3b, 0x32, 0x71, 0xa5, 0xd8, 0xbf, 0x67, 0x84, 0x77, 0xb0, 0x44, - 0x04, 0xa3, 0x4f, 0xfd, 0xa6, 0x0c, 0x3e, 0xc1, 0xd8, 0xa3, 0x7e, 0x13, 0x4b, 0xa4, 0xfe, 0x35, - 0xcc, 0x26, 0x8c, 0xe3, 0xa0, 0x2b, 0xdf, 0x56, 0x40, 0x63, 0x6f, 0x2b, 0x34, 0x18, 0x8e, 0xe4, - 0xe8, 0x11, 0xcc, 0x3a, 0xb1, 0xce, 0x2e, 0x7e, 0xcc, 0xf4, 0xac, 0xa4, 0xce, 0x87, 0x83, 0x6a, - 0xd2, 0x9c, 0x80, 0xf0, 0x59, 0xae, 0x28, 0x08, 0x94, 0x12, 0x8d, 0x09, 0x25, 0x87, 0xf4, 0x28, - 0xf3, 0x88, 0x45, 0x55, 0x48, 0xb7, 0x95, 0xc3, 0xa5, 0xa7, 0x43, 0x00, 0xc7, 0x9c, 0xc9, 0xc1, - 0xa1, 0x37, 0xa1, 0xd0, 0xf6, 0xdd, 0xc0, 0x93, 0xaf, 0x53, 0x6a, 0xcc, 0x28, 0x4a, 0xe1, 0x13, - 0x21, 0xc4, 0x11, 0x86, 0xde, 0x86, 0xe9, 0x3e, 0xf5, 0x99, 0xed, 0x3a, 0x7a, 0x5e, 0xd2, 0x66, - 0x15, 0x6d, 0x7a, 0x2f, 0x12, 0xe3, 0x21, 0x8e, 0xee, 0x41, 0xd1, 0x57, 0x8e, 0xeb, 0x05, 0xc9, - 0x9d, 0x53, 0xdc, 0xe2, 0x28, 0x83, 0x23, 0x06, 0x7a, 0x0f, 0xca, 0x2c, 0x68, 0x8e, 0x14, 0xa6, - 0xa4, 0xc2, 0xbc, 0x52, 0x28, 0xef, 0xc4, 0x10, 0x4e, 0xf2, 0x44, 0x58, 0x22, 0x46, 0x7d, 0x7a, - 0x3c, 0x2c, 0x91, 0x02, 0x2c, 0x91, 0xfa, 0xef, 0x1a, 0xdc, 0xba, 0xde, 0x8b, 0xbd, 0x03, 0x25, - 0xe2, 0xd9, 0x32, 0xec, 0xe1, 0x5b, 0xcd, 0x88, 0xbc, 0x6e, 0x3c, 0xdb, 0x8e, 0x84, 0x38, 0xc6, - 0x05, 0x79, 0xe8, 0x8c, 0xa8, 0xeb, 0x11, 0x79, 0x78, 0x25, 0xc3, 0x31, 0x8e, 0xd6, 0x60, 0x66, - 0x78, 0x90, 0x8f, 0xa4, 0xe7, 0xa5, 0xc2, 0xed, 0x70, 0x50, 0x9d, 0xc1, 0x49, 0x00, 0x8f, 0xf3, - 0xea, 0xbf, 0x65, 0x61, 0x71, 0x87, 0x76, 0xf7, 0x5f, 0xcd, 0x54, 0xf8, 0x62, 0x6c, 0x2a, 0x3c, - 0xba, 0x42, 0xdb, 0xa6, 0xbb, 0xfa, 0x6a, 0x27, 0xc3, 0xcf, 0x59, 0xf8, 0xff, 0x25, 0x8e, 0xa1, - 0xef, 0x00, 0xf9, 0xe7, 0x1a, 0x4d, 0x65, 0xf4, 0xc1, 0x64, 0x87, 0xce, 0x37, 0x69, 0xe3, 0x4e, - 0x38, 0xa8, 0xa6, 0x34, 0x2f, 0x4e, 0xb9, 0x07, 0x7d, 0xaf, 0xc1, 0x82, 0x93, 0x36, 0xb8, 0x54, - 0xd6, 0xd7, 0x26, 0x7b, 0x90, 0x3a, 0xf7, 0x1a, 0x77, 0xc3, 0x41, 0x35, 0x7d, 0x24, 0xe2, 0xf4, - 0x0b, 0xc5, 0xc8, 0xb9, 0x93, 0x48, 0x94, 0x68, 0x9a, 0xff, 0xae, 0xd6, 0x3e, 0x1f, 0xab, 0xb5, - 0x8f, 0xae, 0x55, 0x6b, 0x09, 0x4f, 0x2f, 0x2c, 0xb5, 0xe6, 0x99, 0x52, 0x5b, 0xbf, 0x72, 0xa9, - 0x25, 0xad, 0x5f, 0x5e, 0x69, 0x4f, 0x60, 0xe9, 0x62, 0xaf, 0xae, 0x3d, 0xba, 0xeb, 0xbf, 0x64, - 0x61, 0xfe, 0xf5, 0x3a, 0x70, 0xb3, 0xa6, 0x3f, 0xc9, 0xc3, 0xe2, 0xeb, 0x86, 0xbf, 0xbc, 0xe1, - 0xc5, 0x47, 0x34, 0x60, 0xd4, 0x57, 0x1f, 0xfe, 0xd1, 0x5b, 0xed, 0x32, 0xea, 0x63, 0x89, 0xa0, - 0xda, 0x70, 0x37, 0x88, 0x3e, 0x58, 0x20, 0x32, 0xad, 0xbe, 0x85, 0x6a, 0x31, 0xb0, 0xa1, 0x40, - 0xc5, 0xc6, 0xab, 0x17, 0x6a, 0xb9, 0xe5, 0xf2, 0xea, 0xe6, 0x8d, 0x6b, 0xc5, 0x90, 0x8b, 0xf3, - 0x96, 0xc3, 0xfd, 0xa3, 0x78, 0x07, 0x91, 0x32, 0x1c, 0xdd, 0x80, 0xde, 0x80, 
0x5c, 0x60, 0xb7, - 0xd4, 0x8a, 0x50, 0x56, 0x94, 0xdc, 0xee, 0xf6, 0x26, 0x16, 0xf2, 0xa5, 0x7d, 0xb5, 0x7b, 0x4b, - 0x13, 0x68, 0x0e, 0x72, 0x07, 0xf4, 0x28, 0xea, 0x33, 0x2c, 0x7e, 0xa2, 0x06, 0x14, 0xfa, 0x62, - 0x2d, 0x57, 0x79, 0xbe, 0x37, 0xd9, 0xd3, 0x78, 0x95, 0xc7, 0x91, 0xea, 0x7a, 0xf6, 0xa1, 0x56, - 0xff, 0x43, 0x83, 0xbb, 0x17, 0x16, 0xa4, 0x58, 0x94, 0x48, 0xb7, 0xeb, 0x1e, 0xd2, 0x96, 0xbc, - 0xbb, 0x18, 0x2f, 0x4a, 0x1b, 0x91, 0x18, 0x0f, 0x71, 0xf4, 0x16, 0x4c, 0xb5, 0xa8, 0x63, 0xd3, - 0x96, 0x5c, 0xa9, 0x8a, 0x71, 0x2d, 0x6f, 0x4a, 0x29, 0x56, 0xa8, 0xe0, 0xf9, 0x94, 0x30, 0xd7, - 0x51, 0x4b, 0xdc, 0x88, 0x87, 0xa5, 0x14, 0x2b, 0x14, 0x6d, 0xc0, 0x2c, 0x15, 0x6e, 0xca, 0x20, - 0xb6, 0x7c, 0xdf, 0x1d, 0xbe, 0xec, 0xa2, 0x52, 0x98, 0xdd, 0x1a, 0x87, 0xf1, 0x59, 0x7e, 0xfd, - 0x9f, 0x2c, 0xe8, 0x17, 0x8d, 0x3d, 0x74, 0x10, 0x6f, 0x31, 0x12, 0x94, 0x8b, 0x54, 0x79, 0xd5, - 0xb8, 0x7a, 0xcb, 0x08, 0xb5, 0xc6, 0x82, 0xf2, 0x66, 0x26, 0x29, 0x4d, 0x6c, 0x3e, 0xf2, 0x88, - 0x0e, 0x61, 0xce, 0x19, 0x5f, 0xb9, 0xa3, 0x9d, 0xac, 0xbc, 0xba, 0x72, 0xad, 0x06, 0x91, 0x57, - 0xea, 0xea, 0xca, 0xb9, 0x33, 0x00, 0xc3, 0xe7, 0x2e, 0x41, 0xab, 0x00, 0xb6, 0x63, 0xb9, 0x3d, - 0xaf, 0x4b, 0x39, 0x95, 0x09, 0x2c, 0xc6, 0xd3, 0x72, 0x7b, 0x84, 0xe0, 0x04, 0x2b, 0x2d, 0xf3, - 0xf9, 0xeb, 0x65, 0xbe, 0xf1, 0xf1, 0xf1, 0x69, 0x25, 0xf3, 0xf2, 0xb4, 0x92, 0x39, 0x39, 0xad, - 0x64, 0x5e, 0x84, 0x15, 0xed, 0x38, 0xac, 0x68, 0x2f, 0xc3, 0x8a, 0x76, 0x12, 0x56, 0xb4, 0xbf, - 0xc2, 0x8a, 0xf6, 0xc3, 0xdf, 0x95, 0xcc, 0x67, 0xb5, 0x49, 0xff, 0xc0, 0xff, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0xcd, 0x08, 0x09, 0x84, 0xa4, 0x0f, 0x00, 0x00, + // 1192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0x8f, 0xf3, 0xa7, 0x4d, 0x26, 0x1b, 0xda, 0x9d, 0xaa, 0x5b, 0x6f, 0x11, 0x49, 0x14, 0x24, + 0x54, 0xb4, 0x8b, 0xb3, 0xad, 0x0a, 0x5d, 0x0a, 0x7b, 0xa8, 0xd5, 0x2e, 0xaa, 0xd4, 0x5d, 0x56, + 0x53, 0xb5, 0x07, 0x56, 0x02, 0x26, 0xce, 0x34, 0x31, 0x75, 0x6c, 0xe3, 0x19, 0xa7, 0x14, 0x71, + 0xd8, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x70, 0xe2, 0x3b, 0x70, 0x41, 0x82, 0x53, 0x8f, 0x7b, 0x2c, + 0x12, 0x8a, 0xa8, 0xf9, 0x10, 0x5c, 0xd1, 0x8c, 0x27, 0xb1, 0x9d, 0xba, 0x4d, 0xdb, 0x03, 0x7b, + 0xd9, 0x5b, 0xe6, 0xfd, 0x7e, 0xef, 0xcf, 0xbc, 0x79, 0xf3, 0xfc, 0x26, 0xe0, 0xc1, 0xe1, 0x43, + 0xaa, 0x99, 0x4e, 0x13, 0xbb, 0x66, 0x13, 0xfb, 0xac, 0xeb, 0x78, 0xe6, 0xb7, 0x98, 0x99, 0x8e, + 0xdd, 0xec, 0x2f, 0xb7, 0x08, 0xc3, 0xcb, 0xcd, 0x0e, 0xb1, 0x89, 0x87, 0x19, 0x69, 0x6b, 0xae, + 0xe7, 0x30, 0x07, 0xd6, 0x43, 0x0d, 0x0d, 0xbb, 0xa6, 0x96, 0xd0, 0xd0, 0xa4, 0xc6, 0xe2, 0x7b, + 0x1d, 0x93, 0x75, 0xfd, 0x96, 0x66, 0x38, 0xbd, 0x66, 0xc7, 0xe9, 0x38, 0x4d, 0xa1, 0xd8, 0xf2, + 0x0f, 0xc4, 0x4a, 0x2c, 0xc4, 0xaf, 0xd0, 0xe0, 0xe2, 0xbd, 0x4b, 0x42, 0x18, 0xf7, 0xbe, 0xb8, + 0x1a, 0x91, 0x7b, 0xd8, 0xe8, 0x9a, 0x36, 0xf1, 0x8e, 0x9b, 0xee, 0x61, 0x87, 0x0b, 0x68, 0xb3, + 0x47, 0x18, 0x4e, 0xd3, 0x6a, 0x5e, 0xa4, 0xe5, 0xf9, 0x36, 0x33, 0x7b, 0xe4, 0x9c, 0xc2, 0x07, + 0x93, 0x14, 0xa8, 0xd1, 0x25, 0x3d, 0x3c, 0xae, 0xd7, 0x58, 0x03, 0x60, 0xeb, 0x1b, 0xe6, 0xe1, + 0x7d, 0x6c, 0xf9, 0x04, 0xd6, 0x40, 0xc1, 0x64, 0xa4, 0x47, 0x55, 0xa5, 0x9e, 0x5b, 0x2a, 0xe9, + 0xa5, 0x60, 0x50, 0x2b, 0x6c, 0x73, 0x01, 0x0a, 0xe5, 0xeb, 0xc5, 0x1f, 0x7f, 0xae, 0x65, 0x5e, + 0xfc, 0x55, 0xcf, 0x34, 0x7e, 0xcb, 0x02, 0x75, 0xc7, 0x31, 0xb0, 0xb5, 0xeb, 0xb7, 0xbe, 0x22, + 0x06, 0xdb, 0x30, 0x0c, 0x42, 0x29, 0x22, 0x7d, 0x93, 0x1c, 0xc1, 0x2f, 0x41, 0x91, 0xef, 
0xac, + 0x8d, 0x19, 0x56, 0x95, 0xba, 0xb2, 0x54, 0x5e, 0x79, 0xa0, 0x45, 0xa7, 0x30, 0x0a, 0x50, 0x73, + 0x0f, 0x3b, 0x5c, 0x40, 0x35, 0xce, 0xd6, 0xfa, 0xcb, 0xda, 0xa7, 0xc2, 0xd6, 0x13, 0xc2, 0xb0, + 0x0e, 0x4f, 0x06, 0xb5, 0x4c, 0x30, 0xa8, 0x81, 0x48, 0x86, 0x46, 0x56, 0xe1, 0x73, 0x90, 0xa7, + 0x2e, 0x31, 0xd4, 0xac, 0xb0, 0xfe, 0xa1, 0x36, 0xe9, 0x8c, 0xb5, 0x94, 0x30, 0x77, 0x5d, 0x62, + 0xe8, 0xb7, 0xa4, 0x9b, 0x3c, 0x5f, 0x21, 0x61, 0x14, 0x1a, 0x60, 0x8a, 0x32, 0xcc, 0x7c, 0xaa, + 0xe6, 0x84, 0xf9, 0x8f, 0x6e, 0x66, 0x5e, 0x98, 0xd0, 0xdf, 0x90, 0x0e, 0xa6, 0xc2, 0x35, 0x92, + 0xa6, 0x1b, 0xcf, 0xc1, 0xfc, 0x53, 0xc7, 0x46, 0x84, 0x3a, 0xbe, 0x67, 0x90, 0x0d, 0xc6, 0x3c, + 0xb3, 0xe5, 0x33, 0x42, 0x61, 0x1d, 0xe4, 0x5d, 0xcc, 0xba, 0x22, 0x71, 0xa5, 0x28, 0xbe, 0x67, + 0x98, 0x75, 0x91, 0x40, 0x38, 0xa3, 0x4f, 0xbc, 0x96, 0xd8, 0x7c, 0x8c, 0xb1, 0x4f, 0xbc, 0x16, + 0x12, 0x48, 0xe3, 0x6b, 0x30, 0x13, 0x33, 0x8e, 0x7c, 0x4b, 0x9c, 0x2d, 0x87, 0x12, 0x67, 0xcb, + 0x35, 0x28, 0x0a, 0xe5, 0xf0, 0x11, 0x98, 0xb1, 0x23, 0x9d, 0x3d, 0xb4, 0x43, 0xd5, 0xac, 0xa0, + 0xce, 0x05, 0x83, 0x5a, 0xdc, 0x1c, 0x87, 0xd0, 0x38, 0xb7, 0xf1, 0x53, 0x1e, 0xc0, 0x94, 0xdd, + 0x34, 0x41, 0xc9, 0xc6, 0x3d, 0x42, 0x5d, 0x6c, 0x10, 0xb9, 0xa5, 0xdb, 0x32, 0xe0, 0xd2, 0xd3, + 0x21, 0x80, 0x22, 0xce, 0xe4, 0xcd, 0xc1, 0xb7, 0x41, 0xa1, 0xe3, 0x39, 0xbe, 0x2b, 0x4e, 0xa7, + 0xa4, 0x57, 0x24, 0xa5, 0xf0, 0x09, 0x17, 0xa2, 0x10, 0x83, 0xef, 0x82, 0xe9, 0x3e, 0xf1, 0xa8, + 0xe9, 0xd8, 0x6a, 0x5e, 0xd0, 0x66, 0x24, 0x6d, 0x7a, 0x3f, 0x14, 0xa3, 0x21, 0x0e, 0xef, 0x83, + 0xa2, 0x27, 0x03, 0x57, 0x0b, 0x82, 0x3b, 0x2b, 0xb9, 0xc5, 0x51, 0x06, 0x47, 0x0c, 0xf8, 0x3e, + 0x28, 0x53, 0xbf, 0x35, 0x52, 0x98, 0x12, 0x0a, 0x73, 0x52, 0xa1, 0xbc, 0x1b, 0x41, 0x28, 0xce, + 0xe3, 0xdb, 0xe2, 0x7b, 0x54, 0xa7, 0x93, 0xdb, 0xe2, 0x29, 0x40, 0x02, 0x81, 0x3d, 0x50, 0x39, + 0x30, 0x89, 0xd5, 0xde, 0x25, 0x16, 0x31, 0x98, 0xe3, 0xa9, 0x45, 0x51, 0x7c, 0xab, 0x97, 0x15, + 0x9f, 0xf6, 0x38, 0xae, 0x11, 0xa5, 0x5d, 0xbf, 0x1d, 0x0c, 0x6a, 0x95, 0x04, 0x88, 0x92, 0xd6, + 0xb9, 0x3b, 0x0b, 0xb7, 0x88, 0x35, 0x72, 0x57, 0xba, 0x82, 0xbb, 0x9d, 0xb8, 0xc6, 0xb8, 0xbb, + 0x04, 0x88, 0x92, 0xd6, 0x1b, 0x7f, 0x28, 0xe0, 0xd6, 0xf5, 0xea, 0xf1, 0x1e, 0x28, 0x61, 0xd7, + 0x14, 0x87, 0x3a, 0xac, 0xc4, 0x0a, 0xaf, 0x9a, 0x8d, 0x67, 0xdb, 0xa1, 0x10, 0x45, 0x38, 0x27, + 0x0f, 0x53, 0xcd, 0x6f, 0xed, 0x88, 0x3c, 0x74, 0x49, 0x51, 0x84, 0xc3, 0x35, 0x50, 0x19, 0x2e, + 0x44, 0x09, 0xaa, 0x79, 0xa1, 0x20, 0x36, 0x81, 0xe2, 0x00, 0x4a, 0xf2, 0x1a, 0xbf, 0x67, 0xc1, + 0xc2, 0x2e, 0xb1, 0x0e, 0x5e, 0x4d, 0xcf, 0xfb, 0x22, 0xd1, 0xf3, 0x1e, 0x5d, 0xa1, 0x29, 0xa5, + 0x87, 0xfa, 0x6a, 0xfb, 0xde, 0x2f, 0x59, 0xf0, 0xe6, 0x25, 0x81, 0xc1, 0xef, 0x00, 0xf4, 0xce, + 0xb5, 0x11, 0x99, 0xd1, 0xd5, 0xc9, 0x01, 0x9d, 0x6f, 0x41, 0xfa, 0x9d, 0x60, 0x50, 0x4b, 0x69, + 0x4d, 0x28, 0xc5, 0x0f, 0xfc, 0x5e, 0x01, 0xf3, 0x76, 0x5a, 0x5b, 0x96, 0x59, 0x5f, 0x9b, 0x1c, + 0x41, 0x6a, 0x57, 0xd7, 0xef, 0x06, 0x83, 0x5a, 0x7a, 0xc3, 0x47, 0xe9, 0x0e, 0xf9, 0x17, 0xf6, + 0x4e, 0x2c, 0x51, 0xfc, 0xd2, 0xfc, 0x7f, 0xb5, 0xf6, 0x79, 0xa2, 0xd6, 0x3e, 0xbe, 0x56, 0xad, + 0xc5, 0x22, 0xbd, 0xb0, 0xd4, 0x5a, 0x63, 0xa5, 0xb6, 0x7e, 0xe5, 0x52, 0x8b, 0x5b, 0xbf, 0xbc, + 0xd2, 0x9e, 0x80, 0xc5, 0x8b, 0xa3, 0xba, 0xf6, 0x87, 0xa9, 0xf1, 0x6b, 0x16, 0xcc, 0xbd, 0x1e, + 0x76, 0x6e, 0x76, 0xe9, 0x4f, 0xf3, 0x60, 0xe1, 0xf5, 0x85, 0xbf, 0xfc, 0xc2, 0xf3, 0x11, 0xc1, + 0xa7, 0xc4, 0x93, 0x63, 0xcd, 0xe8, 0xac, 0xf6, 0x28, 0xf1, 0x90, 0x40, 0x60, 0x7d, 0x38, 0xf9, + 0x84, 0x1f, 0x2c, 
0xc0, 0x33, 0x2d, 0xbf, 0x85, 0x72, 0xec, 0x31, 0x41, 0x81, 0xf0, 0x79, 0x5e, + 0x2d, 0xd4, 0x73, 0x4b, 0xe5, 0x95, 0xcd, 0x1b, 0xd7, 0x8a, 0x26, 0x9e, 0x05, 0x5b, 0x36, 0xf3, + 0x8e, 0xa3, 0x09, 0x4b, 0xc8, 0x50, 0xe8, 0x01, 0xbe, 0x05, 0x72, 0xbe, 0xd9, 0x96, 0x03, 0x50, + 0x59, 0x52, 0x72, 0x7b, 0xdb, 0x9b, 0x88, 0xcb, 0x17, 0x0f, 0xe4, 0xcb, 0x42, 0x98, 0x80, 0xb3, + 0x20, 0x77, 0x48, 0x8e, 0xc3, 0x7b, 0x86, 0xf8, 0x4f, 0xa8, 0x83, 0x42, 0x9f, 0x3f, 0x3a, 0x64, + 0x9e, 0xef, 0x4f, 0x8e, 0x34, 0x7a, 0xa8, 0xa0, 0x50, 0x75, 0x3d, 0xfb, 0x50, 0x69, 0xfc, 0xa9, + 0x80, 0xbb, 0x17, 0x16, 0x24, 0x1f, 0x03, 0xb1, 0x65, 0x39, 0x47, 0xa4, 0x2d, 0x7c, 0x17, 0xa3, + 0x31, 0x70, 0x23, 0x14, 0xa3, 0x21, 0x0e, 0xdf, 0x01, 0x53, 0x6d, 0x62, 0x9b, 0xa4, 0x2d, 0x06, + 0xc6, 0x62, 0x54, 0xcb, 0x9b, 0x42, 0x8a, 0x24, 0xca, 0x79, 0x1e, 0xc1, 0xd4, 0xb1, 0xe5, 0x88, + 0x3a, 0xe2, 0x21, 0x21, 0x45, 0x12, 0x85, 0x1b, 0x60, 0x86, 0xf0, 0x30, 0xc5, 0x26, 0xb6, 0x3c, + 0xcf, 0x19, 0x9e, 0xec, 0x82, 0x54, 0x98, 0xd9, 0x4a, 0xc2, 0x68, 0x9c, 0xdf, 0xf8, 0x37, 0x0b, + 0xd4, 0x8b, 0xda, 0x1e, 0x3c, 0x8c, 0xa6, 0x18, 0x01, 0x8a, 0x41, 0xaa, 0xbc, 0xa2, 0x5d, 0xfd, + 0xca, 0x70, 0x35, 0x7d, 0x5e, 0x46, 0x53, 0x89, 0x4b, 0x63, 0x93, 0x8f, 0x58, 0xc2, 0x23, 0x30, + 0x6b, 0x27, 0x1f, 0x14, 0xe1, 0x4c, 0x56, 0x5e, 0x59, 0xbe, 0xd6, 0x05, 0x11, 0x2e, 0x55, 0xe9, + 0x72, 0x76, 0x0c, 0xa0, 0xe8, 0x9c, 0x13, 0xb8, 0x02, 0x80, 0x69, 0x1b, 0x4e, 0xcf, 0xb5, 0x08, + 0x23, 0x22, 0x81, 0xc5, 0xa8, 0x5b, 0x6e, 0x8f, 0x10, 0x14, 0x63, 0xa5, 0x65, 0x3e, 0x7f, 0xbd, + 0xcc, 0xeb, 0x8f, 0x4f, 0xce, 0xaa, 0x99, 0x97, 0x67, 0xd5, 0xcc, 0xe9, 0x59, 0x35, 0xf3, 0x22, + 0xa8, 0x2a, 0x27, 0x41, 0x55, 0x79, 0x19, 0x54, 0x95, 0xd3, 0xa0, 0xaa, 0xfc, 0x1d, 0x54, 0x95, + 0x1f, 0xfe, 0xa9, 0x66, 0x3e, 0xab, 0x4f, 0xfa, 0x33, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x46, 0xf7, 0xe0, 0x3d, 0xaf, 0x10, 0x00, 0x00, } func (m ExtraValue) Marshal() (dAtA []byte, err error) { @@ -712,6 +717,30 @@ func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.FieldSelector != nil { + { + size, err := m.FieldSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -1346,6 +1375,14 @@ func (m *ResourceAttributes) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + if m.FieldSelector != nil { + l = m.FieldSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1582,6 +1619,8 @@ func (this *ResourceAttributes) String() string { `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `FieldSelector:` + strings.Replace(fmt.Sprintf("%v", this.FieldSelector), "FieldSelectorAttributes", "v11.FieldSelectorAttributes", 1) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelectorAttributes", "v11.LabelSelectorAttributes", 1) + `,`, `}`, }, "") return s @@ -2437,6 
+2476,78 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldSelector == nil { + m.FieldSelector = &v11.FieldSelectorAttributes{} + } + if err := m.FieldSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v11.LabelSelectorAttributes{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/constraint/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/authorization/v1beta1/generated.proto index 43bea7aa1..8738768b8 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.authorization.v1beta1; +import "k8s.io/api/authorization/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -44,7 +45,7 @@ message LocalSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace // you made the request against. If empty, it is defaulted. @@ -111,6 +112,14 @@ message ResourceAttributes { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional optional string name = 7; + + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // +optional + optional .k8s.io.api.authorization.v1.FieldSelectorAttributes fieldSelector = 8; + + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. 
+ // +optional + optional .k8s.io.api.authorization.v1.LabelSelectorAttributes labelSelector = 9; } // ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, @@ -145,7 +154,7 @@ message SelfSubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. user and groups must be empty optional SelfSubjectAccessReviewSpec spec = 2; @@ -177,7 +186,7 @@ message SelfSubjectRulesReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated. optional SelfSubjectRulesReviewSpec spec = 2; @@ -198,7 +207,7 @@ message SubjectAccessReview { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec holds information about the request being evaluated optional SubjectAccessReviewSpec spec = 2; diff --git a/constraint/vendor/k8s.io/api/authorization/v1beta1/types.go b/constraint/vendor/k8s.io/api/authorization/v1beta1/types.go index ef3a501b0..8b8e5a986 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/constraint/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -19,6 +19,7 @@ package v1beta1 import ( "fmt" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -124,6 +125,12 @@ type ResourceAttributes struct { // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. // +optional Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` + // fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it. + // +optional + FieldSelector *authorizationv1.FieldSelectorAttributes `json:"fieldSelector,omitempty" protobuf:"bytes,8,opt,name=fieldSelector"` + // labelSelector describes the limitation on access based on labels. It can only limit access, not broaden it. 
+ // +optional + LabelSelector *authorizationv1.LabelSelectorAttributes `json:"labelSelector,omitempty" protobuf:"bytes,9,opt,name=labelSelector"` } // NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface diff --git a/constraint/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index e0846be7a..bb1352a2d 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -59,14 +59,16 @@ func (NonResourceRule) SwaggerDoc() map[string]string { } var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. \"*\" means all.", - "resource": "Resource is one of the existing resource types. \"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "fieldSelector": "fieldSelector describes the limitation on access based on field. It can only limit access, not broaden it.", + "labelSelector": "labelSelector describes the limitation on access based on labels. 
It can only limit access, not broaden it.", } func (ResourceAttributes) SwaggerDoc() map[string]string { diff --git a/constraint/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go index 13f09cf2d..d76993dba 100644 --- a/constraint/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/authorization/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -118,6 +119,16 @@ func (in *NonResourceRule) DeepCopy() *NonResourceRule { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { *out = *in + if in.FieldSelector != nil { + in, out := &in.FieldSelector, &out.FieldSelector + *out = new(v1.FieldSelectorAttributes) + (*in).DeepCopyInto(*out) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelectorAttributes) + (*in).DeepCopyInto(*out) + } return } @@ -201,7 +212,7 @@ func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReview if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes @@ -299,7 +310,7 @@ func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { if in.ResourceAttributes != nil { in, out := &in.ResourceAttributes, &out.ResourceAttributes *out = new(ResourceAttributes) - **out = **in + (*in).DeepCopyInto(*out) } if in.NonResourceAttributes != nil { in, out := &in.NonResourceAttributes, &out.NonResourceAttributes diff --git a/constraint/vendor/k8s.io/api/autoscaling/v1/doc.go b/constraint/vendor/k8s.io/api/autoscaling/v1/doc.go index 8c9c09b5c..d64c9cbc1 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/constraint/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -17,5 +17,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/constraint/vendor/k8s.io/api/autoscaling/v1/generated.proto b/constraint/vendor/k8s.io/api/autoscaling/v1/generated.proto index 1dbafd1a5..68c35b6b2 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -51,7 +51,7 @@ message ContainerResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; // container is the name of the container in the pods of the scaling target. optional string container = 5; @@ -78,7 +78,7 @@ message ContainerResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. 
- optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; // container is the name of the container in the pods of the scaling taget optional string container = 4; @@ -108,17 +108,17 @@ message ExternalMetricSource { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; } // ExternalMetricStatus indicates the current value of a global metric @@ -131,21 +131,21 @@ message ExternalMetricStatus { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // currentValue is the current value of the metric (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; } // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional @@ -168,7 +168,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -184,7 +184,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. 
repeated HorizontalPodAutoscaler items = 2; @@ -222,7 +222,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is the current number of replicas of pods managed by this autoscaler. optional int32 currentReplicas = 3; @@ -241,8 +241,6 @@ message HorizontalPodAutoscalerStatus { message MetricSpec { // type is the type of metric source. It should be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -269,7 +267,6 @@ message MetricSpec { // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional optional ContainerResourceMetricSource containerResource = 7; @@ -286,8 +283,6 @@ message MetricSpec { message MetricStatus { // type is the type of metric source. It will be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -336,18 +331,18 @@ message ObjectMetricSource { optional string metricName = 2; // targetValue is the target value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric. // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // ObjectMetricStatus indicates the current value of a metric describing a @@ -360,18 +355,18 @@ message ObjectMetricStatus { optional string metricName = 2; // currentValue is the current value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. 
// When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // PodsMetricSource indicates how to scale on a metric describing each pod in @@ -384,13 +379,13 @@ message PodsMetricSource { // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // PodsMetricStatus indicates the current value of a metric describing each pod in @@ -401,13 +396,13 @@ message PodsMetricStatus { // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // ResourceMetricSource indicates how to scale on a resource metric known to @@ -431,7 +426,7 @@ message ResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; } // ResourceMetricStatus indicates the current value of a resource metric known to @@ -455,14 +450,14 @@ message ResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } // Scale represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional diff --git a/constraint/vendor/k8s.io/api/autoscaling/v1/types.go b/constraint/vendor/k8s.io/api/autoscaling/v1/types.go index 450829017..85c609e5c 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/constraint/vendor/k8s.io/api/autoscaling/v1/types.go @@ -83,6 +83,7 @@ type HorizontalPodAutoscalerStatus struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { @@ -101,6 +102,7 @@ type HorizontalPodAutoscaler struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { @@ -114,6 +116,7 @@ type HorizontalPodAutoscalerList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // Scale represents a scaling request for a resource. type Scale struct { @@ -190,8 +193,6 @@ const ( type MetricSpec struct { // type is the type of metric source. It should be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -218,7 +219,6 @@ type MetricSpec struct { // current scale target (e.g. CPU or memory). Such metrics are built in to // Kubernetes, and have special scaling options on top of those available // to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` @@ -352,8 +352,6 @@ type ExternalMetricSource struct { type MetricStatus struct { // type is the type of metric source. It will be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object diff --git a/constraint/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 37c2b36a5..ba43d06c1 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -147,11 +147,11 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. 
It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -161,7 +161,7 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", diff --git a/constraint/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..36d86a5ec --- /dev/null +++ b/constraint/vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscalerList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Scale) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2/doc.go b/constraint/vendor/k8s.io/api/autoscaling/v2/doc.go index f96a059b6..aafa2d4de 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v2/doc.go +++ b/constraint/vendor/k8s.io/api/autoscaling/v2/doc.go @@ -17,5 +17,6 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v2 // import "k8s.io/api/autoscaling/v2" diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2/generated.proto b/constraint/vendor/k8s.io/api/autoscaling/v2/generated.proto index a9e36975f..4e6dc0592 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v2/generated.proto +++ b/constraint/vendor/k8s.io/api/autoscaling/v2/generated.proto @@ -147,7 +147,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -190,7 +190,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -206,7 +206,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -258,7 +258,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -293,7 +293,7 @@ message MetricIdentifier { // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; } // MetricSpec specifies how to scale based on a single metric @@ -301,8 +301,6 @@ message MetricIdentifier { message MetricSpec { // type is the type of metric source. It should be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -329,7 +327,6 @@ message MetricSpec { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional optional ContainerResourceMetricSource containerResource = 7; @@ -346,8 +343,6 @@ message MetricSpec { message MetricStatus { // type is the type of metric source. It will be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -393,12 +388,12 @@ message MetricTarget { // value is the target value of the metric (as a quantity). 
// +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of @@ -412,12 +407,12 @@ message MetricTarget { message MetricValueStatus { // value is the current value of the metric (as a quantity). // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; // currentAverageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2/types.go b/constraint/vendor/k8s.io/api/autoscaling/v2/types.go index c12a83df1..99e8db09d 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v2/types.go +++ b/constraint/vendor/k8s.io/api/autoscaling/v2/types.go @@ -26,6 +26,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource @@ -101,8 +102,6 @@ type CrossVersionObjectReference struct { type MetricSpec struct { // type is the type of metric source. It should be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -129,7 +128,6 @@ type MetricSpec struct { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` @@ -452,8 +450,6 @@ type HorizontalPodAutoscalerCondition struct { type MetricStatus struct { // type is the type of metric source. It will be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. 
- // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -573,6 +569,7 @@ type MetricValueStatus struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.23 // HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go index 1941b1ef5..649cd04a0 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go @@ -185,11 +185,11 @@ func (MetricIdentifier) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. 
It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -199,7 +199,7 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..99ae74865 --- /dev/null +++ b/constraint/vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *HorizontalPodAutoscalerList) APILifecycleIntroduced() (major, minor int) { + return 1, 23 +} diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/constraint/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index e2119d555..4b71732ab 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -51,7 +51,7 @@ message ContainerResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; // container is the name of the container in the pods of the scaling target optional string container = 4; @@ -78,7 +78,7 @@ message ContainerResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; // container is the name of the container in the pods of the scaling target optional string container = 4; @@ -108,17 +108,17 @@ message ExternalMetricSource { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // targetValue is the target value of the metric (as a quantity). // Mutually exclusive with TargetAverageValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // targetAverageValue is the target per-pod value of global metric (as a quantity). // Mutually exclusive with TargetValue. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; } // ExternalMetricStatus indicates the current value of a global metric @@ -131,14 +131,14 @@ message ExternalMetricStatus { // metricSelector is used to identify a specific time series // within a given metric. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; // currentValue is the current value of the metric (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // currentAverageValue is the current value of metric averaged over autoscaled pods. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; } // HorizontalPodAutoscaler is the configuration for a horizontal pod @@ -148,7 +148,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -172,7 +172,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -188,7 +188,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -233,7 +233,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -260,8 +260,6 @@ message HorizontalPodAutoscalerStatus { message MetricSpec { // type is the type of metric source. It should be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -288,7 +286,6 @@ message MetricSpec { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional optional ContainerResourceMetricSource containerResource = 7; @@ -305,8 +302,6 @@ message MetricSpec { message MetricStatus { // type is the type of metric source. It will be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -355,18 +350,18 @@ message ObjectMetricSource { optional string metricName = 2; // targetValue is the target value of the metric (as a quantity). 
- optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // ObjectMetricStatus indicates the current value of a metric describing a @@ -379,18 +374,18 @@ message ObjectMetricStatus { optional string metricName = 2; // currentValue is the current value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; } // PodsMetricSource indicates how to scale on a metric describing each pod in @@ -403,13 +398,13 @@ message PodsMetricSource { // targetAverageValue is the target value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping // When unset, just the metricName will be used to gather metrics. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // PodsMetricStatus indicates the current value of a metric describing each pod in @@ -420,13 +415,13 @@ message PodsMetricStatus { // currentAverageValue is the current value of the average of the // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; // selector is the string-encoded form of a standard kubernetes label selector for the given metric // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; } // ResourceMetricSource indicates how to scale on a resource metric known to @@ -450,7 +445,7 @@ message ResourceMetricSource { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; } // ResourceMetricStatus indicates the current value of a resource metric known to @@ -474,6 +469,6 @@ message ResourceMetricStatus { // resource metric across all relevant pods, as a raw value (instead of as // a percentage of the request), similar to the "pods" metric source type. // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/constraint/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 193cc4354..c3abdd9bd 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/constraint/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -96,8 +96,6 @@ const ( type MetricSpec struct { // type is the type of metric source. It should be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -121,7 +119,6 @@ type MetricSpec struct { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` // external refers to a global metric that is not associated @@ -311,8 +308,6 @@ type HorizontalPodAutoscalerCondition struct { type MetricStatus struct { // type is the type of metric source. It will be one of "ContainerResource", // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object. 
- // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index d656ee416..c7c72bf35 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -148,11 +148,11 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -162,7 +162,7 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. 
It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/constraint/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 41f7a16ea..941d9752a 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/constraint/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -147,7 +147,7 @@ message HorizontalPodAutoscaler { // metadata is the standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. @@ -190,7 +190,7 @@ message HorizontalPodAutoscalerCondition { // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is the reason for the condition's last transition. // +optional @@ -206,7 +206,7 @@ message HorizontalPodAutoscalerCondition { message HorizontalPodAutoscalerList { // metadata is the standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of horizontal pod autoscaler objects. repeated HorizontalPodAutoscaler items = 2; @@ -258,7 +258,7 @@ message HorizontalPodAutoscalerStatus { // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, // used by the autoscaler to control how often the number of pods is changed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; // currentReplicas is current number of replicas of pods managed by this autoscaler, // as last seen by the autoscaler. @@ -289,7 +289,7 @@ message MetricIdentifier { // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. // When unset, just the metricName will be used to gather metrics. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; } // MetricSpec specifies how to scale based on a single metric @@ -297,8 +297,6 @@ message MetricIdentifier { message MetricSpec { // type is the type of metric source. It should be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each mapping to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -325,7 +323,6 @@ message MetricSpec { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional optional ContainerResourceMetricSource containerResource = 7; @@ -342,8 +339,6 @@ message MetricSpec { message MetricStatus { // type is the type of metric source. It will be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled optional string type = 1; // object refers to a metric describing a single kubernetes object @@ -389,12 +384,12 @@ message MetricTarget { // value is the target value of the metric (as a quantity). // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; // averageValue is the target value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; // averageUtilization is the target value of the average of the // resource metric across all relevant pods, represented as a percentage of @@ -408,12 +403,12 @@ message MetricTarget { message MetricValueStatus { // value is the current value of the metric (as a quantity). // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; // averageValue is the current value of the average of the // metric across all relevant pods (as a quantity) // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; // averageUtilization is the current value of the average of the // resource metric across all relevant pods, represented as a percentage of diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/constraint/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 2fee0b8a0..bc9677b14 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/constraint/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -104,8 +104,6 @@ type CrossVersionObjectReference struct { type MetricSpec struct { // type is the type of metric source. It should be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each mapping to a matching field in the object. 
- // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object @@ -132,7 +130,6 @@ type MetricSpec struct { // each pod of the current scale target (e.g. CPU or memory). Such metrics are // built in to Kubernetes, and have special scaling options on top of those // available to normal per-pod metrics using the "pods" source. - // This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag. // +optional ContainerResource *ContainerResourceMetricSource `json:"containerResource,omitempty" protobuf:"bytes,7,opt,name=containerResource"` @@ -449,8 +446,6 @@ type HorizontalPodAutoscalerCondition struct { type MetricStatus struct { // type is the type of metric source. It will be one of "ContainerResource", "External", // "Object", "Pods" or "Resource", each corresponds to a matching field in the object. - // Note: "ContainerResource" type is available on when the feature-gate - // HPAContainerMetrics is enabled Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` // object refers to a metric describing a single kubernetes object diff --git a/constraint/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 4af7d0ec0..5d4bb86b8 100644 --- a/constraint/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -185,11 +185,11 @@ func (MetricIdentifier) SwaggerDoc() map[string]string { var map_MetricSpec = map[string]string{ "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "containerResource": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", } @@ -199,7 +199,7 @@ func (MetricSpec) SwaggerDoc() map[string]string { var map_MetricStatus = map[string]string{ "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled", + "type": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", diff --git a/constraint/vendor/k8s.io/api/batch/v1/doc.go b/constraint/vendor/k8s.io/api/batch/v1/doc.go index c4a8db6e7..cb5cbb600 100644 --- a/constraint/vendor/k8s.io/api/batch/v1/doc.go +++ b/constraint/vendor/k8s.io/api/batch/v1/doc.go @@ -17,5 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/batch/v1" diff --git a/constraint/vendor/k8s.io/api/batch/v1/generated.proto b/constraint/vendor/k8s.io/api/batch/v1/generated.proto index 833b118d0..361ebdca1 100644 --- a/constraint/vendor/k8s.io/api/batch/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/batch/v1/generated.proto @@ -34,7 +34,7 @@ message CronJob { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -52,7 +52,7 @@ message CronJobList { // Standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CronJobs. repeated CronJob items = 2; @@ -113,15 +113,15 @@ message CronJobStatus { // A list of pointers to currently running jobs. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.ObjectReference active = 1; + repeated .k8s.io.api.core.v1.ObjectReference active = 1; // Information when was the last time the job was successfully scheduled. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; // Information when was the last time the job successfully completed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } // Job represents the configuration of a single job. @@ -129,7 +129,7 @@ message Job { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a job. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -152,11 +152,11 @@ message JobCondition { // Last time the condition was checked. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transit from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. // +optional @@ -172,7 +172,7 @@ message JobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of Jobs. repeated Job items = 2; @@ -213,8 +213,6 @@ message JobSpec { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is beta-level. It can be used when the `JobPodFailurePolicy` - // feature gate is enabled (enabled by default). // +optional optional PodFailurePolicy podFailurePolicy = 11; @@ -224,8 +222,8 @@ message JobSpec { // When the field is specified, it must be immutable and works only for the Indexed Jobs. // Once the Job meets the SuccessPolicy, the lingering pods are terminated. // - // This field is alpha-level. To use this field, you must enable the - // `JobSuccessPolicy` feature gate (disabled by default). + // This field is beta-level. To use this field, you must enable the + // `JobSuccessPolicy` feature gate (enabled by default). // +optional optional SuccessPolicy successPolicy = 16; @@ -262,7 +260,7 @@ message JobSpec { // Normally, the system sets this field for you. 
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; // manualSelector controls generation of pod labels and pod selectors. // Leave `manualSelector` unset unless you are certain what you are doing. @@ -280,7 +278,7 @@ message JobSpec { // Describes the pod that will be created when executing a job. // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure". // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - optional k8s.io.api.core.v1.PodTemplateSpec template = 6; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 6; // ttlSecondsAfterFinished limits the lifetime of a Job that has finished // execution (either Complete or Failed). If this field is set, @@ -349,10 +347,11 @@ message JobSpec { // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - // all characters before the first "/" must be a valid subdomain as defined // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path - // characters as defined by RFC 3986. The value cannot exceed 64 characters. + // characters as defined by RFC 3986. The value cannot exceed 63 characters. + // This field is immutable. // - // This field is alpha-level. The job controller accepts setting the field - // when the feature gate JobManagedBy is enabled (disabled by default). + // This field is beta-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (enabled by default). // +optional optional string managedBy = 15; } @@ -387,7 +386,7 @@ message JobStatus { // The field cannot be modified while the job is unsuspended or finished. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; // Represents time when the job was completed. It is not guaranteed to // be set in happens-before order across separate operations. @@ -396,7 +395,7 @@ message JobStatus { // The value cannot be updated or removed. The value indicates the same or // later point in time as the startTime field. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; // The number of pending and running pods which are not terminating (without // a deletionTimestamp). @@ -466,8 +465,8 @@ message JobStatus { // +optional optional UncountedTerminatedPods uncountedTerminatedPods = 8; - // The number of pods which have a Ready condition. - // +optional + // The number of active pods which have a Ready condition and are not + // terminating (without a deletionTimestamp). optional int32 ready = 9; } @@ -476,7 +475,7 @@ message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status diff --git a/constraint/vendor/k8s.io/api/batch/v1/types.go b/constraint/vendor/k8s.io/api/batch/v1/types.go index 49b0ec644..8e9a761b9 100644 --- a/constraint/vendor/k8s.io/api/batch/v1/types.go +++ b/constraint/vendor/k8s.io/api/batch/v1/types.go @@ -29,7 +29,6 @@ const ( // CronJobScheduledTimestampAnnotation is the scheduled timestamp annotation for the Job. // It records the original/expected scheduled timestamp for the running job, represented in RFC3339. - // The CronJob controller adds this annotation if the CronJobsScheduledAnnotation feature gate (beta in 1.28) is enabled. CronJobScheduledTimestampAnnotation = labelPrefix + "cronjob-scheduled-timestamp" JobCompletionIndexAnnotation = labelPrefix + "job-completion-index" @@ -64,6 +63,7 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // Job represents the configuration of a single job. type Job struct { @@ -85,6 +85,7 @@ type Job struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // JobList is a collection of jobs. type JobList struct { @@ -174,7 +175,7 @@ type PodFailurePolicyOnExitCodesRequirement struct { // When specified, it should match one the container or initContainer // names in the pod template. // +optional - ContainerName *string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"` + ContainerName *string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` // Represents the relationship between the container exit code(s) and the // specified values. Containers completed with success (exit code 0) are @@ -234,14 +235,14 @@ type PodFailurePolicyRule struct { // Represents the requirement on the container exit codes. // +optional - OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes" protobuf:"bytes,2,opt,name=onExitCodes"` + OnExitCodes *PodFailurePolicyOnExitCodesRequirement `json:"onExitCodes,omitempty" protobuf:"bytes,2,opt,name=onExitCodes"` // Represents the requirement on the pod conditions. The requirement is represented // as a list of pod condition patterns. The requirement is satisfied if at // least one pattern matches an actual pod condition. At most 20 elements are allowed. // +listType=atomic // +optional - OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions" protobuf:"bytes,3,opt,name=onPodConditions"` + OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions,omitempty" protobuf:"bytes,3,opt,name=onPodConditions"` } // PodFailurePolicy describes how failed pods influence the backoffLimit. @@ -336,8 +337,6 @@ type JobSpec struct { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is beta-level. It can be used when the `JobPodFailurePolicy` - // feature gate is enabled (enabled by default). // +optional PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"` @@ -347,8 +346,8 @@ type JobSpec struct { // When the field is specified, it must be immutable and works only for the Indexed Jobs. // Once the Job meets the SuccessPolicy, the lingering pods are terminated. // - // This field is alpha-level. 
To use this field, you must enable the - // `JobSuccessPolicy` feature gate (disabled by default). + // This field is beta-level. To use this field, you must enable the + // `JobSuccessPolicy` feature gate (enabled by default). // +optional SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"` @@ -477,10 +476,11 @@ type JobSpec struct { // The value must be a valid domain-prefixed path (e.g. acme.io/foo) - // all characters before the first "/" must be a valid subdomain as defined // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path - // characters as defined by RFC 3986. The value cannot exceed 64 characters. + // characters as defined by RFC 3986. The value cannot exceed 63 characters. + // This field is immutable. // - // This field is alpha-level. The job controller accepts setting the field - // when the feature gate JobManagedBy is enabled (disabled by default). + // This field is beta-level. The job controller accepts setting the field + // when the feature gate JobManagedBy is enabled (enabled by default). // +optional ManagedBy *string `json:"managedBy,omitempty" protobuf:"bytes,15,opt,name=managedBy"` } @@ -594,8 +594,8 @@ type JobStatus struct { // +optional UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` - // The number of pods which have a Ready condition. - // +optional + // The number of active pods which have a Ready condition and are not + // terminating (without a deletionTimestamp). Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` } @@ -633,7 +633,6 @@ const ( // JobReasonPodFailurePolicy reason indicates a job failure condition is added due to // a failed pod matching a pod failure policy rule // https://kep.k8s.io/3329 - // This is currently a beta field. JobReasonPodFailurePolicy string = "PodFailurePolicy" // JobReasonBackOffLimitExceeded reason indicates that pods within a job have failed a number of // times higher than backOffLimit times. @@ -649,8 +648,13 @@ const ( // JobReasonSuccessPolicy reason indicates a SuccessCriteriaMet condition is added due to // a Job met successPolicy. // https://kep.k8s.io/3998 - // This is currently an alpha field. + // This is currently a beta field. JobReasonSuccessPolicy string = "SuccessPolicy" + // JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to + // a number of succeeded Job pods met completions. + // - https://kep.k8s.io/3998 + // This is currently a beta field. + JobReasonCompletionsReached string = "CompletionsReached" ) // JobCondition describes current state of a job. @@ -688,6 +692,7 @@ type JobTemplateSpec struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // CronJob represents the configuration of a single cron job. type CronJob struct { @@ -709,6 +714,7 @@ type CronJob struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // CronJobList is a collection of cron jobs. 
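Note on the batch/v1 JobSpec hunks above: with successPolicy and managedBy now documented as beta, here is a minimal, illustrative Go sketch of a Job that sets both. It assumes the upstream batch/v1 SuccessPolicy and SuccessPolicyRule types (not shown in this hunk) and the k8s.io/utils/ptr helper already in this module's dependency graph; the managedBy value and all names are hypothetical.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	indexed := batchv1.IndexedCompletion
	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example-indexed-job"},
		Spec: batchv1.JobSpec{
			Completions:    ptr.To[int32](10),
			Parallelism:    ptr.To[int32](5),
			CompletionMode: &indexed, // successPolicy works only for Indexed Jobs
			// Beta in this bump: declare the Job succeeded once index 0 finishes;
			// lingering pods are then terminated (JobSuccessPolicy gate, on by default).
			SuccessPolicy: &batchv1.SuccessPolicy{
				Rules: []batchv1.SuccessPolicyRule{{SucceededIndexes: ptr.To("0")}},
			},
			// Beta in this bump: hand reconciliation to a custom controller.
			// The domain-prefixed value below is purely illustrative (JobManagedBy gate).
			ManagedBy: ptr.To("example.io/custom-job-controller"),
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers:    []corev1.Container{{Name: "main", Image: "busybox"}},
				},
			},
		},
	}
	fmt.Println(job.Name, *job.Spec.ManagedBy)
}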
type CronJobList struct { diff --git a/constraint/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 84073b8d8..893f3371f 100644 --- a/constraint/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -115,8 +115,8 @@ var map_JobSpec = map[string]string{ "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", - "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", - "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is alpha-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (disabled by default).", + "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.", + "successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. 
Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is beta-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", @@ -127,7 +127,7 @@ var map_JobSpec = map[string]string{ "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. 
TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", - "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 64 characters.\n\nThis field is alpha-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (disabled by default).", + "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).", } func (JobSpec) SwaggerDoc() map[string]string { @@ -146,7 +146,7 @@ var map_JobStatus = map[string]string{ "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", "failedIndexes": "FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". The set of failed indexes cannot overlap with the set of completed indexes.\n\nThis field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. 
Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null. The structure is empty for finished jobs.", - "ready": "The number of pods which have a Ready condition.", + "ready": "The number of active pods which have a Ready condition and are not terminating (without a deletionTimestamp).", } func (JobStatus) SwaggerDoc() map[string]string { diff --git a/constraint/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..b76cb0924 --- /dev/null +++ b/constraint/vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CronJob) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CronJobList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Job) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *JobList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/constraint/vendor/k8s.io/api/batch/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/batch/v1beta1/generated.proto index ac774f19a..6dd322128 100644 --- a/constraint/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -35,7 +35,7 @@ message CronJob { // Standard object's metadata. 
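The zz_generated.prerelease-lifecycle.go file added above (similar files follow below for certificates/v1 and coordination/v1) only introduces introspection methods. A small sketch of what they expose, using the values from the generated code above; purely illustrative.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	var job batchv1.Job
	var cron batchv1.CronJob
	jMaj, jMin := job.APILifecycleIntroduced()  // 1, 2 per the generated file above
	cMaj, cMin := cron.APILifecycleIntroduced() // 1, 21 per the generated file above
	fmt.Printf("Job since %d.%d, CronJob since %d.%d\n", jMaj, jMin, cMaj, cMin)
}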
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -53,7 +53,7 @@ message CronJobList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CronJobs. repeated CronJob items = 2; @@ -116,15 +116,15 @@ message CronJobStatus { // A list of pointers to currently running jobs. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.ObjectReference active = 1; + repeated .k8s.io.api.core.v1.ObjectReference active = 1; // Information when was the last time the job was successfully scheduled. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; // Information when was the last time the job successfully completed. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastSuccessfulTime = 5; } // JobTemplateSpec describes the data a Job should have when created from a template @@ -132,11 +132,11 @@ message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional - optional k8s.io.api.batch.v1.JobSpec spec = 2; + optional .k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/constraint/vendor/k8s.io/api/certificates/v1/doc.go b/constraint/vendor/k8s.io/api/certificates/v1/doc.go index fe3ea3af8..78434478e 100644 --- a/constraint/vendor/k8s.io/api/certificates/v1/doc.go +++ b/constraint/vendor/k8s.io/api/certificates/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=certificates.k8s.io package v1 // import "k8s.io/api/certificates/v1" diff --git a/constraint/vendor/k8s.io/api/certificates/v1/generated.proto b/constraint/vendor/k8s.io/api/certificates/v1/generated.proto index 968cc2564..dac7c7f5f 100644 --- a/constraint/vendor/k8s.io/api/certificates/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/certificates/v1/generated.proto @@ -41,7 +41,7 @@ option go_package = "k8s.io/api/certificates/v1"; // or to obtain certificates from custom non-Kubernetes signers. 
message CertificateSigningRequest { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the certificate request, and is immutable after creation. // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. @@ -87,19 +87,19 @@ message CertificateSigningRequestCondition { // lastUpdateTime is the time of the last update to this condition // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; // lastTransitionTime is the time the condition last transitioned from one status to another. // If unset, when a new condition type is added or an existing condition's status is changed, // the server defaults this to the current time. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; } // CertificateSigningRequestList is a collection of CertificateSigningRequest objects message CertificateSigningRequestList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a collection of CertificateSigningRequest objects repeated CertificateSigningRequest items = 2; diff --git a/constraint/vendor/k8s.io/api/certificates/v1/types.go b/constraint/vendor/k8s.io/api/certificates/v1/types.go index 92b2018e7..ba8009840 100644 --- a/constraint/vendor/k8s.io/api/certificates/v1/types.go +++ b/constraint/vendor/k8s.io/api/certificates/v1/types.go @@ -27,6 +27,7 @@ import ( // +genclient:nonNamespaced // +genclient:method=UpdateApproval,verb=update,subresource=approval,input=k8s.io/api/certificates/v1.CertificateSigningRequest,result=k8s.io/api/certificates/v1.CertificateSigningRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // CertificateSigningRequest objects provide a mechanism to obtain x509 certificates // by submitting a certificate signing request, and having it asynchronously approved and issued. @@ -262,6 +263,7 @@ type CertificateSigningRequestCondition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // CertificateSigningRequestList is a collection of CertificateSigningRequest objects type CertificateSigningRequestList struct { diff --git a/constraint/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..3a2b27403 --- /dev/null +++ b/constraint/vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CertificateSigningRequest) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CertificateSigningRequestList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/constraint/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/constraint/vendor/k8s.io/api/certificates/v1alpha1/generated.proto index b0ebc4bd4..7155f778c 100644 --- a/constraint/vendor/k8s.io/api/certificates/v1alpha1/generated.proto +++ b/constraint/vendor/k8s.io/api/certificates/v1alpha1/generated.proto @@ -46,7 +46,7 @@ option go_package = "k8s.io/api/certificates/v1alpha1"; message ClusterTrustBundle { // metadata contains the object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the signer (if any) and trust anchors. optional ClusterTrustBundleSpec spec = 2; @@ -57,7 +57,7 @@ message ClusterTrustBundleList { // metadata contains the list metadata. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a collection of ClusterTrustBundle objects repeated ClusterTrustBundle items = 2; diff --git a/constraint/vendor/k8s.io/api/certificates/v1alpha1/types.go b/constraint/vendor/k8s.io/api/certificates/v1alpha1/types.go index 1a9fda011..beef02599 100644 --- a/constraint/vendor/k8s.io/api/certificates/v1alpha1/types.go +++ b/constraint/vendor/k8s.io/api/certificates/v1alpha1/types.go @@ -23,6 +23,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:deprecated=1.34 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors @@ -90,6 +91,7 @@ type ClusterTrustBundleSpec struct { } // +k8s:prerelease-lifecycle-gen:introduced=1.26 +// +k8s:prerelease-lifecycle-gen:deprecated=1.34 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterTrustBundleList is a collection of ClusterTrustBundle objects diff --git a/constraint/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go index dfafa656c..3121a87d0 100644 --- a/constraint/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/constraint/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -30,13 +30,13 @@ func (in *ClusterTrustBundle) APILifecycleIntroduced() (major, minor int) { // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major 
and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *ClusterTrustBundle) APILifecycleDeprecated() (major, minor int) { - return 1, 29 + return 1, 34 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ClusterTrustBundle) APILifecycleRemoved() (major, minor int) { - return 1, 32 + return 1, 37 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. @@ -48,11 +48,11 @@ func (in *ClusterTrustBundleList) APILifecycleIntroduced() (major, minor int) { // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) { - return 1, 29 + return 1, 34 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) { - return 1, 32 + return 1, 37 } diff --git a/constraint/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/certificates/v1beta1/generated.proto index f70f01ef7..f3ec4c06e 100644 --- a/constraint/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/certificates/v1beta1"; // Describes a certificate signing request message CertificateSigningRequest { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the certificate request, and is immutable after creation. // Only the request, signerName, expirationSeconds, and usages fields can be set on creation. @@ -65,18 +65,18 @@ message CertificateSigningRequestCondition { // timestamp for the last update to this condition // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; // lastTransitionTime is the time the condition last transitioned from one status to another. // If unset, when a new condition type is added or an existing condition's status is changed, // the server defaults this to the current time. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; } message CertificateSigningRequestList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated CertificateSigningRequest items = 2; } diff --git a/constraint/vendor/k8s.io/api/coordination/v1/doc.go b/constraint/vendor/k8s.io/api/coordination/v1/doc.go index fc2f4f2c6..9b2fbbda3 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1/doc.go +++ b/constraint/vendor/k8s.io/api/coordination/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=coordination.k8s.io diff --git a/constraint/vendor/k8s.io/api/coordination/v1/generated.pb.go b/constraint/vendor/k8s.io/api/coordination/v1/generated.pb.go index 8b7ab98ca..cf6702aef 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ b/constraint/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -139,40 +139,44 @@ func init() { } var fileDescriptor_239d5a4df3139dce = []byte{ - // 524 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6f, 0xd3, 0x30, - 0x18, 0xc6, 0x9b, 0xb5, 0x95, 0x56, 0x97, 0x8d, 0x2a, 0xea, 0x21, 0xea, 0x21, 0x19, 0x95, 0x90, - 0x26, 0x24, 0x1c, 0x3a, 0x21, 0x84, 0x38, 0x8d, 0x08, 0x01, 0x93, 0x3a, 0x21, 0x65, 0x3b, 0xa1, - 0x1d, 0x70, 0x93, 0x97, 0xd4, 0x74, 0x89, 0x83, 0xed, 0x16, 0xed, 0xc6, 0x47, 0xe0, 0xca, 0xc7, - 0x80, 0x4f, 0xd1, 0xe3, 0x8e, 0x3b, 0x45, 0xd4, 0x7c, 0x11, 0x64, 0xb7, 0x5b, 0x4b, 0xff, 0x68, - 0xd3, 0x6e, 0xf1, 0xeb, 0xe7, 0xf9, 0xbd, 0x8f, 0x9f, 0x43, 0xd0, 0x93, 0xc1, 0x4b, 0x81, 0x29, - 0xf3, 0x49, 0x4e, 0xfd, 0x88, 0x31, 0x1e, 0xd3, 0x8c, 0x48, 0xca, 0x32, 0x7f, 0xd4, 0xf1, 0x13, - 0xc8, 0x80, 0x13, 0x09, 0x31, 0xce, 0x39, 0x93, 0xcc, 0x6e, 0x4d, 0xb5, 0x98, 0xe4, 0x14, 0x2f, - 0x6a, 0xf1, 0xa8, 0xd3, 0x7a, 0x9a, 0x50, 0xd9, 0x1f, 0xf6, 0x70, 0xc4, 0x52, 0x3f, 0x61, 0x09, - 0xf3, 0x8d, 0xa5, 0x37, 0xfc, 0x6c, 0x4e, 0xe6, 0x60, 0xbe, 0xa6, 0xa8, 0xd6, 0xf3, 0xf9, 0xda, - 0x94, 0x44, 0x7d, 0x9a, 0x01, 0xbf, 0xf0, 0xf3, 0x41, 0xa2, 0x07, 0xc2, 0x4f, 0x41, 0x92, 0x35, - 0x01, 0x5a, 0xfe, 0x26, 0x17, 0x1f, 0x66, 0x92, 0xa6, 0xb0, 0x62, 0x78, 0x71, 0x9b, 0x41, 0x44, - 0x7d, 0x48, 0xc9, 0xb2, 0xaf, 0xfd, 0xdb, 0x42, 0xd5, 0x2e, 0x10, 0x01, 0xf6, 0x27, 0xb4, 0xad, - 0xd3, 0xc4, 0x44, 0x12, 0xc7, 0xda, 0xb3, 0xf6, 0xeb, 0x07, 0xcf, 0xf0, 0xbc, 0x86, 0x1b, 0x28, - 0xce, 0x07, 0x89, 0x1e, 0x08, 0xac, 0xd5, 0x78, 0xd4, 0xc1, 0x1f, 0x7a, 0x5f, 0x20, 0x92, 0xc7, - 0x20, 0x49, 0x60, 0x8f, 0x0b, 0xaf, 0xa4, 0x0a, 0x0f, 0xcd, 0x67, 0xe1, 0x0d, 0xd5, 0x7e, 0x87, - 0x2a, 0x22, 0x87, 0xc8, 0xd9, 0x32, 0xf4, 0xc7, 0x78, 0x73, 0xc9, 0xd8, 0x44, 0x3a, 0xc9, 0x21, - 0x0a, 0x1e, 0xcc, 0x90, 0x15, 0x7d, 0x0a, 0x0d, 0xa0, 0xfd, 0xcb, 0x42, 0x35, 0xa3, 0xe8, 0x52, - 0x21, 0xed, 0xb3, 0x95, 0xe0, 0xf8, 0x6e, 0xc1, 0xb5, 0xdb, 0xc4, 0x6e, 0xcc, 0x76, 0x6c, 0x5f, - 0x4f, 0x16, 0x42, 0xbf, 0x45, 0x55, 0x2a, 0x21, 0x15, 0xce, 0xd6, 0x5e, 0x79, 0xbf, 0x7e, 0xf0, - 0xe8, 0xd6, 0xd4, 0xc1, 0xce, 0x8c, 0x56, 0x3d, 0xd2, 0xbe, 0x70, 0x6a, 0x6f, 0xff, 0x2c, 0xcf, - 0x32, 0xeb, 0x77, 0xd8, 0xaf, 0xd0, 0x6e, 0x9f, 0x9d, 0xc7, 0xc0, 0x8f, 0x62, 0xc8, 0x24, 0x95, - 0x17, 0x26, 0x79, 0x2d, 0xb0, 0x55, 0xe1, 0xed, 0xbe, 0xff, 0xef, 0x26, 
0x5c, 0x52, 0xda, 0x5d, - 0xd4, 0x3c, 0xd7, 0xa0, 0x37, 0x43, 0x6e, 0x36, 0x9f, 0x40, 0xc4, 0xb2, 0x58, 0x98, 0x5a, 0xab, - 0x81, 0xa3, 0x0a, 0xaf, 0xd9, 0x5d, 0x73, 0x1f, 0xae, 0x75, 0xd9, 0x3d, 0x54, 0x27, 0xd1, 0xd7, - 0x21, 0xe5, 0x70, 0x4a, 0x53, 0x70, 0xca, 0xa6, 0x40, 0xff, 0x6e, 0x05, 0x1e, 0xd3, 0x88, 0x33, - 0x6d, 0x0b, 0x1e, 0xaa, 0xc2, 0xab, 0xbf, 0x9e, 0x73, 0xc2, 0x45, 0xa8, 0x7d, 0x86, 0x6a, 0x1c, - 0x32, 0xf8, 0x66, 0x36, 0x54, 0xee, 0xb7, 0x61, 0x47, 0x15, 0x5e, 0x2d, 0xbc, 0xa6, 0x84, 0x73, - 0xa0, 0x7d, 0x88, 0x1a, 0xe6, 0x65, 0xa7, 0x9c, 0x64, 0x82, 0xea, 0xb7, 0x09, 0xa7, 0x6a, 0xba, - 0x68, 0xaa, 0xc2, 0x6b, 0x74, 0x97, 0xee, 0xc2, 0x15, 0x75, 0x70, 0x38, 0x9e, 0xb8, 0xa5, 0xcb, - 0x89, 0x5b, 0xba, 0x9a, 0xb8, 0xa5, 0xef, 0xca, 0xb5, 0xc6, 0xca, 0xb5, 0x2e, 0x95, 0x6b, 0x5d, - 0x29, 0xd7, 0xfa, 0xa3, 0x5c, 0xeb, 0xc7, 0x5f, 0xb7, 0xf4, 0xb1, 0xb5, 0xf9, 0x07, 0xf2, 0x2f, - 0x00, 0x00, 0xff, 0xff, 0xb0, 0xb0, 0x3a, 0x46, 0x5d, 0x04, 0x00, 0x00, + // 588 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x40, + 0x14, 0xc6, 0xb7, 0xb0, 0xab, 0xec, 0xac, 0xfc, 0xc9, 0xc8, 0x45, 0xb3, 0x17, 0x2d, 0x92, 0x98, + 0x10, 0x13, 0xa7, 0x42, 0x8c, 0x31, 0x26, 0x26, 0x58, 0x89, 0x4a, 0xb2, 0x44, 0x53, 0xb8, 0x32, + 0x5c, 0x38, 0xdb, 0x1e, 0xba, 0x23, 0xb4, 0x53, 0x67, 0x66, 0x31, 0xdc, 0xf9, 0x08, 0x3e, 0x81, + 0xef, 0xa0, 0x4f, 0xc1, 0x25, 0x97, 0x5c, 0x35, 0x32, 0xbe, 0x85, 0x57, 0x66, 0x66, 0x0b, 0x0b, + 0xcb, 0x6e, 0x20, 0xde, 0x75, 0xce, 0x39, 0xdf, 0xef, 0x7c, 0x73, 0x4e, 0x5b, 0xf4, 0x68, 0xff, + 0xb9, 0x24, 0x8c, 0x07, 0xb4, 0x60, 0x41, 0xcc, 0xb9, 0x48, 0x58, 0x4e, 0x15, 0xe3, 0x79, 0x70, + 0xb8, 0x1a, 0xa4, 0x90, 0x83, 0xa0, 0x0a, 0x12, 0x52, 0x08, 0xae, 0x38, 0x6e, 0x0f, 0x6a, 0x09, + 0x2d, 0x18, 0xb9, 0x5c, 0x4b, 0x0e, 0x57, 0xdb, 0x8f, 0x53, 0xa6, 0x7a, 0xfd, 0x2e, 0x89, 0x79, + 0x16, 0xa4, 0x3c, 0xe5, 0x81, 0x95, 0x74, 0xfb, 0x7b, 0xf6, 0x64, 0x0f, 0xf6, 0x69, 0x80, 0x6a, + 0x3f, 0x1d, 0xb6, 0xcd, 0x68, 0xdc, 0x63, 0x39, 0x88, 0xa3, 0xa0, 0xd8, 0x4f, 0x4d, 0x40, 0x06, + 0x19, 0x28, 0x3a, 0xc6, 0x40, 0x3b, 0x98, 0xa4, 0x12, 0xfd, 0x5c, 0xb1, 0x0c, 0xae, 0x09, 0x9e, + 0xdd, 0x24, 0x90, 0x71, 0x0f, 0x32, 0x3a, 0xaa, 0x5b, 0xfe, 0xe5, 0xa0, 0x46, 0x07, 0xa8, 0x04, + 0xfc, 0x09, 0xcd, 0x18, 0x37, 0x09, 0x55, 0xd4, 0x75, 0x96, 0x9c, 0x95, 0xd6, 0xda, 0x13, 0x32, + 0x1c, 0xc3, 0x05, 0x94, 0x14, 0xfb, 0xa9, 0x09, 0x48, 0x62, 0xaa, 0xc9, 0xe1, 0x2a, 0x79, 0xdf, + 0xfd, 0x0c, 0xb1, 0xda, 0x02, 0x45, 0x43, 0x7c, 0x5c, 0xfa, 0x35, 0x5d, 0xfa, 0x68, 0x18, 0x8b, + 0x2e, 0xa8, 0xf8, 0x2d, 0xaa, 0xcb, 0x02, 0x62, 0x77, 0xca, 0xd2, 0x1f, 0x92, 0xc9, 0x43, 0x26, + 0xd6, 0xd2, 0x76, 0x01, 0x71, 0x78, 0xaf, 0x42, 0xd6, 0xcd, 0x29, 0xb2, 0x80, 0xe5, 0x9f, 0x0e, + 0x6a, 0xda, 0x8a, 0x0e, 0x93, 0x0a, 0xef, 0x5e, 0x33, 0x4e, 0x6e, 0x67, 0xdc, 0xa8, 0xad, 0xed, + 0x85, 0xaa, 0xc7, 0xcc, 0x79, 0xe4, 0x92, 0xe9, 0x37, 0xa8, 0xc1, 0x14, 0x64, 0xd2, 0x9d, 0x5a, + 0x9a, 0x5e, 0x69, 0xad, 0x3d, 0xb8, 0xd1, 0x75, 0x38, 0x5b, 0xd1, 0x1a, 0x9b, 0x46, 0x17, 0x0d, + 0xe4, 0xcb, 0x3f, 0xea, 0x95, 0x67, 0x73, 0x0f, 0xfc, 0x02, 0xcd, 0xf5, 0xf8, 0x41, 0x02, 0x62, + 0x33, 0x81, 0x5c, 0x31, 0x75, 0x64, 0x9d, 0x37, 0x43, 0xac, 0x4b, 0x7f, 0xee, 0xdd, 0x95, 0x4c, + 0x34, 0x52, 0x89, 0x3b, 0x68, 0xf1, 0xc0, 0x80, 0x36, 0xfa, 0xc2, 0x76, 0xde, 0x86, 0x98, 0xe7, + 0x89, 0xb4, 0x63, 0x6d, 0x84, 0xae, 0x2e, 0xfd, 0xc5, 0xce, 0x98, 0x7c, 0x34, 0x56, 0x85, 0xbb, + 0xa8, 0x45, 0xe3, 0x2f, 0x7d, 0x26, 0x60, 0x87, 0x65, 0xe0, 0x4e, 0xdb, 
0x01, 0x06, 0xb7, 0x1b, + 0xe0, 0x16, 0x8b, 0x05, 0x37, 0xb2, 0x70, 0x5e, 0x97, 0x7e, 0xeb, 0xd5, 0x90, 0x13, 0x5d, 0x86, + 0xe2, 0x5d, 0xd4, 0x14, 0x90, 0xc3, 0x57, 0xdb, 0xa1, 0xfe, 0x7f, 0x1d, 0x66, 0x75, 0xe9, 0x37, + 0xa3, 0x73, 0x4a, 0x34, 0x04, 0xe2, 0x75, 0xb4, 0x60, 0x6f, 0xb6, 0x23, 0x68, 0x2e, 0x99, 0xb9, + 0x9b, 0x74, 0x1b, 0x76, 0x16, 0x8b, 0xba, 0xf4, 0x17, 0x3a, 0x23, 0xb9, 0xe8, 0x5a, 0x35, 0xde, + 0x40, 0x33, 0x52, 0x99, 0xaf, 0x22, 0x3d, 0x72, 0xef, 0xd8, 0x3d, 0xac, 0x98, 0xb7, 0x61, 0xbb, + 0x8a, 0xfd, 0x2d, 0x7d, 0xf7, 0xf5, 0xf9, 0xaa, 0x21, 0x19, 0x6c, 0xb1, 0xca, 0x45, 0x17, 0x4a, + 0xfc, 0x12, 0xcd, 0x17, 0x02, 0xf6, 0x40, 0x08, 0x48, 0x06, 0x2b, 0x74, 0xef, 0x5a, 0xd8, 0x7d, + 0x5d, 0xfa, 0xf3, 0x1f, 0xae, 0xa6, 0xa2, 0xd1, 0xda, 0x70, 0xfd, 0xf8, 0xcc, 0xab, 0x9d, 0x9c, + 0x79, 0xb5, 0xd3, 0x33, 0xaf, 0xf6, 0x4d, 0x7b, 0xce, 0xb1, 0xf6, 0x9c, 0x13, 0xed, 0x39, 0xa7, + 0xda, 0x73, 0x7e, 0x6b, 0xcf, 0xf9, 0xfe, 0xc7, 0xab, 0x7d, 0x6c, 0x4f, 0xfe, 0x8b, 0xfd, 0x0b, + 0x00, 0x00, 0xff, 0xff, 0xf8, 0xf4, 0xd4, 0x78, 0xe2, 0x04, 0x00, 0x00, } func (m *Lease) Marshal() (dAtA []byte, err error) { @@ -285,6 +289,20 @@ func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreferredHolder != nil { + i -= len(*m.PreferredHolder) + copy(dAtA[i:], *m.PreferredHolder) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreferredHolder))) + i-- + dAtA[i] = 0x3a + } + if m.Strategy != nil { + i -= len(*m.Strategy) + copy(dAtA[i:], *m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Strategy))) + i-- + dAtA[i] = 0x32 + } if m.LeaseTransitions != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) i-- @@ -394,6 +412,14 @@ func (m *LeaseSpec) Size() (n int) { if m.LeaseTransitions != nil { n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) } + if m.Strategy != nil { + l = len(*m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreferredHolder != nil { + l = len(*m.PreferredHolder) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -440,6 +466,8 @@ func (this *LeaseSpec) String() string { `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `Strategy:` + valueToStringGenerated(this.Strategy) + `,`, + `PreferredHolder:` + valueToStringGenerated(this.PreferredHolder) + `,`, `}`, }, "") return s @@ -859,6 +887,72 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } } m.LeaseTransitions = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) + m.Strategy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredHolder", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PreferredHolder = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/constraint/vendor/k8s.io/api/coordination/v1/generated.proto b/constraint/vendor/k8s.io/api/coordination/v1/generated.proto index 36fce60f2..4d4f7e08f 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/coordination/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "k8s.io/api/coordination/v1"; message Lease { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -45,7 +45,7 @@ message LeaseList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Lease items = 2; @@ -54,27 +54,43 @@ message LeaseList { // LeaseSpec is a specification of a Lease. message LeaseSpec { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional optional string holderIdentity = 1; // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last + // to wait to force acquire it. This is measured against the time of last // observed renewTime. // +optional optional int32 leaseDurationSeconds = 2; // acquireTime is a time when the current lease was acquired. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; // renewTime is a time when the current holder of a lease has last // updated the lease. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; // leaseTransitions is the number of transitions of a lease between // holders. // +optional optional int32 leaseTransitions = 5; + + // Strategy indicates the strategy for picking the leader for coordinated leader election. + // If the field is not specified, there is no active coordination for this lease. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. 
+ // +featureGate=CoordinatedLeaderElection + // +optional + optional string strategy = 6; + + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // This field can only be set if Strategy is also set. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string preferredHolder = 7; } diff --git a/constraint/vendor/k8s.io/api/coordination/v1/types.go b/constraint/vendor/k8s.io/api/coordination/v1/types.go index b0e1d0682..5307cea88 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1/types.go +++ b/constraint/vendor/k8s.io/api/coordination/v1/types.go @@ -20,8 +20,21 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +type CoordinatedLeaseStrategy string + +// CoordinatedLeaseStrategy defines the strategy for picking the leader for coordinated leader election. +const ( + // OldestEmulationVersion picks the oldest LeaseCandidate, where "oldest" is defined as follows + // 1) Select the candidate(s) with the lowest emulation version + // 2) If multiple candidates have the same emulation version, select the candidate(s) with the lowest binary version. (Note that binary version must be greater or equal to emulation version) + // 3) If multiple candidates have the same binary version, select the candidate with the oldest creationTimestamp. + // If a candidate does not specify the emulationVersion and binaryVersion fields, it will not be considered a candidate for the lease. + OldestEmulationVersion CoordinatedLeaseStrategy = "OldestEmulationVersion" +) + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // Lease defines a lease concept. type Lease struct { @@ -39,10 +52,12 @@ type Lease struct { // LeaseSpec is a specification of a Lease. type LeaseSpec struct { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last + // to wait to force acquire it. This is measured against the time of last // observed renewTime. // +optional LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` @@ -57,9 +72,22 @@ type LeaseSpec struct { // holders. // +optional LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` + // Strategy indicates the strategy for picking the leader for coordinated leader election. + // If the field is not specified, there is no active coordination for this lease. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + Strategy *CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // This field can only be set if Strategy is also set. 
+ // +featureGate=CoordinatedLeaderElection + // +optional + PreferredHolder *string `json:"preferredHolder,omitempty" protobuf:"bytes,7,opt,name=preferredHolder"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // LeaseList is a list of Lease objects. type LeaseList struct { diff --git a/constraint/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go index f3720eca0..6c1a7ea8b 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -49,11 +49,13 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against the time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", + "strategy": "Strategy indicates the strategy for picking the leader for coordinated leader election. If the field is not specified, there is no active coordination for this lease. (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "preferredHolder": "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up. This field can only be set if Strategy is also set.", } func (LeaseSpec) SwaggerDoc() map[string]string { diff --git a/constraint/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go index 99f6b0be7..4d549cc99 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -111,6 +111,16 @@ func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { *out = new(int32) **out = **in } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(CoordinatedLeaseStrategy) + **out = **in + } + if in.PreferredHolder != nil { + in, out := &in.PreferredHolder, &out.PreferredHolder + *out = new(string) + **out = **in + } return } diff --git a/constraint/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..a22632cba --- /dev/null +++ b/constraint/vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
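The coordination/v1 hunks above introduce the coordinated leader election surface: the CoordinatedLeaseStrategy type with its OldestEmulationVersion constant, plus the Strategy and PreferredHolder fields on LeaseSpec. A minimal sketch of a Lease that sets them follows; it assumes a cluster with the alpha CoordinatedLeaderElection feature gate enabled, uses the k8s.io/utils/ptr helper, and all names are placeholders.

package main

import (
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	strategy := coordinationv1.OldestEmulationVersion
	lease := coordinationv1.Lease{
		// Names below are placeholders, not anything this diff prescribes.
		ObjectMeta: metav1.ObjectMeta{Name: "example-component", Namespace: "kube-system"},
		Spec: coordinationv1.LeaseSpec{
			// With coordinated leader election, holderIdentity must equal the
			// elected LeaseCandidate's metadata.name.
			HolderIdentity:       ptr.To("example-candidate"),
			LeaseDurationSeconds: ptr.To[int32](15),
			// New fields from this bump; only acted on when the (alpha)
			// CoordinatedLeaderElection feature gate is enabled.
			Strategy:        &strategy,
			PreferredHolder: ptr.To("preferred-candidate"),
		},
	}
	fmt.Println(lease.Name, *lease.Spec.Strategy, *lease.Spec.PreferredHolder)
}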
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Lease) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/constraint/vendor/k8s.io/api/coordination/v1alpha2/doc.go b/constraint/vendor/k8s.io/api/coordination/v1alpha2/doc.go new file mode 100644 index 000000000..5e6d65530 --- /dev/null +++ b/constraint/vendor/k8s.io/api/coordination/v1alpha2/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true + +// +groupName=coordination.k8s.io + +package v1alpha2 // import "k8s.io/api/coordination/v1alpha2" diff --git a/constraint/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go b/constraint/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go new file mode 100644 index 000000000..85ceea1f2 --- /dev/null +++ b/constraint/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go @@ -0,0 +1,1027 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/api/coordination/v1alpha2/generated.proto + +package v1alpha2 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} } +func (*LeaseCandidate) ProtoMessage() {} +func (*LeaseCandidate) Descriptor() ([]byte, []int) { + return fileDescriptor_c1ec5c989d262916, []int{0} +} +func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidate) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidate.Merge(m, src) +} +func (m *LeaseCandidate) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidate) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidate.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo + +func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} } +func (*LeaseCandidateList) ProtoMessage() {} +func (*LeaseCandidateList) Descriptor() ([]byte, []int) { + return fileDescriptor_c1ec5c989d262916, []int{1} +} +func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidateList.Merge(m, src) +} +func (m *LeaseCandidateList) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidateList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidateList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo + +func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} } +func (*LeaseCandidateSpec) ProtoMessage() {} +func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c1ec5c989d262916, []int{2} +} +func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseCandidateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseCandidateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseCandidateSpec.Merge(m, src) +} +func (m *LeaseCandidateSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseCandidateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseCandidateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidate") + 
proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidateList") + proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidateSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/coordination/v1alpha2/generated.proto", fileDescriptor_c1ec5c989d262916) +} + +var fileDescriptor_c1ec5c989d262916 = []byte{ + // 555 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x8b, 0xd3, 0x4e, + 0x18, 0xc7, 0x9b, 0xdd, 0xf6, 0x47, 0x3b, 0xbf, 0xad, 0xd4, 0x01, 0x21, 0xf4, 0x90, 0x96, 0x9e, + 0x44, 0x70, 0x66, 0x77, 0x5d, 0x44, 0xf0, 0x96, 0xf5, 0x0f, 0x42, 0x57, 0x25, 0xab, 0x0b, 0xca, + 0x1e, 0x9c, 0x26, 0x8f, 0xe9, 0xd8, 0x26, 0x13, 0x92, 0xe9, 0x4a, 0x6f, 0xbe, 0x04, 0x5f, 0x56, + 0xf5, 0xb4, 0xc7, 0x3d, 0x15, 0x1b, 0xc1, 0x17, 0xe1, 0x49, 0x66, 0x9a, 0xf4, 0xaf, 0xa5, 0xc5, + 0x5b, 0xe7, 0x99, 0xe7, 0xf3, 0x99, 0xf9, 0x3e, 0x69, 0x82, 0x0e, 0x7b, 0x8f, 0x12, 0xc2, 0x05, + 0x65, 0x11, 0xa7, 0xae, 0x10, 0xb1, 0xc7, 0x43, 0x26, 0xb9, 0x08, 0xe9, 0xd5, 0x11, 0xeb, 0x47, + 0x5d, 0x76, 0x4c, 0x7d, 0x08, 0x21, 0x66, 0x12, 0x3c, 0x12, 0xc5, 0x42, 0x0a, 0xdc, 0x9c, 0x12, + 0x84, 0x45, 0x9c, 0x2c, 0x12, 0x24, 0x27, 0xea, 0xf7, 0x7d, 0x2e, 0xbb, 0x83, 0x0e, 0x71, 0x45, + 0x40, 0x7d, 0xe1, 0x0b, 0xaa, 0xc1, 0xce, 0xe0, 0xa3, 0x5e, 0xe9, 0x85, 0xfe, 0x35, 0x15, 0xd6, + 0xef, 0x6d, 0xbe, 0xc2, 0xea, 0xe1, 0xf5, 0x93, 0x79, 0x6f, 0xc0, 0xdc, 0x2e, 0x0f, 0x21, 0x1e, + 0xd2, 0xa8, 0xe7, 0xab, 0x42, 0x42, 0x03, 0x90, 0xec, 0x6f, 0x14, 0xdd, 0x44, 0xc5, 0x83, 0x50, + 0xf2, 0x00, 0xd6, 0x80, 0x87, 0xdb, 0x80, 0xc4, 0xed, 0x42, 0xc0, 0x56, 0xb9, 0xd6, 0x77, 0x03, + 0xdd, 0x6a, 0x03, 0x4b, 0xe0, 0x94, 0x85, 0x1e, 0xf7, 0x98, 0x04, 0xfc, 0x01, 0x95, 0xd5, 0xb5, + 0x3c, 0x26, 0x99, 0x69, 0x34, 0x8d, 0xbb, 0xff, 0x1f, 0x1f, 0x92, 0xf9, 0x04, 0x67, 0x76, 0x12, + 0xf5, 0x7c, 0x55, 0x48, 0x88, 0xea, 0x26, 0x57, 0x47, 0xe4, 0x55, 0xe7, 0x13, 0xb8, 0xf2, 0x0c, + 0x24, 0xb3, 0xf1, 0x68, 0xdc, 0x28, 0xa4, 0xe3, 0x06, 0x9a, 0xd7, 0x9c, 0x99, 0x15, 0x5f, 0xa0, + 0x62, 0x12, 0x81, 0x6b, 0xee, 0x69, 0xfb, 0x09, 0xd9, 0xf6, 0x7c, 0xc8, 0xf2, 0x0d, 0xcf, 0x23, + 0x70, 0xed, 0x83, 0xec, 0x84, 0xa2, 0x5a, 0x39, 0xda, 0xd7, 0xfa, 0x66, 0x20, 0xbc, 0xdc, 0xda, + 0xe6, 0x89, 0xc4, 0x97, 0x6b, 0x81, 0xc8, 0x6e, 0x81, 0x14, 0xad, 0xe3, 0xd4, 0xb2, 0xc3, 0xca, + 0x79, 0x65, 0x21, 0xcc, 0x5b, 0x54, 0xe2, 0x12, 0x82, 0xc4, 0xdc, 0x6b, 0xee, 0xaf, 0xcc, 0x6a, + 0xa7, 0x34, 0x76, 0x35, 0x93, 0x97, 0x5e, 0x28, 0x8d, 0x33, 0xb5, 0xb5, 0x7e, 0xed, 0xaf, 0x66, + 0x51, 0x41, 0x31, 0x45, 0x95, 0xbe, 0xaa, 0xbe, 0x64, 0x01, 0xe8, 0x30, 0x15, 0xfb, 0x76, 0xc6, + 0x57, 0xda, 0xf9, 0x86, 0x33, 0xef, 0xc1, 0xef, 0x50, 0x39, 0xe2, 0xa1, 0xff, 0x86, 0x07, 0x90, + 0xcd, 0x9b, 0xee, 0x16, 0xfe, 0x8c, 0xbb, 0xb1, 0x50, 0x98, 0x7d, 0xa0, 0x92, 0xbf, 0xce, 0x24, + 0xce, 0x4c, 0x87, 0x2f, 0x51, 0x25, 0x86, 0x10, 0x3e, 0x6b, 0xf7, 0xfe, 0xbf, 0xb9, 0xab, 0xea, + 0xe2, 0x4e, 0x6e, 0x71, 0xe6, 0x42, 0xfc, 0x18, 0x55, 0x3b, 0x3c, 0x64, 0xf1, 0xf0, 0x02, 0xe2, + 0x84, 0x8b, 0xd0, 0x2c, 0xea, 0xb4, 0x77, 0xb2, 0xb4, 0x55, 0x7b, 0x71, 0xd3, 0x59, 0xee, 0xc5, + 0x4f, 0x50, 0x0d, 0x82, 0x41, 0x5f, 0x0f, 0x3e, 0xe7, 0x4b, 0x9a, 0x37, 0x33, 0xbe, 0xf6, 0x74, + 0x65, 0xdf, 0x59, 0x23, 0xb0, 0x8b, 0xca, 0x89, 0x54, 0x6f, 0x8b, 0x3f, 0x34, 0xff, 0xd3, 0xf4, + 0xf3, 0xfc, 0x8f, 0x70, 0x9e, 0xd5, 0x7f, 0x8f, 0x1b, 0x0f, 0x36, 0x7f, 0x0d, 0xc8, 0x69, 0xbe, + 0x06, 0x4f, 0x3f, 0x9d, 0x1c, 0x73, 0x66, 0x62, 0xfb, 0xd9, 0x68, 0x62, 0x15, 
0xae, 0x27, 0x56, + 0xe1, 0x66, 0x62, 0x15, 0xbe, 0xa4, 0x96, 0x31, 0x4a, 0x2d, 0xe3, 0x3a, 0xb5, 0x8c, 0x9b, 0xd4, + 0x32, 0x7e, 0xa4, 0x96, 0xf1, 0xf5, 0xa7, 0x55, 0x78, 0xdf, 0xdc, 0xf6, 0xd5, 0xfb, 0x13, 0x00, + 0x00, 0xff, 0xff, 0x7f, 0x15, 0x63, 0xd0, 0x18, 0x05, 0x00, 0x00, +} + +func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseCandidateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseCandidateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseCandidateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseCandidateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Strategy) + copy(dAtA[i:], m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i-- + dAtA[i] = 0x32 + i -= len(m.EmulationVersion) + copy(dAtA[i:], m.EmulationVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EmulationVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.BinaryVersion) + copy(dAtA[i:], m.BinaryVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BinaryVersion))) + i-- + dAtA[i] = 0x22 + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.PingTime != nil { + { + size, err := m.PingTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= 
len(m.LeaseName) + copy(dAtA[i:], m.LeaseName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LeaseName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LeaseCandidate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LeaseCandidateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LeaseCandidateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LeaseName) + n += 1 + l + sovGenerated(uint64(l)) + if m.PingTime != nil { + l = m.PingTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RenewTime != nil { + l = m.RenewTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.BinaryVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EmulationVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LeaseCandidate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseCandidate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseCandidateSpec", "LeaseCandidateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaseCandidateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]LeaseCandidate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LeaseCandidate", "LeaseCandidate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LeaseCandidateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LeaseCandidateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseCandidateSpec{`, + `LeaseName:` + fmt.Sprintf("%v", this.LeaseName) + `,`, + `PingTime:` + strings.Replace(fmt.Sprintf("%v", this.PingTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `BinaryVersion:` + fmt.Sprintf("%v", this.BinaryVersion) + `,`, + `EmulationVersion:` + fmt.Sprintf("%v", this.EmulationVersion) + `,`, + `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m 
*LeaseCandidate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCandidateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { 
+ return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, LeaseCandidate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCandidateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCandidateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCandidateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeaseName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PingTime == nil { + m.PingTime = &v1.MicroTime{} + } + if err := m.PingTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RenewTime == nil { + m.RenewTime = &v1.MicroTime{} + } + if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinaryVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinaryVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmulationVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EmulationVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Strategy = k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/constraint/vendor/k8s.io/api/coordination/v1alpha2/generated.proto b/constraint/vendor/k8s.io/api/coordination/v1alpha2/generated.proto new file mode 100644 index 000000000..7e56cd7f9 --- /dev/null +++ b/constraint/vendor/k8s.io/api/coordination/v1alpha2/generated.proto @@ -0,0 +1,100 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.coordination.v1alpha2; + +import "k8s.io/api/coordination/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/coordination/v1alpha2"; + +// LeaseCandidate defines a candidate for a Lease object. +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. +message LeaseCandidate { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec contains the specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional LeaseCandidateSpec spec = 2; +} + +// LeaseCandidateList is a list of Lease objects. +message LeaseCandidateList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of schema objects. 
+ repeated LeaseCandidate items = 2; +} + +// LeaseCandidateSpec is a specification of a Lease. +message LeaseCandidateSpec { + // LeaseName is the name of the lease for which this candidate is contending. + // This field is immutable. + // +required + optional string leaseName = 1; + + // PingTime is the last time that the server has requested the LeaseCandidate + // to renew. It is only done during leader election to check if any + // LeaseCandidates have become ineligible. When PingTime is updated, the + // LeaseCandidate will respond by updating RenewTime. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime pingTime = 2; + + // RenewTime is the time that the LeaseCandidate was last updated. + // Any time a Lease needs to do leader election, the PingTime field + // is updated to signal to the LeaseCandidate that they should update + // the RenewTime. + // Old LeaseCandidate objects are also garbage collected if it has been hours + // since the last renew. The PingTime field is updated regularly to prevent + // garbage collection for still active LeaseCandidates. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 3; + + // BinaryVersion is the binary version. It must be in a semver format without leading `v`. + // This field is required. + // +required + optional string binaryVersion = 4; + + // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. + // EmulationVersion must be less than or equal to BinaryVersion. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + optional string emulationVersion = 5; + + // Strategy is the strategy that coordinated leader election will use for picking the leader. + // If multiple candidates for the same Lease return different strategies, the strategy provided + // by the candidate with the latest BinaryVersion will be used. If there is still conflict, + // this is a user error and coordinated leader election will not operate the Lease until resolved. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +required + optional string strategy = 6; +} + diff --git a/constraint/vendor/k8s.io/api/coordination/v1alpha2/register.go b/constraint/vendor/k8s.io/api/coordination/v1alpha2/register.go new file mode 100644 index 000000000..86bb8e0f2 --- /dev/null +++ b/constraint/vendor/k8s.io/api/coordination/v1alpha2/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "coordination.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &LeaseCandidate{}, + &LeaseCandidateList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/constraint/vendor/k8s.io/api/coordination/v1alpha2/types.go b/constraint/vendor/k8s.io/api/coordination/v1alpha2/types.go new file mode 100644 index 000000000..2f53b097a --- /dev/null +++ b/constraint/vendor/k8s.io/api/coordination/v1alpha2/types.go @@ -0,0 +1,95 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + v1 "k8s.io/api/coordination/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// LeaseCandidate defines a candidate for a Lease object. +// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates. +type LeaseCandidate struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec contains the specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec LeaseCandidateSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LeaseCandidateSpec is a specification of a Lease. +type LeaseCandidateSpec struct { + // LeaseName is the name of the lease for which this candidate is contending. + // This field is immutable. + // +required + LeaseName string `json:"leaseName" protobuf:"bytes,1,name=leaseName"` + // PingTime is the last time that the server has requested the LeaseCandidate + // to renew. It is only done during leader election to check if any + // LeaseCandidates have become ineligible. 
When PingTime is updated, the + // LeaseCandidate will respond by updating RenewTime. + // +optional + PingTime *metav1.MicroTime `json:"pingTime,omitempty" protobuf:"bytes,2,opt,name=pingTime"` + // RenewTime is the time that the LeaseCandidate was last updated. + // Any time a Lease needs to do leader election, the PingTime field + // is updated to signal to the LeaseCandidate that they should update + // the RenewTime. + // Old LeaseCandidate objects are also garbage collected if it has been hours + // since the last renew. The PingTime field is updated regularly to prevent + // garbage collection for still active LeaseCandidates. + // +optional + RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,3,opt,name=renewTime"` + // BinaryVersion is the binary version. It must be in a semver format without leading `v`. + // This field is required. + // +required + BinaryVersion string `json:"binaryVersion" protobuf:"bytes,4,name=binaryVersion"` + // EmulationVersion is the emulation version. It must be in a semver format without leading `v`. + // EmulationVersion must be less than or equal to BinaryVersion. + // This field is required when strategy is "OldestEmulationVersion" + // +optional + EmulationVersion string `json:"emulationVersion,omitempty" protobuf:"bytes,5,opt,name=emulationVersion"` + // Strategy is the strategy that coordinated leader election will use for picking the leader. + // If multiple candidates for the same Lease return different strategies, the strategy provided + // by the candidate with the latest BinaryVersion will be used. If there is still conflict, + // this is a user error and coordinated leader election will not operate the Lease until resolved. + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +required + Strategy v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// LeaseCandidateList is a list of Lease objects. +type LeaseCandidateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of schema objects. + Items []LeaseCandidate `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/constraint/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go new file mode 100644 index 000000000..39534e6ad --- /dev/null +++ b/constraint/vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha2 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_LeaseCandidate = map[string]string{ + "": "LeaseCandidate defines a candidate for a Lease object. Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (LeaseCandidate) SwaggerDoc() map[string]string { + return map_LeaseCandidate +} + +var map_LeaseCandidateList = map[string]string{ + "": "LeaseCandidateList is a list of Lease objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of schema objects.", +} + +func (LeaseCandidateList) SwaggerDoc() map[string]string { + return map_LeaseCandidateList +} + +var map_LeaseCandidateSpec = map[string]string{ + "": "LeaseCandidateSpec is a specification of a Lease.", + "leaseName": "LeaseName is the name of the lease for which this candidate is contending. This field is immutable.", + "pingTime": "PingTime is the last time that the server has requested the LeaseCandidate to renew. It is only done during leader election to check if any LeaseCandidates have become ineligible. When PingTime is updated, the LeaseCandidate will respond by updating RenewTime.", + "renewTime": "RenewTime is the time that the LeaseCandidate was last updated. Any time a Lease needs to do leader election, the PingTime field is updated to signal to the LeaseCandidate that they should update the RenewTime. Old LeaseCandidate objects are also garbage collected if it has been hours since the last renew. The PingTime field is updated regularly to prevent garbage collection for still active LeaseCandidates.", + "binaryVersion": "BinaryVersion is the binary version. It must be in a semver format without leading `v`. This field is required.", + "emulationVersion": "EmulationVersion is the emulation version. It must be in a semver format without leading `v`. EmulationVersion must be less than or equal to BinaryVersion. This field is required when strategy is \"OldestEmulationVersion\"", + "strategy": "Strategy is the strategy that coordinated leader election will use for picking the leader. If multiple candidates for the same Lease return different strategies, the strategy provided by the candidate with the latest BinaryVersion will be used. If there is still conflict, this is a user error and coordinated leader election will not operate the Lease until resolved. 
(Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", +} + +func (LeaseCandidateSpec) SwaggerDoc() map[string]string { + return map_LeaseCandidateSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/constraint/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 000000000..a20284797 --- /dev/null +++ b/constraint/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,110 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidate) DeepCopyInto(out *LeaseCandidate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidate. +func (in *LeaseCandidate) DeepCopy() *LeaseCandidate { + if in == nil { + return nil + } + out := new(LeaseCandidate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseCandidate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseCandidateList) DeepCopyInto(out *LeaseCandidateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LeaseCandidate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateList. +func (in *LeaseCandidateList) DeepCopy() *LeaseCandidateList { + if in == nil { + return nil + } + out := new(LeaseCandidateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseCandidateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LeaseCandidateSpec) DeepCopyInto(out *LeaseCandidateSpec) { + *out = *in + if in.PingTime != nil { + in, out := &in.PingTime, &out.PingTime + *out = (*in).DeepCopy() + } + if in.RenewTime != nil { + in, out := &in.RenewTime, &out.RenewTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseCandidateSpec. +func (in *LeaseCandidateSpec) DeepCopy() *LeaseCandidateSpec { + if in == nil { + return nil + } + out := new(LeaseCandidateSpec) + in.DeepCopyInto(out) + return out +} diff --git a/constraint/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..a99b9ab5b --- /dev/null +++ b/constraint/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha2 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseCandidate) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *LeaseCandidate) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *LeaseCandidate) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LeaseCandidateList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *LeaseCandidateList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *LeaseCandidateList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} diff --git a/constraint/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/constraint/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index 655de5659..bea9b8146 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/constraint/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -25,6 +25,8 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" + + k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -139,40 +141,45 @@ func init() { } var fileDescriptor_8d4e223b8bb23da3 = []byte{ - // 527 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x41, 0x6f, 0xd3, 0x30, - 0x14, 0xc7, 0x9b, 0xb5, 0x95, 0x56, 0x97, 0x8d, 0x2a, 0xea, 0x21, 0xea, 0x21, 0x99, 0x7a, 0x40, - 0x13, 0x12, 0x36, 0x9d, 0x10, 0x42, 0x9c, 0x20, 0x02, 0x89, 0x89, 0x4c, 0x48, 0xd9, 0x4e, 0x68, - 0x07, 0xdc, 0xe4, 0x91, 0x9a, 0x2e, 0x71, 0x88, 0xdd, 0xa2, 0xdd, 0xf8, 0x08, 0x5c, 0xf9, 0x22, - 0xf0, 0x15, 0x7a, 0xdc, 0x71, 0xa7, 0x88, 0x9a, 0x2f, 0x82, 0xec, 0x76, 0x6b, 0x69, 0x87, 0x5a, - 0x71, 0x8b, 0x9f, 0xdf, 0xef, 0xf7, 0xfe, 0x7e, 0x87, 0x20, 0x32, 0x7c, 0x26, 0x30, 0xe3, 0x84, - 0xe6, 0x8c, 0x44, 0x9c, 0x17, 0x31, 0xcb, 0xa8, 0x64, 0x3c, 0x23, 0xe3, 0x5e, 0x1f, 0x24, 0xed, - 0x91, 0x04, 0x32, 0x28, 0xa8, 0x84, 0x18, 0xe7, 0x05, 0x97, 0xdc, 0xf6, 0x66, 0x00, 0xa6, 0x39, - 0xc3, 0xcb, 0x00, 0x9e, 0x03, 0x9d, 0x47, 0x09, 0x93, 0x83, 0x51, 0x1f, 0x47, 0x3c, 0x25, 0x09, - 0x4f, 0x38, 0x31, 0x5c, 0x7f, 0xf4, 0xd1, 0x9c, 0xcc, 0xc1, 0x7c, 0xcd, 0x7c, 0x9d, 0x27, 0x8b, - 0x00, 0x29, 0x8d, 0x06, 0x2c, 0x83, 0xe2, 0x92, 0xe4, 0xc3, 0x44, 0x17, 0x04, 0x49, 0x41, 0x52, - 0x32, 0x5e, 0x4b, 0xd1, 0x21, 0xff, 0xa2, 0x8a, 0x51, 0x26, 0x59, 0x0a, 0x6b, 0xc0, 0xd3, 0x4d, - 0x80, 0x88, 0x06, 0x90, 0xd2, 0x55, 0xae, 0xfb, 0xd3, 0x42, 0xf5, 0x00, 0xa8, 0x00, 0xfb, 0x03, - 0xda, 0xd5, 0x69, 0x62, 0x2a, 0xa9, 0x63, 0x1d, 0x58, 0x87, 0xcd, 0xa3, 0xc7, 0x78, 0xb1, 0x8b, - 0x5b, 0x29, 0xce, 0x87, 0x89, 0x2e, 0x08, 0xac, 0xbb, 0xf1, 0xb8, 0x87, 0xdf, 0xf5, 0x3f, 0x41, - 0x24, 0x4f, 0x40, 0x52, 0xdf, 0x9e, 0x94, 0x5e, 0x45, 0x95, 0x1e, 0x5a, 0xd4, 0xc2, 0x5b, 0xab, - 0x1d, 0xa0, 0x9a, 0xc8, 0x21, 0x72, 0x76, 0x8c, 0xfd, 0x21, 0xde, 0xb0, 0x69, 0x6c, 0x72, 0x9d, - 0xe6, 0x10, 0xf9, 0xf7, 0xe6, 0xde, 0x9a, 0x3e, 0x85, 0xc6, 0xd2, 0xfd, 0x61, 0xa1, 0x86, 0xe9, - 0x08, 0x98, 0x90, 0xf6, 0xf9, 0x5a, 0x7a, 0xbc, 0x5d, 0x7a, 0x4d, 0x9b, 0xec, 0xad, 0xf9, 0x8c, - 0xdd, 0x9b, 0xca, 0x52, 0xf2, 0xb7, 0xa8, 0xce, 0x24, 0xa4, 0xc2, 0xd9, 0x39, 0xa8, 0x1e, 0x36, - 0x8f, 0x1e, 0x6c, 0x17, 0xdd, 0xdf, 0x9b, 0x2b, 0xeb, 0xc7, 0x1a, 0x0e, 0x67, 0x8e, 0xee, 0xf7, - 0xea, 0x3c, 0xb8, 0x7e, 0x8c, 0xfd, 0x1c, 0xed, 0x0f, 0xf8, 0x45, 0x0c, 0xc5, 0x71, 0x0c, 0x99, - 0x64, 0xf2, 0xd2, 0xc4, 0x6f, 0xf8, 0xb6, 0x2a, 0xbd, 0xfd, 0x37, 0x7f, 0xdd, 0x84, 0x2b, 0x9d, - 0x76, 0x80, 0xda, 0x17, 0x5a, 0xf4, 0x6a, 0x54, 0x98, 0xf1, 0xa7, 0x10, 0xf1, 0x2c, 0x16, 0x66, - 0xc1, 0x75, 0xdf, 0x51, 0xa5, 0xd7, 0x0e, 0xee, 0xb8, 0x0f, 
0xef, 0xa4, 0xec, 0x3e, 0x6a, 0xd2, - 0xe8, 0xf3, 0x88, 0x15, 0x70, 0xc6, 0x52, 0x70, 0xaa, 0x66, 0x8b, 0x64, 0xbb, 0x2d, 0x9e, 0xb0, - 0xa8, 0xe0, 0x1a, 0xf3, 0xef, 0xab, 0xd2, 0x6b, 0xbe, 0x5c, 0x78, 0xc2, 0x65, 0xa9, 0x7d, 0x8e, - 0x1a, 0x05, 0x64, 0xf0, 0xc5, 0x4c, 0xa8, 0xfd, 0xdf, 0x84, 0x3d, 0x55, 0x7a, 0x8d, 0xf0, 0xc6, - 0x12, 0x2e, 0x84, 0xf6, 0x0b, 0xd4, 0x32, 0x2f, 0x3b, 0x2b, 0x68, 0x26, 0x98, 0x7e, 0x9b, 0x70, - 0xea, 0x66, 0x17, 0x6d, 0x55, 0x7a, 0xad, 0x60, 0xe5, 0x2e, 0x5c, 0xeb, 0xf6, 0x5f, 0x4f, 0xa6, - 0x6e, 0xe5, 0x6a, 0xea, 0x56, 0xae, 0xa7, 0x6e, 0xe5, 0xab, 0x72, 0xad, 0x89, 0x72, 0xad, 0x2b, - 0xe5, 0x5a, 0xd7, 0xca, 0xb5, 0x7e, 0x29, 0xd7, 0xfa, 0xf6, 0xdb, 0xad, 0xbc, 0xf7, 0x36, 0xfc, - 0x54, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x81, 0x42, 0xfe, 0x76, 0x04, 0x00, 0x00, + // 600 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x4e, + 0x14, 0xc7, 0xb7, 0xb0, 0xfb, 0xfb, 0xb1, 0xb3, 0xf2, 0x27, 0x23, 0x17, 0x0d, 0x17, 0x2d, 0xe1, + 0xc2, 0x10, 0x12, 0xa7, 0x82, 0xc6, 0x18, 0x13, 0x13, 0x2d, 0x9a, 0x48, 0x2c, 0xd1, 0x14, 0xae, + 0x0c, 0x89, 0xce, 0xb6, 0x87, 0xee, 0x08, 0xed, 0xd4, 0x99, 0x59, 0x0c, 0x77, 0x3e, 0x82, 0x4f, + 0xa3, 0xf1, 0x0d, 0xb8, 0xe4, 0x92, 0xab, 0x46, 0xc6, 0xb7, 0xf0, 0xca, 0xcc, 0x6c, 0x61, 0x61, + 0x81, 0xb0, 0xf1, 0x6e, 0xe7, 0x9c, 0xf3, 0xfd, 0x9c, 0xef, 0x9c, 0xb3, 0x53, 0x14, 0xec, 0x3d, + 0x91, 0x84, 0xf1, 0x80, 0x96, 0x2c, 0x48, 0x38, 0x17, 0x29, 0x2b, 0xa8, 0x62, 0xbc, 0x08, 0x0e, + 0x56, 0xbb, 0xa0, 0xe8, 0x6a, 0x90, 0x41, 0x01, 0x82, 0x2a, 0x48, 0x49, 0x29, 0xb8, 0xe2, 0xd8, + 0x1f, 0x08, 0x08, 0x2d, 0x19, 0xb9, 0x28, 0x20, 0xb5, 0x60, 0xe1, 0x7e, 0xc6, 0x54, 0xaf, 0xdf, + 0x25, 0x09, 0xcf, 0x83, 0x8c, 0x67, 0x3c, 0xb0, 0xba, 0x6e, 0x7f, 0xd7, 0x9e, 0xec, 0xc1, 0xfe, + 0x1a, 0xf0, 0x16, 0x56, 0x6e, 0x36, 0x30, 0xda, 0x7b, 0xe1, 0xd1, 0xb0, 0x36, 0xa7, 0x49, 0x8f, + 0x15, 0x20, 0x0e, 0x83, 0x72, 0x2f, 0x33, 0x01, 0x19, 0xe4, 0xa0, 0xe8, 0x75, 0xaa, 0xe0, 0x26, + 0x95, 0xe8, 0x17, 0x8a, 0xe5, 0x70, 0x45, 0xf0, 0xf8, 0x36, 0x81, 0x4c, 0x7a, 0x90, 0xd3, 0x51, + 0xdd, 0xd2, 0x0f, 0x07, 0xb5, 0x22, 0xa0, 0x12, 0xf0, 0x47, 0x34, 0x65, 0xdc, 0xa4, 0x54, 0x51, + 0xd7, 0x59, 0x74, 0x96, 0x3b, 0x6b, 0x0f, 0xc8, 0x70, 0x6e, 0xe7, 0x50, 0x52, 0xee, 0x65, 0x26, + 0x20, 0x89, 0xa9, 0x26, 0x07, 0xab, 0xe4, 0x6d, 0xf7, 0x13, 0x24, 0x6a, 0x13, 0x14, 0x0d, 0xf1, + 0x51, 0xe5, 0x37, 0x74, 0xe5, 0xa3, 0x61, 0x2c, 0x3e, 0xa7, 0xe2, 0x08, 0x35, 0x65, 0x09, 0x89, + 0x3b, 0x61, 0xe9, 0x2b, 0xe4, 0x96, 0xad, 0x10, 0xeb, 0x6b, 0xab, 0x84, 0x24, 0xbc, 0x53, 0x73, + 0x9b, 0xe6, 0x14, 0x5b, 0xca, 0xd2, 0x77, 0x07, 0xb5, 0x6d, 0x45, 0xc4, 0xa4, 0xc2, 0x3b, 0x57, + 0xdc, 0x93, 0xf1, 0xdc, 0x1b, 0xb5, 0xf5, 0x3e, 0x57, 0xf7, 0x98, 0x3a, 0x8b, 0x5c, 0x70, 0xfe, + 0x06, 0xb5, 0x98, 0x82, 0x5c, 0xba, 0x13, 0x8b, 0x93, 0xcb, 0x9d, 0xb5, 0x7b, 0xe3, 0x59, 0x0f, + 0xa7, 0x6b, 0x64, 0x6b, 0xc3, 0x88, 0xe3, 0x01, 0x63, 0xe9, 0x67, 0xb3, 0x36, 0x6e, 0x2e, 0x83, + 0x9f, 0xa2, 0x99, 0x1e, 0xdf, 0x4f, 0x41, 0x6c, 0xa4, 0x50, 0x28, 0xa6, 0x0e, 0xad, 0xfd, 0x76, + 0x88, 0x75, 0xe5, 0xcf, 0xbc, 0xbe, 0x94, 0x89, 0x47, 0x2a, 0x71, 0x84, 0xe6, 0xf7, 0x0d, 0xe8, + 0x65, 0x5f, 0xd8, 0xf6, 0x5b, 0x90, 0xf0, 0x22, 0x95, 0x76, 0xc0, 0xad, 0xd0, 0xd5, 0x95, 0x3f, + 0x1f, 0x5d, 0x93, 0x8f, 0xaf, 0x55, 0xe1, 0x2e, 0xea, 0xd0, 0xe4, 0x73, 0x9f, 0x09, 0xd8, 0x66, + 0x39, 0xb8, 0x93, 0x76, 0x8a, 0xc1, 0x78, 0x53, 0xdc, 0x64, 0x89, 0xe0, 0x46, 0x16, 0xce, 0xea, + 0xca, 0xef, 0xbc, 0x18, 0x72, 0xe2, 0x8b, 
0x50, 0xbc, 0x83, 0xda, 0x02, 0x0a, 0xf8, 0x62, 0x3b, + 0x34, 0xff, 0xad, 0xc3, 0xb4, 0xae, 0xfc, 0x76, 0x7c, 0x46, 0x89, 0x87, 0x40, 0xfc, 0x1c, 0xcd, + 0xd9, 0x9b, 0x6d, 0x0b, 0x5a, 0x48, 0x66, 0xee, 0x26, 0xdd, 0x96, 0x9d, 0xc5, 0xbc, 0xae, 0xfc, + 0xb9, 0x68, 0x24, 0x17, 0x5f, 0xa9, 0xc6, 0x1f, 0xd0, 0x94, 0x54, 0xe6, 0x7d, 0x64, 0x87, 0xee, + 0x7f, 0x76, 0x0f, 0xeb, 0xe6, 0x2f, 0xb1, 0x55, 0xc7, 0xfe, 0x54, 0xfe, 0xc3, 0x9b, 0xdf, 0x3e, + 0x59, 0x3f, 0x3b, 0x43, 0x3a, 0x58, 0x70, 0x2d, 0x8b, 0xcf, 0xa1, 0xf8, 0x19, 0x9a, 0x2d, 0x05, + 0xec, 0x82, 0x10, 0x90, 0x0e, 0xb6, 0xeb, 0xfe, 0x6f, 0xfb, 0xdc, 0xd5, 0x95, 0x3f, 0xfb, 0xee, + 0x72, 0x2a, 0x1e, 0xad, 0x0d, 0x5f, 0x1d, 0x9d, 0x7a, 0x8d, 0xe3, 0x53, 0xaf, 0x71, 0x72, 0xea, + 0x35, 0xbe, 0x6a, 0xcf, 0x39, 0xd2, 0x9e, 0x73, 0xac, 0x3d, 0xe7, 0x44, 0x7b, 0xce, 0x2f, 0xed, + 0x39, 0xdf, 0x7e, 0x7b, 0x8d, 0xf7, 0xfe, 0x2d, 0x1f, 0xc8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x57, 0x93, 0xf3, 0xef, 0x42, 0x05, 0x00, 0x00, } func (m *Lease) Marshal() (dAtA []byte, err error) { @@ -285,6 +292,20 @@ func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PreferredHolder != nil { + i -= len(*m.PreferredHolder) + copy(dAtA[i:], *m.PreferredHolder) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreferredHolder))) + i-- + dAtA[i] = 0x3a + } + if m.Strategy != nil { + i -= len(*m.Strategy) + copy(dAtA[i:], *m.Strategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Strategy))) + i-- + dAtA[i] = 0x32 + } if m.LeaseTransitions != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) i-- @@ -394,6 +415,14 @@ func (m *LeaseSpec) Size() (n int) { if m.LeaseTransitions != nil { n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) } + if m.Strategy != nil { + l = len(*m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreferredHolder != nil { + l = len(*m.PreferredHolder) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -440,6 +469,8 @@ func (this *LeaseSpec) String() string { `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `Strategy:` + valueToStringGenerated(this.Strategy) + `,`, + `PreferredHolder:` + valueToStringGenerated(this.PreferredHolder) + `,`, `}`, }, "") return s @@ -859,6 +890,72 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } } m.LeaseTransitions = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_coordination_v1.CoordinatedLeaseStrategy(dAtA[iNdEx:postIndex]) + m.Strategy = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredHolder", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PreferredHolder = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/constraint/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/coordination/v1beta1/generated.proto index 92c8918b8..088811a74 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.coordination.v1beta1; +import "k8s.io/api/coordination/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -32,7 +33,7 @@ option go_package = "k8s.io/api/coordination/v1beta1"; message Lease { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec contains the specification of the Lease. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -45,7 +46,7 @@ message LeaseList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Lease items = 2; @@ -54,6 +55,8 @@ message LeaseList { // LeaseSpec is a specification of a Lease. message LeaseSpec { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional optional string holderIdentity = 1; @@ -65,16 +68,28 @@ message LeaseSpec { // acquireTime is a time when the current lease was acquired. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; // renewTime is a time when the current holder of a lease has last // updated the lease. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; // leaseTransitions is the number of transitions of a lease between // holders. // +optional optional int32 leaseTransitions = 5; + + // Strategy indicates the strategy for picking the leader for coordinated leader election + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + optional string strategy = 6; + + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. 
+ // +featureGate=CoordinatedLeaderElection + // +optional + optional string preferredHolder = 7; } diff --git a/constraint/vendor/k8s.io/api/coordination/v1beta1/types.go b/constraint/vendor/k8s.io/api/coordination/v1beta1/types.go index 3a3d5f32e..d63fc30a9 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/constraint/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -42,6 +43,8 @@ type Lease struct { // LeaseSpec is a specification of a Lease. type LeaseSpec struct { // holderIdentity contains the identity of the holder of a current lease. + // If Coordinated Leader Election is used, the holder identity must be + // equal to the elected LeaseCandidate.metadata.name field. // +optional HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` // leaseDurationSeconds is a duration that candidates for a lease need @@ -60,6 +63,16 @@ type LeaseSpec struct { // holders. // +optional LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` + // Strategy indicates the strategy for picking the leader for coordinated leader election + // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled. + // +featureGate=CoordinatedLeaderElection + // +optional + Strategy *v1.CoordinatedLeaseStrategy `json:"strategy,omitempty" protobuf:"bytes,6,opt,name=strategy"` + // PreferredHolder signals to a lease holder that the lease has a + // more optimal holder and should be given up. + // +featureGate=CoordinatedLeaderElection + // +optional + PreferredHolder *string `json:"preferredHolder,omitempty" protobuf:"bytes,7,opt,name=preferredHolder"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/constraint/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index 78ca4e393..50fe8ea18 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -49,11 +49,13 @@ func (LeaseList) SwaggerDoc() map[string]string { var map_LeaseSpec = map[string]string{ "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease. If Coordinated Leader Election is used, the holder identity must be equal to the elected LeaseCandidate.metadata.name field.", "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. 
This is measure against time of last observed renewTime.", "acquireTime": "acquireTime is a time when the current lease was acquired.", "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", + "strategy": "Strategy indicates the strategy for picking the leader for coordinated leader election (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.", + "preferredHolder": "PreferredHolder signals to a lease holder that the lease has a more optimal holder and should be given up.", } func (LeaseSpec) SwaggerDoc() map[string]string { diff --git a/constraint/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index 3adfd8720..dcef1e346 100644 --- a/constraint/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/coordination/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -111,6 +112,16 @@ func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { *out = new(int32) **out = **in } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(v1.CoordinatedLeaseStrategy) + **out = **in + } + if in.PreferredHolder != nil { + in, out := &in.PreferredHolder, &out.PreferredHolder + *out = new(string) + **out = **in + } return } diff --git a/constraint/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/constraint/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 5cf6f329f..62e86402e 100644 --- a/constraint/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/constraint/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -23,7 +23,7 @@ const ( // webhook backend fails. ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" - // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods + // MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" // TolerationsAnnotationKey represents the key of tolerations data (json serialized) @@ -80,7 +80,7 @@ const ( // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" - // annotation key prefix used to identify non-convertible json paths. + // NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible json paths. NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" kubectlPrefix = "kubectl.kubernetes.io/" diff --git a/constraint/vendor/k8s.io/api/core/v1/doc.go b/constraint/vendor/k8s.io/api/core/v1/doc.go index 1bdf0b25b..bc0041b33 100644 --- a/constraint/vendor/k8s.io/api/core/v1/doc.go +++ b/constraint/vendor/k8s.io/api/core/v1/doc.go @@ -17,6 +17,8 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package +// +k8s:prerelease-lifecycle-gen=true +// +groupName= // Package v1 is the v1 version of the core API. 
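Editor's note — the coordination/v1beta1 hunks above add Strategy and PreferredHolder to LeaseSpec behind the CoordinatedLeaderElection feature gate, with Strategy typed as the v1 CoordinatedLeaseStrategy. For orientation only, the sketch below builds a Lease against this vendored API and prints its JSON; the holder names and the "OldestEmulationVersion" strategy string are illustrative placeholders, not values this diff prescribes.

```go
package main

import (
	"encoding/json"
	"fmt"

	coordinationv1 "k8s.io/api/coordination/v1"
	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	holder := "node-a"    // illustrative holder identity
	preferred := "node-b" // illustrative preferred holder
	duration := int32(15)
	strategy := coordinationv1.CoordinatedLeaseStrategy("OldestEmulationVersion") // illustrative value

	lease := coordinationv1beta1.Lease{
		ObjectMeta: metav1.ObjectMeta{Name: "example-lease", Namespace: "kube-system"},
		Spec: coordinationv1beta1.LeaseSpec{
			HolderIdentity:       &holder,
			LeaseDurationSeconds: &duration,
			// The two fields added by this vendor bump:
			Strategy:        &strategy,
			PreferredHolder: &preferred,
		},
	}

	out, _ := json.MarshalIndent(lease, "", "  ")
	fmt.Println(string(out))
}
```

Because both new members are pointers, the updated DeepCopyInto above allocates fresh values for them instead of copying the pointers, so a deep-copied Lease does not alias the original.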
package v1 // import "k8s.io/api/core/v1" diff --git a/constraint/vendor/k8s.io/api/core/v1/generated.pb.go b/constraint/vendor/k8s.io/api/core/v1/generated.pb.go index d52d8da18..9d466c6d7 100644 --- a/constraint/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/constraint/vendor/k8s.io/api/core/v1/generated.pb.go @@ -497,38 +497,10 @@ func (m *CinderVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo -func (m *ClaimSource) Reset() { *m = ClaimSource{} } -func (*ClaimSource) ProtoMessage() {} -func (*ClaimSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{16} -} -func (m *ClaimSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClaimSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClaimSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClaimSource.Merge(m, src) -} -func (m *ClaimSource) XXX_Size() int { - return m.Size() -} -func (m *ClaimSource) XXX_DiscardUnknown() { - xxx_messageInfo_ClaimSource.DiscardUnknown(m) -} - -var xxx_messageInfo_ClaimSource proto.InternalMessageInfo - func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } func (*ClientIPConfig) ProtoMessage() {} func (*ClientIPConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{17} + return fileDescriptor_6c07b07c062484ab, []int{16} } func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +528,7 @@ var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo func (m *ClusterTrustBundleProjection) Reset() { *m = ClusterTrustBundleProjection{} } func (*ClusterTrustBundleProjection) ProtoMessage() {} func (*ClusterTrustBundleProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{18} + return fileDescriptor_6c07b07c062484ab, []int{17} } func (m *ClusterTrustBundleProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +556,7 @@ var xxx_messageInfo_ClusterTrustBundleProjection proto.InternalMessageInfo func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} func (*ComponentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{19} + return fileDescriptor_6c07b07c062484ab, []int{18} } func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +584,7 @@ var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} func (*ComponentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{20} + return fileDescriptor_6c07b07c062484ab, []int{19} } func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +612,7 @@ var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} func (*ComponentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{21} + return fileDescriptor_6c07b07c062484ab, []int{20} } func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +640,7 @@ var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo func (m *ConfigMap) Reset() { *m = ConfigMap{} } 
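Editor's note on the core/v1 generated.pb.go churn that follows: Descriptor() returns the message's zero-based position in generated.proto, so deleting ClaimSource (previously index 16) shifts every later message down by one, while the newly registered ContainerUser, ImageVolumeSource, LinuxContainerUser and NodeFeatures messages push the indices after their insertion points back up. The hypothetical helper below is only meant to illustrate that positional convention; it is not part of the vendored code.

```go
// reindex maps message names to their declaration order, which is the
// integer the generated Descriptor() methods return alongside the
// file-descriptor bytes.
func reindex(messages []string) map[string]int {
	idx := make(map[string]int, len(messages))
	for i, name := range messages {
		idx[name] = i
	}
	return idx
}
```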
func (*ConfigMap) ProtoMessage() {} func (*ConfigMap) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{22} + return fileDescriptor_6c07b07c062484ab, []int{21} } func (m *ConfigMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +668,7 @@ var xxx_messageInfo_ConfigMap proto.InternalMessageInfo func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{23} + return fileDescriptor_6c07b07c062484ab, []int{22} } func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +696,7 @@ var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{24} + return fileDescriptor_6c07b07c062484ab, []int{23} } func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +724,7 @@ var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} func (*ConfigMapList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{25} + return fileDescriptor_6c07b07c062484ab, []int{24} } func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +752,7 @@ var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } func (*ConfigMapNodeConfigSource) ProtoMessage() {} func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{26} + return fileDescriptor_6c07b07c062484ab, []int{25} } func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +780,7 @@ var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} func (*ConfigMapProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{27} + return fileDescriptor_6c07b07c062484ab, []int{26} } func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +808,7 @@ var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{28} + return fileDescriptor_6c07b07c062484ab, []int{27} } func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +836,7 @@ var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{29} + return fileDescriptor_6c07b07c062484ab, []int{28} } func (m *Container) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +864,7 @@ var xxx_messageInfo_Container proto.InternalMessageInfo func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} func 
(*ContainerImage) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{30} + return fileDescriptor_6c07b07c062484ab, []int{29} } func (m *ContainerImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +892,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} func (*ContainerPort) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{31} + return fileDescriptor_6c07b07c062484ab, []int{30} } func (m *ContainerPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +920,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } func (*ContainerResizePolicy) ProtoMessage() {} func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{32} + return fileDescriptor_6c07b07c062484ab, []int{31} } func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +948,7 @@ var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{33} + return fileDescriptor_6c07b07c062484ab, []int{32} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +976,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{34} + return fileDescriptor_6c07b07c062484ab, []int{33} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1004,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{35} + return fileDescriptor_6c07b07c062484ab, []int{34} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1032,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{36} + return fileDescriptor_6c07b07c062484ab, []int{35} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1060,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{37} + return fileDescriptor_6c07b07c062484ab, []int{36} } func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1113,6 +1085,34 @@ func (m *ContainerStatus) XXX_DiscardUnknown() { var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo +func (m *ContainerUser) Reset() { *m = 
ContainerUser{} } +func (*ContainerUser) ProtoMessage() {} +func (*ContainerUser) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{37} +} +func (m *ContainerUser) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerUser) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerUser.Merge(m, src) +} +func (m *ContainerUser) XXX_Size() int { + return m.Size() +} +func (m *ContainerUser) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerUser.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerUser proto.InternalMessageInfo + func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { @@ -2149,10 +2149,38 @@ func (m *ISCSIVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo +func (m *ImageVolumeSource) Reset() { *m = ImageVolumeSource{} } +func (*ImageVolumeSource) ProtoMessage() {} +func (*ImageVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{75} +} +func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageVolumeSource.Merge(m, src) +} +func (m *ImageVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ImageVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ImageVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo + func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{75} + return fileDescriptor_6c07b07c062484ab, []int{76} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2208,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{76} + return fileDescriptor_6c07b07c062484ab, []int{77} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2236,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{77} + return fileDescriptor_6c07b07c062484ab, []int{78} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2236,7 +2264,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{78} + return fileDescriptor_6c07b07c062484ab, []int{79} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2292,7 @@ var xxx_messageInfo_LimitRange 
proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{79} + return fileDescriptor_6c07b07c062484ab, []int{80} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2320,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{80} + return fileDescriptor_6c07b07c062484ab, []int{81} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2348,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{81} + return fileDescriptor_6c07b07c062484ab, []int{82} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2345,10 +2373,38 @@ func (m *LimitRangeSpec) XXX_DiscardUnknown() { var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo +func (m *LinuxContainerUser) Reset() { *m = LinuxContainerUser{} } +func (*LinuxContainerUser) ProtoMessage() {} +func (*LinuxContainerUser) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{83} +} +func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LinuxContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LinuxContainerUser) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinuxContainerUser.Merge(m, src) +} +func (m *LinuxContainerUser) XXX_Size() int { + return m.Size() +} +func (m *LinuxContainerUser) XXX_DiscardUnknown() { + xxx_messageInfo_LinuxContainerUser.DiscardUnknown(m) +} + +var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo + func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{82} + return fileDescriptor_6c07b07c062484ab, []int{84} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2432,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{83} + return fileDescriptor_6c07b07c062484ab, []int{85} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2460,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{84} + return fileDescriptor_6c07b07c062484ab, []int{86} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2488,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = 
LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{85} + return fileDescriptor_6c07b07c062484ab, []int{87} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2460,7 +2516,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{86} + return fileDescriptor_6c07b07c062484ab, []int{88} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2488,7 +2544,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo func (m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} } func (*ModifyVolumeStatus) ProtoMessage() {} func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{87} + return fileDescriptor_6c07b07c062484ab, []int{89} } func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2516,7 +2572,7 @@ var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{88} + return fileDescriptor_6c07b07c062484ab, []int{90} } func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2544,7 +2600,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{89} + return fileDescriptor_6c07b07c062484ab, []int{91} } func (m *Namespace) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2572,7 +2628,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } func (*NamespaceCondition) ProtoMessage() {} func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{90} + return fileDescriptor_6c07b07c062484ab, []int{92} } func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2600,7 +2656,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{91} + return fileDescriptor_6c07b07c062484ab, []int{93} } func (m *NamespaceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2628,7 +2684,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{92} + return fileDescriptor_6c07b07c062484ab, []int{94} } func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2656,7 +2712,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} func (*NamespaceStatus) Descriptor() ([]byte, []int) { 
- return fileDescriptor_6c07b07c062484ab, []int{93} + return fileDescriptor_6c07b07c062484ab, []int{95} } func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2740,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{94} + return fileDescriptor_6c07b07c062484ab, []int{96} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2768,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{95} + return fileDescriptor_6c07b07c062484ab, []int{97} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2796,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{96} + return fileDescriptor_6c07b07c062484ab, []int{98} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2824,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{97} + return fileDescriptor_6c07b07c062484ab, []int{99} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2852,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{98} + return fileDescriptor_6c07b07c062484ab, []int{100} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2880,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{99} + return fileDescriptor_6c07b07c062484ab, []int{101} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2908,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{100} + return fileDescriptor_6c07b07c062484ab, []int{102} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2877,10 +2933,38 @@ func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() { var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo +func (m *NodeFeatures) Reset() { *m = NodeFeatures{} } +func (*NodeFeatures) ProtoMessage() {} +func (*NodeFeatures) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{103} +} +func (m *NodeFeatures) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeFeatures) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeFeatures) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeFeatures.Merge(m, src) +} +func (m *NodeFeatures) XXX_Size() int { + return m.Size() +} +func (m *NodeFeatures) XXX_DiscardUnknown() { + xxx_messageInfo_NodeFeatures.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo + func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{101} + return fileDescriptor_6c07b07c062484ab, []int{104} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +2992,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{102} + return fileDescriptor_6c07b07c062484ab, []int{105} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +3020,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeRuntimeHandler) Reset() { *m = NodeRuntimeHandler{} } func (*NodeRuntimeHandler) ProtoMessage() {} func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{103} + return fileDescriptor_6c07b07c062484ab, []int{106} } func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +3048,7 @@ var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo func (m *NodeRuntimeHandlerFeatures) Reset() { *m = NodeRuntimeHandlerFeatures{} } func (*NodeRuntimeHandlerFeatures) ProtoMessage() {} func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{104} + return fileDescriptor_6c07b07c062484ab, []int{107} } func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +3076,7 @@ var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{105} + return fileDescriptor_6c07b07c062484ab, []int{108} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +3104,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{106} + return fileDescriptor_6c07b07c062484ab, []int{109} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +3132,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{107} + return fileDescriptor_6c07b07c062484ab, []int{110} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3160,7 @@ var 
xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{108} + return fileDescriptor_6c07b07c062484ab, []int{111} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3188,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{109} + return fileDescriptor_6c07b07c062484ab, []int{112} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3216,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{110} + return fileDescriptor_6c07b07c062484ab, []int{113} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3244,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{111} + return fileDescriptor_6c07b07c062484ab, []int{114} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3272,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{112} + return fileDescriptor_6c07b07c062484ab, []int{115} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3300,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{113} + return fileDescriptor_6c07b07c062484ab, []int{116} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3328,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{114} + return fileDescriptor_6c07b07c062484ab, []int{117} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{115} + return fileDescriptor_6c07b07c062484ab, []int{118} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition 
proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{116} + return fileDescriptor_6c07b07c062484ab, []int{119} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{117} + return fileDescriptor_6c07b07c062484ab, []int{120} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{118} + return fileDescriptor_6c07b07c062484ab, []int{121} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3468,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{119} + return fileDescriptor_6c07b07c062484ab, []int{122} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3496,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{120} + return fileDescriptor_6c07b07c062484ab, []int{123} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3524,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{121} + return fileDescriptor_6c07b07c062484ab, []int{124} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3552,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{122} + return fileDescriptor_6c07b07c062484ab, []int{125} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3580,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = 
PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{123} + return fileDescriptor_6c07b07c062484ab, []int{126} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3608,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{124} + return fileDescriptor_6c07b07c062484ab, []int{127} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3636,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{125} + return fileDescriptor_6c07b07c062484ab, []int{128} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3664,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{126} + return fileDescriptor_6c07b07c062484ab, []int{129} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3692,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{127} + return fileDescriptor_6c07b07c062484ab, []int{130} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3720,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{128} + return fileDescriptor_6c07b07c062484ab, []int{131} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3748,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{129} + return fileDescriptor_6c07b07c062484ab, []int{132} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3776,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{130} + return fileDescriptor_6c07b07c062484ab, []int{133} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3804,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) 
ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{131} + return fileDescriptor_6c07b07c062484ab, []int{134} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3832,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{132} + return fileDescriptor_6c07b07c062484ab, []int{135} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3860,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{133} + return fileDescriptor_6c07b07c062484ab, []int{136} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3888,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{134} + return fileDescriptor_6c07b07c062484ab, []int{137} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3916,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{135} + return fileDescriptor_6c07b07c062484ab, []int{138} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +3944,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{136} + return fileDescriptor_6c07b07c062484ab, []int{139} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +3972,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{137} + return fileDescriptor_6c07b07c062484ab, []int{140} } func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +4000,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{138} + return fileDescriptor_6c07b07c062484ab, []int{141} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +4028,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{139} + return fileDescriptor_6c07b07c062484ab, []int{142} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +4056,7 
@@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{140} + return fileDescriptor_6c07b07c062484ab, []int{143} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +4084,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{141} + return fileDescriptor_6c07b07c062484ab, []int{144} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +4112,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } func (*PodResourceClaim) ProtoMessage() {} func (*PodResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{142} + return fileDescriptor_6c07b07c062484ab, []int{145} } func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4140,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} } func (*PodResourceClaimStatus) ProtoMessage() {} func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{143} + return fileDescriptor_6c07b07c062484ab, []int{146} } func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4168,7 @@ var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } func (*PodSchedulingGate) ProtoMessage() {} func (*PodSchedulingGate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{144} + return fileDescriptor_6c07b07c062484ab, []int{147} } func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4196,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{145} + return fileDescriptor_6c07b07c062484ab, []int{148} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4224,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{146} + return fileDescriptor_6c07b07c062484ab, []int{149} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4252,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{147} + return fileDescriptor_6c07b07c062484ab, []int{150} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4280,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) 
Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{148} + return fileDescriptor_6c07b07c062484ab, []int{151} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4308,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{149} + return fileDescriptor_6c07b07c062484ab, []int{152} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4336,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{150} + return fileDescriptor_6c07b07c062484ab, []int{153} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4364,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{151} + return fileDescriptor_6c07b07c062484ab, []int{154} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4392,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{152} + return fileDescriptor_6c07b07c062484ab, []int{155} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4420,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{153} + return fileDescriptor_6c07b07c062484ab, []int{156} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4448,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{154} + return fileDescriptor_6c07b07c062484ab, []int{157} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4476,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{155} + return fileDescriptor_6c07b07c062484ab, []int{158} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4504,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return 
fileDescriptor_6c07b07c062484ab, []int{156} + return fileDescriptor_6c07b07c062484ab, []int{159} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4532,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{157} + return fileDescriptor_6c07b07c062484ab, []int{160} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4560,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{158} + return fileDescriptor_6c07b07c062484ab, []int{161} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4588,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{159} + return fileDescriptor_6c07b07c062484ab, []int{162} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4616,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{160} + return fileDescriptor_6c07b07c062484ab, []int{163} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4644,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{161} + return fileDescriptor_6c07b07c062484ab, []int{164} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4672,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{162} + return fileDescriptor_6c07b07c062484ab, []int{165} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4700,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{163} + return fileDescriptor_6c07b07c062484ab, []int{166} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4728,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return 
fileDescriptor_6c07b07c062484ab, []int{164} + return fileDescriptor_6c07b07c062484ab, []int{167} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4756,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{165} + return fileDescriptor_6c07b07c062484ab, []int{168} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4784,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{166} + return fileDescriptor_6c07b07c062484ab, []int{169} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4812,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{167} + return fileDescriptor_6c07b07c062484ab, []int{170} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4840,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{168} + return fileDescriptor_6c07b07c062484ab, []int{171} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4868,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{169} + return fileDescriptor_6c07b07c062484ab, []int{172} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4896,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{170} + return fileDescriptor_6c07b07c062484ab, []int{173} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4924,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{171} + return fileDescriptor_6c07b07c062484ab, []int{174} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4865,10 +4949,38 @@ func 
(m *ResourceFieldSelector) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo +func (m *ResourceHealth) Reset() { *m = ResourceHealth{} } +func (*ResourceHealth) ProtoMessage() {} +func (*ResourceHealth) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{175} +} +func (m *ResourceHealth) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceHealth) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceHealth.Merge(m, src) +} +func (m *ResourceHealth) XXX_Size() int { + return m.Size() +} +func (m *ResourceHealth) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceHealth.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo + func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{172} + return fileDescriptor_6c07b07c062484ab, []int{176} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +5008,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{173} + return fileDescriptor_6c07b07c062484ab, []int{177} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +5036,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{174} + return fileDescriptor_6c07b07c062484ab, []int{178} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +5064,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{175} + return fileDescriptor_6c07b07c062484ab, []int{179} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +5092,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{176} + return fileDescriptor_6c07b07c062484ab, []int{180} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5005,10 +5117,38 @@ func (m *ResourceRequirements) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo +func (m *ResourceStatus) Reset() { *m = ResourceStatus{} } +func (*ResourceStatus) ProtoMessage() {} +func (*ResourceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_6c07b07c062484ab, []int{181} +} +func (m *ResourceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*ResourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceStatus.Merge(m, src) +} +func (m *ResourceStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo + func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{177} + return fileDescriptor_6c07b07c062484ab, []int{182} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +5176,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{178} + return fileDescriptor_6c07b07c062484ab, []int{183} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5204,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{179} + return fileDescriptor_6c07b07c062484ab, []int{184} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5232,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{180} + return fileDescriptor_6c07b07c062484ab, []int{185} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5260,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{181} + return fileDescriptor_6c07b07c062484ab, []int{186} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5148,7 +5288,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{182} + return fileDescriptor_6c07b07c062484ab, []int{187} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5316,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{183} + return fileDescriptor_6c07b07c062484ab, []int{188} } func (m *Secret) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5344,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{184} + return fileDescriptor_6c07b07c062484ab, []int{189} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5372,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{185} + return fileDescriptor_6c07b07c062484ab, []int{190} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5400,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{186} + return fileDescriptor_6c07b07c062484ab, []int{191} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5428,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{187} + return fileDescriptor_6c07b07c062484ab, []int{192} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5456,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{188} + return fileDescriptor_6c07b07c062484ab, []int{193} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5484,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{189} + return fileDescriptor_6c07b07c062484ab, []int{194} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5512,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{190} + return fileDescriptor_6c07b07c062484ab, []int{195} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5540,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{191} + return fileDescriptor_6c07b07c062484ab, []int{196} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5568,7 @@ var 
xxx_messageInfo_SerializedReference proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{192} + return fileDescriptor_6c07b07c062484ab, []int{197} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5596,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{193} + return fileDescriptor_6c07b07c062484ab, []int{198} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5624,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{194} + return fileDescriptor_6c07b07c062484ab, []int{199} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5652,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{195} + return fileDescriptor_6c07b07c062484ab, []int{200} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5680,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{196} + return fileDescriptor_6c07b07c062484ab, []int{201} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5708,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{197} + return fileDescriptor_6c07b07c062484ab, []int{202} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5736,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{198} + return fileDescriptor_6c07b07c062484ab, []int{203} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5764,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{199} + return fileDescriptor_6c07b07c062484ab, []int{204} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5652,7 +5792,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = 
ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{200} + return fileDescriptor_6c07b07c062484ab, []int{205} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5680,7 +5820,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{201} + return fileDescriptor_6c07b07c062484ab, []int{206} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5848,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *SleepAction) Reset() { *m = SleepAction{} } func (*SleepAction) ProtoMessage() {} func (*SleepAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{202} + return fileDescriptor_6c07b07c062484ab, []int{207} } func (m *SleepAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5876,7 @@ var xxx_messageInfo_SleepAction proto.InternalMessageInfo func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{203} + return fileDescriptor_6c07b07c062484ab, []int{208} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5904,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{204} + return fileDescriptor_6c07b07c062484ab, []int{209} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5932,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{205} + return fileDescriptor_6c07b07c062484ab, []int{210} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5960,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{206} + return fileDescriptor_6c07b07c062484ab, []int{211} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +5988,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{207} + return fileDescriptor_6c07b07c062484ab, []int{212} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +6016,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) 
{ - return fileDescriptor_6c07b07c062484ab, []int{208} + return fileDescriptor_6c07b07c062484ab, []int{213} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +6044,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{209} + return fileDescriptor_6c07b07c062484ab, []int{214} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,7 +6072,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{210} + return fileDescriptor_6c07b07c062484ab, []int{215} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5960,7 +6100,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{211} + return fileDescriptor_6c07b07c062484ab, []int{216} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5988,7 +6128,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{212} + return fileDescriptor_6c07b07c062484ab, []int{217} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6016,7 +6156,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } func (*TypedObjectReference) ProtoMessage() {} func (*TypedObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{213} + return fileDescriptor_6c07b07c062484ab, []int{218} } func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6044,7 +6184,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{214} + return fileDescriptor_6c07b07c062484ab, []int{219} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6072,7 +6212,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{215} + return fileDescriptor_6c07b07c062484ab, []int{220} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6100,7 +6240,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) 
ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{216} + return fileDescriptor_6c07b07c062484ab, []int{221} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6128,7 +6268,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} } func (*VolumeMountStatus) ProtoMessage() {} func (*VolumeMountStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{217} + return fileDescriptor_6c07b07c062484ab, []int{222} } func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6156,7 +6296,7 @@ var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{218} + return fileDescriptor_6c07b07c062484ab, []int{223} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6184,7 +6324,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{219} + return fileDescriptor_6c07b07c062484ab, []int{224} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6212,7 +6352,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} } func (*VolumeResourceRequirements) ProtoMessage() {} func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{220} + return fileDescriptor_6c07b07c062484ab, []int{225} } func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6240,7 +6380,7 @@ var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{221} + return fileDescriptor_6c07b07c062484ab, []int{226} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6268,7 +6408,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{222} + return fileDescriptor_6c07b07c062484ab, []int{227} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6296,7 +6436,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{223} + return fileDescriptor_6c07b07c062484ab, []int{228} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6324,7 +6464,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm 
proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_6c07b07c062484ab, []int{224} + return fileDescriptor_6c07b07c062484ab, []int{229} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6368,7 +6508,6 @@ func init() { proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") - proto.RegisterType((*ClaimSource)(nil), "k8s.io.api.core.v1.ClaimSource") proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") proto.RegisterType((*ClusterTrustBundleProjection)(nil), "k8s.io.api.core.v1.ClusterTrustBundleProjection") proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") @@ -6393,6 +6532,7 @@ func init() { proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry") + proto.RegisterType((*ContainerUser)(nil), "k8s.io.api.core.v1.ContainerUser") proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") @@ -6432,6 +6572,7 @@ func init() { proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*ImageVolumeSource)(nil), "k8s.io.api.core.v1.ImageVolumeSource") proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") proto.RegisterType((*LifecycleHandler)(nil), "k8s.io.api.core.v1.LifecycleHandler") @@ -6444,6 +6585,7 @@ func init() { proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry") proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*LinuxContainerUser)(nil), "k8s.io.api.core.v1.LinuxContainerUser") proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") @@ -6463,6 +6605,7 @@ func init() { proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeFeatures)(nil), "k8s.io.api.core.v1.NodeFeatures") proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") 
proto.RegisterType((*NodeRuntimeHandler)(nil), "k8s.io.api.core.v1.NodeRuntimeHandler") @@ -6543,6 +6686,7 @@ func init() { proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.core.v1.ResourceClaim") proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceHealth)(nil), "k8s.io.api.core.v1.ResourceHealth") proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") @@ -6553,6 +6697,7 @@ func init() { proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry") proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry") + proto.RegisterType((*ResourceStatus)(nil), "k8s.io.api.core.v1.ResourceStatus") proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") @@ -6613,989 +6758,1015 @@ func init() { } var fileDescriptor_6c07b07c062484ab = []byte{ - // 15708 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x59, 0x8c, 0x1c, 0xd9, - 0x75, 0x20, 0xaa, 0xc8, 0xac, 0xf5, 0xd4, 0x7e, 0x8b, 0x64, 0x17, 0xab, 0x49, 0x26, 0x3b, 0xba, - 0x9b, 0xcd, 0xde, 0x8a, 0x62, 0x2f, 0xea, 0x56, 0x77, 0xab, 0xad, 0x5a, 0xc9, 0x6c, 0x56, 0x15, - 0xb3, 0x6f, 0x16, 0x49, 0xa9, 0xd5, 0x92, 0x15, 0xcc, 0xbc, 0x55, 0x15, 0xaa, 0xcc, 0x88, 0xec, - 0x88, 0xc8, 0x22, 0x8b, 0x4f, 0x86, 0x6d, 0xf9, 0x59, 0xb6, 0x64, 0x3f, 0x40, 0x78, 0xf0, 0x5b, - 0x20, 0x1b, 0xc6, 0x83, 0x9f, 0x9f, 0x97, 0xa7, 0x67, 0xbf, 0xd1, 0xc8, 0xe3, 0x4d, 0xde, 0xc6, - 0x33, 0x03, 0xd8, 0x83, 0x81, 0xc7, 0x63, 0xc0, 0x96, 0x31, 0xc6, 0x94, 0x47, 0xf4, 0x00, 0x86, - 0x3f, 0xc6, 0x36, 0x3c, 0xf3, 0x31, 0x53, 0xf0, 0x8c, 0x07, 0x77, 0x8d, 0x7b, 0x63, 0xc9, 0xcc, - 0x62, 0x93, 0xa5, 0x96, 0xd0, 0x7f, 0x99, 0xe7, 0x9c, 0x7b, 0xee, 0x8d, 0xbb, 0x9e, 0x7b, 0xce, - 0xb9, 0xe7, 0x80, 0xbd, 0xf3, 0x72, 0x38, 0xe7, 0xfa, 0x17, 0x9c, 0x96, 0x7b, 0xa1, 0xe6, 0x07, - 0xe4, 0xc2, 0xee, 0xc5, 0x0b, 0x5b, 0xc4, 0x23, 0x81, 0x13, 0x91, 0xfa, 0x5c, 0x2b, 0xf0, 0x23, - 0x1f, 0x21, 0x4e, 0x33, 0xe7, 0xb4, 0xdc, 0x39, 0x4a, 0x33, 0xb7, 0x7b, 0x71, 0xf6, 0xd9, 0x2d, - 0x37, 0xda, 0x6e, 0xdf, 0x9c, 0xab, 0xf9, 0xcd, 0x0b, 0x5b, 0xfe, 0x96, 0x7f, 0x81, 0x91, 0xde, - 0x6c, 0x6f, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x59, 0xcc, 0xbe, 0x10, 0x57, 0xd3, 0x74, 0x6a, - 0xdb, 0xae, 0x47, 0x82, 0xbd, 0x0b, 0xad, 0x9d, 0x2d, 0x56, 0x6f, 0x40, 0x42, 0xbf, 0x1d, 0xd4, - 0x48, 0xb2, 0xe2, 0x8e, 0xa5, 0xc2, 0x0b, 0x4d, 0x12, 0x39, 0x19, 0xcd, 0x9d, 0xbd, 0x90, 0x57, - 0x2a, 0x68, 0x7b, 0x91, 0xdb, 0x4c, 0x57, 0xf3, 0xa1, 0x6e, 0x05, 0xc2, 0xda, 0x36, 0x69, 0x3a, - 0xa9, 0x72, 0xcf, 0xe7, 0x95, 0x6b, 0x47, 0x6e, 0xe3, 0x82, 0xeb, 0x45, 0x61, 0x14, 0x24, 0x0b, - 0xd9, 0xdf, 0xb0, 0xe0, 0xec, 0xfc, 0x8d, 0xea, 0x72, 0xc3, 0x09, 0x23, 0xb7, 0xb6, 0xd0, 0xf0, - 0x6b, 0x3b, 0xd5, 0xc8, 0x0f, 0xc8, 0x75, 0xbf, 0xd1, 0x6e, 0x92, 0x2a, 0xeb, 0x08, 0xf4, 0x0c, - 0x0c, 0xed, 0xb2, 0xff, 0xe5, 
0xa5, 0x19, 0xeb, 0xac, 0x75, 0x7e, 0x78, 0x61, 0xf2, 0xf7, 0xf6, - 0x4b, 0x1f, 0xb8, 0xbb, 0x5f, 0x1a, 0xba, 0x2e, 0xe0, 0x58, 0x51, 0xa0, 0x73, 0x30, 0xb0, 0x19, - 0x6e, 0xec, 0xb5, 0xc8, 0x4c, 0x81, 0xd1, 0x8e, 0x0b, 0xda, 0x81, 0x95, 0x2a, 0x85, 0x62, 0x81, - 0x45, 0x17, 0x60, 0xb8, 0xe5, 0x04, 0x91, 0x1b, 0xb9, 0xbe, 0x37, 0x53, 0x3c, 0x6b, 0x9d, 0xef, - 0x5f, 0x98, 0x12, 0xa4, 0xc3, 0x15, 0x89, 0xc0, 0x31, 0x0d, 0x6d, 0x46, 0x40, 0x9c, 0xfa, 0x55, - 0xaf, 0xb1, 0x37, 0xd3, 0x77, 0xd6, 0x3a, 0x3f, 0x14, 0x37, 0x03, 0x0b, 0x38, 0x56, 0x14, 0xf6, - 0x97, 0x0b, 0x30, 0x34, 0xbf, 0xb9, 0xe9, 0x7a, 0x6e, 0xb4, 0x87, 0xae, 0xc3, 0xa8, 0xe7, 0xd7, - 0x89, 0xfc, 0xcf, 0xbe, 0x62, 0xe4, 0xb9, 0xb3, 0x73, 0xe9, 0xa9, 0x34, 0xb7, 0xae, 0xd1, 0x2d, - 0x4c, 0xde, 0xdd, 0x2f, 0x8d, 0xea, 0x10, 0x6c, 0xf0, 0x41, 0x18, 0x46, 0x5a, 0x7e, 0x5d, 0xb1, - 0x2d, 0x30, 0xb6, 0xa5, 0x2c, 0xb6, 0x95, 0x98, 0x6c, 0x61, 0xe2, 0xee, 0x7e, 0x69, 0x44, 0x03, - 0x60, 0x9d, 0x09, 0xba, 0x09, 0x13, 0xf4, 0xaf, 0x17, 0xb9, 0x8a, 0x6f, 0x91, 0xf1, 0x7d, 0x34, - 0x8f, 0xaf, 0x46, 0xba, 0x30, 0x7d, 0x77, 0xbf, 0x34, 0x91, 0x00, 0xe2, 0x24, 0x43, 0xfb, 0x47, - 0x2d, 0x98, 0x98, 0x6f, 0xb5, 0xe6, 0x83, 0xa6, 0x1f, 0x54, 0x02, 0x7f, 0xd3, 0x6d, 0x10, 0xf4, - 0x12, 0xf4, 0x45, 0x74, 0xd4, 0xf8, 0x08, 0x3f, 0x2a, 0xba, 0xb6, 0x8f, 0x8e, 0xd5, 0xc1, 0x7e, - 0x69, 0x3a, 0x41, 0xce, 0x86, 0x92, 0x15, 0x40, 0x1f, 0x85, 0xc9, 0x86, 0x5f, 0x73, 0x1a, 0xdb, - 0x7e, 0x18, 0x09, 0xac, 0x18, 0xfa, 0x63, 0x77, 0xf7, 0x4b, 0x93, 0xab, 0x09, 0x1c, 0x4e, 0x51, - 0xdb, 0x77, 0x60, 0x7c, 0x3e, 0x8a, 0x9c, 0xda, 0x36, 0xa9, 0xf3, 0x09, 0x85, 0x5e, 0x80, 0x3e, - 0xcf, 0x69, 0xca, 0xc6, 0x9c, 0x95, 0x8d, 0x59, 0x77, 0x9a, 0xb4, 0x31, 0x93, 0xd7, 0x3c, 0xf7, - 0x9d, 0xb6, 0x98, 0xa4, 0x14, 0x86, 0x19, 0x35, 0x7a, 0x0e, 0xa0, 0x4e, 0x76, 0xdd, 0x1a, 0xa9, - 0x38, 0xd1, 0xb6, 0x68, 0x03, 0x12, 0x65, 0x61, 0x49, 0x61, 0xb0, 0x46, 0x65, 0xdf, 0x86, 0xe1, - 0xf9, 0x5d, 0xdf, 0xad, 0x57, 0xfc, 0x7a, 0x88, 0x76, 0x60, 0xa2, 0x15, 0x90, 0x4d, 0x12, 0x28, - 0xd0, 0x8c, 0x75, 0xb6, 0x78, 0x7e, 0xe4, 0xb9, 0xf3, 0x99, 0x7d, 0x6f, 0x92, 0x2e, 0x7b, 0x51, - 0xb0, 0xb7, 0xf0, 0x90, 0xa8, 0x6f, 0x22, 0x81, 0xc5, 0x49, 0xce, 0xf6, 0x3f, 0x2f, 0xc0, 0xf1, - 0xf9, 0x3b, 0xed, 0x80, 0x2c, 0xb9, 0xe1, 0x4e, 0x72, 0xc1, 0xd5, 0xdd, 0x70, 0x67, 0x3d, 0xee, - 0x01, 0x35, 0xd3, 0x97, 0x04, 0x1c, 0x2b, 0x0a, 0xf4, 0x2c, 0x0c, 0xd2, 0xdf, 0xd7, 0x70, 0x59, - 0x7c, 0xf2, 0xb4, 0x20, 0x1e, 0x59, 0x72, 0x22, 0x67, 0x89, 0xa3, 0xb0, 0xa4, 0x41, 0x6b, 0x30, - 0x52, 0x63, 0xfb, 0xc3, 0xd6, 0x9a, 0x5f, 0x27, 0x6c, 0x6e, 0x0d, 0x2f, 0x3c, 0x4d, 0xc9, 0x17, - 0x63, 0xf0, 0xc1, 0x7e, 0x69, 0x86, 0xb7, 0x4d, 0xb0, 0xd0, 0x70, 0x58, 0x2f, 0x8f, 0x6c, 0xb5, - 0xdc, 0xfb, 0x18, 0x27, 0xc8, 0x58, 0xea, 0xe7, 0xb5, 0x95, 0xdb, 0xcf, 0x56, 0xee, 0x68, 0xf6, - 0xaa, 0x45, 0x17, 0xa1, 0x6f, 0xc7, 0xf5, 0xea, 0x33, 0x03, 0x8c, 0xd7, 0x69, 0x3a, 0xe6, 0x57, - 0x5c, 0xaf, 0x7e, 0xb0, 0x5f, 0x9a, 0x32, 0x9a, 0x43, 0x81, 0x98, 0x91, 0xda, 0xff, 0xc9, 0x82, - 0x12, 0xc3, 0xad, 0xb8, 0x0d, 0x52, 0x21, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0xa3, 0x43, 0x9f, - 0x03, 0x08, 0x49, 0x2d, 0x20, 0x91, 0xd6, 0xa5, 0x6a, 0x62, 0x54, 0x15, 0x06, 0x6b, 0x54, 0x74, - 0x7f, 0x0a, 0xb7, 0x9d, 0x80, 0xcd, 0x2f, 0xd1, 0xb1, 0x6a, 0x7f, 0xaa, 0x4a, 0x04, 0x8e, 0x69, - 0x8c, 0xfd, 0xa9, 0xd8, 0x6d, 0x7f, 0x42, 0x1f, 0x81, 0x89, 0xb8, 0xb2, 0xb0, 0xe5, 0xd4, 0x64, - 0x07, 0xb2, 0x15, 0x5c, 0x35, 0x51, 0x38, 0x49, 0x6b, 0xff, 0xbf, 0x96, 0x98, 0x3c, 0xf4, 0xab, - 0xdf, 0xe3, 0xdf, 0x6a, 0xff, 0xaa, 0x05, 0x83, 0x0b, 
0xae, 0x57, 0x77, 0xbd, 0x2d, 0xf4, 0x69, - 0x18, 0xa2, 0x47, 0x65, 0xdd, 0x89, 0x1c, 0xb1, 0x0d, 0x7f, 0x50, 0x5b, 0x5b, 0xea, 0xe4, 0x9a, - 0x6b, 0xed, 0x6c, 0x51, 0x40, 0x38, 0x47, 0xa9, 0xe9, 0x6a, 0xbb, 0x7a, 0xf3, 0x33, 0xa4, 0x16, - 0xad, 0x91, 0xc8, 0x89, 0x3f, 0x27, 0x86, 0x61, 0xc5, 0x15, 0x5d, 0x81, 0x81, 0xc8, 0x09, 0xb6, - 0x48, 0x24, 0xf6, 0xe3, 0xcc, 0x7d, 0x93, 0x97, 0xc4, 0x74, 0x45, 0x12, 0xaf, 0x46, 0xe2, 0x53, - 0x6a, 0x83, 0x15, 0xc5, 0x82, 0x85, 0xfd, 0xdf, 0x06, 0xe1, 0xe4, 0x62, 0xb5, 0x9c, 0x33, 0xaf, - 0xce, 0xc1, 0x40, 0x3d, 0x70, 0x77, 0x49, 0x20, 0xfa, 0x59, 0x71, 0x59, 0x62, 0x50, 0x2c, 0xb0, - 0xe8, 0x65, 0x18, 0xe5, 0xe7, 0xe3, 0x65, 0xc7, 0xab, 0xc7, 0xdb, 0xa3, 0xa0, 0x1e, 0xbd, 0xae, - 0xe1, 0xb0, 0x41, 0x79, 0xc8, 0x49, 0x75, 0x2e, 0xb1, 0x18, 0xf3, 0xce, 0xde, 0x2f, 0x58, 0x30, - 0xc9, 0xab, 0x99, 0x8f, 0xa2, 0xc0, 0xbd, 0xd9, 0x8e, 0x48, 0x38, 0xd3, 0xcf, 0x76, 0xba, 0xc5, - 0xac, 0xde, 0xca, 0xed, 0x81, 0xb9, 0xeb, 0x09, 0x2e, 0x7c, 0x13, 0x9c, 0x11, 0xf5, 0x4e, 0x26, - 0xd1, 0x38, 0x55, 0x2d, 0xfa, 0x01, 0x0b, 0x66, 0x6b, 0xbe, 0x17, 0x05, 0x7e, 0xa3, 0x41, 0x82, - 0x4a, 0xfb, 0x66, 0xc3, 0x0d, 0xb7, 0xf9, 0x3c, 0xc5, 0x64, 0x93, 0xed, 0x04, 0x39, 0x63, 0xa8, - 0x88, 0xc4, 0x18, 0x9e, 0xb9, 0xbb, 0x5f, 0x9a, 0x5d, 0xcc, 0x65, 0x85, 0x3b, 0x54, 0x83, 0x76, - 0x00, 0xd1, 0x93, 0xbd, 0x1a, 0x39, 0x5b, 0x24, 0xae, 0x7c, 0xb0, 0xf7, 0xca, 0x4f, 0xdc, 0xdd, - 0x2f, 0xa1, 0xf5, 0x14, 0x0b, 0x9c, 0xc1, 0x16, 0xbd, 0x03, 0xc7, 0x28, 0x34, 0xf5, 0xad, 0x43, - 0xbd, 0x57, 0x37, 0x73, 0x77, 0xbf, 0x74, 0x6c, 0x3d, 0x83, 0x09, 0xce, 0x64, 0x8d, 0xbe, 0xcf, - 0x82, 0x93, 0xf1, 0xe7, 0x2f, 0xdf, 0x6e, 0x39, 0x5e, 0x3d, 0xae, 0x78, 0xb8, 0xf7, 0x8a, 0xe9, - 0x9e, 0x7c, 0x72, 0x31, 0x8f, 0x13, 0xce, 0xaf, 0x04, 0x79, 0x30, 0x4d, 0x9b, 0x96, 0xac, 0x1b, - 0x7a, 0xaf, 0xfb, 0xa1, 0xbb, 0xfb, 0xa5, 0xe9, 0xf5, 0x34, 0x0f, 0x9c, 0xc5, 0x78, 0x76, 0x11, - 0x8e, 0x67, 0xce, 0x4e, 0x34, 0x09, 0xc5, 0x1d, 0xc2, 0x85, 0xc0, 0x61, 0x4c, 0x7f, 0xa2, 0x63, - 0xd0, 0xbf, 0xeb, 0x34, 0xda, 0x62, 0x61, 0x62, 0xfe, 0xe7, 0x95, 0xc2, 0xcb, 0x96, 0xfd, 0x2f, - 0x8a, 0x30, 0xb1, 0x58, 0x2d, 0xdf, 0xd3, 0xaa, 0xd7, 0x8f, 0xbd, 0x42, 0xc7, 0x63, 0x2f, 0x3e, - 0x44, 0x8b, 0xb9, 0x87, 0xe8, 0xf7, 0x66, 0x2c, 0xd9, 0x3e, 0xb6, 0x64, 0x3f, 0x9c, 0xb3, 0x64, - 0xef, 0xf3, 0x42, 0xdd, 0xcd, 0x99, 0xb5, 0xfd, 0x6c, 0x00, 0x33, 0x25, 0x24, 0x26, 0xfb, 0x25, - 0xb7, 0xda, 0x43, 0x4e, 0xdd, 0xfb, 0x33, 0x8e, 0x35, 0x18, 0x5d, 0x74, 0x5a, 0xce, 0x4d, 0xb7, - 0xe1, 0x46, 0x2e, 0x09, 0xd1, 0x13, 0x50, 0x74, 0xea, 0x75, 0x26, 0xdd, 0x0d, 0x2f, 0x1c, 0xbf, - 0xbb, 0x5f, 0x2a, 0xce, 0xd7, 0xa9, 0x98, 0x01, 0x8a, 0x6a, 0x0f, 0x53, 0x0a, 0xf4, 0x14, 0xf4, - 0xd5, 0x03, 0xbf, 0x35, 0x53, 0x60, 0x94, 0x74, 0x95, 0xf7, 0x2d, 0x05, 0x7e, 0x2b, 0x41, 0xca, - 0x68, 0xec, 0xdf, 0x29, 0xc0, 0xa9, 0x45, 0xd2, 0xda, 0x5e, 0xa9, 0xe6, 0x9c, 0x17, 0xe7, 0x61, - 0xa8, 0xe9, 0x7b, 0x6e, 0xe4, 0x07, 0xa1, 0xa8, 0x9a, 0xcd, 0x88, 0x35, 0x01, 0xc3, 0x0a, 0x8b, - 0xce, 0x42, 0x5f, 0x2b, 0x16, 0x62, 0x47, 0xa5, 0x00, 0xcc, 0xc4, 0x57, 0x86, 0xa1, 0x14, 0xed, - 0x90, 0x04, 0x62, 0xc6, 0x28, 0x8a, 0x6b, 0x21, 0x09, 0x30, 0xc3, 0xc4, 0x92, 0x00, 0x95, 0x11, - 0xc4, 0x89, 0x90, 0x90, 0x04, 0x28, 0x06, 0x6b, 0x54, 0xa8, 0x02, 0xc3, 0x61, 0x62, 0x64, 0x7b, - 0x5a, 0x9a, 0x63, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, 0xae, 0xa2, 0xc2, - 0xd7, 0x0b, 0x80, 0x78, 0x17, 0x7e, 0x9b, 0x75, 0xdc, 0xb5, 0x74, 0xc7, 0xf5, 0xbe, 0x24, 0xee, - 0x57, 0xef, 0xfd, 0x67, 0x0b, 0x4e, 0x2d, 0xba, 0x5e, 0x9d, 0x04, 0x39, 0x13, 
0xf0, 0xc1, 0x5c, - 0xe5, 0x0f, 0x27, 0xa4, 0x18, 0x53, 0xac, 0xef, 0x3e, 0x4c, 0x31, 0xfb, 0x6f, 0x2c, 0x40, 0xfc, - 0xb3, 0xdf, 0x73, 0x1f, 0x7b, 0x2d, 0xfd, 0xb1, 0xf7, 0x61, 0x5a, 0xd8, 0xff, 0xbf, 0x05, 0x23, - 0x8b, 0x0d, 0xc7, 0x6d, 0x8a, 0x4f, 0x5d, 0x84, 0x29, 0xa9, 0xb7, 0x62, 0x60, 0x4d, 0xf6, 0xa7, - 0x9b, 0xdb, 0x14, 0x4e, 0x22, 0x71, 0x9a, 0x1e, 0x7d, 0x02, 0x4e, 0x1a, 0xc0, 0x0d, 0xd2, 0x6c, - 0x35, 0x9c, 0x48, 0xbf, 0x15, 0xb0, 0xd3, 0x1f, 0xe7, 0x11, 0xe1, 0xfc, 0xf2, 0xf6, 0x2a, 0x8c, - 0x2f, 0x36, 0x5c, 0xe2, 0x45, 0xe5, 0xca, 0xa2, 0xef, 0x6d, 0xba, 0x5b, 0xe8, 0x15, 0x18, 0x8f, - 0xdc, 0x26, 0xf1, 0xdb, 0x51, 0x95, 0xd4, 0x7c, 0x8f, 0xdd, 0xb5, 0xad, 0xf3, 0xfd, 0x0b, 0xe8, - 0xee, 0x7e, 0x69, 0x7c, 0xc3, 0xc0, 0xe0, 0x04, 0xa5, 0xfd, 0x33, 0x74, 0xa7, 0x6d, 0xb4, 0xc3, - 0x88, 0x04, 0x1b, 0x41, 0x3b, 0x8c, 0x16, 0xda, 0x54, 0x5a, 0xae, 0x04, 0x3e, 0xed, 0x40, 0xd7, - 0xf7, 0xd0, 0x29, 0x43, 0x81, 0x30, 0x24, 0x95, 0x07, 0x42, 0x51, 0x30, 0x07, 0x10, 0xba, 0x5b, - 0x1e, 0x09, 0xb4, 0x4f, 0x1b, 0x67, 0x8b, 0x5b, 0x41, 0xb1, 0x46, 0x81, 0x1a, 0x30, 0xd6, 0x70, - 0x6e, 0x92, 0x46, 0x95, 0x34, 0x48, 0x2d, 0xf2, 0x03, 0xa1, 0x91, 0x79, 0xbe, 0xb7, 0x9b, 0xcb, - 0xaa, 0x5e, 0x74, 0x61, 0xea, 0xee, 0x7e, 0x69, 0xcc, 0x00, 0x61, 0x93, 0x39, 0xdd, 0xec, 0xfc, - 0x16, 0xfd, 0x0a, 0xa7, 0xa1, 0x5f, 0x97, 0xaf, 0x0a, 0x18, 0x56, 0x58, 0xb5, 0xd9, 0xf5, 0xe5, - 0x6d, 0x76, 0xf6, 0x9f, 0xd1, 0xa5, 0xe1, 0x37, 0x5b, 0xbe, 0x47, 0xbc, 0x68, 0xd1, 0xf7, 0xea, - 0x5c, 0x97, 0xf6, 0x8a, 0xa1, 0xec, 0x39, 0x97, 0x50, 0xf6, 0x9c, 0x48, 0x97, 0xd0, 0xf4, 0x3d, - 0x1f, 0x86, 0x81, 0x30, 0x72, 0xa2, 0x76, 0x28, 0x3a, 0xee, 0x11, 0xb9, 0x50, 0xaa, 0x0c, 0x7a, - 0xb0, 0x5f, 0x9a, 0x50, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x49, 0x18, 0x6c, 0x92, 0x30, 0x74, - 0xb6, 0xa4, 0xa0, 0x33, 0x21, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x28, 0xf4, 0x93, - 0x20, 0xf0, 0x03, 0xf1, 0x6d, 0x63, 0x82, 0xb0, 0x7f, 0x99, 0x02, 0x31, 0xc7, 0xd9, 0xff, 0xda, - 0x82, 0x09, 0xd5, 0x56, 0x5e, 0xd7, 0x11, 0x5c, 0x30, 0xdf, 0x02, 0xa8, 0xc9, 0x0f, 0x0c, 0x99, - 0x60, 0x30, 0xf2, 0xdc, 0xb9, 0x4c, 0x19, 0x2c, 0xd5, 0x8d, 0x31, 0x67, 0x05, 0x0a, 0xb1, 0xc6, - 0xcd, 0xfe, 0x4d, 0x0b, 0xa6, 0x13, 0x5f, 0xb4, 0xea, 0x86, 0x11, 0x7a, 0x3b, 0xf5, 0x55, 0x73, - 0x3d, 0x4e, 0x3e, 0x37, 0xe4, 0xdf, 0xa4, 0x76, 0x29, 0x09, 0xd1, 0xbe, 0xe8, 0x32, 0xf4, 0xbb, - 0x11, 0x69, 0xca, 0x8f, 0x79, 0xb4, 0xe3, 0xc7, 0xf0, 0x56, 0xc5, 0x23, 0x52, 0xa6, 0x25, 0x31, - 0x67, 0x60, 0xff, 0x4e, 0x11, 0x86, 0xf9, 0xfa, 0x5e, 0x73, 0x5a, 0x47, 0x30, 0x16, 0x4f, 0xc3, - 0xb0, 0xdb, 0x6c, 0xb6, 0x23, 0xe7, 0xa6, 0x38, 0xa9, 0x87, 0xf8, 0xae, 0x59, 0x96, 0x40, 0x1c, - 0xe3, 0x51, 0x19, 0xfa, 0x58, 0x53, 0xf8, 0x57, 0x3e, 0x91, 0xfd, 0x95, 0xa2, 0xed, 0x73, 0x4b, - 0x4e, 0xe4, 0x70, 0x21, 0x59, 0xad, 0x2b, 0x0a, 0xc2, 0x8c, 0x05, 0x72, 0x00, 0x6e, 0xba, 0x9e, - 0x13, 0xec, 0x51, 0xd8, 0x4c, 0x91, 0x31, 0x7c, 0xb6, 0x33, 0xc3, 0x05, 0x45, 0xcf, 0xd9, 0xaa, - 0x0f, 0x8b, 0x11, 0x58, 0x63, 0x3a, 0xfb, 0x12, 0x0c, 0x2b, 0xe2, 0xc3, 0xc8, 0xba, 0xb3, 0x1f, - 0x81, 0x89, 0x44, 0x5d, 0xdd, 0x8a, 0x8f, 0xea, 0xa2, 0xf2, 0xaf, 0xb3, 0x2d, 0x43, 0xb4, 0x7a, - 0xd9, 0xdb, 0x15, 0x47, 0xcc, 0x1d, 0x38, 0xd6, 0xc8, 0x38, 0xa4, 0xc4, 0xb8, 0xf6, 0x7e, 0xa8, - 0x9d, 0x12, 0x9f, 0x7d, 0x2c, 0x0b, 0x8b, 0x33, 0xeb, 0x30, 0x76, 0xc4, 0x42, 0xa7, 0x1d, 0x91, - 0xee, 0x77, 0xc7, 0x54, 0xe3, 0xaf, 0x90, 0x3d, 0xb5, 0xa9, 0x7e, 0x2b, 0x9b, 0x7f, 0x9a, 0xf7, - 0x3e, 0xdf, 0x2e, 0x47, 0x04, 0x83, 0xe2, 0x15, 0xb2, 0xc7, 0x87, 0x42, 0xff, 0xba, 0x62, 0xc7, - 0xaf, 
0xfb, 0xaa, 0x05, 0x63, 0xea, 0xeb, 0x8e, 0x60, 0x5f, 0x58, 0x30, 0xf7, 0x85, 0xd3, 0x1d, - 0x27, 0x78, 0xce, 0x8e, 0xf0, 0xf5, 0x02, 0x9c, 0x54, 0x34, 0xf4, 0xda, 0xc7, 0xff, 0x88, 0x59, - 0x75, 0x01, 0x86, 0x3d, 0xa5, 0x00, 0xb5, 0x4c, 0xcd, 0x63, 0xac, 0xfe, 0x8c, 0x69, 0xe8, 0x91, - 0xe7, 0xc5, 0x87, 0xf6, 0xa8, 0x6e, 0x19, 0x10, 0x87, 0xfb, 0x02, 0x14, 0xdb, 0x6e, 0x5d, 0x1c, - 0x30, 0x1f, 0x94, 0xbd, 0x7d, 0xad, 0xbc, 0x74, 0xb0, 0x5f, 0x7a, 0x24, 0xcf, 0x48, 0x46, 0x4f, - 0xb6, 0x70, 0xee, 0x5a, 0x79, 0x09, 0xd3, 0xc2, 0x68, 0x1e, 0x26, 0xa4, 0x28, 0x73, 0x9d, 0x4a, - 0xd2, 0xbe, 0x27, 0xce, 0x21, 0xa5, 0xde, 0xc7, 0x26, 0x1a, 0x27, 0xe9, 0xd1, 0x12, 0x4c, 0xee, - 0xb4, 0x6f, 0x92, 0x06, 0x89, 0xf8, 0x07, 0x5f, 0x21, 0x5c, 0xf9, 0x3d, 0x1c, 0x5f, 0xba, 0xaf, - 0x24, 0xf0, 0x38, 0x55, 0xc2, 0xfe, 0x07, 0x76, 0x1e, 0x88, 0xde, 0xd3, 0xe4, 0x9b, 0x6f, 0xe5, - 0x74, 0xee, 0x65, 0x56, 0x5c, 0x21, 0x7b, 0x1b, 0x3e, 0x95, 0x43, 0xb2, 0x67, 0x85, 0x31, 0xe7, - 0xfb, 0x3a, 0xce, 0xf9, 0x5f, 0x2a, 0xc0, 0x71, 0xd5, 0x03, 0x86, 0x7c, 0xff, 0xed, 0xde, 0x07, - 0x17, 0x61, 0xa4, 0x4e, 0x36, 0x9d, 0x76, 0x23, 0x52, 0x96, 0x98, 0x7e, 0x6e, 0x1c, 0x5c, 0x8a, - 0xc1, 0x58, 0xa7, 0x39, 0x44, 0xb7, 0xfd, 0xc2, 0x18, 0x3b, 0x88, 0x23, 0x87, 0xce, 0x71, 0xb5, - 0x6a, 0xac, 0xdc, 0x55, 0xf3, 0x28, 0xf4, 0xbb, 0x4d, 0x2a, 0x98, 0x15, 0x4c, 0x79, 0xab, 0x4c, - 0x81, 0x98, 0xe3, 0xd0, 0xe3, 0x30, 0x58, 0xf3, 0x9b, 0x4d, 0xc7, 0xab, 0xb3, 0x23, 0x6f, 0x78, - 0x61, 0x84, 0xca, 0x6e, 0x8b, 0x1c, 0x84, 0x25, 0x8e, 0x0a, 0xdf, 0x4e, 0xb0, 0xc5, 0xd5, 0x53, - 0x42, 0xf8, 0x9e, 0x0f, 0xb6, 0x42, 0xcc, 0xa0, 0xf4, 0x76, 0x7d, 0xcb, 0x0f, 0x76, 0x5c, 0x6f, - 0x6b, 0xc9, 0x0d, 0xc4, 0x92, 0x50, 0x67, 0xe1, 0x0d, 0x85, 0xc1, 0x1a, 0x15, 0x5a, 0x81, 0xfe, - 0x96, 0x1f, 0x44, 0xe1, 0xcc, 0x00, 0xeb, 0xee, 0x47, 0x72, 0x36, 0x22, 0xfe, 0xb5, 0x15, 0x3f, - 0x88, 0xe2, 0x0f, 0xa0, 0xff, 0x42, 0xcc, 0x8b, 0xa3, 0x55, 0x18, 0x24, 0xde, 0xee, 0x4a, 0xe0, - 0x37, 0x67, 0xa6, 0xf3, 0x39, 0x2d, 0x73, 0x12, 0x3e, 0xcd, 0x62, 0x19, 0x55, 0x80, 0xb1, 0x64, - 0x81, 0x3e, 0x0c, 0x45, 0xe2, 0xed, 0xce, 0x0c, 0x32, 0x4e, 0xb3, 0x39, 0x9c, 0xae, 0x3b, 0x41, - 0xbc, 0xe7, 0x2f, 0x7b, 0xbb, 0x98, 0x96, 0x41, 0x1f, 0x87, 0x61, 0xb9, 0x61, 0x84, 0x42, 0xef, - 0x9b, 0x39, 0x61, 0xe5, 0x36, 0x83, 0xc9, 0x3b, 0x6d, 0x37, 0x20, 0x4d, 0xe2, 0x45, 0x61, 0xbc, - 0x43, 0x4a, 0x6c, 0x88, 0x63, 0x6e, 0xa8, 0x06, 0xa3, 0x01, 0x09, 0xdd, 0x3b, 0xa4, 0xe2, 0x37, - 0xdc, 0xda, 0xde, 0xcc, 0x43, 0xac, 0x79, 0x4f, 0x76, 0xec, 0x32, 0xac, 0x15, 0x88, 0xed, 0x12, - 0x3a, 0x14, 0x1b, 0x4c, 0xd1, 0x9b, 0x30, 0x16, 0x90, 0x30, 0x72, 0x82, 0x48, 0xd4, 0x32, 0xa3, - 0xec, 0x88, 0x63, 0x58, 0x47, 0xf0, 0xeb, 0x44, 0x5c, 0x4d, 0x8c, 0xc1, 0x26, 0x07, 0xf4, 0x71, - 0x69, 0x24, 0x59, 0xf3, 0xdb, 0x5e, 0x14, 0xce, 0x0c, 0xb3, 0x76, 0x67, 0x5a, 0xd3, 0xaf, 0xc7, - 0x74, 0x49, 0x2b, 0x0a, 0x2f, 0x8c, 0x0d, 0x56, 0xe8, 0x93, 0x30, 0xc6, 0xff, 0x73, 0x23, 0x70, - 0x38, 0x73, 0x9c, 0xf1, 0x3e, 0x9b, 0xcf, 0x9b, 0x13, 0x2e, 0x1c, 0x17, 0xcc, 0xc7, 0x74, 0x68, - 0x88, 0x4d, 0x6e, 0x08, 0xc3, 0x58, 0xc3, 0xdd, 0x25, 0x1e, 0x09, 0xc3, 0x4a, 0xe0, 0xdf, 0x24, - 0x42, 0xa7, 0x7d, 0x32, 0xdb, 0x68, 0xec, 0xdf, 0x24, 0xe2, 0x12, 0xa8, 0x97, 0xc1, 0x26, 0x0b, - 0x74, 0x0d, 0xc6, 0x03, 0xe2, 0xd4, 0xdd, 0x98, 0xe9, 0x48, 0x37, 0xa6, 0xec, 0xe2, 0x8c, 0x8d, - 0x42, 0x38, 0xc1, 0x04, 0x5d, 0x85, 0x51, 0xd6, 0xe7, 0xed, 0x16, 0x67, 0x7a, 0xa2, 0x1b, 0x53, - 0xe6, 0x02, 0x51, 0xd5, 0x8a, 0x60, 0x83, 0x01, 0x7a, 0x03, 0x86, 0x1b, 0xee, 0x26, 0xa9, 0xed, - 0xd5, 0x1a, 0x64, 0x66, 0x94, 
0x71, 0xcb, 0xdc, 0x0c, 0x57, 0x25, 0x11, 0x97, 0xcf, 0xd5, 0x5f, - 0x1c, 0x17, 0x47, 0xd7, 0xe1, 0x44, 0x44, 0x82, 0xa6, 0xeb, 0x39, 0x74, 0x13, 0x13, 0x57, 0x42, - 0x66, 0xcb, 0x1f, 0x63, 0xb3, 0xeb, 0x8c, 0x18, 0x8d, 0x13, 0x1b, 0x99, 0x54, 0x38, 0xa7, 0x34, - 0xba, 0x0d, 0x33, 0x19, 0x18, 0x3e, 0x6f, 0x8f, 0x31, 0xce, 0xaf, 0x09, 0xce, 0x33, 0x1b, 0x39, - 0x74, 0x07, 0x1d, 0x70, 0x38, 0x97, 0x3b, 0xba, 0x0a, 0x13, 0x6c, 0xe7, 0xac, 0xb4, 0x1b, 0x0d, - 0x51, 0xe1, 0x38, 0xab, 0xf0, 0x71, 0x29, 0x47, 0x94, 0x4d, 0xf4, 0xc1, 0x7e, 0x09, 0xe2, 0x7f, - 0x38, 0x59, 0x1a, 0xdd, 0x64, 0x66, 0xe3, 0x76, 0xe0, 0x46, 0x7b, 0x74, 0x55, 0x91, 0xdb, 0xd1, - 0xcc, 0x44, 0x47, 0x15, 0x9a, 0x4e, 0xaa, 0x6c, 0xcb, 0x3a, 0x10, 0x27, 0x19, 0xd2, 0xa3, 0x20, - 0x8c, 0xea, 0xae, 0x37, 0x33, 0xc9, 0xef, 0x53, 0x72, 0x27, 0xad, 0x52, 0x20, 0xe6, 0x38, 0x66, - 0x32, 0xa6, 0x3f, 0xae, 0xd2, 0x13, 0x77, 0x8a, 0x11, 0xc6, 0x26, 0x63, 0x89, 0xc0, 0x31, 0x0d, - 0x15, 0x82, 0xa3, 0x68, 0x6f, 0x06, 0x31, 0x52, 0xb5, 0x21, 0x6e, 0x6c, 0x7c, 0x1c, 0x53, 0xb8, - 0x7d, 0x13, 0xc6, 0xd5, 0x36, 0xc1, 0xfa, 0x04, 0x95, 0xa0, 0x9f, 0x89, 0x7d, 0x42, 0xe1, 0x3b, - 0x4c, 0x9b, 0xc0, 0x44, 0x42, 0xcc, 0xe1, 0xac, 0x09, 0xee, 0x1d, 0xb2, 0xb0, 0x17, 0x11, 0xae, - 0x8b, 0x28, 0x6a, 0x4d, 0x90, 0x08, 0x1c, 0xd3, 0xd8, 0xff, 0x9d, 0x8b, 0xcf, 0xf1, 0x29, 0xd1, - 0xc3, 0xb9, 0xf8, 0x0c, 0x0c, 0x31, 0x57, 0x15, 0x3f, 0xe0, 0xf6, 0xe4, 0xfe, 0x58, 0x60, 0xbe, - 0x2c, 0xe0, 0x58, 0x51, 0xa0, 0x57, 0x61, 0xac, 0xa6, 0x57, 0x20, 0x0e, 0x75, 0xb5, 0x8d, 0x18, - 0xb5, 0x63, 0x93, 0x16, 0xbd, 0x0c, 0x43, 0xcc, 0x2b, 0xab, 0xe6, 0x37, 0x84, 0xb4, 0x29, 0x25, - 0x93, 0xa1, 0x8a, 0x80, 0x1f, 0x68, 0xbf, 0xb1, 0xa2, 0x46, 0xe7, 0x60, 0x80, 0x36, 0xa1, 0x5c, - 0x11, 0xc7, 0xa9, 0xd2, 0x5d, 0x5e, 0x66, 0x50, 0x2c, 0xb0, 0xf6, 0x6f, 0x5a, 0x4c, 0x96, 0x4a, - 0xef, 0xf9, 0xe8, 0x32, 0x3b, 0x34, 0xd8, 0x09, 0xa2, 0xe9, 0x0e, 0x1f, 0xd3, 0x4e, 0x02, 0x85, - 0x3b, 0x48, 0xfc, 0xc7, 0x46, 0x49, 0xf4, 0x56, 0xf2, 0x64, 0xe0, 0x02, 0xc5, 0x0b, 0xb2, 0x0b, - 0x92, 0xa7, 0xc3, 0xc3, 0xf1, 0x11, 0x47, 0xdb, 0xd3, 0xe9, 0x88, 0xb0, 0xff, 0xd7, 0x82, 0x36, - 0x4b, 0xaa, 0x91, 0x13, 0x11, 0x54, 0x81, 0xc1, 0x5b, 0x8e, 0x1b, 0xb9, 0xde, 0x96, 0x90, 0xfb, - 0x3a, 0x1f, 0x74, 0xac, 0xd0, 0x0d, 0x5e, 0x80, 0x4b, 0x2f, 0xe2, 0x0f, 0x96, 0x6c, 0x28, 0xc7, - 0xa0, 0xed, 0x79, 0x94, 0x63, 0xa1, 0x57, 0x8e, 0x98, 0x17, 0xe0, 0x1c, 0xc5, 0x1f, 0x2c, 0xd9, - 0xa0, 0xb7, 0x01, 0xe4, 0x0e, 0x41, 0xea, 0x42, 0x77, 0xf8, 0x4c, 0x77, 0xa6, 0x1b, 0xaa, 0x0c, - 0x57, 0x4e, 0xc6, 0xff, 0xb1, 0xc6, 0xcf, 0x8e, 0xb4, 0x31, 0xd5, 0x1b, 0x83, 0x3e, 0x41, 0x97, - 0xa8, 0x13, 0x44, 0xa4, 0x3e, 0x1f, 0x89, 0xce, 0x79, 0xaa, 0xb7, 0xcb, 0xe1, 0x86, 0xdb, 0x24, - 0xfa, 0x72, 0x16, 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0x95, 0x22, 0xcc, 0xe4, 0x35, 0x97, 0x2e, 0x1a, - 0x72, 0xdb, 0x8d, 0x16, 0xa9, 0x58, 0x6b, 0x99, 0x8b, 0x66, 0x59, 0xc0, 0xb1, 0xa2, 0xa0, 0xb3, - 0x37, 0x74, 0xb7, 0xe4, 0xdd, 0xbe, 0x3f, 0x9e, 0xbd, 0x55, 0x06, 0xc5, 0x02, 0x4b, 0xe9, 0x02, - 0xe2, 0x84, 0xc2, 0x5d, 0x50, 0x9b, 0xe5, 0x98, 0x41, 0xb1, 0xc0, 0xea, 0x5a, 0xc6, 0xbe, 0x2e, - 0x5a, 0x46, 0xa3, 0x8b, 0xfa, 0xef, 0x6f, 0x17, 0xa1, 0x4f, 0x01, 0x6c, 0xba, 0x9e, 0x1b, 0x6e, - 0x33, 0xee, 0x03, 0x87, 0xe6, 0xae, 0x84, 0xe2, 0x15, 0xc5, 0x05, 0x6b, 0x1c, 0xd1, 0x8b, 0x30, - 0xa2, 0x36, 0x90, 0xf2, 0x12, 0x73, 0x56, 0xd0, 0x9c, 0xbf, 0xe2, 0xdd, 0x74, 0x09, 0xeb, 0x74, - 0xf6, 0x67, 0x92, 0xf3, 0x45, 0xac, 0x00, 0xad, 0x7f, 0xad, 0x5e, 0xfb, 0xb7, 0xd0, 0xb9, 0x7f, - 0xed, 0xdf, 0x1d, 0x84, 0x09, 0xa3, 0xb2, 0x76, 0xd8, 
0xc3, 0x9e, 0x7b, 0x89, 0x1e, 0x40, 0x4e, - 0x44, 0xc4, 0xfa, 0xb3, 0xbb, 0x2f, 0x15, 0xfd, 0x90, 0xa2, 0x2b, 0x80, 0x97, 0x47, 0x9f, 0x82, - 0xe1, 0x86, 0x13, 0x32, 0x8d, 0x25, 0x11, 0xeb, 0xae, 0x17, 0x66, 0xf1, 0x85, 0xd0, 0x09, 0x23, - 0xed, 0xd4, 0xe7, 0xbc, 0x63, 0x96, 0xf4, 0xa4, 0xa4, 0xf2, 0x95, 0xf4, 0x47, 0x55, 0x8d, 0xa0, - 0x42, 0xd8, 0x1e, 0xe6, 0x38, 0xf4, 0x32, 0xdb, 0x5a, 0xe9, 0xac, 0x58, 0xa4, 0xd2, 0x28, 0x9b, - 0x66, 0xfd, 0x86, 0x90, 0xad, 0x70, 0xd8, 0xa0, 0x8c, 0xef, 0x64, 0x03, 0x1d, 0xee, 0x64, 0x4f, - 0xc2, 0x20, 0xfb, 0xa1, 0x66, 0x80, 0x1a, 0x8d, 0x32, 0x07, 0x63, 0x89, 0x4f, 0x4e, 0x98, 0xa1, - 0xde, 0x26, 0x0c, 0xbd, 0xf5, 0x89, 0x49, 0xcd, 0x1c, 0x45, 0x86, 0xf8, 0x2e, 0x27, 0xa6, 0x3c, - 0x96, 0x38, 0xf4, 0xb3, 0x16, 0x20, 0xa7, 0x41, 0x6f, 0xcb, 0x14, 0xac, 0x2e, 0x37, 0xc0, 0x44, - 0xed, 0x57, 0xbb, 0x76, 0x7b, 0x3b, 0x9c, 0x9b, 0x4f, 0x95, 0xe6, 0x9a, 0xd2, 0x57, 0x44, 0x13, - 0x51, 0x9a, 0x40, 0x3f, 0x8c, 0x56, 0xdd, 0x30, 0xfa, 0xdc, 0x9f, 0x27, 0x0e, 0xa7, 0x8c, 0x26, - 0xa1, 0x6b, 0xfa, 0xe5, 0x6b, 0xe4, 0x90, 0x97, 0xaf, 0xb1, 0xdc, 0x8b, 0xd7, 0x77, 0x27, 0x2e, - 0x30, 0xa3, 0xec, 0xcb, 0x1f, 0xef, 0x72, 0x81, 0x11, 0xea, 0xf4, 0x1e, 0xae, 0x31, 0xb3, 0x6d, - 0x78, 0x28, 0xa7, 0x8b, 0x32, 0x14, 0xbc, 0x4b, 0xba, 0x82, 0xb7, 0x8b, 0x5a, 0x70, 0x4e, 0x7e, - 0xc4, 0xdc, 0x9b, 0x6d, 0xc7, 0x8b, 0xdc, 0x68, 0x4f, 0x57, 0x08, 0x3f, 0x05, 0xe3, 0x4b, 0x0e, - 0x69, 0xfa, 0xde, 0xb2, 0x57, 0x6f, 0xf9, 0xae, 0x17, 0xa1, 0x19, 0xe8, 0x63, 0xd2, 0x0d, 0xdf, - 0xdb, 0xfb, 0x68, 0xd3, 0x31, 0x83, 0xd8, 0x5b, 0x70, 0x7c, 0xc9, 0xbf, 0xe5, 0xdd, 0x72, 0x82, - 0xfa, 0x7c, 0xa5, 0xac, 0x29, 0xac, 0xd6, 0xa5, 0xc2, 0xc4, 0xca, 0xbf, 0x8e, 0x6a, 0x25, 0x79, - 0x2f, 0xac, 0xb8, 0x0d, 0x92, 0xa3, 0x56, 0xfc, 0x3f, 0x0a, 0x46, 0x4d, 0x31, 0xbd, 0x32, 0x8a, - 0x59, 0xb9, 0x1e, 0x00, 0x6f, 0xc2, 0xd0, 0xa6, 0x4b, 0x1a, 0x75, 0x4c, 0x36, 0x45, 0xef, 0x3c, - 0x91, 0xef, 0x23, 0xb8, 0x42, 0x29, 0x95, 0xf5, 0x8e, 0xa9, 0x5b, 0x56, 0x44, 0x61, 0xac, 0xd8, - 0xa0, 0x1d, 0x98, 0x94, 0x7d, 0x28, 0xb1, 0x62, 0xc3, 0x79, 0xb2, 0xd3, 0xcc, 0x32, 0x99, 0x33, - 0x7f, 0x69, 0x9c, 0x60, 0x83, 0x53, 0x8c, 0xd1, 0x29, 0xe8, 0x6b, 0xd2, 0xa3, 0xb5, 0x8f, 0x75, - 0x3f, 0xd3, 0xaf, 0x30, 0x55, 0x11, 0x83, 0xda, 0x3f, 0x61, 0xc1, 0x43, 0xa9, 0x9e, 0x11, 0x2a, - 0xb3, 0xfb, 0x3c, 0x0a, 0x49, 0x15, 0x56, 0xa1, 0xbb, 0x0a, 0xcb, 0xfe, 0xff, 0x2c, 0x38, 0xb6, - 0xdc, 0x6c, 0x45, 0x7b, 0x4b, 0xae, 0x69, 0xae, 0x7f, 0x09, 0x06, 0x9a, 0xa4, 0xee, 0xb6, 0x9b, - 0x62, 0xe4, 0x4a, 0xf2, 0xf8, 0x59, 0x63, 0xd0, 0x83, 0xfd, 0xd2, 0x58, 0x35, 0xf2, 0x03, 0x67, - 0x8b, 0x70, 0x00, 0x16, 0xe4, 0xec, 0x10, 0x77, 0xef, 0x90, 0x55, 0xb7, 0xe9, 0x46, 0xf7, 0x36, - 0xdb, 0x85, 0xa5, 0x5d, 0x32, 0xc1, 0x31, 0x3f, 0xfb, 0x1b, 0x16, 0x4c, 0xc8, 0x79, 0x3f, 0x5f, - 0xaf, 0x07, 0x24, 0x0c, 0xd1, 0x2c, 0x14, 0xdc, 0x96, 0x68, 0x25, 0x88, 0x56, 0x16, 0xca, 0x15, - 0x5c, 0x70, 0x5b, 0xf2, 0xbe, 0xc0, 0x4e, 0xb8, 0xa2, 0xe9, 0x74, 0x70, 0x59, 0xc0, 0xb1, 0xa2, - 0x40, 0xe7, 0x61, 0xc8, 0xf3, 0xeb, 0x5c, 0xe4, 0x16, 0x46, 0x5c, 0x4a, 0xb9, 0x2e, 0x60, 0x58, - 0x61, 0x51, 0x05, 0x86, 0xb9, 0x4b, 0x6a, 0x3c, 0x69, 0x7b, 0x72, 0x6c, 0x65, 0x5f, 0xb6, 0x21, - 0x4b, 0xe2, 0x98, 0x89, 0xfd, 0xdb, 0x16, 0x8c, 0xca, 0x2f, 0xeb, 0xf1, 0x32, 0x44, 0x97, 0x56, - 0x7c, 0x11, 0x8a, 0x97, 0x16, 0xbd, 0xcc, 0x30, 0x8c, 0x71, 0x87, 0x29, 0x1e, 0xea, 0x0e, 0x73, - 0x11, 0x46, 0x9c, 0x56, 0xab, 0x62, 0x5e, 0x80, 0xd8, 0x54, 0x9a, 0x8f, 0xc1, 0x58, 0xa7, 0xb1, - 0x7f, 0xbc, 0x00, 0xe3, 0xf2, 0x0b, 0xaa, 0xed, 0x9b, 0x21, 0x89, 0xd0, 0x06, 
0x0c, 0x3b, 0x7c,
-	[… machine-generated hex byte array of the removed embedded binary blob, elided …]
0xf1, 0x40, 0xcd, - 0x0f, 0x59, 0xda, 0x97, 0x3c, 0x03, 0x7b, 0x96, 0x99, 0x48, 0x06, 0x03, 0x17, 0x28, 0x1c, 0xb3, - 0xa2, 0x3b, 0x39, 0x3d, 0xf0, 0x4e, 0xe5, 0xef, 0xe4, 0xc9, 0xe3, 0x8e, 0xed, 0xe4, 0xf4, 0xb0, - 0x2b, 0x8a, 0xa3, 0x4e, 0xc5, 0xae, 0x66, 0x29, 0x2b, 0x72, 0xda, 0xa5, 0x82, 0x5f, 0xa7, 0xdb, - 0xa5, 0x50, 0x38, 0x66, 0x65, 0xff, 0x70, 0x01, 0xce, 0x74, 0x5e, 0x6f, 0xb1, 0xed, 0xab, 0x12, - 0xfb, 0x1a, 0x25, 0x6c, 0x5f, 0x5c, 0x13, 0x13, 0x53, 0xf5, 0x1c, 0xce, 0xf6, 0x12, 0x4c, 0xa9, - 0x67, 0x84, 0x0d, 0xb7, 0xb6, 0xb7, 0x1e, 0x2b, 0xbf, 0x54, 0xe0, 0x97, 0x6a, 0x92, 0x00, 0xa7, - 0xcb, 0xa0, 0x79, 0x98, 0x30, 0x80, 0xe5, 0x25, 0x71, 0x6d, 0x8f, 0x93, 0x24, 0x98, 0x68, 0x9c, - 0xa4, 0xb7, 0x7f, 0xde, 0x82, 0x87, 0x72, 0xb2, 0x62, 0xf7, 0x1c, 0xad, 0x75, 0x13, 0x26, 0x5a, - 0x66, 0xd1, 0x2e, 0x01, 0xa6, 0x8d, 0xdc, 0xdb, 0xaa, 0xad, 0x09, 0x04, 0x4e, 0x32, 0xb5, 0x7f, - 0xb6, 0x00, 0xa7, 0x3b, 0xfa, 0xc5, 0x23, 0x0c, 0x27, 0xb6, 0x9a, 0xa1, 0xb3, 0x18, 0x90, 0x3a, - 0xf1, 0x22, 0xd7, 0x69, 0x54, 0x5b, 0xa4, 0xa6, 0x59, 0x2f, 0x99, 0x83, 0xf9, 0xa5, 0xb5, 0xea, - 0x7c, 0x9a, 0x02, 0xe7, 0x94, 0x44, 0x2b, 0x80, 0xd2, 0x18, 0x31, 0xc2, 0xec, 0x6a, 0x9a, 0xe6, - 0x87, 0x33, 0x4a, 0xa0, 0x97, 0x60, 0x4c, 0xf9, 0xdb, 0x6b, 0x23, 0xce, 0x36, 0x76, 0xac, 0x23, - 0xb0, 0x49, 0x87, 0x2e, 0xf2, 0xec, 0x39, 0x22, 0xcf, 0x92, 0x30, 0x75, 0x4e, 0xc8, 0xd4, 0x38, - 0x02, 0x8c, 0x75, 0x9a, 0x85, 0x97, 0x7f, 0xef, 0x9b, 0x67, 0x3e, 0xf0, 0x87, 0xdf, 0x3c, 0xf3, - 0x81, 0x3f, 0xfd, 0xe6, 0x99, 0x0f, 0x7c, 0xdf, 0xdd, 0x33, 0xd6, 0xef, 0xdd, 0x3d, 0x63, 0xfd, - 0xe1, 0xdd, 0x33, 0xd6, 0x9f, 0xde, 0x3d, 0x63, 0xfd, 0xfb, 0xbb, 0x67, 0xac, 0x2f, 0xfd, 0xc5, - 0x99, 0x0f, 0xbc, 0x85, 0xe2, 0xf8, 0xc7, 0x17, 0xe8, 0xe8, 0x5c, 0xd8, 0xbd, 0xf8, 0x3f, 0x02, - 0x00, 0x00, 0xff, 0xff, 0xfd, 0xca, 0x84, 0xba, 0xa5, 0x1e, 0x01, 0x00, + // 16114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x64, 0xd9, + 0x59, 0x28, 0xa6, 0x9b, 0x59, 0xeb, 0x57, 0xfb, 0xa9, 0x5e, 0xaa, 0x6b, 0xba, 0x3b, 0x7b, 0xee, + 0xcc, 0xf4, 0xf4, 0x6c, 0xd5, 0xea, 0x59, 0x34, 0xad, 0x99, 0xd1, 0x30, 0xb5, 0x76, 0xd7, 0x74, + 0x57, 0x75, 0xce, 0xc9, 0xaa, 0x6e, 0x69, 0x34, 0x12, 0xba, 0x9d, 0x79, 0xaa, 0xea, 0xaa, 0x32, + 0xef, 0xcd, 0xb9, 0xf7, 0x66, 0x75, 0x57, 0x5b, 0x04, 0x20, 0x8c, 0x40, 0x02, 0x47, 0x28, 0x08, + 0x6c, 0x1c, 0x82, 0xe0, 0x07, 0x60, 0x16, 0xcb, 0x60, 0x64, 0x61, 0xc0, 0x88, 0xcd, 0x36, 0x8e, + 0x00, 0xff, 0xc0, 0x98, 0x08, 0x4b, 0x84, 0x09, 0x17, 0x56, 0xe1, 0x08, 0x82, 0x1f, 0x06, 0x82, + 0xf7, 0x7e, 0xbc, 0x57, 0xc1, 0x7b, 0xbc, 0x38, 0xeb, 0x3d, 0xe7, 0x2e, 0x99, 0x59, 0x3d, 0xdd, + 0xa5, 0x91, 0x62, 0xfe, 0x65, 0x9e, 0xef, 0x3b, 0xdf, 0x39, 0xf7, 0xac, 0xdf, 0xf9, 0x56, 0xb0, + 0xb7, 0x2f, 0x87, 0x33, 0xae, 0x7f, 0xd1, 0x69, 0xba, 0x17, 0xab, 0x7e, 0x40, 0x2e, 0xee, 0x5c, + 0xba, 0xb8, 0x49, 0x3c, 0x12, 0x38, 0x11, 0xa9, 0xcd, 0x34, 0x03, 0x3f, 0xf2, 0x11, 0xe2, 0x38, + 0x33, 0x4e, 0xd3, 0x9d, 0xa1, 0x38, 0x33, 0x3b, 0x97, 0xa6, 0x9f, 0xdb, 0x74, 0xa3, 0xad, 0xd6, + 0xed, 0x99, 0xaa, 0xdf, 0xb8, 0xb8, 0xe9, 0x6f, 0xfa, 0x17, 0x19, 0xea, 0xed, 0xd6, 0x06, 0xfb, + 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xf4, 0x8b, 0x71, 0x33, 0x0d, 0xa7, 0xba, 0xe5, 0x7a, 0x24, + 0xd8, 0xbd, 0xd8, 0xdc, 0xde, 0x64, 0xed, 0x06, 0x24, 0xf4, 0x5b, 0x41, 0x95, 0x24, 0x1b, 0x6e, + 0x5b, 0x2b, 0xbc, 0xd8, 0x20, 0x91, 0x93, 0xd1, 0xdd, 0xe9, 0x8b, 0x79, 0xb5, 0x82, 0x96, 0x17, + 0xb9, 0x8d, 0x74, 0x33, 0x1f, 0xe9, 0x54, 0x21, 0xac, 0x6e, 0x91, 0x86, 0x93, 
0xaa, 0xf7, 0x42, + 0x5e, 0xbd, 0x56, 0xe4, 0xd6, 0x2f, 0xba, 0x5e, 0x14, 0x46, 0x41, 0xb2, 0x92, 0xfd, 0x2d, 0x0b, + 0xce, 0xcd, 0xde, 0xaa, 0x2c, 0xd6, 0x9d, 0x30, 0x72, 0xab, 0x73, 0x75, 0xbf, 0xba, 0x5d, 0x89, + 0xfc, 0x80, 0xdc, 0xf4, 0xeb, 0xad, 0x06, 0xa9, 0xb0, 0x81, 0x40, 0xcf, 0xc2, 0xc0, 0x0e, 0xfb, + 0xbf, 0xbc, 0x30, 0x65, 0x9d, 0xb3, 0x2e, 0x0c, 0xce, 0x8d, 0xff, 0xe9, 0x5e, 0xe9, 0x43, 0xfb, + 0x7b, 0xa5, 0x81, 0x9b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x1e, 0xfa, 0x36, 0xc2, 0xb5, 0xdd, 0x26, + 0x99, 0x2a, 0x30, 0xdc, 0x51, 0x81, 0xdb, 0xb7, 0x54, 0xa1, 0xa5, 0x58, 0x40, 0xd1, 0x45, 0x18, + 0x6c, 0x3a, 0x41, 0xe4, 0x46, 0xae, 0xef, 0x4d, 0x15, 0xcf, 0x59, 0x17, 0x7a, 0xe7, 0x26, 0x04, + 0xea, 0x60, 0x59, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x76, 0xc3, 0xab, 0xef, 0x4e, + 0xf5, 0x9c, 0xb3, 0x2e, 0x0c, 0xc4, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x2b, 0x05, 0x18, + 0x98, 0xdd, 0xd8, 0x70, 0x3d, 0x37, 0xda, 0x45, 0x37, 0x61, 0xd8, 0xf3, 0x6b, 0x44, 0xfe, 0x67, + 0x5f, 0x31, 0xf4, 0xfc, 0xb9, 0x99, 0xf4, 0x52, 0x9a, 0x59, 0xd5, 0xf0, 0xe6, 0xc6, 0xf7, 0xf7, + 0x4a, 0xc3, 0x7a, 0x09, 0x36, 0xe8, 0x20, 0x0c, 0x43, 0x4d, 0xbf, 0xa6, 0xc8, 0x16, 0x18, 0xd9, + 0x52, 0x16, 0xd9, 0x72, 0x8c, 0x36, 0x37, 0xb6, 0xbf, 0x57, 0x1a, 0xd2, 0x0a, 0xb0, 0x4e, 0x04, + 0xdd, 0x86, 0x31, 0xfa, 0xd7, 0x8b, 0x5c, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x96, 0x47, 0x57, 0x43, + 0x9d, 0x9b, 0xdc, 0xdf, 0x2b, 0x8d, 0x25, 0x0a, 0x71, 0x92, 0xa0, 0xfd, 0x93, 0x16, 0x8c, 0xcd, + 0x36, 0x9b, 0xb3, 0x41, 0xc3, 0x0f, 0xca, 0x81, 0xbf, 0xe1, 0xd6, 0x09, 0x7a, 0x19, 0x7a, 0x22, + 0x3a, 0x6b, 0x7c, 0x86, 0x1f, 0x13, 0x43, 0xdb, 0x43, 0xe7, 0xea, 0x60, 0xaf, 0x34, 0x99, 0x40, + 0x67, 0x53, 0xc9, 0x2a, 0xa0, 0x37, 0x60, 0xbc, 0xee, 0x57, 0x9d, 0xfa, 0x96, 0x1f, 0x46, 0x02, + 0x2a, 0xa6, 0xfe, 0xd8, 0xfe, 0x5e, 0x69, 0xfc, 0x7a, 0x02, 0x86, 0x53, 0xd8, 0xf6, 0x3d, 0x18, + 0x9d, 0x8d, 0x22, 0xa7, 0xba, 0x45, 0x6a, 0x7c, 0x41, 0xa1, 0x17, 0xa1, 0xc7, 0x73, 0x1a, 0xb2, + 0x33, 0xe7, 0x64, 0x67, 0x56, 0x9d, 0x06, 0xed, 0xcc, 0xf8, 0xba, 0xe7, 0xbe, 0xdb, 0x12, 0x8b, + 0x94, 0x96, 0x61, 0x86, 0x8d, 0x9e, 0x07, 0xa8, 0x91, 0x1d, 0xb7, 0x4a, 0xca, 0x4e, 0xb4, 0x25, + 0xfa, 0x80, 0x44, 0x5d, 0x58, 0x50, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x61, 0x70, 0x76, 0xc7, 0x77, + 0x6b, 0x65, 0xbf, 0x16, 0xa2, 0x6d, 0x18, 0x6b, 0x06, 0x64, 0x83, 0x04, 0xaa, 0x68, 0xca, 0x3a, + 0x57, 0xbc, 0x30, 0xf4, 0xfc, 0x85, 0xcc, 0xb1, 0x37, 0x51, 0x17, 0xbd, 0x28, 0xd8, 0x9d, 0x3b, + 0x29, 0xda, 0x1b, 0x4b, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0x27, 0x05, 0x38, 0x3e, 0x7b, 0xaf, 0x15, + 0x90, 0x05, 0x37, 0xdc, 0x4e, 0x6e, 0xb8, 0x9a, 0x1b, 0x6e, 0xaf, 0xc6, 0x23, 0xa0, 0x56, 0xfa, + 0x82, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x07, 0xfd, 0xf4, 0xf7, 0x3a, 0x5e, 0x16, 0x9f, 0x3c, 0x29, + 0x90, 0x87, 0x16, 0x9c, 0xc8, 0x59, 0xe0, 0x20, 0x2c, 0x71, 0xd0, 0x0a, 0x0c, 0x55, 0xd9, 0xf9, + 0xb0, 0xb9, 0xe2, 0xd7, 0x08, 0x5b, 0x5b, 0x83, 0x73, 0xcf, 0x50, 0xf4, 0xf9, 0xb8, 0xf8, 0x60, + 0xaf, 0x34, 0xc5, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47, 0xb6, 0xda, 0xee, 0x3d, 0x8c, + 0x12, 0x64, 0x6c, 0xf5, 0x0b, 0xda, 0xce, 0xed, 0x65, 0x3b, 0x77, 0x38, 0x7b, 0xd7, 0xa2, 0x4b, + 0xd0, 0xb3, 0xed, 0x7a, 0xb5, 0xa9, 0x3e, 0x46, 0xeb, 0x0c, 0x9d, 0xf3, 0x6b, 0xae, 0x57, 0x3b, + 0xd8, 0x2b, 0x4d, 0x18, 0xdd, 0xa1, 0x85, 0x98, 0xa1, 0xda, 0xff, 0xc6, 0x82, 0x12, 0x83, 0x2d, + 0xb9, 0x75, 0x52, 0x26, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, 0x9f, 0x07, 0x08, 0x49, + 0x35, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x7c, 0x0a, 0xb7, + 0x9c, 
0x80, 0xad, 0x2f, 0x31, 0xb0, 0xea, 0x7c, 0xaa, 0x48, 0x00, 0x8e, 0x71, 0x8c, 0xf3, 0xa9, + 0xd8, 0xe9, 0x7c, 0x42, 0x1f, 0x83, 0xb1, 0xb8, 0xb1, 0xb0, 0xe9, 0x54, 0xe5, 0x00, 0xb2, 0x1d, + 0x5c, 0x31, 0x41, 0x38, 0x89, 0x6b, 0xff, 0xb7, 0x96, 0x58, 0x3c, 0xf4, 0xab, 0xdf, 0xe7, 0xdf, + 0x6a, 0xff, 0xae, 0x05, 0xfd, 0x73, 0xae, 0x57, 0x73, 0xbd, 0x4d, 0xf4, 0x19, 0x18, 0xa0, 0x57, + 0x65, 0xcd, 0x89, 0x1c, 0x71, 0x0c, 0x7f, 0x58, 0xdb, 0x5b, 0xea, 0xe6, 0x9a, 0x69, 0x6e, 0x6f, + 0xd2, 0x82, 0x70, 0x86, 0x62, 0xd3, 0xdd, 0x76, 0xe3, 0xf6, 0x67, 0x49, 0x35, 0x5a, 0x21, 0x91, + 0x13, 0x7f, 0x4e, 0x5c, 0x86, 0x15, 0x55, 0x74, 0x0d, 0xfa, 0x22, 0x27, 0xd8, 0x24, 0x91, 0x38, + 0x8f, 0x33, 0xcf, 0x4d, 0x5e, 0x13, 0xd3, 0x1d, 0x49, 0xbc, 0x2a, 0x89, 0x6f, 0xa9, 0x35, 0x56, + 0x15, 0x0b, 0x12, 0xf6, 0x7f, 0xe8, 0x87, 0x53, 0xf3, 0x95, 0xe5, 0x9c, 0x75, 0x75, 0x1e, 0xfa, + 0x6a, 0x81, 0xbb, 0x43, 0x02, 0x31, 0xce, 0x8a, 0xca, 0x02, 0x2b, 0xc5, 0x02, 0x8a, 0x2e, 0xc3, + 0x30, 0xbf, 0x1f, 0xaf, 0x3a, 0x5e, 0x2d, 0x3e, 0x1e, 0x05, 0xf6, 0xf0, 0x4d, 0x0d, 0x86, 0x0d, + 0xcc, 0x43, 0x2e, 0xaa, 0xf3, 0x89, 0xcd, 0x98, 0x77, 0xf7, 0x7e, 0xd1, 0x82, 0x71, 0xde, 0xcc, + 0x6c, 0x14, 0x05, 0xee, 0xed, 0x56, 0x44, 0xc2, 0xa9, 0x5e, 0x76, 0xd2, 0xcd, 0x67, 0x8d, 0x56, + 0xee, 0x08, 0xcc, 0xdc, 0x4c, 0x50, 0xe1, 0x87, 0xe0, 0x94, 0x68, 0x77, 0x3c, 0x09, 0xc6, 0xa9, + 0x66, 0xd1, 0x8f, 0x58, 0x30, 0x5d, 0xf5, 0xbd, 0x28, 0xf0, 0xeb, 0x75, 0x12, 0x94, 0x5b, 0xb7, + 0xeb, 0x6e, 0xb8, 0xc5, 0xd7, 0x29, 0x26, 0x1b, 0xec, 0x24, 0xc8, 0x99, 0x43, 0x85, 0x24, 0xe6, + 0xf0, 0xec, 0xfe, 0x5e, 0x69, 0x7a, 0x3e, 0x97, 0x14, 0x6e, 0xd3, 0x0c, 0xda, 0x06, 0x44, 0x6f, + 0xf6, 0x4a, 0xe4, 0x6c, 0x92, 0xb8, 0xf1, 0xfe, 0xee, 0x1b, 0x3f, 0xb1, 0xbf, 0x57, 0x42, 0xab, + 0x29, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x17, 0x8e, 0xd1, 0xd2, 0xd4, 0xb7, 0x0e, 0x74, 0xdf, 0xdc, + 0xd4, 0xfe, 0x5e, 0xe9, 0xd8, 0x6a, 0x06, 0x11, 0x9c, 0x49, 0x1a, 0xfd, 0x90, 0x05, 0xa7, 0xe2, + 0xcf, 0x5f, 0xbc, 0xdb, 0x74, 0xbc, 0x5a, 0xdc, 0xf0, 0x60, 0xf7, 0x0d, 0xd3, 0x33, 0xf9, 0xd4, + 0x7c, 0x1e, 0x25, 0x9c, 0xdf, 0x08, 0xf2, 0x60, 0x92, 0x76, 0x2d, 0xd9, 0x36, 0x74, 0xdf, 0xf6, + 0xc9, 0xfd, 0xbd, 0xd2, 0xe4, 0x6a, 0x9a, 0x06, 0xce, 0x22, 0x3c, 0x3d, 0x0f, 0xc7, 0x33, 0x57, + 0x27, 0x1a, 0x87, 0xe2, 0x36, 0xe1, 0x4c, 0xe0, 0x20, 0xa6, 0x3f, 0xd1, 0x31, 0xe8, 0xdd, 0x71, + 0xea, 0x2d, 0xb1, 0x31, 0x31, 0xff, 0xf3, 0x4a, 0xe1, 0xb2, 0x65, 0xff, 0x6f, 0x45, 0x18, 0x9b, + 0xaf, 0x2c, 0xdf, 0xd7, 0xae, 0xd7, 0xaf, 0xbd, 0x42, 0xdb, 0x6b, 0x2f, 0xbe, 0x44, 0x8b, 0xb9, + 0x97, 0xe8, 0x0f, 0x66, 0x6c, 0xd9, 0x1e, 0xb6, 0x65, 0x3f, 0x9a, 0xb3, 0x65, 0x1f, 0xf0, 0x46, + 0xdd, 0xc9, 0x59, 0xb5, 0xbd, 0x6c, 0x02, 0x33, 0x39, 0x24, 0xc6, 0xfb, 0x25, 0x8f, 0xda, 0x43, + 0x2e, 0xdd, 0x07, 0x33, 0x8f, 0x55, 0x18, 0x9e, 0x77, 0x9a, 0xce, 0x6d, 0xb7, 0xee, 0x46, 0x2e, + 0x09, 0xd1, 0x93, 0x50, 0x74, 0x6a, 0x35, 0xc6, 0xdd, 0x0d, 0xce, 0x1d, 0xdf, 0xdf, 0x2b, 0x15, + 0x67, 0x6b, 0x94, 0xcd, 0x00, 0x85, 0xb5, 0x8b, 0x29, 0x06, 0x7a, 0x1a, 0x7a, 0x6a, 0x81, 0xdf, + 0x9c, 0x2a, 0x30, 0x4c, 0xba, 0xcb, 0x7b, 0x16, 0x02, 0xbf, 0x99, 0x40, 0x65, 0x38, 0xf6, 0x1f, + 0x17, 0xe0, 0xf4, 0x3c, 0x69, 0x6e, 0x2d, 0x55, 0x72, 0xee, 0x8b, 0x0b, 0x30, 0xd0, 0xf0, 0x3d, + 0x37, 0xf2, 0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0x41, 0x4f, + 0x33, 0x66, 0x62, 0x87, 0x25, 0x03, 0xcc, 0xd8, 0x57, 0x06, 0xa1, 0x18, 0xad, 0x90, 0x04, 0x62, + 0xc5, 0x28, 0x8c, 0xf5, 0x90, 0x04, 0x98, 0x41, 0x62, 0x4e, 0x80, 0xf2, 0x08, 0xe2, 0x46, 0x48, + 0x70, 0x02, 0x14, 0x82, 0x35, 
0x2c, 0x54, 0x86, 0xc1, 0x30, 0x31, 0xb3, 0x5d, 0x6d, 0xcd, 0x11, + 0xc6, 0x2a, 0xa8, 0x99, 0x8c, 0x89, 0x18, 0x37, 0x58, 0x5f, 0x47, 0x56, 0xe1, 0x1b, 0x05, 0x40, + 0x7c, 0x08, 0xbf, 0xcb, 0x06, 0x6e, 0x3d, 0x3d, 0x70, 0xdd, 0x6f, 0x89, 0x07, 0x35, 0x7a, 0xff, + 0xd6, 0x82, 0xd3, 0xf3, 0xae, 0x57, 0x23, 0x41, 0xce, 0x02, 0x7c, 0x38, 0x4f, 0xf9, 0xc3, 0x31, + 0x29, 0xc6, 0x12, 0xeb, 0x79, 0x00, 0x4b, 0xcc, 0xfe, 0x47, 0x0b, 0x10, 0xff, 0xec, 0xf7, 0xdd, + 0xc7, 0xae, 0xa7, 0x3f, 0xf6, 0x01, 0x2c, 0x0b, 0xfb, 0x3a, 0x8c, 0xce, 0xd7, 0x5d, 0xe2, 0x45, + 0xcb, 0xe5, 0x79, 0xdf, 0xdb, 0x70, 0x37, 0xd1, 0x2b, 0x30, 0x1a, 0xb9, 0x0d, 0xe2, 0xb7, 0xa2, + 0x0a, 0xa9, 0xfa, 0x1e, 0x7b, 0xb9, 0x5a, 0x17, 0x7a, 0xe7, 0xd0, 0xfe, 0x5e, 0x69, 0x74, 0xcd, + 0x80, 0xe0, 0x04, 0xa6, 0xfd, 0xcb, 0xf4, 0xdc, 0xaa, 0xb7, 0xc2, 0x88, 0x04, 0x6b, 0x41, 0x2b, + 0x8c, 0xe6, 0x5a, 0x94, 0xf7, 0x2c, 0x07, 0x3e, 0xed, 0x8e, 0xeb, 0x7b, 0xe8, 0xb4, 0xf1, 0x1c, + 0x1f, 0x90, 0x4f, 0x71, 0xf1, 0xec, 0x9e, 0x01, 0x08, 0xdd, 0x4d, 0x8f, 0x04, 0xda, 0xf3, 0x61, + 0x94, 0x6d, 0x15, 0x55, 0x8a, 0x35, 0x0c, 0x54, 0x87, 0x91, 0xba, 0x73, 0x9b, 0xd4, 0x2b, 0xa4, + 0x4e, 0xaa, 0x91, 0x1f, 0x08, 0xf9, 0xc6, 0x0b, 0xdd, 0xbd, 0x03, 0xae, 0xeb, 0x55, 0xe7, 0x26, + 0xf6, 0xf7, 0x4a, 0x23, 0x46, 0x11, 0x36, 0x89, 0xd3, 0xa3, 0xc3, 0x6f, 0xd2, 0xaf, 0x70, 0xea, + 0xfa, 0xe3, 0xf3, 0x86, 0x28, 0xc3, 0x0a, 0xaa, 0x8e, 0x8e, 0x9e, 0xbc, 0xa3, 0xc3, 0xfe, 0x6b, + 0xba, 0xd0, 0xfc, 0x46, 0xd3, 0xf7, 0x88, 0x17, 0xcd, 0xfb, 0x5e, 0x8d, 0x4b, 0xa6, 0x5e, 0x31, + 0x44, 0x27, 0xe7, 0x13, 0xa2, 0x93, 0x13, 0xe9, 0x1a, 0x9a, 0xf4, 0xe4, 0xa3, 0xd0, 0x17, 0x46, + 0x4e, 0xd4, 0x0a, 0xc5, 0xc0, 0x3d, 0x2a, 0x97, 0x5d, 0x85, 0x95, 0x1e, 0xec, 0x95, 0xc6, 0x54, + 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0xf4, 0x37, 0x48, 0x18, 0x3a, 0x9b, 0x92, 0x6d, 0x18, + 0x13, 0x75, 0xfb, 0x57, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x18, 0xf4, 0x92, 0x20, 0xf0, 0x03, 0xf1, + 0x6d, 0x23, 0x02, 0xb1, 0x77, 0x91, 0x16, 0x62, 0x0e, 0xb3, 0xff, 0x0f, 0x0b, 0xc6, 0x54, 0x5f, + 0x79, 0x5b, 0x47, 0xf0, 0x5c, 0x7b, 0x1b, 0xa0, 0x2a, 0x3f, 0x30, 0x64, 0xd7, 0xec, 0xd0, 0xf3, + 0xe7, 0x33, 0x39, 0x9a, 0xd4, 0x30, 0xc6, 0x94, 0x55, 0x51, 0x88, 0x35, 0x6a, 0xf6, 0x1f, 0x58, + 0x30, 0x99, 0xf8, 0xa2, 0xeb, 0x6e, 0x18, 0xa1, 0x77, 0x52, 0x5f, 0x35, 0xd3, 0xe5, 0xe2, 0x73, + 0x43, 0xfe, 0x4d, 0x6a, 0xcf, 0xcb, 0x12, 0xed, 0x8b, 0xae, 0x42, 0xaf, 0x1b, 0x91, 0x86, 0xfc, + 0x98, 0xc7, 0xda, 0x7e, 0x0c, 0xef, 0x55, 0x3c, 0x23, 0xcb, 0xb4, 0x26, 0xe6, 0x04, 0xec, 0x3f, + 0x2e, 0xc2, 0x20, 0xdf, 0xdf, 0x2b, 0x4e, 0xf3, 0x08, 0xe6, 0xe2, 0x19, 0x18, 0x74, 0x1b, 0x8d, + 0x56, 0xe4, 0xdc, 0x16, 0xf7, 0xde, 0x00, 0x3f, 0x83, 0x96, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x19, + 0x7a, 0x58, 0x57, 0xf8, 0x57, 0x3e, 0x99, 0xfd, 0x95, 0xa2, 0xef, 0x33, 0x0b, 0x4e, 0xe4, 0x70, + 0x96, 0x53, 0xed, 0x2b, 0x5a, 0x84, 0x19, 0x09, 0xe4, 0x00, 0xdc, 0x76, 0x3d, 0x27, 0xd8, 0xa5, + 0x65, 0x53, 0x45, 0x46, 0xf0, 0xb9, 0xf6, 0x04, 0xe7, 0x14, 0x3e, 0x27, 0xab, 0x3e, 0x2c, 0x06, + 0x60, 0x8d, 0xe8, 0xf4, 0xcb, 0x30, 0xa8, 0x90, 0x0f, 0xc3, 0x39, 0x4e, 0x7f, 0x0c, 0xc6, 0x12, + 0x6d, 0x75, 0xaa, 0x3e, 0xac, 0x33, 0x9e, 0xbf, 0xc7, 0x8e, 0x0c, 0xd1, 0xeb, 0x45, 0x6f, 0x47, + 0xdc, 0x4d, 0xf7, 0xe0, 0x58, 0x3d, 0xe3, 0xc8, 0x17, 0xf3, 0xda, 0xfd, 0x15, 0x71, 0x5a, 0x7c, + 0xf6, 0xb1, 0x2c, 0x28, 0xce, 0x6c, 0xc3, 0x38, 0x11, 0x0b, 0xed, 0x4e, 0x44, 0x7a, 0xde, 0x1d, + 0x53, 0x9d, 0xbf, 0x46, 0x76, 0xd5, 0xa1, 0xfa, 0x9d, 0xec, 0xfe, 0x19, 0x3e, 0xfa, 0xfc, 0xb8, + 0x1c, 0x12, 0x04, 0x8a, 0xd7, 0xc8, 0x2e, 0x9f, 0x0a, 
0xfd, 0xeb, 0x8a, 0x6d, 0xbf, 0xee, 0x6b, + 0x16, 0x8c, 0xa8, 0xaf, 0x3b, 0x82, 0x73, 0x61, 0xce, 0x3c, 0x17, 0xce, 0xb4, 0x5d, 0xe0, 0x39, + 0x27, 0xc2, 0x37, 0x0a, 0x70, 0x4a, 0xe1, 0xd0, 0x47, 0x14, 0xff, 0x23, 0x56, 0xd5, 0x45, 0x18, + 0xf4, 0x94, 0x38, 0xd1, 0x32, 0xe5, 0x78, 0xb1, 0x30, 0x31, 0xc6, 0xa1, 0x57, 0x9e, 0x17, 0x5f, + 0xda, 0xc3, 0xba, 0x9c, 0x5d, 0x5c, 0xee, 0x73, 0x50, 0x6c, 0xb9, 0x35, 0x71, 0xc1, 0x7c, 0x58, + 0x8e, 0xf6, 0xfa, 0xf2, 0xc2, 0xc1, 0x5e, 0xe9, 0xd1, 0x3c, 0x95, 0x13, 0xbd, 0xd9, 0xc2, 0x99, + 0xf5, 0xe5, 0x05, 0x4c, 0x2b, 0xa3, 0x59, 0x18, 0x93, 0x5a, 0xb5, 0x9b, 0x94, 0x2f, 0xf5, 0x3d, + 0x71, 0x0f, 0x29, 0x61, 0x39, 0x36, 0xc1, 0x38, 0x89, 0x8f, 0x16, 0x60, 0x7c, 0xbb, 0x75, 0x9b, + 0xd4, 0x49, 0xc4, 0x3f, 0xf8, 0x1a, 0xe1, 0xa2, 0xe4, 0xc1, 0xf8, 0x09, 0x7b, 0x2d, 0x01, 0xc7, + 0xa9, 0x1a, 0xf6, 0xbf, 0xb2, 0xfb, 0x40, 0x8c, 0x9e, 0xc6, 0xdf, 0x7c, 0x27, 0x97, 0x73, 0x37, + 0xab, 0xe2, 0x1a, 0xd9, 0x5d, 0xf3, 0x29, 0x1f, 0x92, 0xbd, 0x2a, 0x8c, 0x35, 0xdf, 0xd3, 0x76, + 0xcd, 0xff, 0x56, 0x01, 0x8e, 0xab, 0x11, 0x30, 0xb8, 0xe5, 0xef, 0xf6, 0x31, 0xb8, 0x04, 0x43, + 0x35, 0xb2, 0xe1, 0xb4, 0xea, 0x91, 0xd2, 0x6b, 0xf4, 0x72, 0x55, 0xdb, 0x42, 0x5c, 0x8c, 0x75, + 0x9c, 0x43, 0x0c, 0xdb, 0xaf, 0x8f, 0xb0, 0x8b, 0x38, 0x72, 0xe8, 0x1a, 0x57, 0xbb, 0xc6, 0xca, + 0xdd, 0x35, 0x8f, 0x41, 0xaf, 0xdb, 0xa0, 0x8c, 0x59, 0xc1, 0xe4, 0xb7, 0x96, 0x69, 0x21, 0xe6, + 0x30, 0xf4, 0x04, 0xf4, 0x57, 0xfd, 0x46, 0xc3, 0xf1, 0x6a, 0xec, 0xca, 0x1b, 0x9c, 0x1b, 0xa2, + 0xbc, 0xdb, 0x3c, 0x2f, 0xc2, 0x12, 0x46, 0x99, 0x6f, 0x27, 0xd8, 0xe4, 0xc2, 0x1e, 0xc1, 0x7c, + 0xcf, 0x06, 0x9b, 0x21, 0x66, 0xa5, 0xf4, 0xad, 0x7a, 0xc7, 0x0f, 0xb6, 0x5d, 0x6f, 0x73, 0xc1, + 0x0d, 0xc4, 0x96, 0x50, 0x77, 0xe1, 0x2d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x82, 0xde, 0xa6, 0x1f, + 0x44, 0xe1, 0x54, 0x1f, 0x1b, 0xee, 0x47, 0x73, 0x0e, 0x22, 0xfe, 0xb5, 0x65, 0x3f, 0x88, 0xe2, + 0x0f, 0xa0, 0xff, 0x42, 0xcc, 0xab, 0xa3, 0xeb, 0xd0, 0x4f, 0xbc, 0x9d, 0xa5, 0xc0, 0x6f, 0x4c, + 0x4d, 0xe6, 0x53, 0x5a, 0xe4, 0x28, 0x7c, 0x99, 0xc5, 0x3c, 0xaa, 0x28, 0xc6, 0x92, 0x04, 0xfa, + 0x28, 0x14, 0x89, 0xb7, 0x33, 0xd5, 0xcf, 0x28, 0x4d, 0xe7, 0x50, 0xba, 0xe9, 0x04, 0xf1, 0x99, + 0xbf, 0xe8, 0xed, 0x60, 0x5a, 0x07, 0x7d, 0x02, 0x06, 0xe5, 0x81, 0x11, 0x0a, 0x29, 0x6a, 0xe6, + 0x82, 0x95, 0xc7, 0x0c, 0x26, 0xef, 0xb6, 0xdc, 0x80, 0x34, 0x88, 0x17, 0x85, 0xf1, 0x09, 0x29, + 0xa1, 0x21, 0x8e, 0xa9, 0xa1, 0x2a, 0x0c, 0x07, 0x24, 0x74, 0xef, 0x91, 0xb2, 0x5f, 0x77, 0xab, + 0xbb, 0x53, 0x27, 0x59, 0xf7, 0x9e, 0x6a, 0x3b, 0x64, 0x58, 0xab, 0x10, 0x4b, 0xf9, 0xf5, 0x52, + 0x6c, 0x10, 0x45, 0x6f, 0xc1, 0x48, 0x40, 0xc2, 0xc8, 0x09, 0x22, 0xd1, 0xca, 0x94, 0xd2, 0xca, + 0x8d, 0x60, 0x1d, 0xc0, 0x9f, 0x13, 0x71, 0x33, 0x31, 0x04, 0x9b, 0x14, 0xd0, 0x27, 0xa4, 0xca, + 0x61, 0xc5, 0x6f, 0x79, 0x51, 0x38, 0x35, 0xc8, 0xfa, 0x9d, 0xa9, 0x9b, 0xbe, 0x19, 0xe3, 0x25, + 0x75, 0x12, 0xbc, 0x32, 0x36, 0x48, 0xa1, 0x4f, 0xc1, 0x08, 0xff, 0xcf, 0x55, 0xaa, 0xe1, 0xd4, + 0x71, 0x46, 0xfb, 0x5c, 0x3e, 0x6d, 0x8e, 0x38, 0x77, 0x5c, 0x10, 0x1f, 0xd1, 0x4b, 0x43, 0x6c, + 0x52, 0x43, 0x18, 0x46, 0xea, 0xee, 0x0e, 0xf1, 0x48, 0x18, 0x96, 0x03, 0xff, 0x36, 0x11, 0x12, + 0xe2, 0x53, 0xd9, 0x2a, 0x58, 0xff, 0x36, 0x11, 0x8f, 0x40, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x75, + 0x18, 0xa5, 0x4f, 0x72, 0x37, 0x26, 0x3a, 0xd4, 0x89, 0x28, 0x7b, 0x38, 0x63, 0xa3, 0x12, 0x4e, + 0x10, 0x41, 0x37, 0x60, 0x98, 0x8d, 0x79, 0xab, 0xc9, 0x89, 0x9e, 0xe8, 0x44, 0x94, 0x19, 0x14, + 0x54, 0xb4, 0x2a, 0xd8, 0x20, 0x80, 0xde, 0x84, 0xc1, 0xba, 0xbb, 0x41, 0xaa, 
0xbb, 0xd5, 0x3a, + 0x99, 0x1a, 0x66, 0xd4, 0x32, 0x0f, 0xc3, 0xeb, 0x12, 0x89, 0xf3, 0xe7, 0xea, 0x2f, 0x8e, 0xab, + 0xa3, 0x9b, 0x70, 0x22, 0x22, 0x41, 0xc3, 0xf5, 0x1c, 0x7a, 0x88, 0x89, 0x27, 0x21, 0xd3, 0x8c, + 0x8f, 0xb0, 0xd5, 0x75, 0x56, 0xcc, 0xc6, 0x89, 0xb5, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd, 0x85, + 0xa9, 0x0c, 0x08, 0x5f, 0xb7, 0xc7, 0x18, 0xe5, 0xd7, 0x04, 0xe5, 0xa9, 0xb5, 0x1c, 0xbc, 0x83, + 0x36, 0x30, 0x9c, 0x4b, 0x1d, 0xdd, 0x80, 0x31, 0x76, 0x72, 0x96, 0x5b, 0xf5, 0xba, 0x68, 0x70, + 0x94, 0x35, 0xf8, 0x84, 0xe4, 0x23, 0x96, 0x4d, 0xf0, 0xc1, 0x5e, 0x09, 0xe2, 0x7f, 0x38, 0x59, + 0x1b, 0xdd, 0x66, 0x4a, 0xd8, 0x56, 0xe0, 0x46, 0xbb, 0x74, 0x57, 0x91, 0xbb, 0xd1, 0xd4, 0x58, + 0x5b, 0x81, 0x94, 0x8e, 0xaa, 0x34, 0xb5, 0x7a, 0x21, 0x4e, 0x12, 0xa4, 0x57, 0x41, 0x18, 0xd5, + 0x5c, 0x6f, 0x6a, 0x9c, 0xbf, 0xa7, 0xe4, 0x49, 0x5a, 0xa1, 0x85, 0x98, 0xc3, 0x98, 0x02, 0x96, + 0xfe, 0xb8, 0x41, 0x6f, 0xdc, 0x09, 0x86, 0x18, 0x2b, 0x60, 0x25, 0x00, 0xc7, 0x38, 0x94, 0x09, + 0x8e, 0xa2, 0xdd, 0x29, 0xc4, 0x50, 0xd5, 0x81, 0xb8, 0xb6, 0xf6, 0x09, 0x4c, 0xcb, 0xed, 0xdb, + 0x30, 0xaa, 0x8e, 0x09, 0x36, 0x26, 0xa8, 0x04, 0xbd, 0x8c, 0xed, 0x13, 0xe2, 0xd3, 0x41, 0xda, + 0x05, 0xc6, 0x12, 0x62, 0x5e, 0xce, 0xba, 0xe0, 0xde, 0x23, 0x73, 0xbb, 0x11, 0xe1, 0xb2, 0x88, + 0xa2, 0xd6, 0x05, 0x09, 0xc0, 0x31, 0x8e, 0xfd, 0x1f, 0x39, 0xfb, 0x1c, 0xdf, 0x12, 0x5d, 0xdc, + 0x8b, 0xcf, 0xc2, 0x00, 0x33, 0xfc, 0xf0, 0x03, 0xae, 0x9d, 0xed, 0x8d, 0x19, 0xe6, 0xab, 0xa2, + 0x1c, 0x2b, 0x0c, 0xf4, 0x2a, 0x8c, 0x54, 0xf5, 0x06, 0xc4, 0xa5, 0xae, 0x8e, 0x11, 0xa3, 0x75, + 0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xc0, 0x6c, 0x9c, 0xaa, 0x7e, 0x5d, 0x70, 0x9b, 0x92, 0x33, 0x19, + 0x28, 0x8b, 0xf2, 0x03, 0xed, 0x37, 0x56, 0xd8, 0xe8, 0x3c, 0xf4, 0xd1, 0x2e, 0x2c, 0x97, 0xc5, + 0x75, 0xaa, 0x24, 0x81, 0x57, 0x59, 0x29, 0x16, 0x50, 0xfb, 0x0f, 0x2c, 0xc6, 0x4b, 0xa5, 0xcf, + 0x7c, 0x74, 0x95, 0x5d, 0x1a, 0xec, 0x06, 0xd1, 0xb4, 0xf0, 0x8f, 0x6b, 0x37, 0x81, 0x82, 0x1d, + 0x24, 0xfe, 0x63, 0xa3, 0x26, 0x7a, 0x3b, 0x79, 0x33, 0x70, 0x86, 0xe2, 0x45, 0x39, 0x04, 0xc9, + 0xdb, 0xe1, 0x91, 0xf8, 0x8a, 0xa3, 0xfd, 0x69, 0x77, 0x45, 0xd8, 0x3f, 0x55, 0xd0, 0x56, 0x49, + 0x25, 0x72, 0x22, 0x82, 0xca, 0xd0, 0x7f, 0xc7, 0x71, 0x23, 0xd7, 0xdb, 0x14, 0x7c, 0x5f, 0xfb, + 0x8b, 0x8e, 0x55, 0xba, 0xc5, 0x2b, 0x70, 0xee, 0x45, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, 0xb4, + 0x3c, 0x8f, 0x52, 0x2c, 0x74, 0x4b, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, 0xf4, + 0x0e, 0x80, 0x3c, 0x21, 0x48, 0x4d, 0xc8, 0x0e, 0x9f, 0xed, 0x4c, 0x74, 0x4d, 0xd5, 0xe1, 0xc2, + 0xc9, 0xf8, 0x3f, 0xd6, 0xe8, 0xd9, 0x91, 0x36, 0xa7, 0x7a, 0x67, 0xd0, 0x27, 0xe9, 0x16, 0x75, + 0x82, 0x88, 0xd4, 0x66, 0x23, 0x31, 0x38, 0x4f, 0x77, 0xf7, 0x38, 0x5c, 0x73, 0x1b, 0x44, 0xdf, + 0xce, 0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x53, 0x84, 0xa9, 0xbc, 0xee, 0xd2, 0x4d, 0x43, 0xee, + 0xba, 0xd1, 0x3c, 0x65, 0x6b, 0x2d, 0x73, 0xd3, 0x2c, 0x8a, 0x72, 0xac, 0x30, 0xe8, 0xea, 0x0d, + 0xdd, 0x4d, 0xf9, 0xb6, 0xef, 0x8d, 0x57, 0x6f, 0x85, 0x95, 0x62, 0x01, 0xa5, 0x78, 0x01, 0x71, + 0x42, 0x61, 0x7c, 0xa7, 0xad, 0x72, 0xcc, 0x4a, 0xb1, 0x80, 0xea, 0x52, 0xc6, 0x9e, 0x0e, 0x52, + 0x46, 0x63, 0x88, 0x7a, 0x1f, 0xec, 0x10, 0xa1, 0x4f, 0x03, 0x6c, 0xb8, 0x9e, 0x1b, 0x6e, 0x31, + 0xea, 0x7d, 0x87, 0xa6, 0xae, 0x98, 0xe2, 0x25, 0x45, 0x05, 0x6b, 0x14, 0xd1, 0x4b, 0x30, 0xa4, + 0x0e, 0x90, 0xe5, 0x05, 0xa6, 0xfa, 0xd7, 0x4c, 0xa9, 0xe2, 0xd3, 0x74, 0x01, 0xeb, 0x78, 0xf6, + 0x67, 0x93, 0xeb, 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x6e, 0xc7, 0xb7, 0xd0, 0x7e, 0x7c, 0xed, + 0x9f, 
0x19, 0x84, 0x31, 0xa3, 0xb1, 0x56, 0xd8, 0xc5, 0x99, 0x7b, 0x85, 0x5e, 0x40, 0x4e, 0x44, + 0xc4, 0xfe, 0xb3, 0x3b, 0x6f, 0x15, 0xfd, 0x92, 0xa2, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x86, 0xc1, + 0xba, 0x13, 0x32, 0x89, 0x25, 0x11, 0xfb, 0xae, 0x1b, 0x62, 0xf1, 0x83, 0xd0, 0x09, 0x23, 0xed, + 0xd6, 0xe7, 0xb4, 0x63, 0x92, 0xf4, 0xa6, 0xa4, 0xfc, 0x95, 0xb4, 0xee, 0x54, 0x9d, 0xa0, 0x4c, + 0xd8, 0x2e, 0xe6, 0x30, 0x74, 0x99, 0x1d, 0xad, 0x74, 0x55, 0xcc, 0x53, 0x6e, 0x94, 0x2d, 0xb3, + 0x5e, 0x83, 0xc9, 0x56, 0x30, 0x6c, 0x60, 0xc6, 0x6f, 0xb2, 0xbe, 0x36, 0x6f, 0xb2, 0xa7, 0xa0, + 0x9f, 0xfd, 0x50, 0x2b, 0x40, 0xcd, 0xc6, 0x32, 0x2f, 0xc6, 0x12, 0x9e, 0x5c, 0x30, 0x03, 0xdd, + 0x2d, 0x18, 0xfa, 0xea, 0x13, 0x8b, 0x9a, 0x99, 0x5d, 0x0c, 0xf0, 0x53, 0x4e, 0x2c, 0x79, 0x2c, + 0x61, 0xe8, 0x57, 0x2c, 0x40, 0x4e, 0x9d, 0xbe, 0x96, 0x69, 0xb1, 0x7a, 0xdc, 0x00, 0x63, 0xb5, + 0x5f, 0xed, 0x38, 0xec, 0xad, 0x70, 0x66, 0x36, 0x55, 0x9b, 0x4b, 0x4a, 0x5f, 0x11, 0x5d, 0x44, + 0x69, 0x04, 0xfd, 0x32, 0xba, 0xee, 0x86, 0xd1, 0xe7, 0xff, 0x26, 0x71, 0x39, 0x65, 0x74, 0x09, + 0xad, 0xeb, 0x8f, 0xaf, 0xa1, 0x43, 0x3e, 0xbe, 0x46, 0x72, 0x1f, 0x5e, 0xdf, 0x9f, 0x78, 0xc0, + 0x0c, 0xb3, 0x2f, 0x7f, 0xa2, 0xc3, 0x03, 0x46, 0x88, 0xd3, 0xbb, 0x79, 0xc6, 0x94, 0x85, 0x1e, + 0x78, 0x84, 0x75, 0xb9, 0xfd, 0x23, 0x78, 0x3d, 0x24, 0xc1, 0xdc, 0x29, 0xa9, 0x26, 0x3e, 0xd0, + 0x79, 0x0f, 0x4d, 0x6f, 0xfc, 0x43, 0x16, 0x4c, 0xa5, 0x07, 0x88, 0x77, 0x69, 0x6a, 0x94, 0xf5, + 0xdf, 0x6e, 0x37, 0x32, 0xa2, 0xf3, 0xd2, 0xdc, 0x75, 0x6a, 0x36, 0x87, 0x16, 0xce, 0x6d, 0x65, + 0xba, 0x05, 0x27, 0x73, 0xe6, 0x3d, 0x43, 0x6a, 0xbd, 0xa0, 0x4b, 0xad, 0x3b, 0xc8, 0x3a, 0x67, + 0xe4, 0xcc, 0xcc, 0xbc, 0xd5, 0x72, 0xbc, 0xc8, 0x8d, 0x76, 0x75, 0x29, 0xb7, 0x07, 0xe6, 0x80, + 0xa0, 0x4f, 0x41, 0x6f, 0xdd, 0xf5, 0x5a, 0x77, 0xc5, 0x4d, 0x79, 0x3e, 0xfb, 0x11, 0xe3, 0xb5, + 0xee, 0x9a, 0x43, 0x5c, 0xa2, 0x1b, 0x92, 0x95, 0x1f, 0xec, 0x95, 0x50, 0x1a, 0x01, 0x73, 0xaa, + 0xf6, 0xd3, 0x30, 0xba, 0xe0, 0x90, 0x86, 0xef, 0x2d, 0x7a, 0xb5, 0xa6, 0xef, 0x7a, 0x11, 0x9a, + 0x82, 0x1e, 0xc6, 0x22, 0xf2, 0x0b, 0xb2, 0x87, 0x0e, 0x21, 0x66, 0x25, 0xf6, 0x26, 0x1c, 0x5f, + 0xf0, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0x36, 0x5b, 0x5e, 0xd6, 0xa4, 0x7e, 0xab, 0x52, 0xea, 0x64, + 0xe5, 0xbf, 0xe9, 0xb5, 0x9a, 0x7c, 0x29, 0x2d, 0xb9, 0x75, 0x92, 0x23, 0x9b, 0xfd, 0x99, 0x82, + 0xd1, 0x52, 0x8c, 0xaf, 0x34, 0x8b, 0x56, 0xae, 0x51, 0xc2, 0x5b, 0x30, 0xb0, 0xe1, 0x92, 0x7a, + 0x0d, 0x93, 0x0d, 0x31, 0x1b, 0x4f, 0xe6, 0x9b, 0x2d, 0x2e, 0x51, 0x4c, 0xa5, 0x02, 0x65, 0x32, + 0xab, 0x25, 0x51, 0x19, 0x2b, 0x32, 0x68, 0x1b, 0xc6, 0xe5, 0x9c, 0x49, 0xa8, 0x38, 0xb5, 0x9f, + 0x6a, 0xb7, 0x08, 0x4d, 0xe2, 0xcc, 0x84, 0x1b, 0x27, 0xc8, 0xe0, 0x14, 0x61, 0x74, 0x1a, 0x7a, + 0x1a, 0x94, 0x3f, 0xe9, 0x61, 0xc3, 0xcf, 0x84, 0x54, 0x4c, 0xde, 0xc6, 0x4a, 0xed, 0x9f, 0xb3, + 0xe0, 0x64, 0x6a, 0x64, 0x84, 0xdc, 0xf1, 0x01, 0xcf, 0x42, 0x52, 0x0e, 0x58, 0xe8, 0x2c, 0x07, + 0xb4, 0xff, 0x3b, 0x0b, 0x8e, 0x2d, 0x36, 0x9a, 0xd1, 0xee, 0x82, 0x6b, 0x5a, 0x10, 0xbc, 0x0c, + 0x7d, 0x0d, 0x52, 0x73, 0x5b, 0x0d, 0x31, 0x73, 0x25, 0x79, 0x87, 0xaf, 0xb0, 0x52, 0x7a, 0x0e, + 0x54, 0x22, 0x3f, 0x70, 0x36, 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x71, 0x42, 0xee, 0x3d, 0x72, 0xdd, + 0x6d, 0xb8, 0xd1, 0xfd, 0xed, 0x2e, 0xa1, 0xfc, 0x97, 0x44, 0x70, 0x4c, 0xcf, 0xfe, 0x96, 0x05, + 0x63, 0x72, 0xdd, 0xcf, 0xd6, 0x6a, 0x01, 0x09, 0x43, 0x34, 0x0d, 0x05, 0xb7, 0x29, 0x7a, 0x09, + 0xa2, 0x97, 0x85, 0xe5, 0x32, 0x2e, 0xb8, 0x4d, 0xf9, 0xe8, 0x62, 0x6c, 0x42, 0xd1, 0xb4, 0x83, + 0xb8, 0x2a, 0xca, 0xb1, 0xc2, 
0x40, 0x17, 0x60, 0xc0, 0xf3, 0x6b, 0xfc, 0xdd, 0x22, 0x34, 0xe1, + 0x14, 0x73, 0x55, 0x94, 0x61, 0x05, 0x45, 0x65, 0x18, 0xe4, 0x56, 0xb2, 0xf1, 0xa2, 0xed, 0xca, + 0xd6, 0x96, 0x7d, 0xd9, 0x9a, 0xac, 0x89, 0x63, 0x22, 0xf6, 0x1f, 0x59, 0x30, 0x2c, 0xbf, 0xac, + 0xcb, 0x17, 0x25, 0xdd, 0x5a, 0xf1, 0x6b, 0x32, 0xde, 0x5a, 0xf4, 0x45, 0xc8, 0x20, 0xc6, 0x43, + 0xb0, 0x78, 0xa8, 0x87, 0xe0, 0x25, 0x18, 0x72, 0x9a, 0xcd, 0xb2, 0xf9, 0x8a, 0x64, 0x4b, 0x69, + 0x36, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xb3, 0x05, 0x18, 0x95, 0x5f, 0x50, 0x69, 0xdd, 0x0e, 0x49, + 0x84, 0xd6, 0x60, 0xd0, 0xe1, 0xb3, 0x44, 0xe4, 0x22, 0x7f, 0x2c, 0x5b, 0xba, 0x69, 0x4c, 0x69, + 0xcc, 0x0e, 0xcf, 0xca, 0xda, 0x38, 0x26, 0x84, 0xea, 0x30, 0xe1, 0xf9, 0x11, 0x63, 0x8d, 0x14, + 0xbc, 0x9d, 0xc2, 0x39, 0x49, 0xfd, 0x94, 0xa0, 0x3e, 0xb1, 0x9a, 0xa4, 0x82, 0xd3, 0x84, 0xd1, + 0xa2, 0x94, 0x18, 0x17, 0xf3, 0x45, 0x7d, 0xfa, 0xc4, 0x65, 0x0b, 0x8c, 0xed, 0xdf, 0xb7, 0x60, + 0x50, 0xa2, 0x1d, 0x85, 0x6d, 0xc1, 0x0a, 0xf4, 0x87, 0x6c, 0x12, 0xe4, 0xd0, 0xd8, 0xed, 0x3a, + 0xce, 0xe7, 0x2b, 0xe6, 0xf8, 0xf8, 0xff, 0x10, 0x4b, 0x1a, 0x4c, 0x61, 0xa8, 0xba, 0xff, 0x3e, + 0x51, 0x18, 0xaa, 0xfe, 0xe4, 0x5c, 0x4a, 0x7f, 0xc7, 0xfa, 0xac, 0x49, 0xe0, 0xe9, 0xc3, 0xa4, + 0x19, 0x90, 0x0d, 0xf7, 0x6e, 0xf2, 0x61, 0x52, 0x66, 0xa5, 0x58, 0x40, 0xd1, 0x3b, 0x30, 0x5c, + 0x95, 0x9a, 0xa2, 0x78, 0x87, 0x9f, 0x6f, 0xab, 0xb5, 0x54, 0x0a, 0x6e, 0x2e, 0xe9, 0x9c, 0xd7, + 0xea, 0x63, 0x83, 0x9a, 0x69, 0x05, 0x56, 0xec, 0x64, 0x05, 0x16, 0xd3, 0xcd, 0xb7, 0x89, 0xfa, + 0x79, 0x0b, 0xfa, 0xb8, 0x86, 0xa0, 0x3b, 0x05, 0x8d, 0xa6, 0xef, 0x8f, 0xc7, 0xee, 0x26, 0x2d, + 0x14, 0x9c, 0x0d, 0x5a, 0x81, 0x41, 0xf6, 0x83, 0x69, 0x38, 0x8a, 0xf9, 0x3e, 0x63, 0xbc, 0x55, + 0xbd, 0x83, 0x37, 0x65, 0x35, 0x1c, 0x53, 0xb0, 0x7f, 0xba, 0x48, 0x4f, 0xb7, 0x18, 0xd5, 0xb8, + 0xf4, 0xad, 0x87, 0x77, 0xe9, 0x17, 0x1e, 0xd6, 0xa5, 0xbf, 0x09, 0x63, 0x55, 0xcd, 0x3a, 0x20, + 0x9e, 0xc9, 0x0b, 0x6d, 0x17, 0x89, 0x66, 0x48, 0xc0, 0x65, 0xa8, 0xf3, 0x26, 0x11, 0x9c, 0xa4, + 0x8a, 0x3e, 0x09, 0xc3, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, 0x90, 0xee, 0x89, 0xfc, 0xf5, 0xa2, 0x37, + 0xc1, 0x65, 0xee, 0x5a, 0x75, 0x6c, 0x10, 0xb3, 0xff, 0xc9, 0x02, 0xb4, 0xd8, 0xdc, 0x22, 0x0d, + 0x12, 0x38, 0xf5, 0x58, 0xc9, 0xf7, 0x25, 0x0b, 0xa6, 0x48, 0xaa, 0x78, 0xde, 0x6f, 0x34, 0xc4, + 0x93, 0x3e, 0x47, 0xea, 0xb4, 0x98, 0x53, 0x27, 0x66, 0xeb, 0xf3, 0x30, 0x70, 0x6e, 0x7b, 0x68, + 0x05, 0x26, 0xf9, 0x2d, 0xa9, 0x00, 0x9a, 0xad, 0xdd, 0x23, 0x82, 0xf0, 0xe4, 0x5a, 0x1a, 0x05, + 0x67, 0xd5, 0xb3, 0x7f, 0x7f, 0x04, 0x72, 0x7b, 0xf1, 0x81, 0x76, 0xf3, 0x03, 0xed, 0xe6, 0x07, + 0xda, 0xcd, 0x0f, 0xb4, 0x9b, 0x1f, 0x68, 0x37, 0x3f, 0xd0, 0x6e, 0xbe, 0x4f, 0xb5, 0x9b, 0xff, + 0xa5, 0x05, 0xc7, 0xd5, 0xf5, 0x65, 0x3c, 0xd8, 0x3f, 0x07, 0x93, 0x7c, 0xbb, 0xcd, 0xd7, 0x1d, + 0xb7, 0xb1, 0x46, 0x1a, 0xcd, 0xba, 0x13, 0x49, 0x1b, 0xa6, 0x4b, 0x99, 0x2b, 0x37, 0xe1, 0x28, + 0x61, 0x54, 0xe4, 0x1e, 0x67, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0xfd, 0x3b, 0x03, 0xd0, 0xbb, 0xb8, + 0x43, 0xbc, 0xe8, 0x08, 0x9e, 0x36, 0x55, 0x18, 0x75, 0xbd, 0x1d, 0xbf, 0xbe, 0x43, 0x6a, 0x1c, + 0x7e, 0x98, 0x17, 0xf8, 0x09, 0x41, 0x7a, 0x74, 0xd9, 0x20, 0x81, 0x13, 0x24, 0x1f, 0x86, 0x8e, + 0xe8, 0x0a, 0xf4, 0xf1, 0xcb, 0x47, 0x28, 0x88, 0x32, 0xcf, 0x6c, 0x36, 0x88, 0xe2, 0x4a, 0x8d, + 0xf5, 0x57, 0xfc, 0x72, 0x13, 0xd5, 0xd1, 0x67, 0x61, 0x74, 0xc3, 0x0d, 0xc2, 0x68, 0xcd, 0x6d, + 0xd0, 0xab, 0xa1, 0xd1, 0xbc, 0x0f, 0x9d, 0x90, 0x1a, 0x87, 0x25, 0x83, 0x12, 0x4e, 0x50, 0x46, + 0x9b, 0x30, 0x52, 0x77, 0xf4, 0xa6, 0xfa, 0x0f, 0xdd, 
0x94, 0xba, 0x1d, 0xae, 0xeb, 0x84, 0xb0, + 0x49, 0x97, 0x6e, 0xa7, 0x2a, 0x53, 0x6b, 0x0c, 0x30, 0x71, 0x86, 0xda, 0x4e, 0x5c, 0x9f, 0xc1, + 0x61, 0x94, 0x41, 0x63, 0xee, 0x06, 0x83, 0x26, 0x83, 0xa6, 0x39, 0x15, 0x7c, 0x06, 0x06, 0x09, + 0x1d, 0x42, 0x4a, 0x58, 0x5c, 0x30, 0x17, 0xbb, 0xeb, 0xeb, 0x8a, 0x5b, 0x0d, 0x7c, 0x53, 0x1b, + 0xb7, 0x28, 0x29, 0xe1, 0x98, 0x28, 0x9a, 0x87, 0xbe, 0x90, 0x04, 0xae, 0x92, 0xf8, 0xb7, 0x99, + 0x46, 0x86, 0xc6, 0x5d, 0x1a, 0xf9, 0x6f, 0x2c, 0xaa, 0xd2, 0xe5, 0xe5, 0x30, 0x51, 0x2c, 0xbb, + 0x0c, 0xb4, 0xe5, 0x35, 0xcb, 0x4a, 0xb1, 0x80, 0xa2, 0x37, 0xa1, 0x3f, 0x20, 0x75, 0xa6, 0xee, + 0x1d, 0xe9, 0x7e, 0x91, 0x73, 0xed, 0x31, 0xaf, 0x87, 0x25, 0x01, 0x74, 0x0d, 0x50, 0x40, 0x28, + 0x83, 0xe7, 0x7a, 0x9b, 0xca, 0x08, 0x5f, 0x1c, 0xb4, 0x8a, 0x91, 0xc6, 0x31, 0x86, 0xf4, 0x66, + 0xc5, 0x19, 0xd5, 0xd0, 0x15, 0x98, 0x50, 0xa5, 0xcb, 0x5e, 0x18, 0x39, 0xf4, 0x80, 0x1b, 0x63, + 0xb4, 0x94, 0x7c, 0x05, 0x27, 0x11, 0x70, 0xba, 0x8e, 0xfd, 0x6b, 0x16, 0xf0, 0x71, 0x3e, 0x02, + 0xa9, 0xc2, 0xeb, 0xa6, 0x54, 0xe1, 0x54, 0xee, 0xcc, 0xe5, 0x48, 0x14, 0x7e, 0xcd, 0x82, 0x21, + 0x6d, 0x66, 0xe3, 0x35, 0x6b, 0xb5, 0x59, 0xb3, 0x2d, 0x18, 0xa7, 0x2b, 0xfd, 0xc6, 0xed, 0x90, + 0x04, 0x3b, 0xa4, 0xc6, 0x16, 0x66, 0xe1, 0xfe, 0x16, 0xa6, 0x32, 0xf8, 0xbd, 0x9e, 0x20, 0x88, + 0x53, 0x4d, 0xd8, 0x9f, 0x91, 0x5d, 0x55, 0xf6, 0xd1, 0x55, 0x35, 0xe7, 0x09, 0xfb, 0x68, 0x35, + 0xab, 0x38, 0xc6, 0xa1, 0x5b, 0x6d, 0xcb, 0x0f, 0xa3, 0xa4, 0x7d, 0xf4, 0x55, 0x3f, 0x8c, 0x30, + 0x83, 0xd8, 0x2f, 0x00, 0x2c, 0xde, 0x25, 0x55, 0xbe, 0x62, 0xf5, 0x47, 0x8f, 0x95, 0xff, 0xe8, + 0xb1, 0xff, 0xd2, 0x82, 0xd1, 0xa5, 0x79, 0xe3, 0xe6, 0x9a, 0x01, 0xe0, 0x2f, 0xb5, 0x5b, 0xb7, + 0x56, 0xa5, 0x91, 0x0e, 0xb7, 0x53, 0x50, 0xa5, 0x58, 0xc3, 0x40, 0xa7, 0xa0, 0x58, 0x6f, 0x79, + 0x42, 0xec, 0xd9, 0x4f, 0xaf, 0xc7, 0xeb, 0x2d, 0x0f, 0xd3, 0x32, 0xcd, 0x93, 0xad, 0xd8, 0xb5, + 0x27, 0x5b, 0xc7, 0x80, 0x3a, 0xa8, 0x04, 0xbd, 0x77, 0xee, 0xb8, 0x35, 0x1e, 0x27, 0x40, 0x18, + 0x10, 0xdd, 0xba, 0xb5, 0xbc, 0x10, 0x62, 0x5e, 0x6e, 0x7f, 0xb9, 0x08, 0xd3, 0x4b, 0x75, 0x72, + 0xf7, 0x3d, 0xc6, 0x4a, 0xe8, 0xd6, 0x0f, 0xef, 0x70, 0x02, 0xa4, 0xc3, 0xfa, 0x5a, 0x76, 0x1e, + 0x8f, 0x0d, 0xe8, 0xe7, 0xe6, 0xc1, 0x32, 0x72, 0x42, 0xa6, 0x52, 0x36, 0x7f, 0x40, 0x66, 0xb8, + 0x99, 0xb1, 0x50, 0xca, 0xaa, 0x0b, 0x53, 0x94, 0x62, 0x49, 0x7c, 0xfa, 0x15, 0x18, 0xd6, 0x31, + 0x0f, 0xe5, 0xf5, 0xfc, 0xc3, 0x45, 0x18, 0xa7, 0x3d, 0x78, 0xa8, 0x13, 0xb1, 0x9e, 0x9e, 0x88, + 0x07, 0xed, 0xf9, 0xda, 0x79, 0x36, 0xde, 0x49, 0xce, 0xc6, 0xa5, 0xbc, 0xd9, 0x38, 0xea, 0x39, + 0xf8, 0x11, 0x0b, 0x26, 0x97, 0xea, 0x7e, 0x75, 0x3b, 0xe1, 0x9d, 0xfa, 0x12, 0x0c, 0xd1, 0xe3, + 0x38, 0x34, 0x02, 0xb5, 0x18, 0xa1, 0x7b, 0x04, 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0xf5, 0xf5, 0xe5, + 0x85, 0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xcf, 0x2d, 0x38, 0x73, 0x65, 0x7e, 0x31, + 0x5e, 0x8a, 0xa9, 0xa0, 0x43, 0xe7, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58, + 0x2f, 0x04, 0xf4, 0xfd, 0x12, 0xdf, 0x6b, 0x1d, 0xe0, 0x0a, 0x2e, 0xcf, 0x8b, 0x73, 0x57, 0x6a, + 0x81, 0xac, 0x5c, 0x2d, 0xd0, 0x13, 0xd0, 0x4f, 0xef, 0x05, 0xb7, 0x2a, 0xfb, 0xcd, 0xcd, 0x2e, + 0x78, 0x11, 0x96, 0x30, 0xfb, 0x57, 0x2d, 0x98, 0xbc, 0xe2, 0x46, 0xf4, 0xd2, 0x4e, 0x46, 0xd5, + 0xa1, 0xb7, 0x76, 0xe8, 0x46, 0x7e, 0xb0, 0x9b, 0x8c, 0xaa, 0x83, 0x15, 0x04, 0x6b, 0x58, 0xfc, + 0x83, 0x76, 0x5c, 0xe6, 0xef, 0x52, 0x30, 0xf5, 0x6e, 0x58, 0x94, 0x63, 0x85, 0x41, 0xc7, 0xab, + 0xe6, 0x06, 0x4c, 0x64, 0xb9, 0x2b, 0x0e, 0x6e, 0x35, 0x5e, 0x0b, 0x12, 0x80, 
0x63, 0x1c, 0xfb, + 0x1f, 0x2c, 0x28, 0x5d, 0xe1, 0x5e, 0xbb, 0x1b, 0x61, 0xce, 0xa1, 0xfb, 0x02, 0x0c, 0x12, 0xa9, + 0x20, 0x10, 0xbd, 0x56, 0x8c, 0xa8, 0xd2, 0x1c, 0xf0, 0xe0, 0x3e, 0x0a, 0xaf, 0x0b, 0x17, 0xfa, + 0xc3, 0xf9, 0x40, 0x2f, 0x01, 0x22, 0x7a, 0x5b, 0x7a, 0xb4, 0x23, 0x16, 0x36, 0x65, 0x31, 0x05, + 0xc5, 0x19, 0x35, 0xec, 0x9f, 0xb3, 0xe0, 0xb8, 0xfa, 0xe0, 0xf7, 0xdd, 0x67, 0xda, 0x5f, 0x2f, + 0xc0, 0xc8, 0xd5, 0xb5, 0xb5, 0xf2, 0x15, 0x12, 0x69, 0xab, 0xb2, 0xbd, 0xda, 0x1f, 0x6b, 0xda, + 0xcb, 0x76, 0x6f, 0xc4, 0x56, 0xe4, 0xd6, 0x67, 0x78, 0x0c, 0xbf, 0x99, 0x65, 0x2f, 0xba, 0x11, + 0x54, 0xa2, 0xc0, 0xf5, 0x36, 0x33, 0x57, 0xba, 0xe4, 0x59, 0x8a, 0x79, 0x3c, 0x0b, 0x7a, 0x01, + 0xfa, 0x58, 0x10, 0x41, 0x39, 0x09, 0x8f, 0xa8, 0x27, 0x16, 0x2b, 0x3d, 0xd8, 0x2b, 0x0d, 0xae, + 0xe3, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0xb4, 0x0e, 0x43, 0x5b, 0x51, 0xd4, 0xbc, 0x4a, 0x9c, 0x1a, + 0x09, 0xe4, 0x29, 0x7b, 0x36, 0xeb, 0x94, 0xa5, 0x83, 0xc0, 0xd1, 0xe2, 0x83, 0x29, 0x2e, 0x0b, + 0xb1, 0x4e, 0xc7, 0xae, 0x00, 0xc4, 0xb0, 0x07, 0xa4, 0xb8, 0xb1, 0xd7, 0x60, 0x90, 0x7e, 0xee, + 0x6c, 0xdd, 0x75, 0xda, 0xab, 0xc6, 0x9f, 0x81, 0x41, 0xa9, 0xf8, 0x0e, 0x45, 0x88, 0x0f, 0x76, + 0x23, 0x49, 0xbd, 0x78, 0x88, 0x63, 0xb8, 0xfd, 0x38, 0x08, 0x0b, 0xe0, 0x76, 0x24, 0xed, 0x0d, + 0x38, 0xc6, 0x4c, 0x99, 0x9d, 0x68, 0xcb, 0x58, 0xa3, 0x9d, 0x17, 0xc3, 0xb3, 0xe2, 0x5d, 0xc7, + 0xbf, 0x6c, 0x4a, 0x73, 0x21, 0x1f, 0x96, 0x14, 0xe3, 0x37, 0x9e, 0xfd, 0xf7, 0x3d, 0xf0, 0xc8, + 0x72, 0x25, 0x3f, 0x36, 0xd5, 0x65, 0x18, 0xe6, 0xec, 0x22, 0x5d, 0x1a, 0x4e, 0x5d, 0xb4, 0xab, + 0x24, 0xa0, 0x6b, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x0c, 0x14, 0xdd, 0x77, 0xbd, 0xa4, 0x83, 0xe5, + 0xf2, 0x5b, 0xab, 0x98, 0x96, 0x53, 0x30, 0xe5, 0x3c, 0xf9, 0x91, 0xae, 0xc0, 0x8a, 0xfb, 0x7c, + 0x1d, 0x46, 0xdd, 0xb0, 0x1a, 0xba, 0xcb, 0x1e, 0xdd, 0xa7, 0xda, 0x4e, 0x57, 0x32, 0x07, 0xda, + 0x69, 0x05, 0xc5, 0x09, 0x6c, 0xed, 0x7e, 0xe9, 0xed, 0x9a, 0x7b, 0xed, 0x18, 0x19, 0x83, 0x1e, + 0xff, 0x4d, 0xf6, 0x75, 0x21, 0x13, 0xc1, 0x8b, 0xe3, 0x9f, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x3e, + 0xe8, 0xaa, 0x5b, 0x4e, 0x73, 0xb6, 0x15, 0x6d, 0x2d, 0xb8, 0x61, 0xd5, 0xdf, 0x21, 0xc1, 0x2e, + 0x7b, 0x8b, 0x0f, 0xc4, 0x0f, 0x3a, 0x05, 0x98, 0xbf, 0x3a, 0x5b, 0xa6, 0x98, 0x38, 0x5d, 0x07, + 0xcd, 0xc2, 0x98, 0x2c, 0xac, 0x90, 0x90, 0x5d, 0x01, 0x43, 0x8c, 0x8c, 0x72, 0x79, 0x14, 0xc5, + 0x8a, 0x48, 0x12, 0xdf, 0x64, 0x70, 0xe1, 0x41, 0x30, 0xb8, 0x2f, 0xc3, 0x88, 0xeb, 0xb9, 0x91, + 0xeb, 0x44, 0x3e, 0xd7, 0x1f, 0xf1, 0x67, 0x37, 0x13, 0x30, 0x2f, 0xeb, 0x00, 0x6c, 0xe2, 0xd9, + 0xff, 0x5f, 0x0f, 0x4c, 0xb0, 0x69, 0xfb, 0x60, 0x85, 0x7d, 0x2f, 0xad, 0xb0, 0xf5, 0xf4, 0x0a, + 0x7b, 0x10, 0x9c, 0xfb, 0x7d, 0x2f, 0xb3, 0x2f, 0x58, 0x30, 0xc1, 0x64, 0xdc, 0xc6, 0x32, 0xbb, + 0x08, 0x83, 0x81, 0xe1, 0x8d, 0x3a, 0xa8, 0x2b, 0xb5, 0xa4, 0x63, 0x69, 0x8c, 0x83, 0xde, 0x00, + 0x68, 0xc6, 0x32, 0xf4, 0x82, 0x11, 0x42, 0x14, 0x72, 0xc5, 0xe7, 0x5a, 0x1d, 0xfb, 0xb3, 0x30, + 0xa8, 0xdc, 0x4d, 0xa5, 0xbf, 0xb9, 0x95, 0xe3, 0x6f, 0xde, 0x99, 0x8d, 0x90, 0xb6, 0x71, 0xc5, + 0x4c, 0xdb, 0xb8, 0xaf, 0x5a, 0x10, 0x6b, 0x38, 0xd0, 0x5b, 0x30, 0xd8, 0xf4, 0x99, 0x41, 0x74, + 0x20, 0xbd, 0x0c, 0x1e, 0x6f, 0xab, 0x22, 0xe1, 0x71, 0x02, 0x03, 0x3e, 0x1d, 0x65, 0x59, 0x15, + 0xc7, 0x54, 0xd0, 0x35, 0xe8, 0x6f, 0x06, 0xa4, 0x12, 0xb1, 0x20, 0x56, 0xdd, 0x13, 0xe4, 0xcb, + 0x97, 0x57, 0xc4, 0x92, 0x82, 0xfd, 0x1b, 0x05, 0x18, 0x4f, 0xa2, 0xa2, 0xd7, 0xa0, 0x87, 0xdc, + 0x25, 0x55, 0xd1, 0xdf, 0x4c, 0x9e, 0x20, 0x96, 0x91, 0xf0, 0x01, 0xa0, 0xff, 0x31, 0xab, 0x85, + 0xae, 
0x42, 0x3f, 0x65, 0x08, 0xae, 0xa8, 0x80, 0x8d, 0x8f, 0xe6, 0x31, 0x15, 0x8a, 0xb3, 0xe2, + 0x9d, 0x13, 0x45, 0x58, 0x56, 0x67, 0x06, 0x69, 0xd5, 0x66, 0x85, 0xbe, 0xb5, 0xa2, 0x76, 0x22, + 0x81, 0xb5, 0xf9, 0x32, 0x47, 0x12, 0xd4, 0xb8, 0x41, 0x9a, 0x2c, 0xc4, 0x31, 0x11, 0xf4, 0x06, + 0xf4, 0x86, 0x75, 0x42, 0x9a, 0xc2, 0xe2, 0x20, 0x53, 0xca, 0x59, 0xa1, 0x08, 0x82, 0x12, 0x93, + 0x8a, 0xb0, 0x02, 0xcc, 0x2b, 0xda, 0xbf, 0x65, 0x01, 0x70, 0x0b, 0x3e, 0xc7, 0xdb, 0x24, 0x47, + 0xa0, 0x18, 0x58, 0x80, 0x9e, 0xb0, 0x49, 0xaa, 0xed, 0xac, 0xfd, 0xe3, 0xfe, 0x54, 0x9a, 0xa4, + 0x1a, 0xaf, 0x59, 0xfa, 0x0f, 0xb3, 0xda, 0xf6, 0x8f, 0x02, 0x8c, 0xc6, 0x68, 0xcb, 0x11, 0x69, + 0xa0, 0xe7, 0x8c, 0x28, 0x37, 0xa7, 0x12, 0x51, 0x6e, 0x06, 0x19, 0xb6, 0x26, 0x83, 0xfe, 0x2c, + 0x14, 0x1b, 0xce, 0x5d, 0x21, 0x64, 0x7c, 0xa6, 0x7d, 0x37, 0x28, 0xfd, 0x99, 0x15, 0xe7, 0x2e, + 0x7f, 0x87, 0x3f, 0x23, 0xf7, 0xd8, 0x8a, 0x73, 0xb7, 0xa3, 0x45, 0x3a, 0x6d, 0x84, 0xb5, 0xe5, + 0x7a, 0xc2, 0x38, 0xad, 0xab, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0x75, 0xd1, 0x96, 0xeb, 0xa1, + 0x7b, 0xd0, 0x2f, 0x6c, 0x47, 0x45, 0xf8, 0xbd, 0x8b, 0x5d, 0xb4, 0x27, 0x4c, 0x4f, 0x79, 0x9b, + 0x17, 0xa5, 0x9c, 0x41, 0x94, 0x76, 0x6c, 0x57, 0x36, 0x88, 0xfe, 0x2b, 0x0b, 0x46, 0xc5, 0x6f, + 0x4c, 0xde, 0x6d, 0x91, 0x30, 0x12, 0x7c, 0xf8, 0x47, 0xba, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2, + 0x11, 0x79, 0x65, 0x9a, 0xc0, 0x8e, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x86, 0x05, 0xc7, 0x1a, 0xce, + 0x5d, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xe4, 0xfa, 0xc2, 0x06, 0xe3, 0xb5, 0xee, 0xa6, 0x3f, 0x55, + 0x9d, 0x77, 0x52, 0x2a, 0x5c, 0x8f, 0x65, 0xa1, 0x74, 0xec, 0x6a, 0x66, 0xbf, 0xa6, 0x37, 0x60, + 0x40, 0xae, 0xb7, 0x87, 0x69, 0x18, 0xcf, 0xda, 0x11, 0x6b, 0xed, 0xa1, 0xb6, 0xf3, 0x59, 0x18, + 0xd6, 0xd7, 0xd8, 0x43, 0x6d, 0xeb, 0x5d, 0x98, 0xcc, 0x58, 0x4b, 0x0f, 0xb5, 0xc9, 0x3b, 0x70, + 0x2a, 0x77, 0x7d, 0x3c, 0x54, 0xc7, 0x86, 0xaf, 0x5b, 0xfa, 0x39, 0x78, 0x04, 0xda, 0x99, 0x79, + 0x53, 0x3b, 0x73, 0xb6, 0xfd, 0xce, 0xc9, 0x51, 0xd1, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, + 0x4d, 0xe8, 0xab, 0xd3, 0x12, 0x69, 0x81, 0x6c, 0x77, 0xde, 0x91, 0x31, 0x5f, 0xcc, 0xca, 0x43, + 0x2c, 0x28, 0xd8, 0x5f, 0xb1, 0x20, 0xc3, 0x35, 0x83, 0xf2, 0x49, 0x2d, 0xb7, 0xc6, 0x86, 0xa4, + 0x18, 0xf3, 0x49, 0x2a, 0x08, 0xcc, 0x19, 0x28, 0x6e, 0xba, 0x35, 0xe1, 0x59, 0xac, 0xc0, 0x57, + 0x28, 0x78, 0xd3, 0xad, 0xa1, 0x25, 0x40, 0x61, 0xab, 0xd9, 0xac, 0x33, 0xb3, 0x25, 0xa7, 0x7e, + 0x25, 0xf0, 0x5b, 0x4d, 0x6e, 0x6e, 0x5c, 0xe4, 0x42, 0xa2, 0x4a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8, + 0xbf, 0x6b, 0x41, 0xcf, 0x11, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x6b, 0xc3, + 0x0c, 0x76, 0xee, 0x2c, 0xde, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0xf6, 0x2c, 0x98, + 0xbc, 0xee, 0x3b, 0xb5, 0x39, 0xa7, 0xee, 0x78, 0x55, 0x12, 0x2c, 0x7b, 0x9b, 0x87, 0xb2, 0xed, + 0x2f, 0x74, 0xb4, 0xed, 0xbf, 0x0c, 0x7d, 0x6e, 0x53, 0x0b, 0xfb, 0x7e, 0x8e, 0xce, 0xee, 0x72, + 0x59, 0x44, 0x7c, 0x47, 0x46, 0xe3, 0xac, 0x14, 0x0b, 0x7c, 0xba, 0x2c, 0xb9, 0x51, 0x5d, 0x4f, + 0xfe, 0xb2, 0xa4, 0x6f, 0x9d, 0x64, 0x38, 0x33, 0xc3, 0xfc, 0x7b, 0x0b, 0x8c, 0x26, 0x84, 0x07, + 0x23, 0x86, 0x7e, 0x97, 0x7f, 0xa9, 0x58, 0x9b, 0x4f, 0x66, 0xbf, 0x41, 0x52, 0x03, 0xa3, 0xf9, + 0xe6, 0xf1, 0x02, 0x2c, 0x09, 0xd9, 0x97, 0x21, 0x33, 0xfc, 0x4c, 0x67, 0xf9, 0x92, 0xfd, 0x09, + 0x98, 0x60, 0x35, 0x0f, 0x29, 0xbb, 0xb1, 0x13, 0x52, 0xf1, 0x8c, 0x08, 0xbe, 0xf6, 0xff, 0x6d, + 0x01, 0x5a, 0xf1, 0x6b, 0xee, 0xc6, 0xae, 0x20, 0xce, 0xbf, 0xff, 0x5d, 0x28, 0xf1, 0xc7, 0x71, + 0x32, 0xca, 0xed, 0x7c, 0xdd, 
0x09, 0x43, 0x4d, 0x22, 0xff, 0xa4, 0x68, 0xb7, 0xb4, 0xd6, 0x1e, + 0x1d, 0x77, 0xa2, 0x87, 0xde, 0x4a, 0x04, 0x1d, 0xfc, 0x68, 0x2a, 0xe8, 0xe0, 0x93, 0x99, 0x76, + 0x31, 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0x17, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcf, + 0x8c, 0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0x81, 0x4b, 0x62, 0xff, 0xd5, 0x82, + 0x38, 0xdc, 0xd5, 0x11, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3, + 0xb8, 0xd1, 0x35, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53, + 0xb3, 0xf1, 0xcd, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0x07, 0x13, 0xa8, 0x72, + 0x07, 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x9c, 0x0d, 0xcd, + 0xb4, 0xf4, 0x5c, 0xbd, 0x9e, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f, + 0xea, 0xeb, 0xe0, 0x82, 0xfd, 0x35, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4f, 0x5c, 0x40, 0x54, 0x7f, + 0x72, 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x1f, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x3d, + 0xa2, 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0x3d, 0xd8, 0x2b, 0x8d, 0xa8, 0x7f, 0x3c, 0x82, + 0x6b, 0x5c, 0xc5, 0xfe, 0x25, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13, + 0x92, 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0x1e, 0xec, 0x95, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39, + 0x76, 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0x7f, 0xb2, 0xa0, 0x67, 0x95, 0xde, + 0x5e, 0x0f, 0xff, 0x08, 0x78, 0xdd, 0x38, 0x02, 0x4e, 0xe7, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b, + 0x89, 0xdd, 0x7f, 0x36, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05, + 0xbe, 0x60, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x0a, 0xfa, 0x85, + 0x9f, 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x7c, 0x11, 0x8c, 0xfc, 0x48, 0xe8, + 0xf7, 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x66, 0xa5, 0xba, 0x45, + 0x6a, 0xad, 0xba, 0xeb, 0x6d, 0x2e, 0x6f, 0x7a, 0xbe, 0x2a, 0x5e, 0xbc, 0x4b, 0xaa, 0x2d, 0xa6, + 0x1b, 0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbf, 0xbf, 0x57, 0x9a, 0xc1, 0x87, 0xa2, 0x8d, + 0x0f, 0xd9, 0x17, 0xf4, 0xe7, 0x16, 0x5c, 0xe4, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47, + 0x59, 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xb2, 0x18, 0xd0, 0x8b, 0xe5, 0xc3, 0xb5, + 0x85, 0x0f, 0xdb, 0x39, 0xfb, 0x7f, 0x2e, 0xc2, 0x88, 0x08, 0x13, 0x29, 0xee, 0x80, 0x97, 0x8c, + 0x25, 0xf1, 0x68, 0x62, 0x49, 0x4c, 0x18, 0xc8, 0x0f, 0xe6, 0xf8, 0x0f, 0x61, 0x82, 0x1e, 0xce, + 0x57, 0x89, 0x13, 0x44, 0xb7, 0x89, 0xc3, 0xad, 0x12, 0x8b, 0x87, 0x3e, 0xfd, 0x95, 0x78, 0xfc, + 0x7a, 0x92, 0x18, 0x4e, 0xd3, 0xff, 0x5e, 0xba, 0x73, 0x3c, 0x18, 0x4f, 0x45, 0xfa, 0x7c, 0x1b, + 0x06, 0x95, 0x93, 0x94, 0x38, 0x74, 0xda, 0x07, 0xcc, 0x4d, 0x52, 0xe0, 0x42, 0xcf, 0xd8, 0x41, + 0x2f, 0x26, 0x67, 0xff, 0x66, 0xc1, 0x68, 0x90, 0x4f, 0xe2, 0x2a, 0x0c, 0x38, 0x21, 0x0b, 0xe2, + 0x5d, 0x6b, 0x27, 0x97, 0x4e, 0x35, 0xc3, 0x1c, 0xd5, 0x66, 0x45, 0x4d, 0xac, 0x68, 0xa0, 0xab, + 0xdc, 0xf6, 0x73, 0x87, 0xb4, 0x13, 0x4a, 0xa7, 0xa8, 0x81, 0xb4, 0x0e, 0xdd, 0x21, 0x58, 0xd4, + 0x47, 0x9f, 0xe2, 0xc6, 0xb9, 0xd7, 0x3c, 0xff, 0x8e, 0x77, 0xc5, 0xf7, 0x65, 0x48, 0xa0, 0xee, + 0x08, 0x4e, 0x48, 0x93, 0x5c, 0x55, 0x1d, 0x9b, 0xd4, 0xba, 0x0b, 0x9d, 0xfd, 0x39, 0x60, 0x79, + 0x49, 0xcc, 0x98, 0x04, 0x21, 0x22, 0x30, 0x26, 0x62, 0x90, 0xca, 0x32, 0x31, 0x76, 0x99, 0xcf, + 0x6f, 0xb3, 0x76, 0xac, 0xc7, 0xb9, 0x66, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0x16, 0x3f, 0x84, 0x97, + 0x88, 0x13, 0xb5, 0x02, 0x12, 0xa2, 0x8f, 0xc3, 0x54, 
0xfa, 0x65, 0x2c, 0xd4, 0x21, 0x16, 0xe3, + 0x9e, 0x4f, 0xef, 0xef, 0x95, 0xa6, 0x2a, 0x39, 0x38, 0x38, 0xb7, 0xb6, 0xfd, 0x2b, 0x16, 0x30, + 0x4f, 0xf0, 0x23, 0xe0, 0x7c, 0x3e, 0x66, 0x72, 0x3e, 0x53, 0x79, 0xd3, 0x99, 0xc3, 0xf4, 0xbc, + 0xc8, 0xd7, 0x70, 0x39, 0xf0, 0xef, 0xee, 0x0a, 0xdb, 0xad, 0xce, 0xcf, 0x38, 0xfb, 0xcb, 0x16, + 0xb0, 0x24, 0x3e, 0x98, 0xbf, 0xda, 0xa5, 0x82, 0xa3, 0xb3, 0x59, 0xc2, 0xc7, 0x61, 0x60, 0x43, + 0x0c, 0x7f, 0x86, 0xd0, 0xc9, 0xe8, 0xb0, 0x49, 0x5b, 0x4e, 0x9a, 0xf0, 0xe8, 0x14, 0xff, 0xb0, + 0xa2, 0x66, 0xff, 0xf7, 0x16, 0x4c, 0xe7, 0x57, 0x43, 0xeb, 0x70, 0x32, 0x20, 0xd5, 0x56, 0x10, + 0xd2, 0x2d, 0x21, 0x1e, 0x40, 0xc2, 0x29, 0x8a, 0x4f, 0xf5, 0x23, 0xfb, 0x7b, 0xa5, 0x93, 0x38, + 0x1b, 0x05, 0xe7, 0xd5, 0x45, 0xaf, 0xc0, 0x68, 0x2b, 0xe4, 0x9c, 0x1f, 0x63, 0xba, 0x42, 0x11, + 0x29, 0x9a, 0xf9, 0x0d, 0xad, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0x7f, 0x80, 0x2f, 0x47, 0x15, 0x2c, + 0xba, 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x78, 0xa7, 0x5b, 0x9f, 0x5d, + 0x97, 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xc1, 0x82, 0x93, 0x3a, 0xa2, 0xe6, + 0x0e, 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0xbb, + 0x20, 0x57, 0xe8, 0x0d, 0x51, 0x7e, 0x20, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8, + 0x86, 0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4, + 0xfe, 0x7b, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x5d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x6b, 0xf1, + 0x6e, 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0xcf, 0x74, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9, + 0x95, 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc3, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e, + 0x26, 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xab, 0x45, 0x7e, 0x68, + 0xb0, 0xb7, 0xc7, 0x53, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7, + 0xca, 0xbc, 0x18, 0x4b, 0x38, 0xba, 0x00, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0, + 0x42, 0xac, 0xa0, 0xe8, 0x79, 0x80, 0x66, 0xe0, 0xef, 0xb8, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a, + 0xe7, 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x55, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78, + 0xf7, 0xca, 0x6e, 0x6c, 0x5d, 0x07, 0x62, 0x13, 0x17, 0xcd, 0x42, 0x5f, 0xe4, 0x30, 0x6b, 0xb3, + 0xde, 0x7c, 0x23, 0xfa, 0x35, 0x8a, 0xa1, 0x67, 0x96, 0xa3, 0x15, 0xb0, 0xa8, 0x88, 0xde, 0x96, + 0xee, 0xf5, 0xfc, 0x26, 0x12, 0xde, 0x2b, 0xdd, 0xdd, 0x5a, 0x9a, 0x73, 0xbd, 0xf0, 0x8a, 0x31, + 0x68, 0xa1, 0x57, 0x00, 0xc8, 0xdd, 0x88, 0x04, 0x9e, 0x53, 0x57, 0x36, 0xa2, 0x8a, 0x91, 0x59, + 0xf0, 0x57, 0xfd, 0x68, 0x3d, 0x24, 0x8b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x0e, 0x01, 0xc4, + 0x0f, 0x0d, 0x74, 0x0f, 0x06, 0xaa, 0x4e, 0xd3, 0xa9, 0xf2, 0xb4, 0xa9, 0xc5, 0x3c, 0xaf, 0xe7, + 0xb8, 0xc6, 0xcc, 0xbc, 0x40, 0xe7, 0xca, 0x1b, 0x19, 0x32, 0x7c, 0x40, 0x16, 0x77, 0x54, 0xd8, + 0xa8, 0xf6, 0xd0, 0x17, 0x2c, 0x18, 0x12, 0x91, 0x8e, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a, + 0xfb, 0xb3, 0x71, 0x0d, 0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x1d, 0x7b, 0xa1, 0x37, 0x8c, + 0x3e, 0x2c, 0xdf, 0xb6, 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x90, 0x5d, 0x35, 0xfa, 0xb3, 0x76, + 0xdd, 0x78, 0xd6, 0xf6, 0xe4, 0xfb, 0x0f, 0x1b, 0xfc, 0x76, 0xa7, 0x17, 0x2d, 0x2a, 0xeb, 0xb1, + 0x44, 0x7a, 0xf3, 0x9d, 0x5e, 0xb5, 0x87, 0x5d, 0x87, 0x38, 0x22, 0x9f, 0x85, 0xb1, 0x9a, 0xc9, + 0xb5, 0x88, 0x95, 0xf8, 0x64, 0x1e, 0xdd, 0x04, 0x93, 0x13, 0xf3, 0x29, 0x09, 0x00, 0x4e, 0x12, + 0x46, 0x65, 0x1e, 0x5a, 0x66, 0xd9, 0xdb, 0xf0, 0x85, 0x07, 0x95, 0x9d, 0x3b, 
0x97, 0xbb, 0x61, + 0x44, 0x1a, 0x14, 0x33, 0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0xa1, 0x8f, 0x79, + 0x3d, 0x86, 0x53, 0x03, 0xf9, 0x6a, 0x0d, 0x33, 0x12, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b, + 0x0a, 0xe8, 0xaa, 0xf4, 0x29, 0x0e, 0x97, 0xbd, 0xf5, 0x90, 0x30, 0x9f, 0xe2, 0xc1, 0xb9, 0xc7, + 0x63, 0x77, 0x61, 0x5e, 0x9e, 0x99, 0x7f, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xb5, + 0x15, 0x71, 0xdb, 0x32, 0xbb, 0x67, 0xa6, 0xbe, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69, + 0x52, 0x16, 0x9a, 0xef, 0x7a, 0xe1, 0x83, 0xd5, 0xe9, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1, + 0x12, 0x2c, 0xea, 0x23, 0x17, 0xc6, 0x02, 0x83, 0xbd, 0x90, 0xe1, 0xd6, 0xce, 0x77, 0xc7, 0xc4, + 0x68, 0x81, 0xfc, 0x4d, 0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x23, 0xed, 0x5f, 0xfe, + 0x9d, 0x58, 0xa3, 0xe9, 0x6d, 0x18, 0x31, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x3c, 0x79, + 0xb2, 0x3c, 0x54, 0xcd, 0xe3, 0xdf, 0xf6, 0xc0, 0xa8, 0xb9, 0x13, 0xd0, 0x45, 0x18, 0x14, 0x44, + 0x54, 0x46, 0x2b, 0xb5, 0xb9, 0x57, 0x24, 0x00, 0xc7, 0x38, 0x2c, 0x91, 0x19, 0xab, 0xae, 0xf9, + 0x0a, 0xc4, 0x89, 0xcc, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x01, 0x7b, 0xdb, 0xf7, 0x23, 0x75, 0x8f, + 0xaa, 0xed, 0x32, 0xc7, 0x4a, 0xb1, 0x80, 0xd2, 0xfb, 0x73, 0x9b, 0x04, 0x1e, 0xa9, 0x9b, 0x29, + 0x1d, 0xd4, 0xfd, 0x79, 0x4d, 0x07, 0x62, 0x13, 0x97, 0x72, 0x01, 0x7e, 0xc8, 0xf6, 0x9f, 0x78, + 0x26, 0xc7, 0xbe, 0x17, 0x15, 0x1e, 0x45, 0x42, 0xc2, 0xd1, 0x27, 0xe0, 0xa4, 0x0a, 0x9f, 0x28, + 0x56, 0x97, 0x6c, 0xb1, 0xcf, 0x90, 0x6a, 0x9d, 0x9c, 0xcf, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0xeb, + 0x30, 0x2a, 0x9e, 0x52, 0x92, 0x62, 0xbf, 0x69, 0x48, 0x78, 0xcd, 0x80, 0xe2, 0x04, 0xb6, 0x4c, + 0x4a, 0xc1, 0xde, 0x18, 0x92, 0xc2, 0x40, 0x3a, 0x29, 0x85, 0x0e, 0xc7, 0xa9, 0x1a, 0x68, 0x16, + 0xc6, 0x38, 0xeb, 0xe8, 0x7a, 0x9b, 0x7c, 0x4e, 0x84, 0x67, 0xa7, 0xda, 0x54, 0x37, 0x4c, 0x30, + 0x4e, 0xe2, 0xa3, 0xcb, 0x30, 0xec, 0x04, 0xd5, 0x2d, 0x37, 0x22, 0x55, 0xba, 0x33, 0x98, 0x2d, + 0x9f, 0x66, 0x89, 0x39, 0xab, 0xc1, 0xb0, 0x81, 0x69, 0xdf, 0x83, 0xc9, 0x8c, 0xf0, 0x32, 0x74, + 0xe1, 0x38, 0x4d, 0x57, 0x7e, 0x53, 0xc2, 0xdd, 0x61, 0xb6, 0xbc, 0x2c, 0xbf, 0x46, 0xc3, 0xa2, + 0xab, 0x93, 0x85, 0xa1, 0xd1, 0x92, 0x6f, 0xab, 0xd5, 0xb9, 0x24, 0x01, 0x38, 0xc6, 0xb1, 0xff, + 0xb9, 0x00, 0x63, 0x19, 0x0a, 0x3a, 0x96, 0x00, 0x3a, 0xf1, 0xd2, 0x8a, 0xf3, 0x3d, 0x9b, 0x39, + 0x4e, 0x0a, 0x87, 0xc8, 0x71, 0x52, 0xec, 0x94, 0xe3, 0xa4, 0xe7, 0xbd, 0xe4, 0x38, 0x31, 0x47, + 0xac, 0xb7, 0xab, 0x11, 0xcb, 0xc8, 0x8b, 0xd2, 0x77, 0xc8, 0xbc, 0x28, 0xc6, 0xa0, 0xf7, 0x77, + 0x31, 0xe8, 0x3f, 0x5d, 0x80, 0xf1, 0xa4, 0x6e, 0xef, 0x08, 0xe4, 0xe3, 0x6f, 0x1a, 0xf2, 0xf1, + 0x0b, 0xdd, 0x78, 0xe2, 0xe7, 0xca, 0xca, 0x71, 0x42, 0x56, 0xfe, 0x74, 0x57, 0xd4, 0xda, 0xcb, + 0xcd, 0x7f, 0xb1, 0x00, 0xc7, 0x33, 0x55, 0x9e, 0x47, 0x30, 0x36, 0x37, 0x8c, 0xb1, 0x79, 0xae, + 0xeb, 0x28, 0x05, 0xb9, 0x03, 0x74, 0x2b, 0x31, 0x40, 0x17, 0xbb, 0x27, 0xd9, 0x7e, 0x94, 0xbe, + 0x55, 0x84, 0xb3, 0x99, 0xf5, 0x62, 0xf1, 0xf2, 0x92, 0x21, 0x5e, 0x7e, 0x3e, 0x21, 0x5e, 0xb6, + 0xdb, 0xd7, 0x7e, 0x30, 0xf2, 0x66, 0xe1, 0xad, 0xcf, 0x62, 0x8e, 0xdc, 0xa7, 0xac, 0xd9, 0xf0, + 0xd6, 0x57, 0x84, 0xb0, 0x49, 0xf7, 0x7b, 0x49, 0xc6, 0xfc, 0x67, 0x16, 0x9c, 0xca, 0x9c, 0x9b, + 0x23, 0x90, 0xf4, 0xad, 0x9a, 0x92, 0xbe, 0xa7, 0xba, 0x5e, 0xad, 0x39, 0xa2, 0xbf, 0x2f, 0xf6, + 0xe5, 0x7c, 0x0b, 0x13, 0x40, 0xdc, 0x80, 0x21, 0xa7, 0x5a, 0x25, 0x61, 0xb8, 0xe2, 0xd7, 0x54, + 0x3a, 0x84, 0xe7, 0xd8, 0xf3, 0x30, 0x2e, 0x3e, 0xd8, 0x2b, 0x4d, 0x27, 0x49, 0xc4, 0x60, 0xac, + 0x53, 
0x40, 0x9f, 0x82, 0x81, 0x50, 0x66, 0xb2, 0xec, 0xb9, 0xff, 0x4c, 0x96, 0x8c, 0xc9, 0x55, + 0x02, 0x16, 0x45, 0x12, 0x7d, 0xbf, 0x1e, 0xfd, 0xa9, 0x8d, 0x68, 0x91, 0x77, 0xf2, 0x3e, 0x62, + 0x40, 0x3d, 0x0f, 0xb0, 0xa3, 0x5e, 0x32, 0x49, 0xe1, 0x89, 0xf6, 0xc6, 0xd1, 0xb0, 0xd0, 0x1b, + 0x30, 0x1e, 0xf2, 0xc0, 0xa7, 0xb1, 0x91, 0x0a, 0x5f, 0x8b, 0x2c, 0x76, 0x5c, 0x25, 0x01, 0xc3, + 0x29, 0x6c, 0xb4, 0x24, 0x5b, 0x65, 0xe6, 0x48, 0x7c, 0x79, 0x9e, 0x8f, 0x5b, 0x14, 0x26, 0x49, + 0xc7, 0x92, 0x93, 0xc0, 0x86, 0x5f, 0xab, 0x89, 0x3e, 0x05, 0x40, 0x17, 0x91, 0x10, 0xa2, 0xf4, + 0xe7, 0x1f, 0xa1, 0xf4, 0x6c, 0xa9, 0x65, 0x7a, 0x32, 0x30, 0x37, 0xfb, 0x05, 0x45, 0x04, 0x6b, + 0x04, 0x91, 0x03, 0x23, 0xf1, 0xbf, 0x38, 0x47, 0xfb, 0x85, 0xdc, 0x16, 0x92, 0xc4, 0x99, 0x82, + 0x61, 0x41, 0x27, 0x81, 0x4d, 0x8a, 0xe8, 0x93, 0x70, 0x6a, 0x27, 0xd7, 0xf2, 0x87, 0x73, 0x82, + 0x2c, 0xe9, 0x7a, 0xbe, 0xbd, 0x4f, 0x7e, 0x7d, 0xfb, 0x7f, 0x07, 0x78, 0xa4, 0xcd, 0x49, 0x8f, + 0x66, 0x4d, 0xad, 0xfd, 0x33, 0x49, 0xc9, 0xc6, 0x74, 0x66, 0x65, 0x43, 0xd4, 0x91, 0xd8, 0x50, + 0x85, 0xf7, 0xbc, 0xa1, 0x7e, 0xc2, 0xd2, 0x64, 0x4e, 0xdc, 0xa6, 0xfb, 0x63, 0x87, 0xbc, 0xc1, + 0x1e, 0xa0, 0x10, 0x6a, 0x23, 0x43, 0x92, 0xf3, 0x7c, 0xd7, 0xdd, 0xe9, 0x5e, 0xb4, 0xf3, 0xf5, + 0xec, 0x80, 0xef, 0x5c, 0xc8, 0x73, 0xe5, 0xb0, 0xdf, 0x7f, 0x54, 0xc1, 0xdf, 0xbf, 0x69, 0xc1, + 0xa9, 0x54, 0x31, 0xef, 0x03, 0x09, 0x45, 0xb4, 0xbb, 0xd5, 0xf7, 0xdc, 0x79, 0x49, 0x90, 0x7f, + 0xc3, 0x55, 0xf1, 0x0d, 0xa7, 0x72, 0xf1, 0x92, 0x5d, 0xff, 0xd2, 0xdf, 0x94, 0x26, 0x59, 0x03, + 0x26, 0x22, 0xce, 0xef, 0x3a, 0x6a, 0xc2, 0xb9, 0x6a, 0x2b, 0x08, 0xe2, 0xc5, 0x9a, 0xb1, 0x39, + 0xf9, 0x5b, 0xef, 0xf1, 0xfd, 0xbd, 0xd2, 0xb9, 0xf9, 0x0e, 0xb8, 0xb8, 0x23, 0x35, 0xe4, 0x01, + 0x6a, 0xa4, 0xec, 0xeb, 0xd8, 0x01, 0x90, 0x23, 0x87, 0x49, 0x5b, 0xe3, 0x71, 0x4b, 0xd9, 0x0c, + 0x2b, 0xbd, 0x0c, 0xca, 0x47, 0x2b, 0x3d, 0xf9, 0xce, 0xc4, 0xa5, 0x9f, 0xbe, 0x0e, 0x67, 0xdb, + 0x2f, 0xa6, 0x43, 0x85, 0x72, 0xf8, 0x4b, 0x0b, 0xce, 0xb4, 0x8d, 0x17, 0xf6, 0x5d, 0xf8, 0x58, + 0xb0, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x35, 0x92, 0x4e, 0x78, 0x55, 0x5a, 0xa8, 0x99, 0xa3, 0xc6, + 0x91, 0x73, 0x24, 0x00, 0xc7, 0x38, 0x86, 0xc5, 0x66, 0xa1, 0xa3, 0xc5, 0xe6, 0x1f, 0x59, 0x90, + 0xba, 0xea, 0x8f, 0x80, 0xf3, 0x5c, 0x36, 0x39, 0xcf, 0xc7, 0xbb, 0x19, 0xcd, 0x1c, 0xa6, 0xf3, + 0x1f, 0xc7, 0xe0, 0x44, 0x8e, 0x27, 0xf6, 0x0e, 0x4c, 0x6c, 0x56, 0x89, 0x19, 0x7a, 0xa3, 0x5d, + 0x48, 0xba, 0xb6, 0x71, 0x3a, 0xe6, 0x8e, 0xef, 0xef, 0x95, 0x26, 0x52, 0x28, 0x38, 0xdd, 0x04, + 0xfa, 0xbc, 0x05, 0xc7, 0x9c, 0x3b, 0xe1, 0x22, 0x7d, 0x41, 0xb8, 0xd5, 0xb9, 0xba, 0x5f, 0xdd, + 0xa6, 0x8c, 0x99, 0xdc, 0x56, 0x2f, 0x66, 0x0a, 0xa3, 0x6f, 0x55, 0x52, 0xf8, 0x46, 0xf3, 0x53, + 0xfb, 0x7b, 0xa5, 0x63, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x84, 0x45, 0xc6, 0x2f, 0x27, 0xda, 0x6a, + 0x17, 0x1c, 0x26, 0xcb, 0x65, 0x9e, 0xb3, 0xc4, 0x12, 0x82, 0x15, 0x1d, 0xf4, 0x19, 0x18, 0xdc, + 0x94, 0x71, 0x20, 0x32, 0x58, 0xee, 0x78, 0x20, 0xdb, 0x47, 0xc7, 0xe0, 0x26, 0x30, 0x0a, 0x09, + 0xc7, 0x44, 0xd1, 0xeb, 0x50, 0xf4, 0x36, 0x42, 0x11, 0xa2, 0x2e, 0xdb, 0x12, 0xd7, 0xb4, 0x75, + 0xe6, 0x21, 0x98, 0x56, 0x97, 0x2a, 0x98, 0x56, 0x44, 0x57, 0xa1, 0x18, 0xdc, 0xae, 0x09, 0x4d, + 0x4a, 0xe6, 0x26, 0xc5, 0x73, 0x0b, 0x39, 0xbd, 0x62, 0x94, 0xf0, 0xdc, 0x02, 0xa6, 0x24, 0x50, + 0x19, 0x7a, 0x99, 0xfb, 0xb2, 0x60, 0x6d, 0x33, 0x9f, 0xf2, 0x6d, 0xc2, 0x00, 0x70, 0x8f, 0x44, + 0x86, 0x80, 0x39, 0x21, 0xb4, 0x06, 0x7d, 0x55, 0xd7, 0xab, 0x91, 0x40, 0xf0, 0xb2, 0x1f, 0xce, + 0xd4, 0x99, 0x30, 0x8c, 0x1c, 
0x9a, 0x5c, 0x85, 0xc0, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xe6, + 0xd6, 0x86, 0xbc, 0xb1, 0xb2, 0xa9, 0x92, 0xe6, 0xd6, 0x52, 0xa5, 0x2d, 0x55, 0x86, 0x81, 0x05, + 0x2d, 0xf4, 0x0a, 0x14, 0x36, 0xaa, 0xc2, 0x35, 0x39, 0x53, 0x79, 0x62, 0x46, 0xd1, 0x9a, 0xeb, + 0xdb, 0xdf, 0x2b, 0x15, 0x96, 0xe6, 0x71, 0x61, 0xa3, 0x8a, 0x56, 0xa1, 0x7f, 0x83, 0xc7, 0xdd, + 0x11, 0xfa, 0x91, 0x27, 0xb3, 0x43, 0x02, 0xa5, 0x42, 0xf3, 0x70, 0xef, 0x52, 0x01, 0xc0, 0x92, + 0x08, 0x4b, 0x40, 0xa5, 0xe2, 0x07, 0x89, 0xf0, 0xa5, 0x33, 0x87, 0x8b, 0xf9, 0xc4, 0x9f, 0x1a, + 0x71, 0x14, 0x22, 0xac, 0x51, 0xa4, 0xab, 0xda, 0xb9, 0xd7, 0x0a, 0x58, 0x6e, 0x0b, 0xa1, 0x1a, + 0xc9, 0x5c, 0xd5, 0xb3, 0x12, 0xa9, 0xdd, 0xaa, 0x56, 0x48, 0x38, 0x26, 0x8a, 0xb6, 0x61, 0x64, + 0x27, 0x6c, 0x6e, 0x11, 0xb9, 0xa5, 0x59, 0xd8, 0xbb, 0x1c, 0x6e, 0xf6, 0xa6, 0x40, 0x74, 0x83, + 0xa8, 0xe5, 0xd4, 0x53, 0xa7, 0x10, 0x7b, 0xd6, 0xdc, 0xd4, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe, + 0x77, 0x5b, 0xfe, 0xed, 0xdd, 0x88, 0x88, 0xa8, 0xa3, 0x99, 0xc3, 0xff, 0x16, 0x47, 0x49, 0x0f, + 0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x14, 0xc3, 0xc3, 0x4e, 0xcf, 0xf1, 0xfc, 0x90, 0xe6, 0xb3, + 0x12, 0x29, 0x67, 0x50, 0xd8, 0x69, 0x19, 0x93, 0x62, 0xa7, 0x64, 0x73, 0xcb, 0x8f, 0x7c, 0x2f, + 0x71, 0x42, 0x4f, 0xe4, 0x9f, 0x92, 0xe5, 0x0c, 0xfc, 0xf4, 0x29, 0x99, 0x85, 0x85, 0x33, 0xdb, + 0x42, 0x35, 0x18, 0x6d, 0xfa, 0x41, 0x74, 0xc7, 0x0f, 0xe4, 0xfa, 0x42, 0x6d, 0x04, 0xa5, 0x06, + 0xa6, 0x68, 0x91, 0x19, 0xe6, 0x98, 0x10, 0x9c, 0xa0, 0x89, 0x3e, 0x0e, 0xfd, 0x61, 0xd5, 0xa9, + 0x93, 0xe5, 0x1b, 0x53, 0x93, 0xf9, 0xd7, 0x4f, 0x85, 0xa3, 0xe4, 0xac, 0x2e, 0x1e, 0x36, 0x89, + 0xa3, 0x60, 0x49, 0x0e, 0x2d, 0x41, 0x2f, 0x4b, 0xec, 0xcc, 0x42, 0xe4, 0xe6, 0x44, 0x66, 0x4f, + 0xb9, 0xd5, 0xf0, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0x21, 0x29, 0xf0, 0xc3, 0xa9, + 0xe3, 0xf9, 0x7b, 0x40, 0x08, 0x18, 0x6e, 0x54, 0xda, 0xed, 0x01, 0x85, 0x84, 0x63, 0xa2, 0xf4, + 0x64, 0xa6, 0xa7, 0xe9, 0x89, 0x36, 0x26, 0x93, 0xb9, 0x67, 0x29, 0x3b, 0x99, 0xe9, 0x49, 0x4a, + 0x49, 0xd8, 0x7f, 0x30, 0x90, 0xe6, 0x59, 0x98, 0x84, 0xe9, 0x3f, 0xb7, 0x52, 0x36, 0x13, 0x1f, + 0xe9, 0x56, 0xe0, 0xfd, 0x00, 0x1f, 0xae, 0x9f, 0xb7, 0xe0, 0x44, 0x33, 0xf3, 0x43, 0x04, 0x03, + 0xd0, 0x9d, 0xdc, 0x9c, 0x7f, 0xba, 0x0a, 0xa7, 0x9c, 0x0d, 0xc7, 0x39, 0x2d, 0x25, 0x85, 0x03, + 0xc5, 0xf7, 0x2c, 0x1c, 0x58, 0x81, 0x81, 0x2a, 0x7f, 0xc9, 0xc9, 0x34, 0x00, 0x5d, 0x05, 0x03, + 0x65, 0xac, 0x84, 0x78, 0x02, 0x6e, 0x60, 0x45, 0x02, 0xfd, 0xa4, 0x05, 0x67, 0x92, 0x5d, 0xc7, + 0x84, 0x81, 0x85, 0xc1, 0x24, 0x17, 0x6b, 0x2d, 0x89, 0xef, 0x4f, 0xf1, 0xff, 0x06, 0xf2, 0x41, + 0x27, 0x04, 0xdc, 0xbe, 0x31, 0xb4, 0x90, 0x21, 0x57, 0xeb, 0x33, 0x35, 0x8a, 0x5d, 0xc8, 0xd6, + 0x5e, 0x84, 0xe1, 0x86, 0xdf, 0xf2, 0x22, 0x61, 0xf7, 0x28, 0x8c, 0xa7, 0x98, 0xd1, 0xd0, 0x8a, + 0x56, 0x8e, 0x0d, 0xac, 0x84, 0x44, 0x6e, 0xe0, 0xbe, 0x25, 0x72, 0xef, 0xc0, 0xb0, 0xa7, 0xb9, + 0x04, 0xb4, 0x7b, 0xc1, 0x0a, 0xe9, 0xa2, 0x86, 0xcd, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0x5a, 0x7b, + 0x69, 0x19, 0xbc, 0x37, 0x69, 0xd9, 0x91, 0x3e, 0x89, 0xed, 0x5f, 0x2f, 0x64, 0xbc, 0x18, 0xb8, + 0x54, 0xee, 0x35, 0x53, 0x2a, 0x77, 0x3e, 0x29, 0x95, 0x4b, 0xa9, 0xaa, 0x0c, 0x81, 0x5c, 0xf7, + 0x19, 0x25, 0xbb, 0x0e, 0xf0, 0xfc, 0xc3, 0x16, 0x9c, 0x64, 0xba, 0x0f, 0xda, 0xc0, 0x7b, 0xd6, + 0x77, 0x30, 0x93, 0xd4, 0xeb, 0xd9, 0xe4, 0x70, 0x5e, 0x3b, 0x76, 0x1d, 0xce, 0x75, 0xba, 0x77, + 0x99, 0x85, 0x6f, 0x4d, 0x19, 0x47, 0xc4, 0x16, 0xbe, 0xb5, 0xe5, 0x05, 0xcc, 0x20, 0xdd, 0x86, + 0x2f, 0xb4, 0xff, 0x7f, 0x0b, 0x8a, 0x65, 0xbf, 0x76, 
0x04, 0x2f, 0xfa, 0x8f, 0x19, 0x2f, 0xfa, + 0x47, 0xb2, 0x6f, 0xfc, 0x5a, 0xae, 0xb2, 0x6f, 0x31, 0xa1, 0xec, 0x3b, 0x93, 0x47, 0xa0, 0xbd, + 0x6a, 0xef, 0x97, 0x8a, 0x30, 0x54, 0xf6, 0x6b, 0x6a, 0x9f, 0xfd, 0xaf, 0xf7, 0xe3, 0xc8, 0x93, + 0x9b, 0x7d, 0x4a, 0xa3, 0xcc, 0x2c, 0x7a, 0x65, 0xdc, 0x89, 0xef, 0x32, 0x7f, 0x9e, 0x5b, 0xc4, + 0xdd, 0xdc, 0x8a, 0x48, 0x2d, 0xf9, 0x39, 0x47, 0xe7, 0xcf, 0xf3, 0xed, 0x22, 0x8c, 0x25, 0x5a, + 0x47, 0x75, 0x18, 0xa9, 0xeb, 0xaa, 0x24, 0xb1, 0x4e, 0xef, 0x4b, 0x0b, 0x25, 0xfc, 0x21, 0xb4, + 0x22, 0x6c, 0x12, 0x47, 0x33, 0x00, 0x9e, 0x6e, 0x15, 0xae, 0x02, 0x15, 0x6b, 0x16, 0xe1, 0x1a, + 0x06, 0x7a, 0x09, 0x86, 0x22, 0xbf, 0xe9, 0xd7, 0xfd, 0xcd, 0xdd, 0x6b, 0x44, 0x46, 0xb6, 0x54, + 0x46, 0xc3, 0x6b, 0x31, 0x08, 0xeb, 0x78, 0xe8, 0x2e, 0x4c, 0x28, 0x22, 0x95, 0x07, 0xa0, 0x5e, + 0x63, 0x62, 0x93, 0xd5, 0x24, 0x45, 0x9c, 0x6e, 0x04, 0xbd, 0x02, 0xa3, 0xcc, 0x7a, 0x99, 0xd5, + 0xbf, 0x46, 0x76, 0x65, 0xc4, 0x63, 0xc6, 0x61, 0xaf, 0x18, 0x10, 0x9c, 0xc0, 0x44, 0xf3, 0x30, + 0xd1, 0x70, 0xc3, 0x44, 0xf5, 0x3e, 0x56, 0x9d, 0x75, 0x60, 0x25, 0x09, 0xc4, 0x69, 0x7c, 0xfb, + 0x57, 0xc5, 0x1c, 0x7b, 0x91, 0xfb, 0xc1, 0x76, 0x7c, 0x7f, 0x6f, 0xc7, 0x6f, 0x59, 0x30, 0x4e, + 0x5b, 0x67, 0x26, 0x99, 0x92, 0x91, 0x52, 0x39, 0x31, 0xac, 0x36, 0x39, 0x31, 0xce, 0xd3, 0x63, + 0xbb, 0xe6, 0xb7, 0x22, 0x21, 0x1d, 0xd5, 0xce, 0x65, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x20, + 0x10, 0x7e, 0xef, 0x3a, 0x1e, 0x09, 0x02, 0x2c, 0xa0, 0x32, 0x65, 0x46, 0x4f, 0x76, 0xca, 0x0c, + 0x1e, 0xf9, 0x5c, 0x58, 0xc1, 0x09, 0x96, 0x56, 0x8b, 0x7c, 0x2e, 0xcd, 0xe3, 0x62, 0x1c, 0xfb, + 0xeb, 0x45, 0x18, 0x2e, 0xfb, 0xb5, 0xd8, 0xb0, 0xe3, 0x45, 0xc3, 0xb0, 0xe3, 0x5c, 0xc2, 0xb0, + 0x63, 0x5c, 0xc7, 0xfd, 0xc0, 0x8c, 0xe3, 0x3b, 0x65, 0xc6, 0xf1, 0x87, 0x16, 0x9b, 0xb5, 0x85, + 0xd5, 0x0a, 0xb7, 0xf0, 0x45, 0x97, 0x60, 0x88, 0x9d, 0x70, 0x2c, 0xd0, 0x82, 0xb4, 0x76, 0x60, + 0x29, 0x2c, 0x57, 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x0b, 0x30, 0x10, 0x12, 0x27, 0xa8, 0x6e, 0xa9, + 0xe3, 0x5d, 0x98, 0x26, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0xe8, 0x76, 0x31, 0xdf, 0x5c, + 0x58, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0x91, 0xb6, 0xed, 0x5b, 0x80, 0xd2, 0xf8, 0x5d, 0xf8, 0x5f, + 0x95, 0xcc, 0xb0, 0xb0, 0x83, 0xa9, 0x90, 0xb0, 0xff, 0x62, 0xc1, 0x68, 0xd9, 0xaf, 0xd1, 0xad, + 0xfb, 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x06, 0xbd, 0x65, 0xbf, + 0xd6, 0x21, 0x74, 0xed, 0x7f, 0x63, 0x41, 0x7f, 0xd9, 0xaf, 0x1d, 0x81, 0xe2, 0xe5, 0x35, 0x53, + 0xf1, 0x72, 0x32, 0x67, 0xdd, 0xe4, 0xe8, 0x5a, 0xfe, 0xa4, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x4d, + 0x39, 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x27, 0x39, + 0xad, 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x16, 0x06, 0x9a, 0x01, 0xd9, 0x71, 0x7d, 0xc1, 0x5f, + 0x6b, 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5, + 0xf7, 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x5b, 0x30, 0xc8, + 0xfe, 0xb3, 0x63, 0xa7, 0xf7, 0xd0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a, + 0x1e, 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b, + 0x58, 0xe8, 0x19, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xdd, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4, + 0xeb, 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x8d, 0x44, 0x6a, 0xba, + 0x22, 0xe7, 0x05, 0xaf, 0xab, 0x52, 0xac, 0x61, 0xa0, 0x2d, 0x38, 0xed, 0x7a, 0x2c, 0x85, 0x14, + 0xa9, 0x6c, 0xbb, 0xcd, 0xb5, 0xeb, 0x95, 0x9b, 0x24, 0x70, 0x37, 0x76, 0xe7, 
0x9c, 0xea, 0x36, + 0xf1, 0x64, 0x42, 0xfc, 0xc7, 0x45, 0x17, 0x4f, 0x2f, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0x21, 0x9b, + 0x6e, 0xc7, 0x80, 0x38, 0x0d, 0x21, 0x13, 0xe0, 0xe9, 0x67, 0x58, 0x09, 0x16, 0x10, 0xfb, 0x05, + 0xb6, 0x27, 0x6e, 0x54, 0xd0, 0xd3, 0xc6, 0xf1, 0x72, 0x42, 0x3f, 0x5e, 0x0e, 0xf6, 0x4a, 0x7d, + 0x37, 0x2a, 0x5a, 0x7c, 0xa0, 0xcb, 0x70, 0xbc, 0xec, 0xd7, 0xca, 0x7e, 0x10, 0x2d, 0xf9, 0xc1, + 0x1d, 0x27, 0xa8, 0xc9, 0x25, 0x58, 0x92, 0x11, 0x92, 0xe8, 0x19, 0xdb, 0xcb, 0x4f, 0x20, 0x23, + 0xfa, 0xd1, 0x0b, 0x8c, 0xab, 0x3b, 0xa4, 0x43, 0x6a, 0x95, 0xf1, 0x17, 0x2a, 0x51, 0xdb, 0x15, + 0x27, 0x22, 0xe8, 0x06, 0x8c, 0x54, 0xf5, 0xab, 0x56, 0x54, 0x7f, 0x4a, 0x5e, 0x76, 0xc6, 0x3d, + 0x9c, 0x79, 0x37, 0x9b, 0xf5, 0xed, 0x6f, 0x5a, 0xa2, 0x15, 0x2e, 0xad, 0xe0, 0x76, 0xaf, 0x9d, + 0xcf, 0xdc, 0x79, 0x98, 0x08, 0xf4, 0x2a, 0x9a, 0xfd, 0xd8, 0x71, 0x9e, 0xf9, 0x26, 0x01, 0xc4, + 0x69, 0x7c, 0xf4, 0x49, 0x38, 0x65, 0x14, 0x4a, 0x55, 0xba, 0x96, 0x7f, 0x9a, 0xc9, 0x73, 0x70, + 0x1e, 0x12, 0xce, 0xaf, 0x6f, 0xff, 0x20, 0x9c, 0x48, 0x7e, 0x97, 0x90, 0xb0, 0xdc, 0xe7, 0xd7, + 0x15, 0x0e, 0xf7, 0x75, 0xf6, 0x4b, 0x30, 0x41, 0x9f, 0xde, 0x8a, 0x8d, 0x64, 0xf3, 0xd7, 0x39, + 0x08, 0xd5, 0x6f, 0x0e, 0xb0, 0x6b, 0x30, 0x91, 0x7d, 0x0d, 0x7d, 0x1a, 0x46, 0x43, 0xc2, 0x22, + 0xaf, 0x49, 0xc9, 0x5e, 0x1b, 0x6f, 0xf2, 0xca, 0xa2, 0x8e, 0xc9, 0x5f, 0x2f, 0x66, 0x19, 0x4e, + 0x50, 0x43, 0x0d, 0x18, 0xbd, 0xe3, 0x7a, 0x35, 0xff, 0x4e, 0x28, 0xe9, 0x0f, 0xe4, 0xab, 0x09, + 0x6e, 0x71, 0xcc, 0x44, 0x1f, 0x8d, 0xe6, 0x6e, 0x19, 0xc4, 0x70, 0x82, 0x38, 0x3d, 0x6a, 0x82, + 0x96, 0x37, 0x1b, 0xae, 0x87, 0x24, 0x10, 0x71, 0xe1, 0xd8, 0x51, 0x83, 0x65, 0x21, 0x8e, 0xe1, + 0xf4, 0xa8, 0x61, 0x7f, 0x98, 0x3b, 0x3a, 0x3b, 0xcb, 0xc4, 0x51, 0x83, 0x55, 0x29, 0xd6, 0x30, + 0xe8, 0x51, 0xcc, 0xfe, 0xad, 0xfa, 0x1e, 0xf6, 0xfd, 0x48, 0x1e, 0xde, 0x2c, 0x55, 0xa5, 0x56, + 0x8e, 0x0d, 0xac, 0x9c, 0x28, 0x74, 0x3d, 0x87, 0x8d, 0x42, 0x87, 0xa2, 0x36, 0x1e, 0xf8, 0x3c, + 0x1a, 0xf2, 0xe5, 0x76, 0x1e, 0xf8, 0x07, 0xf7, 0xe5, 0x9d, 0x4f, 0x79, 0x81, 0x0d, 0x31, 0x40, + 0xbd, 0x3c, 0xcc, 0x1e, 0x53, 0x64, 0x56, 0xf8, 0xe8, 0x48, 0x18, 0x5a, 0x84, 0xfe, 0x70, 0x37, + 0xac, 0x46, 0xf5, 0xb0, 0x5d, 0x3a, 0xd2, 0x0a, 0x43, 0xd1, 0xb2, 0x61, 0xf3, 0x2a, 0x58, 0xd6, + 0x45, 0x55, 0x98, 0x14, 0x14, 0xe7, 0xb7, 0x1c, 0x4f, 0x25, 0x49, 0xe4, 0x16, 0x8b, 0x97, 0xf6, + 0xf7, 0x4a, 0x93, 0xa2, 0x65, 0x1d, 0x7c, 0xb0, 0x57, 0xa2, 0x5b, 0x32, 0x03, 0x82, 0xb3, 0xa8, + 0xf1, 0x25, 0x5f, 0xad, 0xfa, 0x8d, 0x66, 0x39, 0xf0, 0x37, 0xdc, 0x3a, 0x69, 0xa7, 0x0c, 0xae, + 0x18, 0x98, 0x62, 0xc9, 0x1b, 0x65, 0x38, 0x41, 0x0d, 0xdd, 0x86, 0x31, 0xa7, 0xd9, 0x9c, 0x0d, + 0x1a, 0x7e, 0x20, 0x1b, 0x18, 0xca, 0xd7, 0x2a, 0xcc, 0x9a, 0xa8, 0x3c, 0x47, 0x62, 0xa2, 0x10, + 0x27, 0x09, 0xd2, 0x81, 0x12, 0x1b, 0xcd, 0x18, 0xa8, 0x91, 0x78, 0xa0, 0xc4, 0xbe, 0xcc, 0x18, + 0xa8, 0x0c, 0x08, 0xce, 0xa2, 0x66, 0xff, 0x00, 0x63, 0xfc, 0x2b, 0xee, 0xa6, 0xc7, 0x9c, 0xe3, + 0x50, 0x03, 0x46, 0x9a, 0xec, 0xd8, 0x17, 0xf9, 0xcb, 0xc4, 0x51, 0xf1, 0x62, 0x97, 0xc2, 0xcb, + 0x3b, 0x2c, 0x03, 0xab, 0x61, 0xc4, 0x5a, 0xd6, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0x8b, 0xd3, 0x8c, + 0x75, 0xac, 0x70, 0x89, 0x64, 0xbf, 0x70, 0x55, 0x14, 0x32, 0x88, 0xe9, 0x7c, 0xd9, 0x7f, 0xbc, + 0xbe, 0x84, 0xbb, 0x23, 0x96, 0x75, 0xd1, 0xa7, 0x60, 0x94, 0x3e, 0xe9, 0x15, 0xfb, 0x16, 0x4e, + 0x1d, 0xcb, 0x8f, 0x81, 0xa5, 0xb0, 0xf4, 0xdc, 0x86, 0x7a, 0x65, 0x9c, 0x20, 0x86, 0xde, 0x62, + 0x76, 0x9d, 0x92, 0x74, 0xa1, 0x1b, 0xd2, 0xba, 0x09, 0xa7, 0x24, 0xab, 0x11, 0x41, 0x2d, 0x98, + 0x4c, 
0x67, 0x70, 0x0e, 0xa7, 0xec, 0xfc, 0xb7, 0x51, 0x3a, 0x09, 0x73, 0x9c, 0x84, 0x2e, 0x0d, + 0x0b, 0x71, 0x16, 0x7d, 0x74, 0x3d, 0x99, 0x5f, 0xb7, 0x68, 0x68, 0x0d, 0x52, 0x39, 0x76, 0x47, + 0xda, 0xa6, 0xd6, 0xdd, 0x84, 0x33, 0x5a, 0x8a, 0xd2, 0x2b, 0x81, 0xc3, 0xec, 0x8a, 0x5c, 0x76, + 0x1b, 0x69, 0x4c, 0xed, 0xa3, 0xfb, 0x7b, 0xa5, 0x33, 0x6b, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xe8, + 0x06, 0x1c, 0xe7, 0x11, 0x5c, 0x16, 0x88, 0x53, 0xab, 0xbb, 0x9e, 0xe2, 0x9a, 0xf9, 0xd9, 0x75, + 0x6a, 0x7f, 0xaf, 0x74, 0x7c, 0x36, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0xaf, 0xc1, 0x60, 0xcd, 0x93, + 0xa7, 0x6c, 0x9f, 0x91, 0x05, 0x76, 0x70, 0x61, 0xb5, 0xa2, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a, + 0x68, 0x93, 0xab, 0xad, 0x94, 0xac, 0xb1, 0x3f, 0x15, 0xd8, 0x33, 0x29, 0x8e, 0x37, 0x42, 0x22, + 0x70, 0x7d, 0xad, 0x72, 0xb9, 0x33, 0xa2, 0x25, 0x18, 0x84, 0xd1, 0x9b, 0x80, 0x44, 0xb6, 0xa1, + 0xd9, 0x2a, 0x4b, 0x8e, 0xa7, 0xd9, 0x92, 0x2a, 0x11, 0x42, 0x25, 0x85, 0x81, 0x33, 0x6a, 0xa1, + 0xab, 0xf4, 0x78, 0xd4, 0x4b, 0xc5, 0xf1, 0xab, 0x72, 0x8d, 0x2f, 0x90, 0x66, 0x40, 0x98, 0xf9, + 0xa3, 0x49, 0x11, 0x27, 0xea, 0xa1, 0x1a, 0x9c, 0x76, 0x5a, 0x91, 0xcf, 0x34, 0x82, 0x26, 0xea, + 0x9a, 0xbf, 0x4d, 0x3c, 0xa6, 0x8c, 0x1f, 0x60, 0x01, 0x43, 0x4f, 0xcf, 0xb6, 0xc1, 0xc3, 0x6d, + 0xa9, 0xd0, 0xe7, 0x14, 0x1d, 0x0b, 0x4d, 0x59, 0x67, 0x78, 0x77, 0x73, 0x0d, 0xb6, 0xc4, 0x40, + 0x2f, 0xc1, 0xd0, 0x96, 0x1f, 0x46, 0xab, 0x24, 0xba, 0xe3, 0x07, 0xdb, 0x22, 0xbd, 0x41, 0x9c, + 0x52, 0x26, 0x06, 0x61, 0x1d, 0x0f, 0x3d, 0x05, 0xfd, 0xcc, 0x54, 0x6c, 0x79, 0x81, 0xdd, 0xb5, + 0x03, 0xf1, 0x19, 0x73, 0x95, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xcb, 0xe5, 0x79, 0x76, 0x1c, 0x27, + 0x50, 0x97, 0xcb, 0xf3, 0x58, 0xc2, 0xe9, 0x72, 0x0d, 0xb7, 0x9c, 0x80, 0x94, 0x03, 0xbf, 0x4a, + 0x42, 0x2d, 0x91, 0xd1, 0x23, 0x3c, 0x79, 0x03, 0x5d, 0xae, 0x95, 0x2c, 0x04, 0x9c, 0x5d, 0x0f, + 0x91, 0x74, 0x7a, 0xde, 0xd1, 0x7c, 0x55, 0x69, 0x9a, 0x1d, 0xec, 0x32, 0x43, 0xaf, 0x07, 0xe3, + 0x2a, 0x31, 0x30, 0x4f, 0xd7, 0x10, 0x4e, 0x8d, 0xb1, 0xb5, 0xdd, 0x7d, 0xae, 0x07, 0xa5, 0x7c, + 0x5e, 0x4e, 0x50, 0xc2, 0x29, 0xda, 0x46, 0x44, 0xda, 0xf1, 0x8e, 0x11, 0x69, 0x2f, 0xc2, 0x60, + 0xd8, 0xba, 0x5d, 0xf3, 0x1b, 0x8e, 0xeb, 0x31, 0x8b, 0x1b, 0xed, 0xe1, 0x5e, 0x91, 0x00, 0x1c, + 0xe3, 0xa0, 0x25, 0x18, 0x70, 0xa4, 0x66, 0x19, 0xe5, 0x07, 0xdb, 0x53, 0xfa, 0x64, 0x1e, 0x7f, + 0x4a, 0xea, 0x92, 0x55, 0x5d, 0xf4, 0x2a, 0x8c, 0x88, 0x80, 0x1e, 0x22, 0x97, 0xfe, 0xa4, 0xe9, + 0xbe, 0x5c, 0xd1, 0x81, 0xd8, 0xc4, 0x45, 0xeb, 0x30, 0x14, 0xf9, 0x75, 0xe6, 0x83, 0x4b, 0xb9, + 0xe4, 0x13, 0xf9, 0x31, 0x71, 0xd7, 0x14, 0x9a, 0xae, 0xf3, 0x50, 0x55, 0xb1, 0x4e, 0x07, 0xad, + 0xf1, 0xf5, 0xce, 0xd2, 0x16, 0x91, 0x50, 0x24, 0x63, 0x3f, 0x93, 0x67, 0x2e, 0xc9, 0xd0, 0xcc, + 0xed, 0x20, 0x6a, 0x62, 0x9d, 0x0c, 0xba, 0x02, 0x13, 0xcd, 0xc0, 0xf5, 0xd9, 0x9a, 0x50, 0x9a, + 0xf2, 0x29, 0x33, 0x49, 0x69, 0x39, 0x89, 0x80, 0xd3, 0x75, 0x58, 0x3c, 0x16, 0x51, 0x38, 0x75, + 0x8a, 0x27, 0x5a, 0xe3, 0x72, 0x10, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb0, 0x93, 0x98, 0x8b, 0xf0, + 0xa6, 0xa6, 0xf3, 0xbd, 0xfc, 0x75, 0x51, 0x1f, 0xe7, 0xfd, 0xd5, 0x5f, 0x1c, 0x53, 0x40, 0x35, + 0x2d, 0xbf, 0x39, 0x7d, 0x41, 0x85, 0x53, 0xa7, 0xdb, 0xd8, 0xeb, 0x26, 0x9e, 0xcb, 0x31, 0x43, + 0x60, 0x14, 0x87, 0x38, 0x41, 0x13, 0xbd, 0x01, 0xe3, 0x22, 0x58, 0x41, 0x3c, 0x4c, 0x67, 0x62, + 0x9f, 0x26, 0x9c, 0x80, 0xe1, 0x14, 0x36, 0x4f, 0x74, 0xe6, 0xdc, 0xae, 0x13, 0x71, 0xf4, 0x5d, + 0x77, 0xbd, 0xed, 0x70, 0xea, 0x2c, 0x3b, 0x1f, 0x44, 0xa2, 0xb3, 0x24, 0x14, 0x67, 0xd4, 0x40, + 0x6b, 0x30, 0xde, 0x0c, 0x08, 
0x69, 0xb0, 0x77, 0x92, 0xb8, 0xcf, 0x4a, 0x3c, 0x1c, 0x11, 0xed, + 0x49, 0x39, 0x01, 0x3b, 0xc8, 0x28, 0xc3, 0x29, 0x0a, 0xe8, 0x0e, 0x0c, 0xf8, 0x3b, 0x24, 0xd8, + 0x22, 0x4e, 0x6d, 0xea, 0x5c, 0x1b, 0x4f, 0x3b, 0x71, 0xb9, 0xdd, 0x10, 0xb8, 0x09, 0x43, 0x24, + 0x59, 0xdc, 0xd9, 0x10, 0x49, 0x36, 0x86, 0xfe, 0x0b, 0x0b, 0x4e, 0x49, 0xd5, 0x5e, 0xa5, 0x49, + 0x47, 0x7d, 0xde, 0xf7, 0xc2, 0x28, 0xe0, 0x01, 0x74, 0x1e, 0xcd, 0x0f, 0x2a, 0xb3, 0x96, 0x53, + 0x49, 0x69, 0x11, 0x4e, 0xe5, 0x61, 0x84, 0x38, 0xbf, 0x45, 0xfa, 0xb2, 0x0f, 0x49, 0x24, 0x0f, + 0xa3, 0xd9, 0x70, 0xe9, 0xad, 0x85, 0xd5, 0xa9, 0xc7, 0x78, 0xf4, 0x1f, 0xba, 0x19, 0x2a, 0x49, + 0x20, 0x4e, 0xe3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xa9, 0xc7, 0xdb, 0xa4, 0xc4, 0xf7, 0x6b, 0x37, + 0x2a, 0xdc, 0x20, 0xf5, 0x46, 0x05, 0x17, 0xfc, 0x50, 0x26, 0x1b, 0xa3, 0xcf, 0xd9, 0x70, 0xea, + 0x09, 0x2e, 0x73, 0x96, 0xc9, 0xc6, 0x58, 0x21, 0x8e, 0xe1, 0x68, 0x0b, 0xc6, 0x42, 0x43, 0x6c, + 0x10, 0x4e, 0x9d, 0x67, 0x23, 0xf5, 0x44, 0xde, 0xa4, 0x19, 0xd8, 0x5a, 0x16, 0x20, 0x93, 0x0a, + 0x4e, 0x92, 0xe5, 0xbb, 0x4b, 0x13, 0x5c, 0x84, 0x53, 0x4f, 0x76, 0xd8, 0x5d, 0x1a, 0xb2, 0xbe, + 0xbb, 0x74, 0x1a, 0x38, 0x41, 0x13, 0xad, 0xeb, 0x6e, 0x8c, 0x17, 0xf2, 0x8d, 0x1b, 0x33, 0x1d, + 0x18, 0x47, 0xf2, 0x9c, 0x17, 0xa7, 0xbf, 0x0f, 0x26, 0x52, 0x5c, 0xd8, 0x61, 0x7c, 0x3a, 0xa6, + 0xb7, 0x61, 0xc4, 0x58, 0xe9, 0x0f, 0xd5, 0xe4, 0xe7, 0xcf, 0x06, 0x61, 0x50, 0x99, 0x62, 0xa0, + 0x8b, 0xa6, 0x95, 0xcf, 0xa9, 0xa4, 0x95, 0xcf, 0x40, 0xd9, 0xaf, 0x19, 0x86, 0x3d, 0x6b, 0x19, + 0xb1, 0x72, 0xf3, 0xce, 0xd5, 0xee, 0x1d, 0xcf, 0x34, 0xf5, 0x52, 0xb1, 0x6b, 0x73, 0xa1, 0x9e, + 0xb6, 0x1a, 0xab, 0x2b, 0x30, 0xe1, 0xf9, 0x8c, 0xf5, 0x27, 0x35, 0xc9, 0xd7, 0x31, 0xf6, 0x6d, + 0x50, 0x8f, 0xe5, 0x96, 0x40, 0xc0, 0xe9, 0x3a, 0xb4, 0x41, 0xce, 0x7f, 0x25, 0x55, 0x64, 0x9c, + 0x3d, 0xc3, 0x02, 0x4a, 0x9f, 0x9c, 0xfc, 0x57, 0x38, 0x35, 0x9e, 0xff, 0xe4, 0xe4, 0x95, 0x92, + 0x3c, 0x5e, 0x28, 0x79, 0x3c, 0xa6, 0x11, 0x6a, 0xfa, 0xb5, 0xe5, 0xb2, 0x78, 0x3d, 0x68, 0x51, + 0xec, 0x6b, 0xcb, 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x23, 0x27, 0x6f, 0xf7, + 0x2f, 0x97, 0xb5, 0x1c, 0xaa, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc4, 0x9f, 0x3e, 0xb9, 0x98, 0xc4, + 0xbf, 0xff, 0x3e, 0x25, 0xfe, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0xbb, 0x70, 0xdc, 0x78, 0xe6, 0x2a, + 0x4f, 0x3c, 0xc8, 0x37, 0x06, 0x48, 0x20, 0xcf, 0x9d, 0x11, 0x9d, 0x3e, 0xbe, 0x9c, 0x45, 0x09, + 0x67, 0x37, 0x80, 0xea, 0x30, 0x51, 0x4d, 0xb5, 0x3a, 0xd0, 0x7d, 0xab, 0x6a, 0x5d, 0xa4, 0x5b, + 0x4c, 0x13, 0x46, 0xaf, 0xc2, 0xc0, 0xbb, 0x3e, 0x37, 0xdc, 0x13, 0x2f, 0x1e, 0x19, 0x05, 0x66, + 0xe0, 0xad, 0x1b, 0x15, 0x56, 0x7e, 0xb0, 0x57, 0x1a, 0x2a, 0xfb, 0x35, 0xf9, 0x17, 0xab, 0x0a, + 0xe8, 0xc7, 0x2c, 0x98, 0x4e, 0xbf, 0xa3, 0x55, 0xa7, 0x47, 0xba, 0xef, 0xb4, 0x2d, 0x1a, 0x9d, + 0x5e, 0xcc, 0x25, 0x87, 0xdb, 0x34, 0x85, 0x3e, 0x4a, 0xf7, 0x53, 0xe8, 0xde, 0x23, 0x22, 0x01, + 0xfd, 0xa3, 0xf1, 0x7e, 0xa2, 0xa5, 0x07, 0x7b, 0xa5, 0x31, 0x7e, 0xe0, 0xba, 0xf7, 0x54, 0xbc, + 0x7d, 0x5e, 0x01, 0xfd, 0x20, 0x1c, 0x0f, 0xd2, 0x72, 0x6d, 0x22, 0x79, 0xfb, 0xa7, 0xbb, 0x39, + 0xbc, 0x93, 0x13, 0x8e, 0xb3, 0x08, 0xe2, 0xec, 0x76, 0xec, 0xdf, 0xb3, 0x98, 0x3e, 0x43, 0x74, + 0x8b, 0x84, 0xad, 0x7a, 0x74, 0x04, 0xc6, 0x72, 0x8b, 0x86, 0x3d, 0xc1, 0x7d, 0x5b, 0xbb, 0xfd, + 0x2f, 0x16, 0xb3, 0x76, 0x3b, 0x42, 0xbf, 0xbd, 0xb7, 0x60, 0x20, 0x12, 0xad, 0x89, 0xae, 0xe7, + 0x59, 0xe6, 0xc8, 0x4e, 0x31, 0x8b, 0x3f, 0xf5, 0x76, 0x92, 0xa5, 0x58, 0x91, 0xb1, 0xff, 0x47, + 0x3e, 0x03, 0x12, 0x72, 0x04, 0x6a, 0xdb, 0x05, 0x53, 
0x6d, 0x5b, 0xea, 0xf0, 0x05, 0x39, 0xea, + 0xdb, 0xff, 0xc1, 0xec, 0x37, 0x93, 0x19, 0xbe, 0xdf, 0xcd, 0x2c, 0xed, 0x2f, 0x5a, 0x00, 0x71, + 0x82, 0x93, 0x2e, 0x12, 0x4e, 0x5f, 0xa6, 0xaf, 0x25, 0x3f, 0xf2, 0xab, 0x7e, 0x5d, 0xa8, 0x8d, + 0x4e, 0xc7, 0x9a, 0x63, 0x5e, 0x7e, 0xa0, 0xfd, 0xc6, 0x0a, 0x1b, 0x95, 0x64, 0xc4, 0xe1, 0x62, + 0x6c, 0xcb, 0x60, 0x44, 0x1b, 0xfe, 0x8a, 0x05, 0xc7, 0xb2, 0x9c, 0x40, 0xe8, 0xdb, 0x9b, 0x4b, + 0x4f, 0x95, 0x09, 0xac, 0x9a, 0xcd, 0x9b, 0xa2, 0x1c, 0x2b, 0x8c, 0xae, 0x33, 0x79, 0x1f, 0x2e, + 0xf9, 0xc6, 0x0d, 0x18, 0x29, 0x07, 0x44, 0xe3, 0x2f, 0x5e, 0x8f, 0xf3, 0x02, 0x0d, 0xce, 0x3d, + 0x7b, 0xe8, 0xc8, 0x4a, 0xf6, 0x57, 0x0b, 0x70, 0x8c, 0x1b, 0x72, 0xcd, 0xee, 0xf8, 0x6e, 0xad, + 0xec, 0xd7, 0x84, 0xeb, 0xee, 0xdb, 0x30, 0xdc, 0xd4, 0x44, 0xde, 0xed, 0x02, 0xc9, 0xeb, 0xa2, + 0xf1, 0x58, 0x48, 0xa7, 0x97, 0x62, 0x83, 0x16, 0xaa, 0xc1, 0x30, 0xd9, 0x71, 0xab, 0xca, 0x1a, + 0xa8, 0x70, 0xe8, 0x4b, 0x5a, 0xb5, 0xb2, 0xa8, 0xd1, 0xc1, 0x06, 0xd5, 0xae, 0xcd, 0xaf, 0x35, + 0x16, 0xad, 0xa7, 0x83, 0x05, 0xd0, 0xcf, 0x5a, 0x70, 0x32, 0x27, 0xec, 0x3c, 0x6d, 0xee, 0x0e, + 0x33, 0x99, 0x13, 0xcb, 0x56, 0x35, 0xc7, 0x0d, 0xe9, 0xb0, 0x80, 0xa2, 0x8f, 0x03, 0x34, 0xe3, + 0x94, 0x9b, 0x1d, 0xe2, 0x73, 0x1b, 0x91, 0x7a, 0xb5, 0xa0, 0xab, 0x2a, 0x33, 0xa7, 0x46, 0xcb, + 0xfe, 0x4a, 0x0f, 0xf4, 0x32, 0xc3, 0x2b, 0x54, 0x86, 0xfe, 0x2d, 0x1e, 0x13, 0xb0, 0xed, 0xbc, + 0x51, 0x5c, 0x19, 0x64, 0x30, 0x9e, 0x37, 0xad, 0x14, 0x4b, 0x32, 0x68, 0x05, 0x26, 0x79, 0x3a, + 0xd1, 0xfa, 0x02, 0xa9, 0x3b, 0xbb, 0x52, 0x9a, 0x5c, 0x60, 0x9f, 0xaa, 0xa4, 0xea, 0xcb, 0x69, + 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0x0e, 0xa3, 0xf4, 0x75, 0xef, 0xb7, 0x22, 0x49, 0x89, 0xe7, 0xef, + 0x54, 0x0f, 0x9e, 0x35, 0x03, 0x8a, 0x13, 0xd8, 0xe8, 0x55, 0x18, 0x69, 0xa6, 0xe4, 0xe6, 0xbd, + 0xb1, 0x80, 0xc9, 0x94, 0x95, 0x9b, 0xb8, 0xcc, 0x0f, 0xa4, 0xc5, 0xbc, 0x5e, 0xd6, 0xb6, 0x02, + 0x12, 0x6e, 0xf9, 0xf5, 0x1a, 0xe3, 0x80, 0x7b, 0x35, 0x3f, 0x90, 0x04, 0x1c, 0xa7, 0x6a, 0x50, + 0x2a, 0x1b, 0x8e, 0x5b, 0x6f, 0x05, 0x24, 0xa6, 0xd2, 0x67, 0x52, 0x59, 0x4a, 0xc0, 0x71, 0xaa, + 0x46, 0x67, 0x85, 0x40, 0xff, 0x83, 0x51, 0x08, 0xd8, 0xbf, 0x5c, 0x00, 0x63, 0x6a, 0xbf, 0x87, + 0xf3, 0x8a, 0xbe, 0x06, 0x3d, 0x9b, 0x41, 0xb3, 0x2a, 0x8c, 0x0c, 0x33, 0xbf, 0xec, 0x0a, 0x2e, + 0xcf, 0xeb, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0xe3, 0xe5, 0xc0, 0xa7, 0x97, 0x9c, + 0x0c, 0x1b, 0xaa, 0xdc, 0xad, 0xfa, 0xe5, 0x1b, 0xbb, 0x4d, 0x80, 0x6d, 0xe1, 0x33, 0xc2, 0x29, + 0x18, 0xf6, 0x78, 0x15, 0xf1, 0xc2, 0x96, 0x54, 0xd0, 0x25, 0x18, 0x12, 0xa9, 0x1e, 0x99, 0x57, + 0x10, 0xdf, 0x4c, 0xcc, 0x7e, 0x70, 0x21, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xe3, 0x05, 0x98, 0xcc, + 0x70, 0xeb, 0xe4, 0xd7, 0xc8, 0xa6, 0x1b, 0x46, 0xc1, 0x6e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b, + 0x0c, 0x7a, 0x56, 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0xb7, 0x29, 0x01, 0x3d, 0xdc, 0xe5, 0x44, + 0xaf, 0xed, 0x56, 0x48, 0x64, 0x2c, 0x7f, 0x75, 0x6d, 0x33, 0x63, 0x03, 0x06, 0xa1, 0x4f, 0xc0, + 0x4d, 0xa5, 0x41, 0xd7, 0x9e, 0x80, 0x5c, 0x87, 0xce, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f, + 0x12, 0x0f, 0xc5, 0x38, 0xc6, 0x33, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0xa7, 0x72, 0x1d, + 0xbd, 0x69, 0xd7, 0x1b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xcc, 0xe4, 0x71, 0x9d, 0x49, 0x73, 0x6b, + 0x45, 0x94, 0x63, 0x85, 0x81, 0xce, 0x43, 0x2f, 0x93, 0xb5, 0x27, 0xd3, 0xbc, 0xe1, 0xb9, 0x05, + 0x1e, 0x31, 0x93, 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf6, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x3d, + 0x79, 0xa1, 0xd0, 0xee, 0xfa, 0x7e, 0x1d, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 
0xc2, 0x12, 0x11, + 0x3b, 0x35, 0x3f, 0xd4, 0x06, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0xbb, 0x81, 0xeb, 0x6d, 0x26, 0x2d, + 0x54, 0xaf, 0xf1, 0x62, 0x2c, 0xe1, 0x66, 0x56, 0xf3, 0xfe, 0x07, 0x91, 0xd5, 0x5c, 0x5f, 0x01, + 0x03, 0x1d, 0xd9, 0x93, 0x9f, 0x28, 0xc2, 0x18, 0x9e, 0x5b, 0xf8, 0x60, 0x22, 0xd6, 0xd3, 0x13, + 0xf1, 0x20, 0x92, 0x7f, 0x1f, 0x6e, 0x36, 0x7e, 0xdb, 0x82, 0x31, 0x96, 0x70, 0x52, 0x44, 0x69, + 0x71, 0x7d, 0xef, 0x08, 0x9e, 0x02, 0x8f, 0x41, 0x6f, 0x40, 0x1b, 0x15, 0x33, 0xa8, 0xf6, 0x38, + 0xeb, 0x09, 0xe6, 0x30, 0x74, 0x1a, 0x7a, 0x58, 0x17, 0xe8, 0xe4, 0x0d, 0xf3, 0x23, 0x78, 0xc1, + 0x89, 0x1c, 0xcc, 0x4a, 0x59, 0xbc, 0x48, 0x4c, 0x9a, 0x75, 0x97, 0x77, 0x3a, 0xb6, 0x84, 0x78, + 0x7f, 0x84, 0x80, 0xc9, 0xec, 0xda, 0x7b, 0x8b, 0x17, 0x99, 0x4d, 0xb2, 0xfd, 0x33, 0xfb, 0x1f, + 0x0a, 0x70, 0x36, 0xb3, 0x5e, 0xd7, 0xf1, 0x22, 0xdb, 0xd7, 0x7e, 0x98, 0xe9, 0xe9, 0x8a, 0x47, + 0x68, 0xff, 0xdf, 0xd3, 0x2d, 0xf7, 0xdf, 0xdb, 0x45, 0x18, 0xc7, 0xcc, 0x21, 0x7b, 0x9f, 0x84, + 0x71, 0xcc, 0xec, 0x5b, 0x8e, 0x98, 0xe0, 0x5f, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0x17, 0xe8, + 0x39, 0xc3, 0x80, 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0xb3, 0x30, 0xd6, + 0x70, 0x3d, 0x7a, 0xf8, 0xec, 0x9a, 0xac, 0xb8, 0x52, 0x91, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47, + 0xae, 0x16, 0xe2, 0x91, 0x7f, 0xdd, 0xab, 0x87, 0xda, 0x75, 0x33, 0xa6, 0x95, 0x88, 0x1a, 0xc5, + 0x8c, 0x70, 0x8f, 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbd, 0x9c, 0x68, 0x38, 0x5b, 0x46, 0x34, 0xfd, + 0x2a, 0x8c, 0xdc, 0xb7, 0x6e, 0xc4, 0xfe, 0x56, 0x11, 0x1e, 0x69, 0xb3, 0xed, 0xf9, 0x59, 0x6f, + 0xcc, 0x81, 0x76, 0xd6, 0xa7, 0xe6, 0xa1, 0x0c, 0xc7, 0x36, 0x5a, 0xf5, 0xfa, 0x2e, 0x73, 0x74, + 0x23, 0x35, 0x89, 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xb6, 0x94, 0x81, 0x83, 0x33, 0x6b, 0xd2, + 0x27, 0x16, 0xbd, 0x49, 0x76, 0x15, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x2b, + 0x30, 0xe1, 0xec, 0x38, 0x2e, 0x4f, 0xef, 0x21, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x26, + 0x11, 0x70, 0xba, 0x0e, 0x7a, 0x13, 0x90, 0x7f, 0x9b, 0x39, 0xcf, 0xd4, 0xae, 0x10, 0x4f, 0x28, + 0xf3, 0xd9, 0xdc, 0x15, 0xe3, 0x23, 0xe1, 0x46, 0x0a, 0x03, 0x67, 0xd4, 0x4a, 0x04, 0x1b, 0xec, + 0xcb, 0x0f, 0x36, 0xd8, 0xfe, 0x5c, 0xec, 0x98, 0x19, 0xf1, 0x1d, 0x18, 0x39, 0xac, 0xb5, 0xf7, + 0x53, 0xd0, 0x1f, 0x88, 0x9c, 0xf3, 0x09, 0xaf, 0x72, 0x99, 0x91, 0x5b, 0xc2, 0xed, 0xff, 0xc7, + 0x02, 0x25, 0x4b, 0x36, 0xe3, 0x8a, 0xbf, 0xca, 0x4c, 0xd7, 0xb9, 0x14, 0x5c, 0x0b, 0x25, 0x76, + 0x5c, 0x33, 0x5d, 0x8f, 0x81, 0xd8, 0xc4, 0xe5, 0xcb, 0x2d, 0x8c, 0x23, 0x58, 0x18, 0x0f, 0x08, + 0xa1, 0x35, 0x54, 0x18, 0xe8, 0x13, 0xd0, 0x5f, 0x73, 0x77, 0xdc, 0x50, 0xc8, 0xd1, 0x0e, 0xad, + 0xb7, 0x8b, 0xbf, 0x6f, 0x81, 0x93, 0xc1, 0x92, 0x9e, 0xfd, 0x53, 0x16, 0x28, 0x75, 0xe7, 0x55, + 0xe2, 0xd4, 0xa3, 0x2d, 0xf4, 0x06, 0x80, 0xa4, 0xa0, 0x64, 0x6f, 0xd2, 0x08, 0x0b, 0xb0, 0x82, + 0x1c, 0x18, 0xff, 0xb0, 0x56, 0x07, 0xbd, 0x0e, 0x7d, 0x5b, 0x8c, 0x96, 0xf8, 0xb6, 0xf3, 0x4a, + 0xd5, 0xc5, 0x4a, 0x0f, 0xf6, 0x4a, 0xc7, 0xcc, 0x36, 0xe5, 0x2d, 0xc6, 0x6b, 0xd9, 0x3f, 0x51, + 0x88, 0xe7, 0xf4, 0xad, 0x96, 0x1f, 0x39, 0x47, 0xc0, 0x89, 0x5c, 0x31, 0x38, 0x91, 0x27, 0xda, + 0xe9, 0x73, 0x59, 0x97, 0x72, 0x39, 0x90, 0x1b, 0x09, 0x0e, 0xe4, 0xc9, 0xce, 0xa4, 0xda, 0x73, + 0x1e, 0xff, 0x93, 0x05, 0x13, 0x06, 0xfe, 0x11, 0x5c, 0x80, 0x4b, 0xe6, 0x05, 0xf8, 0x68, 0xc7, + 0x6f, 0xc8, 0xb9, 0xf8, 0x7e, 0xb4, 0x98, 0xe8, 0x3b, 0xbb, 0xf0, 0xde, 0x85, 0x9e, 0x2d, 0x27, + 0xa8, 0x89, 0x77, 0xfd, 0xc5, 0xae, 0xc6, 0x7a, 0xe6, 0xaa, 0x13, 0x08, 0x03, 0x8e, 0x67, 0xe5, + 0xa8, 
0xd3, 0xa2, 0x8e, 0xc6, 0x1b, 0xac, 0x29, 0x74, 0x19, 0xfa, 0xc2, 0xaa, 0xdf, 0x54, 0x7e, + 0x80, 0x2c, 0x5d, 0x78, 0x85, 0x95, 0x1c, 0xec, 0x95, 0x90, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f, + 0xbd, 0x0d, 0x23, 0xec, 0x97, 0xb2, 0xa6, 0x2c, 0xe6, 0x4b, 0x60, 0x2a, 0x3a, 0x22, 0x37, 0x35, + 0x36, 0x8a, 0xb0, 0x49, 0x6a, 0x7a, 0x13, 0x06, 0xd5, 0x67, 0x3d, 0x54, 0x6d, 0xfd, 0xff, 0x59, + 0x84, 0xc9, 0x8c, 0x35, 0x87, 0x42, 0x63, 0x26, 0x2e, 0x75, 0xb9, 0x54, 0xdf, 0xe3, 0x5c, 0x84, + 0xec, 0x01, 0x58, 0x13, 0x6b, 0xab, 0xeb, 0x46, 0xd7, 0x43, 0x92, 0x6c, 0x94, 0x16, 0x75, 0x6e, + 0x94, 0x36, 0x76, 0x64, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0x87, 0x3d, 0x70, + 0x2c, 0xcb, 0xc4, 0x04, 0x7d, 0x0e, 0xfa, 0x98, 0xa3, 0x9a, 0x14, 0x9c, 0xbd, 0xd8, 0xad, 0x71, + 0xca, 0x0c, 0xf3, 0x75, 0x13, 0xa1, 0x69, 0x67, 0xe4, 0x71, 0xc4, 0x0b, 0x3b, 0x0e, 0xb3, 0x68, + 0x93, 0x85, 0x8c, 0x12, 0xb7, 0xa7, 0x3c, 0x3e, 0x3e, 0xd2, 0x75, 0x07, 0xc4, 0xfd, 0x1b, 0x26, + 0x2c, 0xb5, 0x64, 0x71, 0x67, 0x4b, 0x2d, 0xd9, 0x32, 0x5a, 0x86, 0xbe, 0x2a, 0x37, 0x01, 0x2a, + 0x76, 0x3e, 0xc2, 0xb8, 0xfd, 0x8f, 0x3a, 0x80, 0x85, 0xdd, 0x8f, 0x20, 0x30, 0xed, 0xc2, 0x90, + 0x36, 0x30, 0x0f, 0x75, 0xf1, 0x6c, 0xd3, 0x8b, 0x4f, 0x1b, 0x82, 0x87, 0xba, 0x80, 0x7e, 0x46, + 0xbb, 0xfb, 0xc5, 0x79, 0xf0, 0x61, 0x83, 0x77, 0x3a, 0x9d, 0x70, 0x1f, 0x4c, 0xec, 0x2b, 0xc6, + 0x4b, 0x55, 0xcc, 0x98, 0xee, 0xb9, 0xa9, 0xa1, 0xcc, 0x0b, 0xbf, 0x7d, 0x1c, 0x77, 0xfb, 0x67, + 0x2d, 0x48, 0x38, 0x78, 0x29, 0x71, 0xa7, 0x95, 0x2b, 0xee, 0x3c, 0x07, 0x3d, 0x81, 0x5f, 0x27, + 0xc9, 0xd4, 0xfb, 0xd8, 0xaf, 0x13, 0xcc, 0x20, 0x14, 0x23, 0x8a, 0x85, 0x58, 0xc3, 0xfa, 0x03, + 0x5d, 0x3c, 0xbd, 0x1f, 0x83, 0xde, 0x3a, 0xd9, 0x21, 0xf5, 0x64, 0x86, 0xd4, 0xeb, 0xb4, 0x10, + 0x73, 0x98, 0xfd, 0xdb, 0x3d, 0x70, 0xa6, 0x6d, 0x64, 0x39, 0xca, 0x60, 0x6e, 0x3a, 0x11, 0xb9, + 0xe3, 0xec, 0x26, 0x33, 0x03, 0x5e, 0xe1, 0xc5, 0x58, 0xc2, 0x99, 0xb3, 0x35, 0xcf, 0x94, 0x93, + 0x10, 0x0e, 0x8b, 0x04, 0x39, 0x02, 0x6a, 0x0a, 0x1b, 0x8b, 0x0f, 0x42, 0xd8, 0xf8, 0x3c, 0x40, + 0x18, 0xd6, 0xb9, 0x1d, 0x67, 0x4d, 0x78, 0x71, 0xc7, 0x19, 0x95, 0x2a, 0xd7, 0x05, 0x04, 0x6b, + 0x58, 0x68, 0x01, 0xc6, 0x9b, 0x81, 0x1f, 0x71, 0x59, 0xfb, 0x02, 0x37, 0x75, 0xee, 0x35, 0x83, + 0x7a, 0x95, 0x13, 0x70, 0x9c, 0xaa, 0x81, 0x5e, 0x82, 0x21, 0x11, 0xe8, 0xab, 0xec, 0xfb, 0x75, + 0x21, 0xde, 0x53, 0xd6, 0xbf, 0x95, 0x18, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, 0xe0, 0xf7, 0x67, + 0x56, 0xe3, 0x42, 0x7c, 0x0d, 0x2f, 0x91, 0x14, 0x60, 0xa0, 0xab, 0xa4, 0x00, 0xb1, 0xc0, 0x73, + 0xb0, 0x6b, 0x7d, 0x32, 0x74, 0x14, 0x11, 0x7e, 0xad, 0x07, 0x26, 0xc5, 0xc2, 0x79, 0xd8, 0xcb, + 0x65, 0x3d, 0xbd, 0x5c, 0x1e, 0x84, 0x48, 0xf4, 0x83, 0x35, 0x73, 0xd4, 0x6b, 0xe6, 0x27, 0x2d, + 0x30, 0x79, 0x48, 0xf4, 0x9f, 0xe5, 0xa6, 0x56, 0x7d, 0x29, 0x97, 0x27, 0x8d, 0x23, 0x86, 0xbf, + 0xb7, 0x24, 0xab, 0xf6, 0xff, 0x65, 0xc1, 0xa3, 0x1d, 0x29, 0xa2, 0x45, 0x18, 0x64, 0x8c, 0xae, + 0xf6, 0x2e, 0x7e, 0x52, 0xb9, 0x42, 0x48, 0x40, 0x0e, 0xdf, 0x1d, 0xd7, 0x44, 0x8b, 0xa9, 0x1c, + 0xb6, 0x4f, 0x65, 0xe4, 0xb0, 0x3d, 0x6e, 0x0c, 0xcf, 0x7d, 0x26, 0xb1, 0xfd, 0x12, 0xbd, 0x71, + 0x4c, 0x7f, 0xca, 0x8f, 0x18, 0xe2, 0x5c, 0x3b, 0x21, 0xce, 0x45, 0x26, 0xb6, 0x76, 0x87, 0xbc, + 0x01, 0xe3, 0x2c, 0x02, 0x28, 0x73, 0xcc, 0x11, 0x8e, 0x98, 0x85, 0xd8, 0xf8, 0xfe, 0x7a, 0x02, + 0x86, 0x53, 0xd8, 0xf6, 0xdf, 0x15, 0xa1, 0x8f, 0x6f, 0xbf, 0x23, 0x78, 0xf8, 0x3e, 0x03, 0x83, + 0x6e, 0xa3, 0xd1, 0xe2, 0x69, 0x49, 0x7b, 0x63, 0x53, 0xee, 0x65, 0x59, 0x88, 0x63, 0x38, 0x5a, + 0x12, 0x9a, 0x84, 0x36, 0x41, 
0xc6, 0x79, 0xc7, 0x67, 0x16, 0x9c, 0xc8, 0xe1, 0x5c, 0x9c, 0xba, + 0x67, 0x63, 0x9d, 0x03, 0xfa, 0x34, 0x40, 0x18, 0x05, 0xae, 0xb7, 0x49, 0xcb, 0x44, 0x26, 0x8a, + 0xa7, 0xdb, 0x50, 0xab, 0x28, 0x64, 0x4e, 0x33, 0x3e, 0x73, 0x14, 0x00, 0x6b, 0x14, 0xd1, 0x8c, + 0x71, 0xd3, 0x4f, 0x27, 0xe6, 0x0e, 0x38, 0xd5, 0x78, 0xce, 0xa6, 0x5f, 0x86, 0x41, 0x45, 0xbc, + 0x93, 0x5c, 0x71, 0x58, 0x67, 0xd8, 0x3e, 0x06, 0x63, 0x89, 0xbe, 0x1d, 0x4a, 0x2c, 0xf9, 0x3b, + 0x16, 0x8c, 0xf1, 0xce, 0x2c, 0x7a, 0x3b, 0xe2, 0x36, 0xb8, 0x07, 0xc7, 0xea, 0x19, 0xa7, 0xb2, + 0x98, 0xfe, 0xee, 0x4f, 0x71, 0x25, 0x86, 0xcc, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x05, 0xba, 0xe3, + 0xe8, 0xa9, 0xeb, 0xd4, 0x45, 0x34, 0x91, 0x61, 0xbe, 0xdb, 0x78, 0x19, 0x56, 0x50, 0xfb, 0xaf, + 0x2c, 0x98, 0xe0, 0x3d, 0xbf, 0x46, 0x76, 0xd5, 0xd9, 0xf4, 0x9d, 0xec, 0xbb, 0x48, 0x88, 0x5d, + 0xc8, 0x49, 0x88, 0xad, 0x7f, 0x5a, 0xb1, 0xed, 0xa7, 0x7d, 0xd5, 0x02, 0xb1, 0x42, 0x8e, 0x40, + 0xd2, 0xf2, 0x7d, 0xa6, 0xa4, 0x65, 0x3a, 0x7f, 0x13, 0xe4, 0x88, 0x58, 0xfe, 0xc5, 0x82, 0x71, + 0x8e, 0x10, 0x5b, 0x41, 0x7c, 0x47, 0xe7, 0x61, 0xce, 0xfc, 0xa2, 0x4c, 0xb3, 0xd6, 0x6b, 0x64, + 0x77, 0xcd, 0x2f, 0x3b, 0xd1, 0x56, 0xf6, 0x47, 0x19, 0x93, 0xd5, 0xd3, 0x76, 0xb2, 0x6a, 0x72, + 0x03, 0x19, 0x89, 0x17, 0x3b, 0x08, 0x80, 0x0f, 0x9b, 0x78, 0xd1, 0xfe, 0x7b, 0x0b, 0x10, 0x6f, + 0xc6, 0x60, 0xdc, 0x28, 0x3b, 0xc4, 0x4a, 0xb5, 0x8b, 0x2e, 0x3e, 0x9a, 0x14, 0x04, 0x6b, 0x58, + 0x0f, 0x64, 0x78, 0x12, 0xa6, 0x2c, 0xc5, 0xce, 0xa6, 0x2c, 0x87, 0x18, 0xd1, 0xaf, 0xf6, 0x43, + 0xd2, 0x15, 0x13, 0xdd, 0x84, 0xe1, 0xaa, 0xd3, 0x74, 0x6e, 0xbb, 0x75, 0x37, 0x72, 0x49, 0xd8, + 0xce, 0xce, 0x6d, 0x5e, 0xc3, 0x13, 0xc6, 0x07, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x19, 0x80, 0x66, + 0xe0, 0xee, 0xb8, 0x75, 0xb2, 0xc9, 0x04, 0x42, 0x2c, 0x7e, 0x11, 0x37, 0xba, 0x93, 0xa5, 0x58, + 0xc3, 0xc8, 0x08, 0x1b, 0x52, 0x7c, 0xc8, 0x61, 0x43, 0xe0, 0xc8, 0xc2, 0x86, 0xf4, 0x1c, 0x2a, + 0x6c, 0xc8, 0xc0, 0xa1, 0xc3, 0x86, 0xf4, 0x76, 0x15, 0x36, 0x04, 0xc3, 0x09, 0xc9, 0x7b, 0xd2, + 0xff, 0x4b, 0x6e, 0x9d, 0x88, 0x07, 0x07, 0x0f, 0xba, 0x34, 0xbd, 0xbf, 0x57, 0x3a, 0x81, 0x33, + 0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x71, 0x98, 0x72, 0xea, 0x75, 0xff, 0x8e, 0x9a, 0xd4, 0xc5, 0xb0, + 0xea, 0xd4, 0xb9, 0x72, 0xa9, 0x9f, 0x51, 0x3d, 0xbd, 0xbf, 0x57, 0x9a, 0x9a, 0xcd, 0xc1, 0xc1, + 0xb9, 0xb5, 0xd1, 0x6b, 0x30, 0xd8, 0x0c, 0xfc, 0xea, 0x8a, 0xe6, 0x2f, 0x7e, 0x96, 0x0e, 0x60, + 0x59, 0x16, 0x1e, 0xec, 0x95, 0x46, 0xd4, 0x1f, 0x76, 0xe1, 0xc7, 0x15, 0x32, 0x22, 0x72, 0x0c, + 0x3d, 0xec, 0x88, 0x1c, 0xc3, 0x0f, 0x38, 0x22, 0x87, 0xbd, 0x0d, 0x93, 0x15, 0x12, 0xb8, 0x4e, + 0xdd, 0xbd, 0x47, 0x79, 0x72, 0x79, 0x06, 0xae, 0xc1, 0x60, 0x90, 0x38, 0xf5, 0xbb, 0x0a, 0x2e, + 0xae, 0xc9, 0x65, 0xe4, 0x29, 0x1f, 0x13, 0xb2, 0xff, 0xbd, 0x05, 0xfd, 0xc2, 0xbd, 0xf3, 0x08, + 0x38, 0xd3, 0x59, 0x43, 0x25, 0x53, 0xca, 0x9e, 0x14, 0xd6, 0x99, 0x5c, 0x65, 0xcc, 0x72, 0x42, + 0x19, 0xf3, 0x68, 0x3b, 0x22, 0xed, 0xd5, 0x30, 0xff, 0x75, 0x91, 0xbe, 0x10, 0x8c, 0x40, 0x03, + 0x0f, 0x7f, 0x08, 0x56, 0xa1, 0x3f, 0x14, 0x8e, 0xee, 0x85, 0x7c, 0x5f, 0x9e, 0xe4, 0x24, 0xc6, + 0x36, 0x90, 0xc2, 0xb5, 0x5d, 0x12, 0xc9, 0xf4, 0xa0, 0x2f, 0x3e, 0x44, 0x0f, 0xfa, 0x4e, 0xa1, + 0x18, 0x7a, 0x1e, 0x44, 0x28, 0x06, 0xfb, 0x1b, 0xec, 0x76, 0xd6, 0xcb, 0x8f, 0x80, 0x71, 0xbb, + 0x62, 0xde, 0xe3, 0x76, 0x9b, 0x95, 0x25, 0x3a, 0x95, 0xc3, 0xc0, 0xfd, 0x96, 0x05, 0x67, 0x32, + 0xbe, 0x4a, 0xe3, 0xe6, 0x9e, 0x85, 0x01, 0xa7, 0x55, 0x73, 0xd5, 0x5e, 0xd6, 0xb4, 0xc5, 0xb3, + 0xa2, 0x1c, 0x2b, 0x0c, 0x34, 0x0f, 0x13, 0xe4, 0x6e, 
0xd3, 0xe5, 0x6a, 0x78, 0xdd, 0x74, 0xbc, + 0xc8, 0x7d, 0x82, 0x17, 0x93, 0x40, 0x9c, 0xc6, 0x57, 0xe1, 0xdc, 0x8a, 0xb9, 0xe1, 0xdc, 0x7e, + 0xdd, 0x82, 0x21, 0xe5, 0xea, 0xfd, 0xd0, 0x47, 0xfb, 0x0d, 0x73, 0xb4, 0x1f, 0x69, 0x33, 0xda, + 0x39, 0xc3, 0xfc, 0x97, 0x05, 0xd5, 0xdf, 0xb2, 0x1f, 0x44, 0x5d, 0x70, 0x89, 0xf7, 0xef, 0xf6, + 0x72, 0x09, 0x86, 0x9c, 0x66, 0x53, 0x02, 0xa4, 0xfd, 0x22, 0x4b, 0x15, 0x11, 0x17, 0x63, 0x1d, + 0x47, 0x79, 0xe1, 0x14, 0x73, 0xbd, 0x70, 0x6a, 0x00, 0x91, 0x13, 0x6c, 0x92, 0x88, 0x96, 0x09, + 0x73, 0xeb, 0xfc, 0xf3, 0xa6, 0x15, 0xb9, 0xf5, 0x19, 0xd7, 0x8b, 0xc2, 0x28, 0x98, 0x59, 0xf6, + 0xa2, 0x1b, 0x01, 0x7f, 0xa6, 0x6a, 0x41, 0x13, 0x15, 0x2d, 0xac, 0xd1, 0x95, 0x61, 0x4d, 0x58, + 0x1b, 0xbd, 0xa6, 0x21, 0xcc, 0xaa, 0x28, 0xc7, 0x0a, 0xc3, 0x7e, 0x99, 0xdd, 0x3e, 0x6c, 0x4c, + 0x0f, 0x17, 0x0c, 0xf0, 0x1f, 0x86, 0xd5, 0x6c, 0x30, 0x95, 0xf0, 0x82, 0x1e, 0x72, 0xb0, 0xfd, + 0x61, 0x4f, 0x1b, 0xd6, 0xfd, 0x59, 0xe3, 0xb8, 0x84, 0xe8, 0x93, 0x29, 0xe3, 0xa6, 0xe7, 0x3a, + 0xdc, 0x1a, 0x87, 0x30, 0x67, 0x62, 0x79, 0xe3, 0x58, 0x56, 0xad, 0xe5, 0xb2, 0xd8, 0x17, 0x5a, + 0xde, 0x38, 0x01, 0xc0, 0x31, 0x0e, 0x65, 0xd8, 0xd4, 0x9f, 0x70, 0x0a, 0xc5, 0xe1, 0xc5, 0x15, + 0x76, 0x88, 0x35, 0x0c, 0x74, 0x51, 0x08, 0x2d, 0xb8, 0xee, 0xe1, 0x91, 0x84, 0xd0, 0x42, 0x0e, + 0x97, 0x26, 0x69, 0xba, 0x04, 0x43, 0xe4, 0x6e, 0x44, 0x02, 0xcf, 0xa9, 0xd3, 0x16, 0x7a, 0xe3, + 0x88, 0xb8, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x8c, 0x85, 0x5c, 0x96, 0xa7, 0x92, 0x5a, + 0x70, 0x99, 0xe8, 0xd3, 0xca, 0xc9, 0xde, 0x04, 0x1f, 0xb0, 0x22, 0x7e, 0x3a, 0xc9, 0xd0, 0x23, + 0x49, 0x12, 0xe8, 0x75, 0x18, 0xad, 0xfb, 0x4e, 0x6d, 0xce, 0xa9, 0x3b, 0x5e, 0x95, 0x8d, 0xcf, + 0x80, 0x11, 0x7f, 0x72, 0xf4, 0xba, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0x20, 0xea, 0x25, 0x22, 0x11, + 0x8b, 0xe3, 0x6d, 0x92, 0x70, 0x6a, 0x90, 0x7d, 0x15, 0x63, 0x10, 0xaf, 0xe7, 0xe0, 0xe0, 0xdc, + 0xda, 0xe8, 0x32, 0x0c, 0xcb, 0xcf, 0xd7, 0x22, 0xf5, 0xc4, 0x0e, 0x4d, 0x1a, 0x0c, 0x1b, 0x98, + 0x28, 0x84, 0xe3, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x55, 0x11, 0xbe, 0x82, 0x3b, 0x7f, + 0x7f, 0x4c, 0x7a, 0x9a, 0x2e, 0x66, 0x21, 0x1d, 0xec, 0x95, 0x4e, 0x8b, 0x51, 0xcb, 0x84, 0xe3, + 0x6c, 0xda, 0x68, 0x05, 0x26, 0xb9, 0x0d, 0xcc, 0xfc, 0x16, 0xa9, 0x6e, 0xcb, 0x0d, 0xc7, 0xb8, + 0x46, 0xcd, 0xf1, 0xe7, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc0, 0x54, 0xb3, 0x75, 0xbb, + 0xee, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x4c, 0xc8, 0x66, 0x6b, 0xb5, 0x80, 0x84, 0xdc, 0x37, 0x98, + 0x5d, 0xbd, 0x32, 0xba, 0x52, 0x39, 0x07, 0x0f, 0xe7, 0x52, 0x40, 0xf7, 0xe0, 0x78, 0x62, 0x21, + 0x88, 0x30, 0x29, 0xa3, 0xf9, 0x29, 0xad, 0x2a, 0x59, 0x15, 0x44, 0xc4, 0xa1, 0x2c, 0x10, 0xce, + 0x6e, 0x02, 0xbd, 0x02, 0xe0, 0x36, 0x97, 0x9c, 0x86, 0x5b, 0xa7, 0xcf, 0xd1, 0x49, 0xb6, 0x46, + 0xe8, 0xd3, 0x04, 0x96, 0xcb, 0xb2, 0x94, 0x9e, 0xcd, 0xe2, 0xdf, 0x2e, 0xd6, 0xb0, 0xd1, 0x75, + 0x18, 0x15, 0xff, 0x76, 0xc5, 0x94, 0x4e, 0xa8, 0xec, 0xa7, 0xa3, 0xb2, 0x86, 0x9a, 0xc7, 0x44, + 0x09, 0x4e, 0xd4, 0x45, 0x9b, 0x70, 0x46, 0xa6, 0x5e, 0xd5, 0xd7, 0xa7, 0x9c, 0x83, 0x90, 0xe5, + 0x91, 0x1a, 0xe0, 0x3e, 0x45, 0xb3, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xf4, 0x5e, 0xd7, 0x97, 0x39, + 0xf7, 0x18, 0x3f, 0x1e, 0x47, 0xf1, 0xbc, 0x9e, 0x04, 0xe2, 0x34, 0x3e, 0xf2, 0xe1, 0xb8, 0xeb, + 0x65, 0xad, 0xea, 0x13, 0x8c, 0xd0, 0x47, 0xb9, 0xb3, 0x7c, 0xfb, 0x15, 0x9d, 0x09, 0xc7, 0xd9, + 0x74, 0xd1, 0x32, 0x4c, 0x46, 0xbc, 0x60, 0xc1, 0x0d, 0x79, 0x9a, 0x1a, 0xfa, 0xec, 0x3b, 0xc9, + 0x9a, 0x3b, 0x49, 0x57, 0xf3, 0x5a, 0x1a, 0x8c, 0xb3, 0xea, 0xbc, 0x37, 0x03, 
0xd0, 0x6f, 0x5a, + 0xb4, 0xb6, 0xc6, 0xe8, 0xa3, 0xcf, 0xc0, 0xb0, 0x3e, 0x3e, 0x82, 0x69, 0x39, 0x9f, 0xcd, 0x07, + 0x6b, 0xc7, 0x0b, 0x7f, 0x26, 0xa8, 0x23, 0x44, 0x87, 0x61, 0x83, 0x22, 0xaa, 0x66, 0x04, 0xb9, + 0xb8, 0xd8, 0x1d, 0x53, 0xd4, 0xbd, 0xfd, 0x23, 0x81, 0xec, 0x9d, 0x83, 0xae, 0xc3, 0x40, 0xb5, + 0xee, 0x12, 0x2f, 0x5a, 0x2e, 0xb7, 0x0b, 0xae, 0x3a, 0x2f, 0x70, 0xc4, 0x56, 0x14, 0xd9, 0xa5, + 0x78, 0x19, 0x56, 0x14, 0xec, 0xcb, 0x30, 0x54, 0xa9, 0x13, 0xd2, 0xe4, 0x7e, 0x5c, 0xe8, 0x29, + 0xf6, 0x30, 0x61, 0xac, 0xa5, 0xc5, 0x58, 0x4b, 0xfd, 0xcd, 0xc1, 0x98, 0x4a, 0x09, 0xb7, 0xff, + 0xb8, 0x00, 0xa5, 0x0e, 0x49, 0xce, 0x12, 0xfa, 0x36, 0xab, 0x2b, 0x7d, 0xdb, 0x2c, 0x8c, 0xc5, + 0xff, 0x74, 0x51, 0x9e, 0x32, 0x86, 0xbe, 0x69, 0x82, 0x71, 0x12, 0xbf, 0x6b, 0xbf, 0x16, 0x5d, + 0x65, 0xd7, 0xd3, 0xd1, 0x33, 0xcb, 0x50, 0xd5, 0xf7, 0x76, 0xff, 0xf6, 0xce, 0x55, 0xbb, 0xda, + 0xdf, 0x28, 0xc0, 0x71, 0x35, 0x84, 0xdf, 0xbb, 0x03, 0xb7, 0x9e, 0x1e, 0xb8, 0x07, 0xa0, 0xb4, + 0xb6, 0x6f, 0x40, 0x1f, 0x8f, 0xf8, 0xda, 0x05, 0xcf, 0xff, 0x98, 0x19, 0x7c, 0x5f, 0xb1, 0x99, + 0x46, 0x00, 0xfe, 0x1f, 0xb3, 0x60, 0x2c, 0xe1, 0x20, 0x89, 0xb0, 0xe6, 0x45, 0x7f, 0x3f, 0x7c, + 0x79, 0x16, 0xc7, 0x7f, 0x0e, 0x7a, 0xb6, 0x7c, 0x65, 0xa4, 0xac, 0x30, 0xae, 0xfa, 0x61, 0x84, + 0x19, 0xc4, 0xfe, 0x6b, 0x0b, 0x7a, 0xd7, 0x1c, 0xd7, 0x8b, 0xa4, 0xf6, 0xc3, 0xca, 0xd1, 0x7e, + 0x74, 0xf3, 0x5d, 0xe8, 0x25, 0xe8, 0x23, 0x1b, 0x1b, 0xa4, 0x1a, 0x89, 0x59, 0x95, 0xd1, 0x34, + 0xfa, 0x16, 0x59, 0x29, 0x65, 0x42, 0x59, 0x63, 0xfc, 0x2f, 0x16, 0xc8, 0xe8, 0x16, 0x0c, 0x46, + 0x6e, 0x83, 0xcc, 0xd6, 0x6a, 0xc2, 0x26, 0xe0, 0x3e, 0x42, 0xc0, 0xac, 0x49, 0x02, 0x38, 0xa6, + 0x65, 0x7f, 0xb9, 0x00, 0x10, 0x47, 0x98, 0xeb, 0xf4, 0x89, 0x73, 0x29, 0x6d, 0xf1, 0xf9, 0x0c, + 0x6d, 0x31, 0x8a, 0x09, 0x66, 0xa8, 0x8a, 0xd5, 0x30, 0x15, 0xbb, 0x1a, 0xa6, 0x9e, 0xc3, 0x0c, + 0xd3, 0x3c, 0x4c, 0xc4, 0x11, 0xf2, 0xcc, 0x00, 0xa1, 0xec, 0xfe, 0x5e, 0x4b, 0x02, 0x71, 0x1a, + 0xdf, 0x26, 0x70, 0x4e, 0x05, 0x0a, 0x13, 0x77, 0x21, 0x73, 0x25, 0xd0, 0xb5, 0xef, 0x1d, 0xc6, + 0x29, 0x56, 0x87, 0x17, 0x72, 0xd5, 0xe1, 0xbf, 0x60, 0xc1, 0xb1, 0x64, 0x3b, 0xcc, 0xef, 0xfe, + 0x8b, 0x16, 0x1c, 0x8f, 0x73, 0xfc, 0xa4, 0x4d, 0x10, 0x5e, 0x6c, 0x1b, 0xfc, 0x2c, 0xa7, 0xc7, + 0x71, 0xd8, 0x96, 0x95, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xdf, 0xf5, 0xc0, 0x54, 0x5e, 0xd4, + 0x34, 0xe6, 0x69, 0xe4, 0xdc, 0xad, 0x6c, 0x93, 0x3b, 0xc2, 0x9f, 0x23, 0xf6, 0x34, 0xe2, 0xc5, + 0x58, 0xc2, 0x93, 0x69, 0x9d, 0x0a, 0x5d, 0xa6, 0x75, 0xda, 0x82, 0x89, 0x3b, 0x5b, 0xc4, 0x5b, + 0xf7, 0x42, 0x27, 0x72, 0xc3, 0x0d, 0x97, 0x29, 0xd0, 0xf9, 0xba, 0x79, 0x45, 0x7a, 0x5d, 0xdc, + 0x4a, 0x22, 0x1c, 0xec, 0x95, 0xce, 0x18, 0x05, 0x71, 0x97, 0xf9, 0x41, 0x82, 0xd3, 0x44, 0xd3, + 0x59, 0xb1, 0x7a, 0x1e, 0x72, 0x56, 0xac, 0x86, 0x2b, 0xcc, 0x6e, 0xa4, 0x1b, 0x09, 0x7b, 0xb6, + 0xae, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x4f, 0x01, 0xd2, 0xd3, 0x1a, 0x1a, 0x41, 0x6b, 0x9f, 0xdb, + 0xdf, 0x2b, 0xa1, 0xd5, 0x14, 0xf4, 0x60, 0xaf, 0x34, 0x49, 0x4b, 0x97, 0x3d, 0xfa, 0xfc, 0x8d, + 0x23, 0xfd, 0x65, 0x10, 0x42, 0xb7, 0x60, 0x9c, 0x96, 0xb2, 0x1d, 0x25, 0x23, 0xe2, 0xf2, 0x27, + 0xeb, 0x33, 0xfb, 0x7b, 0xa5, 0xf1, 0xd5, 0x04, 0x2c, 0x8f, 0x74, 0x8a, 0x48, 0x46, 0x72, 0xac, + 0x81, 0x6e, 0x93, 0x63, 0xd9, 0x5f, 0xb4, 0xe0, 0x14, 0xbd, 0xe0, 0x6a, 0xd7, 0x73, 0xb4, 0xe8, + 0x4e, 0xd3, 0xe5, 0x7a, 0x1a, 0x71, 0xd5, 0x30, 0x59, 0x5d, 0x79, 0x99, 0x6b, 0x69, 0x14, 0x94, + 0x9e, 0xf0, 0xdb, 0xae, 0x57, 0x4b, 0x9e, 0xf0, 0xd7, 0x5c, 0xaf, 0x86, 0x19, 0x44, 0x5d, 0x59, + 0xc5, 
0xdc, 0x08, 0xfb, 0x5f, 0xa3, 0x7b, 0x95, 0xf6, 0xe5, 0x3b, 0xda, 0x0d, 0xf4, 0x8c, 0xae, + 0x53, 0x15, 0xe6, 0x93, 0xb9, 0xfa, 0xd4, 0x2f, 0x58, 0x20, 0xbc, 0xdf, 0xbb, 0xb8, 0x93, 0xdf, + 0x86, 0xe1, 0x9d, 0x74, 0xca, 0xd7, 0x73, 0xf9, 0xe1, 0x00, 0x44, 0xa2, 0x57, 0xc5, 0xa2, 0x1b, + 0xe9, 0x5d, 0x0d, 0x5a, 0x76, 0x0d, 0x04, 0x74, 0x81, 0x30, 0xad, 0x46, 0xe7, 0xde, 0x3c, 0x0f, + 0x50, 0x63, 0xb8, 0x2c, 0x0f, 0x7c, 0xc1, 0xe4, 0xb8, 0x16, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0xaf, + 0x16, 0x61, 0x48, 0xa6, 0x18, 0x6d, 0x79, 0xdd, 0xc8, 0x1e, 0x75, 0xc6, 0xa9, 0xd0, 0x91, 0x71, + 0x7a, 0x07, 0x26, 0x02, 0x52, 0x6d, 0x05, 0xa1, 0xbb, 0x43, 0x24, 0x58, 0x6c, 0x92, 0x19, 0x9e, + 0xe0, 0x21, 0x01, 0x3c, 0x60, 0x21, 0xb2, 0x12, 0x85, 0x4c, 0x69, 0x9c, 0x26, 0x84, 0x2e, 0xc2, + 0x20, 0x13, 0xbd, 0x97, 0x63, 0x81, 0xb0, 0x12, 0x7c, 0xad, 0x48, 0x00, 0x8e, 0x71, 0xd8, 0xe3, + 0xa0, 0x75, 0x9b, 0xa1, 0x27, 0x3c, 0xc1, 0x2b, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x38, 0x8c, 0xf3, + 0x7a, 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x4a, 0xb0, 0x57, 0x85, 0xd7, 0x19, 0x5f, 0x49, 0xc0, 0x0e, + 0xf6, 0x4a, 0xc7, 0x92, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf2, 0x8f, 0x37, 0x42, 0xef, 0x8c, + 0x94, 0xc1, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0xcf, 0x16, 0x4c, 0x68, 0x53, 0xd5, 0x75, 0x8e, + 0x0d, 0x63, 0x90, 0x0a, 0x5d, 0x0c, 0xd2, 0xe1, 0xa2, 0x3d, 0x64, 0xce, 0x70, 0xcf, 0x03, 0x9a, + 0x61, 0xfb, 0x33, 0x80, 0xd2, 0xf9, 0x6b, 0xd1, 0x9b, 0xdc, 0x90, 0xdf, 0x0d, 0x48, 0xad, 0x9d, + 0xc2, 0x5f, 0x8f, 0x9c, 0x23, 0x3d, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x3f, 0xde, 0x03, 0xe3, + 0xc9, 0x58, 0x1d, 0xe8, 0x2a, 0xf4, 0x71, 0x2e, 0x5d, 0x90, 0x6f, 0x63, 0x4f, 0xa6, 0x45, 0xf8, + 0xe0, 0xf9, 0x6f, 0x38, 0x77, 0x2f, 0xea, 0xa3, 0x77, 0x60, 0xa8, 0xe6, 0xdf, 0xf1, 0xee, 0x38, + 0x41, 0x6d, 0xb6, 0xbc, 0x2c, 0x4e, 0x88, 0x4c, 0x01, 0xd4, 0x42, 0x8c, 0xa6, 0x47, 0x0d, 0x61, + 0xb6, 0x13, 0x31, 0x08, 0xeb, 0xe4, 0xd0, 0x1a, 0x4b, 0xc9, 0xb4, 0xe1, 0x6e, 0xae, 0x38, 0xcd, + 0x76, 0x5e, 0x5d, 0xf3, 0x12, 0x49, 0xa3, 0x3c, 0x22, 0xf2, 0x36, 0x71, 0x00, 0x8e, 0x09, 0xa1, + 0xcf, 0xc1, 0x64, 0x98, 0xa3, 0x12, 0xcb, 0x4b, 0x67, 0xde, 0x4e, 0x4b, 0xc4, 0x85, 0x29, 0x59, + 0xca, 0xb3, 0xac, 0x66, 0xd0, 0x5d, 0x40, 0x42, 0xf4, 0xbc, 0x16, 0xb4, 0xc2, 0x68, 0xae, 0xe5, + 0xd5, 0xea, 0x32, 0x65, 0xd3, 0x87, 0xb3, 0xe5, 0x04, 0x49, 0x6c, 0xad, 0x6d, 0x16, 0x12, 0x38, + 0x8d, 0x81, 0x33, 0xda, 0xb0, 0xbf, 0xd0, 0x03, 0xd3, 0x32, 0x61, 0x74, 0x86, 0xf7, 0xca, 0xe7, + 0xad, 0x84, 0xfb, 0xca, 0x2b, 0xf9, 0x07, 0xfd, 0x43, 0x73, 0x62, 0xf9, 0x52, 0xda, 0x89, 0xe5, + 0xb5, 0x43, 0x76, 0xe3, 0x81, 0xb9, 0xb2, 0x7c, 0xcf, 0xfa, 0x9f, 0xec, 0x1f, 0x03, 0xe3, 0x6a, + 0x46, 0x98, 0xc7, 0x5b, 0x2f, 0x4b, 0xd5, 0x51, 0xce, 0xf3, 0xff, 0xaa, 0xc0, 0x31, 0x2e, 0xfb, + 0x61, 0x19, 0x95, 0x9d, 0x9d, 0xb3, 0x8a, 0x0e, 0xa5, 0x49, 0x1a, 0xcd, 0x68, 0x77, 0xc1, 0x0d, + 0x44, 0x8f, 0x33, 0x69, 0x2e, 0x0a, 0x9c, 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x0e, 0xda, 0x81, 0x89, + 0x4d, 0x16, 0xf1, 0x49, 0xcb, 0xdd, 0x2c, 0xce, 0x85, 0xcc, 0x7d, 0x7b, 0x65, 0x7e, 0x31, 0x3f, + 0xd1, 0x33, 0x7f, 0xfc, 0xa5, 0x50, 0x70, 0xba, 0x09, 0xba, 0x35, 0x8e, 0x39, 0x77, 0xc2, 0xc5, + 0xba, 0x13, 0x46, 0x6e, 0x75, 0xae, 0xee, 0x57, 0xb7, 0x2b, 0x91, 0x1f, 0xc8, 0x04, 0x8f, 0x99, + 0x6f, 0xaf, 0xd9, 0x5b, 0x95, 0x14, 0xbe, 0xd1, 0xfc, 0xd4, 0xfe, 0x5e, 0xe9, 0x58, 0x16, 0x16, + 0xce, 0x6c, 0x0b, 0xad, 0x42, 0xff, 0xa6, 0x1b, 0x61, 0xd2, 0xf4, 0xc5, 0x69, 0x91, 0x79, 0x14, + 0x5e, 0xe1, 0x28, 0x46, 0x4b, 0x2c, 0x22, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xa9, 0x2e, 0x81, + 0xbe, 0x7c, 0x01, 0x6c, 0xda, 
0xf6, 0x2e, 0xf3, 0x1a, 0x78, 0x1d, 0x8a, 0xde, 0x46, 0xd8, 0x2e, + 0x16, 0xcf, 0xea, 0x92, 0x21, 0x3f, 0x9b, 0xeb, 0xa7, 0x4f, 0xe3, 0xd5, 0xa5, 0x0a, 0xa6, 0x15, + 0x99, 0xdb, 0x6b, 0x58, 0x0d, 0x5d, 0x91, 0x2c, 0x2a, 0xd3, 0x0b, 0x78, 0xb9, 0x32, 0x5f, 0x59, + 0x36, 0x68, 0xb0, 0xa8, 0x86, 0xac, 0x18, 0xf3, 0xea, 0xe8, 0x26, 0x0c, 0x6e, 0xf2, 0x83, 0x6f, + 0x23, 0x14, 0x49, 0xe3, 0x33, 0x2f, 0xa3, 0x2b, 0x12, 0xc9, 0xa0, 0xc7, 0xae, 0x0c, 0x05, 0xc2, + 0x31, 0x29, 0xf4, 0x05, 0x0b, 0x8e, 0x27, 0xb3, 0xee, 0x33, 0x67, 0x35, 0x61, 0xa6, 0x96, 0xe9, + 0x00, 0x50, 0xce, 0xaa, 0x60, 0x34, 0xc8, 0xd4, 0x2f, 0x99, 0x68, 0x38, 0xbb, 0x39, 0x3a, 0xd0, + 0xc1, 0xed, 0x5a, 0xbb, 0xfc, 0x42, 0x89, 0xc0, 0x44, 0x7c, 0xa0, 0xf1, 0xdc, 0x02, 0xa6, 0x15, + 0xd1, 0x1a, 0xc0, 0x46, 0x9d, 0x88, 0x88, 0x8f, 0xc2, 0x28, 0x2a, 0xf3, 0xf6, 0x5f, 0x52, 0x58, + 0x82, 0x0e, 0x7b, 0x89, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xd5, 0xf5, 0x6a, 0x24, 0x60, + 0xca, 0xad, 0x9c, 0xa5, 0x34, 0xcf, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1, 0x22, + 0xcd, 0xad, 0x8d, 0xb0, 0x5d, 0x26, 0x8b, 0x79, 0xd2, 0xdc, 0x4a, 0x2c, 0x28, 0x4e, 0x8b, 0x95, + 0x63, 0x41, 0x81, 0x6e, 0x99, 0x0d, 0xba, 0x81, 0x48, 0x30, 0x35, 0x96, 0xbf, 0x65, 0x96, 0x38, + 0x4a, 0x7a, 0xcb, 0x08, 0x00, 0x96, 0x44, 0xd0, 0xa7, 0x4d, 0x6e, 0x67, 0x9c, 0xd1, 0x7c, 0xa6, + 0x03, 0xb7, 0x63, 0xd0, 0x6d, 0xcf, 0xef, 0xbc, 0x02, 0x85, 0x8d, 0x2a, 0x53, 0x8a, 0xe5, 0xe8, + 0x0c, 0x96, 0xe6, 0x0d, 0x6a, 0x2c, 0x32, 0xfc, 0xd2, 0x3c, 0x2e, 0x6c, 0x54, 0xe9, 0xd2, 0x77, + 0xee, 0xb5, 0x02, 0xb2, 0xe4, 0xd6, 0x89, 0xc8, 0x6a, 0x91, 0xb9, 0xf4, 0x67, 0x25, 0x52, 0x7a, + 0xe9, 0x2b, 0x10, 0x8e, 0x49, 0x51, 0xba, 0x31, 0x0f, 0x36, 0x99, 0x4f, 0x57, 0xb1, 0x5a, 0x69, + 0xba, 0x99, 0x5c, 0xd8, 0x36, 0x8c, 0xec, 0x84, 0xcd, 0x2d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb, + 0x89, 0x54, 0x71, 0x53, 0x20, 0xba, 0x41, 0xd4, 0x72, 0xea, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e, + 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08, + 0x19, 0x11, 0xe7, 0xf8, 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0x89, 0x0e, + 0x83, 0x9d, 0xea, 0x6f, 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb9, 0xe5, 0x47, + 0xbe, 0x97, 0xb8, 0xe4, 0x4e, 0xe6, 0x5f, 0x34, 0xe5, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85, + 0x33, 0xdb, 0xa2, 0x1f, 0xd7, 0x94, 0x91, 0x01, 0x45, 0xe6, 0x8d, 0xa7, 0x72, 0x02, 0x6b, 0xa6, + 0xc3, 0x07, 0xf2, 0x8f, 0x53, 0x20, 0x1c, 0x93, 0x42, 0x35, 0x18, 0x6d, 0x1a, 0x11, 0x67, 0x59, + 0x06, 0x91, 0x1c, 0xbe, 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c, + 0xf7, 0xb8, 0xab, 0x1f, 0x4b, 0x30, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00, + 0x4b, 0x22, 0x74, 0x34, 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x3c, 0x3d, 0x79, 0x0a, 0xf6, 0x2c, 0x35, + 0x91, 0x0c, 0xb3, 0x2e, 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xce, 0x3f, 0xc9, + 0x93, 0xd7, 0x1d, 0x3b, 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0x72, 0x8c, + 0xe4, 0xf4, 0x4b, 0x85, 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34, + 0xdd, 0xd9, 0x36, 0x57, 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0, + 0x6c, 0xfb, 0x7d, 0x1b, 0xeb, 0xd0, 0xca, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31, + 0x56, 0xd7, 0x01, 0x87, 0xaf, 0xc0, 0x84, 0x72, 0x47, 0xac, 0xbb, 0xd5, 0x5d, 0x2d, 0xb1, 0xa8, + 0x0a, 0xcd, 0x53, 0x49, 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x0b, 0x63, 0x46, 0xe1, 0xf2, 0x82, 0x78, + 0xfe, 0xc7, 0xd9, 0x31, 0x4c, 0x30, 0x4e, 0xe2, 0xdb, 
0xbf, 0x66, 0xc1, 0xc9, 0x9c, 0x3c, 0xf3, + 0x5d, 0xc7, 0xd3, 0xdd, 0x80, 0xb1, 0xa6, 0x59, 0xb5, 0x43, 0x08, 0x70, 0x23, 0x9b, 0xbd, 0xea, + 0x6b, 0x02, 0x80, 0x93, 0x44, 0xed, 0x5f, 0x29, 0xc0, 0x99, 0xb6, 0xf6, 0xf5, 0x08, 0xc3, 0x89, + 0xcd, 0x46, 0xe8, 0xcc, 0x07, 0xa4, 0x46, 0xbc, 0xc8, 0x75, 0xea, 0x95, 0x26, 0xa9, 0x6a, 0x5a, + 0x50, 0x66, 0xa8, 0x7e, 0x65, 0xa5, 0x32, 0x9b, 0xc6, 0xc0, 0x39, 0x35, 0xd1, 0x12, 0xa0, 0x34, + 0x44, 0xcc, 0x30, 0x7b, 0xe2, 0xa6, 0xe9, 0xe1, 0x8c, 0x1a, 0xe8, 0x65, 0x18, 0x51, 0x76, 0xfb, + 0xda, 0x8c, 0xb3, 0x0b, 0x02, 0xeb, 0x00, 0x6c, 0xe2, 0xa1, 0x4b, 0x3c, 0x6d, 0x92, 0x48, 0xb0, + 0x25, 0x54, 0xa6, 0x63, 0x32, 0x27, 0x92, 0x28, 0xc6, 0x3a, 0xce, 0xdc, 0xe5, 0x3f, 0xfd, 0xf6, + 0xd9, 0x0f, 0xfd, 0xc5, 0xb7, 0xcf, 0x7e, 0xe8, 0xaf, 0xbe, 0x7d, 0xf6, 0x43, 0x3f, 0xb4, 0x7f, + 0xd6, 0xfa, 0xd3, 0xfd, 0xb3, 0xd6, 0x5f, 0xec, 0x9f, 0xb5, 0xfe, 0x6a, 0xff, 0xac, 0xf5, 0xff, + 0xee, 0x9f, 0xb5, 0xbe, 0xfc, 0xb7, 0x67, 0x3f, 0xf4, 0x36, 0x8a, 0x23, 0x54, 0x5f, 0xa4, 0xb3, + 0x73, 0x71, 0xe7, 0xd2, 0x7f, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x60, 0x45, 0x7a, 0xd6, 0xa3, 0x24, + 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -8494,43 +8665,6 @@ func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ClaimSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClaimSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClaimSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ResourceClaimTemplateName != nil { - i -= len(*m.ResourceClaimTemplateName) - copy(dAtA[i:], *m.ResourceClaimTemplateName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName))) - i-- - dAtA[i] = 0x12 - } - if m.ResourceClaimName != nil { - i -= len(*m.ResourceClaimName) - copy(dAtA[i:], *m.ResourceClaimName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9753,6 +9887,32 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.AllocatedResourcesStatus) > 0 { + for iNdEx := len(m.AllocatedResourcesStatus) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllocatedResourcesStatus[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } if len(m.VolumeMounts) > 0 { for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { { @@ -9872,6 +10032,41 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ContainerUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerUser) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11972,6 +12167,39 @@ func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ImageVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PullPolicy) + copy(dAtA[i:], m.PullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PullPolicy))) + i-- + dAtA[i] = 0x12 + i -= len(m.Reference) + copy(dAtA[i:], m.Reference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reference))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *KeyToPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -12428,6 +12656,42 @@ func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *LinuxContainerUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxContainerUser) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SupplementalGroups) > 0 { + for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx])) + i-- + dAtA[i] = 0x18 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.GID)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.UID)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13260,6 +13524,39 @@ func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *NodeFeatures) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeFeatures) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeFeatures) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SupplementalGroupsPolicy != nil { + i-- + if *m.SupplementalGroupsPolicy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *NodeList) Marshal() (dAtA []byte, err error) { size := m.Size() 
dAtA = make([]byte, size) @@ -13395,6 +13692,16 @@ func (m *NodeRuntimeHandlerFeatures) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + if m.UserNamespaces != nil { + i-- + if *m.UserNamespaces { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if m.RecursiveReadOnlyMounts != nil { i-- if *m.RecursiveReadOnlyMounts { @@ -13639,6 +13946,18 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Features != nil { + { + size, err := m.Features.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } if len(m.RuntimeHandlers) > 0 { for iNdEx := len(m.RuntimeHandlers) - 1; iNdEx >= 0; iNdEx-- { { @@ -15701,6 +16020,13 @@ func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Stream != nil { + i -= len(*m.Stream) + copy(dAtA[i:], *m.Stream) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Stream))) + i-- + dAtA[i] = 0x52 + } i-- if m.InsecureSkipTLSVerifyBackend { dAtA[i] = 1 @@ -15902,16 +16228,20 @@ func (m *PodResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - { - size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.ResourceClaimTemplateName != nil { + i -= len(*m.ResourceClaimTemplateName) + copy(dAtA[i:], *m.ResourceClaimTemplateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName))) + i-- + dAtA[i] = 0x22 + } + if m.ResourceClaimName != nil { + i -= len(*m.ResourceClaimName) + copy(dAtA[i:], *m.ResourceClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName))) + i-- + dAtA[i] = 0x1a } - i-- - dAtA[i] = 0x12 i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -16003,6 +16333,20 @@ func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SELinuxChangePolicy != nil { + i -= len(*m.SELinuxChangePolicy) + copy(dAtA[i:], *m.SELinuxChangePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SELinuxChangePolicy))) + i-- + dAtA[i] = 0x6a + } + if m.SupplementalGroupsPolicy != nil { + i -= len(*m.SupplementalGroupsPolicy) + copy(dAtA[i:], *m.SupplementalGroupsPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SupplementalGroupsPolicy))) + i-- + dAtA[i] = 0x62 + } if m.AppArmorProfile != nil { { size, err := m.AppArmorProfile.MarshalToSizedBuffer(dAtA[:i]) @@ -16162,6 +16506,20 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc2 + } if len(m.ResourceClaims) > 0 { for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { { @@ -17878,6 +18236,11 @@ func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0x12 i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -17929,6 +18292,39 @@ func (m *ResourceFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { return 
len(dAtA) - i, nil } +func (m *ResourceHealth) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceHealth) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Health) + copy(dAtA[i:], m.Health) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Health))) + i-- + dAtA[i] = 0x12 + i -= len(m.ResourceID) + copy(dAtA[i:], m.ResourceID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -18278,6 +18674,48 @@ func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ResourceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -20738,6 +21176,20 @@ func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 + } if m.Ephemeral != nil { { size, err := m.Ephemeral.MarshalToSizedBuffer(dAtA[:i]) @@ -21596,23 +22048,6 @@ func (m *CinderVolumeSource) Size() (n int) { return n } -func (m *ClaimSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ResourceClaimName != nil { - l = len(*m.ResourceClaimName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceClaimTemplateName != nil { - l = len(*m.ResourceClaimTemplateName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *ClientIPConfig) Size() (n int) { if m == nil { return 0 @@ -22097,6 +22532,29 @@ func (m *ContainerStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AllocatedResourcesStatus) > 0 { + for _, e := range m.AllocatedResourcesStatus { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ContainerUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Linux != nil { + l = m.Linux.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ 
-22881,6 +23339,19 @@ func (m *ISCSIVolumeSource) Size() (n int) { return n } +func (m *ImageVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Reference) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *KeyToPath) Size() (n int) { if m == nil { return 0 @@ -23040,6 +23511,22 @@ func (m *LimitRangeSpec) Size() (n int) { return n } +func (m *LinuxContainerUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.UID)) + n += 1 + sovGenerated(uint64(m.GID)) + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + func (m *List) Size() (n int) { if m == nil { return 0 @@ -23346,6 +23833,18 @@ func (m *NodeDaemonEndpoints) Size() (n int) { return n } +func (m *NodeFeatures) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SupplementalGroupsPolicy != nil { + n += 2 + } + return n +} + func (m *NodeList) Size() (n int) { if m == nil { return 0 @@ -23398,6 +23897,9 @@ func (m *NodeRuntimeHandlerFeatures) Size() (n int) { if m.RecursiveReadOnlyMounts != nil { n += 2 } + if m.UserNamespaces != nil { + n += 2 + } return n } @@ -23558,6 +24060,10 @@ func (m *NodeStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.Features != nil { + l = m.Features.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24263,6 +24769,10 @@ func (m *PodLogOptions) Size() (n int) { n += 1 + sovGenerated(uint64(*m.LimitBytes)) } n += 2 + if m.Stream != nil { + l = len(*m.Stream) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24321,8 +24831,14 @@ func (m *PodResourceClaim) Size() (n int) { _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.ResourceClaimName != nil { + l = len(*m.ResourceClaimName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceClaimTemplateName != nil { + l = len(*m.ResourceClaimTemplateName) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24401,6 +24917,14 @@ func (m *PodSecurityContext) Size() (n int) { l = m.AppArmorProfile.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.SupplementalGroupsPolicy != nil { + l = len(*m.SupplementalGroupsPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SELinuxChangePolicy != nil { + l = len(*m.SELinuxChangePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -24575,6 +25099,10 @@ func (m *PodSpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if m.Resources != nil { + l = m.Resources.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -25040,6 +25568,8 @@ func (m *ResourceClaim) Size() (n int) { _ = l l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -25058,6 +25588,19 @@ func (m *ResourceFieldSelector) Size() (n int) { return n } +func (m *ResourceHealth) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Health) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ResourceQuota) Size() (n int) { if m == nil { return 0 @@ -25178,6 +25721,23 @@ func (m *ResourceRequirements) Size() (n int) { return n } +func (m *ResourceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *SELinuxOptions) Size() (n int) { if m == nil { return 0 @@ -26221,6 +26781,10 @@ func (m *VolumeSource) Size() (n int) { l = m.Ephemeral.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.Image != nil { + l = m.Image.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -26516,17 +27080,6 @@ func (this *CinderVolumeSource) String() string { }, "") return s } -func (this *ClaimSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClaimSource{`, - `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`, - `ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`, - `}`, - }, "") - return s -} func (this *ClientIPConfig) String() string { if this == nil { return "nil" @@ -26874,6 +27427,11 @@ func (this *ContainerStatus) String() string { repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMountStatus", "VolumeMountStatus", 1), `&`, ``, 1) + "," } repeatedStringForVolumeMounts += "}" + repeatedStringForAllocatedResourcesStatus := "[]ResourceStatus{" + for _, f := range this.AllocatedResourcesStatus { + repeatedStringForAllocatedResourcesStatus += strings.Replace(strings.Replace(f.String(), "ResourceStatus", "ResourceStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForAllocatedResourcesStatus += "}" keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources)) for k := range this.AllocatedResources { keysForAllocatedResources = append(keysForAllocatedResources, string(k)) @@ -26897,6 +27455,18 @@ func (this *ContainerStatus) String() string { `AllocatedResources:` + mapStringForAllocatedResources + `,`, `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `User:` + strings.Replace(this.User.String(), "ContainerUser", "ContainerUser", 1) + `,`, + `AllocatedResourcesStatus:` + repeatedStringForAllocatedResourcesStatus + `,`, + `}`, + }, "") + return s +} +func (this *ContainerUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerUser{`, + `Linux:` + strings.Replace(this.Linux.String(), "LinuxContainerUser", "LinuxContainerUser", 1) + `,`, `}`, }, "") return s @@ -27480,6 +28050,17 @@ func (this *ISCSIVolumeSource) String() string { }, "") return s } +func (this *ImageVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageVolumeSource{`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`, + `}`, + }, "") + return s +} func (this *KeyToPath) String() string { if this == nil { return "nil" @@ -27623,6 +28204,18 @@ func (this *LimitRangeSpec) String() string { }, "") return s } +func (this *LinuxContainerUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LinuxContainerUser{`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `GID:` + fmt.Sprintf("%v", this.GID) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `}`, + }, "") + return s +} func (this *List) String() string { if this == nil { return "nil" @@ -27871,6 +28464,16 @@ func (this *NodeDaemonEndpoints) String() string { }, "") return s } +func (this *NodeFeatures) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeFeatures{`, + `SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`, + `}`, + }, "") + return s +} func (this *NodeList) String() string { if this == nil { return "nil" @@ -27914,6 +28517,7 @@ func (this *NodeRuntimeHandlerFeatures) String() string { } s := strings.Join([]string{`&NodeRuntimeHandlerFeatures{`, `RecursiveReadOnlyMounts:` + valueToStringGenerated(this.RecursiveReadOnlyMounts) + `,`, + `UserNamespaces:` + valueToStringGenerated(this.UserNamespaces) + `,`, `}`, }, "") return s @@ -28049,6 +28653,7 @@ func (this *NodeStatus) String() string { `VolumesAttached:` + repeatedStringForVolumesAttached + `,`, `Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, `RuntimeHandlers:` + repeatedStringForRuntimeHandlers + `,`, + `Features:` + strings.Replace(this.Features.String(), "NodeFeatures", "NodeFeatures", 1) + `,`, `}`, }, "") return s @@ -28527,6 +29132,7 @@ func (this *PodLogOptions) String() string { `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, `InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`, + `Stream:` + valueToStringGenerated(this.Stream) + `,`, `}`, }, "") return s @@ -28577,7 +29183,8 @@ func (this *PodResourceClaim) String() string { } s := strings.Join([]string{`&PodResourceClaim{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "ClaimSource", "ClaimSource", 1), `&`, ``, 1) + `,`, + `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`, + `ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`, `}`, }, "") return s @@ -28624,6 +29231,8 @@ func (this *PodSecurityContext) String() string { `FSGroupChangePolicy:` + valueToStringGenerated(this.FSGroupChangePolicy) + `,`, `SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`, `AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`, + `SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`, + `SELinuxChangePolicy:` + valueToStringGenerated(this.SELinuxChangePolicy) + `,`, `}`, }, "") return s @@ -28757,6 +29366,7 @@ func (this *PodSpec) String() string { `HostUsers:` + valueToStringGenerated(this.HostUsers) + `,`, `SchedulingGates:` + repeatedStringForSchedulingGates + `,`, `ResourceClaims:` + repeatedStringForResourceClaims + `,`, + `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`, `}`, }, "") return s @@ -29125,6 +29735,7 @@ func (this *ResourceClaim) String() string { } s := strings.Join([]string{`&ResourceClaim{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, `}`, }, "") return s @@ -29141,6 +29752,17 @@ func (this *ResourceFieldSelector) String() string { }, "") return s } +func (this *ResourceHealth) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceHealth{`, + `ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`, + `Health:` + fmt.Sprintf("%v", this.Health) + `,`, + `}`, + }, "") + return s +} func (this *ResourceQuota) String() string { if this == nil { return "nil" @@ -29259,6 +29881,22 @@ func 
(this *ResourceRequirements) String() string { }, "") return s } +func (this *ResourceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]ResourceHealth{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceHealth", "ResourceHealth", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&ResourceStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `}`, + }, "") + return s +} func (this *SELinuxOptions) String() string { if this == nil { return "nil" @@ -29967,6 +30605,7 @@ func (this *VolumeSource) String() string { `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, `CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, `Ephemeral:` + strings.Replace(this.Ephemeral.String(), "EphemeralVolumeSource", "EphemeralVolumeSource", 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "ImageVolumeSource", "ImageVolumeSource", 1) + `,`, `}`, }, "") return s @@ -32985,7 +33624,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClaimSource) Unmarshal(dAtA []byte) error { +func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33008,50 +33647,17 @@ func (m *ClaimSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClaimSource: wiretype end group for non-group") + return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClaimSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceClaimName = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33061,25 +33667,12 @@ func (m *ClaimSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceClaimTemplateName 
= &s - iNdEx = postIndex + m.TimeoutSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33101,7 +33694,7 @@ func (m *ClaimSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { +func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33124,17 +33717,17 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterTrustBundleProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterTrustBundleProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33144,98 +33737,28 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.TimeoutSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterTrustBundleProjection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterTrustBundleProjection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37312,6 +37835,162 @@ func (m *ContainerStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &ContainerUser{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{}) + if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerUser: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxContainerUser{} + } + if 
err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44028,7 +44707,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *KeyToPath) Unmarshal(dAtA []byte) error { +func (m *ImageVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44051,15 +44730,15 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") + return fmt.Errorf("proto: ImageVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ImageVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44087,11 +44766,11 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Reference = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44119,28 +44798,8 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.PullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44162,7 +44821,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } return nil } -func (m *Lifecycle) Unmarshal(dAtA []byte) error { +func (m *KeyToPath) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -44185,17 +44844,17 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44205,33 +44864,29 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PostStart == nil { - m.PostStart = &LifecycleHandler{} - } - if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44241,28 +44896,166 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PreStop == nil { - m.PreStop = &LifecycleHandler{} + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } - if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Lifecycle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PostStart == nil { + m.PostStart = 
&LifecycleHandler{} + } + if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreStop == nil { + m.PreStop = &LifecycleHandler{} + } + if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -45522,6 +46315,170 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } return nil } +func (m *LinuxContainerUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxContainerUser: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxContainerUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + m.UID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GID", wireType) + } + m.GID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = make([]int64, 0, 
elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *List) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -48010,6 +48967,77 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } return nil } +func (m *NodeFeatures) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeFeatures: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeFeatures: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SupplementalGroupsPolicy = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *NodeList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -48377,6 +49405,27 @@ func (m *NodeRuntimeHandlerFeatures) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.RecursiveReadOnlyMounts = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNamespaces", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.UserNamespaces = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -49637,6 +50686,42 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Features == nil { + m.Features = &NodeFeatures{} + } + if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -55916,6 +57001,39 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } } m.InsecureSkipTLSVerifyBackend = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Stream = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -56370,11 +57488,11 @@ func (m *PodResourceClaim) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56384,24 +57502,57 @@ func (m *PodResourceClaim) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimName = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType) } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceClaimTemplateName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -57018,6 +58169,72 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SupplementalGroupsPolicy(dAtA[iNdEx:postIndex]) + m.SupplementalGroupsPolicy = &s + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxChangePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PodSELinuxChangePolicy(dAtA[iNdEx:postIndex]) + m.SELinuxChangePolicy = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -58507,6 +59724,42 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 40: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -62585,7 +63838,121 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaim) Unmarshal(dAtA []byte) error { +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -62608,15 +63975,15 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -62644,7 +64011,72 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -62667,7 +64099,7 @@ func (m *ResourceClaim) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { +func (m *ResourceHealth) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -62690,15 +64122,15 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") + return fmt.Errorf("proto: ResourceHealth: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResourceHealth: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -62726,11 +64158,11 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ContainerName = string(dAtA[iNdEx:postIndex]) + m.ResourceID = ResourceID(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -62758,40 +64190,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Health = ResourceHealthStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -63977,6 +65376,122 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResourceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: ResourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, ResourceHealth{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -73029,6 +74544,42 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageVolumeSource{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/constraint/vendor/k8s.io/api/core/v1/generated.proto b/constraint/vendor/k8s.io/api/core/v1/generated.proto index f3b47c722..08706987c 100644 --- a/constraint/vendor/k8s.io/api/core/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/core/v1/generated.proto @@ -126,20 +126,24 @@ message AzureDiskVolumeSource { // cachingMode is the Host Caching mode: None, Read Only, Read Write. // +optional + // +default=ref(AzureDataDiskCachingReadWrite) optional string cachingMode = 3; // fsType is Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. // +optional + // +default="ext4" optional string fsType = 4; // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional + // +default=false optional bool readOnly = 5; // kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // +default=ref(AzureSharedBlobDisk) optional string kind = 6; } @@ -177,18 +181,17 @@ message AzureFileVolumeSource { } // Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. message Binding { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The target object that you want to bind to the standard object. optional ObjectReference target = 2; } -// Represents storage that is managed by an external CSI volume driver (Beta feature) +// Represents storage that is managed by an external CSI volume driver message CSIPersistentVolumeSource { // driver is the name of the driver to use for this volume. // Required. @@ -422,30 +425,6 @@ message CinderVolumeSource { optional LocalObjectReference secretRef = 4; } -// ClaimSource describes a reference to a ResourceClaim. -// -// Exactly one of these fields should be set. Consumers of this type must -// treat an empty object as if it has an unknown value. -message ClaimSource { - // ResourceClaimName is the name of a ResourceClaim object in the same - // namespace as this pod. - optional string resourceClaimName = 1; - - // ResourceClaimTemplateName is the name of a ResourceClaimTemplate - // object in the same namespace as this pod. - // - // The template will be used to create a new ResourceClaim, which will - // be bound to this pod. When this pod is deleted, the ResourceClaim - // will also be deleted. The pod name and resource name, along with a - // generated component, will be used to form a unique name for the - // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - // - // This field is immutable and no changes will be made to the - // corresponding ResourceClaim by the control plane after creating the - // ResourceClaim. - optional string resourceClaimTemplateName = 2; -} - // ClientIPConfig represents the configurations of Client IP based session affinity. message ClientIPConfig { // timeoutSeconds specifies the seconds of ClientIP type session sticky time. @@ -475,7 +454,7 @@ message ClusterTrustBundleProjection { // interpreted as "match nothing". If set but empty, interpreted as "match // everything". // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3; // If true, don't block pod startup if the referenced ClusterTrustBundle(s) // aren't available. If using name, then the named ClusterTrustBundle is @@ -516,7 +495,7 @@ message ComponentStatus { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // List of component conditions observed // +optional @@ -533,7 +512,7 @@ message ComponentStatusList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ComponentStatus objects. repeated ComponentStatus items = 2; @@ -544,7 +523,7 @@ message ConfigMap { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Immutable, if set to true, ensures that data stored in the ConfigMap cannot // be updated (only object metadata can be modified). @@ -604,7 +583,7 @@ message ConfigMapKeySelector { message ConfigMapList { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of ConfigMaps. repeated ConfigMap items = 2; @@ -984,7 +963,7 @@ message ContainerState { message ContainerStateRunning { // Time at which the container was last (re-)started // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; } // ContainerStateTerminated is a terminated state of a container. @@ -1006,11 +985,11 @@ message ContainerStateTerminated { // Time at which previous execution of the container started // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; // Time at which the container last terminated // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; // Container's ID in the format '://' // +optional @@ -1091,9 +1070,9 @@ message ContainerStatus { // AllocatedResources represents the compute resources allocated for this container by the // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission // and after successfully admitting desired pod resize. - // +featureGate=InPlacePodVerticalScaling + // +featureGate=InPlacePodVerticalScalingAllocatedStatus // +optional - map allocatedResources = 10; + map allocatedResources = 10; // Resources represents the compute resource requests and limits that have been successfully // enacted on the running container after it has been started or has been successfully resized. 
@@ -1109,6 +1088,29 @@ message ContainerStatus { // +listMapKey=mountPath // +featureGate=RecursiveReadOnlyMounts repeated VolumeMountStatus volumeMounts = 12; + + // User represents user identity information initially attached to the first process of the container + // +featureGate=SupplementalGroupsPolicy + // +optional + optional ContainerUser user = 13; + + // AllocatedResourcesStatus represents the status of various resources + // allocated for this Pod. + // +featureGate=ResourceHealthStatus + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + repeated ResourceStatus allocatedResourcesStatus = 14; +} + +// ContainerUser represents user identity information +message ContainerUser { + // Linux holds user identity information initially attached to the first process of the containers in Linux. + // Note that the actual running identity can be changed if the process has enough privilege to do so. + // +optional + optional LinuxContainerUser linux = 1; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -1188,7 +1190,7 @@ message EmptyDirVolumeSource { // The default is nil which means that the limit is undefined. // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; } // EndpointAddress is a tuple that describes single IP address. @@ -1300,7 +1302,7 @@ message Endpoints { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The set of all endpoints is the union of all subsets. Addresses are placed into // subsets according to the IPs they share. A single address with multiple ports, @@ -1319,7 +1321,7 @@ message EndpointsList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of endpoints. repeated Endpoints items = 2; @@ -1622,7 +1624,7 @@ message EphemeralVolumeSource { message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. optional ObjectReference involvedObject = 2; @@ -1644,11 +1646,11 @@ message Event { // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; // The time at which the most recent occurrence of this event was recorded. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; // The number of times this event has occurred. // +optional @@ -1660,7 +1662,7 @@ message Event { // Time when this Event was first observed. 
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10;
 
   // Data about the Event series this event represents or nil if it's a singleton Event.
   // +optional
@@ -1688,7 +1690,7 @@ message EventList {
   // Standard list metadata.
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
 
   // List of events
   repeated Event items = 2;
@@ -1701,7 +1703,7 @@ message EventSeries {
   optional int32 count = 1;
 
   // Time of the last occurrence observed
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
 }
 
 // EventSource contains information for an event.
@@ -1867,6 +1869,7 @@ message GCEPersistentDiskVolumeSource {
   optional bool readOnly = 4;
 }
 
+// GRPCAction specifies an action involving a GRPC service.
 message GRPCAction {
   // Port number of the gRPC service. Number must be in the range 1 to 65535.
   optional int32 port = 1;
@@ -1954,7 +1957,7 @@ message HTTPGetAction {
   // Name or number of the port to access on the container.
   // Number must be in the range 1 to 65535.
   // Name must be an IANA_SVC_NAME.
-  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
+  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
 
   // Host name to connect to, defaults to the pod IP. You probably want to set
   // "Host" in httpHeaders instead.
@@ -1997,6 +2000,7 @@ message HostAlias {
 // HostIP represents a single IP address allocated to the host.
 message HostIP {
   // IP is the IP address assigned to the host
+  // +required
   optional string ip = 1;
 }
 
@@ -2032,6 +2036,7 @@ message ISCSIPersistentVolumeSource {
   // iscsiInterface is the interface Name that uses an iSCSI transport.
   // Defaults to 'default' (tcp).
   // +optional
+  // +default="default"
   optional string iscsiInterface = 4;
 
   // fsType is the filesystem type of the volume that you want to mount.
@@ -2089,6 +2094,7 @@ message ISCSIVolumeSource {
   // iscsiInterface is the interface Name that uses an iSCSI transport.
   // Defaults to 'default' (tcp).
   // +optional
+  // +default="default"
   optional string iscsiInterface = 4;
 
   // fsType is the filesystem type of the volume that you want to mount.
@@ -2129,6 +2135,26 @@ message ISCSIVolumeSource {
   optional string initiatorName = 12;
 }
 
+// ImageVolumeSource represents an image volume resource.
+message ImageVolumeSource {
+  // Required: Image or artifact reference to be used.
+  // Behaves in the same way as pod.spec.containers[*].image.
+  // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+  // More info: https://kubernetes.io/docs/concepts/containers/images
+  // This field is optional to allow higher level config management to default or override
+  // container images in workload controllers like Deployments and StatefulSets.
+  // +optional
+  optional string reference = 1;
+
+  // Policy for pulling OCI objects. Possible values are:
+  // Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+  // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+  // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+  // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+  // +optional
+  optional string pullPolicy = 2;
+}
+
 // Maps a string key to a path within a volume.
 message KeyToPath {
   // key is the key to project.
@@ -2177,21 +2203,21 @@ message Lifecycle {
 // LifecycleHandler defines a specific action that should be taken in a lifecycle
 // hook. One and only one of the fields, except TCPSocket must be specified.
 message LifecycleHandler {
-  // Exec specifies the action to take.
+  // Exec specifies a command to execute in the container.
   // +optional
   optional ExecAction exec = 1;
 
-  // HTTPGet specifies the http request to perform.
+  // HTTPGet specifies an HTTP GET request to perform.
   // +optional
   optional HTTPGetAction httpGet = 2;
 
   // Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
-  // for the backward compatibility. There are no validation of this field and
-  // lifecycle hooks will fail in runtime when tcp handler is specified.
+  // for backward compatibility. There is no validation of this field and
+  // lifecycle hooks will fail at runtime when it is specified.
   // +optional
   optional TCPSocketAction tcpSocket = 3;
 
-  // Sleep represents the duration that the container should sleep before being terminated.
+  // Sleep represents a duration that the container should sleep.
   // +featureGate=PodLifecycleSleepAction
   // +optional
   optional SleepAction sleep = 4;
@@ -2202,7 +2228,7 @@ message LimitRange {
   // Standard object's metadata.
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
 
   // Spec defines the limits enforced.
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
@@ -2217,23 +2243,23 @@ message LimitRangeItem {
   // Max usage constraints on this kind by resource name.
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2;
 
   // Min usage constraints on this kind by resource name.
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3;
 
   // Default resource requirement limit value by resource name if resource limit is omitted.
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4;
 
   // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5;
 
   // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
 }
 
 // LimitRangeList is a list of LimitRange items.
@@ -2241,7 +2267,7 @@ message LimitRangeList {
   // Standard list metadata.
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
 
   // Items is a list of LimitRange objects.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -2255,15 +2281,29 @@ message LimitRangeSpec { repeated LimitRangeItem limits = 1; } +// LinuxContainerUser represents user identity information in Linux containers +message LinuxContainerUser { + // UID is the primary uid initially attached to the first process in the container + optional int64 uid = 1; + + // GID is the primary gid initially attached to the first process in the container + optional int64 gid = 2; + + // SupplementalGroups are the supplemental groups initially attached to the first process in the container + // +optional + // +listType=atomic + repeated int64 supplementalGroups = 3; +} + // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // LoadBalancerIngress represents the status of a load-balancer ingress point: @@ -2306,13 +2346,23 @@ message LoadBalancerStatus { // LocalObjectReference contains enough information to let you locate the // referenced object inside the same namespace. +// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 3. We cannot easily change it. Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +structType=atomic message LocalObjectReference { // Name of the referent. // This field is effectively required, but due to backwards compatibility is // allowed to be empty. Instances of this type with an empty value here are // almost certainly wrong. - // TODO: Add other useful fields. apiVersion, kind, uid? // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional // +default="" @@ -2321,7 +2371,7 @@ message LocalObjectReference { optional string name = 1; } -// Local represents directly-attached storage with node affinity (Beta feature) +// Local represents directly-attached storage with node affinity message LocalVolumeSource { // path of the full path to the volume on the node. // It can be either a directory or block device (disk, partition, ...). @@ -2377,7 +2427,7 @@ message Namespace { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -2398,12 +2448,15 @@ message NamespaceCondition { // Status of the condition, one of True, False, Unknown. optional string status = 2; + // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + // Unique, one-word, CamelCase reason for the condition's last transition. // +optional optional string reason = 5; + // Human-readable message indicating details about last transition. // +optional optional string message = 6; } @@ -2413,7 +2466,7 @@ message NamespaceList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Namespace objects in the list. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ @@ -2451,7 +2504,7 @@ message Node { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -2509,11 +2562,11 @@ message NodeCondition { // Last time we got an update on a given condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; // Last time the condition transit from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // (brief) reason for the condition's last transition. // +optional @@ -2588,12 +2641,21 @@ message NodeDaemonEndpoints { optional DaemonEndpoint kubeletEndpoint = 1; } +// NodeFeatures describes the set of features implemented by the CRI implementation. +// The features contained in the NodeFeatures should depend only on the cri implementation +// independent of runtime handlers. +message NodeFeatures { + // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. + // +optional + optional bool supplementalGroupsPolicy = 1; +} + // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. 
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
 
   // List of nodes
   repeated Node items = 2;
@@ -2618,12 +2680,17 @@ message NodeRuntimeHandler {
   optional NodeRuntimeHandlerFeatures features = 2;
 }
 
-// NodeRuntimeHandlerFeatures is a set of runtime features.
+// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
 message NodeRuntimeHandlerFeatures {
   // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
   // +featureGate=RecursiveReadOnlyMounts
   // +optional
   optional bool recursiveReadOnlyMounts = 1;
+
+  // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
+  // +featureGate=UserNamespacesSupport
+  // +optional
+  optional bool userNamespaces = 2;
 }
 
 // A node selector represents the union of the results of one or more label queries
@@ -2713,14 +2780,14 @@ message NodeSpec {
 // NodeStatus is information about the current status of a node.
 message NodeStatus {
   // Capacity represents the total resources of a node.
-  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+  // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
 
   // Allocatable represents the resources of a node that are available for scheduling.
   // Defaults to Capacity.
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2;
 
   // NodePhase is the recently observed lifecycle phase of the node.
   // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
@@ -2729,7 +2796,7 @@ message NodeStatus {
   optional string phase = 3;
 
   // Conditions is an array of current observed node conditions.
-  // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
+  // More info: https://kubernetes.io/docs/reference/node/node-status/#condition
   // +optional
   // +patchMergeKey=type
   // +patchStrategy=merge
@@ -2739,7 +2806,7 @@ message NodeStatus {
   // List of addresses reachable to the node.
   // Queried from cloud provider, if available.
-  // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
+  // More info: https://kubernetes.io/docs/reference/node/node-status/#addresses
   // Note: This field is declared as mergeable, but the merge key is not sufficiently
   // unique, which can cause data corruption when it is merged. Callers should instead
   // use a full-replacement patch. See https://pr.k8s.io/79391 for an example.
@@ -2759,7 +2826,7 @@ message NodeStatus {
   optional NodeDaemonEndpoints daemonEndpoints = 6;
 
   // Set of ids/uuids to uniquely identify the node.
-  // More info: https://kubernetes.io/docs/concepts/nodes/node/#info
+  // More info: https://kubernetes.io/docs/reference/node/node-status/#info
   // +optional
   optional NodeSystemInfo nodeInfo = 7;
@@ -2784,9 +2851,15 @@ message NodeStatus {
   // The available runtime handlers.
   // +featureGate=RecursiveReadOnlyMounts
+  // +featureGate=UserNamespacesSupport
   // +optional
   // +listType=atomic
   repeated NodeRuntimeHandler runtimeHandlers = 12;
+
+  // Features describes the set of features implemented by the CRI implementation.
+  // +featureGate=SupplementalGroupsPolicy
+  // +optional
+  optional NodeFeatures features = 13;
 }
 
 // NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
@@ -2816,7 +2889,7 @@ message NodeSystemInfo { // Kubelet Version reported by the node. optional string kubeletVersion = 7; - // KubeProxy Version reported by the node. + // Deprecated: KubeProxy Version reported by the node. optional string kubeProxyVersion = 8; // The Operating System reported by the node @@ -2904,7 +2977,7 @@ message PersistentVolume { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. @@ -2925,7 +2998,7 @@ message PersistentVolumeClaim { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec defines the desired characteristics of a volume requested by a pod author. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims @@ -2941,17 +3014,22 @@ message PersistentVolumeClaim { // PersistentVolumeClaimCondition contains details about state of pvc message PersistentVolumeClaimCondition { + // Type is the type of the condition. + // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about optional string type = 1; + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required optional string status = 2; // lastProbeTime is the time we probed the condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // lastTransitionTime is the time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // reason is a unique, this should be a short, machine understandable string that gives the reason // for condition's last transition. If it reports "Resizing" that means the underlying @@ -2969,7 +3047,7 @@ message PersistentVolumeClaimList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of persistent volume claims. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims @@ -2987,7 +3065,7 @@ message PersistentVolumeClaimSpec { // selector is a label query over volumes to consider for binding. 
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
 
   // resources represents the minimum resources the volume should have.
   // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
@@ -3059,7 +3137,7 @@ message PersistentVolumeClaimSpec {
   // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
   // exists.
   // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
-  // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+  // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
   // +featureGate=VolumeAttributesClass
   // +optional
   optional string volumeAttributesClassName = 9;
@@ -3079,7 +3157,7 @@ message PersistentVolumeClaimStatus {
   // capacity represents the actual resources of the underlying volume.
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 3;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 3;
 
   // conditions is the current Condition of persistent volume claim. If underlying persistent volume is being
   // resized then the Condition will be set to 'Resizing'.
@@ -3114,7 +3192,7 @@ message PersistentVolumeClaimStatus {
   // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
   // +featureGate=RecoverVolumeExpansionFailure
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 5;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 5;
 
   // allocatedResourceStatuses stores status of resource being resized for the given PVC.
   // Key names follow standard Kubernetes label syntax. Valid values are either:
@@ -3158,14 +3236,14 @@ message PersistentVolumeClaimStatus {
   // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
   // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
-  // This is an alpha field and requires enabling VolumeAttributesClass feature.
+  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
   // +featureGate=VolumeAttributesClass
   // +optional
   optional string currentVolumeAttributesClassName = 8;
 
   // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
   // When this is unset, there is no ModifyVolume operation being attempted.
-  // This is an alpha field and requires enabling VolumeAttributesClass feature.
+  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
   // +featureGate=VolumeAttributesClass
   // +optional
   optional ModifyVolumeStatus modifyVolumeStatus = 9;
@@ -3179,7 +3257,7 @@ message PersistentVolumeClaimTemplate {
   // validation.
   //
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
 
   // The specification for the PersistentVolumeClaim. The entire content is
   // copied unchanged into the PVC that gets created from this
@@ -3208,7 +3286,7 @@ message PersistentVolumeList {
   // Standard list metadata.
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
 
   // items is a list of persistent volumes.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes @@ -3220,12 +3298,16 @@ message PersistentVolumeList { message PersistentVolumeSource { // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; @@ -3240,6 +3322,7 @@ message PersistentVolumeSource { // glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. + // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsPersistentVolumeSource glusterfs = 4; @@ -3250,6 +3333,7 @@ message PersistentVolumeSource { optional NFSVolumeSource nfs = 5; // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDPersistentVolumeSource rbd = 6; @@ -3260,11 +3344,14 @@ message PersistentVolumeSource { optional ISCSIPersistentVolumeSource iscsi = 7; // cinder represents a cinder volume attached and mounted on kubelets host machine. + // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + // are redirected to the cinder.csi.openstack.org CSI driver. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderPersistentVolumeSource cinder = 8; - // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. // +optional optional CephFSPersistentVolumeSource cephfs = 9; @@ -3272,39 +3359,53 @@ message PersistentVolumeSource { // +optional optional FCVolumeSource fc = 10; - // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. + // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. // +optional optional FlockerVolumeSource flocker = 11; // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. + // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. 
// +optional optional FlexPersistentVolumeSource flexVolume = 12; // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + // are redirected to the file.csi.azure.com CSI driver. // +optional optional AzureFilePersistentVolumeSource azureFile = 13; - // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + // are redirected to the csi.vsphere.vmware.com CSI driver. // +optional optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; - // quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. // +optional optional QuobyteVolumeSource quobyte = 15; // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + // are redirected to the disk.csi.azure.com CSI driver. // +optional optional AzureDiskVolumeSource azureDisk = 16; - // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17; - // portworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + // is on. // +optional optional PortworxVolumeSource portworxVolume = 18; // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. // +optional optional ScaleIOPersistentVolumeSource scaleIO = 19; @@ -3312,12 +3413,13 @@ message PersistentVolumeSource { // +optional optional LocalVolumeSource local = 20; - // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. + // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; - // csi represents storage that is handled by an external CSI driver (Beta feature). + // csi represents storage that is handled by an external CSI driver. // +optional optional CSIPersistentVolumeSource csi = 22; } @@ -3327,7 +3429,7 @@ message PersistentVolumeSpec { // capacity is the description of the persistent volume's resources and capacity. 
   // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
 
   // persistentVolumeSource is the actual volume backing the persistent volume.
   optional PersistentVolumeSource persistentVolumeSource = 2;
@@ -3382,7 +3484,7 @@ message PersistentVolumeSpec {
   // after a volume has been updated successfully to a new class.
   // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
   // PersistentVolumeClaims during the binding process.
-  // This is an alpha field and requires enabling VolumeAttributesClass feature.
+  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
   // +featureGate=VolumeAttributesClass
   // +optional
   optional string volumeAttributesClassName = 10;
@@ -3406,10 +3508,8 @@ message PersistentVolumeStatus {
   // lastPhaseTransitionTime is the time the phase transitioned from one to another
   // and automatically resets to current time everytime a volume phase transitions.
-  // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).
-  // +featureGate=PersistentVolumeLastPhaseTransitionTime
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4;
 }
 
 // Represents a Photon Controller persistent disk resource.
@@ -3429,7 +3529,7 @@ message Pod {
   // Standard object's metadata.
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
 
   // Specification of the desired behavior of the pod.
   // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
@@ -3482,7 +3582,7 @@ message PodAffinityTerm {
   // A label query over a set of resources, in this case pods.
   // If it's null, this PodAffinityTerm matches with no Pods.
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
 
   // namespaces specifies a static list of namespace names that the term applies to.
   // The term is applied to the union of the namespaces listed in this field
@@ -3505,7 +3605,7 @@ message PodAffinityTerm {
   // null selector and null or empty namespaces list means "this pod's namespace".
   // An empty selector ({}) matches all namespaces.
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4;
 
   // MatchLabelKeys is a set of pod label keys to select which pods will
   // be taken into consideration. The keys are used to lookup values from the
@@ -3515,7 +3615,8 @@ message PodAffinityTerm {
   // pod labels will be ignored. The default value is empty.
   // The same key is forbidden to exist in both matchLabelKeys and labelSelector.
   // Also, matchLabelKeys cannot be set when labelSelector isn't set.
-  // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+  // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+ // // +listType=atomic // +optional repeated string matchLabelKeys = 5; @@ -3528,7 +3629,8 @@ message PodAffinityTerm { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. // Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional repeated string mismatchLabelKeys = 6; @@ -3607,11 +3709,11 @@ message PodCondition { // Last time we probed the condition. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; // Unique, one-word, CamelCase reason for the condition's last transition. // +optional @@ -3650,9 +3752,11 @@ message PodDNSConfig { // PodDNSConfigOption defines DNS resolver options of a pod. message PodDNSConfigOption { + // Name is this DNS resolver option's name. // Required. optional string name = 1; + // Value is this DNS resolver option's value. // +optional optional string value = 2; } @@ -3693,6 +3797,7 @@ message PodExecOptions { // PodIP represents a single IP address allocated to the pod. message PodIP { // IP is the IP address assigned to the pod + // +required optional string ip = 1; } @@ -3701,7 +3806,7 @@ message PodList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md @@ -3734,7 +3839,7 @@ message PodLogOptions { // If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line // of log output. Defaults to false. @@ -3742,7 +3847,8 @@ message PodLogOptions { optional bool timestamps = 6; // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime + // logs are shown from the creation of the container or sinceSeconds or sinceTime. + // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All". // +optional optional int64 tailLines = 7; @@ -3760,6 +3866,14 @@ message PodLogOptions { // the actual log data coming from the real kubelet). // +optional optional bool insecureSkipTLSVerifyBackend = 9; + + // Specify which container log stream to return to the client. + // Acceptable values are "All", "Stdout" and "Stderr". If not specified, "All" is used, and both stdout and stderr + // are returned interleaved. + // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All". 
+ // +featureGate=PodLogsQuerySplitStreams + // +optional + optional string stream = 10; } // PodOS defines the OS parameters of a pod. @@ -3798,7 +3912,10 @@ message PodReadinessGate { optional string conditionType = 1; } -// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. +// PodResourceClaim references exactly one ResourceClaim, either directly +// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim +// for the pod. +// // It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. // Containers that need access to the ResourceClaim reference it with this name. message PodResourceClaim { @@ -3806,8 +3923,29 @@ message PodResourceClaim { // This must be a DNS_LABEL. optional string name = 1; - // Source describes where to find the ResourceClaim. - optional ClaimSource source = 2; + // ResourceClaimName is the name of a ResourceClaim object in the same + // namespace as this pod. + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + optional string resourceClaimName = 3; + + // ResourceClaimTemplateName is the name of a ResourceClaimTemplate + // object in the same namespace as this pod. + // + // The template will be used to create a new ResourceClaim, which will + // be bound to this pod. When this pod is deleted, the ResourceClaim + // will also be deleted. The pod name and resource name, along with a + // generated component, will be used to form a unique name for the + // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. + // + // This field is immutable and no changes will be made to the + // corresponding ResourceClaim by the control plane after creating the + // ResourceClaim. + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + optional string resourceClaimTemplateName = 4; } // PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim @@ -3820,7 +3958,7 @@ message PodResourceClaimStatus { optional string name = 1; // ResourceClaimName is the name of the ResourceClaim that was - // generated for the Pod in the namespace of the Pod. It this is + // generated for the Pod in the namespace of the Pod. If this is // unset, then generating a ResourceClaim was not necessary. The // pod.spec.resourceClaims entry can be ignored in this case. // @@ -3882,17 +4020,29 @@ message PodSecurityContext { // +optional optional bool runAsNonRoot = 3; - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID, the fsGroup (if specified), and group memberships - // defined in the container image for the uid of the container process. If unspecified, - // no additional groups are added to any container. Note that group memberships - // defined in the container image for the uid of the container process are still effective, - // even if they are not included in this list. + // A list of groups applied to the first process run in each container, in + // addition to the container's primary GID and fsGroup (if specified). If + // the SupplementalGroupsPolicy feature is enabled, the + // supplementalGroupsPolicy field determines whether these are in addition + // to or instead of any group memberships defined in the container image. + // If unspecified, no additional groups are added, though group memberships + // defined in the container image may still be used, depending on the + // supplementalGroupsPolicy field. 
   // Note that this field cannot be set when spec.os.name is windows.
   // +optional
   // +listType=atomic
   repeated int64 supplementalGroups = 4;
 
+  // Defines how supplemental groups of the first container processes are calculated.
+  // Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
+  // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
+  // and the container runtime must implement support for this feature.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34
+  // +featureGate=SupplementalGroupsPolicy
+  // +optional
+  optional string supplementalGroupsPolicy = 12;
+
   // A special supplemental group that applies to all containers in a pod.
   // Some volume types allow the Kubelet to change the ownership of that volume
   // to be owned by the pod:
@@ -3932,6 +4082,33 @@ message PodSecurityContext {
   // Note that this field cannot be set when spec.os.name is windows.
   // +optional
   optional AppArmorProfile appArmorProfile = 11;
+
+  // seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+  // It has no effect on nodes that do not support SELinux or to volumes that do not support SELinux.
+  // Valid values are "MountOption" and "Recursive".
+  //
+  // "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+  // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+  //
+  // "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+  // This requires all Pods that share the same volume to use the same SELinux label.
+  // It is not possible to share the same volume among privileged and unprivileged Pods.
+  // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+  // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+  // CSIDriver instance. Other volumes are always re-labelled recursively.
+  // "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+  //
+  // If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+  // If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+  // and "Recursive" for all other volumes.
+  //
+  // This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+  //
+  // All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +featureGate=SELinuxChangePolicy
+  // +optional
+  optional string seLinuxChangePolicy = 13;
 }
 
 // Describes the class of pods that should avoid this node.
@@ -3939,7 +4116,7 @@ message PodSignature {
   // Reference to controller whose pods should avoid this node.
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1;
 }
 
 // PodSpec is a description of a pod.
@@ -4048,9 +4225,11 @@ message PodSpec {
   // +optional
   optional bool automountServiceAccountToken = 21;
 
-  // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
-  // the scheduler simply schedules this pod onto that node, assuming that it fits resource
-  // requirements.
+  // NodeName indicates in which node this pod is scheduled.
+  // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
+  // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
+  // This field should not be used to express a desire for the pod to be scheduled on a specific node.
+  // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
   // +optional
   optional string nodeName = 10;
@@ -4189,7 +4368,7 @@ message PodSpec {
   // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
   // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
   // +optional
-  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32;
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32;
 
   // TopologySpreadConstraints describes how a group of pods ought to spread across topology
   // domains. Scheduler will schedule pods in a way which abides by the constraints.
@@ -4230,6 +4409,7 @@ message PodSpec {
   // - spec.securityContext.runAsUser
   // - spec.securityContext.runAsGroup
   // - spec.securityContext.supplementalGroups
+  // - spec.securityContext.supplementalGroupsPolicy
   // - spec.containers[*].securityContext.appArmorProfile
   // - spec.containers[*].securityContext.seLinuxOptions
   // - spec.containers[*].securityContext.seccompProfile
@@ -4286,6 +4466,21 @@ message PodSpec {
   // +featureGate=DynamicResourceAllocation
   // +optional
   repeated PodResourceClaim resourceClaims = 39;
+
+  // Resources is the total amount of CPU and Memory resources required by all
+  // containers in the pod. It supports specifying Requests and Limits for
+  // "cpu" and "memory" resource names only. ResourceClaims are not supported.
+  //
+  // This field enables fine-grained control over resource allocation for the
+  // entire pod, allowing resource sharing among containers in a pod.
+  // TODO: For beta graduation, expand this comment with a detailed explanation.
+  //
+  // This is an alpha field and requires enabling the PodLevelResources feature
+  // gate.
+  //
+  // +featureGate=PodLevelResources
+  // +optional
+  optional ResourceRequirements resources = 40;
 }
 
 // PodStatus represents information about the status of a pod. Status may trail the actual
@@ -4375,16 +4570,28 @@ message PodStatus {
   // RFC 3339 date and time at which the object was acknowledged by the Kubelet.
   // This is before the Kubelet pulled the container image(s) for the pod.
   // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7;
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7;
 
-  // The list has one entry per init container in the manifest. The most recent successful
+  // Statuses of init containers in this pod. The most recent successful non-restartable
   // init container will have ready = true, the most recently started container will have
   // startTime set.
-  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+  // Each init container in the pod should have at most one status in this list,
+  // and all statuses should be for containers in the pod.
+  // However this is not enforced.
+  // If a status for a non-existent container is present in the list, or the list has duplicate names,
+  // the behavior of various Kubernetes components is not defined and those statuses might be
+  // ignored.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status // +listType=atomic repeated ContainerStatus initContainerStatuses = 10; - // The list has one entry per container in the manifest. + // Statuses of containers in this pod. + // Each container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional // +listType=atomic @@ -4396,7 +4603,14 @@ message PodStatus { // +optional optional string qosClass = 9; - // Status for any ephemeral containers that have run in this pod. + // Statuses for any ephemeral containers that have run in this pod. + // Each ephemeral container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional // +listType=atomic repeated ContainerStatus ephemeralContainerStatuses = 13; @@ -4423,7 +4637,7 @@ message PodStatusResult { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Most recently observed status of the pod. // This data may not be up to date. @@ -4439,7 +4653,7 @@ message PodTemplate { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4452,7 +4666,7 @@ message PodTemplateList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pod templates repeated PodTemplate items = 2; @@ -4463,7 +4677,7 @@ message PodTemplateSpec { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4471,6 +4685,7 @@ message PodTemplateSpec { optional PodSpec spec = 2; } +// PortStatus represents the error condition of a service port message PortStatus { // Port is the port number of the service port of which status is recorded here optional int32 port = 1; @@ -4525,7 +4740,7 @@ message PreferAvoidPodsEntry { // Time at which this entry was added to the list. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; // (brief) reason why this entry was added to the list. // +optional @@ -4595,26 +4810,27 @@ message Probe { // ProbeHandler defines a specific action that should be taken in a probe. // One and only one of the fields must be specified. message ProbeHandler { - // Exec specifies the action to take. + // Exec specifies a command to execute in the container. // +optional optional ExecAction exec = 1; - // HTTPGet specifies the http request to perform. + // HTTPGet specifies an HTTP GET request to perform. // +optional optional HTTPGetAction httpGet = 2; - // TCPSocket specifies an action involving a TCP port. + // TCPSocket specifies a connection to a TCP port. // +optional optional TCPSocketAction tcpSocket = 3; - // GRPC specifies an action involving a GRPC port. + // GRPC specifies a GRPC HealthCheckRequest. // +optional optional GRPCAction grpc = 4; } // Represents a projected volume source message ProjectedVolumeSource { - // sources is the list of volume projections + // sources is the list of volume projections. Each entry in this list + // handles one source. // +optional // +listType=atomic repeated VolumeProjection sources = 1; @@ -4685,18 +4901,21 @@ message RBDPersistentVolumeSource { // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" optional string pool = 4; // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" optional string user = 5; // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" optional string keyring = 6; // secretRef is name of the authentication secret for RBDUser. If provided @@ -4737,18 +4956,21 @@ message RBDVolumeSource { // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" optional string pool = 4; // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" optional string user = 5; // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" optional string keyring = 6; // secretRef is name of the authentication secret for RBDUser. If provided @@ -4770,7 +4992,7 @@ message RangeAllocation { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Range is string that identifies the range represented by 'data'. optional string range = 2; @@ -4785,7 +5007,7 @@ message ReplicationController { // be the same as the Pod(s) that the replication controller manages. // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4811,7 +5033,7 @@ message ReplicationControllerCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -4827,7 +5049,7 @@ message ReplicationControllerList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of replication controllers. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -4904,6 +5126,13 @@ message ResourceClaim { // the Pod where this field is used. It makes that resource available // inside a container. optional string name = 1; + + // Request is the name chosen for a request in the referenced claim. + // If empty, everything from the claim is made available, otherwise + // only the result of this request. + // + // +optional + optional string request = 2; } // ResourceFieldSelector represents container resources (cpu, memory) and their output format @@ -4918,7 +5147,26 @@ message ResourceFieldSelector { // Specifies the output format of the exposed resources, defaults to "1" // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; +} + +// ResourceHealth represents the health of a resource. It has the latest device health information. +// This is a part of KEP https://kep.k8s.io/4680. +message ResourceHealth { + // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. + optional string resourceID = 1; + + // Health of the resource. + // can be one of: + // - Healthy: operates as normal + // - Unhealthy: reported unhealthy. We consider this a temporary health issue + // since we do not have a mechanism today to distinguish + // temporary and permanent issues. + // - Unknown: The status cannot be determined. + // For example, Device Plugin got unregistered and hasn't been re-registered since. + // + // In future we may want to introduce the PermanentlyUnhealthy Status. 
+ optional string health = 2; } // ResourceQuota sets aggregate quota restrictions enforced per namespace @@ -4926,7 +5174,7 @@ message ResourceQuota { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -4944,7 +5192,7 @@ message ResourceQuotaList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ResourceQuota objects. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ @@ -4956,7 +5204,7 @@ message ResourceQuotaSpec { // hard is the set of desired hard limits for each named resource. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1; + map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1; // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. @@ -4976,11 +5224,11 @@ message ResourceQuotaStatus { // Hard is the set of enforced hard limits for each named resource. // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ // +optional - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1; + map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1; // Used is the current observed total usage of the resource in the namespace. // +optional - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> used = 2; + map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> used = 2; } // ResourceRequirements describes the compute resource requirements. @@ -4988,14 +5236,14 @@ message ResourceRequirements { // Limits describes the maximum amount of compute resources allowed. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1; + map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1; // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2; + map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2; // Claims lists the names of resources, defined in spec.resourceClaims, // that are used by this container. @@ -5012,6 +5260,23 @@ message ResourceRequirements { repeated ResourceClaim claims = 3; } +// ResourceStatus represents the status of a single resource allocated to a Pod. +message ResourceStatus { + // Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. + // For DRA resources, the value must be "claim:<claim_name>/<request>". + // When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container. + // +required + optional string name = 1; + + // List of unique resources health. Each element in the list contains an unique resource ID and its health. + // At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. + // If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share.
+ // See ResourceID type definition for a specific format it has in various use cases. + // +listType=map + // +listMapKey=resourceID + repeated ResourceHealth resources = 2; +} + // SELinuxOptions are the labels to be applied to the container message SELinuxOptions { // User is a SELinux user label that applies to the container. @@ -5058,6 +5323,7 @@ message ScaleIOPersistentVolumeSource { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" optional string storageMode = 7; // volumeName is the name of a volume already created in the ScaleIO system @@ -5069,6 +5335,7 @@ message ScaleIOPersistentVolumeSource { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs" // +optional + // +default="xfs" optional string fsType = 9; // readOnly defaults to false (read/write). ReadOnly here will force @@ -5104,6 +5371,7 @@ message ScaleIOVolumeSource { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" optional string storageMode = 7; // volumeName is the name of a volume already created in the ScaleIO system @@ -5115,6 +5383,7 @@ message ScaleIOVolumeSource { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs". // +optional + // +default="xfs" optional string fsType = 9; // readOnly Defaults to false (read/write). ReadOnly here will force @@ -5179,7 +5448,7 @@ message Secret { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Immutable, if set to true, ensures that data stored in the Secret cannot // be updated (only object metadata can be modified). @@ -5242,7 +5511,7 @@ message SecretList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of secret objects. // More info: https://kubernetes.io/docs/concepts/configuration/secret @@ -5398,7 +5667,7 @@ message SecurityContext { optional bool allowPrivilegeEscalation = 7; // procMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for + // The default value is Default which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. // Note that this field cannot be set when spec.os.name is windows. @@ -5433,7 +5702,7 @@ message Service { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -5456,10 +5725,12 @@ message ServiceAccount { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true". + // The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32. + // Prefer separate namespaces to isolate access to mounted secrets. // This field should not be used to find auto-generated service account token secrets for use outside of pods. // Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. // More info: https://kubernetes.io/docs/concepts/configuration/secret @@ -5489,7 +5760,7 @@ message ServiceAccountList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ServiceAccounts. // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ @@ -5527,7 +5798,7 @@ message ServiceList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of services repeated Service items = 2; @@ -5579,7 +5850,7 @@ message ServicePort { // omitted or set equal to the 'port' field. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; // The port on each node on which this service is exposed when type is // NodePort or LoadBalancer. Usually assigned by the system. If a value is @@ -5845,7 +6116,7 @@ message ServiceSpec { // not set, the implementation will apply its default routing strategy. If set // to "PreferClose", implementations should prioritize endpoints that are // topologically close (e.g., same zone). - // This is an alpha field and requires enabling ServiceTrafficDistribution feature. + // This is a beta field and requires enabling ServiceTrafficDistribution feature. // +featureGate=ServiceTrafficDistribution // +optional optional string trafficDistribution = 23; @@ -5864,7 +6135,7 @@ message ServiceStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; } // SessionAffinityConfig represents the configurations of session affinity. @@ -5958,7 +6229,7 @@ message TCPSocketAction { // Number or name of the port to access on the container. // Number must be in the range 1 to 65535. // Name must be an IANA_SVC_NAME. 
- optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; // Optional: Host name to connect to, defaults to the pod IP. // +optional @@ -5983,7 +6254,7 @@ message Taint { // TimeAdded represents the time at which the taint was added. // It is only written for NoExecute taints. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4; } // The pod this Toleration is attached to tolerates any taint that matches @@ -6107,7 +6378,7 @@ message TopologySpreadConstraint { // Pods that match this label selector are counted to determine the number of pods // in their corresponding topology domain. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; // MinDomains indicates a minimum number of eligible domains. // When the number of eligible domains with matching topology keys is less than minDomains, @@ -6172,6 +6443,20 @@ message TopologySpreadConstraint { // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. +// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 3. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity +// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple +// and the version of the actual struct is irrelevant. +// 4. We cannot easily change it. Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +structType=atomic message TypedLocalObjectReference { // APIGroup is the group for the resource being referenced. @@ -6187,6 +6472,7 @@ message TypedLocalObjectReference { optional string name = 3; } +// TypedObjectReference contains enough information to let you locate the typed referenced object message TypedObjectReference { // APIGroup is the group for the resource being referenced. // If APIGroup is not specified, the specified Kind must be in the core API group. @@ -6313,7 +6599,8 @@ message VolumeNodeAffinity { optional NodeSelector required = 1; } -// Projection that may be projected along with other supported volume types +// Projection that may be projected along with other supported volume types. +// Exactly one of these fields must be set. 
message VolumeProjection { // secret information about the secret data to project // +optional @@ -6355,14 +6642,14 @@ message VolumeResourceRequirements { // Limits describes the maximum amount of compute resources allowed. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1; + map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1; // Requests describes the minimum amount of compute resources required. // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, // otherwise to an implementation-defined value. Requests cannot exceed Limits. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ // +optional - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2; + map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2; } // Represents the source of a volume to mount. @@ -6386,18 +6673,22 @@ message VolumeSource { // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; // gitRepo represents a git repository at a particular revision. - // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. // +optional @@ -6420,6 +6711,7 @@ message VolumeSource { optional ISCSIVolumeSource iscsi = 8; // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 9; @@ -6431,25 +6723,31 @@ message VolumeSource { optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional optional RBDVolumeSource rbd = 11; // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. + // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. // +optional optional FlexVolumeSource flexVolume = 12; // cinder represents a cinder volume attached and mounted on kubelets host machine. + // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + // are redirected to the cinder.csi.openstack.org CSI driver.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional optional CinderVolumeSource cinder = 13; - // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. // +optional optional CephFSVolumeSource cephfs = 14; - // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. // +optional optional FlockerVolumeSource flocker = 15; @@ -6462,6 +6760,8 @@ message VolumeSource { optional FCVolumeSource fc = 17; // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + // are redirected to the file.csi.azure.com CSI driver. // +optional optional AzureFileVolumeSource azureFile = 18; @@ -6469,37 +6769,48 @@ message VolumeSource { // +optional optional ConfigMapVolumeSource configMap = 19; - // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + // are redirected to the csi.vsphere.vmware.com CSI driver. // +optional optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; - // quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. // +optional optional QuobyteVolumeSource quobyte = 21; // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + // are redirected to the disk.csi.azure.com CSI driver. // +optional optional AzureDiskVolumeSource azureDisk = 22; - // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23; // projected items for all in one resources secrets, configmaps, and downward API optional ProjectedVolumeSource projected = 26; - // portworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + // is on. // +optional optional PortworxVolumeSource portworxVolume = 24; // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. 
// +optional optional ScaleIOVolumeSource scaleIO = 25; // storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. // +optional optional StorageOSVolumeSource storageos = 27; - // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers. // +optional optional CSIVolumeSource csi = 28; @@ -6530,6 +6841,24 @@ message VolumeSource { // // +optional optional EphemeralVolumeSource ephemeral = 29; + + // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + // The volume is resolved at pod startup depending on which PullPolicy value is provided: + // + // - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // + // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + // The volume will be mounted read-only (ro) and non-executable files (noexec). + // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + // +featureGate=ImageVolume + // +optional + optional ImageVolumeSource image = 30; } // Represents a vSphere volume resource. diff --git a/constraint/vendor/k8s.io/api/core/v1/objectreference.go b/constraint/vendor/k8s.io/api/core/v1/objectreference.go index ee5335ee8..609cadc7a 100644 --- a/constraint/vendor/k8s.io/api/core/v1/objectreference.go +++ b/constraint/vendor/k8s.io/api/core/v1/objectreference.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// SetGroupVersionKind allows clients to preemptively get a reference to an API object and pass it to places that // intend only to get a reference to that object. This simplifies the event recording interface. 
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() diff --git a/constraint/vendor/k8s.io/api/core/v1/types.go b/constraint/vendor/k8s.io/api/core/v1/types.go index 328df9a7b..fb2c1c745 100644 --- a/constraint/vendor/k8s.io/api/core/v1/types.go +++ b/constraint/vendor/k8s.io/api/core/v1/types.go @@ -63,16 +63,20 @@ type VolumeSource struct { EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` // gitRepo represents a git repository at a particular revision. - // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. // +optional @@ -91,6 +95,7 @@ type VolumeSource struct { // +optional ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` @@ -100,21 +105,27 @@ type VolumeSource struct { // +optional PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. + // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. // +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // cinder represents a cinder volume attached and mounted on kubelets host machine. + // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + // are redirected to the cinder.csi.openstack.org CSI driver. 
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` - // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. // +optional CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` - // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. // +optional Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` // downwardAPI represents downward API about the pod that should populate this volume @@ -124,34 +135,47 @@ type VolumeSource struct { // +optional FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + // are redirected to the file.csi.azure.com CSI driver. // +optional AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` // configMap represents a configMap that should populate this volume // +optional ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` - // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + // are redirected to the csi.vsphere.vmware.com CSI driver. // +optional VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` - // quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. // +optional Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"` // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + // are redirected to the disk.csi.azure.com CSI driver. // +optional AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` - // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. 
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` // projected items for all in one resources secrets, configmaps, and downward API Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` - // portworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + // is on. // +optional PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"` // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. // +optional ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` // storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. // +optional StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` - // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers. // +optional CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"` // ephemeral represents a volume that is handled by a cluster storage driver. @@ -181,6 +205,23 @@ type VolumeSource struct { // // +optional Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"` + // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. + // The volume is resolved at pod startup depending on which PullPolicy value is provided: + // + // - Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // + // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. + // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. + // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. + // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. + // The volume will be mounted read-only (ro) and non-executable files (noexec). 
+ // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. + // +featureGate=ImageVolume + // +optional + Image *ImageVolumeSource `json:"image,omitempty" protobuf:"bytes,30,opt,name=image"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -202,11 +243,15 @@ type PersistentVolumeClaimVolumeSource struct { type PersistentVolumeSource struct { // gcePersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk // +optional GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` // awsElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. + // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` @@ -219,6 +264,7 @@ type PersistentVolumeSource struct { HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` // glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. + // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. // More info: https://examples.k8s.io/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` @@ -227,6 +273,7 @@ type PersistentVolumeSource struct { // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. // More info: https://examples.k8s.io/volumes/rbd/README.md // +optional RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` @@ -235,50 +282,68 @@ type PersistentVolumeSource struct { // +optional ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` // cinder represents a cinder volume attached and mounted on kubelets host machine. + // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + // are redirected to the cinder.csi.openstack.org CSI driver. // More info: https://examples.k8s.io/mysql-cinder-pd/README.md // +optional Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` - // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. 
// +optional CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. // +optional FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` - // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. + // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. // +optional Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` // flexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. + // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. // +optional FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // azureFile represents an Azure File Service mount on the host and bind mount to the pod. + // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + // are redirected to the file.csi.azure.com CSI driver. // +optional AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` - // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + // are redirected to the csi.vsphere.vmware.com CSI driver. // +optional VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` - // quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. // +optional Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"` // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + // are redirected to the disk.csi.azure.com CSI driver. // +optional AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"` - // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"` - // portworxVolume represents a portworx volume attached and mounted on kubelets host machine + // portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + // Deprecated: PortworxVolume is deprecated. 
All operations for the in-tree portworxVolume type + // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + // is on. // +optional PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. // +optional ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` // local represents directly-attached storage with node affinity // +optional Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` - // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. + // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. // More info: https://examples.k8s.io/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` - // csi represents storage that is handled by an external CSI driver (Beta feature). + // csi represents storage that is handled by an external CSI driver. // +optional CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } @@ -295,6 +360,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. @@ -371,7 +437,7 @@ type PersistentVolumeSpec struct { // after a volume has been updated successfully to a new class. // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound // PersistentVolumeClaims during the binding process. - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"` @@ -425,13 +491,12 @@ type PersistentVolumeStatus struct { Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). - // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeList is a list of PersistentVolume items. 
type PersistentVolumeList struct { @@ -447,6 +512,7 @@ type PersistentVolumeList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeClaim is a user's request for and claim to a persistent volume type PersistentVolumeClaim struct { @@ -469,6 +535,7 @@ type PersistentVolumeClaim struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. type PersistentVolumeClaimList struct { @@ -557,12 +624,13 @@ type PersistentVolumeClaimSpec struct { // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource // exists. // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). // +featureGate=VolumeAttributesClass // +optional VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"` } +// TypedObjectReference contains enough information to let you locate the typed referenced object type TypedObjectReference struct { // APIGroup is the group for the resource being referenced. // If APIGroup is not specified, the specified Kind must be in the core API group. @@ -581,15 +649,29 @@ type TypedObjectReference struct { Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` } -// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type +// PersistentVolumeClaimConditionType defines the condition of PV claim. +// Valid values are: +// - "Resizing", "FileSystemResizePending" +// +// If RecoverVolumeExpansionFailure feature gate is enabled, then following additional values can be expected: +// - "ControllerResizeError", "NodeResizeError" +// +// If VolumeAttributesClass feature gate is enabled, then following additional values can be expected: +// - "ModifyVolumeError", "ModifyingVolume" type PersistentVolumeClaimConditionType string +// These are valid conditions of PVC const ( // PersistentVolumeClaimResizing - a user trigger resize of pvc has been started PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" + // PersistentVolumeClaimControllerResizeError indicates an error while resizing volume for size in the controller + PersistentVolumeClaimControllerResizeError PersistentVolumeClaimConditionType = "ControllerResizeError" + // PersistentVolumeClaimNodeResizeError indicates an error while resizing volume for size in the node. + PersistentVolumeClaimNodeResizeError PersistentVolumeClaimConditionType = "NodeResizeError" + // Applying the target VolumeAttributesClass encountered an error PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError" // Volume is being modified @@ -606,18 +688,19 @@ const ( // State set when resize controller starts resizing the volume in control-plane. 
PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress" - // State set when resize has failed in resize controller with a terminal error. + // State set when resize has failed in resize controller with a terminal unrecoverable error. // Transient errors such as timeout should not set this status and should leave allocatedResourceStatus // unmodified, so as resize controller can resume the volume expansion. - PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed" + PersistentVolumeClaimControllerResizeInfeasible ClaimResourceStatus = "ControllerResizeInfeasible" // State set when resize controller has finished resizing the volume but further resizing of volume // is needed on the node. PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending" // State set when kubelet starts resizing the volume. PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress" - // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed - PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed" + // State set when resizing has failed in kubelet with a terminal unrecoverable error. Transient errors + // shouldn't set this status + PersistentVolumeClaimNodeResizeInfeasible ClaimResourceStatus = "NodeResizeInfeasible" ) // +enum @@ -654,8 +737,13 @@ type ModifyVolumeStatus struct { // PersistentVolumeClaimCondition contains details about state of pvc type PersistentVolumeClaimCondition struct { - Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Type is the type of the condition. + // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about + Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // lastProbeTime is the time we probed the condition. // +optional LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` @@ -763,13 +851,13 @@ type PersistentVolumeClaimStatus struct { AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"` // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). 
// +featureGate=VolumeAttributesClass // +optional CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"` // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. // When this is unset, there is no ModifyVolume operation being attempted. - // This is an alpha field and requires enabling VolumeAttributesClass feature. + // This is a beta field and requires enabling VolumeAttributesClass feature (off by default). // +featureGate=VolumeAttributesClass // +optional ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"` @@ -943,16 +1031,19 @@ type RBDVolumeSource struct { // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // secretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. @@ -988,16 +1079,19 @@ type RBDPersistentVolumeSource struct { // Default is rbd. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="rbd" RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // user is the rados user name. // Default is admin. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="admin" RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it // +optional + // +default="/etc/ceph/keyring" Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // secretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. @@ -1426,6 +1520,7 @@ type ISCSIVolumeSource struct { // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -1473,6 +1568,7 @@ type ISCSIPersistentVolumeSource struct { // iscsiInterface is the interface Name that uses an iSCSI transport. // Defaults to 'default' (tcp). // +optional + // +default="default" ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // fsType is the filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. 
@@ -1613,17 +1709,21 @@ type AzureDiskVolumeSource struct { DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"` // cachingMode is the Host Caching mode: None, Read Only, Read Write. // +optional + // +default=ref(AzureDataDiskCachingReadWrite) CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"` // fsType is Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. // +optional + // +default="ext4" FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. // +optional + // +default=false ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` // kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + // +default=ref(AzureSharedBlobDisk) Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"` } @@ -1662,6 +1762,7 @@ type ScaleIOVolumeSource struct { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` // volumeName is the name of a volume already created in the ScaleIO system // that is associated with this volume source. @@ -1671,6 +1772,7 @@ type ScaleIOVolumeSource struct { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs". // +optional + // +default="xfs" FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` // readOnly Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. @@ -1699,6 +1801,7 @@ type ScaleIOPersistentVolumeSource struct { // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. // Default is ThinProvisioned. // +optional + // +default="ThinProvisioned" StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` // volumeName is the name of a volume already created in the ScaleIO system // that is associated with this volume source. @@ -1708,6 +1811,7 @@ type ScaleIOPersistentVolumeSource struct { // Ex. "ext4", "xfs", "ntfs". // Default is "xfs" // +optional + // +default="xfs" FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` // readOnly defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. @@ -1891,7 +1995,8 @@ type ClusterTrustBundleProjection struct { // Represents a projected volume source type ProjectedVolumeSource struct { - // sources is the list of volume projections + // sources is the list of volume projections. Each entry in this list + // handles one source. 
// +optional // +listType=atomic Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` @@ -1905,10 +2010,9 @@ type ProjectedVolumeSource struct { DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` } -// Projection that may be projected along with other supported volume types +// Projection that may be projected along with other supported volume types. +// Exactly one of these fields must be set. type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - // secret information about the secret data to project // +optional Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` @@ -1965,7 +2069,7 @@ type KeyToPath struct { Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"` } -// Local represents directly-attached storage with node affinity (Beta feature) +// Local represents directly-attached storage with node affinity type LocalVolumeSource struct { // path of the full path to the volume on the node. // It can be either a directory or block device (disk, partition, ...). @@ -1979,7 +2083,7 @@ type LocalVolumeSource struct { FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` } -// Represents storage that is managed by an external CSI volume driver (Beta feature) +// Represents storage that is managed by an external CSI volume driver type CSIPersistentVolumeSource struct { // driver is the name of the driver to use for this volume. // Required. @@ -2426,6 +2530,7 @@ type TCPSocketAction struct { Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` } +// GRPCAction specifies an action involving a GRPC service. type GRPCAction struct { // Port number of the gRPC service. Number must be in the range 1 to 65535. Port int32 `json:"port" protobuf:"bytes,1,opt,name=port"` @@ -2631,6 +2736,13 @@ type ResourceClaim struct { // the Pod where this field is used. It makes that resource available // inside a container. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Request is the name chosen for a request in the referenced claim. + // If empty, everything from the claim is made available, otherwise + // only the result of this request. + // + // +optional + Request string `json:"request,omitempty" protobuf:"bytes,2,opt,name=request"` } const ( @@ -2834,17 +2946,16 @@ type Container struct { // ProbeHandler defines a specific action that should be taken in a probe. // One and only one of the fields must be specified. type ProbeHandler struct { - // Exec specifies the action to take. + // Exec specifies a command to execute in the container. // +optional Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` - // HTTPGet specifies the http request to perform. + // HTTPGet specifies an HTTP GET request to perform. // +optional HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` - // TCPSocket specifies an action involving a TCP port. + // TCPSocket specifies a connection to a TCP port. // +optional TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` - - // GRPC specifies an action involving a GRPC port. + // GRPC specifies a GRPC HealthCheckRequest. // +optional GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"` } @@ -2852,18 +2963,18 @@ type ProbeHandler struct { // LifecycleHandler defines a specific action that should be taken in a lifecycle // hook. 
One and only one of the fields, except TCPSocket must be specified. type LifecycleHandler struct { - // Exec specifies the action to take. + // Exec specifies a command to execute in the container. // +optional Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` - // HTTPGet specifies the http request to perform. + // HTTPGet specifies an HTTP GET request to perform. // +optional HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` // Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept - // for the backward compatibility. There are no validation of this field and - // lifecycle hooks will fail in runtime when tcp handler is specified. + // for backward compatibility. There is no validation of this field and + // lifecycle hooks will fail at runtime when it is specified. // +optional TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` - // Sleep represents the duration that the container should sleep before being terminated. + // Sleep represents a duration that the container should sleep. // +featureGate=PodLifecycleSleepAction // +optional Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"` @@ -3014,7 +3125,7 @@ type ContainerStatus struct { // AllocatedResources represents the compute resources allocated for this container by the // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission // and after successfully admitting desired pod resize. - // +featureGate=InPlacePodVerticalScaling + // +featureGate=InPlacePodVerticalScalingAllocatedStatus // +optional AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"` // Resources represents the compute resource requests and limits that have been successfully @@ -3030,6 +3141,96 @@ type ContainerStatus struct { // +listMapKey=mountPath // +featureGate=RecursiveReadOnlyMounts VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"` + // User represents user identity information initially attached to the first process of the container + // +featureGate=SupplementalGroupsPolicy + // +optional + User *ContainerUser `json:"user,omitempty" protobuf:"bytes,13,opt,name=user,casttype=ContainerUser"` + // AllocatedResourcesStatus represents the status of various resources + // allocated for this Pod. + // +featureGate=ResourceHealthStatus + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"` +} + +// ResourceStatus represents the status of a single resource allocated to a Pod. +type ResourceStatus struct { + // Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. + // For DRA resources, the value must be "claim:/". + // When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container. + // +required + Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"` + // List of unique resources health. Each element in the list contains an unique resource ID and its health. 
+ // At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. + // If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share. + // See ResourceID type definition for a specific format it has in various use cases. + // +listType=map + // +listMapKey=resourceID + Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"` +} + +type ResourceHealthStatus string + +const ( + ResourceHealthStatusHealthy ResourceHealthStatus = "Healthy" + ResourceHealthStatusUnhealthy ResourceHealthStatus = "Unhealthy" + ResourceHealthStatusUnknown ResourceHealthStatus = "Unknown" +) + +// ResourceID is calculated based on the source of this resource health information. +// For DevicePlugin: +// +// DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73 +// +// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node. +// For DRA: +// +// //: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster. +type ResourceID string + +// ResourceHealth represents the health of a resource. It has the latest device health information. +// This is a part of KEP https://kep.k8s.io/4680. +type ResourceHealth struct { + // ResourceID is the unique identifier of the resource. See the ResourceID type for more information. + ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"` + // Health of the resource. + // can be one of: + // - Healthy: operates as normal + // - Unhealthy: reported unhealthy. We consider this a temporary health issue + // since we do not have a mechanism today to distinguish + // temporary and permanent issues. + // - Unknown: The status cannot be determined. + // For example, Device Plugin got unregistered and hasn't been re-registered since. + // + // In future we may want to introduce the PermanentlyUnhealthy Status. + Health ResourceHealthStatus `json:"health,omitempty" protobuf:"bytes,2,name=health"` +} + +// ContainerUser represents user identity information +type ContainerUser struct { + // Linux holds user identity information initially attached to the first process of the containers in Linux. + // Note that the actual running identity can be changed if the process has enough privilege to do so. + // +optional + Linux *LinuxContainerUser `json:"linux,omitempty" protobuf:"bytes,1,opt,name=linux,casttype=LinuxContainerUser"` + + // Windows holds user identity information initially attached to the first process of the containers in Windows + // This is just reserved for future use. 
+ // Windows *WindowsContainerUser +} + +// LinuxContainerUser represents user identity information in Linux containers +type LinuxContainerUser struct { + // UID is the primary uid initially attached to the first process in the container + UID int64 `json:"uid" protobuf:"varint,1,name=uid"` + // GID is the primary gid initially attached to the first process in the container + GID int64 `json:"gid" protobuf:"varint,2,name=gid"` + // SupplementalGroups are the supplemental groups initially attached to the first process in the container + // +optional + // +listType=atomic + SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,3,rep,name=supplementalGroups"` } // PodPhase is a label for the condition of a pod at the current time. @@ -3093,7 +3294,7 @@ const ( // during scheduling, for example due to nodeAffinity parsing errors. PodReasonSchedulerError = "SchedulerError" - // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination + // PodReasonTerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination // is initiated by kubelet PodReasonTerminationByKubelet = "TerminationByKubelet" @@ -3426,7 +3627,8 @@ type PodAffinityTerm struct { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both matchLabelKeys and labelSelector. // Also, matchLabelKeys cannot be set when labelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"` @@ -3438,7 +3640,8 @@ type PodAffinityTerm struct { // pod labels will be ignored. The default value is empty. // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. // Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + // // +listType=atomic // +optional MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"` @@ -3667,9 +3870,11 @@ type PodSpec struct { // +optional AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. + // NodeName indicates in which node this pod is scheduled. + // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + // This field should not be used to express a desire for the pod to be scheduled on a specific node. + // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename // +optional NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` // Host networking requested for this pod. Use the host's network namespace. 
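The ContainerStatus additions above (User, AllocatedResourcesStatus, ResourceStatus, ResourceHealth) are new API surface pulled in by this bump. Below is a minimal sketch of reading per-resource health from pod status, again assuming the standard k8s.io/api/core/v1 import path; the function name unhealthyResourceIDs is illustrative only.

package example

import corev1 "k8s.io/api/core/v1"

// unhealthyResourceIDs collects the IDs of allocated resources that a pod's
// containers report as Unhealthy via status.containerStatuses[*].allocatedResourcesStatus.
func unhealthyResourceIDs(pod *corev1.Pod) []corev1.ResourceID {
	var ids []corev1.ResourceID
	for _, cs := range pod.Status.ContainerStatuses {
		for _, rs := range cs.AllocatedResourcesStatus {
			for _, rh := range rs.Resources {
				if rh.Health == corev1.ResourceHealthStatusUnhealthy {
					ids = append(ids, rh.ResourceID)
				}
			}
		}
	}
	return ids
}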
@@ -3826,6 +4031,7 @@ type PodSpec struct { // - spec.securityContext.runAsUser // - spec.securityContext.runAsGroup // - spec.securityContext.supplementalGroups + // - spec.securityContext.supplementalGroupsPolicy // - spec.containers[*].securityContext.appArmorProfile // - spec.containers[*].securityContext.seLinuxOptions // - spec.containers[*].securityContext.seccompProfile @@ -3881,9 +4087,26 @@ type PodSpec struct { // +featureGate=DynamicResourceAllocation // +optional ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"` + // Resources is the total amount of CPU and Memory resources required by all + // containers in the pod. It supports specifying Requests and Limits for + // "cpu" and "memory" resource names only. ResourceClaims are not supported. + // + // This field enables fine-grained control over resource allocation for the + // entire pod, allowing resource sharing among containers in a pod. + // TODO: For beta graduation, expand this comment with a detailed explanation. + // + // This is an alpha field and requires enabling the PodLevelResources feature + // gate. + // + // +featureGate=PodLevelResources + // +optional + Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"` } -// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. +// PodResourceClaim references exactly one ResourceClaim, either directly +// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim +// for the pod. +// // It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. // Containers that need access to the ResourceClaim reference it with this name. type PodResourceClaim struct { @@ -3891,18 +4114,17 @@ type PodResourceClaim struct { // This must be a DNS_LABEL. Name string `json:"name" protobuf:"bytes,1,name=name"` - // Source describes where to find the ResourceClaim. - Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"` -} + // Source is tombstoned since Kubernetes 1.31 where it got replaced with + // the inlined fields below. + // + // Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"` -// ClaimSource describes a reference to a ResourceClaim. -// -// Exactly one of these fields should be set. Consumers of this type must -// treat an empty object as if it has an unknown value. -type ClaimSource struct { // ResourceClaimName is the name of a ResourceClaim object in the same // namespace as this pod. - ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,1,opt,name=resourceClaimName"` + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. + ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,3,opt,name=resourceClaimName"` // ResourceClaimTemplateName is the name of a ResourceClaimTemplate // object in the same namespace as this pod. @@ -3916,7 +4138,10 @@ type ClaimSource struct { // This field is immutable and no changes will be made to the // corresponding ResourceClaim by the control plane after creating the // ResourceClaim. - ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimTemplateName"` + // + // Exactly one of ResourceClaimName and ResourceClaimTemplateName must + // be set. 
+ ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,4,opt,name=resourceClaimTemplateName"` } // PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim @@ -3929,7 +4154,7 @@ type PodResourceClaimStatus struct { Name string `json:"name" protobuf:"bytes,1,name=name"` // ResourceClaimName is the name of the ResourceClaim that was - // generated for the Pod in the namespace of the Pod. It this is + // generated for the Pod in the namespace of the Pod. If this is // unset, then generating a ResourceClaim was not necessary. The // pod.spec.resourceClaims entry can be ignored in this case. // @@ -4137,6 +4362,39 @@ const ( FSGroupChangeAlways PodFSGroupChangePolicy = "Always" ) +// SupplementalGroupsPolicy defines how supplemental groups +// of the first container processes are calculated. +// +enum +type SupplementalGroupsPolicy string + +const ( + // SupplementalGroupsPolicyMerge means that the container's provided + // SupplementalGroups and FsGroup (specified in SecurityContext) will be + // merged with the primary user's groups as defined in the container image + // (in /etc/group). + SupplementalGroupsPolicyMerge SupplementalGroupsPolicy = "Merge" + // SupplementalGroupsPolicyStrict means that the container's provided + // SupplementalGroups and FsGroup (specified in SecurityContext) will be + // used instead of any groups defined in the container image. + SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict" +) + +// PodSELinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. +type PodSELinuxChangePolicy string + +const ( + // Recursive relabeling of all Pod volumes by the container runtime. + // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + SELinuxChangePolicyRecursive PodSELinuxChangePolicy = "Recursive" + // MountOption mounts all eligible Pod volumes with `-o context` mount option. + // This requires all Pods that share the same volume to use the same SELinux label. + // It is not possible to share the same volume among privileged and unprivileged Pods. + // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + // CSIDriver instance. Other volumes are always re-labelled recursively. + SELinuxChangePolicyMountOption PodSELinuxChangePolicy = "MountOption" +) + // PodSecurityContext holds pod-level security attributes and common container settings. // Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -4179,16 +4437,27 @@ type PodSecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID, the fsGroup (if specified), and group memberships - // defined in the container image for the uid of the container process. If unspecified, - // no additional groups are added to any container. Note that group memberships - // defined in the container image for the uid of the container process are still effective, - // even if they are not included in this list. 
+ // A list of groups applied to the first process run in each container, in + // addition to the container's primary GID and fsGroup (if specified). If + // the SupplementalGroupsPolicy feature is enabled, the + // supplementalGroupsPolicy field determines whether these are in addition + // to or instead of any group memberships defined in the container image. + // If unspecified, no additional groups are added, though group memberships + // defined in the container image may still be used, depending on the + // supplementalGroupsPolicy field. // Note that this field cannot be set when spec.os.name is windows. // +optional // +listType=atomic SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` + // Defines how supplemental groups of the first container processes are calculated. + // Valid values are "Merge" and "Strict". If not specified, "Merge" is used. + // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled + // and the container runtime must implement support for this feature. + // Note that this field cannot be set when spec.os.name is windows. + // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34 + // +featureGate=SupplementalGroupsPolicy + // +optional + SupplementalGroupsPolicy *SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty" protobuf:"bytes,12,opt,name=supplementalGroupsPolicy"` // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: @@ -4224,6 +4493,32 @@ type PodSecurityContext struct { // Note that this field cannot be set when spec.os.name is windows. // +optional AppArmorProfile *AppArmorProfile `json:"appArmorProfile,omitempty" protobuf:"bytes,11,opt,name=appArmorProfile"` + // seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. + // It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. + // Valid values are "MountOption" and "Recursive". + // + // "Recursive" means relabeling of all files on all Pod volumes by the container runtime. + // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node. + // + // "MountOption" mounts all eligible Pod volumes with `-o context` mount option. + // This requires all Pods that share the same volume to use the same SELinux label. + // It is not possible to share the same volume among privileged and unprivileged Pods. + // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes + // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their + // CSIDriver instance. Other volumes are always re-labelled recursively. + // "MountOption" value is allowed only when SELinuxMount feature gate is enabled. + // + // If not specified and SELinuxMount feature gate is enabled, "MountOption" is used. + // If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes + // and "Recursive" for all other volumes. + // + // This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers. + // + // All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. 
+ // Note that this field cannot be set when spec.os.name is windows. + // +featureGate=SELinuxChangePolicy + // +optional + SELinuxChangePolicy *PodSELinuxChangePolicy `json:"seLinuxChangePolicy,omitempty" protobuf:"bytes,13,opt,name=seLinuxChangePolicy"` } // SeccompProfile defines a pod/container's seccomp profile settings. @@ -4331,8 +4626,10 @@ type PodDNSConfig struct { // PodDNSConfigOption defines DNS resolver options of a pod. type PodDNSConfigOption struct { + // Name is this DNS resolver option's name. // Required. Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Value is this DNS resolver option's value. // +optional Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` } @@ -4340,13 +4637,15 @@ type PodDNSConfigOption struct { // PodIP represents a single IP address allocated to the pod. type PodIP struct { // IP is the IP address assigned to the pod - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` } // HostIP represents a single IP address allocated to the host. type HostIP struct { // IP is the IP address assigned to the host - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` } // EphemeralContainerCommon is a copy of all fields in Container to be inlined in @@ -4623,24 +4922,45 @@ type PodStatus struct { // +optional StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` - // The list has one entry per init container in the manifest. The most recent successful + // Statuses of init containers in this pod. The most recent successful non-restartable // init container will have ready = true, the most recently started container will have // startTime set. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // Each init container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status // +listType=atomic InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` - // The list has one entry per container in the manifest. + // Statuses of containers in this pod. + // Each container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. 
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional // +listType=atomic ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` - // Status for any ephemeral containers that have run in this pod. + + // Statuses for any ephemeral containers that have run in this pod. + // Each ephemeral container in the pod should have at most one status in this list, + // and all statuses should be for containers in the pod. + // However this is not enforced. + // If a status for a non-existent container is present in the list, or the list has duplicate names, + // the behavior of various Kubernetes components is not defined and those statuses might be + // ignored. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional // +listType=atomic EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` @@ -4663,6 +4983,7 @@ type PodStatus struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded type PodStatusResult struct { @@ -4682,7 +5003,9 @@ type PodStatusResult struct { // +genclient // +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers +// +genclient:method=UpdateResize,verb=update,subresource=resize // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Pod is a collection of containers that can run on a host. This resource is created // by clients and scheduled onto hosts. @@ -4708,6 +5031,7 @@ type Pod struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodList is a list of Pods. type PodList struct { @@ -4737,6 +5061,7 @@ type PodTemplateSpec struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodTemplate describes a template for creating copies of a predefined pod. type PodTemplate struct { @@ -4753,6 +5078,7 @@ type PodTemplate struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodTemplateList is a list of PodTemplates. type PodTemplateList struct { @@ -4867,6 +5193,7 @@ type ReplicationControllerCondition struct { // +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ReplicationController represents the configuration of a replication controller. 
type ReplicationController struct { @@ -4893,6 +5220,7 @@ type ReplicationController struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ReplicationControllerList is a collection of replication controllers. type ReplicationControllerList struct { @@ -5367,7 +5695,7 @@ type ServiceSpec struct { // not set, the implementation will apply its default routing strategy. If set // to "PreferClose", implementations should prioritize endpoints that are // topologically close (e.g., same zone). - // This is an alpha field and requires enabling ServiceTrafficDistribution feature. + // This is a beta field and requires enabling ServiceTrafficDistribution feature. // +featureGate=ServiceTrafficDistribution // +optional TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"` @@ -5437,6 +5765,7 @@ type ServicePort struct { // +genclient // +genclient:skipVerbs=deleteCollection // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Service is a named abstraction of software service (for example, mysql) consisting of local port // (for example 3306) that the proxy listens on, and the selector that determines which pods @@ -5468,6 +5797,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceList holds a list of services. type ServiceList struct { @@ -5484,6 +5814,7 @@ type ServiceList struct { // +genclient // +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceAccount binds together: // * a name, understood by users, and perhaps by peripheral systems, for an identity @@ -5498,6 +5829,8 @@ type ServiceAccount struct { // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true". + // The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32. + // Prefer separate namespaces to isolate access to mounted secrets. // This field should not be used to find auto-generated service account token secrets for use outside of pods. // Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. // More info: https://kubernetes.io/docs/concepts/configuration/secret @@ -5523,6 +5856,7 @@ type ServiceAccount struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ServiceAccountList is a list of ServiceAccount objects type ServiceAccountList struct { @@ -5539,6 +5873,7 @@ type ServiceAccountList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Endpoints is a collection of endpoints that implement the actual service. Example: // @@ -5660,6 +5995,7 @@ type EndpointPort struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // EndpointsList is a list of endpoints. 
type EndpointsList struct { @@ -5772,13 +6108,16 @@ type NodeDaemonEndpoints struct { KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` } -// NodeRuntimeHandlerFeatures is a set of runtime features. +// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler. type NodeRuntimeHandlerFeatures struct { // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts. // +featureGate=RecursiveReadOnlyMounts // +optional RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"` - // Reserved: UserNamespaces *bool (varint 2, for consistency with CRI API) + // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes. + // +featureGate=UserNamespacesSupport + // +optional + UserNamespaces *bool `json:"userNamespaces,omitempty" protobuf:"varint,2,opt,name=userNamespaces"` } // NodeRuntimeHandler is a set of runtime handler information. @@ -5792,6 +6131,15 @@ type NodeRuntimeHandler struct { Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"` } +// NodeFeatures describes the set of features implemented by the CRI implementation. +// The features contained in the NodeFeatures should depend only on the cri implementation +// independent of runtime handlers. +type NodeFeatures struct { + // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. + // +optional + SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty" protobuf:"varint,1,opt,name=supplementalGroupsPolicy"` +} + // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. type NodeSystemInfo struct { // MachineID reported by the node. For unique machine identification @@ -5812,7 +6160,7 @@ type NodeSystemInfo struct { ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` // Kubelet Version reported by the node. KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` - // KubeProxy Version reported by the node. + // Deprecated: KubeProxy Version reported by the node. KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` // The Operating System reported by the node OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` @@ -5870,7 +6218,7 @@ type NodeConfigStatus struct { // NodeStatus is information about the current status of a node. type NodeStatus struct { // Capacity represents the total resources of a node. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity // +optional Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // Allocatable represents the resources of a node that are available for scheduling. @@ -5883,7 +6231,7 @@ type NodeStatus struct { // +optional Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` // Conditions is an array of current observed node conditions. 
- // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition + // More info: https://kubernetes.io/docs/reference/node/node-status/#condition // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -5892,7 +6240,7 @@ type NodeStatus struct { Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` // List of addresses reachable to the node. // Queried from cloud provider, if available. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // More info: https://kubernetes.io/docs/reference/node/node-status/#addresses // Note: This field is declared as mergeable, but the merge key is not sufficiently // unique, which can cause data corruption when it is merged. Callers should instead // use a full-replacement patch. See https://pr.k8s.io/79391 for an example. @@ -5910,7 +6258,7 @@ type NodeStatus struct { // +optional DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` // Set of ids/uuids to uniquely identify the node. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#info + // More info: https://kubernetes.io/docs/reference/node/node-status/#info // +optional NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` // List of container images on this node @@ -5930,9 +6278,14 @@ type NodeStatus struct { Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"` // The available runtime handlers. // +featureGate=RecursiveReadOnlyMounts + // +featureGate=UserNamespacesSupport // +optional // +listType=atomic RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"` + // Features describes the set of features implemented by the CRI implementation. + // +featureGate=SupplementalGroupsPolicy + // +optional + Features *NodeFeatures `json:"features,omitempty" protobuf:"bytes,13,rep,name=features"` } type UniqueVolumeName string @@ -6128,6 +6481,7 @@ type ResourceList map[ResourceName]resource.Quantity // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). @@ -6152,6 +6506,7 @@ type Node struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // NodeList is the whole list of all Nodes which have been registered with master. type NodeList struct { @@ -6238,10 +6593,13 @@ type NamespaceCondition struct { Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"` // Status of the condition, one of True, False, Unknown. Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time the condition transitioned from one status to another. // +optional LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // Unique, one-word, CamelCase reason for the condition's last transition. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human-readable message indicating details about last transition. 
// +optional Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } @@ -6250,6 +6608,7 @@ type NamespaceCondition struct { // +genclient:nonNamespaced // +genclient:skipVerbs=deleteCollection // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Namespace provides a scope for Names. // Use of multiple namespaces is optional. @@ -6272,6 +6631,7 @@ type Namespace struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // NamespaceList is a list of Namespaces. type NamespaceList struct { @@ -6287,9 +6647,9 @@ type NamespaceList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. type Binding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -6309,8 +6669,18 @@ type Preconditions struct { UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } +const ( + // LogStreamStdout is the stream type for stdout. + LogStreamStdout = "Stdout" + // LogStreamStderr is the stream type for stderr. + LogStreamStderr = "Stderr" + // LogStreamAll represents the combined stdout and stderr. + LogStreamAll = "All" +) + // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodLogOptions is the query options for a Pod's logs REST call. type PodLogOptions struct { @@ -6342,7 +6712,8 @@ type PodLogOptions struct { // +optional Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime + // logs are shown from the creation of the container or sinceSeconds or sinceTime. + // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All". // +optional TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` // If set, the number of bytes to read from the server before terminating the @@ -6359,10 +6730,19 @@ type PodLogOptions struct { // the actual log data coming from the real kubelet). // +optional InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"` + + // Specify which container log stream to return to the client. + // Acceptable values are "All", "Stdout" and "Stderr". If not specified, "All" is used, and both stdout and stderr + // are returned interleaved. + // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All". + // +featureGate=PodLogsQuerySplitStreams + // +optional + Stream *string `json:"stream,omitempty" protobuf:"varint,10,opt,name=stream"` } // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.1 // PodAttachOptions is the query options to a Pod's remote attach call. 
// --- @@ -6401,6 +6781,7 @@ type PodAttachOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodExecOptions is the query options to a Pod's remote exec call. // --- @@ -6439,6 +6820,7 @@ type PodExecOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // PodPortForwardOptions is the query options to a Pod's port forward call // when using WebSockets. @@ -6458,6 +6840,7 @@ type PodPortForwardOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // PodProxyOptions is the query options to a Pod's proxy call. type PodProxyOptions struct { @@ -6470,6 +6853,7 @@ type PodProxyOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // NodeProxyOptions is the query options to a Node's proxy call. type NodeProxyOptions struct { @@ -6482,6 +6866,7 @@ type NodeProxyOptions struct { // +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ServiceProxyOptions is the query options to a Service's proxy call. type ServiceProxyOptions struct { @@ -6553,13 +6938,23 @@ type ObjectReference struct { // LocalObjectReference contains enough information to let you locate the // referenced object inside the same namespace. +// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 3. We cannot easily change it. Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +structType=atomic type LocalObjectReference struct { // Name of the referent. // This field is effectively required, but due to backwards compatibility is // allowed to be empty. Instances of this type with an empty value here are // almost certainly wrong. - // TODO: Add other useful fields. apiVersion, kind, uid? // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +optional // +default="" @@ -6570,6 +6965,20 @@ type LocalObjectReference struct { // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. 
+// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 3. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity +// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple +// and the version of the actual struct is irrelevant. +// 4. We cannot easily change it. Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +structType=atomic type TypedLocalObjectReference struct { // APIGroup is the group for the resource being referenced. @@ -6584,6 +6993,7 @@ type TypedLocalObjectReference struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // SerializedReference is a reference to serialized object. type SerializedReference struct { @@ -6613,6 +7023,7 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Event is a report of an event somewhere in the cluster. Events // have a limited retention time and triggers and messages may evolve @@ -6697,6 +7108,7 @@ type EventSeries struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // EventList is a list of events. type EventList struct { @@ -6711,6 +7123,7 @@ type EventList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // List holds a list of objects, which may not be known by the server. type List metav1.List @@ -6758,6 +7171,7 @@ type LimitRangeSpec struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // LimitRange sets resource usage limits for each kind of resource in a Namespace. type LimitRange struct { @@ -6774,6 +7188,7 @@ type LimitRange struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // LimitRangeList is a list of LimitRange items. type LimitRangeList struct { @@ -6822,6 +7237,8 @@ const ( ResourceLimitsMemory ResourceName = "limits.memory" // Local ephemeral storage limit, in bytes. 
(500Gi = 500GiB = 500 * 1024 * 1024 * 1024) ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" + // resource.k8s.io devices requested with a certain DeviceClass, number + ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices" ) // The following identify resource prefix for Kubernetes object types @@ -6922,6 +7339,7 @@ type ResourceQuotaStatus struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ResourceQuota sets aggregate quota restrictions enforced per namespace type ResourceQuota struct { @@ -6943,6 +7361,7 @@ type ResourceQuota struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ResourceQuotaList is a list of ResourceQuota items. type ResourceQuotaList struct { @@ -6959,6 +7378,7 @@ type ResourceQuotaList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Secret holds secret data of a certain type. The total bytes of the values in // the Data field must be less than MaxSecretSize bytes. @@ -7085,6 +7505,7 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // SecretList is a list of Secret. type SecretList struct { @@ -7101,6 +7522,7 @@ type SecretList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ConfigMap holds configuration data for pods to consume. type ConfigMap struct { @@ -7137,6 +7559,7 @@ type ConfigMap struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.2 // ConfigMapList is a resource containing a list of ConfigMap objects. type ConfigMapList struct { @@ -7179,6 +7602,7 @@ type ComponentCondition struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // ComponentStatus (and ComponentStatusList) holds the cluster validation info. // Deprecated: This API is deprecated in v1.19+ @@ -7199,6 +7623,7 @@ type ComponentStatus struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // Status of all the conditions for the component as a list of ComponentStatus objects. // Deprecated: This API is deprecated in v1.19+ @@ -7332,7 +7757,7 @@ type SecurityContext struct { // +optional AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"` // procMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for + // The default value is Default which uses the container runtime defaults for // readonly paths and masked paths. // This requires the ProcMountType feature flag to be enabled. // Note that this field cannot be set when spec.os.name is windows. @@ -7410,6 +7835,7 @@ type WindowsSecurityContextOptions struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.0 // RangeAllocation is not a public type. 
type RangeAllocation struct { @@ -7486,7 +7912,6 @@ const ( ) // PortStatus represents the error condition of a service port - type PortStatus struct { // Port is the port number of the service port of which status is recorded here Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` @@ -7519,3 +7944,23 @@ const ( // the destination set to the node's IP and port or the pod's IP and port. LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" ) + +// ImageVolumeSource represents a image volume resource. +type ImageVolumeSource struct { + // Required: Image or artifact reference to be used. + // Behaves in the same way as pod.spec.containers[*].image. + // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Reference string `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` + + // Policy for pulling OCI objects. Possible values are: + // Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. + // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. + // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // +optional + PullPolicy PullPolicy `json:"pullPolicy,omitempty" protobuf:"bytes,2,opt,name=pullPolicy,casttype=PullPolicy"` +} diff --git a/constraint/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index c54f2a2fe..89ce3d230 100644 --- a/constraint/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -117,7 +117,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string { } var map_Binding = map[string]string{ - "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", + "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "target": "The target object that you want to bind to the standard object.", } @@ -127,7 +127,7 @@ func (Binding) SwaggerDoc() map[string]string { } var map_CSIPersistentVolumeSource = map[string]string{ - "": "Represents storage that is managed by an external CSI volume driver (Beta feature)", + "": "Represents storage that is managed by an external CSI volume driver", "driver": "driver is the name of the driver to use for this volume. Required.", "volumeHandle": "volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", "readOnly": "readOnly value to pass to ControllerPublishVolumeRequest. 
Defaults to false (read/write).", @@ -219,16 +219,6 @@ func (CinderVolumeSource) SwaggerDoc() map[string]string { return map_CinderVolumeSource } -var map_ClaimSource = map[string]string{ - "": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value.", - "resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.", - "resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.", -} - -func (ClaimSource) SwaggerDoc() map[string]string { - return map_ClaimSource -} - var map_ClientIPConfig = map[string]string{ "": "ClientIPConfig represents the configurations of Client IP based session affinity.", "timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).", @@ -469,25 +459,36 @@ func (ContainerStateWaiting) SwaggerDoc() map[string]string { } var map_ContainerStatus = map[string]string{ - "": "ContainerStatus contains details for the current status of this container.", - "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", - "state": "State holds details about the container's current condition.", - "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", - "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", - "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", - "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", - "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", - "containerID": "ContainerID is the ID of the container in the format '://'. 
Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", - "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.", - "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", - "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", - "volumeMounts": "Status of volume mounts.", + "": "ContainerStatus contains details for the current status of this container.", + "name": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", + "state": "State holds details about the container's current condition.", + "lastState": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", + "ready": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", + "restartCount": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.", + "image": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", + "imageID": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", + "containerID": "ContainerID is the ID of the container in the format '://'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", + "started": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. 
The null value must be treated the same as false.", + "allocatedResources": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", + "resources": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", + "volumeMounts": "Status of volume mounts.", + "user": "User represents user identity information initially attached to the first process of the container", + "allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.", } func (ContainerStatus) SwaggerDoc() map[string]string { return map_ContainerStatus } +var map_ContainerUser = map[string]string{ + "": "ContainerUser represents user identity information", + "linux": "Linux holds user identity information initially attached to the first process of the containers in Linux. Note that the actual running identity can be changed if the process has enough privilege to do so.", +} + +func (ContainerUser) SwaggerDoc() map[string]string { + return map_ContainerUser +} + var map_DaemonEndpoint = map[string]string{ "": "DaemonEndpoint contains information about a single Daemon endpoint.", "Port": "Port number of the given endpoint.", @@ -801,6 +802,7 @@ func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { } var map_GRPCAction = map[string]string{ + "": "GRPCAction specifies an action involving a GRPC service.", "port": "Port number of the gRPC service. Number must be in the range 1 to 65535.", "service": "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.", } @@ -933,6 +935,16 @@ func (ISCSIVolumeSource) SwaggerDoc() map[string]string { return map_ISCSIVolumeSource } +var map_ImageVolumeSource = map[string]string{ + "": "ImageVolumeSource represents a image volume resource.", + "reference": "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "pullPolicy": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. 
Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.", +} + +func (ImageVolumeSource) SwaggerDoc() map[string]string { + return map_ImageVolumeSource +} + var map_KeyToPath = map[string]string{ "": "Maps a string key to a path within a volume.", "key": "key is the key to project.", @@ -956,10 +968,10 @@ func (Lifecycle) SwaggerDoc() map[string]string { var map_LifecycleHandler = map[string]string{ "": "LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.", - "exec": "Exec specifies the action to take.", - "httpGet": "HTTPGet specifies the http request to perform.", - "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.", - "sleep": "Sleep represents the duration that the container should sleep before being terminated.", + "exec": "Exec specifies a command to execute in the container.", + "httpGet": "HTTPGet specifies an HTTP GET request to perform.", + "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for backward compatibility. There is no validation of this field and lifecycle hooks will fail at runtime when it is specified.", + "sleep": "Sleep represents a duration that the container should sleep.", } func (LifecycleHandler) SwaggerDoc() map[string]string { @@ -1009,6 +1021,17 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string { return map_LimitRangeSpec } +var map_LinuxContainerUser = map[string]string{ + "": "LinuxContainerUser represents user identity information in Linux containers", + "uid": "UID is the primary uid initially attached to the first process in the container", + "gid": "GID is the primary gid initially attached to the first process in the container", + "supplementalGroups": "SupplementalGroups are the supplemental groups initially attached to the first process in the container", +} + +func (LinuxContainerUser) SwaggerDoc() map[string]string { + return map_LinuxContainerUser +} + var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", @@ -1040,7 +1063,7 @@ func (LocalObjectReference) SwaggerDoc() map[string]string { } var map_LocalVolumeSource = map[string]string{ - "": "Local represents directly-attached storage with node affinity (Beta feature)", + "": "Local represents directly-attached storage with node affinity", "path": "path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).", "fsType": "fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
The default value is to auto-select a filesystem if unspecified.", } @@ -1082,9 +1105,12 @@ func (Namespace) SwaggerDoc() map[string]string { } var map_NamespaceCondition = map[string]string{ - "": "NamespaceCondition contains details about state of namespace.", - "type": "Type of namespace controller condition.", - "status": "Status of the condition, one of True, False, Unknown.", + "": "NamespaceCondition contains details about state of namespace.", + "type": "Type of namespace controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "Unique, one-word, CamelCase reason for the condition's last transition.", + "message": "Human-readable message indicating details about last transition.", } func (NamespaceCondition) SwaggerDoc() map[string]string { @@ -1195,6 +1221,15 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { return map_NodeDaemonEndpoints } +var map_NodeFeatures = map[string]string{ + "": "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.", + "supplementalGroupsPolicy": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.", +} + +func (NodeFeatures) SwaggerDoc() map[string]string { + return map_NodeFeatures +} + var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", @@ -1225,8 +1260,9 @@ func (NodeRuntimeHandler) SwaggerDoc() map[string]string { } var map_NodeRuntimeHandlerFeatures = map[string]string{ - "": "NodeRuntimeHandlerFeatures is a set of runtime features.", + "": "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.", "recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.", + "userNamespaces": "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.", } func (NodeRuntimeHandlerFeatures) SwaggerDoc() map[string]string { @@ -1280,18 +1316,19 @@ func (NodeSpec) SwaggerDoc() map[string]string { var map_NodeStatus = map[string]string{ "": "NodeStatus is information about the current status of a node.", - "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity", "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", - "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", + "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/reference/node/node-status/#condition", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/reference/node/node-status/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", "daemonEndpoints": "Endpoints of daemons running on the Node.", - "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", + "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/reference/node/node-status/#info", "images": "List of container images on this node", "volumesInUse": "List of attachable volumes in use (mounted) by the node.", "volumesAttached": "List of volumes that are attached to the node.", "config": "Status of the config assigned to the node via the dynamic Kubelet config feature.", "runtimeHandlers": "The available runtime handlers.", + "features": "Features describes the set of features implemented by the CRI implementation.", } func (NodeStatus) SwaggerDoc() map[string]string { @@ -1307,7 +1344,7 @@ var map_NodeSystemInfo = map[string]string{ "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).", "kubeletVersion": "Kubelet Version reported by the node.", - "kubeProxyVersion": "KubeProxy Version reported by the node.", + "kubeProxyVersion": "Deprecated: KubeProxy Version reported by the node.", "operatingSystem": "The Operating System reported by the node", "architecture": "The Architecture reported by the node", } @@ -1365,6 +1402,8 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimCondition = map[string]string{ "": "PersistentVolumeClaimCondition contains details about state of pvc", + "type": "Type is the type of the condition. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about", + "status": "Status is the status of the condition. Can be True, False, Unknown. 
More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required", "lastProbeTime": "lastProbeTime is the time we probed the condition.", "lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.", "reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"Resizing\" that means the underlying persistent volume is being resized.", @@ -1395,7 +1434,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", - "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", + "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1410,8 +1449,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{ "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.", "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. 
Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", - "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.", - "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", + "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1450,28 +1489,28 @@ func (PersistentVolumeList) SwaggerDoc() map[string]string { var map_PersistentVolumeSource = map[string]string{ "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. 
Exactly one of its members must be set.", - "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "hostPath": "hostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "glusterfs": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "glusterfs": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "nfs": "nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md", "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. 
Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.", "fc": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", - "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.", + "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.", + "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.", + "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.", + "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.", + "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.", + "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.", + "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.", + "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. 
Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.", "local": "local represents directly-attached storage with node affinity", - "storageos": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", - "csi": "csi represents storage that is handled by an external CSI driver (Beta feature).", + "storageos": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. More info: https://examples.k8s.io/volumes/storageos/README.md", + "csi": "csi represents storage that is handled by an external CSI driver.", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1488,7 +1527,7 @@ var map_PersistentVolumeSpec = map[string]string{ "mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", - "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1500,7 +1539,7 @@ var map_PersistentVolumeStatus = map[string]string{ "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", "message": "message is a human-readable message indicating details about why the volume is in this state.", "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", - "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. 
This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).", + "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions.", } func (PersistentVolumeStatus) SwaggerDoc() map[string]string { @@ -1544,8 +1583,8 @@ var map_PodAffinityTerm = map[string]string{ "namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.", - "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", - "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. 
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", + "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -1601,8 +1640,9 @@ func (PodDNSConfig) SwaggerDoc() map[string]string { } var map_PodDNSConfigOption = map[string]string{ - "": "PodDNSConfigOption defines DNS resolver options of a pod.", - "name": "Required.", + "": "PodDNSConfigOption defines DNS resolver options of a pod.", + "name": "Name is this DNS resolver option's name. Required.", + "value": "Value is this DNS resolver option's value.", } func (PodDNSConfigOption) SwaggerDoc() map[string]string { @@ -1650,9 +1690,10 @@ var map_PodLogOptions = map[string]string{ "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", - "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".", "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", + "stream": "Specify which container log stream to return to the client. 
Acceptable values are \"All\", \"Stdout\" and \"Stderr\". If not specified, \"All\" is used, and both stdout and stderr are returned interleaved. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".", } func (PodLogOptions) SwaggerDoc() map[string]string { @@ -1696,9 +1737,10 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { } var map_PodResourceClaim = map[string]string{ - "": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", - "name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", - "source": "Source describes where to find the ResourceClaim.", + "": "PodResourceClaim references exactly one ResourceClaim, either directly or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim for the pod.\n\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", + "name": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", + "resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", + "resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.", } func (PodResourceClaim) SwaggerDoc() map[string]string { @@ -1708,7 +1750,7 @@ func (PodResourceClaim) SwaggerDoc() map[string]string { var map_PodResourceClaimStatus = map[string]string{ "": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.", "name": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.", - "resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", + "resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. 
The pod.spec.resourceClaims entry can be ignored in this case.", } func (PodResourceClaimStatus) SwaggerDoc() map[string]string { @@ -1725,18 +1767,20 @@ func (PodSchedulingGate) SwaggerDoc() map[string]string { } var map_PodSecurityContext = map[string]string{ - "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", - "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.", - "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", - "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
Note that this field cannot be set when spec.os.name is windows.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", - "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", - "appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified). If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. 
Note that this field cannot be set when spec.os.name is windows.", + "supplementalGroupsPolicy": "Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows.", + "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", + "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", + "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", + "seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "seLinuxChangePolicy": "seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. Valid values are \"MountOption\" and \"Recursive\".\n\n\"Recursive\" means relabeling of all files on all Pod volumes by the container runtime. This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.\n\n\"MountOption\" mounts all eligible Pod volumes with `-o context` mount option. This requires all Pods that share the same volume to use the same SELinux label. It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their CSIDriver instance. Other volumes are always re-labelled recursively. \"MountOption\" value is allowed only when SELinuxMount feature gate is enabled.\n\nIf not specified and SELinuxMount feature gate is enabled, \"MountOption\" is used. If not specified and SELinuxMount feature gate is disabled, \"MountOption\" is used for ReadWriteOncePod volumes and \"Recursive\" for all other volumes.\n\nThis field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.\n\nAll Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. 
Note that this field cannot be set when spec.os.name is windows.", } func (PodSecurityContext) SwaggerDoc() map[string]string { @@ -1766,7 +1810,7 @@ var map_PodSpec = map[string]string{ "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", "serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", - "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "nodeName": "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename", "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", "hostPID": "Use the host's pid namespace. Optional: Default to false.", "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", @@ -1789,10 +1833,11 @@ var map_PodSpec = map[string]string{ "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", "setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", - "os": "Specifies the OS of the containers in the pod. 
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", + "os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", "hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", "schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.", "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", + "resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1811,10 +1856,10 @@ var map_PodStatus = map[string]string{ "podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "containerStatuses": "The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "initContainerStatuses": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status", + "containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", - "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod.", + "ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. 
Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", "resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"", "resourceClaimStatuses": "Status of resource claims.", } @@ -1864,6 +1909,7 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string { } var map_PortStatus = map[string]string{ + "": "PortStatus represents the error condition of a service port", "port": "Port is the port number of the service port of which status is recorded here", "protocol": "Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\"", "error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", @@ -1931,10 +1977,10 @@ func (Probe) SwaggerDoc() map[string]string { var map_ProbeHandler = map[string]string{ "": "ProbeHandler defines a specific action that should be taken in a probe. One and only one of the fields must be specified.", - "exec": "Exec specifies the action to take.", - "httpGet": "HTTPGet specifies the http request to perform.", - "tcpSocket": "TCPSocket specifies an action involving a TCP port.", - "grpc": "GRPC specifies an action involving a GRPC port.", + "exec": "Exec specifies a command to execute in the container.", + "httpGet": "HTTPGet specifies an HTTP GET request to perform.", + "tcpSocket": "TCPSocket specifies a connection to a TCP port.", + "grpc": "GRPC specifies a GRPC HealthCheckRequest.", } func (ProbeHandler) SwaggerDoc() map[string]string { @@ -1943,7 +1989,7 @@ func (ProbeHandler) SwaggerDoc() map[string]string { var map_ProjectedVolumeSource = map[string]string{ "": "Represents a projected volume source", - "sources": "sources is the list of volume projections", + "sources": "sources is the list of volume projections. Each entry in this list handles one source.", "defaultMode": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", } @@ -2069,8 +2115,9 @@ func (ReplicationControllerStatus) SwaggerDoc() map[string]string { } var map_ResourceClaim = map[string]string{ - "": "ResourceClaim references one entry in PodSpec.ResourceClaims.", - "name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. 
It makes that resource available inside a container.", + "": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "name": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", + "request": "Request is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available, otherwise only the result of this request.", } func (ResourceClaim) SwaggerDoc() map[string]string { @@ -2088,6 +2135,16 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { return map_ResourceFieldSelector } +var map_ResourceHealth = map[string]string{ + "": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680.", + "resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.", + "health": "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.", +} + +func (ResourceHealth) SwaggerDoc() map[string]string { + return map_ResourceHealth +} + var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -2141,6 +2198,16 @@ func (ResourceRequirements) SwaggerDoc() map[string]string { return map_ResourceRequirements } +var map_ResourceStatus = map[string]string{ + "": "ResourceStatus represents the status of a single resource allocated to a Pod.", + "name": "Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. For DRA resources, the value must be \"claim:/\". When this status is reported about a container, the \"claim_name\" and \"request\" must match one of the claims of this container.", + "resources": "List of unique resources health. Each element in the list contains an unique resource ID and its health. At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share. See ResourceID type definition for a specific format it has in various use cases.", +} + +func (ResourceStatus) SwaggerDoc() map[string]string { + return map_ResourceStatus +} + var map_SELinuxOptions = map[string]string{ "": "SELinuxOptions are the labels to be applied to the container", "user": "User is a SELinux user label that applies to the container.", @@ -2304,7 +2371,7 @@ var map_SecurityContext = map[string]string{ "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.", "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.", - "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", + "procMount": "procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", "seccompProfile": "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.", "appArmorProfile": "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.", } @@ -2336,7 +2403,7 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "secrets": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret", + "secrets": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". The \"kubernetes.io/enforce-mountable-secrets\" annotation is deprecated since v1.32. Prefer separate namespaces to isolate access to mounted secrets. This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. 
More info: https://kubernetes.io/docs/concepts/configuration/secret", "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", } @@ -2420,7 +2487,7 @@ var map_ServiceSpec = map[string]string{ "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.", "loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.", "internalTrafficPolicy": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).", - "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is an alpha field and requires enabling ServiceTrafficDistribution feature.", + "trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. 
If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is a beta field and requires enabling ServiceTrafficDistribution feature.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2573,6 +2640,7 @@ func (TypedLocalObjectReference) SwaggerDoc() map[string]string { } var map_TypedObjectReference = map[string]string{ + "": "TypedObjectReference contains enough information to let you locate the typed referenced object", "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", "kind": "Kind is the type of resource being referenced", "name": "Name is the name of resource being referenced", @@ -2639,7 +2707,7 @@ func (VolumeNodeAffinity) SwaggerDoc() map[string]string { } var map_VolumeProjection = map[string]string{ - "": "Projection that may be projected along with other supported volume types", + "": "Projection that may be projected along with other supported volume types. Exactly one of these fields must be set.", "secret": "secret information about the secret data to project", "downwardAPI": "downwardAPI information about the downwardAPI data to project", "configMap": "configMap information about the configMap data to project", @@ -2665,33 +2733,34 @@ var map_VolumeSource = map[string]string{ "": "Represents the source of a volume to mount. Only one of its members may be specified.", "hostPath": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", "emptyDir": "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "gitRepo": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "gitRepo": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "secret": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "nfs": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", - "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md", "persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", - "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md", + "flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.", + "cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.", + "flocker": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. 
Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.", "downwardAPI": "downwardAPI represents downward API about the pod that should populate this volume", "fc": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.", "configMap": "configMap represents a configMap that should populate this volume", - "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.", + "quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.", + "azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.", + "photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.", "projected": "projected items for all in one resources secrets, configmaps, and downward API", - "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - "storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", - "csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + "portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.", + "scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.", + "storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. 
Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.", + "csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", "ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + "image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.", } func (VolumeSource) SwaggerDoc() map[string]string { diff --git a/constraint/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 08e927848..3f669092e 100644 --- a/constraint/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -440,32 +440,6 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
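The VolumeSource hunk above marks the remaining in-tree volume plugins as deprecated and documents the new image volume type. A minimal sketch, assuming the vendored k8s.io/api/core/v1 (v0.32) package, of how a caller might declare such a volume; the volume name and image reference are illustrative only, not values taken from this diff:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Mounts an OCI image or artifact read-only into the pod, per the
	// generated field documentation above.
	vol := corev1.Volume{
		Name: "model-artifact", // illustrative name
		VolumeSource: corev1.VolumeSource{
			Image: &corev1.ImageVolumeSource{
				Reference:  "registry.example.com/models/llm:v1", // illustrative reference
				PullPolicy: corev1.PullIfNotPresent,
			},
		},
	}
	fmt.Println(vol.Name)
}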
-func (in *ClaimSource) DeepCopyInto(out *ClaimSource) { - *out = *in - if in.ResourceClaimName != nil { - in, out := &in.ResourceClaimName, &out.ResourceClaimName - *out = new(string) - **out = **in - } - if in.ResourceClaimTemplateName != nil { - in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource. -func (in *ClaimSource) DeepCopy() *ClaimSource { - if in == nil { - return nil - } - out := new(ClaimSource) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { *out = *in @@ -1069,6 +1043,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.User != nil { + in, out := &in.User, &out.User + *out = new(ContainerUser) + (*in).DeepCopyInto(*out) + } + if in.AllocatedResourcesStatus != nil { + in, out := &in.AllocatedResourcesStatus, &out.AllocatedResourcesStatus + *out = make([]ResourceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1082,6 +1068,27 @@ func (in *ContainerStatus) DeepCopy() *ContainerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerUser) DeepCopyInto(out *ContainerUser) { + *out = *in + if in.Linux != nil { + in, out := &in.Linux, &out.Linux + *out = new(LinuxContainerUser) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerUser. +func (in *ContainerUser) DeepCopy() *ContainerUser { + if in == nil { + return nil + } + out := new(ContainerUser) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { *out = *in @@ -2044,6 +2051,22 @@ func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageVolumeSource) DeepCopyInto(out *ImageVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVolumeSource. +func (in *ImageVolumeSource) DeepCopy() *ImageVolumeSource { + if in == nil { + return nil + } + out := new(ImageVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { *out = *in @@ -2261,6 +2284,27 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LinuxContainerUser) DeepCopyInto(out *LinuxContainerUser) { + *out = *in + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxContainerUser. 
+func (in *LinuxContainerUser) DeepCopy() *LinuxContainerUser { + if in == nil { + return nil + } + out := new(LinuxContainerUser) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *List) DeepCopyInto(out *List) { *out = *in @@ -2695,6 +2739,27 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) { + *out = *in + if in.SupplementalGroupsPolicy != nil { + in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures. +func (in *NodeFeatures) DeepCopy() *NodeFeatures { + if in == nil { + return nil + } + out := new(NodeFeatures) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeList) DeepCopyInto(out *NodeList) { *out = *in @@ -2782,6 +2847,11 @@ func (in *NodeRuntimeHandlerFeatures) DeepCopyInto(out *NodeRuntimeHandlerFeatur *out = new(bool) **out = **in } + if in.UserNamespaces != nil { + in, out := &in.UserNamespaces, &out.UserNamespaces + *out = new(bool) + **out = **in + } return } @@ -2962,6 +3032,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = new(NodeFeatures) + (*in).DeepCopyInto(*out) + } return } @@ -3860,6 +3935,11 @@ func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { *out = new(int64) **out = **in } + if in.Stream != nil { + in, out := &in.Stream, &out.Stream + *out = new(string) + **out = **in + } return } @@ -3971,7 +4051,16 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) { *out = *in - in.Source.DeepCopyInto(&out.Source) + if in.ResourceClaimName != nil { + in, out := &in.ResourceClaimName, &out.ResourceClaimName + *out = new(string) + **out = **in + } + if in.ResourceClaimTemplateName != nil { + in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName + *out = new(string) + **out = **in + } return } @@ -4055,6 +4144,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = make([]int64, len(*in)) copy(*out, *in) } + if in.SupplementalGroupsPolicy != nil { + in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy + *out = new(SupplementalGroupsPolicy) + **out = **in + } if in.FSGroup != nil { in, out := &in.FSGroup, &out.FSGroup *out = new(int64) @@ -4080,6 +4174,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = new(AppArmorProfile) (*in).DeepCopyInto(*out) } + if in.SELinuxChangePolicy != nil { + in, out := &in.SELinuxChangePolicy, &out.SELinuxChangePolicy + *out = new(PodSELinuxChangePolicy) + **out = **in + } return } @@ -4272,6 +4371,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } @@ -4900,6 +5004,22 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceHealth) DeepCopyInto(out *ResourceHealth) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealth. +func (in *ResourceHealth) DeepCopy() *ResourceHealth { + if in == nil { + return nil + } + out := new(ResourceHealth) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in ResourceList) DeepCopyInto(out *ResourceList) { { @@ -5081,6 +5201,27 @@ func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceHealth, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. +func (in *ResourceStatus) DeepCopy() *ResourceStatus { + if in == nil { + return nil + } + out := new(ResourceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
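The deep-copy hunks above cover the fields added to PodResourceClaim, PodSecurityContext and PodSpec in this API bump (direct ResourceClaim references, supplementalGroupsPolicy, seLinuxChangePolicy, pod-level resources). A minimal sketch, assuming the vendored k8s.io/api/core/v1, k8s.io/apimachinery and k8s.io/utils packages, of how a consumer of this module might populate them; the claim and template names are illustrative, and the string values are the ones listed in the generated docs earlier in this diff:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"
)

func main() {
	spec := corev1.PodSpec{
		// PodResourceClaim now names a ResourceClaim or ResourceClaimTemplate
		// directly; the intermediate ClaimSource type removed above is gone.
		ResourceClaims: []corev1.PodResourceClaim{{
			Name:                      "gpu",                  // illustrative
			ResourceClaimTemplateName: ptr.To("gpu-template"), // illustrative
		}},
		SecurityContext: &corev1.PodSecurityContext{
			SupplementalGroups: []int64{1000},
			// Valid values per the generated docs: "Merge", "Strict".
			SupplementalGroupsPolicy: ptr.To(corev1.SupplementalGroupsPolicy("Strict")),
			// Valid values per the generated docs: "MountOption", "Recursive".
			SELinuxChangePolicy: ptr.To(corev1.PodSELinuxChangePolicy("MountOption")),
		},
		// Pod-level resources (alpha, PodLevelResources feature gate).
		Resources: &corev1.ResourceRequirements{
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("2"),
				corev1.ResourceMemory: resource.MustParse("1Gi"),
			},
		},
	}
	fmt.Println(len(spec.ResourceClaims))
}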
func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { *out = *in @@ -6426,6 +6567,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = new(EphemeralVolumeSource) (*in).DeepCopyInto(*out) } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(ImageVolumeSource) + **out = **in + } return } diff --git a/constraint/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..6710a96d1 --- /dev/null +++ b/constraint/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,274 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Binding) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ConfigMap) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ConfigMapList) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Endpoints) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointsList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *Event) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EventList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LimitRange) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *LimitRangeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *List) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Namespace) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NamespaceList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Node) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NodeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NodeProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *PersistentVolume) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeClaim) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeClaimList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PersistentVolumeList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Pod) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodAttachOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 1 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodExecOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodLogOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodPortForwardOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *PodProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodStatusResult) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodTemplate) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodTemplateList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RangeAllocation) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicationController) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ReplicationControllerList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceQuota) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceQuotaList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Secret) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *SecretList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *SerializedReference) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Service) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceAccount) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceAccountList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceList) APILifecycleIntroduced() (major, minor int) { + return 1, 0 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceProxyOptions) APILifecycleIntroduced() (major, minor int) { + return 1, 2 +} diff --git a/constraint/vendor/k8s.io/api/discovery/v1/doc.go b/constraint/vendor/k8s.io/api/discovery/v1/doc.go index 96ae531ce..01913669f 100644 --- a/constraint/vendor/k8s.io/api/discovery/v1/doc.go +++ b/constraint/vendor/k8s.io/api/discovery/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=discovery.k8s.io package v1 // import "k8s.io/api/discovery/v1" diff --git a/constraint/vendor/k8s.io/api/discovery/v1/generated.proto b/constraint/vendor/k8s.io/api/discovery/v1/generated.proto index 6d234017b..8ddf0dc5d 100644 --- a/constraint/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/discovery/v1/generated.proto @@ -54,7 +54,7 @@ message Endpoint { // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional - optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + optional .k8s.io.api.core.v1.ObjectReference targetRef = 4; // deprecatedTopology contains topology information part of the v1beta1 // API. This field is deprecated, and will be removed when the v1beta1 @@ -161,7 +161,7 @@ message EndpointPort { message EndpointSlice { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is @@ -191,7 +191,7 @@ message EndpointSlice { message EndpointSliceList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of endpoint slices repeated EndpointSlice items = 2; diff --git a/constraint/vendor/k8s.io/api/discovery/v1/types.go b/constraint/vendor/k8s.io/api/discovery/v1/types.go index 7ebb07ca3..d6a9d0fce 100644 --- a/constraint/vendor/k8s.io/api/discovery/v1/types.go +++ b/constraint/vendor/k8s.io/api/discovery/v1/types.go @@ -23,6 +23,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // EndpointSlice represents a subset of the endpoints that implement a service. // For a given service there may be multiple EndpointSlice objects, selected by @@ -206,6 +207,7 @@ type EndpointPort struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // EndpointSliceList represents a list of endpoint slices type EndpointSliceList struct { diff --git a/constraint/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..362867c5b --- /dev/null +++ b/constraint/vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EndpointSlice) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *EndpointSliceList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} diff --git a/constraint/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/discovery/v1beta1/generated.proto index ec555a40b..55828dd97 100644 --- a/constraint/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -54,7 +54,7 @@ message Endpoint { // targetRef is a reference to a Kubernetes object that represents this // endpoint. // +optional - optional k8s.io.api.core.v1.ObjectReference targetRef = 4; + optional .k8s.io.api.core.v1.ObjectReference targetRef = 4; // topology contains arbitrary topology information associated with the // endpoint. These key/value pairs must conform with the label format. @@ -153,7 +153,7 @@ message EndpointPort { message EndpointSlice { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // addressType specifies the type of address carried by this EndpointSlice. // All addresses in this slice must be the same type. This field is @@ -183,7 +183,7 @@ message EndpointSlice { message EndpointSliceList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of endpoint slices repeated EndpointSlice items = 2; diff --git a/constraint/vendor/k8s.io/api/events/v1/doc.go b/constraint/vendor/k8s.io/api/events/v1/doc.go index 6e320e063..5fe700ffc 100644 --- a/constraint/vendor/k8s.io/api/events/v1/doc.go +++ b/constraint/vendor/k8s.io/api/events/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=events.k8s.io package v1 // import "k8s.io/api/events/v1" diff --git a/constraint/vendor/k8s.io/api/events/v1/generated.proto b/constraint/vendor/k8s.io/api/events/v1/generated.proto index cfa16b021..6c7e4cca1 100644 --- a/constraint/vendor/k8s.io/api/events/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/events/v1/generated.proto @@ -39,10 +39,10 @@ message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; // series is data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -68,12 +68,12 @@ message Event { // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because // it acts on some changes in a ReplicaSet object. // +optional - optional k8s.io.api.core.v1.ObjectReference regarding = 8; + optional .k8s.io.api.core.v1.ObjectReference regarding = 8; // related is the optional secondary object for more complex actions. E.g. when regarding object triggers // a creation or deletion of related object. 
// +optional - optional k8s.io.api.core.v1.ObjectReference related = 9; + optional .k8s.io.api.core.v1.ObjectReference related = 9; // note is a human-readable description of the status of this operation. // Maximal length of the note is 1kB, but libraries should be prepared to @@ -88,15 +88,15 @@ message Event { // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + optional .k8s.io.api.core.v1.EventSource deprecatedSource = 12; // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional @@ -108,7 +108,7 @@ message EventList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Event items = 2; @@ -123,6 +123,6 @@ message EventSeries { optional int32 count = 1; // lastObservedTime is the time when last Event from the series was seen before last heartbeat. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } diff --git a/constraint/vendor/k8s.io/api/events/v1/types.go b/constraint/vendor/k8s.io/api/events/v1/types.go index e01a2b21e..86b12eee1 100644 --- a/constraint/vendor/k8s.io/api/events/v1/types.go +++ b/constraint/vendor/k8s.io/api/events/v1/types.go @@ -23,6 +23,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. // Events have a limited retention time and triggers and messages may evolve @@ -109,6 +110,7 @@ type EventSeries struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // EventList is a list of Event objects. type EventList struct { diff --git a/constraint/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..5217d1ac6 --- /dev/null +++ b/constraint/vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Event) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *EventList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/constraint/vendor/k8s.io/api/events/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/events/v1beta1/generated.proto index de60bdc3e..fbdb30970 100644 --- a/constraint/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -39,10 +39,10 @@ message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // eventTime is the time when this Event was first observed. It is required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; // series is data about the Event series this event represents or nil if it's a singleton Event. // +optional @@ -72,12 +72,12 @@ message Event { // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because // it acts on some changes in a ReplicaSet object. // +optional - optional k8s.io.api.core.v1.ObjectReference regarding = 8; + optional .k8s.io.api.core.v1.ObjectReference regarding = 8; // related is the optional secondary object for more complex actions. E.g. when regarding object triggers // a creation or deletion of related object. // +optional - optional k8s.io.api.core.v1.ObjectReference related = 9; + optional .k8s.io.api.core.v1.ObjectReference related = 9; // note is a human-readable description of the status of this operation. // Maximal length of the note is 1kB, but libraries should be prepared to @@ -92,15 +92,15 @@ message Event { // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + optional .k8s.io.api.core.v1.EventSource deprecatedSource = 12; // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type. // +optional @@ -112,7 +112,7 @@ message EventList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated Event items = 2; @@ -125,6 +125,6 @@ message EventSeries { optional int32 count = 1; // lastObservedTime is the time when last Event from the series was seen before last heartbeat. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; } diff --git a/constraint/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/extensions/v1beta1/generated.proto index 60effc8f7..9bbcaa0e2 100644 --- a/constraint/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -37,7 +37,7 @@ message DaemonSet { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -63,7 +63,7 @@ message DaemonSetCondition { // Last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -79,7 +79,7 @@ message DaemonSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // A list of daemon sets. repeated DaemonSet items = 2; @@ -92,14 +92,14 @@ message DaemonSetSpec { // If empty, defaulted to labels on Pod template. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). 
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 2; // An update strategy to replace existing DaemonSet pods with new pods. // +optional @@ -205,7 +205,7 @@ message DaemonSetUpdateStrategy { message Deployment { // Standard object metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the Deployment. // +optional @@ -225,10 +225,10 @@ message DeploymentCondition { optional string status = 2; // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; // The reason for the condition's last transition. optional string reason = 4; @@ -241,7 +241,7 @@ message DeploymentCondition { message DeploymentList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Deployments. repeated Deployment items = 2; @@ -271,10 +271,10 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; // The deployment strategy to use to replace existing pods with new ones. // +optional @@ -440,7 +440,7 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -461,13 +461,13 @@ message IngressBackend { // Specifies the port of the referenced service. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; // Resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressList is a collection of Ingress. @@ -475,7 +475,7 @@ message IngressList { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Ingress. repeated Ingress items = 2; @@ -651,7 +651,7 @@ message NetworkPolicy { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior for this NetworkPolicy. // +optional @@ -710,7 +710,7 @@ message NetworkPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of schema objects. repeated NetworkPolicy items = 2; @@ -725,7 +725,7 @@ message NetworkPolicyPeer { // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. @@ -734,7 +734,7 @@ message NetworkPolicyPeer { // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; // IPBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. @@ -754,7 +754,7 @@ message NetworkPolicyPort { // numbers. // If present, only traffic on the specified protocol AND port will be matched. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // If set, indicates that the range of ports from port to endPort, inclusive, // should be allowed by the policy. This field cannot be defined if the port field @@ -771,7 +771,7 @@ message NetworkPolicySpec { // same set of pods. In this case, the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. // An empty podSelector matches all pods in this namespace. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // List of ingress rules to be applied to the selected pods. // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod @@ -818,7 +818,7 @@ message ReplicaSet { // be the same as the Pod(s) that the ReplicaSet manages. // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -844,7 +844,7 @@ message ReplicaSetCondition { // The last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // The reason for the condition's last transition. // +optional @@ -860,7 +860,7 @@ message ReplicaSetList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of ReplicaSets. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller @@ -887,13 +887,13 @@ message ReplicaSetSpec { // Label keys and values that must match in order to be controlled by this replica set. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. @@ -951,7 +951,7 @@ message RollingUpdateDaemonSet { // 70% of original number of DaemonSet pods are available at all times during // the update. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of nodes with an existing available DaemonSet pod that // can have an updated DaemonSet pod during during an update. @@ -973,7 +973,7 @@ message RollingUpdateDaemonSet { // cause evictions during disruption. // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // Spec to control the desired behavior of rolling update. @@ -989,7 +989,7 @@ message RollingUpdateDeployment { // that the total number of pods available at all times during the update is at // least 70% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; // The maximum number of pods that can be scheduled above the desired number of // pods.
@@ -1003,14 +1003,14 @@ message RollingUpdateDeployment { // new RC can be scaled up further, ensuring that total number of pods running // at any time during the update is at most 130% of desired pods. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } // represents a scaling request for a resource. message Scale { // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional diff --git a/constraint/vendor/k8s.io/api/extensions/v1beta1/types.go b/constraint/vendor/k8s.io/api/extensions/v1beta1/types.go index cc2deadac..09f58692f 100644 --- a/constraint/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/constraint/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -775,7 +775,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the diff --git a/constraint/vendor/k8s.io/api/flowcontrol/v1/doc.go b/constraint/vendor/k8s.io/api/flowcontrol/v1/doc.go index 1bc51d406..c9e7db158 100644 --- a/constraint/vendor/k8s.io/api/flowcontrol/v1/doc.go +++ b/constraint/vendor/k8s.io/api/flowcontrol/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=flowcontrol.apiserver.k8s.io diff --git a/constraint/vendor/k8s.io/api/flowcontrol/v1/generated.proto b/constraint/vendor/k8s.io/api/flowcontrol/v1/generated.proto index a5c6f4fc4..33a135889 100644 --- a/constraint/vendor/k8s.io/api/flowcontrol/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/flowcontrol/v1/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -302,7 +302,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -327,7 +327,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -341,7 +341,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; diff --git a/constraint/vendor/k8s.io/api/flowcontrol/v1/types.go b/constraint/vendor/k8s.io/api/flowcontrol/v1/types.go index e62d23280..ad72bcee2 100644 --- a/constraint/vendor/k8s.io/api/flowcontrol/v1/types.go +++ b/constraint/vendor/k8s.io/api/flowcontrol/v1/types.go @@ -106,6 +106,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -126,6 +127,7 @@ type FlowSchema struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -381,6 +383,7 @@ type FlowSchemaConditionType string // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -400,6 +403,7 @@ type PriorityLevelConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.29 // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. 
type PriorityLevelConfigurationList struct { diff --git a/constraint/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..fbab9868c --- /dev/null +++ b/constraint/vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 29 +} diff --git a/constraint/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto index 04b54820c..61ed3833a 100644 --- a/constraint/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. 
- optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -295,7 +295,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -320,7 +320,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -334,7 +334,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; diff --git a/constraint/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/constraint/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto index a832114af..d6073fc92 100644 --- a/constraint/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto +++ b/constraint/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. 
optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -295,7 +295,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -320,7 +320,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -334,7 +334,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; diff --git a/constraint/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto b/constraint/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto index eda0f7829..c6504d435 100644 --- a/constraint/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto +++ b/constraint/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto @@ -76,7 +76,7 @@ message FlowSchema { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a FlowSchema. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -101,7 +101,7 @@ message FlowSchemaCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -115,7 +115,7 @@ message FlowSchemaList { // `metadata` is the standard list metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of FlowSchemas. repeated FlowSchema items = 2; @@ -297,7 +297,7 @@ message PriorityLevelConfiguration { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // `spec` is the specification of the desired behavior of a "request-priority". // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -322,7 +322,7 @@ message PriorityLevelConfigurationCondition { optional string status = 2; // `lastTransitionTime` is the last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // `reason` is a unique, one-word, CamelCase reason for the condition's last transition. optional string reason = 4; @@ -336,7 +336,7 @@ message PriorityLevelConfigurationList { // `metadata` is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // `items` is a list of request-priorities. repeated PriorityLevelConfiguration items = 2; diff --git a/constraint/vendor/k8s.io/api/networking/v1/doc.go b/constraint/vendor/k8s.io/api/networking/v1/doc.go index d3ffd5ed1..1d13e7bab 100644 --- a/constraint/vendor/k8s.io/api/networking/v1/doc.go +++ b/constraint/vendor/k8s.io/api/networking/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=networking.k8s.io package v1 // import "k8s.io/api/networking/v1" diff --git a/constraint/vendor/k8s.io/api/networking/v1/generated.proto b/constraint/vendor/k8s.io/api/networking/v1/generated.proto index 22a9085a5..c72fdc8f3 100644 --- a/constraint/vendor/k8s.io/api/networking/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/networking/v1/generated.proto @@ -96,7 +96,7 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -121,7 +121,7 @@ message IngressBackend { // service.Port must not be specified. // This is a mutually exclusive setting with "Service". 
// +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressClass represents the class of the Ingress, referenced by the Ingress @@ -133,7 +133,7 @@ message IngressClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -145,7 +145,7 @@ message IngressClass { message IngressClassList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of IngressClasses. repeated IngressClass items = 2; @@ -200,7 +200,7 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of Ingress. repeated Ingress items = 2; @@ -381,7 +381,7 @@ message NetworkPolicy { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents the specification of the desired behavior for this NetworkPolicy. // +optional @@ -438,7 +438,7 @@ message NetworkPolicyList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated NetworkPolicy items = 2; @@ -454,7 +454,7 @@ message NetworkPolicyPeer { // the pods matching podSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the pods matching podSelector in the policy's own namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // namespaceSelector selects namespaces using cluster-scoped labels. This field follows // standard label selector semantics; if present but empty, it selects all namespaces. @@ -463,7 +463,7 @@ message NetworkPolicyPeer { // the pods matching podSelector in the namespaces selected by namespaceSelector. // Otherwise it selects all pods in the namespaces selected by namespaceSelector. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; // ipBlock defines policy on a particular IPBlock. If this field is set then // neither of the other fields can be. @@ -483,7 +483,7 @@ message NetworkPolicyPort { // numbers. // If present, only traffic on the specified protocol AND port will be matched. 
// +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; // endPort indicates that the range of ports from port to endPort if set, inclusive, // should be allowed by the policy. This field cannot be defined if the port field @@ -501,7 +501,7 @@ message NetworkPolicySpec { // the ingress rules for each are combined additively. // This field is NOT optional and follows standard label selector semantics. // An empty podSelector matches all pods in this namespace. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; // ingress is a list of ingress rules to be applied to the selected pods. // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod @@ -541,6 +541,7 @@ message NetworkPolicySpec { } // ServiceBackendPort is the service port being referenced. +// +structType=atomic message ServiceBackendPort { // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". diff --git a/constraint/vendor/k8s.io/api/networking/v1/types.go b/constraint/vendor/k8s.io/api/networking/v1/types.go index 8ee62918b..d75e27558 100644 --- a/constraint/vendor/k8s.io/api/networking/v1/types.go +++ b/constraint/vendor/k8s.io/api/networking/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.7 // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { @@ -214,6 +215,7 @@ type NetworkPolicyPeer struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // NetworkPolicyList is a list of NetworkPolicy objects. type NetworkPolicyList struct { @@ -230,6 +232,7 @@ type NetworkPolicyList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services @@ -255,6 +258,7 @@ type Ingress struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressList is a collection of Ingress. type IngressList struct { @@ -415,7 +419,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -527,6 +531,7 @@ type IngressServiceBackend struct { } // ServiceBackendPort is the service port being referenced. +// +structType=atomic type ServiceBackendPort struct { // name is the name of the port on the Service. // This is a mutually exclusive setting with "Number". @@ -542,6 +547,7 @@ type ServiceBackendPort struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressClass represents the class of the Ingress, referenced by the Ingress // Spec. 
The `ingressclass.kubernetes.io/is-default-class` annotation can be @@ -616,6 +622,7 @@ type IngressClassParametersReference struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.19 // IngressClassList is a collection of IngressClasses. type IngressClassList struct { diff --git a/constraint/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..21e8c671a --- /dev/null +++ b/constraint/vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Ingress) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressClass) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IngressList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *NetworkPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 7 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *NetworkPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 19 +} diff --git a/constraint/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/constraint/vendor/k8s.io/api/networking/v1alpha1/generated.proto index 8914fffcf..80ec6af73 100644 --- a/constraint/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ b/constraint/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -39,7 +39,7 @@ message IPAddress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the IPAddress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -52,7 +52,7 @@ message IPAddressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of IPAddresses. repeated IPAddress items = 2; @@ -91,7 +91,7 @@ message ServiceCIDR { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the ServiceCIDR. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -109,7 +109,7 @@ message ServiceCIDRList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of ServiceCIDRs. repeated ServiceCIDR items = 2; @@ -119,6 +119,9 @@ message ServiceCIDRList { message ServiceCIDRSpec { // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // The network address of each CIDR, the address that identifies the subnet of a host, is reserved + // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be + // allocated. // This field is immutable. // +optional // +listType=atomic @@ -134,6 +137,6 @@ message ServiceCIDRStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; } diff --git a/constraint/vendor/k8s.io/api/networking/v1alpha1/types.go b/constraint/vendor/k8s.io/api/networking/v1alpha1/types.go index bcdc33b45..0e454f026 100644 --- a/constraint/vendor/k8s.io/api/networking/v1alpha1/types.go +++ b/constraint/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -109,6 +109,9 @@ type ServiceCIDR struct { type ServiceCIDRSpec struct { // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") // from which to assign service cluster IPs. 
Max of two CIDRs is allowed, one of each IP family. + // The network address of each CIDR, the address that identifies the subnet of a host, is reserved + // and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be + // allocated. // This field is immutable. // +optional // +listType=atomic diff --git a/constraint/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index 481ec0603..4c8eb57a7 100644 --- a/constraint/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -91,7 +91,7 @@ func (ServiceCIDRList) SwaggerDoc() map[string]string { var map_ServiceCIDRSpec = map[string]string{ "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", - "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. The network address of each CIDR, the address that identifies the subnet of a host, is reserved and will not be allocated. The broadcast address for IPv4 CIDRs is also reserved and will not be allocated. This field is immutable.", } func (ServiceCIDRSpec) SwaggerDoc() map[string]string { diff --git a/constraint/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/constraint/vendor/k8s.io/api/networking/v1beta1/generated.pb.go index 13d4f5385..a924725f2 100644 --- a/constraint/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ b/constraint/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -27,6 +27,7 @@ import ( proto "github.com/gogo/protobuf/proto" k8s_io_api_core_v1 "k8s.io/api/core/v1" v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -101,10 +102,94 @@ func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{2} +} +func (m *IPAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) +} +func (m *IPAddress) XXX_Size() int { + return m.Size() +} +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddress proto.InternalMessageInfo + +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{3} +} +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return 
nil, err + } + return b[:n], nil +} +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) +} +func (m *IPAddressList) XXX_Size() int { + return m.Size() +} +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo + +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{4} +} +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) +} +func (m *IPAddressSpec) XXX_Size() int { + return m.Size() +} +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo + func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{2} + return fileDescriptor_9497719c79c89d2d, []int{5} } func (m *Ingress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -132,7 +217,7 @@ var xxx_messageInfo_Ingress proto.InternalMessageInfo func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{3} + return fileDescriptor_9497719c79c89d2d, []int{6} } func (m *IngressBackend) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -160,7 +245,7 @@ var xxx_messageInfo_IngressBackend proto.InternalMessageInfo func (m *IngressClass) Reset() { *m = IngressClass{} } func (*IngressClass) ProtoMessage() {} func (*IngressClass) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{4} + return fileDescriptor_9497719c79c89d2d, []int{7} } func (m *IngressClass) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -188,7 +273,7 @@ var xxx_messageInfo_IngressClass proto.InternalMessageInfo func (m *IngressClassList) Reset() { *m = IngressClassList{} } func (*IngressClassList) ProtoMessage() {} func (*IngressClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{5} + return fileDescriptor_9497719c79c89d2d, []int{8} } func (m *IngressClassList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,7 +301,7 @@ var xxx_messageInfo_IngressClassList proto.InternalMessageInfo func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} } func (*IngressClassParametersReference) ProtoMessage() {} func (*IngressClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{6} + return fileDescriptor_9497719c79c89d2d, []int{9} } func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -244,7 +329,7 @@ var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} } func (*IngressClassSpec) ProtoMessage() {} func (*IngressClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, 
[]int{7} + return fileDescriptor_9497719c79c89d2d, []int{10} } func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -272,7 +357,7 @@ var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{8} + return fileDescriptor_9497719c79c89d2d, []int{11} } func (m *IngressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -300,7 +385,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} } func (*IngressLoadBalancerIngress) ProtoMessage() {} func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{9} + return fileDescriptor_9497719c79c89d2d, []int{12} } func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -328,7 +413,7 @@ var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} } func (*IngressLoadBalancerStatus) ProtoMessage() {} func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{10} + return fileDescriptor_9497719c79c89d2d, []int{13} } func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -356,7 +441,7 @@ var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} } func (*IngressPortStatus) ProtoMessage() {} func (*IngressPortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{11} + return fileDescriptor_9497719c79c89d2d, []int{14} } func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -384,7 +469,7 @@ var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{12} + return fileDescriptor_9497719c79c89d2d, []int{15} } func (m *IngressRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -412,7 +497,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{13} + return fileDescriptor_9497719c79c89d2d, []int{16} } func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +525,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{14} + return fileDescriptor_9497719c79c89d2d, []int{17} } func (m *IngressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -468,7 +553,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{15} + return fileDescriptor_9497719c79c89d2d, []int{18} } func (m 
*IngressStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -496,7 +581,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_9497719c79c89d2d, []int{16} + return fileDescriptor_9497719c79c89d2d, []int{19} } func (m *IngressTLS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -521,9 +606,152 @@ func (m *IngressTLS) XXX_DiscardUnknown() { var xxx_messageInfo_IngressTLS proto.InternalMessageInfo +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{20} +} +func (m *ParentReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) +} +func (m *ParentReference) XXX_Size() int { + return m.Size() +} +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ParentReference proto.InternalMessageInfo + +func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } +func (*ServiceCIDR) ProtoMessage() {} +func (*ServiceCIDR) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{21} +} +func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDR.Merge(m, src) +} +func (m *ServiceCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo + +func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } +func (*ServiceCIDRList) ProtoMessage() {} +func (*ServiceCIDRList) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{22} +} +func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRList.Merge(m, src) +} +func (m *ServiceCIDRList) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo + +func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } +func (*ServiceCIDRSpec) ProtoMessage() {} +func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{23} +} +func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) +} +func (m *ServiceCIDRSpec) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo + +func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } +func (*ServiceCIDRStatus) ProtoMessage() {} +func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_9497719c79c89d2d, []int{24} +} +func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) +} +func (m *ServiceCIDRStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo + func init() { proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1beta1.IPAddress") + proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1beta1.IPAddressList") + proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1beta1.IPAddressSpec") proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress") proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend") proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1beta1.IngressClass") @@ -539,6 +767,11 @@ func init() { proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec") proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") + proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1beta1.ParentReference") + proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDR") + proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRList") + proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRSpec") + proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRStatus") } func init() { @@ -546,85 +779,99 @@ func init() { } var fileDescriptor_9497719c79c89d2d = []byte{ - // 1234 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xcf, 0xda, 0x71, 0xe3, 0x8c, 0xd3, 0x34, 0xff, 0xf9, 0xe7, 0x60, 0x82, 0x6a, 0x47, 0x7b, - 0x40, 0x81, 0x36, 0xbb, 0x4d, 0x5a, 0x50, 0xb9, 0x20, 0xd8, 0x08, 0x91, 0x28, 0x21, 0x31, 0x63, - 0xf3, 0x22, 0xc4, 0x81, 0xc9, 0x7a, 0x6a, 0x2f, 0x5e, 0xef, 0xae, 0x66, 0x66, 0x83, 0x7a, 0x03, - 0x71, 0xe2, 0x04, 0xdf, 0x01, 0x89, 0x8f, 0x80, 0xb8, 0x20, 0x21, 0xb8, 0xe4, 0xd8, 0x63, 0x2f, - 0x44, 0xc4, 0x7c, 0x8b, 0x9e, 0xd0, 0x33, 0x3b, 0xfb, 0xe2, 0x97, 0xb4, 0x0e, 0x87, 0x9e, 0xe2, - 0x7d, 0x5e, 0x7e, 0xcf, 0xfb, 0x33, 0x4f, 
0xd0, 0xf6, 0xe0, 0xa1, 0xb0, 0xbc, 0xd0, 0xa6, 0x91, - 0x67, 0x07, 0x4c, 0x7e, 0x1d, 0xf2, 0x81, 0x17, 0xf4, 0xec, 0xb3, 0x9d, 0x53, 0x26, 0xe9, 0x8e, - 0xdd, 0x63, 0x01, 0xe3, 0x54, 0xb2, 0xae, 0x15, 0xf1, 0x50, 0x86, 0xf8, 0x76, 0x22, 0x6e, 0xd1, - 0xc8, 0xb3, 0x72, 0x71, 0x4b, 0x8b, 0x6f, 0x6c, 0xf7, 0x3c, 0xd9, 0x8f, 0x4f, 0x2d, 0x37, 0x1c, - 0xda, 0xbd, 0xb0, 0x17, 0xda, 0x4a, 0xeb, 0x34, 0x7e, 0xa4, 0xbe, 0xd4, 0x87, 0xfa, 0x95, 0xa0, - 0x6d, 0x98, 0x05, 0xe3, 0x6e, 0xc8, 0x99, 0x7d, 0x36, 0x65, 0x71, 0xe3, 0x41, 0x2e, 0x33, 0xa4, - 0x6e, 0xdf, 0x0b, 0x18, 0x7f, 0x6c, 0x47, 0x83, 0x1e, 0x10, 0x84, 0x3d, 0x64, 0x92, 0xce, 0xd2, - 0xb2, 0xaf, 0xd2, 0xe2, 0x71, 0x20, 0xbd, 0x21, 0x9b, 0x52, 0x78, 0xeb, 0x45, 0x0a, 0xc2, 0xed, - 0xb3, 0x21, 0x9d, 0xd2, 0xbb, 0x7f, 0x95, 0x5e, 0x2c, 0x3d, 0xdf, 0xf6, 0x02, 0x29, 0x24, 0x9f, - 0x54, 0x32, 0xff, 0x34, 0xd0, 0xad, 0xfd, 0x4e, 0xa7, 0x75, 0x10, 0xf4, 0x38, 0x13, 0xa2, 0x45, - 0x65, 0x1f, 0x6f, 0xa2, 0xc5, 0x88, 0xca, 0x7e, 0xdd, 0xd8, 0x34, 0xb6, 0x96, 0x9d, 0x95, 0xf3, - 0x8b, 0xe6, 0xc2, 0xe8, 0xa2, 0xb9, 0x08, 0x3c, 0xa2, 0x38, 0xf8, 0x01, 0xaa, 0xc2, 0xdf, 0xce, - 0xe3, 0x88, 0xd5, 0xcb, 0x4a, 0xaa, 0x3e, 0xba, 0x68, 0x56, 0x5b, 0x9a, 0xf6, 0xac, 0xf0, 0x9b, - 0x64, 0x92, 0xf8, 0x33, 0xb4, 0x74, 0x4a, 0xdd, 0x01, 0x0b, 0xba, 0xf5, 0xd2, 0xa6, 0xb1, 0x55, - 0xdb, 0xdd, 0xb6, 0x9e, 0x5b, 0x43, 0x4b, 0x3b, 0xe5, 0x24, 0x4a, 0xce, 0x2d, 0xed, 0xc9, 0x92, - 0x26, 0x90, 0x14, 0xce, 0x1c, 0xa0, 0xf5, 0x42, 0x10, 0x24, 0xf6, 0xd9, 0x27, 0xd4, 0x8f, 0x19, - 0x6e, 0xa3, 0x0a, 0x58, 0x17, 0x75, 0x63, 0xb3, 0xbc, 0x55, 0xdb, 0xb5, 0x5e, 0x60, 0x6f, 0x22, - 0x11, 0xce, 0x4d, 0x6d, 0xb0, 0x02, 0x5f, 0x82, 0x24, 0x58, 0xe6, 0x0f, 0x25, 0xb4, 0xa4, 0xa5, - 0xf0, 0x97, 0xa8, 0x0a, 0x75, 0xef, 0x52, 0x49, 0x55, 0xba, 0x6a, 0xbb, 0xf7, 0x0a, 0x36, 0xb2, - 0x32, 0x58, 0xd1, 0xa0, 0x07, 0x04, 0x61, 0x81, 0xb4, 0x75, 0xb6, 0x63, 0x9d, 0x9c, 0x7e, 0xc5, - 0x5c, 0xf9, 0x21, 0x93, 0xd4, 0xc1, 0xda, 0x0a, 0xca, 0x69, 0x24, 0x43, 0xc5, 0x47, 0x68, 0x51, - 0x44, 0xcc, 0xd5, 0x19, 0x7b, 0x63, 0xbe, 0x8c, 0xb5, 0x23, 0xe6, 0xe6, 0x85, 0x83, 0x2f, 0xa2, - 0x50, 0x70, 0x07, 0xdd, 0x10, 0x92, 0xca, 0x58, 0xa8, 0xb2, 0xd5, 0x76, 0xef, 0xce, 0x89, 0xa7, - 0x74, 0x9c, 0x55, 0x8d, 0x78, 0x23, 0xf9, 0x26, 0x1a, 0xcb, 0xfc, 0xbe, 0x84, 0x56, 0xc7, 0x6b, - 0x85, 0xdf, 0x44, 0x35, 0xc1, 0xf8, 0x99, 0xe7, 0xb2, 0x63, 0x3a, 0x64, 0xba, 0x95, 0xfe, 0xaf, - 0xf5, 0x6b, 0xed, 0x9c, 0x45, 0x8a, 0x72, 0xb8, 0x97, 0xa9, 0xb5, 0x42, 0x2e, 0x75, 0xd0, 0x57, - 0xa7, 0x14, 0x3a, 0xdb, 0x4a, 0x3a, 0xdb, 0x3a, 0x08, 0xe4, 0x09, 0x6f, 0x4b, 0xee, 0x05, 0xbd, - 0x29, 0x43, 0x00, 0x46, 0x8a, 0xc8, 0xf8, 0x53, 0x54, 0xe5, 0x4c, 0x84, 0x31, 0x77, 0x99, 0x4e, - 0xc5, 0x58, 0x33, 0xc2, 0x0a, 0x80, 0x32, 0x41, 0xdf, 0x76, 0x8f, 0x42, 0x97, 0xfa, 0x49, 0x71, - 0x08, 0x7b, 0xc4, 0x38, 0x0b, 0x5c, 0xe6, 0xac, 0x40, 0xc3, 0x13, 0x0d, 0x41, 0x32, 0x30, 0x18, - 0xa8, 0x15, 0x9d, 0x8b, 0x3d, 0x9f, 0xbe, 0x94, 0x16, 0xf9, 0x68, 0xac, 0x45, 0xec, 0xf9, 0x4a, - 0xaa, 0x9c, 0xbb, 0xaa, 0x4f, 0xcc, 0x3f, 0x0c, 0xb4, 0x56, 0x14, 0x3c, 0xf2, 0x84, 0xc4, 0x5f, - 0x4c, 0x45, 0x62, 0xcd, 0x17, 0x09, 0x68, 0xab, 0x38, 0xd6, 0xb4, 0xa9, 0x6a, 0x4a, 0x29, 0x44, - 0xd1, 0x42, 0x15, 0x4f, 0xb2, 0xa1, 0xa8, 0x97, 0xd4, 0xac, 0xde, 0xb9, 0x46, 0x18, 0xf9, 0xa0, - 0x1e, 0x00, 0x02, 0x49, 0x80, 0xcc, 0xbf, 0x0c, 0xd4, 0x2c, 0x8a, 0xb5, 0x28, 0xa7, 0x43, 0x26, - 0x19, 0x17, 0x59, 0x19, 0xf1, 0x16, 0xaa, 0xd2, 0xd6, 0xc1, 0x07, 0x3c, 0x8c, 0xa3, 0x74, 0xdf, - 0x81, 0x7f, 0xef, 0x69, 0x1a, 0xc9, 0xb8, 0xb0, 0x15, 0x07, 0x9e, 
0x5e, 0x5d, 0x85, 0xad, 0x78, - 0xe8, 0x05, 0x5d, 0xa2, 0x38, 0x20, 0x11, 0x40, 0xb3, 0x97, 0xc7, 0x25, 0x54, 0x97, 0x2b, 0x0e, - 0x6e, 0xa2, 0x8a, 0x70, 0xc3, 0x88, 0xd5, 0x17, 0x95, 0xc8, 0x32, 0xb8, 0xdc, 0x06, 0x02, 0x49, - 0xe8, 0xf8, 0x0e, 0x5a, 0x06, 0x41, 0x11, 0x51, 0x97, 0xd5, 0x2b, 0x4a, 0xe8, 0xe6, 0xe8, 0xa2, - 0xb9, 0x7c, 0x9c, 0x12, 0x49, 0xce, 0x37, 0x7f, 0x99, 0x28, 0x12, 0xd4, 0x0f, 0xef, 0x22, 0xe4, - 0x86, 0x81, 0xe4, 0xa1, 0xef, 0x33, 0xae, 0x43, 0xca, 0xda, 0x67, 0x2f, 0xe3, 0x90, 0x82, 0x14, - 0x0e, 0x10, 0x8a, 0xb2, 0xdc, 0xe8, 0x36, 0x7a, 0xe7, 0x1a, 0xf9, 0x9f, 0x91, 0x58, 0x67, 0x15, - 0xec, 0x15, 0x18, 0x05, 0x0b, 0xe6, 0xaf, 0x06, 0xaa, 0x69, 0xfd, 0x97, 0xd0, 0x58, 0x87, 0xe3, - 0x8d, 0xf5, 0xda, 0x9c, 0x8f, 0xce, 0xec, 0x9e, 0xfa, 0xcd, 0x40, 0x1b, 0xa9, 0xeb, 0x21, 0xed, - 0x3a, 0xd4, 0xa7, 0x81, 0xcb, 0x78, 0xfa, 0x1e, 0x6c, 0xa0, 0x92, 0x97, 0x36, 0x12, 0xd2, 0x00, - 0xa5, 0x83, 0x16, 0x29, 0x79, 0x11, 0xbe, 0x8b, 0xaa, 0xfd, 0x50, 0x48, 0xd5, 0x22, 0x49, 0x13, - 0x65, 0x5e, 0xef, 0x6b, 0x3a, 0xc9, 0x24, 0xf0, 0xc7, 0xa8, 0x12, 0x85, 0x5c, 0x8a, 0xfa, 0xa2, - 0xf2, 0xfa, 0xde, 0x7c, 0x5e, 0xc3, 0x6e, 0xd3, 0xcb, 0x3a, 0x7f, 0xbc, 0x00, 0x86, 0x24, 0x68, - 0xe6, 0xb7, 0x06, 0x7a, 0x65, 0x86, 0xff, 0x89, 0x0e, 0xee, 0xa2, 0x25, 0x2f, 0x61, 0xea, 0x17, - 0xf3, 0xed, 0xf9, 0xcc, 0xce, 0x48, 0x45, 0xfe, 0x5a, 0xa7, 0xaf, 0x72, 0x0a, 0x6d, 0xfe, 0x64, - 0xa0, 0xff, 0x4d, 0xf9, 0xab, 0xae, 0x0e, 0xd8, 0xf9, 0x90, 0xbc, 0x4a, 0xe1, 0xea, 0x80, 0xd5, - 0xad, 0x38, 0xf8, 0x10, 0x55, 0xd5, 0xd1, 0xe2, 0x86, 0xbe, 0x4e, 0xa0, 0x9d, 0x26, 0xb0, 0xa5, - 0xe9, 0xcf, 0x2e, 0x9a, 0xaf, 0x4e, 0x5f, 0x72, 0x56, 0xca, 0x26, 0x19, 0x00, 0x8c, 0x22, 0xe3, - 0x3c, 0xe4, 0x7a, 0x5a, 0xd5, 0x28, 0xbe, 0x0f, 0x04, 0x92, 0xd0, 0xcd, 0x9f, 0xf3, 0x26, 0x85, - 0x83, 0x02, 0xfc, 0x83, 0xe2, 0x4c, 0x5e, 0x45, 0x50, 0x3a, 0xa2, 0x38, 0x38, 0x46, 0x6b, 0xde, - 0xc4, 0x05, 0x72, 0xbd, 0x9d, 0x9c, 0xa9, 0x39, 0x75, 0x0d, 0xbf, 0x36, 0xc9, 0x21, 0x53, 0x26, - 0x4c, 0x86, 0xa6, 0xa4, 0xe0, 0x49, 0xe8, 0x4b, 0x19, 0xe9, 0x69, 0xba, 0x3f, 0xff, 0xdd, 0x93, - 0xbb, 0x50, 0x55, 0xd1, 0x75, 0x3a, 0x2d, 0xa2, 0xa0, 0xcc, 0xdf, 0x4b, 0x59, 0x3e, 0xd4, 0xa2, - 0x79, 0x37, 0x8b, 0x56, 0xed, 0x00, 0xf5, 0xcc, 0x27, 0x6b, 0x6d, 0xbd, 0xe0, 0x78, 0xc6, 0x23, - 0x53, 0xd2, 0xb8, 0x93, 0xdf, 0x83, 0xc6, 0x7f, 0xb9, 0x07, 0x6b, 0xb3, 0x6e, 0x41, 0xbc, 0x8f, - 0xca, 0xd2, 0x4f, 0x87, 0xfd, 0xf5, 0xf9, 0x10, 0x3b, 0x47, 0x6d, 0xa7, 0xa6, 0x53, 0x5e, 0xee, - 0x1c, 0xb5, 0x09, 0x40, 0xe0, 0x13, 0x54, 0xe1, 0xb1, 0xcf, 0xe0, 0x56, 0x2a, 0xcf, 0x7f, 0x7b, - 0x41, 0x06, 0xf3, 0xe1, 0x83, 0x2f, 0x41, 0x12, 0x1c, 0xf3, 0x3b, 0x03, 0xdd, 0x1c, 0xbb, 0xa8, - 0x30, 0x47, 0x2b, 0x7e, 0x61, 0x76, 0x74, 0x1e, 0x1e, 0x5e, 0x7f, 0xea, 0xf4, 0xd0, 0xaf, 0x6b, - 0xbb, 0x2b, 0x45, 0x1e, 0x19, 0xb3, 0x61, 0x52, 0x84, 0xf2, 0xb0, 0x61, 0x0e, 0xa0, 0x79, 0x93, - 0x81, 0xd7, 0x73, 0x00, 0x3d, 0x2d, 0x48, 0x42, 0x87, 0x07, 0x45, 0x30, 0x97, 0x33, 0x79, 0x9c, - 0x2f, 0xae, 0xec, 0x41, 0x69, 0x67, 0x1c, 0x52, 0x90, 0x72, 0xf6, 0xce, 0x2f, 0x1b, 0x0b, 0x4f, - 0x2e, 0x1b, 0x0b, 0x4f, 0x2f, 0x1b, 0x0b, 0xdf, 0x8c, 0x1a, 0xc6, 0xf9, 0xa8, 0x61, 0x3c, 0x19, - 0x35, 0x8c, 0xa7, 0xa3, 0x86, 0xf1, 0xf7, 0xa8, 0x61, 0xfc, 0xf8, 0x4f, 0x63, 0xe1, 0xf3, 0xdb, - 0xcf, 0xfd, 0x87, 0xef, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xea, 0xf6, 0xe9, 0x27, 0x10, 0x0e, - 0x00, 0x00, + // 1457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0xc5, + 0x1f, 0xcf, 0x3a, 0x71, 0xe3, 
0x8c, 0xd3, 0x26, 0x9d, 0x5f, 0x0f, 0xfe, 0x05, 0xd5, 0x8e, 0x16, + 0x09, 0x85, 0x3e, 0x76, 0xdb, 0xb4, 0xa0, 0x72, 0x41, 0xd4, 0x01, 0x51, 0xab, 0x69, 0xb2, 0x8c, + 0x0d, 0x54, 0xc0, 0x81, 0xc9, 0x7a, 0x6a, 0x2f, 0x5e, 0xef, 0xae, 0x66, 0xc7, 0x81, 0xde, 0x40, + 0x9c, 0x38, 0xc1, 0x9d, 0x23, 0x12, 0x7f, 0x02, 0x70, 0xa0, 0x52, 0x05, 0x97, 0x1e, 0x7b, 0xec, + 0x85, 0x88, 0x9a, 0xff, 0xa2, 0x27, 0xf4, 0x9d, 0x9d, 0x7d, 0xf9, 0xd1, 0x6c, 0x38, 0xe4, 0x54, + 0xef, 0xf7, 0x3d, 0xdf, 0xe7, 0xa7, 0x41, 0x57, 0x07, 0xb7, 0x42, 0xc3, 0xf1, 0x4d, 0x1a, 0x38, + 0xa6, 0xc7, 0xc4, 0x97, 0x3e, 0x1f, 0x38, 0x5e, 0xcf, 0x3c, 0xbc, 0x7e, 0xc0, 0x04, 0xbd, 0x6e, + 0xf6, 0x98, 0xc7, 0x38, 0x15, 0xac, 0x6b, 0x04, 0xdc, 0x17, 0x3e, 0xbe, 0x18, 0x89, 0x1b, 0x34, + 0x70, 0x8c, 0x54, 0xdc, 0x50, 0xe2, 0x1b, 0x57, 0x7b, 0x8e, 0xe8, 0x8f, 0x0e, 0x0c, 0xdb, 0x1f, + 0x9a, 0x3d, 0xbf, 0xe7, 0x9b, 0x52, 0xeb, 0x60, 0xf4, 0x40, 0x7e, 0xc9, 0x0f, 0xf9, 0x2b, 0xb2, + 0xb6, 0xa1, 0x67, 0x9c, 0xdb, 0x3e, 0x67, 0xe6, 0xe1, 0x94, 0xc7, 0x8d, 0x9b, 0xa9, 0xcc, 0x90, + 0xda, 0x7d, 0xc7, 0x63, 0xfc, 0xa1, 0x19, 0x0c, 0x7a, 0x40, 0x08, 0xcd, 0x21, 0x13, 0x74, 0x96, + 0x96, 0x39, 0x4f, 0x8b, 0x8f, 0x3c, 0xe1, 0x0c, 0xd9, 0x94, 0xc2, 0x9b, 0xc7, 0x29, 0x84, 0x76, + 0x9f, 0x0d, 0xe9, 0x94, 0xde, 0x8d, 0x79, 0x7a, 0x23, 0xe1, 0xb8, 0xa6, 0xe3, 0x89, 0x50, 0xf0, + 0x49, 0x25, 0xfd, 0x4f, 0x0d, 0xad, 0xdd, 0xe9, 0x74, 0xac, 0x96, 0xd7, 0xe3, 0x2c, 0x0c, 0x2d, + 0x2a, 0xfa, 0x78, 0x13, 0x2d, 0x05, 0x54, 0xf4, 0x6b, 0xda, 0xa6, 0xb6, 0xb5, 0xd2, 0x5c, 0x7d, + 0x72, 0xd4, 0x58, 0x18, 0x1f, 0x35, 0x96, 0x80, 0x47, 0x24, 0x07, 0xdf, 0x44, 0x15, 0xf8, 0xb7, + 0xf3, 0x30, 0x60, 0xb5, 0x45, 0x29, 0x55, 0x1b, 0x1f, 0x35, 0x2a, 0x96, 0xa2, 0xbd, 0xc8, 0xfc, + 0x26, 0x89, 0x24, 0xbe, 0x8f, 0x96, 0x0f, 0xa8, 0x3d, 0x60, 0x5e, 0xb7, 0x56, 0xda, 0xd4, 0xb6, + 0xaa, 0xdb, 0x57, 0x8d, 0x97, 0xd6, 0xd0, 0x50, 0x41, 0x35, 0x23, 0xa5, 0xe6, 0x9a, 0x8a, 0x64, + 0x59, 0x11, 0x48, 0x6c, 0x4e, 0x1f, 0xa0, 0x0b, 0x99, 0x47, 0x90, 0x91, 0xcb, 0x3e, 0xa2, 0xee, + 0x88, 0xe1, 0x36, 0x2a, 0x83, 0xf7, 0xb0, 0xa6, 0x6d, 0x2e, 0x6e, 0x55, 0xb7, 0x8d, 0x63, 0xfc, + 0x4d, 0x24, 0xa2, 0x79, 0x56, 0x39, 0x2c, 0xc3, 0x57, 0x48, 0x22, 0x5b, 0xfa, 0x23, 0x0d, 0xad, + 0xb4, 0xac, 0xdb, 0xdd, 0x2e, 0xc8, 0xe1, 0xcf, 0x51, 0x05, 0x2a, 0xdf, 0xa5, 0x82, 0xca, 0x84, + 0x55, 0xb7, 0xaf, 0x65, 0xbc, 0x24, 0x85, 0x30, 0x82, 0x41, 0x0f, 0x08, 0xa1, 0x01, 0xd2, 0xc6, + 0xe1, 0x75, 0x63, 0xff, 0xe0, 0x0b, 0x66, 0x8b, 0x7b, 0x4c, 0xd0, 0x26, 0x56, 0x7e, 0x50, 0x4a, + 0x23, 0x89, 0x55, 0xbc, 0x87, 0x96, 0xc2, 0x80, 0xd9, 0x2a, 0x67, 0x57, 0x8e, 0xcb, 0x59, 0x1c, + 0x59, 0x3b, 0x60, 0x76, 0x5a, 0x3c, 0xf8, 0x22, 0xd2, 0x8e, 0xfe, 0xbb, 0x86, 0xce, 0x26, 0x52, + 0xbb, 0x4e, 0x28, 0xf0, 0x67, 0x53, 0x6f, 0x30, 0x8a, 0xbd, 0x01, 0xb4, 0xe5, 0x0b, 0xd6, 0x95, + 0x9f, 0x4a, 0x4c, 0xc9, 0xc4, 0x7f, 0x0f, 0x95, 0x1d, 0xc1, 0x86, 0x61, 0xad, 0x24, 0x8b, 0xb0, + 0x55, 0xf4, 0x01, 0x69, 0xfa, 0x5b, 0xa0, 0x4e, 0x22, 0x2b, 0xba, 0x9b, 0x89, 0x1e, 0x5e, 0x85, + 0x3f, 0x45, 0x2b, 0x01, 0xe5, 0xcc, 0x13, 0x84, 0x3d, 0x98, 0x11, 0xfe, 0x2c, 0x1f, 0x56, 0x2c, + 0xcf, 0x38, 0xf3, 0x6c, 0xd6, 0x3c, 0x3b, 0x3e, 0x6a, 0xac, 0x24, 0x44, 0x92, 0xda, 0xd3, 0xbf, + 0x2f, 0xa1, 0x65, 0xd5, 0x12, 0xa7, 0x50, 0xea, 0xdd, 0x5c, 0xa9, 0x2f, 0x15, 0x1b, 0x8f, 0x79, + 0x85, 0xc6, 0x1d, 0x74, 0x26, 0x14, 0x54, 0x8c, 0x42, 0x39, 0xa3, 0x05, 0x5a, 0x47, 0xd9, 0x93, + 0x3a, 0xcd, 0x73, 0xca, 0xe2, 0x99, 0xe8, 0x9b, 0x28, 0x5b, 0xfa, 0x77, 0x25, 0x74, 0x2e, 0x3f, + 0x98, 0xf8, 0x0d, 0x54, 0x0d, 0x19, 0x3f, 0x74, 0x6c, 
0xb6, 0x47, 0x87, 0x4c, 0xed, 0x8d, 0xff, + 0x29, 0xfd, 0x6a, 0x3b, 0x65, 0x91, 0xac, 0x1c, 0xee, 0x25, 0x6a, 0x96, 0xcf, 0x85, 0x7a, 0xf4, + 0xfc, 0x94, 0xc2, 0x1a, 0x33, 0xa2, 0x35, 0x66, 0xb4, 0x3c, 0xb1, 0xcf, 0xdb, 0x82, 0x3b, 0x5e, + 0x6f, 0xca, 0x11, 0x18, 0x23, 0x59, 0xcb, 0xf8, 0x63, 0x54, 0xe1, 0x2c, 0xf4, 0x47, 0xdc, 0x66, + 0x2a, 0x15, 0xb9, 0xcd, 0x03, 0xfb, 0x1e, 0xca, 0x04, 0x4b, 0xaa, 0xbb, 0xeb, 0xdb, 0xd4, 0x8d, + 0x8a, 0x93, 0xf6, 0xc7, 0x2a, 0xb4, 0x36, 0x51, 0x26, 0x48, 0x62, 0x0c, 0xb6, 0xe7, 0xaa, 0xca, + 0xc5, 0x8e, 0x4b, 0x4f, 0xa5, 0x45, 0x3e, 0xc8, 0xb5, 0x88, 0x59, 0xac, 0xa4, 0x32, 0xb8, 0xb9, + 0x0b, 0xe1, 0x0f, 0x0d, 0xad, 0x67, 0x05, 0x4f, 0x61, 0x27, 0x58, 0xf9, 0x9d, 0x70, 0xf9, 0x04, + 0xcf, 0x98, 0xb3, 0x16, 0xfe, 0xd2, 0x50, 0x23, 0x2b, 0x66, 0x51, 0x4e, 0x87, 0x4c, 0x30, 0x1e, + 0x26, 0x65, 0xc4, 0x5b, 0xa8, 0x42, 0xad, 0xd6, 0xfb, 0xdc, 0x1f, 0x05, 0xf1, 0x71, 0x83, 0xf8, + 0x6e, 0x2b, 0x1a, 0x49, 0xb8, 0x70, 0x02, 0x07, 0x8e, 0xba, 0x53, 0x99, 0x13, 0x78, 0xd7, 0xf1, + 0xba, 0x44, 0x72, 0x40, 0xc2, 0x83, 0x66, 0x5f, 0xcc, 0x4b, 0xc8, 0x2e, 0x97, 0x1c, 0xdc, 0x40, + 0xe5, 0xd0, 0xf6, 0x03, 0x56, 0x5b, 0x92, 0x22, 0x2b, 0x10, 0x72, 0x1b, 0x08, 0x24, 0xa2, 0xe3, + 0xcb, 0x68, 0x05, 0x04, 0xc3, 0x80, 0xda, 0xac, 0x56, 0x96, 0x42, 0x72, 0x11, 0xed, 0xc5, 0x44, + 0x92, 0xf2, 0xf5, 0x5f, 0x26, 0x8a, 0x24, 0x57, 0xdf, 0x36, 0x42, 0xb6, 0xef, 0x09, 0xee, 0xbb, + 0x2e, 0xe3, 0xea, 0x49, 0x49, 0xfb, 0xec, 0x24, 0x1c, 0x92, 0x91, 0xc2, 0x1e, 0x42, 0x41, 0x92, + 0x1b, 0xd5, 0x46, 0x6f, 0x9f, 0x20, 0xff, 0x33, 0x12, 0xdb, 0x3c, 0x07, 0xfe, 0x32, 0x8c, 0x8c, + 0x07, 0xfd, 0x37, 0x0d, 0x55, 0x95, 0xfe, 0x29, 0x34, 0xd6, 0xdd, 0x7c, 0x63, 0xbd, 0x56, 0x10, + 0x61, 0xcc, 0xee, 0xa9, 0x47, 0x1a, 0xda, 0x88, 0x43, 0xf7, 0x69, 0xb7, 0x49, 0x5d, 0xea, 0xd9, + 0x8c, 0xc7, 0xf7, 0x60, 0x03, 0x95, 0x9c, 0xb8, 0x91, 0x90, 0x32, 0x50, 0x6a, 0x59, 0xa4, 0xe4, + 0x04, 0xf8, 0x0a, 0xaa, 0xf4, 0xfd, 0x50, 0xc8, 0x16, 0x89, 0x9a, 0x28, 0x89, 0xfa, 0x8e, 0xa2, + 0x93, 0x44, 0x02, 0x7f, 0x88, 0xca, 0x81, 0xcf, 0x45, 0x58, 0x5b, 0x92, 0x51, 0x5f, 0x2b, 0x16, + 0x35, 0xec, 0x36, 0xb5, 0xac, 0x53, 0xa4, 0x02, 0x66, 0x48, 0x64, 0x4d, 0xff, 0x46, 0x43, 0xff, + 0x9f, 0x11, 0x7f, 0xa4, 0x83, 0xbb, 0x68, 0xd9, 0x89, 0x98, 0x0a, 0x1e, 0xbd, 0x55, 0xcc, 0xed, + 0x8c, 0x54, 0xa4, 0xd0, 0x2c, 0x86, 0x60, 0xb1, 0x69, 0xfd, 0x27, 0x0d, 0x9d, 0x9f, 0x8a, 0x57, + 0x42, 0x4c, 0xd8, 0xf9, 0x90, 0xbc, 0x72, 0x06, 0x62, 0xc2, 0xea, 0x96, 0x1c, 0x7c, 0x17, 0x55, + 0x24, 0x42, 0xb5, 0x7d, 0x57, 0x25, 0xd0, 0x8c, 0x13, 0x68, 0x29, 0xfa, 0x8b, 0xa3, 0xc6, 0x2b, + 0xd3, 0xb0, 0xdd, 0x88, 0xd9, 0x24, 0x31, 0x00, 0xa3, 0xc8, 0x38, 0xf7, 0xb9, 0x9a, 0x56, 0x39, + 0x8a, 0xef, 0x01, 0x81, 0x44, 0x74, 0xfd, 0xe7, 0xb4, 0x49, 0x01, 0x3d, 0x42, 0x7c, 0x50, 0x9c, + 0x49, 0x08, 0x0c, 0xa5, 0x23, 0x92, 0x83, 0x47, 0x68, 0xdd, 0x99, 0x80, 0x9b, 0x27, 0xdb, 0xc9, + 0x89, 0x5a, 0xb3, 0xa6, 0xcc, 0xaf, 0x4f, 0x72, 0xc8, 0x94, 0x0b, 0x9d, 0xa1, 0x29, 0x29, 0x38, + 0x09, 0x7d, 0x21, 0x02, 0x35, 0x4d, 0x37, 0x8a, 0x83, 0xdc, 0x34, 0x84, 0x8a, 0x7c, 0x5d, 0xa7, + 0x63, 0x11, 0x69, 0x4a, 0x7f, 0x5c, 0x4a, 0xf2, 0x21, 0x17, 0xcd, 0x3b, 0xc9, 0x6b, 0xe5, 0x0e, + 0x90, 0x67, 0x3e, 0x5a, 0x6b, 0x17, 0x32, 0x81, 0x27, 0x3c, 0x32, 0x25, 0x8d, 0x3b, 0x29, 0xf8, + 0xd7, 0xfe, 0x0b, 0xf8, 0xaf, 0xce, 0x02, 0xfe, 0xf8, 0x0e, 0x5a, 0x14, 0x6e, 0x3c, 0xec, 0xaf, + 0x17, 0xb3, 0xd8, 0xd9, 0x6d, 0x37, 0xab, 0x2a, 0xe5, 0x8b, 0x9d, 0xdd, 0x36, 0x01, 0x13, 0x78, + 0x1f, 0x95, 0xf9, 0xc8, 0x65, 0x80, 0x95, 0x16, 0x8b, 0x63, 0x2f, 0xc8, 0x60, 
0x3a, 0x7c, 0xf0, + 0x15, 0x92, 0xc8, 0x8e, 0xfe, 0x2d, 0xc0, 0xec, 0x2c, 0xa2, 0xc2, 0x1c, 0xad, 0xba, 0x99, 0xd9, + 0x51, 0x79, 0xb8, 0x75, 0xf2, 0xa9, 0x53, 0x43, 0x7f, 0x41, 0xf9, 0x5d, 0xcd, 0xf2, 0x48, 0xce, + 0x87, 0x4e, 0x11, 0x4a, 0x9f, 0x0d, 0x73, 0x00, 0xcd, 0x1b, 0x0d, 0xbc, 0x9a, 0x03, 0xe8, 0xe9, + 0x90, 0x44, 0x74, 0x38, 0x28, 0x21, 0xb3, 0x39, 0x13, 0x7b, 0xe9, 0xe2, 0x4a, 0x0e, 0x4a, 0x3b, + 0xe1, 0x90, 0x8c, 0x94, 0xfe, 0xab, 0x86, 0xd6, 0x26, 0x00, 0x35, 0x7e, 0x15, 0x95, 0x7b, 0x99, + 0x33, 0x9b, 0x64, 0x28, 0xba, 0xb3, 0x11, 0x0f, 0x76, 0x64, 0x02, 0xcb, 0x26, 0x76, 0xe4, 0x34, + 0xd6, 0xc2, 0x66, 0xf6, 0x5a, 0x46, 0x73, 0x7c, 0x5e, 0x89, 0xcf, 0xbc, 0x98, 0xc9, 0x85, 0x5e, + 0x9a, 0x77, 0xa1, 0xf5, 0x1f, 0x4b, 0x28, 0x06, 0x8d, 0x3b, 0xad, 0x77, 0xc9, 0x29, 0xa0, 0x37, + 0x2b, 0x87, 0xde, 0x8e, 0xfb, 0x6f, 0x4a, 0x26, 0xb6, 0xb9, 0x20, 0xff, 0xfe, 0x04, 0xc8, 0xbf, + 0x76, 0x02, 0x9b, 0x2f, 0x07, 0xfa, 0x8f, 0x35, 0xb4, 0x96, 0x91, 0x3e, 0x85, 0xe3, 0xbd, 0x9f, + 0x3f, 0xde, 0x97, 0x8a, 0x3f, 0x65, 0xce, 0x01, 0xdf, 0xce, 0xbd, 0x40, 0x6e, 0xb2, 0x06, 0x2a, + 0xdb, 0x4e, 0x97, 0xe7, 0x46, 0x00, 0x98, 0x21, 0x89, 0xe8, 0xfa, 0x57, 0xe8, 0xfc, 0x54, 0x8e, + 0xb0, 0x2d, 0x81, 0x56, 0xd7, 0x11, 0x8e, 0xef, 0xc5, 0xe7, 0xd2, 0x2c, 0xf6, 0xf2, 0x9d, 0x58, + 0x2f, 0x87, 0xcc, 0x94, 0x29, 0x92, 0x31, 0xdb, 0xdc, 0x79, 0xf2, 0xbc, 0xbe, 0xf0, 0xf4, 0x79, + 0x7d, 0xe1, 0xd9, 0xf3, 0xfa, 0xc2, 0xd7, 0xe3, 0xba, 0xf6, 0x64, 0x5c, 0xd7, 0x9e, 0x8e, 0xeb, + 0xda, 0xb3, 0x71, 0x5d, 0xfb, 0x7b, 0x5c, 0xd7, 0x7e, 0xf8, 0xa7, 0xbe, 0xf0, 0xc9, 0xc5, 0x97, + 0xfe, 0x99, 0xec, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0xc2, 0xa4, 0xff, 0x46, 0x13, 0x00, + 0x00, } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { @@ -709,7 +956,7 @@ func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Ingress) Marshal() (dAtA []byte, err error) { +func (m *IPAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -719,18 +966,18 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -738,9 +985,9 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -748,9 +995,46 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -762,7 +1046,7 @@ func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngressBackend) Marshal() (dAtA []byte, err error) { +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -772,19 +1056,19 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Resource != nil { + if m.ParentRef != nil { { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -792,27 +1076,12 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - } - { - size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x12 - i -= len(m.ServiceName) - copy(dAtA[i:], m.ServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *IngressClass) Marshal() (dAtA []byte, err error) { +func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -822,16 +1091,26 @@ func (m *IngressClass) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) { +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -855,7 +1134,7 @@ func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngressClassList) Marshal() (dAtA []byte, err error) { +func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -865,23 +1144,116 @@ func (m *IngressClassList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m 
*IngressClassList) MarshalTo(dAtA []byte) (int, error) { +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } @@ -1378,108 +1750,363 @@ func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ParentReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - if 
m.PathType != nil { - l = len(*m.PathType) - n += 1 + l + sovGenerated(uint64(l)) - } - return n + +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *HTTPIngressRuleValue) Size() (n int) { - if m == nil { - return 0 - } +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Ingress) Size() (n int) { - if m == nil { - return 0 +func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *IngressBackend) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *IngressClass) Size() (n int) { - if m == nil { - return 0 +func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *IngressClassList) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l 
+ sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CIDRs) > 0 { + for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CIDRs[iNdEx]) + copy(dAtA[i:], m.CIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IPAddressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IPAddressSpec) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1673,31 +2300,110 @@ func (m *IngressTLS) Size() (n int) { return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" +func (m *ParentReference) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" + +func (m *ServiceCIDR) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceCIDRList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CIDRs) > 0 { + for _, s := range m.CIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceCIDRStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions 
{ + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," } repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, @@ -1706,6 +2412,43 @@ func (this *HTTPIngressRuleValue) String() string { }, "") return s } +func (this *IPAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IPAddress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IPAddressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IPAddressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, + `}`, + }, "") + return s +} func (this *Ingress) String() string { if this == nil { return "nil" @@ -1900,22 +2643,1172 @@ func (this *IngressTLS) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *ParentReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDR) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ServiceCIDR{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ServiceCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceCIDRSpec{`, + `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceCIDRStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ServiceCIDRStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IPAddress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} + } + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IngressClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { +func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1938,15 +3831,15 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") 
+ return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1974,13 +3867,14 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1990,28 +3884,27 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2039,64 +3932,46 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum 
:= int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + s := string(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2106,25 +3981,24 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.Namespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -2147,7 +4021,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *Ingress) Unmarshal(dAtA []byte) error { +func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2170,17 +4044,17 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2190,28 +4064,27 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Controller = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2238,40 +4111,10 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := 
m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.Parameters == nil { + m.Parameters = &IngressClassParametersReference{} } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2296,7 +4139,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressBackend) Unmarshal(dAtA []byte) error { +func (m *IngressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2319,47 +4162,15 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2386,13 +4197,13 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2419,10 +4230,8 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Ingress{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2447,7 +4256,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClass) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2470,17 +4279,17 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2490,28 +4299,59 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2538,7 +4378,8 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, IngressPortStatus{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2563,7 +4404,7 @@ func (m *IngressClass) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassList) Unmarshal(dAtA []byte) error { +func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2586,48 +4427,15 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - 
return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2654,8 +4462,8 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, IngressClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2680,7 +4488,7 @@ func (m *IngressClassList) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { +func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2703,17 +4511,17 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group") + return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var stringLen uint64 + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2723,28 +4531,14 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Port |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.APIGroup = &s - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2772,11 +4566,11 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2804,11 +4598,62 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Error = &s iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2836,14 +4681,13 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Scope = &s + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2853,24 +4697,24 @@ func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -2893,7 +4737,7 @@ func (m 
*IngressClassParametersReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2912,51 +4756,19 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if b < 0x80 { break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Controller = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2983,10 +4795,10 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parameters == nil { - m.Parameters = &IngressClassParametersReference{} + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3011,7 +4823,7 @@ func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressList) Unmarshal(dAtA []byte) error { +func (m *IngressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3034,15 +4846,15 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3069,13 +4881,16 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if 
m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3102,66 +4917,16 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3171,27 +4936,29 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3219,11 +4986,62 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + 
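For wire type 2 (length-delimited) fields such as TLS, Rules, and IngressClassName above, the generated code reads a length varint and hands the sub-slice to the nested type's Unmarshal or converts it to a string; optional scalars like IngressClassName are stored through a pointer so an absent field stays distinguishable from an empty string, and unrecognized fields fall through to skipGenerated. A small sketch of the length-delimited read under those assumptions (readLengthDelimited is an illustrative name, not part of the generated code):

package main

import (
	"fmt"
	"io"
)

// readLengthDelimited reads one wire-type-2 payload: a varint length followed
// by that many bytes. The generated code then either converts the payload to
// a string or recurses into the nested message's Unmarshal.
func readLengthDelimited(b []byte, i int) (payload []byte, next int, err error) {
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if i >= len(b) {
			return nil, 0, io.ErrUnexpectedEOF
		}
		c := b[i]
		i++
		msglen |= int(c&0x7F) << shift
		if c < 0x80 {
			break
		}
	}
	if msglen < 0 || i+msglen > len(b) {
		return nil, 0, io.ErrUnexpectedEOF
	}
	return b[i : i+msglen], i + msglen, nil
}

func main() {
	data := []byte{0x03, 'w', 'e', 'b'} // length 3, then "web"
	p, next, err := readLengthDelimited(data, 0)
	if err != nil {
		panic(err)
	}
	// Optional scalars such as IngressClassName are kept behind a pointer so
	// "unset" and "empty string" remain distinct after decoding.
	s := string(p)
	ingressClassName := &s
	fmt.Println(*ingressClassName, next) // web 4
}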
return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3250,8 +5068,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, IngressPortStatus{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3276,7 +5093,7 @@ func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { +func (m *IngressTLS) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3299,17 +5116,17 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group") + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3319,25 +5136,55 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3360,7 +5207,7 @@ func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { +func (m *ParentReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3383,17 +5230,49 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - m.Port = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3403,14 +5282,27 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3438,11 +5330,11 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Protocol = k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3470,8 +5362,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Error = &s + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -3494,7 +5385,7 @@ func (m *IngressPortStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressRule) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3517,17 +5408,17 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3537,27 +5428,28 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3584,63 +5476,13 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3667,10 +5509,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3695,7 +5534,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressSpec) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3718,15 +5557,15 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3753,50 +5592,13 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backend == nil { - m.Backend = &IngressBackend{} - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3823,44 +5625,11 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ServiceCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3882,7 +5651,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressStatus) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3905,17 +5674,17 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3925,24 +5694,23 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -3965,7 +5733,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngressTLS) Unmarshal(dAtA []byte) error { +func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3988,17 +5756,17 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4008,55 
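ServiceCIDRSpec.CIDRs above is decoded by appending one string per occurrence of field 1; per the API comment, at most two entries are allowed, one per IP family. A quick sketch of validating such values with the standard library's net/netip (the sample prefixes are illustrative):

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// ServiceCIDRSpec.CIDRs: at most two prefixes, one per IP family.
	cidrs := []string{"10.96.0.0/16", "2001:db8::/112"}
	for _, c := range cidrs {
		p, err := netip.ParsePrefix(c)
		if err != nil {
			fmt.Println("invalid CIDR:", c, err)
			continue
		}
		fmt.Printf("%s family-is-IPv4=%v prefix-bits=%d\n", p, p.Addr().Is4(), p.Bits())
	}
}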
+5776,25 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/constraint/vendor/k8s.io/api/networking/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/networking/v1beta1/generated.proto index f36df9ec1..3368dcaec 100644 --- a/constraint/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -73,6 +73,44 @@ message HTTPIngressRuleValue { repeated HTTPIngressPath paths = 1; } +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +message IPAddress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IPAddressSpec spec = 2; +} + +// IPAddressList contains a list of IPAddress. +message IPAddressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of IPAddresses. + repeated IPAddress items = 2; +} + +// IPAddressSpec describe the attributes in an IP Address. +message IPAddressSpec { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. 
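The IPAddress message added above ties the object name to the canonical textual form of the IP (no leading zeros for IPv4, RFC 5952 for IPv6). Since netip.Addr.String also produces that form, a round-trip check is a convenient way to test a candidate name; isCanonical below is a hypothetical helper, not part of the API:

package main

import (
	"fmt"
	"net/netip"
)

// isCanonical reports whether name is the canonical text form of an IP:
// netip.Addr.String prints IPv4 without leading zeros and IPv6 per RFC 5952,
// so a valid IPAddress object name must round-trip unchanged.
func isCanonical(name string) bool {
	addr, err := netip.ParseAddr(name)
	if err != nil {
		return false
	}
	return addr.String() == name
}

func main() {
	fmt.Println(isCanonical("192.168.1.5"))       // true
	fmt.Println(isCanonical("10.01.2.3"))         // false (leading zero)
	fmt.Println(isCanonical("2001:db8::1"))       // true
	fmt.Println(isCanonical("2001:db8:0:0:0::1")) // false (not RFC 5952 form)
}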
+ // +required + optional ParentReference parentRef = 1; +} + // Ingress is a collection of rules that allow inbound connections to reach the // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name @@ -81,7 +119,7 @@ message Ingress { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the Ingress. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -102,13 +140,13 @@ message IngressBackend { // servicePort Specifies the port of the referenced service. // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; // resource is an ObjectRef to another Kubernetes resource in the namespace // of the Ingress object. If resource is specified, serviceName and servicePort // must not be specified. // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; + optional .k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressClass represents the class of the Ingress, referenced by the Ingress @@ -120,7 +158,7 @@ message IngressClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the desired state of the IngressClass. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -132,7 +170,7 @@ message IngressClass { message IngressClassList { // Standard list metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of IngressClasses. repeated IngressClass items = 2; @@ -186,7 +224,7 @@ message IngressList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of Ingress. repeated Ingress items = 2; @@ -351,3 +389,74 @@ message IngressTLS { optional string secretName = 2; } +// ParentReference describes a reference to a parent object. +message ParentReference { + // Group is the group of the object being referenced. + // +optional + optional string group = 1; + + // Resource is the resource of the object being referenced. + // +required + optional string resource = 2; + + // Namespace is the namespace of the object being referenced. + // +optional + optional string namespace = 3; + + // Name is the name of the object being referenced. + // +required + optional string name = 4; +} + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +message ServiceCIDR { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRSpec spec = 2; + + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceCIDRStatus status = 3; +} + +// ServiceCIDRList contains a list of ServiceCIDR objects. +message ServiceCIDRList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of ServiceCIDRs. + repeated ServiceCIDR items = 2; +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +message ServiceCIDRSpec { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + // +listType=atomic + repeated string cidrs = 1; +} + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +message ServiceCIDRStatus { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; +} + diff --git a/constraint/vendor/k8s.io/api/networking/v1beta1/register.go b/constraint/vendor/k8s.io/api/networking/v1beta1/register.go index 04234953e..9d2a13cc6 100644 --- a/constraint/vendor/k8s.io/api/networking/v1beta1/register.go +++ b/constraint/vendor/k8s.io/api/networking/v1beta1/register.go @@ -51,6 +51,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &IngressClass{}, &IngressClassList{}, + &IPAddress{}, + &IPAddressList{}, + &ServiceCIDR{}, + &ServiceCIDRList{}, ) // Add the watch version that applies metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/constraint/vendor/k8s.io/api/networking/v1beta1/types.go b/constraint/vendor/k8s.io/api/networking/v1beta1/types.go index 34dfe76aa..cd7126a5a 100644 --- a/constraint/vendor/k8s.io/api/networking/v1beta1/types.go +++ b/constraint/vendor/k8s.io/api/networking/v1beta1/types.go @@ -218,7 +218,7 @@ type IngressRule struct { // default backend, is left to the controller fulfilling the Ingress. Http is // currently the only supported IngressRuleValue. // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` + IngressRuleValue `json:",inline" protobuf:"bytes,2,opt,name=ingressRuleValue"` } // IngressRuleValue represents a rule to apply against incoming requests. If the @@ -421,3 +421,133 @@ type IngressClassList struct { // items is the list of IngressClasses. 
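register.go above now adds IPAddress, IPAddressList, ServiceCIDR, and ServiceCIDRList to the scheme, so clients built against this vendored k8s.io/api recognize the new kinds. A sketch, assuming the bumped k8s.io/api and k8s.io/apimachinery modules are on the import path:

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := networkingv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// After this bump the scheme also knows the newly registered kinds.
	for _, kind := range []string{"IPAddress", "ServiceCIDR"} {
		gvk := networkingv1beta1.SchemeGroupVersion.WithKind(kind)
		fmt.Println(gvk.String(), scheme.Recognizes(gvk))
	}
}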
Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs +// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. +// An IP address can be represented in different formats, to guarantee the uniqueness of the IP, +// the name of the object is the IP address in canonical format, four decimal digits separated +// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. +// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 +// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1 +type IPAddress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the IPAddress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IPAddressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPAddressSpec describe the attributes in an IP Address. +type IPAddressSpec struct { + // ParentRef references the resource that an IPAddress is attached to. + // An IPAddress must reference a parent object. + // +required + ParentRef *ParentReference `json:"parentRef,omitempty" protobuf:"bytes,1,opt,name=parentRef"` +} + +// ParentReference describes a reference to a parent object. +type ParentReference struct { + // Group is the group of the object being referenced. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + // Resource is the resource of the object being referenced. + // +required + Resource string `json:"resource,omitempty" protobuf:"bytes,2,opt,name=resource"` + // Namespace is the namespace of the object being referenced. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + // Name is the name of the object being referenced. + // +required + Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// IPAddressList contains a list of IPAddress. +type IPAddressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of IPAddresses. + Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). +// This range is used to allocate ClusterIPs to Service objects. +type ServiceCIDR struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
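Putting the new Go types together: an IPAddress is named by the canonical IP and must point at its owner through Spec.ParentRef. The namespace and service name below are illustrative values, not anything mandated by the API:

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ip := networkingv1beta1.IPAddress{
		// The object name is the canonical form of the IP itself.
		ObjectMeta: metav1.ObjectMeta{Name: "192.168.1.5"},
		Spec: networkingv1beta1.IPAddressSpec{
			// ParentRef is required and points at the resource the address
			// was allocated for; these particular values are illustrative.
			ParentRef: &networkingv1beta1.ParentReference{
				Group:     "", // core group
				Resource:  "services",
				Namespace: "default",
				Name:      "kubernetes",
			},
		},
	}
	fmt.Println(ip.Name, ip.Spec.ParentRef.Resource, ip.Spec.ParentRef.Name)
}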
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the desired state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // status represents the current state of the ServiceCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. +type ServiceCIDRSpec struct { + // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") + // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. + // This field is immutable. + // +optional + // +listType=atomic + CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` +} + +const ( + // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the + // apiserver to allocate ClusterIPs for Services. + ServiceCIDRConditionReady = "Ready" + // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is + // being deleted. + ServiceCIDRReasonTerminating = "Terminating" +) + +// ServiceCIDRStatus describes the current state of the ServiceCIDR. +type ServiceCIDRStatus struct { + // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. + // Current service state + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 + +// ServiceCIDRList contains a list of ServiceCIDR objects. +type ServiceCIDRList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // items is the list of ServiceCIDRs. + Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/constraint/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go index b2373669f..9d27517f3 100644 --- a/constraint/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -47,6 +47,35 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { return map_HTTPIngressRuleValue } +var map_IPAddress = map[string]string{ + "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. 
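ServiceCIDRStatus uses standard metav1.Condition entries, with the Ready condition type and Terminating reason defined as constants above, so the usual apimachinery condition helpers apply. A sketch, assuming the vendored k8s.io/apimachinery is available (the Reason value is illustrative):

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cidr := networkingv1beta1.ServiceCIDR{
		Spec: networkingv1beta1.ServiceCIDRSpec{CIDRs: []string{"10.96.0.0/16"}},
		Status: networkingv1beta1.ServiceCIDRStatus{
			Conditions: []metav1.Condition{{
				Type:   networkingv1beta1.ServiceCIDRConditionReady,
				Status: metav1.ConditionTrue,
				Reason: "Accepted", // illustrative reason
			}},
		},
	}
	ready := meta.IsStatusConditionTrue(cidr.Status.Conditions, networkingv1beta1.ServiceCIDRConditionReady)
	fmt.Println("ready:", ready) // ready: true
}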
An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IPAddress) SwaggerDoc() map[string]string { + return map_IPAddress +} + +var map_IPAddressList = map[string]string{ + "": "IPAddressList contains a list of IPAddress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of IPAddresses.", +} + +func (IPAddressList) SwaggerDoc() map[string]string { + return map_IPAddressList +} + +var map_IPAddressSpec = map[string]string{ + "": "IPAddressSpec describe the attributes in an IP Address.", + "parentRef": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.", +} + +func (IPAddressSpec) SwaggerDoc() map[string]string { + return map_IPAddressSpec +} + var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -201,4 +230,55 @@ func (IngressTLS) SwaggerDoc() map[string]string { return map_IngressTLS } +var map_ParentReference = map[string]string{ + "": "ParentReference describes a reference to a parent object.", + "group": "Group is the group of the object being referenced.", + "resource": "Resource is the resource of the object being referenced.", + "namespace": "Namespace is the namespace of the object being referenced.", + "name": "Name is the name of the object being referenced.", +} + +func (ParentReference) SwaggerDoc() map[string]string { + return map_ParentReference +} + +var map_ServiceCIDR = map[string]string{ + "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ServiceCIDR) SwaggerDoc() map[string]string { + return map_ServiceCIDR +} + +var map_ServiceCIDRList = map[string]string{ + "": "ServiceCIDRList contains a list of ServiceCIDR objects.", + "metadata": "Standard object's metadata. 
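The SwaggerDoc maps above feed the generated OpenAPI descriptions; the empty-string key holds the type-level doc and each field name maps to its field doc. A trivial usage sketch, assuming this vendored package version:

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

func main() {
	docs := networkingv1beta1.IPAddress{}.SwaggerDoc()
	fmt.Println(docs[""])     // type-level description
	fmt.Println(docs["spec"]) // per-field description
}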
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of ServiceCIDRs.", +} + +func (ServiceCIDRList) SwaggerDoc() map[string]string { + return map_ServiceCIDRList +} + +var map_ServiceCIDRSpec = map[string]string{ + "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", + "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", +} + +func (ServiceCIDRSpec) SwaggerDoc() map[string]string { + return map_ServiceCIDRSpec +} + +var map_ServiceCIDRStatus = map[string]string{ + "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", + "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", +} + +func (ServiceCIDRStatus) SwaggerDoc() map[string]string { + return map_ServiceCIDRStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/constraint/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go b/constraint/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go new file mode 100644 index 000000000..bc2207766 --- /dev/null +++ b/constraint/vendor/k8s.io/api/networking/v1beta1/well_known_labels.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +const ( + + // TODO: Use IPFamily as field with a field selector,And the value is set based on + // the name at create time and immutable. + // LabelIPAddressFamily is used to indicate the IP family of a Kubernetes IPAddress. + // This label simplify dual-stack client operations allowing to obtain the list of + // IP addresses filtered by family. + LabelIPAddressFamily = "ipaddress.kubernetes.io/ip-family" + // LabelManagedBy is used to indicate the controller or entity that manages + // an IPAddress. This label aims to enable different IPAddress + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // IPAddress objects. + LabelManagedBy = "ipaddress.kubernetes.io/managed-by" +) diff --git a/constraint/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go index 005d64e7f..1a6869cd6 100644 --- a/constraint/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -23,6 +23,7 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -71,6 +72,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
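well_known_labels.go above introduces ipaddress.kubernetes.io/ip-family and ipaddress.kubernetes.io/managed-by so IPAddress objects can be filtered by family or owning controller. A sketch that builds a label selector from those constants (the managed-by value is a placeholder, not a value defined by the API):

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Select IPv4 IPAddress objects owned by one controller.
	sel := labels.Set{
		networkingv1beta1.LabelIPAddressFamily: "IPv4",
		networkingv1beta1.LabelManagedBy:       "my-ipam-controller",
	}.AsSelector()
	// The resulting string can be used as ListOptions.LabelSelector.
	fmt.Println(sel.String())
}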
+func (in *IPAddress) DeepCopyInto(out *IPAddress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. +func (in *IPAddress) DeepCopy() *IPAddress { + if in == nil { + return nil + } + out := new(IPAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { + if in == nil { + return nil + } + out := new(IPAddressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAddressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { + *out = *in + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { + if in == nil { + return nil + } + out := new(IPAddressSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Ingress) DeepCopyInto(out *Ingress) { *out = *in @@ -448,3 +530,124 @@ func (in *IngressTLS) DeepCopy() *IngressTLS { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParentReference) DeepCopyInto(out *ParentReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { + if in == nil { + return nil + } + out := new(ParentReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. 
+func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { + if in == nil { + return nil + } + out := new(ServiceCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDR) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCIDR, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. +func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { + if in == nil { + return nil + } + out := new(ServiceCIDRList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { + *out = *in + if in.CIDRs != nil { + in, out := &in.CIDRs, &out.CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. +func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { + if in == nil { + return nil + } + out := new(ServiceCIDRSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. +func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { + if in == nil { + return nil + } + out := new(ServiceCIDRStatus) + in.DeepCopyInto(out) + return out +} diff --git a/constraint/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go index e8b4c7ec7..a876fd5fe 100644 --- a/constraint/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/constraint/vendor/k8s.io/api/networking/v1beta1/zz_generated.prerelease-lifecycle.go @@ -25,6 +25,42 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" ) +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
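The generated deepcopy functions above exist so callers can mutate copies of API objects (for example, objects handed out by an informer cache) without aliasing the original's slices and maps. A small demonstration using the new ServiceCIDR type:

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

func main() {
	orig := &networkingv1beta1.ServiceCIDR{
		Spec: networkingv1beta1.ServiceCIDRSpec{CIDRs: []string{"10.96.0.0/16"}},
	}
	clone := orig.DeepCopy()
	clone.Spec.CIDRs[0] = "10.0.0.0/8"                   // the clone's slice is independent
	fmt.Println(orig.Spec.CIDRs[0], clone.Spec.CIDRs[0]) // 10.96.0.0/16 10.0.0.0/8
}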
+func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *Ingress) APILifecycleIntroduced() (major, minor int) { @@ -120,3 +156,39 @@ func (in *IngressList) APILifecycleReplacement() schema.GroupVersionKind { func (in *IngressList) APILifecycleRemoved() (major, minor int) { return 1, 22 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. 
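The prerelease-lifecycle methods follow the generator's convention: deprecation defaults to the introduced release plus three minor versions and removal to three more, which is why the v1beta1 types introduced in 1.31 report 1.34 and 1.37. A quick check against the generated methods, assuming this vendored version:

package main

import (
	"fmt"

	networkingv1beta1 "k8s.io/api/networking/v1beta1"
)

func main() {
	var ip networkingv1beta1.IPAddress
	imaj, imin := ip.APILifecycleIntroduced()
	dmaj, dmin := ip.APILifecycleDeprecated()
	rmaj, rmin := ip.APILifecycleRemoved()
	fmt.Printf("introduced %d.%d, deprecated %d.%d, removed %d.%d\n",
		imaj, imin, dmaj, dmin, rmaj, rmin) // 1.31, 1.34, 1.37
}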
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/constraint/vendor/k8s.io/api/node/v1/doc.go b/constraint/vendor/k8s.io/api/node/v1/doc.go index 12cbcb8a0..57ca52445 100644 --- a/constraint/vendor/k8s.io/api/node/v1/doc.go +++ b/constraint/vendor/k8s.io/api/node/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=node.k8s.io package v1 // import "k8s.io/api/node/v1" diff --git a/constraint/vendor/k8s.io/api/node/v1/generated.proto b/constraint/vendor/k8s.io/api/node/v1/generated.proto index 0152d5e3a..e6b8852ec 100644 --- a/constraint/vendor/k8s.io/api/node/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/node/v1/generated.proto @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1"; message Overhead { // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. @@ -47,7 +47,7 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values @@ -80,7 +80,7 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated RuntimeClass items = 2; @@ -103,6 +103,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. 
// +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/constraint/vendor/k8s.io/api/node/v1/types.go b/constraint/vendor/k8s.io/api/node/v1/types.go index b00f58772..169862ea9 100644 --- a/constraint/vendor/k8s.io/api/node/v1/types.go +++ b/constraint/vendor/k8s.io/api/node/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 // RuntimeClass defines a class of container runtime supported in the cluster. // The RuntimeClass is used to determine which container runtime is used to run @@ -93,6 +94,7 @@ type Scheduling struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.20 // RuntimeClassList is a list of RuntimeClass objects. type RuntimeClassList struct { diff --git a/constraint/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..749795568 --- /dev/null +++ b/constraint/vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RuntimeClass) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RuntimeClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 20 +} diff --git a/constraint/vendor/k8s.io/api/node/v1alpha1/generated.proto b/constraint/vendor/k8s.io/api/node/v1alpha1/generated.proto index 4673e9261..bc68718d9 100644 --- a/constraint/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ b/constraint/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1alpha1"; message Overhead { // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. 
@@ -47,7 +47,7 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the RuntimeClass // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -59,7 +59,7 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated RuntimeClass items = 2; @@ -113,6 +113,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/constraint/vendor/k8s.io/api/node/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/node/v1beta1/generated.proto index 54dbc0995..497027e03 100644 --- a/constraint/vendor/k8s.io/api/node/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -34,7 +34,7 @@ option go_package = "k8s.io/api/node/v1beta1"; message Overhead { // podFixed represents the fixed resource overhead associated with running a pod. // +optional - map podFixed = 1; + map podFixed = 1; } // RuntimeClass defines a class of container runtime supported in the cluster. @@ -47,7 +47,7 @@ message Overhead { message RuntimeClass { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // handler specifies the underlying runtime and configuration that the CRI // implementation will use to handle pods of this class. The possible values @@ -80,7 +80,7 @@ message RuntimeClassList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is a list of schema objects. repeated RuntimeClass items = 2; @@ -103,6 +103,6 @@ message Scheduling { // tolerated by the pod and the RuntimeClass. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; + repeated .k8s.io.api.core.v1.Toleration tolerations = 2; } diff --git a/constraint/vendor/k8s.io/api/policy/v1/doc.go b/constraint/vendor/k8s.io/api/policy/v1/doc.go index 177cdf523..c51e02685 100644 --- a/constraint/vendor/k8s.io/api/policy/v1/doc.go +++ b/constraint/vendor/k8s.io/api/policy/v1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // Package policy is for any kind of policy object. 
Suitable examples, even if // they aren't all here, are PodDisruptionBudget, diff --git a/constraint/vendor/k8s.io/api/policy/v1/generated.proto b/constraint/vendor/k8s.io/api/policy/v1/generated.proto index a79e71028..57128e811 100644 --- a/constraint/vendor/k8s.io/api/policy/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/policy/v1/generated.proto @@ -35,11 +35,11 @@ option go_package = "k8s.io/api/policy/v1"; message Eviction { // ObjectMeta describes the pod that is being evicted. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods @@ -47,7 +47,7 @@ message PodDisruptionBudget { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. // +optional @@ -63,7 +63,7 @@ message PodDisruptionBudgetList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of PodDisruptionBudgets repeated PodDisruptionBudget items = 2; @@ -76,7 +76,7 @@ message PodDisruptionBudgetSpec { // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. @@ -84,14 +84,14 @@ message PodDisruptionBudgetSpec { // all pods within the namespace. // +patchStrategy=replace // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods // should be considered for eviction. Current implementation considers healthy pods, @@ -142,7 +142,7 @@ message PodDisruptionBudgetStatus { // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. // +optional - map disruptedPods = 2; + map disruptedPods = 2; // Number of pod disruptions that are currently allowed. 
optional int32 disruptionsAllowed = 3; @@ -174,6 +174,6 @@ message PodDisruptionBudgetStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } diff --git a/constraint/vendor/k8s.io/api/policy/v1/types.go b/constraint/vendor/k8s.io/api/policy/v1/types.go index 45b9550f4..f05367ebe 100644 --- a/constraint/vendor/k8s.io/api/policy/v1/types.go +++ b/constraint/vendor/k8s.io/api/policy/v1/types.go @@ -170,6 +170,7 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { @@ -188,6 +189,7 @@ type PodDisruptionBudget struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.21 // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { @@ -203,6 +205,7 @@ type PodDisruptionBudgetList struct { // +genclient // +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.22 // Eviction evicts a pod from its node subject to certain policies and safety constraints. // This is a subresource of Pod. A request to cause such an eviction is diff --git a/constraint/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..d6663b923 --- /dev/null +++ b/constraint/vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Eviction) APILifecycleIntroduced() (major, minor int) { + return 1, 22 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodDisruptionBudget) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *PodDisruptionBudgetList) APILifecycleIntroduced() (major, minor int) { + return 1, 21 +} diff --git a/constraint/vendor/k8s.io/api/policy/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/policy/v1beta1/generated.proto index d1409913f..91e33f233 100644 --- a/constraint/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -35,11 +35,11 @@ option go_package = "k8s.io/api/policy/v1beta1"; message Eviction { // ObjectMeta describes the pod that is being evicted. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods @@ -47,7 +47,7 @@ message PodDisruptionBudget { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. // +optional @@ -63,7 +63,7 @@ message PodDisruptionBudgetList { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items list individual PodDisruptionBudget objects repeated PodDisruptionBudget items = 2; @@ -76,7 +76,7 @@ message PodDisruptionBudgetSpec { // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. @@ -84,14 +84,14 @@ message PodDisruptionBudgetSpec { // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods. // In policy/v1, an empty selector will select all pods in the namespace. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; + optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods // should be considered for eviction. Current implementation considers healthy pods, @@ -142,7 +142,7 @@ message PodDisruptionBudgetStatus { // If everything goes smooth this map should be empty for the most of the time. // Large number of entries in the map may indicate problems with pod deletions. 
// +optional - map disruptedPods = 2; + map disruptedPods = 2; // Number of pod disruptions that are currently allowed. optional int32 disruptionsAllowed = 3; @@ -174,6 +174,6 @@ message PodDisruptionBudgetStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } diff --git a/constraint/vendor/k8s.io/api/rbac/v1/doc.go b/constraint/vendor/k8s.io/api/rbac/v1/doc.go index 80f43ce92..b0e4e5b5b 100644 --- a/constraint/vendor/k8s.io/api/rbac/v1/doc.go +++ b/constraint/vendor/k8s.io/api/rbac/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=rbac.authorization.k8s.io package v1 // import "k8s.io/api/rbac/v1" diff --git a/constraint/vendor/k8s.io/api/rbac/v1/generated.proto b/constraint/vendor/k8s.io/api/rbac/v1/generated.proto index 62f5e558b..87b8f832d 100644 --- a/constraint/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/rbac/v1/generated.proto @@ -34,14 +34,14 @@ message AggregationRule { // If any of the selectors match, then the ClusterRole's permissions will be added // +optional // +listType=atomic - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional @@ -60,7 +60,7 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -77,7 +77,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -87,7 +87,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -128,7 +128,7 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional @@ -142,7 +142,7 @@ message Role { message RoleBinding { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -159,7 +159,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -169,7 +169,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/constraint/vendor/k8s.io/api/rbac/v1/types.go b/constraint/vendor/k8s.io/api/rbac/v1/types.go index 8bef1ac46..f9628b853 100644 --- a/constraint/vendor/k8s.io/api/rbac/v1/types.go +++ b/constraint/vendor/k8s.io/api/rbac/v1/types.go @@ -84,7 +84,7 @@ type Subject struct { // Defaults to "" for ServiceAccount subjects. // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty @@ -106,6 +106,7 @@ type RoleRef struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. type Role struct { @@ -122,6 +123,7 @@ type Role struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given @@ -144,6 +146,7 @@ type RoleBinding struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleBindingList is a collection of RoleBindings type RoleBindingList struct { @@ -157,6 +160,7 @@ type RoleBindingList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // RoleList is a collection of Roles type RoleList struct { @@ -172,6 +176,7 @@ type RoleList struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. 
type ClusterRole struct { @@ -204,6 +209,7 @@ type AggregationRule struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. @@ -225,6 +231,7 @@ type ClusterRoleBinding struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { @@ -238,6 +245,7 @@ type ClusterRoleBindingList struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.8 // ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { diff --git a/constraint/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..f6f74413b --- /dev/null +++ b/constraint/vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRole) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterRoleList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *Role) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleBinding) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleBindingList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *RoleList) APILifecycleIntroduced() (major, minor int) { + return 1, 8 +} diff --git a/constraint/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/constraint/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 170e008a5..19d43cdee 100644 --- a/constraint/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/constraint/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -34,7 +34,7 @@ message AggregationRule { // If any of the selectors match, then the ClusterRole's permissions will be added // +optional // +listType=atomic - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. @@ -42,7 +42,7 @@ message AggregationRule { message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional @@ -62,7 +62,7 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -79,7 +79,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -90,7 +90,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -132,7 +132,7 @@ message PolicyRule { message Role { // Standard object's metadata. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional @@ -147,7 +147,7 @@ message Role { message RoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -164,7 +164,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -175,7 +175,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/constraint/vendor/k8s.io/api/rbac/v1alpha1/types.go b/constraint/vendor/k8s.io/api/rbac/v1alpha1/types.go index 9a0a21977..2146b4ce3 100644 --- a/constraint/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/constraint/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -84,7 +84,7 @@ type Subject struct { // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. // +k8s:conversion-gen=false // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"` + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty diff --git a/constraint/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 7dfc50d7e..8bfbd0c8a 100644 --- a/constraint/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -34,7 +34,7 @@ message AggregationRule { // If any of the selectors match, then the ClusterRole's permissions will be added // +optional // +listType=atomic - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. @@ -42,7 +42,7 @@ message AggregationRule { message ClusterRole { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole // +optional @@ -62,7 +62,7 @@ message ClusterRole { message ClusterRoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. 
// +optional @@ -79,7 +79,7 @@ message ClusterRoleBinding { message ClusterRoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoleBindings repeated ClusterRoleBinding items = 2; @@ -90,7 +90,7 @@ message ClusterRoleBindingList { message ClusterRoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of ClusterRoles repeated ClusterRole items = 2; @@ -133,7 +133,7 @@ message PolicyRule { message Role { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role // +optional @@ -148,7 +148,7 @@ message Role { message RoleBinding { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Subjects holds references to the objects the role applies to. // +optional @@ -165,7 +165,7 @@ message RoleBinding { message RoleBindingList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of RoleBindings repeated RoleBinding items = 2; @@ -176,7 +176,7 @@ message RoleBindingList { message RoleList { // Standard object's metadata. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of Roles repeated Role items = 2; diff --git a/constraint/vendor/k8s.io/api/rbac/v1beta1/types.go b/constraint/vendor/k8s.io/api/rbac/v1beta1/types.go index f761f81a6..9cfaaceb9 100644 --- a/constraint/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/constraint/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -84,7 +84,7 @@ type Subject struct { // Defaults to "" for ServiceAccount subjects. // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"` // Name of the object being referenced. Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha2/doc.go b/constraint/vendor/k8s.io/api/resource/v1alpha2/doc.go deleted file mode 100644 index d9c20e089..000000000 --- a/constraint/vendor/k8s.io/api/resource/v1alpha2/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package - -// +groupName=resource.k8s.io - -// Package v1alpha2 is the v1alpha2 version of the resource API. -package v1alpha2 // import "k8s.io/api/resource/v1alpha2" diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go b/constraint/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go deleted file mode 100644 index 6c6ba438e..000000000 --- a/constraint/vendor/k8s.io/api/resource/v1alpha2/generated.pb.go +++ /dev/null @@ -1,10589 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/api/resource/v1alpha2/generated.proto - -package v1alpha2 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/api/core/v1" - resource "k8s.io/apimachinery/pkg/api/resource" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *AllocationResult) Reset() { *m = AllocationResult{} } -func (*AllocationResult) ProtoMessage() {} -func (*AllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{0} -} -func (m *AllocationResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllocationResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllocationResult.Merge(m, src) -} -func (m *AllocationResult) XXX_Size() int { - return m.Size() -} -func (m *AllocationResult) XXX_DiscardUnknown() { - xxx_messageInfo_AllocationResult.DiscardUnknown(m) -} - -var xxx_messageInfo_AllocationResult proto.InternalMessageInfo - -func (m *AllocationResultModel) Reset() { *m = AllocationResultModel{} } -func (*AllocationResultModel) ProtoMessage() {} -func (*AllocationResultModel) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{1} -} -func (m *AllocationResultModel) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllocationResultModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllocationResultModel) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllocationResultModel.Merge(m, src) -} -func (m *AllocationResultModel) XXX_Size() int { - return m.Size() -} -func (m *AllocationResultModel) XXX_DiscardUnknown() { - xxx_messageInfo_AllocationResultModel.DiscardUnknown(m) -} - -var xxx_messageInfo_AllocationResultModel proto.InternalMessageInfo - -func (m *DriverAllocationResult) Reset() { *m = DriverAllocationResult{} } -func (*DriverAllocationResult) ProtoMessage() {} -func (*DriverAllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{2} -} -func (m *DriverAllocationResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DriverAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DriverAllocationResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_DriverAllocationResult.Merge(m, src) -} -func (m *DriverAllocationResult) XXX_Size() int { - return m.Size() -} -func (m *DriverAllocationResult) XXX_DiscardUnknown() { - xxx_messageInfo_DriverAllocationResult.DiscardUnknown(m) -} - -var xxx_messageInfo_DriverAllocationResult proto.InternalMessageInfo - -func (m *DriverRequests) Reset() { *m = DriverRequests{} } -func (*DriverRequests) ProtoMessage() {} -func (*DriverRequests) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{3} -} -func (m *DriverRequests) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DriverRequests) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DriverRequests) XXX_Merge(src proto.Message) { - xxx_messageInfo_DriverRequests.Merge(m, src) -} -func (m *DriverRequests) XXX_Size() int { - return m.Size() -} -func (m *DriverRequests) XXX_DiscardUnknown() { - 
xxx_messageInfo_DriverRequests.DiscardUnknown(m) -} - -var xxx_messageInfo_DriverRequests proto.InternalMessageInfo - -func (m *NamedResourcesAllocationResult) Reset() { *m = NamedResourcesAllocationResult{} } -func (*NamedResourcesAllocationResult) ProtoMessage() {} -func (*NamedResourcesAllocationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{4} -} -func (m *NamedResourcesAllocationResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamedResourcesAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamedResourcesAllocationResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesAllocationResult.Merge(m, src) -} -func (m *NamedResourcesAllocationResult) XXX_Size() int { - return m.Size() -} -func (m *NamedResourcesAllocationResult) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesAllocationResult.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResourcesAllocationResult proto.InternalMessageInfo - -func (m *NamedResourcesAttribute) Reset() { *m = NamedResourcesAttribute{} } -func (*NamedResourcesAttribute) ProtoMessage() {} -func (*NamedResourcesAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{5} -} -func (m *NamedResourcesAttribute) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamedResourcesAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamedResourcesAttribute) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesAttribute.Merge(m, src) -} -func (m *NamedResourcesAttribute) XXX_Size() int { - return m.Size() -} -func (m *NamedResourcesAttribute) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesAttribute.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResourcesAttribute proto.InternalMessageInfo - -func (m *NamedResourcesAttributeValue) Reset() { *m = NamedResourcesAttributeValue{} } -func (*NamedResourcesAttributeValue) ProtoMessage() {} -func (*NamedResourcesAttributeValue) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{6} -} -func (m *NamedResourcesAttributeValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamedResourcesAttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamedResourcesAttributeValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesAttributeValue.Merge(m, src) -} -func (m *NamedResourcesAttributeValue) XXX_Size() int { - return m.Size() -} -func (m *NamedResourcesAttributeValue) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesAttributeValue.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResourcesAttributeValue proto.InternalMessageInfo - -func (m *NamedResourcesFilter) Reset() { *m = NamedResourcesFilter{} } -func (*NamedResourcesFilter) ProtoMessage() {} -func (*NamedResourcesFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{7} -} -func (m *NamedResourcesFilter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamedResourcesFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamedResourcesFilter) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesFilter.Merge(m, src) -} -func (m *NamedResourcesFilter) XXX_Size() int { - return m.Size() -} -func (m *NamedResourcesFilter) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesFilter.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResourcesFilter proto.InternalMessageInfo - -func (m *NamedResourcesInstance) Reset() { *m = NamedResourcesInstance{} } -func (*NamedResourcesInstance) ProtoMessage() {} -func (*NamedResourcesInstance) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{8} -} -func (m *NamedResourcesInstance) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamedResourcesInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamedResourcesInstance) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesInstance.Merge(m, src) -} -func (m *NamedResourcesInstance) XXX_Size() int { - return m.Size() -} -func (m *NamedResourcesInstance) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesInstance.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResourcesInstance proto.InternalMessageInfo - -func (m *NamedResourcesIntSlice) Reset() { *m = NamedResourcesIntSlice{} } -func (*NamedResourcesIntSlice) ProtoMessage() {} -func (*NamedResourcesIntSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{9} -} -func (m *NamedResourcesIntSlice) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamedResourcesIntSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamedResourcesIntSlice) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesIntSlice.Merge(m, src) -} -func (m *NamedResourcesIntSlice) XXX_Size() int { - return m.Size() -} -func (m *NamedResourcesIntSlice) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesIntSlice.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResourcesIntSlice proto.InternalMessageInfo - -func (m *NamedResourcesRequest) Reset() { *m = NamedResourcesRequest{} } -func (*NamedResourcesRequest) ProtoMessage() {} -func (*NamedResourcesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{10} -} -func (m *NamedResourcesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamedResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamedResourcesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesRequest.Merge(m, src) -} -func (m *NamedResourcesRequest) XXX_Size() int { - return m.Size() -} -func (m *NamedResourcesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResourcesRequest proto.InternalMessageInfo - -func (m *NamedResourcesResources) Reset() { *m = NamedResourcesResources{} } -func (*NamedResourcesResources) ProtoMessage() {} -func (*NamedResourcesResources) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{11} -} -func (m *NamedResourcesResources) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamedResourcesResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamedResourcesResources) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesResources.Merge(m, src) -} -func (m *NamedResourcesResources) XXX_Size() int { - return m.Size() -} -func (m *NamedResourcesResources) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesResources.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResourcesResources proto.InternalMessageInfo - -func (m *NamedResourcesStringSlice) Reset() { *m = NamedResourcesStringSlice{} } -func (*NamedResourcesStringSlice) ProtoMessage() {} -func (*NamedResourcesStringSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{12} -} -func (m *NamedResourcesStringSlice) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamedResourcesStringSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamedResourcesStringSlice) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedResourcesStringSlice.Merge(m, src) -} -func (m *NamedResourcesStringSlice) XXX_Size() int { - return m.Size() -} -func (m *NamedResourcesStringSlice) XXX_DiscardUnknown() { - xxx_messageInfo_NamedResourcesStringSlice.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedResourcesStringSlice proto.InternalMessageInfo - -func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } -func (*PodSchedulingContext) ProtoMessage() {} -func (*PodSchedulingContext) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{13} -} -func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContext) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContext.Merge(m, src) -} -func (m *PodSchedulingContext) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContext) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo - -func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } -func (*PodSchedulingContextList) ProtoMessage() {} -func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{14} -} -func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextList.Merge(m, src) -} -func (m *PodSchedulingContextList) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContextList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo - -func (m *PodSchedulingContextSpec) 
Reset() { *m = PodSchedulingContextSpec{} } -func (*PodSchedulingContextSpec) ProtoMessage() {} -func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{15} -} -func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src) -} -func (m *PodSchedulingContextSpec) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo - -func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } -func (*PodSchedulingContextStatus) ProtoMessage() {} -func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{16} -} -func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src) -} -func (m *PodSchedulingContextStatus) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo - -func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } -func (*ResourceClaim) ProtoMessage() {} -func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{17} -} -func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaim) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaim.Merge(m, src) -} -func (m *ResourceClaim) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaim) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaim.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo - -func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } -func (*ResourceClaimConsumerReference) ProtoMessage() {} -func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{18} -} -func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src) -} -func (m *ResourceClaimConsumerReference) XXX_Size() int { - return m.Size() -} 
-func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo - -func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } -func (*ResourceClaimList) ProtoMessage() {} -func (*ResourceClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{19} -} -func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimList.Merge(m, src) -} -func (m *ResourceClaimList) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo - -func (m *ResourceClaimParameters) Reset() { *m = ResourceClaimParameters{} } -func (*ResourceClaimParameters) ProtoMessage() {} -func (*ResourceClaimParameters) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{20} -} -func (m *ResourceClaimParameters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimParameters) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimParameters.Merge(m, src) -} -func (m *ResourceClaimParameters) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimParameters) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimParameters.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimParameters proto.InternalMessageInfo - -func (m *ResourceClaimParametersList) Reset() { *m = ResourceClaimParametersList{} } -func (*ResourceClaimParametersList) ProtoMessage() {} -func (*ResourceClaimParametersList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{21} -} -func (m *ResourceClaimParametersList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimParametersList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimParametersList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimParametersList.Merge(m, src) -} -func (m *ResourceClaimParametersList) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimParametersList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimParametersList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimParametersList proto.InternalMessageInfo - -func (m *ResourceClaimParametersReference) Reset() { *m = ResourceClaimParametersReference{} } -func (*ResourceClaimParametersReference) ProtoMessage() {} -func (*ResourceClaimParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{22} -} -func (m *ResourceClaimParametersReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, 
err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimParametersReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimParametersReference.Merge(m, src) -} -func (m *ResourceClaimParametersReference) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimParametersReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimParametersReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimParametersReference proto.InternalMessageInfo - -func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } -func (*ResourceClaimSchedulingStatus) ProtoMessage() {} -func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{23} -} -func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimSchedulingStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimSchedulingStatus.Merge(m, src) -} -func (m *ResourceClaimSchedulingStatus) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimSchedulingStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimSchedulingStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo - -func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } -func (*ResourceClaimSpec) ProtoMessage() {} -func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{24} -} -func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimSpec.Merge(m, src) -} -func (m *ResourceClaimSpec) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo - -func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } -func (*ResourceClaimStatus) ProtoMessage() {} -func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{25} -} -func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimStatus.Merge(m, src) -} -func (m *ResourceClaimStatus) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo - -func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } -func (*ResourceClaimTemplate) ProtoMessage() {} -func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { - return 
fileDescriptor_4312f5b44a31ec02, []int{26} -} -func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimTemplate.Merge(m, src) -} -func (m *ResourceClaimTemplate) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo - -func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } -func (*ResourceClaimTemplateList) ProtoMessage() {} -func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{27} -} -func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src) -} -func (m *ResourceClaimTemplateList) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo - -func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } -func (*ResourceClaimTemplateSpec) ProtoMessage() {} -func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{28} -} -func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src) -} -func (m *ResourceClaimTemplateSpec) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo - -func (m *ResourceClass) Reset() { *m = ResourceClass{} } -func (*ResourceClass) ProtoMessage() {} -func (*ResourceClass) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{29} -} -func (m *ResourceClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClass.Merge(m, src) -} -func (m *ResourceClass) XXX_Size() int { - return m.Size() -} -func (m *ResourceClass) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClass.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClass proto.InternalMessageInfo - -func (m *ResourceClassList) Reset() { *m = 
ResourceClassList{} } -func (*ResourceClassList) ProtoMessage() {} -func (*ResourceClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{30} -} -func (m *ResourceClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClassList.Merge(m, src) -} -func (m *ResourceClassList) XXX_Size() int { - return m.Size() -} -func (m *ResourceClassList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClassList proto.InternalMessageInfo - -func (m *ResourceClassParameters) Reset() { *m = ResourceClassParameters{} } -func (*ResourceClassParameters) ProtoMessage() {} -func (*ResourceClassParameters) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{31} -} -func (m *ResourceClassParameters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClassParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClassParameters) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClassParameters.Merge(m, src) -} -func (m *ResourceClassParameters) XXX_Size() int { - return m.Size() -} -func (m *ResourceClassParameters) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClassParameters.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClassParameters proto.InternalMessageInfo - -func (m *ResourceClassParametersList) Reset() { *m = ResourceClassParametersList{} } -func (*ResourceClassParametersList) ProtoMessage() {} -func (*ResourceClassParametersList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{32} -} -func (m *ResourceClassParametersList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClassParametersList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClassParametersList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClassParametersList.Merge(m, src) -} -func (m *ResourceClassParametersList) XXX_Size() int { - return m.Size() -} -func (m *ResourceClassParametersList) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClassParametersList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClassParametersList proto.InternalMessageInfo - -func (m *ResourceClassParametersReference) Reset() { *m = ResourceClassParametersReference{} } -func (*ResourceClassParametersReference) ProtoMessage() {} -func (*ResourceClassParametersReference) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{33} -} -func (m *ResourceClassParametersReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClassParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClassParametersReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClassParametersReference.Merge(m, src) -} -func (m 
*ResourceClassParametersReference) XXX_Size() int { - return m.Size() -} -func (m *ResourceClassParametersReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClassParametersReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClassParametersReference proto.InternalMessageInfo - -func (m *ResourceFilter) Reset() { *m = ResourceFilter{} } -func (*ResourceFilter) ProtoMessage() {} -func (*ResourceFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{34} -} -func (m *ResourceFilter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceFilter) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceFilter.Merge(m, src) -} -func (m *ResourceFilter) XXX_Size() int { - return m.Size() -} -func (m *ResourceFilter) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceFilter.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceFilter proto.InternalMessageInfo - -func (m *ResourceFilterModel) Reset() { *m = ResourceFilterModel{} } -func (*ResourceFilterModel) ProtoMessage() {} -func (*ResourceFilterModel) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{35} -} -func (m *ResourceFilterModel) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceFilterModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceFilterModel) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceFilterModel.Merge(m, src) -} -func (m *ResourceFilterModel) XXX_Size() int { - return m.Size() -} -func (m *ResourceFilterModel) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceFilterModel.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceFilterModel proto.InternalMessageInfo - -func (m *ResourceHandle) Reset() { *m = ResourceHandle{} } -func (*ResourceHandle) ProtoMessage() {} -func (*ResourceHandle) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{36} -} -func (m *ResourceHandle) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceHandle) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceHandle.Merge(m, src) -} -func (m *ResourceHandle) XXX_Size() int { - return m.Size() -} -func (m *ResourceHandle) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceHandle.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceHandle proto.InternalMessageInfo - -func (m *ResourceModel) Reset() { *m = ResourceModel{} } -func (*ResourceModel) ProtoMessage() {} -func (*ResourceModel) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{37} -} -func (m *ResourceModel) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceModel) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceModel.Merge(m, src) -} -func (m *ResourceModel) XXX_Size() int { - return m.Size() -} -func (m 
*ResourceModel) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceModel.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceModel proto.InternalMessageInfo - -func (m *ResourceRequest) Reset() { *m = ResourceRequest{} } -func (*ResourceRequest) ProtoMessage() {} -func (*ResourceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{38} -} -func (m *ResourceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceRequest.Merge(m, src) -} -func (m *ResourceRequest) XXX_Size() int { - return m.Size() -} -func (m *ResourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceRequest proto.InternalMessageInfo - -func (m *ResourceRequestModel) Reset() { *m = ResourceRequestModel{} } -func (*ResourceRequestModel) ProtoMessage() {} -func (*ResourceRequestModel) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{39} -} -func (m *ResourceRequestModel) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceRequestModel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceRequestModel) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceRequestModel.Merge(m, src) -} -func (m *ResourceRequestModel) XXX_Size() int { - return m.Size() -} -func (m *ResourceRequestModel) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceRequestModel.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceRequestModel proto.InternalMessageInfo - -func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } -func (*ResourceSlice) ProtoMessage() {} -func (*ResourceSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{40} -} -func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceSlice) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceSlice.Merge(m, src) -} -func (m *ResourceSlice) XXX_Size() int { - return m.Size() -} -func (m *ResourceSlice) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceSlice.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo - -func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } -func (*ResourceSliceList) ProtoMessage() {} -func (*ResourceSliceList) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{41} -} -func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceSliceList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceSliceList.Merge(m, src) -} -func (m *ResourceSliceList) XXX_Size() int { - return m.Size() -} -func (m *ResourceSliceList) XXX_DiscardUnknown() { - 
xxx_messageInfo_ResourceSliceList.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo - -func (m *StructuredResourceHandle) Reset() { *m = StructuredResourceHandle{} } -func (*StructuredResourceHandle) ProtoMessage() {} -func (*StructuredResourceHandle) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{42} -} -func (m *StructuredResourceHandle) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StructuredResourceHandle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StructuredResourceHandle) XXX_Merge(src proto.Message) { - xxx_messageInfo_StructuredResourceHandle.Merge(m, src) -} -func (m *StructuredResourceHandle) XXX_Size() int { - return m.Size() -} -func (m *StructuredResourceHandle) XXX_DiscardUnknown() { - xxx_messageInfo_StructuredResourceHandle.DiscardUnknown(m) -} - -var xxx_messageInfo_StructuredResourceHandle proto.InternalMessageInfo - -func (m *VendorParameters) Reset() { *m = VendorParameters{} } -func (*VendorParameters) ProtoMessage() {} -func (*VendorParameters) Descriptor() ([]byte, []int) { - return fileDescriptor_4312f5b44a31ec02, []int{43} -} -func (m *VendorParameters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VendorParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VendorParameters) XXX_Merge(src proto.Message) { - xxx_messageInfo_VendorParameters.Merge(m, src) -} -func (m *VendorParameters) XXX_Size() int { - return m.Size() -} -func (m *VendorParameters) XXX_DiscardUnknown() { - xxx_messageInfo_VendorParameters.DiscardUnknown(m) -} - -var xxx_messageInfo_VendorParameters proto.InternalMessageInfo - -func init() { - proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha2.AllocationResult") - proto.RegisterType((*AllocationResultModel)(nil), "k8s.io.api.resource.v1alpha2.AllocationResultModel") - proto.RegisterType((*DriverAllocationResult)(nil), "k8s.io.api.resource.v1alpha2.DriverAllocationResult") - proto.RegisterType((*DriverRequests)(nil), "k8s.io.api.resource.v1alpha2.DriverRequests") - proto.RegisterType((*NamedResourcesAllocationResult)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesAllocationResult") - proto.RegisterType((*NamedResourcesAttribute)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesAttribute") - proto.RegisterType((*NamedResourcesAttributeValue)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesAttributeValue") - proto.RegisterType((*NamedResourcesFilter)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesFilter") - proto.RegisterType((*NamedResourcesInstance)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesInstance") - proto.RegisterType((*NamedResourcesIntSlice)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesIntSlice") - proto.RegisterType((*NamedResourcesRequest)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesRequest") - proto.RegisterType((*NamedResourcesResources)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesResources") - proto.RegisterType((*NamedResourcesStringSlice)(nil), "k8s.io.api.resource.v1alpha2.NamedResourcesStringSlice") - proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContext") - proto.RegisterType((*PodSchedulingContextList)(nil), 
"k8s.io.api.resource.v1alpha2.PodSchedulingContextList") - proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextSpec") - proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha2.PodSchedulingContextStatus") - proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaim") - proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimConsumerReference") - proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimList") - proto.RegisterType((*ResourceClaimParameters)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParameters") - proto.RegisterType((*ResourceClaimParametersList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersList") - proto.RegisterType((*ResourceClaimParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimParametersReference") - proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSchedulingStatus") - proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimSpec") - proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimStatus") - proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplate") - proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateList") - proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha2.ResourceClaimTemplateSpec") - proto.RegisterType((*ResourceClass)(nil), "k8s.io.api.resource.v1alpha2.ResourceClass") - proto.RegisterType((*ResourceClassList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassList") - proto.RegisterType((*ResourceClassParameters)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParameters") - proto.RegisterType((*ResourceClassParametersList)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersList") - proto.RegisterType((*ResourceClassParametersReference)(nil), "k8s.io.api.resource.v1alpha2.ResourceClassParametersReference") - proto.RegisterType((*ResourceFilter)(nil), "k8s.io.api.resource.v1alpha2.ResourceFilter") - proto.RegisterType((*ResourceFilterModel)(nil), "k8s.io.api.resource.v1alpha2.ResourceFilterModel") - proto.RegisterType((*ResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.ResourceHandle") - proto.RegisterType((*ResourceModel)(nil), "k8s.io.api.resource.v1alpha2.ResourceModel") - proto.RegisterType((*ResourceRequest)(nil), "k8s.io.api.resource.v1alpha2.ResourceRequest") - proto.RegisterType((*ResourceRequestModel)(nil), "k8s.io.api.resource.v1alpha2.ResourceRequestModel") - proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1alpha2.ResourceSlice") - proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1alpha2.ResourceSliceList") - proto.RegisterType((*StructuredResourceHandle)(nil), "k8s.io.api.resource.v1alpha2.StructuredResourceHandle") - proto.RegisterType((*VendorParameters)(nil), "k8s.io.api.resource.v1alpha2.VendorParameters") -} - -func init() { - proto.RegisterFile("k8s.io/api/resource/v1alpha2/generated.proto", fileDescriptor_4312f5b44a31ec02) -} - -var fileDescriptor_4312f5b44a31ec02 = []byte{ - // 2242 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0x4d, 0x6c, 0x1c, 0x57, - 0xd9, 0xb3, 0xbb, 0x89, 0xd7, 0x9f, 0xed, 0xb5, 0x33, 0xb6, 0xe3, 0x4d, 0xea, 0xee, 0x6e, 0x47, - 0x20, 
0x2c, 0x70, 0x76, 0x1b, 0xa7, 0x4d, 0xa3, 0x52, 0x90, 0x32, 0x71, 0x13, 0x2c, 0x9a, 0xd4, - 0x7d, 0x4b, 0xdc, 0xa6, 0xfc, 0x75, 0xbc, 0xf3, 0x62, 0x0f, 0xd9, 0x9d, 0xd9, 0xcc, 0x7b, 0xeb, - 0x26, 0xe2, 0x12, 0x55, 0x20, 0xb8, 0x20, 0x15, 0x81, 0x10, 0x9c, 0x38, 0x21, 0xc4, 0x85, 0x0b, - 0x5c, 0x39, 0x55, 0xd0, 0x1c, 0x83, 0x40, 0xa2, 0xe2, 0xb0, 0x22, 0xcb, 0x91, 0x23, 0xb7, 0x9e, - 0xd0, 0xbc, 0xf7, 0xe6, 0xe7, 0xcd, 0xce, 0xac, 0x77, 0x96, 0xc6, 0x4a, 0x4e, 0xde, 0x79, 0xef, - 0xfb, 0x7b, 0xdf, 0xff, 0x7b, 0x9f, 0x61, 0xe3, 0xce, 0x25, 0x52, 0xb7, 0x9c, 0x86, 0xd1, 0xb5, - 0x1a, 0x2e, 0x26, 0x4e, 0xcf, 0x6d, 0xe1, 0xc6, 0xe1, 0x79, 0xa3, 0xdd, 0x3d, 0x30, 0x36, 0x1b, - 0xfb, 0xd8, 0xc6, 0xae, 0x41, 0xb1, 0x59, 0xef, 0xba, 0x0e, 0x75, 0xd4, 0x35, 0x0e, 0x5d, 0x37, - 0xba, 0x56, 0xdd, 0x87, 0xae, 0xfb, 0xd0, 0x67, 0xcf, 0xed, 0x5b, 0xf4, 0xa0, 0xb7, 0x57, 0x6f, - 0x39, 0x9d, 0xc6, 0xbe, 0xb3, 0xef, 0x34, 0x18, 0xd2, 0x5e, 0xef, 0x36, 0xfb, 0x62, 0x1f, 0xec, - 0x17, 0x27, 0x76, 0x56, 0x8b, 0xb0, 0x6e, 0x39, 0xae, 0xc7, 0x36, 0xce, 0xf0, 0xec, 0x4b, 0x21, - 0x4c, 0xc7, 0x68, 0x1d, 0x58, 0x36, 0x76, 0xef, 0x37, 0xba, 0x77, 0xf6, 0x65, 0x79, 0xb3, 0x60, - 0x91, 0x46, 0x07, 0x53, 0x23, 0x89, 0x57, 0x23, 0x0d, 0xcb, 0xed, 0xd9, 0xd4, 0xea, 0x0c, 0xb3, - 0xb9, 0x78, 0x14, 0x02, 0x69, 0x1d, 0xe0, 0x8e, 0x11, 0xc7, 0xd3, 0x7e, 0x99, 0x83, 0xc5, 0xcb, - 0xed, 0xb6, 0xd3, 0x32, 0xa8, 0xe5, 0xd8, 0x08, 0x93, 0x5e, 0x9b, 0xaa, 0x0e, 0x2c, 0xf8, 0xe7, - 0xf9, 0x9a, 0x61, 0x9b, 0x6d, 0x4c, 0xca, 0x4a, 0x2d, 0xbf, 0x3e, 0xbb, 0xb9, 0x51, 0x1f, 0xa5, - 0xf4, 0x3a, 0x92, 0x90, 0xf4, 0xd5, 0x87, 0xfd, 0xea, 0xd4, 0xa0, 0x5f, 0x5d, 0x90, 0xd7, 0x09, - 0x8a, 0x53, 0x57, 0xf7, 0x60, 0xd1, 0x38, 0x34, 0xac, 0xb6, 0xb1, 0xd7, 0xc6, 0x6f, 0xda, 0x37, - 0x1c, 0x13, 0x93, 0x72, 0xae, 0xa6, 0xac, 0xcf, 0x6e, 0xd6, 0xa2, 0x1c, 0x3d, 0xcb, 0xd4, 0x0f, - 0xcf, 0xd7, 0x3d, 0x80, 0x26, 0x6e, 0xe3, 0x16, 0x75, 0x5c, 0x7d, 0x79, 0xd0, 0xaf, 0x2e, 0x5e, - 0x8e, 0x61, 0xa3, 0x21, 0x7a, 0x6a, 0x03, 0x66, 0xc8, 0x81, 0xe1, 0x62, 0x6f, 0xad, 0x9c, 0xaf, - 0x29, 0xeb, 0x45, 0xfd, 0x94, 0x10, 0x70, 0xa6, 0xe9, 0x6f, 0xa0, 0x10, 0x46, 0xfb, 0xa9, 0x02, - 0x2b, 0x71, 0xd5, 0x5c, 0x77, 0x4c, 0xdc, 0x56, 0xef, 0x41, 0xc9, 0x36, 0x3a, 0xd8, 0xf4, 0xcf, - 0xe5, 0xa9, 0xc7, 0x13, 0xf6, 0xb5, 0xd1, 0xea, 0xb9, 0x21, 0xe1, 0xc4, 0x49, 0xeb, 0xea, 0xa0, - 0x5f, 0x2d, 0xc9, 0x30, 0x28, 0xc6, 0x47, 0xfb, 0x7d, 0x0e, 0x4e, 0x6f, 0xb9, 0xd6, 0x21, 0x76, - 0x87, 0x8c, 0xf6, 0x63, 0x05, 0x56, 0x0f, 0xb1, 0x6d, 0x3a, 0x2e, 0xc2, 0x77, 0x7b, 0x98, 0xd0, - 0x1d, 0xc3, 0x35, 0x3a, 0x98, 0x62, 0xd7, 0x17, 0xef, 0x5c, 0x44, 0xbc, 0xc0, 0x49, 0xea, 0xdd, - 0x3b, 0xfb, 0x75, 0xe1, 0x24, 0x75, 0x64, 0xbc, 0xff, 0xfa, 0x3d, 0x8a, 0x6d, 0x62, 0x39, 0xb6, - 0x5e, 0x15, 0xda, 0x59, 0xdd, 0x4d, 0xa6, 0x8a, 0xd2, 0xd8, 0x79, 0xa2, 0xac, 0x18, 0x49, 0x9a, - 0x13, 0x46, 0xbd, 0x30, 0x5a, 0x4f, 0x89, 0x4a, 0xd7, 0x9f, 0x17, 0xe2, 0x24, 0xdb, 0x04, 0x25, - 0x33, 0xd4, 0x7e, 0x91, 0x83, 0x12, 0x57, 0x98, 0x10, 0x93, 0xa8, 0x9b, 0x00, 0x26, 0x5b, 0xf1, - 0x74, 0xcd, 0x54, 0x33, 0xa3, 0xab, 0x82, 0x38, 0x6c, 0x05, 0x3b, 0x28, 0x02, 0xa5, 0x12, 0x58, - 0xe4, 0x87, 0x8d, 0x28, 0x35, 0x37, 0x89, 0x52, 0xcb, 0x82, 0xd1, 0xe2, 0x6e, 0x8c, 0x1c, 0x1a, - 0x62, 0xa0, 0x7e, 0x13, 0x8a, 0xae, 0x10, 0xba, 0x9c, 0x67, 0xf1, 0x77, 0x6e, 0xbc, 0xf8, 0x13, - 0x47, 0xd5, 0x17, 0x05, 0xb3, 0xa2, 0x7f, 0x76, 0x14, 0x10, 0xd4, 0x74, 0xa8, 0x8c, 0xf6, 0x47, - 0xb5, 0x06, 0x05, 0x3b, 0xd4, 0xd0, 0x9c, 0xa0, 0x55, 0x60, 0xba, 0x61, 0x3b, 0xda, 0x5f, 0x14, - 0x58, 0x8d, 0x11, 0xa1, 0xd4, 
0xb5, 0xf6, 0x7a, 0x14, 0x1f, 0x8d, 0xed, 0x79, 0x49, 0xc9, 0xf0, - 0xe1, 0x77, 0x8d, 0x76, 0x0f, 0x0b, 0x95, 0xbe, 0x9a, 0x29, 0x8c, 0x24, 0x0a, 0xfa, 0xe7, 0x04, - 0xa3, 0xb5, 0x51, 0x50, 0x28, 0xc6, 0x57, 0xfb, 0x4f, 0x1e, 0x46, 0x22, 0xa8, 0xdf, 0x86, 0xe2, - 0xdd, 0x9e, 0x61, 0x53, 0x8b, 0xde, 0x2f, 0x9f, 0x64, 0x42, 0xd6, 0x53, 0xed, 0x2e, 0x49, 0xfd, - 0x96, 0xc0, 0xd2, 0x4f, 0x0d, 0xfa, 0xd5, 0x79, 0xff, 0x8b, 0x4b, 0x11, 0x90, 0x54, 0x5f, 0x80, - 0xc2, 0x9e, 0xe3, 0xf0, 0xf0, 0x28, 0xea, 0xf3, 0x5e, 0x4a, 0xd2, 0x1d, 0xa7, 0xcd, 0xc1, 0xd8, - 0x96, 0x5a, 0x81, 0xbc, 0x65, 0xd3, 0xf2, 0x74, 0x4d, 0x59, 0xcf, 0xeb, 0x73, 0x9e, 0x51, 0xb7, - 0x6d, 0xca, 0x01, 0xbc, 0x0d, 0xb5, 0x05, 0x45, 0xcb, 0xa6, 0xcd, 0xb6, 0xd5, 0xc2, 0xe5, 0x22, - 0x93, 0xf0, 0xa5, 0x2c, 0x6a, 0xdc, 0x16, 0xb8, 0x5c, 0x4e, 0xff, 0x4b, 0xc8, 0xe9, 0x13, 0x56, - 0xbf, 0x00, 0x27, 0x09, 0x75, 0x2d, 0x7b, 0xbf, 0x7c, 0x82, 0x99, 0x75, 0x61, 0xd0, 0xaf, 0xce, - 0x36, 0xd9, 0x0a, 0x07, 0x15, 0xdb, 0xaa, 0x03, 0xb3, 0xfc, 0x17, 0x17, 0x68, 0x86, 0x09, 0xf4, - 0x4a, 0x16, 0x81, 0x9a, 0x21, 0x3a, 0x4f, 0xf1, 0x91, 0x05, 0xce, 0x2b, 0xca, 0x41, 0xfd, 0x22, - 0x4c, 0x1f, 0x62, 0xd7, 0x0b, 0xb1, 0x32, 0x30, 0xd1, 0x16, 0x07, 0xfd, 0xea, 0xdc, 0x2e, 0x5f, - 0xe2, 0xf0, 0x3e, 0x80, 0xb6, 0x05, 0xcb, 0x32, 0xaf, 0xab, 0x56, 0x9b, 0x62, 0x57, 0xdd, 0x80, - 0x22, 0x11, 0x55, 0x45, 0xb8, 0x6d, 0x10, 0x40, 0x7e, 0xb5, 0x41, 0x01, 0x84, 0xf6, 0x1b, 0x05, - 0x4e, 0xc7, 0x75, 0x48, 0xa8, 0x61, 0xb7, 0xc6, 0xf1, 0x7d, 0x0b, 0x20, 0x70, 0x41, 0x2f, 0x93, - 0x78, 0xc1, 0xfd, 0xf2, 0x44, 0x6e, 0x1f, 0xa6, 0xae, 0x60, 0x89, 0xa0, 0x08, 0x71, 0xed, 0xe2, - 0xb0, 0x98, 0xc2, 0x9a, 0x6b, 0x50, 0xb0, 0x6c, 0xca, 0x6b, 0x7b, 0x5e, 0x2f, 0x7a, 0x22, 0x6e, - 0xdb, 0x94, 0x20, 0xb6, 0xaa, 0xbd, 0x0e, 0x2b, 0xb1, 0x62, 0xc4, 0x53, 0x47, 0x46, 0x35, 0x3d, - 0x18, 0xca, 0x11, 0xc1, 0x0f, 0x15, 0xc3, 0x8c, 0x25, 0x74, 0xe6, 0x77, 0x18, 0x19, 0x9d, 0x96, - 0x23, 0x87, 0x85, 0xdc, 0x5f, 0x21, 0x28, 0xa4, 0xac, 0xe9, 0x70, 0x26, 0xd5, 0xb7, 0xd4, 0xcf, - 0xc3, 0x34, 0xf7, 0x23, 0x2e, 0xc1, 0x8c, 0x3e, 0x3b, 0xe8, 0x57, 0xa7, 0x39, 0x04, 0x41, 0xfe, - 0x9e, 0xf6, 0xc7, 0x1c, 0x2c, 0xef, 0x38, 0x66, 0xb3, 0x75, 0x80, 0xcd, 0x5e, 0xdb, 0xb2, 0xf7, - 0xaf, 0x38, 0x36, 0xc5, 0xf7, 0xa8, 0xfa, 0x1e, 0x14, 0xbd, 0x26, 0xce, 0x34, 0xa8, 0x21, 0xca, - 0xec, 0x8b, 0xa3, 0x32, 0x03, 0xa9, 0x7b, 0xd0, 0x5e, 0x13, 0xf3, 0xe6, 0xde, 0xf7, 0x70, 0x8b, - 0x5e, 0xc7, 0xd4, 0x08, 0x4d, 0x18, 0xae, 0xa1, 0x80, 0xaa, 0xfa, 0x0e, 0x14, 0x48, 0x17, 0xb7, - 0x44, 0x72, 0xbc, 0x38, 0x5a, 0x41, 0x49, 0x32, 0x36, 0xbb, 0xb8, 0x15, 0x7a, 0xa1, 0xf7, 0x85, - 0x18, 0x45, 0xf5, 0x3d, 0x2f, 0x9c, 0x0d, 0xda, 0x23, 0xac, 0x1f, 0x9a, 0xdd, 0xbc, 0x34, 0x01, - 0x6d, 0x86, 0xaf, 0x97, 0x04, 0xf5, 0x93, 0xfc, 0x1b, 0x09, 0xba, 0xda, 0x5f, 0x15, 0x28, 0x27, - 0xa1, 0xbd, 0x61, 0x11, 0xaa, 0x7e, 0x6b, 0x48, 0x75, 0xf5, 0xf1, 0x54, 0xe7, 0x61, 0x33, 0xc5, - 0x05, 0x8e, 0xe7, 0xaf, 0x44, 0xd4, 0xf6, 0x36, 0x9c, 0xb0, 0x28, 0xee, 0xf8, 0xd1, 0xb5, 0x99, - 0xfd, 0x6c, 0xfa, 0xbc, 0x20, 0x7f, 0x62, 0xdb, 0x23, 0x84, 0x38, 0x3d, 0xed, 0xc3, 0x94, 0x33, - 0x79, 0x8a, 0x55, 0x2f, 0xc1, 0x1c, 0x77, 0x7d, 0x6c, 0x7a, 0x6d, 0xa7, 0x08, 0x90, 0x65, 0x41, - 0x68, 0xae, 0x19, 0xd9, 0x43, 0x12, 0xa4, 0xfa, 0x2a, 0x94, 0xba, 0x0e, 0xc5, 0x36, 0xb5, 0x8c, - 0xb6, 0xdf, 0x01, 0x7b, 0xfe, 0xc8, 0xda, 0xc2, 0x1d, 0x69, 0x07, 0xc5, 0x20, 0xb5, 0x5f, 0x29, - 0x70, 0x36, 0xdd, 0x3a, 0xea, 0xf7, 0xa1, 0xe4, 0x9f, 0xf8, 0x4a, 0xdb, 0xb0, 0x3a, 0x7e, 0xb0, - 0x7d, 0x79, 0xbc, 0x76, 0x82, 0xe1, 0x84, 0xb4, 0x85, 
0xc9, 0x4f, 0x8b, 0x33, 0x95, 0x24, 0x30, - 0x82, 0x62, 0xac, 0xb4, 0x5f, 0xe7, 0x60, 0x5e, 0x02, 0x39, 0x86, 0x90, 0x79, 0x4b, 0x0a, 0x99, - 0x46, 0x96, 0x63, 0xa6, 0xc5, 0xca, 0xad, 0x58, 0xac, 0x9c, 0xcf, 0x42, 0x74, 0x74, 0x90, 0x0c, - 0x14, 0xa8, 0x48, 0xf0, 0x57, 0x1c, 0x9b, 0xf4, 0x3a, 0x5e, 0xcb, 0x7a, 0x1b, 0xbb, 0xd8, 0xab, - 0x28, 0x1b, 0x50, 0x34, 0xba, 0xd6, 0x35, 0xd7, 0xe9, 0x75, 0xe3, 0x39, 0xf7, 0xf2, 0xce, 0x36, - 0x5b, 0x47, 0x01, 0x84, 0x07, 0xed, 0x4b, 0xc4, 0xa4, 0x9d, 0x89, 0x76, 0x82, 0xa2, 0x45, 0x0c, - 0x20, 0x82, 0x6a, 0x55, 0x48, 0xad, 0x56, 0x3a, 0xe4, 0x7b, 0x96, 0x29, 0x6a, 0xfe, 0x8b, 0x02, - 0x20, 0x7f, 0x73, 0x7b, 0xeb, 0xd3, 0x7e, 0xf5, 0x85, 0xb4, 0x8b, 0x27, 0xbd, 0xdf, 0xc5, 0xa4, - 0x7e, 0x73, 0x7b, 0x0b, 0x79, 0xc8, 0xda, 0x47, 0x0a, 0x9c, 0x92, 0x0e, 0x79, 0x0c, 0x29, 0x60, - 0x47, 0x4e, 0x01, 0x5f, 0xca, 0x60, 0xb2, 0x94, 0xd8, 0xff, 0x59, 0x1e, 0x56, 0x25, 0xb8, 0x48, - 0xbb, 0xfe, 0xe4, 0xdd, 0xfa, 0x7d, 0x98, 0x0f, 0xee, 0xef, 0x57, 0x5d, 0xa7, 0x23, 0xfc, 0xfb, - 0xab, 0x19, 0xce, 0x15, 0xb9, 0x70, 0xf8, 0xce, 0xc5, 0x5b, 0xbe, 0x6b, 0x51, 0xc2, 0x48, 0xe6, - 0x93, 0xf9, 0xee, 0xac, 0xb6, 0xa1, 0x64, 0x4a, 0xb7, 0xae, 0x72, 0x61, 0x9c, 0x07, 0x04, 0xf9, - 0xa6, 0x16, 0xa6, 0x18, 0x79, 0x1d, 0xc5, 0x68, 0x6b, 0xff, 0x50, 0xe0, 0xb9, 0x94, 0x53, 0x1e, - 0x83, 0x97, 0xbd, 0x2b, 0x7b, 0xd9, 0xcb, 0x13, 0x59, 0x23, 0xc5, 0xdf, 0x7e, 0xae, 0x40, 0xed, - 0x28, 0xfb, 0x65, 0x4c, 0x0e, 0x35, 0x28, 0xdc, 0xb1, 0x6c, 0x93, 0xf9, 0x4e, 0x24, 0xdc, 0xbf, - 0x6e, 0xd9, 0x26, 0x62, 0x3b, 0x41, 0x42, 0xc8, 0xa7, 0x5e, 0xfc, 0x1e, 0x28, 0xf0, 0xfc, 0xc8, - 0xea, 0x30, 0x46, 0x0b, 0xfc, 0x15, 0x58, 0xe8, 0xd9, 0xa4, 0x67, 0x51, 0xcf, 0x61, 0xa2, 0x05, - 0x6f, 0x69, 0xd0, 0xaf, 0x2e, 0xdc, 0x94, 0xb7, 0x50, 0x1c, 0x56, 0xfb, 0x6d, 0x2e, 0x96, 0x4f, - 0x58, 0xf9, 0xbd, 0x06, 0xa7, 0x22, 0xe5, 0x87, 0x90, 0xc8, 0x15, 0xff, 0x8c, 0x90, 0x21, 0x8a, - 0xc5, 0x01, 0xd0, 0x30, 0x8e, 0x17, 0x6a, 0xdd, 0xa8, 0xaa, 0x3f, 0xcb, 0x50, 0x93, 0x36, 0x90, - 0xcc, 0x47, 0xdd, 0x81, 0x52, 0xf8, 0x92, 0x71, 0xdd, 0x6b, 0x21, 0xb8, 0x19, 0xd6, 0xfd, 0x58, - 0xb8, 0x2c, 0xed, 0x7e, 0x3a, 0xb4, 0x82, 0x62, 0xf8, 0xda, 0x7f, 0x73, 0xb0, 0x94, 0x50, 0x8e, - 0x26, 0x7a, 0x07, 0xf9, 0x0e, 0x40, 0x48, 0x5d, 0xe8, 0xa4, 0x9e, 0xed, 0x35, 0x47, 0x2f, 0xb1, - 0xcb, 0x4a, 0xb8, 0x1a, 0xa1, 0xa8, 0x12, 0x98, 0x75, 0x31, 0xc1, 0xee, 0x21, 0x36, 0xaf, 0x3a, - 0xae, 0x78, 0xf5, 0x78, 0x2d, 0x83, 0xd2, 0x87, 0x4a, 0xa7, 0xbe, 0x24, 0x8e, 0x34, 0x8b, 0x42, - 0xc2, 0x28, 0xca, 0x45, 0x6d, 0xc2, 0x8a, 0x89, 0xa3, 0xcf, 0x47, 0x2c, 0xad, 0x60, 0x93, 0x55, - 0xc4, 0x62, 0xf8, 0xf0, 0xb4, 0x95, 0x04, 0x84, 0x92, 0x71, 0xb5, 0xbf, 0x2b, 0xb0, 0x22, 0x49, - 0xf6, 0x0d, 0xdc, 0xe9, 0xb6, 0x0d, 0x8a, 0x8f, 0xa1, 0x4e, 0xdc, 0x92, 0xda, 0x9f, 0x57, 0x32, - 0xa8, 0xcf, 0x17, 0x32, 0xad, 0x0d, 0xd2, 0xfe, 0xa6, 0xc0, 0x99, 0x44, 0x8c, 0x63, 0x48, 0xb4, - 0xef, 0xc8, 0x89, 0xf6, 0xc2, 0x04, 0xe7, 0x4a, 0x49, 0xb3, 0x8f, 0xd2, 0x4e, 0xd5, 0xe4, 0xd7, - 0xa4, 0x67, 0xaf, 0x5f, 0xd5, 0x3e, 0xce, 0x4b, 0x6d, 0x37, 0x39, 0x8e, 0xfe, 0x44, 0xce, 0x28, - 0xb9, 0xb1, 0x32, 0xca, 0x50, 0xa2, 0xcd, 0x67, 0x4c, 0xb4, 0x84, 0x4c, 0x96, 0x68, 0x6f, 0xc1, - 0xbc, 0x5c, 0x7d, 0x0a, 0x63, 0x0e, 0x1c, 0x18, 0xe9, 0xa6, 0x54, 0x9d, 0x64, 0x4a, 0xea, 0x1b, - 0xb0, 0x4c, 0xa8, 0xdb, 0x6b, 0xd1, 0x9e, 0x8b, 0xcd, 0xc8, 0x8b, 0xf1, 0x09, 0x96, 0x4f, 0xca, - 0x83, 0x7e, 0x75, 0xb9, 0x99, 0xb0, 0x8f, 0x12, 0xb1, 0xe2, 0x9d, 0x33, 0x21, 0x4f, 0x73, 0xe7, - 0x4c, 0xd2, 0x3a, 0x99, 0x8f, 0xe4, 0xce, 0x39, 0x6a, 0xb5, 0x67, 0xa1, 0x73, 
0x1e, 0xe1, 0x65, - 0x23, 0x3b, 0x67, 0x9a, 0x30, 0x38, 0xe0, 0x55, 0xed, 0x88, 0xb2, 0x19, 0x9f, 0x0f, 0x64, 0x9a, - 0x1c, 0xbc, 0x0d, 0xd3, 0xb7, 0xd9, 0x9b, 0xe6, 0x98, 0x7d, 0xb7, 0x7f, 0x50, 0xfe, 0x10, 0xaa, - 0x2f, 0x08, 0x56, 0xd3, 0xfc, 0x9b, 0x20, 0x9f, 0x5a, 0xbc, 0xd3, 0x8e, 0x6a, 0xe5, 0x69, 0xee, - 0xb4, 0xa3, 0x72, 0xa6, 0xf8, 0xe7, 0x9f, 0xe5, 0x4e, 0x3b, 0xd1, 0xde, 0xc7, 0xdf, 0x69, 0x7b, - 0x37, 0x2f, 0xef, 0x2f, 0xe9, 0x1a, 0x2d, 0xff, 0x86, 0x1e, 0xdc, 0xbc, 0x6e, 0xf8, 0x1b, 0x28, - 0x84, 0xd1, 0x3e, 0x56, 0xa0, 0x24, 0x9b, 0x73, 0xa2, 0x46, 0xef, 0x81, 0x02, 0x4b, 0xae, 0x44, - 0x26, 0x3a, 0xc0, 0x3b, 0x9f, 0xc5, 0x9d, 0xf8, 0xf8, 0xee, 0x39, 0xc1, 0x70, 0x29, 0x61, 0x13, - 0x25, 0xb1, 0xd2, 0x7e, 0xa8, 0x40, 0x12, 0xb0, 0x6a, 0xa7, 0x4c, 0x5f, 0x37, 0xb3, 0x3c, 0x1d, - 0x0b, 0x4f, 0x1f, 0x67, 0xe6, 0xfa, 0xcf, 0x88, 0x46, 0xf9, 0xc0, 0x7a, 0x22, 0x8d, 0xd6, 0xa0, - 0xc0, 0xc2, 0x22, 0xe6, 0x0d, 0x5b, 0x06, 0x35, 0x10, 0xdb, 0x51, 0x5d, 0x28, 0x85, 0x05, 0xc0, - 0x5b, 0x67, 0x05, 0xe3, 0xc8, 0x27, 0xdf, 0xb0, 0x94, 0xc4, 0xe6, 0xef, 0xec, 0x70, 0x4d, 0x89, - 0x22, 0x8a, 0x71, 0xd0, 0x3e, 0x50, 0xc2, 0x36, 0x81, 0xab, 0xf7, 0x6e, 0x8a, 0x7a, 0x33, 0x8d, - 0x27, 0x82, 0x1f, 0x63, 0x69, 0xf8, 0x27, 0x39, 0x58, 0x88, 0xcd, 0x2e, 0x13, 0x27, 0xae, 0xca, - 0x93, 0x9e, 0xb8, 0xfe, 0x40, 0x81, 0x65, 0x57, 0x16, 0x24, 0xea, 0xf6, 0x9b, 0x99, 0xc6, 0xaf, - 0xdc, 0xef, 0xd7, 0x04, 0xfb, 0xe5, 0xa4, 0x5d, 0x94, 0xc8, 0x4d, 0xfb, 0x91, 0x02, 0x89, 0xe0, - 0xaa, 0x93, 0x62, 0x9b, 0x0b, 0xd9, 0x6c, 0xc3, 0xa7, 0xc3, 0xe3, 0x58, 0xe6, 0x4f, 0x91, 0xc7, - 0x5b, 0x3e, 0x2f, 0x79, 0xf2, 0xb5, 0x7a, 0x03, 0x8a, 0xb6, 0x63, 0xe2, 0x48, 0x0f, 0x19, 0x24, - 0xd9, 0x1b, 0x62, 0x1d, 0x05, 0x10, 0xb1, 0x50, 0xcc, 0x8f, 0x15, 0x8a, 0x07, 0x30, 0xef, 0x46, - 0x7d, 0x5e, 0xb4, 0x7e, 0x63, 0x76, 0x39, 0xdc, 0xae, 0x2b, 0x82, 0x87, 0x1c, 0x3d, 0x48, 0x26, - 0x2c, 0xf5, 0x6e, 0x4c, 0x7f, 0x4f, 0x6d, 0xef, 0xc6, 0x27, 0xad, 0xc9, 0xb5, 0xf1, 0x0f, 0x79, - 0x28, 0xa7, 0x65, 0x19, 0xf5, 0x03, 0x05, 0x56, 0x78, 0x20, 0xc5, 0xca, 0xe6, 0x64, 0xe1, 0x1a, - 0xdc, 0xb6, 0x77, 0x93, 0x68, 0xa2, 0x64, 0x56, 0xb2, 0x10, 0xd1, 0xa7, 0x97, 0xc9, 0xfe, 0x4b, - 0x63, 0x58, 0x08, 0xe9, 0x39, 0x27, 0x99, 0x95, 0xe4, 0xb8, 0x85, 0x23, 0x1d, 0xf7, 0xbb, 0x30, - 0xed, 0xb2, 0x07, 0x11, 0xef, 0x5e, 0x30, 0xc6, 0xe8, 0x33, 0xf9, 0xdf, 0x7e, 0xc2, 0x5e, 0x8d, - 0x7f, 0x13, 0xe4, 0x53, 0xd5, 0x7e, 0xa7, 0xc0, 0x50, 0xce, 0x9b, 0xa8, 0x72, 0x19, 0x00, 0xdd, - 0xff, 0x53, 0xa1, 0x01, 0x8b, 0x88, 0x16, 0x23, 0x44, 0x75, 0xfd, 0xe1, 0xe3, 0xca, 0xd4, 0xa3, - 0xc7, 0x95, 0xa9, 0x4f, 0x1e, 0x57, 0xa6, 0x1e, 0x0c, 0x2a, 0xca, 0xc3, 0x41, 0x45, 0x79, 0x34, - 0xa8, 0x28, 0x9f, 0x0c, 0x2a, 0xca, 0xbf, 0x06, 0x15, 0xe5, 0xc3, 0x7f, 0x57, 0xa6, 0xde, 0x5d, - 0x1b, 0xf5, 0x0f, 0x82, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x94, 0xb7, 0xe5, 0x3f, 0x28, - 0x00, 0x00, -} - -func (m *AllocationResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.Shareable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - if m.AvailableOnNodes != nil { - { - size, err := 
m.AvailableOnNodes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ResourceHandles) > 0 { - for iNdEx := len(m.ResourceHandles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceHandles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AllocationResultModel) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllocationResultModel) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllocationResultModel) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.NamedResources != nil { - { - size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DriverAllocationResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DriverAllocationResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DriverAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.AllocationResultModel.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.VendorRequestParameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DriverRequests) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DriverRequests) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DriverRequests) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Requests) > 0 { - for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - { - size, err := m.VendorParameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NamedResourcesAllocationResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != 
nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesAllocationResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NamedResourcesAttribute) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesAttribute) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.NamedResourcesAttributeValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NamedResourcesAttributeValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesAttributeValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesAttributeValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.VersionValue != nil { - i -= len(*m.VersionValue) - copy(dAtA[i:], *m.VersionValue) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue))) - i-- - dAtA[i] = 0x52 - } - if m.StringSliceValue != nil { - { - size, err := m.StringSliceValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.IntSliceValue != nil { - { - size, err := m.IntSliceValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.IntValue != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue)) - i-- - dAtA[i] = 0x38 - } - if m.QuantityValue != nil { - { - size, err := m.QuantityValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.StringValue != nil { - i -= len(*m.StringValue) - copy(dAtA[i:], *m.StringValue) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue))) - i-- - dAtA[i] = 0x2a - } - if m.BoolValue != nil { - i-- - if *m.BoolValue { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - return len(dAtA) - i, nil -} - -func (m *NamedResourcesFilter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesFilter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Selector) - copy(dAtA[i:], m.Selector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NamedResourcesInstance) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesInstance) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NamedResourcesIntSlice) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesIntSlice) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesIntSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ints) > 0 { - for iNdEx := len(m.Ints) - 1; iNdEx >= 0; iNdEx-- { - i = encodeVarintGenerated(dAtA, i, uint64(m.Ints[iNdEx])) - i-- - dAtA[i] = 0x8 - } - } - return len(dAtA) - i, nil -} - -func (m *NamedResourcesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Selector) - copy(dAtA[i:], m.Selector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NamedResourcesResources) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesResources) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Instances) > 0 { - for iNdEx := len(m.Instances) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Instances[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *NamedResourcesStringSlice) Marshal() (dAtA []byte, 
err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedResourcesStringSlice) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedResourcesStringSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Strings) > 0 { - for iNdEx := len(m.Strings) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Strings[iNdEx]) - copy(dAtA[i:], m.Strings[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strings[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PotentialNodes) > 0 { - for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PotentialNodes[iNdEx]) - copy(dAtA[i:], m.PotentialNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.PotentialNodes[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.SelectedNode) - copy(dAtA[i:], m.SelectedNode) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceClaims) > 0 { - for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x2a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x22 - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x1a - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { - 
size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimParameters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimParameters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DriverRequests) > 0 { - for iNdEx := len(m.DriverRequests) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DriverRequests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - i-- - if m.Shareable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - if m.GeneratedFrom != nil { - { - size, err := m.GeneratedFrom.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimParametersList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimParametersList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimParametersList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimParametersReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimParametersReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimParametersReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - 
i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.UnsuitableNodes) > 0 { - for iNdEx := len(m.UnsuitableNodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.UnsuitableNodes[iNdEx]) - copy(dAtA[i:], m.UnsuitableNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnsuitableNodes[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.AllocationMode) - copy(dAtA[i:], m.AllocationMode) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) - i-- - dAtA[i] = 0x1a - if m.ParametersRef != nil { - { - size, err := m.ParametersRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.ResourceClassName) - copy(dAtA[i:], m.ResourceClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceClassName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.DeallocationRequested { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - if len(m.ReservedFor) > 0 { - for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Allocation != nil { - { - size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var 
l int - _ = l - if m.StructuredParameters != nil { - i-- - if *m.StructuredParameters { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.SuitableNodes != nil { - { - size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.ParametersRef != nil { - { - size, err := m.ParametersRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClassParameters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClassParameters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClassParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Filters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.VendorParameters) > 0 { - for iNdEx := len(m.VendorParameters) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VendorParameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.GeneratedFrom != nil { - { - size, err := m.GeneratedFrom.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m 
*ResourceClassParametersList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClassParametersList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClassParametersList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceClassParametersReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClassParametersReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClassParametersReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x22 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceFilter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceFilter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ResourceFilterModel.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceFilterModel) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceFilterModel) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceFilterModel) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.NamedResources != nil { - { - size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResourceHandle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceHandle) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StructuredData != nil { - { - size, err := m.StructuredData.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceModel) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceModel) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceModel) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.NamedResources != nil { - { - size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResourceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ResourceRequestModel.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.VendorParameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceRequestModel) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceRequestModel) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceRequestModel) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.NamedResources != nil { - { - size, err := m.NamedResources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return 
len(dAtA) - i, nil -} - -func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ResourceModel.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0x1a - i -= len(m.NodeName) - copy(dAtA[i:], m.NodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StructuredResourceHandle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StructuredResourceHandle) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StructuredResourceHandle) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - i -= len(m.NodeName) - copy(dAtA[i:], m.NodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i-- - dAtA[i] = 0x22 - { - size, err := m.VendorClaimParameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.VendorClassParameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - 
-func (m *VendorParameters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VendorParameters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VendorParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AllocationResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceHandles) > 0 { - for _, e := range m.ResourceHandles { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.AvailableOnNodes != nil { - l = m.AvailableOnNodes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n -} - -func (m *AllocationResultModel) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NamedResources != nil { - l = m.NamedResources.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *DriverAllocationResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.VendorRequestParameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.AllocationResultModel.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DriverRequests) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.VendorParameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Requests) > 0 { - for _, e := range m.Requests { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NamedResourcesAllocationResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NamedResourcesAttribute) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.NamedResourcesAttributeValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NamedResourcesAttributeValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BoolValue != nil { - n += 2 - } - if m.StringValue != nil { - l = len(*m.StringValue) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.QuantityValue != nil { - l = m.QuantityValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.IntValue != nil { - n += 1 + sovGenerated(uint64(*m.IntValue)) - } - if m.IntSliceValue != nil { - l = m.IntSliceValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.StringSliceValue != nil { - l = m.StringSliceValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.VersionValue != nil { - l = len(*m.VersionValue) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *NamedResourcesFilter) Size() (n int) { - if 
m == nil { - return 0 - } - var l int - _ = l - l = len(m.Selector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NamedResourcesInstance) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NamedResourcesIntSlice) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Ints) > 0 { - for _, e := range m.Ints { - n += 1 + sovGenerated(uint64(e)) - } - } - return n -} - -func (m *NamedResourcesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Selector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NamedResourcesResources) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Instances) > 0 { - for _, e := range m.Instances { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NamedResourcesStringSlice) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Strings) > 0 { - for _, s := range m.Strings { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSchedulingContext) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodSchedulingContextList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSchedulingContextSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.SelectedNode) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.PotentialNodes) > 0 { - for _, s := range m.PotentialNodes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSchedulingContextStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceClaims) > 0 { - for _, e := range m.ResourceClaims { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaim) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimConsumerReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimParameters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 
+ l + sovGenerated(uint64(l)) - if m.GeneratedFrom != nil { - l = m.GeneratedFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if len(m.DriverRequests) > 0 { - for _, e := range m.DriverRequests { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimParametersList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimParametersReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimSchedulingStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UnsuitableNodes) > 0 { - for _, s := range m.UnsuitableNodes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ResourceClassName) - n += 1 + l + sovGenerated(uint64(l)) - if m.ParametersRef != nil { - l = m.ParametersRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.AllocationMode) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Allocation != nil { - l = m.Allocation.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ReservedFor) > 0 { - for _, e := range m.ReservedFor { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - return n -} - -func (m *ResourceClaimTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClaimTemplateList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClaimTemplateSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceClass) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - if m.ParametersRef != nil { - l = m.ParametersRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SuitableNodes != nil { - l = m.SuitableNodes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.StructuredParameters != nil { - n += 2 - } - return n -} - -func (m *ResourceClassList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m 
*ResourceClassParameters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.GeneratedFrom != nil { - l = m.GeneratedFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.VendorParameters) > 0 { - for _, e := range m.VendorParameters { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Filters) > 0 { - for _, e := range m.Filters { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClassParametersList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceClassParametersReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceFilter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ResourceFilterModel.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceFilterModel) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NamedResources != nil { - l = m.NamedResources.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceHandle) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - if m.StructuredData != nil { - l = m.StructuredData.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceModel) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NamedResources != nil { - l = m.NamedResources.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.VendorParameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.ResourceRequestModel.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceRequestModel) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NamedResources != nil { - l = m.NamedResources.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceSlice) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ResourceModel.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceSliceList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StructuredResourceHandle) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.VendorClassParameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = 
m.VendorClaimParameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Results) > 0 { - for _, e := range m.Results { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *VendorParameters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Parameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllocationResult) String() string { - if this == nil { - return "nil" - } - repeatedStringForResourceHandles := "[]ResourceHandle{" - for _, f := range this.ResourceHandles { - repeatedStringForResourceHandles += strings.Replace(strings.Replace(f.String(), "ResourceHandle", "ResourceHandle", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceHandles += "}" - s := strings.Join([]string{`&AllocationResult{`, - `ResourceHandles:` + repeatedStringForResourceHandles + `,`, - `AvailableOnNodes:` + strings.Replace(fmt.Sprintf("%v", this.AvailableOnNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, - `Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`, - `}`, - }, "") - return s -} -func (this *AllocationResultModel) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllocationResultModel{`, - `NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesAllocationResult", "NamedResourcesAllocationResult", 1) + `,`, - `}`, - }, "") - return s -} -func (this *DriverAllocationResult) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DriverAllocationResult{`, - `VendorRequestParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorRequestParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `AllocationResultModel:` + strings.Replace(strings.Replace(this.AllocationResultModel.String(), "AllocationResultModel", "AllocationResultModel", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DriverRequests) String() string { - if this == nil { - return "nil" - } - repeatedStringForRequests := "[]ResourceRequest{" - for _, f := range this.Requests { - repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "ResourceRequest", "ResourceRequest", 1), `&`, ``, 1) + "," - } - repeatedStringForRequests += "}" - s := strings.Join([]string{`&DriverRequests{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `VendorParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `Requests:` + repeatedStringForRequests + `,`, - `}`, - }, "") - return s -} -func (this *NamedResourcesAllocationResult) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamedResourcesAllocationResult{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *NamedResourcesAttribute) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamedResourcesAttribute{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `NamedResourcesAttributeValue:` + strings.Replace(strings.Replace(this.NamedResourcesAttributeValue.String(), "NamedResourcesAttributeValue", 
"NamedResourcesAttributeValue", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NamedResourcesAttributeValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamedResourcesAttributeValue{`, - `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`, - `StringValue:` + valueToStringGenerated(this.StringValue) + `,`, - `QuantityValue:` + strings.Replace(fmt.Sprintf("%v", this.QuantityValue), "Quantity", "resource.Quantity", 1) + `,`, - `IntValue:` + valueToStringGenerated(this.IntValue) + `,`, - `IntSliceValue:` + strings.Replace(this.IntSliceValue.String(), "NamedResourcesIntSlice", "NamedResourcesIntSlice", 1) + `,`, - `StringSliceValue:` + strings.Replace(this.StringSliceValue.String(), "NamedResourcesStringSlice", "NamedResourcesStringSlice", 1) + `,`, - `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`, - `}`, - }, "") - return s -} -func (this *NamedResourcesFilter) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamedResourcesFilter{`, - `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, - `}`, - }, "") - return s -} -func (this *NamedResourcesInstance) String() string { - if this == nil { - return "nil" - } - repeatedStringForAttributes := "[]NamedResourcesAttribute{" - for _, f := range this.Attributes { - repeatedStringForAttributes += strings.Replace(strings.Replace(f.String(), "NamedResourcesAttribute", "NamedResourcesAttribute", 1), `&`, ``, 1) + "," - } - repeatedStringForAttributes += "}" - s := strings.Join([]string{`&NamedResourcesInstance{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Attributes:` + repeatedStringForAttributes + `,`, - `}`, - }, "") - return s -} -func (this *NamedResourcesIntSlice) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamedResourcesIntSlice{`, - `Ints:` + fmt.Sprintf("%v", this.Ints) + `,`, - `}`, - }, "") - return s -} -func (this *NamedResourcesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamedResourcesRequest{`, - `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, - `}`, - }, "") - return s -} -func (this *NamedResourcesResources) String() string { - if this == nil { - return "nil" - } - repeatedStringForInstances := "[]NamedResourcesInstance{" - for _, f := range this.Instances { - repeatedStringForInstances += strings.Replace(strings.Replace(f.String(), "NamedResourcesInstance", "NamedResourcesInstance", 1), `&`, ``, 1) + "," - } - repeatedStringForInstances += "}" - s := strings.Join([]string{`&NamedResourcesResources{`, - `Instances:` + repeatedStringForInstances + `,`, - `}`, - }, "") - return s -} -func (this *NamedResourcesStringSlice) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamedResourcesStringSlice{`, - `Strings:` + fmt.Sprintf("%v", this.Strings) + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContext) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSchedulingContext{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - 
return s -} -func (this *PodSchedulingContextList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSchedulingContext{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSchedulingContextList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContextSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSchedulingContextSpec{`, - `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, - `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContextStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForResourceClaims := "[]ResourceClaimSchedulingStatus{" - for _, f := range this.ResourceClaims { - repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceClaims += "}" - s := strings.Join([]string{`&PodSchedulingContextStatus{`, - `ResourceClaims:` + repeatedStringForResourceClaims + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaim) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaim{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimConsumerReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimConsumerReference{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceClaim{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClaimList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimParameters) String() string { - if this == nil { - return "nil" - } - repeatedStringForDriverRequests := "[]DriverRequests{" - for _, f := range this.DriverRequests { - repeatedStringForDriverRequests += strings.Replace(strings.Replace(f.String(), "DriverRequests", "DriverRequests", 1), `&`, ``, 1) + "," - } - repeatedStringForDriverRequests += "}" - s := strings.Join([]string{`&ResourceClaimParameters{`, - `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `GeneratedFrom:` + strings.Replace(this.GeneratedFrom.String(), "ResourceClaimParametersReference", "ResourceClaimParametersReference", 1) + `,`, - `Shareable:` + fmt.Sprintf("%v", this.Shareable) + `,`, - `DriverRequests:` + repeatedStringForDriverRequests + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimParametersList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceClaimParameters{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimParameters", "ResourceClaimParameters", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClaimParametersList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimParametersReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimParametersReference{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimSchedulingStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimSchedulingStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UnsuitableNodes:` + fmt.Sprintf("%v", this.UnsuitableNodes) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimSpec{`, - `ResourceClassName:` + fmt.Sprintf("%v", this.ResourceClassName) + `,`, - `ParametersRef:` + strings.Replace(this.ParametersRef.String(), "ResourceClaimParametersReference", "ResourceClaimParametersReference", 1) + `,`, - `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{" - for _, f := range this.ReservedFor { - repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + "," - } - repeatedStringForReservedFor += "}" - s := strings.Join([]string{`&ResourceClaimStatus{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, - `ReservedFor:` + repeatedStringForReservedFor + `,`, - `DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimTemplateList) String() string { - if this == nil { - return "nil" - } - 
repeatedStringForItems := "[]ResourceClaimTemplate{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClaimTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClaimTemplateSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `ParametersRef:` + strings.Replace(this.ParametersRef.String(), "ResourceClassParametersReference", "ResourceClassParametersReference", 1) + `,`, - `SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, - `StructuredParameters:` + valueToStringGenerated(this.StructuredParameters) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClassList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceClass{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClass", "ResourceClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClassParameters) String() string { - if this == nil { - return "nil" - } - repeatedStringForVendorParameters := "[]VendorParameters{" - for _, f := range this.VendorParameters { - repeatedStringForVendorParameters += strings.Replace(strings.Replace(f.String(), "VendorParameters", "VendorParameters", 1), `&`, ``, 1) + "," - } - repeatedStringForVendorParameters += "}" - repeatedStringForFilters := "[]ResourceFilter{" - for _, f := range this.Filters { - repeatedStringForFilters += strings.Replace(strings.Replace(f.String(), "ResourceFilter", "ResourceFilter", 1), `&`, ``, 1) + "," - } - repeatedStringForFilters += "}" - s := strings.Join([]string{`&ResourceClassParameters{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `GeneratedFrom:` + strings.Replace(this.GeneratedFrom.String(), "ResourceClassParametersReference", "ResourceClassParametersReference", 1) + `,`, - `VendorParameters:` + repeatedStringForVendorParameters + `,`, - `Filters:` + repeatedStringForFilters + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClassParametersList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems 
:= "[]ResourceClassParameters{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClassParameters", "ResourceClassParameters", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceClassParametersList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceClassParametersReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClassParametersReference{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceFilter) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceFilter{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `ResourceFilterModel:` + strings.Replace(strings.Replace(this.ResourceFilterModel.String(), "ResourceFilterModel", "ResourceFilterModel", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceFilterModel) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceFilterModel{`, - `NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesFilter", "NamedResourcesFilter", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceHandle) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceHandle{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `StructuredData:` + strings.Replace(this.StructuredData.String(), "StructuredResourceHandle", "StructuredResourceHandle", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceModel) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceModel{`, - `NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesResources", "NamedResourcesResources", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceRequest{`, - `VendorParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `ResourceRequestModel:` + strings.Replace(strings.Replace(this.ResourceRequestModel.String(), "ResourceRequestModel", "ResourceRequestModel", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceRequestModel) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceRequestModel{`, - `NamedResources:` + strings.Replace(this.NamedResources.String(), "NamedResourcesRequest", "NamedResourcesRequest", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceSlice) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceSlice{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `ResourceModel:` + 
strings.Replace(strings.Replace(this.ResourceModel.String(), "ResourceModel", "ResourceModel", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceSliceList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceSlice{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceSliceList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *StructuredResourceHandle) String() string { - if this == nil { - return "nil" - } - repeatedStringForResults := "[]DriverAllocationResult{" - for _, f := range this.Results { - repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DriverAllocationResult", "DriverAllocationResult", 1), `&`, ``, 1) + "," - } - repeatedStringForResults += "}" - s := strings.Join([]string{`&StructuredResourceHandle{`, - `VendorClassParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorClassParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `VendorClaimParameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VendorClaimParameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `Results:` + repeatedStringForResults + `,`, - `}`, - }, "") - return s -} -func (this *VendorParameters) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VendorParameters{`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllocationResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceHandles = append(m.ResourceHandles, ResourceHandle{}) - 
if err := m.ResourceHandles[len(m.ResourceHandles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableOnNodes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AvailableOnNodes == nil { - m.AvailableOnNodes = &v1.NodeSelector{} - } - if err := m.AvailableOnNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shareable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Shareable = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllocationResultModel) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllocationResultModel: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllocationResultModel: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamedResources == nil { - m.NamedResources = &NamedResourcesAllocationResult{} - } - if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DriverAllocationResult) Unmarshal(dAtA []byte) error { - l := 
len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DriverAllocationResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DriverAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorRequestParameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VendorRequestParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllocationResultModel", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.AllocationResultModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DriverRequests) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DriverRequests: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DriverRequests: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorParameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VendorParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Requests = append(m.Requests, ResourceRequest{}) - if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesAllocationResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesAllocationResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesAttribute) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesAttribute: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesAttribute: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamedResourcesAttributeValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.NamedResourcesAttributeValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesAttributeValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesAttributeValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesAttributeValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) 
- } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.BoolValue = &b - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.StringValue = &s - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QuantityValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.QuantityValue == nil { - m.QuantityValue = &resource.Quantity{} - } - if err := m.QuantityValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IntValue = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IntSliceValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.IntSliceValue == nil { - m.IntSliceValue = &NamedResourcesIntSlice{} - } - if err := m.IntSliceValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringSliceValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StringSliceValue == nil { - m.StringSliceValue = 
&NamedResourcesStringSlice{} - } - if err := m.StringSliceValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.VersionValue = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesFilter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesFilter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesFilter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Selector = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesInstance) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesInstance: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: NamedResourcesInstance: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, NamedResourcesAttribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesIntSlice) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesIntSlice: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesIntSlice: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType == 0 { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Ints = append(m.Ints, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ 
- } - } - elementCount = count - if elementCount != 0 && len(m.Ints) == 0 { - m.Ints = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Ints = append(m.Ints, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Ints", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Selector = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesResources) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesResources: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesResources: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Instances", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Instances = append(m.Instances, NamedResourcesInstance{}) - if err := m.Instances[len(m.Instances)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedResourcesStringSlice) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedResourcesStringSlice: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedResourcesStringSlice: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strings", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Strings = append(m.Strings, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - 
} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodSchedulingContext{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SelectedNode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l 
{ - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{}) - if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaim) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ResourceClaim{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimParameters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimParameters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimParameters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GeneratedFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.GeneratedFrom == nil { - m.GeneratedFrom = &ResourceClaimParametersReference{} - } - if err := m.GeneratedFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shareable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Shareable = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverRequests", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverRequests = append(m.DriverRequests, DriverRequests{}) - if err := m.DriverRequests[len(m.DriverRequests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimParametersList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimParametersList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimParametersList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ResourceClaimParameters{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimParametersReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimParametersReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type 
%d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceClassName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParametersRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) 
<< shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ParametersRef == nil { - m.ParametersRef = &ResourceClaimParametersReference{} - } - if err := m.ParametersRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllocationMode = AllocationMode(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Allocation == nil { - m.Allocation = &AllocationResult{} - } - if err 
:= m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{}) - if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DeallocationRequested = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ResourceClaimTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") - } 
- if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParametersRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ParametersRef == nil { - m.ParametersRef = &ResourceClassParametersReference{} - } - if err := m.ParametersRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SuitableNodes == nil { - m.SuitableNodes = &v1.NodeSelector{} - } - if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StructuredParameters", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.StructuredParameters = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift 
>= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ResourceClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClassParameters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClassParameters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClassParameters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GeneratedFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
if m.GeneratedFrom == nil { - m.GeneratedFrom = &ResourceClassParametersReference{} - } - if err := m.GeneratedFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorParameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VendorParameters = append(m.VendorParameters, VendorParameters{}) - if err := m.VendorParameters[len(m.VendorParameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, ResourceFilter{}) - if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClassParametersList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClassParametersList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClassParametersList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ResourceClassParameters{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceClassParametersReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClassParametersReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceFilter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceFilter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceFilter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFilterModel", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ResourceFilterModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || 
(iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceFilterModel) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceFilterModel: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceFilterModel: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamedResources == nil { - m.NamedResources = &NamedResourcesFilter{} - } - if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceHandle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceHandle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var stringLen uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StructuredData", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StructuredData == nil { - m.StructuredData = &StructuredResourceHandle{} - } - if err := m.StructuredData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceModel) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceModel: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceModel: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamedResources == nil { - m.NamedResources = &NamedResourcesResources{} - } - if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - 
iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorParameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VendorParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceRequestModel", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ResourceRequestModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceRequestModel) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceRequestModel: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceRequestModel: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamedResources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamedResources == nil { - m.NamedResources = &NamedResourcesRequest{} - } - if err := m.NamedResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceSlice) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceModel", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ResourceModel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceSliceList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ResourceSlice{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StructuredResourceHandle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StructuredResourceHandle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StructuredResourceHandle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorClassParameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VendorClassParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VendorClaimParameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VendorClaimParameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, DriverAllocationResult{}) - if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if 
(iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VendorParameters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VendorParameters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VendorParameters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, 
ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha2/generated.proto b/constraint/vendor/k8s.io/api/resource/v1alpha2/generated.proto deleted file mode 100644 index 4a6a5bab6..000000000 --- a/constraint/vendor/k8s.io/api/resource/v1alpha2/generated.proto +++ /dev/null @@ -1,749 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.api.resource.v1alpha2; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/api/resource/v1alpha2"; - -// AllocationResult contains attributes of an allocated resource. -message AllocationResult { - // ResourceHandles contain the state associated with an allocation that - // should be maintained throughout the lifetime of a claim. Each - // ResourceHandle contains data that should be passed to a specific kubelet - // plugin once it lands on a node. This data is returned by the driver - // after a successful allocation and is opaque to Kubernetes. Driver - // documentation may explain to users how to interpret this data if needed. - // - // Setting this field is optional. It has a maximum size of 32 entries. - // If null (or empty), it is assumed this allocation will be processed by a - // single kubelet plugin with no ResourceHandle data attached. The name of - // the kubelet plugin invoked will match the DriverName set in the - // ResourceClaimStatus this AllocationResult is embedded in. - // - // +listType=atomic - // +optional - repeated ResourceHandle resourceHandles = 1; - - // This field will get set by the resource driver after it has allocated - // the resource to inform the scheduler where it can schedule Pods using - // the ResourceClaim. - // - // Setting this field is optional. If null, the resource is available - // everywhere. - // +optional - optional k8s.io.api.core.v1.NodeSelector availableOnNodes = 2; - - // Shareable determines whether the resource supports more - // than one consumer at a time. 
- // +optional - optional bool shareable = 3; -} - -// AllocationResultModel must have one and only one field set. -message AllocationResultModel { - // NamedResources describes the allocation result when using the named resources model. - // - // +optional - optional NamedResourcesAllocationResult namedResources = 1; -} - -// DriverAllocationResult contains vendor parameters and the allocation result for -// one request. -message DriverAllocationResult { - // VendorRequestParameters are the per-request configuration parameters - // from the time that the claim was allocated. - // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorRequestParameters = 1; - - optional AllocationResultModel allocationResultModel = 2; -} - -// DriverRequests describes all resources that are needed from one particular driver. -message DriverRequests { - // DriverName is the name used by the DRA driver kubelet plugin. - optional string driverName = 1; - - // VendorParameters are arbitrary setup parameters for all requests of the - // claim. They are ignored while allocating the claim. - // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorParameters = 2; - - // Requests describes all resources that are needed from the driver. - // +listType=atomic - repeated ResourceRequest requests = 3; -} - -// NamedResourcesAllocationResult is used in AllocationResultModel. -message NamedResourcesAllocationResult { - // Name is the name of the selected resource instance. - optional string name = 1; -} - -// NamedResourcesAttribute is a combination of an attribute name and its value. -message NamedResourcesAttribute { - // Name is unique identifier among all resource instances managed by - // the driver on the node. It must be a DNS subdomain. - optional string name = 1; - - optional NamedResourcesAttributeValue attributeValue = 2; -} - -// NamedResourcesAttributeValue must have one and only one field set. -message NamedResourcesAttributeValue { - // QuantityValue is a quantity. - optional k8s.io.apimachinery.pkg.api.resource.Quantity quantity = 6; - - // BoolValue is a true/false value. - optional bool bool = 2; - - // IntValue is a 64-bit integer. - optional int64 int = 7; - - // IntSliceValue is an array of 64-bit integers. - optional NamedResourcesIntSlice intSlice = 8; - - // StringValue is a string. - optional string string = 5; - - // StringSliceValue is an array of strings. - optional NamedResourcesStringSlice stringSlice = 9; - - // VersionValue is a semantic version according to semver.org spec 2.0.0. - optional string version = 10; -} - -// NamedResourcesFilter is used in ResourceFilterModel. -message NamedResourcesFilter { - // Selector is a CEL expression which must evaluate to true if a - // resource instance is suitable. The language is as defined in - // https://kubernetes.io/docs/reference/using-api/cel/ - // - // In addition, for each type NamedResourcesin AttributeValue there is a map that - // resolves to the corresponding value of the instance under evaluation. - // For example: - // - // attributes.quantity["a"].isGreaterThan(quantity("0")) && - // attributes.stringslice["b"].isSorted() - optional string selector = 1; -} - -// NamedResourcesInstance represents one individual hardware instance that can be selected based -// on its attributes. -message NamedResourcesInstance { - // Name is unique identifier among all resource instances managed by - // the driver on the node. It must be a DNS subdomain. 
- optional string name = 1; - - // Attributes defines the attributes of this resource instance. - // The name of each attribute must be unique. - // - // +listType=atomic - // +optional - repeated NamedResourcesAttribute attributes = 2; -} - -// NamedResourcesIntSlice contains a slice of 64-bit integers. -message NamedResourcesIntSlice { - // Ints is the slice of 64-bit integers. - // - // +listType=atomic - repeated int64 ints = 1; -} - -// NamedResourcesRequest is used in ResourceRequestModel. -message NamedResourcesRequest { - // Selector is a CEL expression which must evaluate to true if a - // resource instance is suitable. The language is as defined in - // https://kubernetes.io/docs/reference/using-api/cel/ - // - // In addition, for each type NamedResourcesin AttributeValue there is a map that - // resolves to the corresponding value of the instance under evaluation. - // For example: - // - // attributes.quantity["a"].isGreaterThan(quantity("0")) && - // attributes.stringslice["b"].isSorted() - optional string selector = 1; -} - -// NamedResourcesResources is used in ResourceModel. -message NamedResourcesResources { - // The list of all individual resources instances currently available. - // - // +listType=atomic - repeated NamedResourcesInstance instances = 1; -} - -// NamedResourcesStringSlice contains a slice of strings. -message NamedResourcesStringSlice { - // Strings is the slice of strings. - // - // +listType=atomic - repeated string strings = 1; -} - -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -message PodSchedulingContext { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec describes where resources for the Pod are needed. - optional PodSchedulingContextSpec spec = 2; - - // Status describes where resources for the Pod can be allocated. - // +optional - optional PodSchedulingContextStatus status = 3; -} - -// PodSchedulingContextList is a collection of Pod scheduling objects. -message PodSchedulingContextList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of PodSchedulingContext objects. - repeated PodSchedulingContext items = 2; -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. -message PodSchedulingContextSpec { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. - // +optional - optional string selectedNode = 1; - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - repeated string potentialNodes = 2; -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -message PodSchedulingContextStatus { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. 
- // - // +listType=map - // +listMapKey=name - // +optional - repeated ResourceClaimSchedulingStatus resourceClaims = 1; -} - -// ResourceClaim describes which resources are needed by a resource consumer. -// Its status tracks whether the resource has been allocated and what the -// resulting attributes are. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -message ResourceClaim { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec describes the desired attributes of a resource that then needs - // to be allocated. It can only be set once when creating the - // ResourceClaim. - optional ResourceClaimSpec spec = 2; - - // Status describes whether the resource is available and with which - // attributes. - // +optional - optional ResourceClaimStatus status = 3; -} - -// ResourceClaimConsumerReference contains enough information to let you -// locate the consumer of a ResourceClaim. The user must be a resource in the same -// namespace as the ResourceClaim. -message ResourceClaimConsumerReference { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - optional string apiGroup = 1; - - // Resource is the type of resource being referenced, for example "pods". - optional string resource = 3; - - // Name is the name of resource being referenced. - optional string name = 4; - - // UID identifies exactly one incarnation of the resource. - optional string uid = 5; -} - -// ResourceClaimList is a collection of claims. -message ResourceClaimList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of resource claims. - repeated ResourceClaim items = 2; -} - -// ResourceClaimParameters defines resource requests for a ResourceClaim in an -// in-tree format understood by Kubernetes. -message ResourceClaimParameters { - // Standard object metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // If this object was created from some other resource, then this links - // back to that resource. This field is used to find the in-tree representation - // of the claim parameters when the parameter reference of the claim refers - // to some unknown type. - // +optional - optional ResourceClaimParametersReference generatedFrom = 2; - - // Shareable indicates whether the allocated claim is meant to be shareable - // by multiple consumers at the same time. - // +optional - optional bool shareable = 3; - - // DriverRequests describes all resources that are needed for the - // allocated claim. A single claim may use resources coming from - // different drivers. For each driver, this array has at most one - // entry which then may have one or more per-driver requests. - // - // May be empty, in which case the claim can always be allocated. - // - // +listType=atomic - repeated DriverRequests driverRequests = 4; -} - -// ResourceClaimParametersList is a collection of ResourceClaimParameters. -message ResourceClaimParametersList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of node resource capacity objects. 
- repeated ResourceClaimParameters items = 2; -} - -// ResourceClaimParametersReference contains enough information to let you -// locate the parameters for a ResourceClaim. The object must be in the same -// namespace as the ResourceClaim. -message ResourceClaimParametersReference { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - optional string apiGroup = 1; - - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata, for example "ConfigMap". - optional string kind = 2; - - // Name is the name of resource being referenced. - optional string name = 3; -} - -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -message ResourceClaimSchedulingStatus { - // Name matches the pod.spec.resourceClaims[*].Name field. - // +optional - optional string name = 1; - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - repeated string unsuitableNodes = 2; -} - -// ResourceClaimSpec defines how a resource is to be allocated. -message ResourceClaimSpec { - // ResourceClassName references the driver and additional parameters - // via the name of a ResourceClass that was created as part of the - // driver deployment. - optional string resourceClassName = 1; - - // ParametersRef references a separate object with arbitrary parameters - // that will be used by the driver when allocating a resource for the - // claim. - // - // The object must be in the same namespace as the ResourceClaim. - // +optional - optional ResourceClaimParametersReference parametersRef = 2; - - // Allocation can start immediately or when a Pod wants to use the - // resource. "WaitForFirstConsumer" is the default. - // +optional - optional string allocationMode = 3; -} - -// ResourceClaimStatus tracks whether the resource has been allocated and what -// the resulting attributes are. -message ResourceClaimStatus { - // DriverName is a copy of the driver name from the ResourceClass at - // the time when allocation started. - // +optional - optional string driverName = 1; - - // Allocation is set by the resource driver once a resource or set of - // resources has been allocated successfully. If this is not specified, the - // resources have not been allocated yet. - // +optional - optional AllocationResult allocation = 2; - - // ReservedFor indicates which entities are currently allowed to use - // the claim. A Pod which references a ResourceClaim which is not - // reserved for that Pod will not be started. - // - // There can be at most 32 such reservations. This may get increased in - // the future, but not reduced. - // - // +listType=map - // +listMapKey=uid - // +patchStrategy=merge - // +patchMergeKey=uid - // +optional - repeated ResourceClaimConsumerReference reservedFor = 3; - - // DeallocationRequested indicates that a ResourceClaim is to be - // deallocated. - // - // The driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // While DeallocationRequested is set, no new consumers may be added to - // ReservedFor. 
- // +optional - optional bool deallocationRequested = 4; -} - -// ResourceClaimTemplate is used to produce ResourceClaim objects. -message ResourceClaimTemplate { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Describes the ResourceClaim that is to be generated. - // - // This field is immutable. A ResourceClaim will get created by the - // control plane for a Pod when needed and then not get updated - // anymore. - optional ResourceClaimTemplateSpec spec = 2; -} - -// ResourceClaimTemplateList is a collection of claim templates. -message ResourceClaimTemplateList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of resource claim templates. - repeated ResourceClaimTemplate items = 2; -} - -// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. -message ResourceClaimTemplateSpec { - // ObjectMeta may contain labels and annotations that will be copied into the PVC - // when creating it. No other fields are allowed and will be rejected during - // validation. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec for the ResourceClaim. The entire content is copied unchanged - // into the ResourceClaim that gets created from this template. The - // same fields as in a ResourceClaim are also valid here. - optional ResourceClaimSpec spec = 2; -} - -// ResourceClass is used by administrators to influence how resources -// are allocated. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -message ResourceClass { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // DriverName defines the name of the dynamic resource driver that is - // used for allocation of a ResourceClaim that uses this class. - // - // Resource drivers have a unique name in forward domain order - // (acme.example.com). - optional string driverName = 2; - - // ParametersRef references an arbitrary separate object that may hold - // parameters that will be used by the driver when allocating a - // resource that uses this class. A dynamic resource driver can - // distinguish between parameters stored here and and those stored in - // ResourceClaimSpec. - // +optional - optional ResourceClassParametersReference parametersRef = 3; - - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a ResourceClaim that has not been allocated yet. - // - // Setting this field is optional. If null, all nodes are candidates. - // +optional - optional k8s.io.api.core.v1.NodeSelector suitableNodes = 4; - - // If and only if allocation of claims using this class is handled - // via structured parameters, then StructuredParameters must be set to true. - // +optional - optional bool structuredParameters = 5; -} - -// ResourceClassList is a collection of classes. -message ResourceClassList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of resource classes. - repeated ResourceClass items = 2; -} - -// ResourceClassParameters defines resource requests for a ResourceClass in an -// in-tree format understood by Kubernetes. 
-message ResourceClassParameters { - // Standard object metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // If this object was created from some other resource, then this links - // back to that resource. This field is used to find the in-tree representation - // of the class parameters when the parameter reference of the class refers - // to some unknown type. - // +optional - optional ResourceClassParametersReference generatedFrom = 2; - - // VendorParameters are arbitrary setup parameters for all claims using - // this class. They are ignored while allocating the claim. There must - // not be more than one entry per driver. - // - // +listType=atomic - // +optional - repeated VendorParameters vendorParameters = 3; - - // Filters describes additional contraints that must be met when using the class. - // - // +listType=atomic - repeated ResourceFilter filters = 4; -} - -// ResourceClassParametersList is a collection of ResourceClassParameters. -message ResourceClassParametersList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of node resource capacity objects. - repeated ResourceClassParameters items = 2; -} - -// ResourceClassParametersReference contains enough information to let you -// locate the parameters for a ResourceClass. -message ResourceClassParametersReference { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - optional string apiGroup = 1; - - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata. - optional string kind = 2; - - // Name is the name of resource being referenced. - optional string name = 3; - - // Namespace that contains the referenced resource. Must be empty - // for cluster-scoped resources and non-empty for namespaced - // resources. - // +optional - optional string namespace = 4; -} - -// ResourceFilter is a filter for resources from one particular driver. -message ResourceFilter { - // DriverName is the name used by the DRA driver kubelet plugin. - optional string driverName = 1; - - optional ResourceFilterModel resourceFilterModel = 2; -} - -// ResourceFilterModel must have one and only one field set. -message ResourceFilterModel { - // NamedResources describes a resource filter using the named resources model. - // - // +optional - optional NamedResourcesFilter namedResources = 1; -} - -// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. -message ResourceHandle { - // DriverName specifies the name of the resource driver whose kubelet - // plugin should be invoked to process this ResourceHandle's data once it - // lands on a node. This may differ from the DriverName set in - // ResourceClaimStatus this ResourceHandle is embedded in. - optional string driverName = 1; - - // Data contains the opaque data associated with this ResourceHandle. It is - // set by the controller component of the resource driver whose name - // matches the DriverName set in the ResourceClaimStatus this - // ResourceHandle is embedded in. It is set at allocation time and is - // intended for processing by the kubelet plugin whose name matches - // the DriverName set in this ResourceHandle. - // - // The maximum size of this field is 16KiB. This may get increased in the - // future, but not reduced. 
- // +optional - optional string data = 2; - - // If StructuredData is set, then it needs to be used instead of Data. - // - // +optional - optional StructuredResourceHandle structuredData = 5; -} - -// ResourceModel must have one and only one field set. -message ResourceModel { - // NamedResources describes available resources using the named resources model. - // - // +optional - optional NamedResourcesResources namedResources = 1; -} - -// ResourceRequest is a request for resources from one particular driver. -message ResourceRequest { - // VendorParameters are arbitrary setup parameters for the requested - // resource. They are ignored while allocating a claim. - // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorParameters = 1; - - optional ResourceRequestModel resourceRequestModel = 2; -} - -// ResourceRequestModel must have one and only one field set. -message ResourceRequestModel { - // NamedResources describes a request for resources with the named resources model. - // - // +optional - optional NamedResourcesRequest namedResources = 1; -} - -// ResourceSlice provides information about available -// resources on individual nodes. -message ResourceSlice { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // NodeName identifies the node which provides the resources - // if they are local to a node. - // - // A field selector can be used to list only ResourceSlice - // objects with a certain node name. - // - // +optional - optional string nodeName = 2; - - // DriverName identifies the DRA driver providing the capacity information. - // A field selector can be used to list only ResourceSlice - // objects with a certain driver name. - optional string driverName = 3; - - optional ResourceModel resourceModel = 4; -} - -// ResourceSliceList is a collection of ResourceSlices. -message ResourceSliceList { - // Standard list metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of node resource capacity objects. - repeated ResourceSlice items = 2; -} - -// StructuredResourceHandle is the in-tree representation of the allocation result. -message StructuredResourceHandle { - // VendorClassParameters are the per-claim configuration parameters - // from the resource class at the time that the claim was allocated. - // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorClassParameters = 1; - - // VendorClaimParameters are the per-claim configuration parameters - // from the resource claim parameters at the time that the claim was - // allocated. - // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorClaimParameters = 2; - - // NodeName is the name of the node providing the necessary resources - // if the resources are local to a node. - // - // +optional - optional string nodeName = 4; - - // Results lists all allocated driver resources. - // - // +listType=atomic - repeated DriverAllocationResult results = 5; -} - -// VendorParameters are opaque parameters for one particular driver. -message VendorParameters { - // DriverName is the name used by the DRA driver kubelet plugin. - optional string driverName = 1; - - // Parameters can be arbitrary setup parameters. They are ignored while - // allocating a claim. 
- // - // +optional - optional k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2; -} - diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha2/namedresources.go b/constraint/vendor/k8s.io/api/resource/v1alpha2/namedresources.go deleted file mode 100644 index b80c5c143..000000000 --- a/constraint/vendor/k8s.io/api/resource/v1alpha2/namedresources.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - "k8s.io/apimachinery/pkg/api/resource" -) - -// NamedResourcesResources is used in ResourceModel. -type NamedResourcesResources struct { - // The list of all individual resources instances currently available. - // - // +listType=atomic - Instances []NamedResourcesInstance `json:"instances" protobuf:"bytes,1,name=instances"` -} - -// NamedResourcesInstance represents one individual hardware instance that can be selected based -// on its attributes. -type NamedResourcesInstance struct { - // Name is unique identifier among all resource instances managed by - // the driver on the node. It must be a DNS subdomain. - Name string `json:"name" protobuf:"bytes,1,name=name"` - - // Attributes defines the attributes of this resource instance. - // The name of each attribute must be unique. - // - // +listType=atomic - // +optional - Attributes []NamedResourcesAttribute `json:"attributes,omitempty" protobuf:"bytes,2,opt,name=attributes"` -} - -// NamedResourcesAttribute is a combination of an attribute name and its value. -type NamedResourcesAttribute struct { - // Name is unique identifier among all resource instances managed by - // the driver on the node. It must be a DNS subdomain. - Name string `json:"name" protobuf:"bytes,1,name=name"` - - NamedResourcesAttributeValue `json:",inline" protobuf:"bytes,2,opt,name=attributeValue"` -} - -// The Go field names below have a Value suffix to avoid a conflict between the -// field "String" and the corresponding method. That method is required. -// The Kubernetes API is defined without that suffix to keep it more natural. - -// NamedResourcesAttributeValue must have one and only one field set. -type NamedResourcesAttributeValue struct { - // QuantityValue is a quantity. - QuantityValue *resource.Quantity `json:"quantity,omitempty" protobuf:"bytes,6,opt,name=quantity"` - // BoolValue is a true/false value. - BoolValue *bool `json:"bool,omitempty" protobuf:"bytes,2,opt,name=bool"` - // IntValue is a 64-bit integer. - IntValue *int64 `json:"int,omitempty" protobuf:"varint,7,opt,name=int"` - // IntSliceValue is an array of 64-bit integers. - IntSliceValue *NamedResourcesIntSlice `json:"intSlice,omitempty" protobuf:"varint,8,rep,name=intSlice"` - // StringValue is a string. - StringValue *string `json:"string,omitempty" protobuf:"bytes,5,opt,name=string"` - // StringSliceValue is an array of strings. 
- StringSliceValue *NamedResourcesStringSlice `json:"stringSlice,omitempty" protobuf:"bytes,9,rep,name=stringSlice"` - // VersionValue is a semantic version according to semver.org spec 2.0.0. - VersionValue *string `json:"version,omitempty" protobuf:"bytes,10,opt,name=version"` -} - -// NamedResourcesIntSlice contains a slice of 64-bit integers. -type NamedResourcesIntSlice struct { - // Ints is the slice of 64-bit integers. - // - // +listType=atomic - Ints []int64 `json:"ints" protobuf:"bytes,1,opt,name=ints"` -} - -// NamedResourcesStringSlice contains a slice of strings. -type NamedResourcesStringSlice struct { - // Strings is the slice of strings. - // - // +listType=atomic - Strings []string `json:"strings" protobuf:"bytes,1,opt,name=strings"` -} - -// NamedResourcesRequest is used in ResourceRequestModel. -type NamedResourcesRequest struct { - // Selector is a CEL expression which must evaluate to true if a - // resource instance is suitable. The language is as defined in - // https://kubernetes.io/docs/reference/using-api/cel/ - // - // In addition, for each type NamedResourcesin AttributeValue there is a map that - // resolves to the corresponding value of the instance under evaluation. - // For example: - // - // attributes.quantity["a"].isGreaterThan(quantity("0")) && - // attributes.stringslice["b"].isSorted() - Selector string `json:"selector" protobuf:"bytes,1,name=selector"` -} - -// NamedResourcesFilter is used in ResourceFilterModel. -type NamedResourcesFilter struct { - // Selector is a CEL expression which must evaluate to true if a - // resource instance is suitable. The language is as defined in - // https://kubernetes.io/docs/reference/using-api/cel/ - // - // In addition, for each type NamedResourcesin AttributeValue there is a map that - // resolves to the corresponding value of the instance under evaluation. - // For example: - // - // attributes.quantity["a"].isGreaterThan(quantity("0")) && - // attributes.stringslice["b"].isSorted() - Selector string `json:"selector" protobuf:"bytes,1,name=selector"` -} - -// NamedResourcesAllocationResult is used in AllocationResultModel. -type NamedResourcesAllocationResult struct { - // Name is the name of the selected resource instance. - Name string `json:"name" protobuf:"bytes,1,name=name"` -} diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha2/register.go b/constraint/vendor/k8s.io/api/resource/v1alpha2/register.go deleted file mode 100644 index 893fb4c1e..000000000 --- a/constraint/vendor/k8s.io/api/resource/v1alpha2/register.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha2 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "resource.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &ResourceClass{}, - &ResourceClassList{}, - &ResourceClaim{}, - &ResourceClaimList{}, - &ResourceClaimTemplate{}, - &ResourceClaimTemplateList{}, - &PodSchedulingContext{}, - &PodSchedulingContextList{}, - &ResourceSlice{}, - &ResourceSliceList{}, - &ResourceClaimParameters{}, - &ResourceClaimParametersList{}, - &ResourceClassParameters{}, - &ResourceClassParametersList{}, - ) - - // Add common types - scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) - - // Add the watch version that applies - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha2/types.go b/constraint/vendor/k8s.io/api/resource/v1alpha2/types.go deleted file mode 100644 index 9005144cf..000000000 --- a/constraint/vendor/k8s.io/api/resource/v1alpha2/types.go +++ /dev/null @@ -1,737 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" -) - -const ( - // Finalizer is the finalizer that gets set for claims - // which were allocated through a builtin controller. - Finalizer = "dra.k8s.io/delete-protection" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaim describes which resources are needed by a resource consumer. -// Its status tracks whether the resource has been allocated and what the -// resulting attributes are. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. 
-type ResourceClaim struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec describes the desired attributes of a resource that then needs - // to be allocated. It can only be set once when creating the - // ResourceClaim. - Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` - - // Status describes whether the resource is available and with which - // attributes. - // +optional - Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ResourceClaimSpec defines how a resource is to be allocated. -type ResourceClaimSpec struct { - // ResourceClassName references the driver and additional parameters - // via the name of a ResourceClass that was created as part of the - // driver deployment. - ResourceClassName string `json:"resourceClassName" protobuf:"bytes,1,name=resourceClassName"` - - // ParametersRef references a separate object with arbitrary parameters - // that will be used by the driver when allocating a resource for the - // claim. - // - // The object must be in the same namespace as the ResourceClaim. - // +optional - ParametersRef *ResourceClaimParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,2,opt,name=parametersRef"` - - // Allocation can start immediately or when a Pod wants to use the - // resource. "WaitForFirstConsumer" is the default. - // +optional - AllocationMode AllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,3,opt,name=allocationMode"` -} - -// AllocationMode describes whether a ResourceClaim gets allocated immediately -// when it gets created (AllocationModeImmediate) or whether allocation is -// delayed until it is needed for a Pod -// (AllocationModeWaitForFirstConsumer). Other modes might get added in the -// future. -type AllocationMode string - -const ( - // When a ResourceClaim has AllocationModeWaitForFirstConsumer, allocation is - // delayed until a Pod gets scheduled that needs the ResourceClaim. The - // scheduler will consider all resource requirements of that Pod and - // trigger allocation for a node that fits the Pod. - AllocationModeWaitForFirstConsumer AllocationMode = "WaitForFirstConsumer" - - // When a ResourceClaim has AllocationModeImmediate, allocation starts - // as soon as the ResourceClaim gets created. This is done without - // considering the needs of Pods that will use the ResourceClaim - // because those Pods are not known yet. - AllocationModeImmediate AllocationMode = "Immediate" -) - -// ResourceClaimStatus tracks whether the resource has been allocated and what -// the resulting attributes are. -type ResourceClaimStatus struct { - // DriverName is a copy of the driver name from the ResourceClass at - // the time when allocation started. - // +optional - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - // Allocation is set by the resource driver once a resource or set of - // resources has been allocated successfully. If this is not specified, the - // resources have not been allocated yet. - // +optional - Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,2,opt,name=allocation"` - - // ReservedFor indicates which entities are currently allowed to use - // the claim. A Pod which references a ResourceClaim which is not - // reserved for that Pod will not be started. - // - // There can be at most 32 such reservations. 
This may get increased in - // the future, but not reduced. - // - // +listType=map - // +listMapKey=uid - // +patchStrategy=merge - // +patchMergeKey=uid - // +optional - ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,3,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"` - - // DeallocationRequested indicates that a ResourceClaim is to be - // deallocated. - // - // The driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // While DeallocationRequested is set, no new consumers may be added to - // ReservedFor. - // +optional - DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"varint,4,opt,name=deallocationRequested"` -} - -// ReservedForMaxSize is the maximum number of entries in -// claim.status.reservedFor. -const ResourceClaimReservedForMaxSize = 32 - -// AllocationResult contains attributes of an allocated resource. -type AllocationResult struct { - // ResourceHandles contain the state associated with an allocation that - // should be maintained throughout the lifetime of a claim. Each - // ResourceHandle contains data that should be passed to a specific kubelet - // plugin once it lands on a node. This data is returned by the driver - // after a successful allocation and is opaque to Kubernetes. Driver - // documentation may explain to users how to interpret this data if needed. - // - // Setting this field is optional. It has a maximum size of 32 entries. - // If null (or empty), it is assumed this allocation will be processed by a - // single kubelet plugin with no ResourceHandle data attached. The name of - // the kubelet plugin invoked will match the DriverName set in the - // ResourceClaimStatus this AllocationResult is embedded in. - // - // +listType=atomic - // +optional - ResourceHandles []ResourceHandle `json:"resourceHandles,omitempty" protobuf:"bytes,1,opt,name=resourceHandles"` - - // This field will get set by the resource driver after it has allocated - // the resource to inform the scheduler where it can schedule Pods using - // the ResourceClaim. - // - // Setting this field is optional. If null, the resource is available - // everywhere. - // +optional - AvailableOnNodes *v1.NodeSelector `json:"availableOnNodes,omitempty" protobuf:"bytes,2,opt,name=availableOnNodes"` - - // Shareable determines whether the resource supports more - // than one consumer at a time. - // +optional - Shareable bool `json:"shareable,omitempty" protobuf:"varint,3,opt,name=shareable"` -} - -// AllocationResultResourceHandlesMaxSize represents the maximum number of -// entries in allocation.resourceHandles. -const AllocationResultResourceHandlesMaxSize = 32 - -// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin. -type ResourceHandle struct { - // DriverName specifies the name of the resource driver whose kubelet - // plugin should be invoked to process this ResourceHandle's data once it - // lands on a node. This may differ from the DriverName set in - // ResourceClaimStatus this ResourceHandle is embedded in. - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - // Data contains the opaque data associated with this ResourceHandle. It is - // set by the controller component of the resource driver whose name - // matches the DriverName set in the ResourceClaimStatus this - // ResourceHandle is embedded in. 
It is set at allocation time and is - // intended for processing by the kubelet plugin whose name matches - // the DriverName set in this ResourceHandle. - // - // The maximum size of this field is 16KiB. This may get increased in the - // future, but not reduced. - // +optional - Data string `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` - - // If StructuredData is set, then it needs to be used instead of Data. - // - // +optional - StructuredData *StructuredResourceHandle `json:"structuredData,omitempty" protobuf:"bytes,5,opt,name=structuredData"` -} - -// ResourceHandleDataMaxSize represents the maximum size of resourceHandle.data. -const ResourceHandleDataMaxSize = 16 * 1024 - -// StructuredResourceHandle is the in-tree representation of the allocation result. -type StructuredResourceHandle struct { - // VendorClassParameters are the per-claim configuration parameters - // from the resource class at the time that the claim was allocated. - // - // +optional - VendorClassParameters runtime.RawExtension `json:"vendorClassParameters,omitempty" protobuf:"bytes,1,opt,name=vendorClassParameters"` - - // VendorClaimParameters are the per-claim configuration parameters - // from the resource claim parameters at the time that the claim was - // allocated. - // - // +optional - VendorClaimParameters runtime.RawExtension `json:"vendorClaimParameters,omitempty" protobuf:"bytes,2,opt,name=vendorClaimParameters"` - - // NodeName is the name of the node providing the necessary resources - // if the resources are local to a node. - // - // +optional - NodeName string `json:"nodeName,omitempty" protobuf:"bytes,4,name=nodeName"` - - // Results lists all allocated driver resources. - // - // +listType=atomic - Results []DriverAllocationResult `json:"results" protobuf:"bytes,5,name=results"` -} - -// DriverAllocationResult contains vendor parameters and the allocation result for -// one request. -type DriverAllocationResult struct { - // VendorRequestParameters are the per-request configuration parameters - // from the time that the claim was allocated. - // - // +optional - VendorRequestParameters runtime.RawExtension `json:"vendorRequestParameters,omitempty" protobuf:"bytes,1,opt,name=vendorRequestParameters"` - - AllocationResultModel `json:",inline" protobuf:"bytes,2,name=allocationResultModel"` -} - -// AllocationResultModel must have one and only one field set. -type AllocationResultModel struct { - // NamedResources describes the allocation result when using the named resources model. - // - // +optional - NamedResources *NamedResourcesAllocationResult `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaimList is a collection of claims. -type ResourceClaimList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of resource claims. - Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. 
-// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -type PodSchedulingContext struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec describes where resources for the Pod are needed. - Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` - - // Status describes where resources for the Pod can be allocated. - // +optional - Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. -type PodSchedulingContextSpec struct { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. - // +optional - SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"` - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -type PodSchedulingContextStatus struct { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. - // - // +listType=map - // +listMapKey=name - // +optional - ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"` - - // If there ever is a need to support other kinds of resources - // than ResourceClaim, then new fields could get added here - // for those other resources. -} - -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -type ResourceClaimSchedulingStatus struct { - // Name matches the pod.spec.resourceClaims[*].Name field. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +listType=atomic - // +optional - UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` -} - -// PodSchedulingNodeListMaxSize defines the maximum number of entries in the -// node lists that are stored in PodSchedulingContext objects. This limit is part -// of the API. -const PodSchedulingNodeListMaxSize = 128 - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// PodSchedulingContextList is a collection of Pod scheduling objects. 
-type PodSchedulingContextList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of PodSchedulingContext objects. - Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClass is used by administrators to influence how resources -// are allocated. -// -// This is an alpha type and requires enabling the DynamicResourceAllocation -// feature gate. -type ResourceClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // DriverName defines the name of the dynamic resource driver that is - // used for allocation of a ResourceClaim that uses this class. - // - // Resource drivers have a unique name in forward domain order - // (acme.example.com). - DriverName string `json:"driverName" protobuf:"bytes,2,name=driverName"` - - // ParametersRef references an arbitrary separate object that may hold - // parameters that will be used by the driver when allocating a - // resource that uses this class. A dynamic resource driver can - // distinguish between parameters stored here and and those stored in - // ResourceClaimSpec. - // +optional - ParametersRef *ResourceClassParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,3,opt,name=parametersRef"` - - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a ResourceClaim that has not been allocated yet. - // - // Setting this field is optional. If null, all nodes are candidates. - // +optional - SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,4,opt,name=suitableNodes"` - - // If and only if allocation of claims using this class is handled - // via structured parameters, then StructuredParameters must be set to true. - // +optional - StructuredParameters *bool `json:"structuredParameters,omitempty" protobuf:"bytes,5,opt,name=structuredParameters"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClassList is a collection of classes. -type ResourceClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of resource classes. - Items []ResourceClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ResourceClassParametersReference contains enough information to let you -// locate the parameters for a ResourceClass. -type ResourceClassParametersReference struct { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata. - Kind string `json:"kind" protobuf:"bytes,2,name=kind"` - // Name is the name of resource being referenced. 
- Name string `json:"name" protobuf:"bytes,3,name=name"` - // Namespace that contains the referenced resource. Must be empty - // for cluster-scoped resources and non-empty for namespaced - // resources. - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` -} - -// ResourceClaimParametersReference contains enough information to let you -// locate the parameters for a ResourceClaim. The object must be in the same -// namespace as the ResourceClaim. -type ResourceClaimParametersReference struct { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` - // Kind is the type of resource being referenced. This is the same - // value as in the parameter object's metadata, for example "ConfigMap". - Kind string `json:"kind" protobuf:"bytes,2,name=kind"` - // Name is the name of resource being referenced. - Name string `json:"name" protobuf:"bytes,3,name=name"` -} - -// ResourceClaimConsumerReference contains enough information to let you -// locate the consumer of a ResourceClaim. The user must be a resource in the same -// namespace as the ResourceClaim. -type ResourceClaimConsumerReference struct { - // APIGroup is the group for the resource being referenced. It is - // empty for the core API. This matches the group in the APIVersion - // that is used when creating the resources. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` - // Resource is the type of resource being referenced, for example "pods". - Resource string `json:"resource" protobuf:"bytes,3,name=resource"` - // Name is the name of resource being referenced. - Name string `json:"name" protobuf:"bytes,4,name=name"` - // UID identifies exactly one incarnation of the resource. - UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaimTemplate is used to produce ResourceClaim objects. -type ResourceClaimTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Describes the ResourceClaim that is to be generated. - // - // This field is immutable. A ResourceClaim will get created by the - // control plane for a Pod when needed and then not get updated - // anymore. - Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"` -} - -// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. -type ResourceClaimTemplateSpec struct { - // ObjectMeta may contain labels and annotations that will be copied into the PVC - // when creating it. No other fields are allowed and will be rejected during - // validation. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec for the ResourceClaim. The entire content is copied unchanged - // into the ResourceClaim that gets created from this template. The - // same fields as in a ResourceClaim are also valid here. 
- Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.26 - -// ResourceClaimTemplateList is a collection of claim templates. -type ResourceClaimTemplateList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of resource claim templates. - Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceSlice provides information about available -// resources on individual nodes. -type ResourceSlice struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // NodeName identifies the node which provides the resources - // if they are local to a node. - // - // A field selector can be used to list only ResourceSlice - // objects with a certain node name. - // - // +optional - NodeName string `json:"nodeName,omitempty" protobuf:"bytes,2,opt,name=nodeName"` - - // DriverName identifies the DRA driver providing the capacity information. - // A field selector can be used to list only ResourceSlice - // objects with a certain driver name. - DriverName string `json:"driverName" protobuf:"bytes,3,name=driverName"` - - ResourceModel `json:",inline" protobuf:"bytes,4,name=resourceModel"` -} - -// ResourceModel must have one and only one field set. -type ResourceModel struct { - // NamedResources describes available resources using the named resources model. - // - // +optional - NamedResources *NamedResourcesResources `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceSliceList is a collection of ResourceSlices. -type ResourceSliceList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of node resource capacity objects. - Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceClaimParameters defines resource requests for a ResourceClaim in an -// in-tree format understood by Kubernetes. -type ResourceClaimParameters struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // If this object was created from some other resource, then this links - // back to that resource. This field is used to find the in-tree representation - // of the claim parameters when the parameter reference of the claim refers - // to some unknown type. - // +optional - GeneratedFrom *ResourceClaimParametersReference `json:"generatedFrom,omitempty" protobuf:"bytes,2,opt,name=generatedFrom"` - - // Shareable indicates whether the allocated claim is meant to be shareable - // by multiple consumers at the same time. 
- // +optional - Shareable bool `json:"shareable,omitempty" protobuf:"bytes,3,opt,name=shareable"` - - // DriverRequests describes all resources that are needed for the - // allocated claim. A single claim may use resources coming from - // different drivers. For each driver, this array has at most one - // entry which then may have one or more per-driver requests. - // - // May be empty, in which case the claim can always be allocated. - // - // +listType=atomic - DriverRequests []DriverRequests `json:"driverRequests,omitempty" protobuf:"bytes,4,opt,name=driverRequests"` -} - -// DriverRequests describes all resources that are needed from one particular driver. -type DriverRequests struct { - // DriverName is the name used by the DRA driver kubelet plugin. - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - // VendorParameters are arbitrary setup parameters for all requests of the - // claim. They are ignored while allocating the claim. - // - // +optional - VendorParameters runtime.RawExtension `json:"vendorParameters,omitempty" protobuf:"bytes,2,opt,name=vendorParameters"` - - // Requests describes all resources that are needed from the driver. - // +listType=atomic - Requests []ResourceRequest `json:"requests,omitempty" protobuf:"bytes,3,opt,name=requests"` -} - -// ResourceRequest is a request for resources from one particular driver. -type ResourceRequest struct { - // VendorParameters are arbitrary setup parameters for the requested - // resource. They are ignored while allocating a claim. - // - // +optional - VendorParameters runtime.RawExtension `json:"vendorParameters,omitempty" protobuf:"bytes,1,opt,name=vendorParameters"` - - ResourceRequestModel `json:",inline" protobuf:"bytes,2,name=resourceRequestModel"` -} - -// ResourceRequestModel must have one and only one field set. -type ResourceRequestModel struct { - // NamedResources describes a request for resources with the named resources model. - // - // +optional - NamedResources *NamedResourcesRequest `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceClaimParametersList is a collection of ResourceClaimParameters. -type ResourceClaimParametersList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of node resource capacity objects. - Items []ResourceClaimParameters `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceClassParameters defines resource requests for a ResourceClass in an -// in-tree format understood by Kubernetes. -type ResourceClassParameters struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // If this object was created from some other resource, then this links - // back to that resource. This field is used to find the in-tree representation - // of the class parameters when the parameter reference of the class refers - // to some unknown type. 
- // +optional - GeneratedFrom *ResourceClassParametersReference `json:"generatedFrom,omitempty" protobuf:"bytes,2,opt,name=generatedFrom"` - - // VendorParameters are arbitrary setup parameters for all claims using - // this class. They are ignored while allocating the claim. There must - // not be more than one entry per driver. - // - // +listType=atomic - // +optional - VendorParameters []VendorParameters `json:"vendorParameters,omitempty" protobuf:"bytes,3,opt,name=vendorParameters"` - - // Filters describes additional contraints that must be met when using the class. - // - // +listType=atomic - Filters []ResourceFilter `json:"filters,omitempty" protobuf:"bytes,4,opt,name=filters"` -} - -// ResourceFilter is a filter for resources from one particular driver. -type ResourceFilter struct { - // DriverName is the name used by the DRA driver kubelet plugin. - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - ResourceFilterModel `json:",inline" protobuf:"bytes,2,name=resourceFilterModel"` -} - -// ResourceFilterModel must have one and only one field set. -type ResourceFilterModel struct { - // NamedResources describes a resource filter using the named resources model. - // - // +optional - NamedResources *NamedResourcesFilter `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.30 - -// ResourceClassParametersList is a collection of ResourceClassParameters. -type ResourceClassParametersList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of node resource capacity objects. - Items []ResourceClassParameters `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// VendorParameters are opaque parameters for one particular driver. -type VendorParameters struct { - // DriverName is the name used by the DRA driver kubelet plugin. - DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"` - - // Parameters can be arbitrary setup parameters. They are ignored while - // allocating a claim. - // - // +optional - Parameters runtime.RawExtension `json:"parameters,omitempty" protobuf:"bytes,2,opt,name=parameters"` -} diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go deleted file mode 100644 index 11f9ffbea..000000000 --- a/constraint/vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go +++ /dev/null @@ -1,395 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. 
Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-codegen.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllocationResult = map[string]string{ - "": "AllocationResult contains attributes of an allocated resource.", - "resourceHandles": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", - "availableOnNodes": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. If null, the resource is available everywhere.", - "shareable": "Shareable determines whether the resource supports more than one consumer at a time.", -} - -func (AllocationResult) SwaggerDoc() map[string]string { - return map_AllocationResult -} - -var map_AllocationResultModel = map[string]string{ - "": "AllocationResultModel must have one and only one field set.", - "namedResources": "NamedResources describes the allocation result when using the named resources model.", -} - -func (AllocationResultModel) SwaggerDoc() map[string]string { - return map_AllocationResultModel -} - -var map_DriverAllocationResult = map[string]string{ - "": "DriverAllocationResult contains vendor parameters and the allocation result for one request.", - "vendorRequestParameters": "VendorRequestParameters are the per-request configuration parameters from the time that the claim was allocated.", -} - -func (DriverAllocationResult) SwaggerDoc() map[string]string { - return map_DriverAllocationResult -} - -var map_DriverRequests = map[string]string{ - "": "DriverRequests describes all resources that are needed from one particular driver.", - "driverName": "DriverName is the name used by the DRA driver kubelet plugin.", - "vendorParameters": "VendorParameters are arbitrary setup parameters for all requests of the claim. 
They are ignored while allocating the claim.", - "requests": "Requests describes all resources that are needed from the driver.", -} - -func (DriverRequests) SwaggerDoc() map[string]string { - return map_DriverRequests -} - -var map_PodSchedulingContext = map[string]string{ - "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", - "metadata": "Standard object metadata", - "spec": "Spec describes where resources for the Pod are needed.", - "status": "Status describes where resources for the Pod can be allocated.", -} - -func (PodSchedulingContext) SwaggerDoc() map[string]string { - return map_PodSchedulingContext -} - -var map_PodSchedulingContextList = map[string]string{ - "": "PodSchedulingContextList is a collection of Pod scheduling objects.", - "metadata": "Standard list metadata", - "items": "Items is the list of PodSchedulingContext objects.", -} - -func (PodSchedulingContextList) SwaggerDoc() map[string]string { - return map_PodSchedulingContextList -} - -var map_PodSchedulingContextSpec = map[string]string{ - "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", - "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", - "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", -} - -func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { - return map_PodSchedulingContextSpec -} - -var map_PodSchedulingContextStatus = map[string]string{ - "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", - "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", -} - -func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { - return map_PodSchedulingContextStatus -} - -var map_ResourceClaim = map[string]string{ - "": "ResourceClaim describes which resources are needed by a resource consumer. Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", - "metadata": "Standard object metadata", - "spec": "Spec describes the desired attributes of a resource that then needs to be allocated. It can only be set once when creating the ResourceClaim.", - "status": "Status describes whether the resource is available and with which attributes.", -} - -func (ResourceClaim) SwaggerDoc() map[string]string { - return map_ResourceClaim -} - -var map_ResourceClaimConsumerReference = map[string]string{ - "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", - "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. 
This matches the group in the APIVersion that is used when creating the resources.", - "resource": "Resource is the type of resource being referenced, for example \"pods\".", - "name": "Name is the name of resource being referenced.", - "uid": "UID identifies exactly one incarnation of the resource.", -} - -func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string { - return map_ResourceClaimConsumerReference -} - -var map_ResourceClaimList = map[string]string{ - "": "ResourceClaimList is a collection of claims.", - "metadata": "Standard list metadata", - "items": "Items is the list of resource claims.", -} - -func (ResourceClaimList) SwaggerDoc() map[string]string { - return map_ResourceClaimList -} - -var map_ResourceClaimParameters = map[string]string{ - "": "ResourceClaimParameters defines resource requests for a ResourceClaim in an in-tree format understood by Kubernetes.", - "metadata": "Standard object metadata", - "generatedFrom": "If this object was created from some other resource, then this links back to that resource. This field is used to find the in-tree representation of the claim parameters when the parameter reference of the claim refers to some unknown type.", - "shareable": "Shareable indicates whether the allocated claim is meant to be shareable by multiple consumers at the same time.", - "driverRequests": "DriverRequests describes all resources that are needed for the allocated claim. A single claim may use resources coming from different drivers. For each driver, this array has at most one entry which then may have one or more per-driver requests.\n\nMay be empty, in which case the claim can always be allocated.", -} - -func (ResourceClaimParameters) SwaggerDoc() map[string]string { - return map_ResourceClaimParameters -} - -var map_ResourceClaimParametersList = map[string]string{ - "": "ResourceClaimParametersList is a collection of ResourceClaimParameters.", - "metadata": "Standard list metadata", - "items": "Items is the list of node resource capacity objects.", -} - -func (ResourceClaimParametersList) SwaggerDoc() map[string]string { - return map_ResourceClaimParametersList -} - -var map_ResourceClaimParametersReference = map[string]string{ - "": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.", - "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", - "kind": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \"ConfigMap\".", - "name": "Name is the name of resource being referenced.", -} - -func (ResourceClaimParametersReference) SwaggerDoc() map[string]string { - return map_ResourceClaimParametersReference -} - -var map_ResourceClaimSchedulingStatus = map[string]string{ - "": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", - "name": "Name matches the pod.spec.resourceClaims[*].Name field.", - "unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. 
This may get increased in the future, but not reduced.", -} - -func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string { - return map_ResourceClaimSchedulingStatus -} - -var map_ResourceClaimSpec = map[string]string{ - "": "ResourceClaimSpec defines how a resource is to be allocated.", - "resourceClassName": "ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment.", - "parametersRef": "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim.", - "allocationMode": "Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default.", -} - -func (ResourceClaimSpec) SwaggerDoc() map[string]string { - return map_ResourceClaimSpec -} - -var map_ResourceClaimStatus = map[string]string{ - "": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", - "driverName": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", - "allocation": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", - "deallocationRequested": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", -} - -func (ResourceClaimStatus) SwaggerDoc() map[string]string { - return map_ResourceClaimStatus -} - -var map_ResourceClaimTemplate = map[string]string{ - "": "ResourceClaimTemplate is used to produce ResourceClaim objects.", - "metadata": "Standard object metadata", - "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", -} - -func (ResourceClaimTemplate) SwaggerDoc() map[string]string { - return map_ResourceClaimTemplate -} - -var map_ResourceClaimTemplateList = map[string]string{ - "": "ResourceClaimTemplateList is a collection of claim templates.", - "metadata": "Standard list metadata", - "items": "Items is the list of resource claim templates.", -} - -func (ResourceClaimTemplateList) SwaggerDoc() map[string]string { - return map_ResourceClaimTemplateList -} - -var map_ResourceClaimTemplateSpec = map[string]string{ - "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", - "metadata": "ObjectMeta may contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", - "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. 
The same fields as in a ResourceClaim are also valid here.", -} - -func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string { - return map_ResourceClaimTemplateSpec -} - -var map_ResourceClass = map[string]string{ - "": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", - "metadata": "Standard object metadata", - "driverName": "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).", - "parametersRef": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and and those stored in ResourceClaimSpec.", - "suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. If null, all nodes are candidates.", - "structuredParameters": "If and only if allocation of claims using this class is handled via structured parameters, then StructuredParameters must be set to true.", -} - -func (ResourceClass) SwaggerDoc() map[string]string { - return map_ResourceClass -} - -var map_ResourceClassList = map[string]string{ - "": "ResourceClassList is a collection of classes.", - "metadata": "Standard list metadata", - "items": "Items is the list of resource classes.", -} - -func (ResourceClassList) SwaggerDoc() map[string]string { - return map_ResourceClassList -} - -var map_ResourceClassParameters = map[string]string{ - "": "ResourceClassParameters defines resource requests for a ResourceClass in an in-tree format understood by Kubernetes.", - "metadata": "Standard object metadata", - "generatedFrom": "If this object was created from some other resource, then this links back to that resource. This field is used to find the in-tree representation of the class parameters when the parameter reference of the class refers to some unknown type.", - "vendorParameters": "VendorParameters are arbitrary setup parameters for all claims using this class. They are ignored while allocating the claim. There must not be more than one entry per driver.", - "filters": "Filters describes additional contraints that must be met when using the class.", -} - -func (ResourceClassParameters) SwaggerDoc() map[string]string { - return map_ResourceClassParameters -} - -var map_ResourceClassParametersList = map[string]string{ - "": "ResourceClassParametersList is a collection of ResourceClassParameters.", - "metadata": "Standard list metadata", - "items": "Items is the list of node resource capacity objects.", -} - -func (ResourceClassParametersList) SwaggerDoc() map[string]string { - return map_ResourceClassParametersList -} - -var map_ResourceClassParametersReference = map[string]string{ - "": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.", - "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", - "kind": "Kind is the type of resource being referenced. 
This is the same value as in the parameter object's metadata.", - "name": "Name is the name of resource being referenced.", - "namespace": "Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources.", -} - -func (ResourceClassParametersReference) SwaggerDoc() map[string]string { - return map_ResourceClassParametersReference -} - -var map_ResourceFilter = map[string]string{ - "": "ResourceFilter is a filter for resources from one particular driver.", - "driverName": "DriverName is the name used by the DRA driver kubelet plugin.", -} - -func (ResourceFilter) SwaggerDoc() map[string]string { - return map_ResourceFilter -} - -var map_ResourceFilterModel = map[string]string{ - "": "ResourceFilterModel must have one and only one field set.", - "namedResources": "NamedResources describes a resource filter using the named resources model.", -} - -func (ResourceFilterModel) SwaggerDoc() map[string]string { - return map_ResourceFilterModel -} - -var map_ResourceHandle = map[string]string{ - "": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", - "driverName": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.", - "data": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.", - "structuredData": "If StructuredData is set, then it needs to be used instead of Data.", -} - -func (ResourceHandle) SwaggerDoc() map[string]string { - return map_ResourceHandle -} - -var map_ResourceModel = map[string]string{ - "": "ResourceModel must have one and only one field set.", - "namedResources": "NamedResources describes available resources using the named resources model.", -} - -func (ResourceModel) SwaggerDoc() map[string]string { - return map_ResourceModel -} - -var map_ResourceRequest = map[string]string{ - "": "ResourceRequest is a request for resources from one particular driver.", - "vendorParameters": "VendorParameters are arbitrary setup parameters for the requested resource. 
They are ignored while allocating a claim.", -} - -func (ResourceRequest) SwaggerDoc() map[string]string { - return map_ResourceRequest -} - -var map_ResourceRequestModel = map[string]string{ - "": "ResourceRequestModel must have one and only one field set.", - "namedResources": "NamedResources describes a request for resources with the named resources model.", -} - -func (ResourceRequestModel) SwaggerDoc() map[string]string { - return map_ResourceRequestModel -} - -var map_ResourceSlice = map[string]string{ - "": "ResourceSlice provides information about available resources on individual nodes.", - "metadata": "Standard object metadata", - "nodeName": "NodeName identifies the node which provides the resources if they are local to a node.\n\nA field selector can be used to list only ResourceSlice objects with a certain node name.", - "driverName": "DriverName identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.", -} - -func (ResourceSlice) SwaggerDoc() map[string]string { - return map_ResourceSlice -} - -var map_ResourceSliceList = map[string]string{ - "": "ResourceSliceList is a collection of ResourceSlices.", - "metadata": "Standard list metadata", - "items": "Items is the list of node resource capacity objects.", -} - -func (ResourceSliceList) SwaggerDoc() map[string]string { - return map_ResourceSliceList -} - -var map_StructuredResourceHandle = map[string]string{ - "": "StructuredResourceHandle is the in-tree representation of the allocation result.", - "vendorClassParameters": "VendorClassParameters are the per-claim configuration parameters from the resource class at the time that the claim was allocated.", - "vendorClaimParameters": "VendorClaimParameters are the per-claim configuration parameters from the resource claim parameters at the time that the claim was allocated.", - "nodeName": "NodeName is the name of the node providing the necessary resources if the resources are local to a node.", - "results": "Results lists all allocated driver resources.", -} - -func (StructuredResourceHandle) SwaggerDoc() map[string]string { - return map_StructuredResourceHandle -} - -var map_VendorParameters = map[string]string{ - "": "VendorParameters are opaque parameters for one particular driver.", - "driverName": "DriverName is the name used by the DRA driver kubelet plugin.", - "parameters": "Parameters can be arbitrary setup parameters. They are ignored while allocating a claim.", -} - -func (VendorParameters) SwaggerDoc() map[string]string { - return map_VendorParameters -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go deleted file mode 100644 index 52de8e1ad..000000000 --- a/constraint/vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go +++ /dev/null @@ -1,1126 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { - *out = *in - if in.ResourceHandles != nil { - in, out := &in.ResourceHandles, &out.ResourceHandles - *out = make([]ResourceHandle, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AvailableOnNodes != nil { - in, out := &in.AvailableOnNodes, &out.AvailableOnNodes - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult. -func (in *AllocationResult) DeepCopy() *AllocationResult { - if in == nil { - return nil - } - out := new(AllocationResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllocationResultModel) DeepCopyInto(out *AllocationResultModel) { - *out = *in - if in.NamedResources != nil { - in, out := &in.NamedResources, &out.NamedResources - *out = new(NamedResourcesAllocationResult) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResultModel. -func (in *AllocationResultModel) DeepCopy() *AllocationResultModel { - if in == nil { - return nil - } - out := new(AllocationResultModel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DriverAllocationResult) DeepCopyInto(out *DriverAllocationResult) { - *out = *in - in.VendorRequestParameters.DeepCopyInto(&out.VendorRequestParameters) - in.AllocationResultModel.DeepCopyInto(&out.AllocationResultModel) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverAllocationResult. -func (in *DriverAllocationResult) DeepCopy() *DriverAllocationResult { - if in == nil { - return nil - } - out := new(DriverAllocationResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DriverRequests) DeepCopyInto(out *DriverRequests) { - *out = *in - in.VendorParameters.DeepCopyInto(&out.VendorParameters) - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make([]ResourceRequest, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverRequests. -func (in *DriverRequests) DeepCopy() *DriverRequests { - if in == nil { - return nil - } - out := new(DriverRequests) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesAllocationResult) DeepCopyInto(out *NamedResourcesAllocationResult) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAllocationResult. 
-func (in *NamedResourcesAllocationResult) DeepCopy() *NamedResourcesAllocationResult { - if in == nil { - return nil - } - out := new(NamedResourcesAllocationResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesAttribute) DeepCopyInto(out *NamedResourcesAttribute) { - *out = *in - in.NamedResourcesAttributeValue.DeepCopyInto(&out.NamedResourcesAttributeValue) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAttribute. -func (in *NamedResourcesAttribute) DeepCopy() *NamedResourcesAttribute { - if in == nil { - return nil - } - out := new(NamedResourcesAttribute) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesAttributeValue) DeepCopyInto(out *NamedResourcesAttributeValue) { - *out = *in - if in.QuantityValue != nil { - in, out := &in.QuantityValue, &out.QuantityValue - x := (*in).DeepCopy() - *out = &x - } - if in.BoolValue != nil { - in, out := &in.BoolValue, &out.BoolValue - *out = new(bool) - **out = **in - } - if in.IntValue != nil { - in, out := &in.IntValue, &out.IntValue - *out = new(int64) - **out = **in - } - if in.IntSliceValue != nil { - in, out := &in.IntSliceValue, &out.IntSliceValue - *out = new(NamedResourcesIntSlice) - (*in).DeepCopyInto(*out) - } - if in.StringValue != nil { - in, out := &in.StringValue, &out.StringValue - *out = new(string) - **out = **in - } - if in.StringSliceValue != nil { - in, out := &in.StringSliceValue, &out.StringSliceValue - *out = new(NamedResourcesStringSlice) - (*in).DeepCopyInto(*out) - } - if in.VersionValue != nil { - in, out := &in.VersionValue, &out.VersionValue - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAttributeValue. -func (in *NamedResourcesAttributeValue) DeepCopy() *NamedResourcesAttributeValue { - if in == nil { - return nil - } - out := new(NamedResourcesAttributeValue) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesFilter) DeepCopyInto(out *NamedResourcesFilter) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesFilter. -func (in *NamedResourcesFilter) DeepCopy() *NamedResourcesFilter { - if in == nil { - return nil - } - out := new(NamedResourcesFilter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesInstance) DeepCopyInto(out *NamedResourcesInstance) { - *out = *in - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make([]NamedResourcesAttribute, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesInstance. 
-func (in *NamedResourcesInstance) DeepCopy() *NamedResourcesInstance { - if in == nil { - return nil - } - out := new(NamedResourcesInstance) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesIntSlice) DeepCopyInto(out *NamedResourcesIntSlice) { - *out = *in - if in.Ints != nil { - in, out := &in.Ints, &out.Ints - *out = make([]int64, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesIntSlice. -func (in *NamedResourcesIntSlice) DeepCopy() *NamedResourcesIntSlice { - if in == nil { - return nil - } - out := new(NamedResourcesIntSlice) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesRequest) DeepCopyInto(out *NamedResourcesRequest) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesRequest. -func (in *NamedResourcesRequest) DeepCopy() *NamedResourcesRequest { - if in == nil { - return nil - } - out := new(NamedResourcesRequest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesResources) DeepCopyInto(out *NamedResourcesResources) { - *out = *in - if in.Instances != nil { - in, out := &in.Instances, &out.Instances - *out = make([]NamedResourcesInstance, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesResources. -func (in *NamedResourcesResources) DeepCopy() *NamedResourcesResources { - if in == nil { - return nil - } - out := new(NamedResourcesResources) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedResourcesStringSlice) DeepCopyInto(out *NamedResourcesStringSlice) { - *out = *in - if in.Strings != nil { - in, out := &in.Strings, &out.Strings - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesStringSlice. -func (in *NamedResourcesStringSlice) DeepCopy() *NamedResourcesStringSlice { - if in == nil { - return nil - } - out := new(NamedResourcesStringSlice) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. -func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { - if in == nil { - return nil - } - out := new(PodSchedulingContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSchedulingContext, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. -func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { - if in == nil { - return nil - } - out := new(PodSchedulingContextList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { - *out = *in - if in.PotentialNodes != nil { - in, out := &in.PotentialNodes, &out.PotentialNodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. -func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { - if in == nil { - return nil - } - out := new(PodSchedulingContextSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { - *out = *in - if in.ResourceClaims != nil { - in, out := &in.ResourceClaims, &out.ResourceClaims - *out = make([]ResourceClaimSchedulingStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. -func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { - if in == nil { - return nil - } - out := new(PodSchedulingContextStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim. -func (in *ResourceClaim) DeepCopy() *ResourceClaim { - if in == nil { - return nil - } - out := new(ResourceClaim) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaim) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference. -func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference { - if in == nil { - return nil - } - out := new(ResourceClaimConsumerReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList. -func (in *ResourceClaimList) DeepCopy() *ResourceClaimList { - if in == nil { - return nil - } - out := new(ResourceClaimList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaimList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimParameters) DeepCopyInto(out *ResourceClaimParameters) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.GeneratedFrom != nil { - in, out := &in.GeneratedFrom, &out.GeneratedFrom - *out = new(ResourceClaimParametersReference) - **out = **in - } - if in.DriverRequests != nil { - in, out := &in.DriverRequests, &out.DriverRequests - *out = make([]DriverRequests, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParameters. -func (in *ResourceClaimParameters) DeepCopy() *ResourceClaimParameters { - if in == nil { - return nil - } - out := new(ResourceClaimParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaimParameters) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimParametersList) DeepCopyInto(out *ResourceClaimParametersList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClaimParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersList. -func (in *ResourceClaimParametersList) DeepCopy() *ResourceClaimParametersList { - if in == nil { - return nil - } - out := new(ResourceClaimParametersList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ResourceClaimParametersList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimParametersReference) DeepCopyInto(out *ResourceClaimParametersReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersReference. -func (in *ResourceClaimParametersReference) DeepCopy() *ResourceClaimParametersReference { - if in == nil { - return nil - } - out := new(ResourceClaimParametersReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) { - *out = *in - if in.UnsuitableNodes != nil { - in, out := &in.UnsuitableNodes, &out.UnsuitableNodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus. -func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus { - if in == nil { - return nil - } - out := new(ResourceClaimSchedulingStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { - *out = *in - if in.ParametersRef != nil { - in, out := &in.ParametersRef, &out.ParametersRef - *out = new(ResourceClaimParametersReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec. -func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec { - if in == nil { - return nil - } - out := new(ResourceClaimSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) { - *out = *in - if in.Allocation != nil { - in, out := &in.Allocation, &out.Allocation - *out = new(AllocationResult) - (*in).DeepCopyInto(*out) - } - if in.ReservedFor != nil { - in, out := &in.ReservedFor, &out.ReservedFor - *out = make([]ResourceClaimConsumerReference, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus. -func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus { - if in == nil { - return nil - } - out := new(ResourceClaimStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate. 
-func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate { - if in == nil { - return nil - } - out := new(ResourceClaimTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClaimTemplate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList. -func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList { - if in == nil { - return nil - } - out := new(ResourceClaimTemplateList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec. -func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec { - if in == nil { - return nil - } - out := new(ResourceClaimTemplateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClass) DeepCopyInto(out *ResourceClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.ParametersRef != nil { - in, out := &in.ParametersRef, &out.ParametersRef - *out = new(ResourceClassParametersReference) - **out = **in - } - if in.SuitableNodes != nil { - in, out := &in.SuitableNodes, &out.SuitableNodes - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) - } - if in.StructuredParameters != nil { - in, out := &in.StructuredParameters, &out.StructuredParameters - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClass. -func (in *ResourceClass) DeepCopy() *ResourceClass { - if in == nil { - return nil - } - out := new(ResourceClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceClassList) DeepCopyInto(out *ResourceClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassList. -func (in *ResourceClassList) DeepCopy() *ResourceClassList { - if in == nil { - return nil - } - out := new(ResourceClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClassParameters) DeepCopyInto(out *ResourceClassParameters) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.GeneratedFrom != nil { - in, out := &in.GeneratedFrom, &out.GeneratedFrom - *out = new(ResourceClassParametersReference) - **out = **in - } - if in.VendorParameters != nil { - in, out := &in.VendorParameters, &out.VendorParameters - *out = make([]VendorParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Filters != nil { - in, out := &in.Filters, &out.Filters - *out = make([]ResourceFilter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParameters. -func (in *ResourceClassParameters) DeepCopy() *ResourceClassParameters { - if in == nil { - return nil - } - out := new(ResourceClassParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClassParameters) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClassParametersList) DeepCopyInto(out *ResourceClassParametersList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceClassParameters, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersList. -func (in *ResourceClassParametersList) DeepCopy() *ResourceClassParametersList { - if in == nil { - return nil - } - out := new(ResourceClassParametersList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceClassParametersList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceClassParametersReference) DeepCopyInto(out *ResourceClassParametersReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersReference. -func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersReference { - if in == nil { - return nil - } - out := new(ResourceClassParametersReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceFilter) DeepCopyInto(out *ResourceFilter) { - *out = *in - in.ResourceFilterModel.DeepCopyInto(&out.ResourceFilterModel) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilter. -func (in *ResourceFilter) DeepCopy() *ResourceFilter { - if in == nil { - return nil - } - out := new(ResourceFilter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceFilterModel) DeepCopyInto(out *ResourceFilterModel) { - *out = *in - if in.NamedResources != nil { - in, out := &in.NamedResources, &out.NamedResources - *out = new(NamedResourcesFilter) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilterModel. -func (in *ResourceFilterModel) DeepCopy() *ResourceFilterModel { - if in == nil { - return nil - } - out := new(ResourceFilterModel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceHandle) DeepCopyInto(out *ResourceHandle) { - *out = *in - if in.StructuredData != nil { - in, out := &in.StructuredData, &out.StructuredData - *out = new(StructuredResourceHandle) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHandle. -func (in *ResourceHandle) DeepCopy() *ResourceHandle { - if in == nil { - return nil - } - out := new(ResourceHandle) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceModel) DeepCopyInto(out *ResourceModel) { - *out = *in - if in.NamedResources != nil { - in, out := &in.NamedResources, &out.NamedResources - *out = new(NamedResourcesResources) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceModel. -func (in *ResourceModel) DeepCopy() *ResourceModel { - if in == nil { - return nil - } - out := new(ResourceModel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceRequest) DeepCopyInto(out *ResourceRequest) { - *out = *in - in.VendorParameters.DeepCopyInto(&out.VendorParameters) - in.ResourceRequestModel.DeepCopyInto(&out.ResourceRequestModel) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequest. 
-func (in *ResourceRequest) DeepCopy() *ResourceRequest { - if in == nil { - return nil - } - out := new(ResourceRequest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceRequestModel) DeepCopyInto(out *ResourceRequestModel) { - *out = *in - if in.NamedResources != nil { - in, out := &in.NamedResources, &out.NamedResources - *out = new(NamedResourcesRequest) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequestModel. -func (in *ResourceRequestModel) DeepCopy() *ResourceRequestModel { - if in == nil { - return nil - } - out := new(ResourceRequestModel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.ResourceModel.DeepCopyInto(&out.ResourceModel) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice. -func (in *ResourceSlice) DeepCopy() *ResourceSlice { - if in == nil { - return nil - } - out := new(ResourceSlice) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceSlice) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceSlice, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList. -func (in *ResourceSliceList) DeepCopy() *ResourceSliceList { - if in == nil { - return nil - } - out := new(ResourceSliceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceSliceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StructuredResourceHandle) DeepCopyInto(out *StructuredResourceHandle) { - *out = *in - in.VendorClassParameters.DeepCopyInto(&out.VendorClassParameters) - in.VendorClaimParameters.DeepCopyInto(&out.VendorClaimParameters) - if in.Results != nil { - in, out := &in.Results, &out.Results - *out = make([]DriverAllocationResult, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StructuredResourceHandle. 
-func (in *StructuredResourceHandle) DeepCopy() *StructuredResourceHandle {
-	if in == nil {
-		return nil
-	}
-	out := new(StructuredResourceHandle)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VendorParameters) DeepCopyInto(out *VendorParameters) {
-	*out = *in
-	in.Parameters.DeepCopyInto(&out.Parameters)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VendorParameters.
-func (in *VendorParameters) DeepCopy() *VendorParameters {
-	if in == nil {
-		return nil
-	}
-	out := new(VendorParameters)
-	in.DeepCopyInto(out)
-	return out
-}
diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha3/doc.go b/constraint/vendor/k8s.io/api/resource/v1alpha3/doc.go
new file mode 100644
index 000000000..ffc21307d
--- /dev/null
+++ b/constraint/vendor/k8s.io/api/resource/v1alpha3/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:prerelease-lifecycle-gen=true
+// +groupName=resource.k8s.io
+
+// Package v1alpha3 is the v1alpha3 version of the resource API.
+package v1alpha3 // import "k8s.io/api/resource/v1alpha3"
diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/constraint/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
new file mode 100644
index 000000000..540f7b818
--- /dev/null
+++ b/constraint/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
@@ -0,0 +1,8488 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/api/resource/v1alpha3/generated.proto
+
+package v1alpha3
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	v11 "k8s.io/api/core/v1"
+	resource "k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} } +func (*AllocatedDeviceStatus) ProtoMessage() {} +func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{0} +} +func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src) +} +func (m *AllocatedDeviceStatus) XXX_Size() int { + return m.Size() +} +func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo + +func (m *AllocationResult) Reset() { *m = AllocationResult{} } +func (*AllocationResult) ProtoMessage() {} +func (*AllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{1} +} +func (m *AllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocationResult.Merge(m, src) +} +func (m *AllocationResult) XXX_Size() int { + return m.Size() +} +func (m *AllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_AllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocationResult proto.InternalMessageInfo + +func (m *BasicDevice) Reset() { *m = BasicDevice{} } +func (*BasicDevice) ProtoMessage() {} +func (*BasicDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{2} +} +func (m *BasicDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BasicDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicDevice.Merge(m, src) +} +func (m *BasicDevice) XXX_Size() int { + return m.Size() +} +func (m *BasicDevice) XXX_DiscardUnknown() { + xxx_messageInfo_BasicDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_BasicDevice proto.InternalMessageInfo + +func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} } +func (*CELDeviceSelector) ProtoMessage() {} +func (*CELDeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{3} +} +func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CELDeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_CELDeviceSelector.Merge(m, src) +} +func (m *CELDeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *CELDeviceSelector) XXX_DiscardUnknown() { + 
xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo + +func (m *Device) Reset() { *m = Device{} } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{4} +} +func (m *Device) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) +} +func (m *Device) XXX_Size() int { + return m.Size() +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} } +func (*DeviceAllocationConfiguration) ProtoMessage() {} +func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{5} +} +func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src) +} +func (m *DeviceAllocationConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo + +func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} } +func (*DeviceAllocationResult) ProtoMessage() {} +func (*DeviceAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{6} +} +func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationResult.Merge(m, src) +} +func (m *DeviceAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo + +func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} } +func (*DeviceAttribute) ProtoMessage() {} +func (*DeviceAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{7} +} +func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAttribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAttribute.Merge(m, src) +} +func (m *DeviceAttribute) XXX_Size() int { + return m.Size() +} +func (m *DeviceAttribute) XXX_DiscardUnknown() { 
+ xxx_messageInfo_DeviceAttribute.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo + +func (m *DeviceClaim) Reset() { *m = DeviceClaim{} } +func (*DeviceClaim) ProtoMessage() {} +func (*DeviceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{8} +} +func (m *DeviceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaim.Merge(m, src) +} +func (m *DeviceClaim) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo + +func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} } +func (*DeviceClaimConfiguration) ProtoMessage() {} +func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{9} +} +func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src) +} +func (m *DeviceClaimConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo + +func (m *DeviceClass) Reset() { *m = DeviceClass{} } +func (*DeviceClass) ProtoMessage() {} +func (*DeviceClass) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{10} +} +func (m *DeviceClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClass.Merge(m, src) +} +func (m *DeviceClass) XXX_Size() int { + return m.Size() +} +func (m *DeviceClass) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClass.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClass proto.InternalMessageInfo + +func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} } +func (*DeviceClassConfiguration) ProtoMessage() {} +func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{11} +} +func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassConfiguration.Merge(m, src) +} +func (m *DeviceClassConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassConfiguration) XXX_DiscardUnknown() { + 
xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo + +func (m *DeviceClassList) Reset() { *m = DeviceClassList{} } +func (*DeviceClassList) ProtoMessage() {} +func (*DeviceClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{12} +} +func (m *DeviceClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassList.Merge(m, src) +} +func (m *DeviceClassList) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassList) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo + +func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} } +func (*DeviceClassSpec) ProtoMessage() {} +func (*DeviceClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{13} +} +func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassSpec.Merge(m, src) +} +func (m *DeviceClassSpec) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo + +func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} } +func (*DeviceConfiguration) ProtoMessage() {} +func (*DeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{14} +} +func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConfiguration.Merge(m, src) +} +func (m *DeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo + +func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} } +func (*DeviceConstraint) ProtoMessage() {} +func (*DeviceConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{15} +} +func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConstraint.Merge(m, src) +} +func (m *DeviceConstraint) XXX_Size() int { + return m.Size() +} +func (m *DeviceConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConstraint.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo + +func (m *DeviceRequest) Reset() { *m = DeviceRequest{} } +func (*DeviceRequest) ProtoMessage() {} +func (*DeviceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{16} +} +func (m *DeviceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequest.Merge(m, src) +} +func (m *DeviceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo + +func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} } +func (*DeviceRequestAllocationResult) ProtoMessage() {} +func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{17} +} +func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src) +} +func (m *DeviceRequestAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo + +func (m *DeviceSelector) Reset() { *m = DeviceSelector{} } +func (*DeviceSelector) ProtoMessage() {} +func (*DeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{18} +} +func (m *DeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSelector.Merge(m, src) +} +func (m *DeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *DeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo + +func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} } +func (*NetworkDeviceData) ProtoMessage() {} +func (*NetworkDeviceData) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{19} +} +func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkDeviceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkDeviceData.Merge(m, src) +} +func (m *NetworkDeviceData) XXX_Size() int { + return m.Size() +} +func (m *NetworkDeviceData) XXX_DiscardUnknown() { + 
xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo + +func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } +func (*OpaqueDeviceConfiguration) ProtoMessage() {} +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{20} +} +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src) +} +func (m *OpaqueDeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo + +func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } +func (*ResourceClaim) ProtoMessage() {} +func (*ResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{21} +} +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaim.Merge(m, src) +} +func (m *ResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo + +func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } +func (*ResourceClaimConsumerReference) ProtoMessage() {} +func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{22} +} +func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src) +} +func (m *ResourceClaimConsumerReference) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo + +func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } +func (*ResourceClaimList) ProtoMessage() {} +func (*ResourceClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{23} +} +func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimList) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ResourceClaimList.Merge(m, src) +} +func (m *ResourceClaimList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo + +func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } +func (*ResourceClaimSpec) ProtoMessage() {} +func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{24} +} +func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSpec.Merge(m, src) +} +func (m *ResourceClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo + +func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } +func (*ResourceClaimStatus) ProtoMessage() {} +func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{25} +} +func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimStatus.Merge(m, src) +} +func (m *ResourceClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo + +func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } +func (*ResourceClaimTemplate) ProtoMessage() {} +func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{26} +} +func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplate.Merge(m, src) +} +func (m *ResourceClaimTemplate) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo + +func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } +func (*ResourceClaimTemplateList) ProtoMessage() {} +func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{27} +} +func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil +} +func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src) +} +func (m *ResourceClaimTemplateList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo + +func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } +func (*ResourceClaimTemplateSpec) ProtoMessage() {} +func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{28} +} +func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src) +} +func (m *ResourceClaimTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo + +func (m *ResourcePool) Reset() { *m = ResourcePool{} } +func (*ResourcePool) ProtoMessage() {} +func (*ResourcePool) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{29} +} +func (m *ResourcePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePool.Merge(m, src) +} +func (m *ResourcePool) XXX_Size() int { + return m.Size() +} +func (m *ResourcePool) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePool.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePool proto.InternalMessageInfo + +func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } +func (*ResourceSlice) ProtoMessage() {} +func (*ResourceSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{30} +} +func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSlice.Merge(m, src) +} +func (m *ResourceSlice) XXX_Size() int { + return m.Size() +} +func (m *ResourceSlice) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo + +func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } +func (*ResourceSliceList) ProtoMessage() {} +func (*ResourceSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{31} +} +func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil 
+} +func (m *ResourceSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceList.Merge(m, src) +} +func (m *ResourceSliceList) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo + +func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} } +func (*ResourceSliceSpec) ProtoMessage() {} +func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_66649ee9bbcd89d2, []int{32} +} +func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceSpec.Merge(m, src) +} +func (m *ResourceSliceSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1alpha3.AllocatedDeviceStatus") + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1alpha3.AllocationResult") + proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice") + proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.AttributesEntry") + proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1alpha3.BasicDevice.CapacityEntry") + proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.CELDeviceSelector") + proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1alpha3.Device") + proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationConfiguration") + proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceAllocationResult") + proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1alpha3.DeviceAttribute") + proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaim") + proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClaimConfiguration") + proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1alpha3.DeviceClass") + proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassConfiguration") + proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassList") + proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceClassSpec") + proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.DeviceConfiguration") + proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1alpha3.DeviceConstraint") + proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequest") + proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult") + proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector") + proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1alpha3.NetworkDeviceData") + proto.RegisterType((*OpaqueDeviceConfiguration)(nil), 
"k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimList") + proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1alpha3.ResourcePool") + proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1alpha3.ResourceSlice") + proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceList") + proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceSliceSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/resource/v1alpha3/generated.proto", fileDescriptor_66649ee9bbcd89d2) +} + +var fileDescriptor_66649ee9bbcd89d2 = []byte{ + // 2030 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0xcd, 0x6f, 0x1c, 0x57, + 0xdd, 0xb3, 0xe3, 0xcf, 0xdf, 0xfa, 0x2b, 0x2f, 0xa4, 0x38, 0xa6, 0xec, 0x3a, 0x53, 0x04, 0x4e, + 0x9b, 0xee, 0x36, 0x4e, 0xd5, 0x16, 0xc2, 0x01, 0x8f, 0xed, 0x06, 0x47, 0x89, 0xe3, 0x3c, 0xb7, + 0x11, 0x81, 0x12, 0x78, 0x9e, 0x7d, 0xb6, 0x07, 0xcf, 0xce, 0x4c, 0xe7, 0xbd, 0x71, 0xea, 0x0b, + 0xaa, 0xe0, 0x1e, 0xf1, 0x0f, 0x20, 0x0e, 0x48, 0x48, 0x5c, 0x80, 0xff, 0x00, 0x24, 0x90, 0x88, + 0xe0, 0x12, 0x09, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xce, 0xdc, 0x73, 0x42, 0xef, 0xcd, 0x9b, 0xcf, + 0xdd, 0x71, 0xc6, 0x55, 0xb1, 0xd2, 0xdb, 0xce, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xb8, 0x72, + 0xf8, 0x0e, 0x6b, 0xd9, 0x5e, 0x9b, 0xf8, 0x76, 0x3b, 0xa0, 0xcc, 0x0b, 0x03, 0x8b, 0xb6, 0x8f, + 0xae, 0x12, 0xc7, 0x3f, 0x20, 0xd7, 0xda, 0xfb, 0xd4, 0xa5, 0x01, 0xe1, 0xb4, 0xd3, 0xf2, 0x03, + 0x8f, 0x7b, 0xe8, 0xe5, 0x88, 0xba, 0x45, 0x7c, 0xbb, 0x15, 0x53, 0xb7, 0x62, 0xea, 0xc5, 0xd7, + 0xf7, 0x6d, 0x7e, 0x10, 0xee, 0xb6, 0x2c, 0xaf, 0xdb, 0xde, 0xf7, 0xf6, 0xbd, 0xb6, 0x64, 0xda, + 0x0d, 0xf7, 0xe4, 0x97, 0xfc, 0x90, 0xbf, 0x22, 0x61, 0x8b, 0x46, 0x46, 0xb5, 0xe5, 0x05, 0x42, + 0x6d, 0x51, 0xe1, 0xe2, 0x9b, 0x29, 0x4d, 0x97, 0x58, 0x07, 0xb6, 0x4b, 0x83, 0xe3, 0xb6, 0x7f, + 0xb8, 0x9f, 0xb7, 0xf7, 0x34, 0x5c, 0xac, 0xdd, 0xa5, 0x9c, 0x0c, 0xd3, 0xd5, 0x2e, 0xe3, 0x0a, + 0x42, 0x97, 0xdb, 0xdd, 0x41, 0x35, 0x6f, 0x3d, 0x8f, 0x81, 0x59, 0x07, 0xb4, 0x4b, 0x8a, 0x7c, + 0xc6, 0xaf, 0x75, 0xb8, 0xb0, 0xea, 0x38, 0x9e, 0x25, 0x60, 0xeb, 0xf4, 0xc8, 0xb6, 0xe8, 0x0e, + 0x27, 0x3c, 0x64, 0xe8, 0xeb, 0x30, 0xde, 0x09, 0xec, 0x23, 0x1a, 0x2c, 0x68, 0x4b, 0xda, 0xf2, + 0x94, 0x39, 0xfb, 0xb8, 0xd7, 0x1c, 0xe9, 0xf7, 0x9a, 0xe3, 0xeb, 0x12, 0x8a, 0x15, 0x16, 0x2d, + 0xc1, 0xa8, 0xef, 0x79, 0xce, 0x42, 0x4d, 0x52, 0x4d, 0x2b, 0xaa, 0xd1, 0x6d, 0xcf, 0x73, 0xb0, + 0xc4, 0x48, 0x49, 0x52, 0xf2, 0x82, 0x5e, 0x90, 0x24, 0xa1, 0x58, 0x61, 0x91, 0x05, 0x60, 0x79, + 0x6e, 0xc7, 0xe6, 0xb6, 0xe7, 0xb2, 0x85, 0xd1, 0x25, 0x7d, 0xb9, 0xbe, 0xd2, 0x6e, 0xa5, 
0x61, + 0x4e, 0x0e, 0xd6, 0xf2, 0x0f, 0xf7, 0x05, 0x80, 0xb5, 0x84, 0xff, 0x5a, 0x47, 0x57, 0x5b, 0x6b, + 0x31, 0x9f, 0x89, 0x94, 0x70, 0x48, 0x40, 0x0c, 0x67, 0xc4, 0xa2, 0x3b, 0x30, 0xda, 0x21, 0x9c, + 0x2c, 0x8c, 0x2d, 0x69, 0xcb, 0xf5, 0x95, 0xd7, 0x4b, 0xc5, 0x2b, 0xbf, 0xb5, 0x30, 0x79, 0xb8, + 0xf1, 0x11, 0xa7, 0x2e, 0x13, 0xc2, 0x93, 0xd3, 0xad, 0x13, 0x4e, 0xb0, 0x14, 0x84, 0x76, 0xa1, + 0xee, 0x52, 0xfe, 0xd0, 0x0b, 0x0e, 0x05, 0x70, 0x61, 0x5c, 0xca, 0xcd, 0x9a, 0x3d, 0x98, 0x9d, + 0xad, 0x2d, 0xc5, 0x20, 0xcf, 0x2d, 0xd8, 0xcc, 0xb9, 0x7e, 0xaf, 0x59, 0xdf, 0x4a, 0xe5, 0xe0, + 0xac, 0x50, 0xe3, 0xef, 0x1a, 0xcc, 0xab, 0x28, 0xd9, 0x9e, 0x8b, 0x29, 0x0b, 0x1d, 0x8e, 0x7e, + 0x04, 0x13, 0x91, 0xe3, 0x98, 0x8c, 0x50, 0x7d, 0xe5, 0xcd, 0x93, 0x95, 0x46, 0xda, 0x8a, 0x62, + 0xcc, 0x39, 0x75, 0xa6, 0x89, 0x08, 0xcf, 0x70, 0x2c, 0x15, 0xdd, 0x83, 0x69, 0xd7, 0xeb, 0xd0, + 0x1d, 0xea, 0x50, 0x8b, 0x7b, 0x81, 0x8c, 0x5e, 0x7d, 0x65, 0x29, 0xab, 0x45, 0xd4, 0x8a, 0xf0, + 0xff, 0x56, 0x86, 0xce, 0x9c, 0xef, 0xf7, 0x9a, 0xd3, 0x59, 0x08, 0xce, 0xc9, 0x31, 0x3e, 0xd5, + 0xa1, 0x6e, 0x12, 0x66, 0x5b, 0x91, 0x46, 0xf4, 0x53, 0x00, 0xc2, 0x79, 0x60, 0xef, 0x86, 0x5c, + 0x9e, 0x45, 0xc4, 0xfd, 0x9b, 0x27, 0x9f, 0x25, 0xc3, 0xde, 0x5a, 0x4d, 0x78, 0x37, 0x5c, 0x1e, + 0x1c, 0x9b, 0xaf, 0xc4, 0x19, 0x90, 0x22, 0x7e, 0xf6, 0xaf, 0xe6, 0xcc, 0xdd, 0x90, 0x38, 0xf6, + 0x9e, 0x4d, 0x3b, 0x5b, 0xa4, 0x4b, 0x71, 0x46, 0x23, 0x3a, 0x82, 0x49, 0x8b, 0xf8, 0xc4, 0xb2, + 0xf9, 0xf1, 0x42, 0x4d, 0x6a, 0x7f, 0xbb, 0xba, 0xf6, 0x35, 0xc5, 0x19, 0xe9, 0xbe, 0xa4, 0x74, + 0x4f, 0xc6, 0xe0, 0x41, 0xcd, 0x89, 0xae, 0x45, 0x07, 0xe6, 0x0a, 0xb6, 0xa3, 0x79, 0xd0, 0x0f, + 0xe9, 0x71, 0x54, 0x71, 0x58, 0xfc, 0x44, 0x6b, 0x30, 0x76, 0x44, 0x9c, 0x90, 0xca, 0xfa, 0xca, + 0x27, 0x6c, 0x79, 0x8c, 0x63, 0xa9, 0x38, 0xe2, 0xfd, 0x56, 0xed, 0x1d, 0x6d, 0xf1, 0x10, 0x66, + 0x72, 0xb6, 0x0e, 0xd1, 0xb5, 0x9e, 0xd7, 0xd5, 0x3a, 0xa9, 0xf6, 0x52, 0xe5, 0x77, 0x43, 0xe2, + 0x72, 0x9b, 0x1f, 0x67, 0x94, 0x19, 0x37, 0xe0, 0xdc, 0xda, 0xc6, 0x2d, 0xd5, 0x4f, 0x54, 0xdc, + 0xd1, 0x0a, 0x00, 0xfd, 0xc8, 0x0f, 0x28, 0x13, 0xb5, 0xa4, 0xba, 0x4a, 0x52, 0xae, 0x1b, 0x09, + 0x06, 0x67, 0xa8, 0x8c, 0x23, 0x50, 0x5d, 0x42, 0xf4, 0x19, 0x97, 0x74, 0xa9, 0xe2, 0x4b, 0x2a, + 0x51, 0xfa, 0x54, 0x62, 0xd0, 0x4d, 0x18, 0xdb, 0x15, 0x91, 0x51, 0xe6, 0x5f, 0xae, 0x1c, 0x44, + 0x73, 0xaa, 0xdf, 0x6b, 0x8e, 0x49, 0x00, 0x8e, 0x44, 0x18, 0x8f, 0x6a, 0xf0, 0xd5, 0x62, 0xc1, + 0xac, 0x79, 0xee, 0x9e, 0xbd, 0x1f, 0x06, 0xf2, 0x03, 0x7d, 0x07, 0xc6, 0x23, 0x91, 0xca, 0xa2, + 0xe5, 0xb8, 0xab, 0xed, 0x48, 0xe8, 0xb3, 0x5e, 0xf3, 0xa5, 0x22, 0x6b, 0x84, 0xc1, 0x8a, 0x0f, + 0x2d, 0xc3, 0x64, 0x40, 0x3f, 0x0c, 0x29, 0xe3, 0x4c, 0xe6, 0xdd, 0x94, 0x39, 0x2d, 0x52, 0x07, + 0x2b, 0x18, 0x4e, 0xb0, 0xe8, 0x63, 0x0d, 0xce, 0x47, 0x55, 0x99, 0xb3, 0x41, 0x55, 0xe4, 0xd5, + 0x2a, 0x39, 0x91, 0x63, 0x34, 0xbf, 0xa2, 0x8c, 0x3d, 0x3f, 0x04, 0x89, 0x87, 0xa9, 0x32, 0xfe, + 0xa3, 0xc1, 0x4b, 0xc3, 0x3b, 0x08, 0xda, 0x83, 0x89, 0x40, 0xfe, 0x8a, 0x8b, 0xf7, 0x7a, 0x15, + 0x83, 0xd4, 0x31, 0xcb, 0xfb, 0x51, 0xf4, 0xcd, 0x70, 0x2c, 0x1c, 0x59, 0x30, 0x6e, 0x49, 0x9b, + 0x54, 0x95, 0x5e, 0x3f, 0x5d, 0xbf, 0xcb, 0x7b, 0x20, 0x19, 0x42, 0x11, 0x18, 0x2b, 0xd1, 0xc6, + 0x6f, 0x35, 0x98, 0x2b, 0x54, 0x11, 0x6a, 0x80, 0x6e, 0xbb, 0x5c, 0xa6, 0x95, 0x1e, 0xc5, 0x68, + 0xd3, 0xe5, 0xf7, 0x44, 0xb2, 0x63, 0x81, 0x40, 0x97, 0x60, 0x74, 0x57, 0x8c, 0x40, 0x11, 0x8e, + 0x49, 0x73, 0xa6, 0xdf, 0x6b, 0x4e, 0x99, 0x9e, 0xe7, 0x44, 0x14, 0x12, 0x85, 0xbe, 0x01, 0xe3, + 0x8c, 0x07, 0xb6, 
0xbb, 0xbf, 0x30, 0x2a, 0xb3, 0x45, 0xf6, 0xfb, 0x1d, 0x09, 0x89, 0xc8, 0x14, + 0x1a, 0xbd, 0x0a, 0x13, 0x47, 0x34, 0x90, 0x15, 0x32, 0x26, 0x29, 0x65, 0x37, 0xbd, 0x17, 0x81, + 0x22, 0xd2, 0x98, 0xc0, 0xf8, 0x7d, 0x0d, 0xea, 0x2a, 0x80, 0x0e, 0xb1, 0xbb, 0xe8, 0x7e, 0x26, + 0xa1, 0xa2, 0x48, 0xbc, 0x76, 0x8a, 0x48, 0x98, 0xf3, 0x71, 0xf3, 0x1a, 0x92, 0x81, 0x14, 0xea, + 0x96, 0xe7, 0x32, 0x1e, 0x10, 0xdb, 0x55, 0xe9, 0x9a, 0x6f, 0x10, 0x27, 0x25, 0x9e, 0x62, 0x33, + 0xcf, 0x2b, 0x05, 0xf5, 0x14, 0xc6, 0x70, 0x56, 0x2e, 0x7a, 0x90, 0x84, 0x58, 0x97, 0x1a, 0xde, + 0xaa, 0xa4, 0x41, 0x1c, 0xbe, 0x5a, 0x74, 0xff, 0xaa, 0xc1, 0x42, 0x19, 0x53, 0xae, 0x1e, 0xb5, + 0xcf, 0x54, 0x8f, 0xb5, 0xb3, 0xab, 0xc7, 0x3f, 0x69, 0x99, 0xd8, 0x33, 0x86, 0x7e, 0x0c, 0x93, + 0x62, 0x19, 0x92, 0xbb, 0x4d, 0xb4, 0x0e, 0xbc, 0x51, 0x6d, 0x75, 0xba, 0xb3, 0xfb, 0x13, 0x6a, + 0xf1, 0xdb, 0x94, 0x93, 0xb4, 0x19, 0xa7, 0x30, 0x9c, 0x48, 0x15, 0x9b, 0x13, 0xf3, 0xa9, 0x75, + 0x9a, 0x41, 0x24, 0x4d, 0xdb, 0xf1, 0xa9, 0x95, 0xf6, 0x6b, 0xf1, 0x85, 0xa5, 0x20, 0xe3, 0x97, + 0xd9, 0x60, 0x30, 0x96, 0x0f, 0x46, 0x99, 0x8b, 0xb5, 0xb3, 0x73, 0xf1, 0x1f, 0x93, 0x56, 0x20, + 0xed, 0xbb, 0x65, 0x33, 0x8e, 0x3e, 0x18, 0x70, 0x73, 0xab, 0x9a, 0x9b, 0x05, 0xb7, 0x74, 0x72, + 0x52, 0x65, 0x31, 0x24, 0xe3, 0xe2, 0x2d, 0x18, 0xb3, 0x39, 0xed, 0xc6, 0xf5, 0x75, 0xb9, 0xb2, + 0x8f, 0xcd, 0x19, 0x25, 0x75, 0x6c, 0x53, 0xf0, 0xe3, 0x48, 0x8c, 0xf1, 0x24, 0x7f, 0x02, 0xe1, + 0x7b, 0xf4, 0x43, 0x98, 0x62, 0x6a, 0x22, 0xc7, 0x5d, 0xe2, 0x4a, 0x15, 0x3d, 0xc9, 0x7a, 0x77, + 0x4e, 0xa9, 0x9a, 0x8a, 0x21, 0x0c, 0xa7, 0x12, 0x33, 0x15, 0x5c, 0x3b, 0x55, 0x05, 0x17, 0xe2, + 0x5f, 0x5a, 0xc1, 0x01, 0x0c, 0x0b, 0x20, 0xfa, 0x01, 0x8c, 0x7b, 0x3e, 0xf9, 0x30, 0xa4, 0x2a, + 0x2a, 0xcf, 0xd9, 0xe0, 0xee, 0x48, 0xda, 0x61, 0x69, 0x02, 0x42, 0x67, 0x84, 0xc6, 0x4a, 0xa4, + 0xf1, 0x48, 0x83, 0xf9, 0x62, 0x33, 0x3b, 0x45, 0xb7, 0xd8, 0x86, 0xd9, 0x2e, 0xe1, 0xd6, 0x41, + 0x32, 0x50, 0xd4, 0x5d, 0x69, 0xb9, 0xdf, 0x6b, 0xce, 0xde, 0xce, 0x61, 0x9e, 0xf5, 0x9a, 0xe8, + 0xdd, 0xd0, 0x71, 0x8e, 0xf3, 0x3b, 0x63, 0x81, 0xdf, 0xf8, 0xb9, 0x0e, 0x33, 0xb9, 0xde, 0x5d, + 0x61, 0x3b, 0x5a, 0x85, 0xb9, 0x4e, 0xea, 0x6c, 0x81, 0x50, 0x66, 0x7c, 0x59, 0x11, 0x67, 0x33, + 0x45, 0xf2, 0x15, 0xe9, 0xf3, 0xa9, 0xa3, 0x7f, 0xee, 0xa9, 0x73, 0x0f, 0x66, 0x49, 0x32, 0xad, + 0x6f, 0x7b, 0x1d, 0xaa, 0x66, 0x65, 0x4b, 0x71, 0xcd, 0xae, 0xe6, 0xb0, 0xcf, 0x7a, 0xcd, 0x2f, + 0x15, 0x67, 0xbc, 0x80, 0xe3, 0x82, 0x14, 0xf4, 0x0a, 0x8c, 0x59, 0x5e, 0xe8, 0x72, 0x39, 0x50, + 0xf5, 0xb4, 0x54, 0xd6, 0x04, 0x10, 0x47, 0x38, 0x74, 0x15, 0xea, 0xa4, 0xd3, 0xb5, 0xdd, 0x55, + 0xcb, 0xa2, 0x8c, 0xc9, 0x6b, 0xdc, 0x64, 0x34, 0xa5, 0x57, 0x53, 0x30, 0xce, 0xd2, 0x18, 0xff, + 0xd5, 0xe2, 0x1d, 0xb1, 0x64, 0x97, 0x41, 0x97, 0xc5, 0x66, 0x24, 0x51, 0x2a, 0x30, 0x99, 0xe5, + 0x46, 0x82, 0x71, 0x8c, 0xcf, 0x5c, 0xb7, 0x6b, 0x95, 0xae, 0xdb, 0x7a, 0x85, 0xeb, 0xf6, 0xe8, + 0x89, 0xd7, 0xed, 0xc2, 0x89, 0xc7, 0x2a, 0x9c, 0xf8, 0x03, 0x98, 0x2d, 0xec, 0xf4, 0x37, 0x41, + 0xb7, 0xa8, 0xa3, 0x8a, 0xee, 0x39, 0xb7, 0xde, 0x81, 0x1b, 0x81, 0x39, 0xd1, 0xef, 0x35, 0xf5, + 0xb5, 0x8d, 0x5b, 0x58, 0x08, 0x31, 0x7e, 0xa7, 0xc1, 0xb9, 0x81, 0x9b, 0x31, 0xba, 0x0e, 0x33, + 0xb6, 0xcb, 0x69, 0xb0, 0x47, 0x2c, 0xba, 0x95, 0xa6, 0xf8, 0x05, 0x75, 0xaa, 0x99, 0xcd, 0x2c, + 0x12, 0xe7, 0x69, 0xd1, 0x45, 0xd0, 0x6d, 0x3f, 0xde, 0xae, 0xa5, 0xb6, 0xcd, 0x6d, 0x86, 0x05, + 0x4c, 0xd4, 0xc3, 0x01, 0x09, 0x3a, 0x0f, 0x49, 0x40, 0x57, 0x3b, 0x1d, 0x71, 0xdf, 0x50, 0x3e, + 0x4d, 0xea, 0xe1, 0xbb, 0x79, 0x34, 0x2e, 
0xd2, 0x1b, 0xbf, 0xd1, 0xe0, 0x62, 0x69, 0x27, 0xa9, + 0xfc, 0x80, 0x42, 0x00, 0x7c, 0x12, 0x90, 0x2e, 0xe5, 0x34, 0x60, 0x43, 0xa6, 0x6b, 0x85, 0x77, + 0x89, 0x64, 0x70, 0x6f, 0x27, 0x82, 0x70, 0x46, 0xa8, 0xf1, 0xab, 0x1a, 0xcc, 0x60, 0x15, 0x8f, + 0x68, 0x55, 0xfc, 0xff, 0xaf, 0x0b, 0x77, 0x73, 0xeb, 0xc2, 0x73, 0x52, 0x23, 0x67, 0x5c, 0xd9, + 0xc2, 0x80, 0xee, 0x8b, 0x25, 0x9a, 0xf0, 0x90, 0x55, 0xbb, 0xf8, 0xe4, 0x85, 0x4a, 0xc6, 0x34, + 0x08, 0xd1, 0x37, 0x56, 0x02, 0x8d, 0xbe, 0x06, 0x8d, 0x1c, 0xbd, 0xe8, 0xf4, 0x61, 0x97, 0x06, + 0x98, 0xee, 0xd1, 0x80, 0xba, 0x16, 0x45, 0x57, 0x60, 0x92, 0xf8, 0xf6, 0x8d, 0xc0, 0x0b, 0x7d, + 0x15, 0xd1, 0x64, 0x94, 0xaf, 0x6e, 0x6f, 0x4a, 0x38, 0x4e, 0x28, 0x04, 0x75, 0x6c, 0x91, 0xca, + 0xab, 0xcc, 0x7a, 0x1d, 0xc1, 0x71, 0x42, 0x91, 0xb4, 0xef, 0xd1, 0xd2, 0xf6, 0x6d, 0x82, 0x1e, + 0xda, 0x1d, 0x75, 0x27, 0x78, 0x43, 0x11, 0xe8, 0xef, 0x6f, 0xae, 0x3f, 0xeb, 0x35, 0x2f, 0x95, + 0x3d, 0xfe, 0xf1, 0x63, 0x9f, 0xb2, 0xd6, 0xfb, 0x9b, 0xeb, 0x58, 0x30, 0x1b, 0x7f, 0xd6, 0xe0, + 0x5c, 0xee, 0x90, 0x67, 0xb0, 0xd2, 0x6c, 0xe7, 0x57, 0x9a, 0xd7, 0x4e, 0x11, 0xb2, 0x92, 0xa5, + 0xc6, 0x2e, 0x1c, 0x42, 0x6e, 0x35, 0xef, 0x15, 0x1f, 0xc3, 0x2e, 0x57, 0xbe, 0x39, 0x94, 0xbf, + 0x80, 0x19, 0x7f, 0xab, 0xc1, 0xf9, 0x21, 0x59, 0x84, 0x1e, 0x00, 0xa4, 0x33, 0x66, 0x88, 0xd3, + 0x86, 0x28, 0x1c, 0xb8, 0xe7, 0xce, 0xca, 0x27, 0xaa, 0x14, 0x9a, 0x91, 0x88, 0x18, 0xd4, 0x03, + 0xca, 0x68, 0x70, 0x44, 0x3b, 0xef, 0x7a, 0x81, 0x72, 0xdd, 0xb7, 0x4f, 0xe1, 0xba, 0x81, 0xec, + 0x4d, 0xef, 0x5e, 0x38, 0x15, 0x8c, 0xb3, 0x5a, 0xd0, 0x83, 0xd4, 0x85, 0xd1, 0xdb, 0xeb, 0xb5, + 0x4a, 0x27, 0xca, 0x3f, 0x1b, 0x9f, 0xe0, 0xcc, 0x7f, 0x6a, 0x70, 0x21, 0x67, 0xe4, 0x7b, 0xb4, + 0xeb, 0x3b, 0x84, 0xd3, 0x33, 0x68, 0x46, 0xf7, 0x73, 0xcd, 0xe8, 0xed, 0x53, 0x78, 0x32, 0x36, + 0xb2, 0xf4, 0x16, 0xf3, 0x0f, 0x0d, 0x2e, 0x0e, 0xe5, 0x38, 0x83, 0xe2, 0xfa, 0x5e, 0xbe, 0xb8, + 0xae, 0x7d, 0x86, 0x73, 0x95, 0xdf, 0x1c, 0x2e, 0x96, 0xfa, 0xe1, 0x0b, 0x39, 0x3d, 0x8c, 0x3f, + 0x68, 0x30, 0x1d, 0x53, 0x8a, 0x75, 0xa9, 0xc2, 0xce, 0xbc, 0x02, 0xa0, 0xfe, 0x30, 0x89, 0x6f, + 0xf7, 0x7a, 0x6a, 0xf7, 0x8d, 0x04, 0x83, 0x33, 0x54, 0xe8, 0x26, 0xa0, 0xd8, 0xc2, 0x1d, 0x47, + 0x2e, 0x05, 0x62, 0xf5, 0xd4, 0x25, 0xef, 0xa2, 0xe2, 0x45, 0x78, 0x80, 0x02, 0x0f, 0xe1, 0x32, + 0xfe, 0xa2, 0xa5, 0x73, 0x5b, 0x82, 0x5f, 0x54, 0xcf, 0x4b, 0xe3, 0x4a, 0x3d, 0x9f, 0x9d, 0x3b, + 0x92, 0xf2, 0x85, 0x9d, 0x3b, 0xd2, 0xba, 0x92, 0x92, 0x78, 0xa4, 0x17, 0x4e, 0x21, 0x4b, 0xa1, + 0xea, 0x96, 0x77, 0x2b, 0xf3, 0x37, 0x59, 0x7d, 0xe5, 0xd5, 0x6a, 0xe6, 0x88, 0x34, 0x1d, 0xba, + 0xe3, 0x5f, 0x81, 0x49, 0xd7, 0xeb, 0x44, 0xfb, 0x70, 0x61, 0xbb, 0xd8, 0x52, 0x70, 0x9c, 0x50, + 0x0c, 0xfc, 0x91, 0x33, 0xfa, 0xf9, 0xfc, 0x91, 0x23, 0x37, 0x22, 0xc7, 0x11, 0x04, 0xf1, 0xf5, + 0x21, 0xdd, 0x88, 0x14, 0x1c, 0x27, 0x14, 0xe8, 0x4e, 0x3a, 0x5f, 0xc6, 0x65, 0x4c, 0xbe, 0x56, + 0x65, 0x44, 0x97, 0x0f, 0x14, 0xd3, 0x7c, 0xfc, 0xb4, 0x31, 0xf2, 0xe4, 0x69, 0x63, 0xe4, 0x93, + 0xa7, 0x8d, 0x91, 0x8f, 0xfb, 0x0d, 0xed, 0x71, 0xbf, 0xa1, 0x3d, 0xe9, 0x37, 0xb4, 0x4f, 0xfa, + 0x0d, 0xed, 0xd3, 0x7e, 0x43, 0xfb, 0xc5, 0xbf, 0x1b, 0x23, 0xdf, 0x7f, 0xf9, 0xa4, 0x7f, 0x95, + 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x60, 0x85, 0x64, 0x74, 0x1e, 0x00, 0x00, +} + +func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocatedDeviceStatus) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NetworkData != nil { + { + size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x1a + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BasicDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BasicDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 
0x12 + } + } + if len(m.Attributes) > 0 { + keysForAttributes := make([]string, 0, len(m.Attributes)) + for k := range m.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAttributes[iNdEx]) + copy(dAtA[i:], keysForAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Device) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Device) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Basic != nil { + { + size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationResult) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VersionValue != nil { + i -= len(*m.VersionValue) + copy(dAtA[i:], *m.VersionValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue))) + i-- + dAtA[i] = 0x2a + } + if m.StringValue != nil { + i -= len(*m.StringValue) + copy(dAtA[i:], *m.StringValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue))) + i-- + dAtA[i] = 0x22 + } + if m.BoolValue != nil { + i-- + if *m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IntValue != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *DeviceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m 
*DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil +} + +func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Opaque != nil { + { + size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchAttribute != nil { + i -= len(*m.MatchAttribute) + copy(dAtA[i:], *m.MatchAttribute) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute))) + i-- + dAtA[i] = 0x12 + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x28 + i -= len(m.AllocationMode) + copy(dAtA[i:], 
m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x22 + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x22 + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x1a + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0x12 + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CEL != nil { + { + size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.HardwareAddress) + copy(dAtA[i:], m.HardwareAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress))) + i-- + dAtA[i] = 0x1a + if len(m.IPs) > 0 { + for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IPs[iNdEx]) + copy(dAtA[i:], m.IPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.InterfaceName) + copy(dAtA[i:], 
m.InterfaceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ReservedFor) > 0 { + for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Allocation != nil { + { + size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourcePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } 
+ i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i-- + if m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllocatedDeviceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NetworkData != nil { + l = m.NetworkData.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BasicDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *CELDeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Device) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Basic != nil { + l = m.Basic.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceAllocationConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Source) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceAttribute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IntValue != nil { + n += 1 + sovGenerated(uint64(*m.IntValue)) + } + if m.BoolValue != nil { + n += 2 + } + if m.StringValue != nil { + l = len(*m.StringValue) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VersionValue != nil { + l = len(*m.VersionValue) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClaimConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n 
+} + +func (m *DeviceClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClassSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Opaque != nil { + l = m.Opaque.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchAttribute != nil { + l = len(*m.MatchAttribute) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeviceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AllocationMode) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + if m.AdminAccess != nil { + n += 2 + } + return n +} + +func (m *DeviceRequestAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + if m.AdminAccess != nil { + n += 2 + } + return n +} + +func (m *DeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CEL != nil { + l = m.CEL.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkDeviceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InterfaceName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.IPs) > 0 { + for _, s := range m.IPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.HardwareAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OpaqueDeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Parameters.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimConsumerReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m 
*ResourceClaimList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allocation != nil { + l = m.Allocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ReservedFor) > 0 { + for _, e := range m.ReservedFor { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourcePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + n += 1 + sovGenerated(uint64(m.ResourceSliceCount)) + return n +} + +func (m *ResourceSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceSliceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Pool.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllocatedDeviceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&AllocatedDeviceStatus{`, + `Driver:` + 
fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllocationResult{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BasicDevice) String() string { + if this == nil { + return "nil" + } + keysForAttributes := make([]string, 0, len(this.Attributes)) + for k := range this.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + mapStringForAttributes := "map[QualifiedName]DeviceAttribute{" + for _, k := range keysForAttributes { + mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)]) + } + mapStringForAttributes += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "map[QualifiedName]resource.Quantity{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&BasicDevice{`, + `Attributes:` + mapStringForAttributes + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *CELDeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CELDeviceSelector{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *Device) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Device{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Basic:` + strings.Replace(this.Basic.String(), "BasicDevice", "BasicDevice", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAllocationConfiguration{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationResult) String() string { + if this == nil { + return "nil" + } + repeatedStringForResults := "[]DeviceRequestAllocationResult{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + "," + } + repeatedStringForResults += "}" + repeatedStringForConfig := "[]DeviceAllocationConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += 
strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceAllocationResult{`, + `Results:` + repeatedStringForResults + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAttribute) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAttribute{`, + `IntValue:` + valueToStringGenerated(this.IntValue) + `,`, + `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`, + `StringValue:` + valueToStringGenerated(this.StringValue) + `,`, + `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaim) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequests := "[]DeviceRequest{" + for _, f := range this.Requests { + repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForRequests += "}" + repeatedStringForConstraints := "[]DeviceConstraint{" + for _, f := range this.Constraints { + repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForConstraints += "}" + repeatedStringForConfig := "[]DeviceClaimConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClaim{`, + `Requests:` + repeatedStringForRequests + `,`, + `Constraints:` + repeatedStringForConstraints + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaimConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClaimConfiguration{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClassConfiguration{`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DeviceClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeviceClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), 
`&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + repeatedStringForConfig := "[]DeviceClassConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClassSpec{`, + `Selectors:` + repeatedStringForSelectors + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceConfiguration{`, + `Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceConstraint{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&DeviceRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequestAllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceRequestAllocationResult{`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceSelector{`, + `CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkDeviceData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkDeviceData{`, + `InterfaceName:` + fmt.Sprintf("%v", this.InterfaceName) + `,`, + `IPs:` + fmt.Sprintf("%v", this.IPs) + `,`, + `HardwareAddress:` + fmt.Sprintf("%v", this.HardwareAddress) + `,`, + `}`, + }, "") + return s +} +func (this *OpaqueDeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OpaqueDeviceConfiguration{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + 
`,`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimConsumerReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimConsumerReference{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimSpec{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{" + for _, f := range this.ReservedFor { + repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForReservedFor += "}" + repeatedStringForDevices := "[]AllocatedDeviceStatus{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "AllocatedDeviceStatus", "AllocatedDeviceStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += "}" + s := strings.Join([]string{`&ResourceClaimStatus{`, + `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, + `ReservedFor:` + repeatedStringForReservedFor + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateList) String() string { + if this == nil 
{ + return "nil" + } + repeatedStringForItems := "[]ResourceClaimTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePool) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePool{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSlice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceSlice{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDevices := "[]Device{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += "}" + s := strings.Join([]string{`&ResourceSliceSpec{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NetworkData == nil { + m.NetworkData = &NetworkDeviceData{} + } + if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BasicDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[QualifiedName]DeviceAttribute) + } + var mapkey QualifiedName + mapvalue := &DeviceAttribute{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceAttribute{} + if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(map[QualifiedName]resource.Quantity) + } + var mapkey QualifiedName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Device) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Device: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Basic == nil { + m.Basic = &BasicDevice{} + } + if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, DeviceRequestAllocationResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceAllocationConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntValue = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BoolValue = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StringValue = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VersionValue = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, DeviceRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, DeviceConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClaimConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DeviceClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClassConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Opaque == nil { + m.Opaque = &OpaqueDeviceConfiguration{} + } + if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := 
FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.MatchAttribute = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CEL == nil { + m.CEL = &CELDeviceSelector{} + } + if 
err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkDeviceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InterfaceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HardwareAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocation == nil { + m.Allocation = &AllocationResult{} + } + if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{}) + if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, AllocatedDeviceStatus{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items 
= append(m.Items, ResourceClaimTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourcePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePool: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType) + } + m.ResourceSliceCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResourceSliceCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllNodes = bool(v != 0) + case 6: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, Device{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/constraint/vendor/k8s.io/api/resource/v1alpha3/generated.proto new file mode 100644 index 000000000..e802a0143 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1alpha3/generated.proto @@ -0,0 +1,884 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.resource.v1alpha3; + +import "k8s.io/api/core/v1/generated.proto"; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; + +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; + +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/resource/v1alpha3"; + +// AllocatedDeviceStatus contains the status of an allocated device, if the +// driver chooses to report it. This may include driver-specific information. +message AllocatedDeviceStatus { + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 1; + + // This name together with the driver name and the device name field + // identifies which device was allocated (`<driver name>/<pool name>/<device name>`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + optional string pool = 2; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + optional string device = 3; + + // Conditions contains the latest observation of the device's state. + // If the device has been configured according to the class and claim + // config references, the `Ready` condition should be True. + // + // +optional + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4; + + // Data contains arbitrary driver-specific data. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +optional + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5; + + // NetworkData contains network-related information specific to the device. + // + // +optional + optional NetworkDeviceData networkData = 6; +} + +// AllocationResult contains attributes of an allocated resource. +message AllocationResult { + // Devices is the result of allocating devices. + // + // +optional + optional DeviceAllocationResult devices = 1; + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. + // + // +optional + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3; +} + +// BasicDevice defines one device instance. +message BasicDevice { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map<string, DeviceAttribute> attributes = 1; + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 2; +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +message CELDeviceSelector { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not.
Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com"). + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". The input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // The length of the expression must be smaller or equal to 10 Ki. The + // cost of evaluating it is also limited based on the estimated number + // of logical steps. + // + // +required + optional string expression = 1; +} + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +message Device { + // Name is the unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + optional string name = 1; + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + optional BasicDevice basic = 2; +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +message DeviceAllocationConfiguration { + // Source records whether the configuration comes from a class (and thus + // is not something that a normal user would have been able to set) + // or from a claim. + // + // +required + optional string source = 1; + + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 2; + + optional DeviceConfiguration deviceConfiguration = 3; +} + +// DeviceAllocationResult is the result of allocating devices. +message DeviceAllocationResult { + // Results lists all allocated devices.
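The CELDeviceSelector comments above describe how a selector expression sees a single `device` object with `driver`, `attributes`, and `capacity`. As a rough illustration only, the sketch below attaches such an expression to a selector using the vendored Go types from this same package; the import alias, the driver name, and the attribute names are assumptions for the sketch, and the expression just reuses the cel.bind() example from the comment.

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

func main() {
	// Hypothetical selector: only consider devices from the dra.example.com
	// driver, then reuse the cel.bind() pattern documented above to test two
	// boolean attributes published under that driver's prefix.
	selector := resourcev1alpha3.DeviceSelector{
		CEL: &resourcev1alpha3.CELDeviceSelector{
			Expression: `device.driver == "dra.example.com" && ` +
				`cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)`,
		},
	}
	fmt.Println(selector.CEL.Expression)
}
```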
+ // + // +optional + // +listType=atomic + repeated DeviceRequestAllocationResult results = 1; + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + repeated DeviceAllocationConfiguration config = 2; +} + +// DeviceAttribute must have exactly one field set. +message DeviceAttribute { + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + optional int64 int = 2; + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + optional bool bool = 3; + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string string = 4; + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string version = 5; +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +message DeviceClaim { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + repeated DeviceRequest requests = 1; + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + repeated DeviceConstraint constraints = 2; + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + repeated DeviceClaimConfiguration config = 3; +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +message DeviceClaimConfiguration { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + optional DeviceConfiguration deviceConfiguration = 2; +} + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message DeviceClass { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + optional DeviceClassSpec spec = 2; +} + +// DeviceClassConfiguration is used in DeviceClass. +message DeviceClassConfiguration { + optional DeviceConfiguration deviceConfiguration = 1; +} + +// DeviceClassList is a collection of classes. 
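The DeviceClass and DeviceClassConfiguration messages above bundle selectors and vendor configuration that claims inherit simply by naming the class. A minimal sketch, assuming the vendored `k8s.io/api/resource/v1alpha3` and apimachinery packages are imported as shown; the class name, driver name, and JSON payload are invented for illustration.

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// A vendor-style class: every device claimed via this class must satisfy
	// the selector, and the opaque JSON is handed to the named driver.
	class := resourcev1alpha3.DeviceClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gpu.example.com"},
		Spec: resourcev1alpha3.DeviceClassSpec{
			Selectors: []resourcev1alpha3.DeviceSelector{{
				CEL: &resourcev1alpha3.CELDeviceSelector{
					Expression: `device.driver == "gpu.example.com"`,
				},
			}},
			Config: []resourcev1alpha3.DeviceClassConfiguration{{
				DeviceConfiguration: resourcev1alpha3.DeviceConfiguration{
					Opaque: &resourcev1alpha3.OpaqueDeviceConfiguration{
						Driver:     "gpu.example.com",
						Parameters: runtime.RawExtension{Raw: []byte(`{"sharing":"none"}`)},
					},
				},
			}},
		},
	}
	fmt.Println(class.Name, len(class.Spec.Selectors))
}
```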
+message DeviceClassList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource classes. + repeated DeviceClass items = 2; +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +message DeviceClassSpec { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 1; + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + repeated DeviceClassConfiguration config = 2; +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +message DeviceConfiguration { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + optional OpaqueDeviceConfiguration opaque = 1; +} + +// DeviceConstraint must have exactly one field set besides Requests. +message DeviceConstraint { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + optional string matchAttribute = 2; +} + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. Its absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. +message DeviceRequest { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + optional string name = 1; + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster.
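The DeviceConstraint message above ties several requests to a shared attribute, as in its hypothetical "dra.example.com/numa" example. Here is a short sketch of the same idea on the Go side of these generated types; the request names and the attribute are illustrative only.

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

func main() {
	// Hypothetical constraint: the devices chosen for the "gpu" and "nic"
	// requests must report the same value for the fully qualified
	// "dra.example.com/numa" attribute, i.e. land in the same NUMA node.
	numa := resourcev1alpha3.FullyQualifiedName("dra.example.com/numa")
	claim := resourcev1alpha3.DeviceClaim{
		Constraints: []resourcev1alpha3.DeviceConstraint{{
			Requests:       []string{"gpu", "nic"},
			MatchAttribute: &numa,
		}},
	}
	fmt.Println(*claim.Constraints[0].MatchAttribute)
}
```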
+ // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + optional string deviceClassName = 2; + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 3; + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AllocationMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + optional string allocationMode = 4; + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. + // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + optional int64 count = 5; + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + optional bool adminAccess = 6; +} + +// DeviceRequestAllocationResult contains the allocation result for one request. +message DeviceRequestAllocationResult { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + optional string request = 1; + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 2; + + // This name together with the driver name and the device name field + // identify which device was allocated (`<driver name>/<pool name>/<device name>`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + optional string pool = 3; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label.
+ // + // +required + optional string device = 4; + + // AdminAccess indicates that this device was allocated for + // administrative access. See the corresponding request field + // for a definition of mode. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + optional bool adminAccess = 5; +} + +// DeviceSelector must have exactly one field set. +message DeviceSelector { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + optional CELDeviceSelector cel = 1; +} + +// NetworkDeviceData provides network-related details for the allocated device. +// This information may be filled by drivers or other components to configure +// or identify the device within a network context. +message NetworkDeviceData { + // InterfaceName specifies the name of the network interface associated with + // the allocated device. This might be the name of a physical or virtual + // network interface being configured in the pod. + // + // Must not be longer than 256 characters. + // + // +optional + optional string interfaceName = 1; + + // IPs lists the network addresses assigned to the device's network interface. + // This can include both IPv4 and IPv6 addresses. + // The IPs are in the CIDR notation, which includes both the address and the + // associated subnet mask. + // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6. + // + // +optional + // +listType=atomic + repeated string ips = 2; + + // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface. + // + // Must not be longer than 128 characters. + // + // +optional + optional string hardwareAddress = 3; +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +message OpaqueDeviceConfiguration { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 1; + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +required + optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2; +} + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaim { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes what is being requested and how to configure it. 
+ // The spec is immutable. + optional ResourceClaimSpec spec = 2; + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + optional ResourceClaimStatus status = 3; +} + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +message ResourceClaimConsumerReference { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + optional string apiGroup = 1; + + // Resource is the type of resource being referenced, for example "pods". + // +required + optional string resource = 3; + + // Name is the name of resource being referenced. + // +required + optional string name = 4; + + // UID identifies exactly one incarnation of the resource. + // +required + optional string uid = 5; +} + +// ResourceClaimList is a collection of claims. +message ResourceClaimList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claims. + repeated ResourceClaim items = 2; +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +message ResourceClaimSpec { + // Devices defines how to request devices. + // + // +optional + optional DeviceClaim devices = 1; +} + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. +message ResourceClaimStatus { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + optional AllocationResult allocation = 1; + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. + // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 256 such reservations. This may get increased in + // the future, but not reduced. + // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + repeated ResourceClaimConsumerReference reservedFor = 2; + + // Devices contains the status of each device allocated for this + // claim, as reported by the driver. This can include driver-specific + // information. Entries are owned by their respective drivers. + // + // +optional + // +listType=map + // +listMapKey=driver + // +listMapKey=device + // +listMapKey=pool + // +featureGate=DRAResourceClaimDeviceStatus + repeated AllocatedDeviceStatus devices = 4; +} + +// ResourceClaimTemplate is used to produce ResourceClaim objects. 
+// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaimTemplate { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + optional ResourceClaimTemplateSpec spec = 2; +} + +// ResourceClaimTemplateList is a collection of claim templates. +message ResourceClaimTemplateList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claim templates. + repeated ResourceClaimTemplate items = 2; +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +message ResourceClaimTemplateSpec { + // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + optional ResourceClaimSpec spec = 2; +} + +// ResourcePool describes the pool that ResourceSlices belong to. +message ResourcePool { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + optional string name = 1; + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + optional int64 generation = 2; + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. + // + // +required + optional int64 resourceSliceCount = 3; +} + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. +// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple <driver name>, <pool name>, <device name>.
+// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceSlice { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + optional ResourceSliceSpec spec = 2; +} + +// ResourceSliceList is a collection of ResourceSlices. +message ResourceSliceList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource ResourceSlices. + repeated ResourceSlice items = 2; +} + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +message ResourceSliceSpec { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable. + // + // +required + optional string driver = 1; + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + optional ResourcePool pool = 2; + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. + // + // +optional + // +oneOf=NodeSelection + optional string nodeName = 3; + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 4; + + // AllNodes indicates that all nodes have access to the resources in the pool. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional bool allNodes = 5; + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. 
+ // + // +optional + // +listType=atomic + repeated Device devices = 6; +} + diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha3/register.go b/constraint/vendor/k8s.io/api/resource/v1alpha3/register.go new file mode 100644 index 000000000..8573758e3 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1alpha3/register.go @@ -0,0 +1,60 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "resource.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha3"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DeviceClass{}, + &DeviceClassList{}, + &ResourceClaim{}, + &ResourceClaimList{}, + &ResourceClaimTemplate{}, + &ResourceClaimTemplateList{}, + &ResourceSlice{}, + &ResourceSliceList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha3/types.go b/constraint/vendor/k8s.io/api/resource/v1alpha3/types.go new file mode 100644 index 000000000..fb4d7041d --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1alpha3/types.go @@ -0,0 +1,1081 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha3 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation" +) + +const ( + // Finalizer is the finalizer that gets set for claims + // which were allocated through a builtin controller. + // Reserved for use by Kubernetes, DRA driver controllers must + // use their own finalizer. + Finalizer = "resource.kubernetes.io/delete-protection" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceSlice + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. +// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple <driver name>, <pool name>, <device name>. +// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + Spec ResourceSliceSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +const ( + // ResourceSliceSelectorNodeName can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.NodeName]. + ResourceSliceSelectorNodeName = "spec.nodeName" + // ResourceSliceSelectorDriver can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.Driver]. + ResourceSliceSelectorDriver = "spec.driver" +) + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +type ResourceSliceSpec struct { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable.
+ // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + Pool ResourcePool `json:"pool" protobuf:"bytes,2,name=pool"` + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. + // + // +optional + // +oneOf=NodeSelection + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,3,opt,name=nodeName"` + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,4,opt,name=nodeSelector"` + + // AllNodes indicates that all nodes have access to the resources in the pool. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + AllNodes bool `json:"allNodes,omitempty" protobuf:"bytes,5,opt,name=allNodes"` + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. + // + // +optional + // +listType=atomic + Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"` +} + +// ResourcePool describes the pool that ResourceSlices belong to. +type ResourcePool struct { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + Generation int64 `json:"generation" protobuf:"bytes,2,name=generation"` + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. 
+ // + // +required + ResourceSliceCount int64 `json:"resourceSliceCount" protobuf:"bytes,3,name=resourceSliceCount"` +} + +const ResourceSliceMaxSharedCapacity = 128 +const ResourceSliceMaxDevices = 128 +const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name. + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +type Device struct { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + Basic *BasicDevice `json:"basic,omitempty" protobuf:"bytes,2,opt,name=basic"` +} + +// BasicDevice defines one device instance. +type BasicDevice struct { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Attributes map[QualifiedName]DeviceAttribute `json:"attributes,omitempty" protobuf:"bytes,1,rep,name=attributes"` + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Capacity map[QualifiedName]resource.Quantity `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity"` +} + +// Limit for the sum of the number of entries in both attributes and capacity. +const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32 + +// QualifiedName is the name of a device attribute or capacity. +// +// Attributes and capacities are defined either by the owner of the specific +// driver (usually the vendor) or by some 3rd party (e.g. the Kubernetes +// project). Because they are sometimes compared across devices, a given name +// is expected to mean the same thing and have the same type on all devices. +// +// Names must be either a C identifier (e.g. "theName") or a DNS subdomain +// followed by a slash ("/") followed by a C identifier +// (e.g. "dra.example.com/theName"). Names which do not include the +// domain prefix are assumed to be part of the driver's domain. Attributes +// or capacities defined by 3rd parties must include the domain prefix. +// +// The maximum length for the DNS subdomain is 63 characters (same as +// for driver names) and the maximum length of the C identifier +// is 32. +type QualifiedName string + +// FullyQualifiedName is a QualifiedName where the domain is set. +type FullyQualifiedName string + +// DeviceMaxDomainLength is the maximum length of the domain prefix in a fully-qualified name. +const DeviceMaxDomainLength = 63 + +// DeviceMaxIDLength is the maximum length of the identifier in a device attribute or capacity name (`<domain>/<ID>`). +const DeviceMaxIDLength = 32 + +// DeviceAttribute must have exactly one field set. +type DeviceAttribute struct { + // The Go field names below have a Value suffix to avoid a conflict between the + // field "String" and the corresponding method. That method is required. + // The Kubernetes API is defined without that suffix to keep it more natural. + + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"` + + // BoolValue is a true/false value.
+ // + // +optional + // +oneOf=ValueType + BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"` + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"` + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"` +} + +// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value. +const DeviceAttributeMaxValueLength = 64 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceSliceList + +// ResourceSliceList is a collection of ResourceSlices. +type ResourceSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource ResourceSlices. + Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaim + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +type ResourceClaimSpec struct { + // Devices defines how to request devices. + // + // +optional + Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"` + + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"` +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +type DeviceClaim struct { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"` + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. 
+ // + // +optional + // +listType=atomic + Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"` + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"` + + // Potential future extension, ignored by older schedulers. This is + // fine because scoring allows users to define a preference, without + // making it a hard requirement. + // + // Score *SomeScoringStruct +} + +const ( + DeviceRequestsMaxSize = AllocationResultsMaxSize + DeviceConstraintsMaxSize = 32 + DeviceConfigMaxSize = 32 +) + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. +type DeviceRequest struct { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"` + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"` + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,4,opt,name=allocationMode"` + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. 
+ // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + Count int64 `json:"count,omitempty" protobuf:"bytes,5,opt,name=count"` + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + AdminAccess *bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"` +} + +const ( + DeviceSelectorsMaxSize = 32 +) + +type DeviceAllocationMode string + +// Valid [DeviceRequest.CountMode] values. +const ( + DeviceAllocationModeExactCount = DeviceAllocationMode("ExactCount") + DeviceAllocationModeAll = DeviceAllocationMode("All") +) + +// DeviceSelector must have exactly one field set. +type DeviceSelector struct { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + CEL *CELDeviceSelector `json:"cel,omitempty" protobuf:"bytes,1,opt,name=cel"` +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +type CELDeviceSelector struct { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. 
+ // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // The length of the expression must be smaller or equal to 10 Ki. The + // cost of evaluating it is also limited based on the estimated number + // of logical steps. + // + // +required + Expression string `json:"expression" protobuf:"bytes,1,name=expression"` +} + +// CELSelectorExpressionMaxCost specifies the cost limit for a single CEL selector +// evaluation. +// +// There is no overall budget for selecting a device, so the actual time +// required for that is proportional to the number of CEL selectors and how +// often they need to be evaluated, which can vary depending on several factors +// (number of devices, cluster utilization, additional constraints). +// +// Validation against this limit and [CELSelectorExpressionMaxLength] happens +// only when setting an expression for the first time or when changing it. If +// the limits are changed in a future Kubernetes release, existing users are +// guaranteed that existing expressions will continue to be valid. +// +// However, the kube-scheduler also applies this cost limit at runtime, so it +// could happen that a valid expression fails at runtime after an up- or +// downgrade. This can also happen without version skew when the cost estimate +// underestimated the actual cost. That this might happen is the reason why +// kube-scheduler enforces the runtime limit instead of relying on validation. +// +// According to +// https://github.com/kubernetes/kubernetes/blob/4aeaf1e99e82da8334c0d6dddd848a194cd44b4f/staging/src/k8s.io/apiserver/pkg/apis/cel/config.go#L20-L22, +// this gives roughly 0.1 second for each expression evaluation. +// However, this depends on how fast the machine is. +const CELSelectorExpressionMaxCost = 1000000 + +// CELSelectorExpressionMaxLength is the maximum length of a CEL selector expression string. +const CELSelectorExpressionMaxLength = 10 * 1024 + +// DeviceConstraint must have exactly one field set besides Requests. +type DeviceConstraint struct { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. 
+ // + // +optional + // +oneOf=ConstraintType + MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"` + + // Potential future extension, not part of the current design: + // A CEL expression which compares different devices and returns + // true if they match. + // + // Because it would be part of a one-of, old schedulers will not + // accidentally ignore this additional, for them unknown match + // criteria. + // + // MatchExpression string +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +type DeviceClaimConfiguration struct { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"` +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +type DeviceConfiguration struct { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"` +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +type OpaqueDeviceConfiguration struct { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +required + Parameters runtime.RawExtension `json:"parameters" protobuf:"bytes,2,name=parameters"` +} + +// OpaqueParametersMaxLength is the maximum length of the raw data in an +// [OpaqueDeviceConfiguration.Parameters] field. +const OpaqueParametersMaxLength = 10 * 1024 + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. +type ResourceClaimStatus struct { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"` + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. 
+ // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 256 such reservations. This may get increased in + // the future, but not reduced. + // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"` + + // DeallocationRequested is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"` + + // Devices contains the status of each device allocated for this + // claim, as reported by the driver. This can include driver-specific + // information. Entries are owned by their respective drivers. + // + // +optional + // +listType=map + // +listMapKey=driver + // +listMapKey=device + // +listMapKey=pool + // +featureGate=DRAResourceClaimDeviceStatus + Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"` +} + +// ResourceClaimReservedForMaxSize is the maximum number of entries in +// claim.status.reservedFor. +const ResourceClaimReservedForMaxSize = 256 + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +type ResourceClaimConsumerReference struct { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Resource is the type of resource being referenced, for example "pods". + // +required + Resource string `json:"resource" protobuf:"bytes,3,name=resource"` + // Name is the name of resource being referenced. + // +required + Name string `json:"name" protobuf:"bytes,4,name=name"` + // UID identifies exactly one incarnation of the resource. + // +required + UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"` +} + +// AllocationResult contains attributes of an allocated resource. +type AllocationResult struct { + // Devices is the result of allocating devices. + // + // +optional + Devices DeviceAllocationResult `json:"devices,omitempty" protobuf:"bytes,1,opt,name=devices"` + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. + // + // +optional + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"` + + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"` +} + +// DeviceAllocationResult is the result of allocating devices. +type DeviceAllocationResult struct { + // Results lists all allocated devices. 
+ // + // +optional + // +listType=atomic + Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"` + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` +} + +// AllocationResultsMaxSize represents the maximum number of +// entries in allocation.devices.results. +const AllocationResultsMaxSize = 32 + +// DeviceRequestAllocationResult contains the allocation result for one request. +type DeviceRequestAllocationResult struct { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + Request string `json:"request" protobuf:"bytes,1,name=request"` + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,2,name=driver"` + + // This name together with the driver name and the device name field + // identify which device was allocated (`<driver name>/<pool name>/<device name>`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + Pool string `json:"pool" protobuf:"bytes,3,name=pool"` + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + Device string `json:"device" protobuf:"bytes,4,name=device"` + + // AdminAccess indicates that this device was allocated for + // administrative access. See the corresponding request field + // for a definition of mode. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + AdminAccess *bool `json:"adminAccess" protobuf:"bytes,5,name=adminAccess"` +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +type DeviceAllocationConfiguration struct { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. + // + // +required + Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"` + + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"` +} + +type AllocationConfigSource string + +// Valid [DeviceAllocationConfiguration.Source] values.
+const ( + AllocationConfigSourceClass = "FromClass" + AllocationConfigSourceClaim = "FromClaim" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaimList + +// ResourceClaimList is a collection of claims. +type ResourceClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claims. + Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,DeviceClass + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type DeviceClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + Spec DeviceClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +type DeviceClassSpec struct { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"` + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` + + // SuitableNodes is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` +} + +// DeviceClassConfiguration is used in DeviceClass. +type DeviceClassConfiguration struct { + DeviceConfiguration `json:",inline" protobuf:"bytes,1,opt,name=deviceConfiguration"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,DeviceClassList + +// DeviceClassList is a collection of classes. 
+type DeviceClassList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of resource classes.
+	Items []DeviceClass `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaimTemplate
+
+// ResourceClaimTemplate is used to produce ResourceClaim objects.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
+type ResourceClaimTemplate struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Describes the ResourceClaim that is to be generated.
+	//
+	// This field is immutable. A ResourceClaim will get created by the
+	// control plane for a Pod when needed and then not get updated
+	// anymore.
+	Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"`
+}
+
+// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
+type ResourceClaimTemplateSpec struct {
+	// ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
+	// when creating it. No other fields are allowed and will be rejected during
+	// validation.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec for the ResourceClaim. The entire content is copied unchanged
+	// into the ResourceClaim that gets created from this template. The
+	// same fields as in a ResourceClaim are also valid here.
+	Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.31
+// +k8s:prerelease-lifecycle-gen:replacement=resource.k8s.io,v1beta1,ResourceClaimTemplateList
+
+// ResourceClaimTemplateList is a collection of claim templates.
+type ResourceClaimTemplateList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of resource claim templates.
+	Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+type AllocatedDeviceStatus struct {
+	// Driver specifies the name of the DRA driver whose kubelet
+	// plugin should be invoked to process the allocation once the claim is
+	// needed on a node.
+	//
+	// Must be a DNS subdomain and should end with a DNS domain owned by the
+	// vendor of the driver.
+	//
+	// +required
+	Driver string `json:"driver" protobuf:"bytes,1,rep,name=driver"`
+
+	// This name together with the driver name and the device name field
+	// identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+	//
+	// Must not be longer than 253 characters and may contain one or more
+	// DNS sub-domains separated by slashes.
+	//
+	// +required
+	Pool string `json:"pool" protobuf:"bytes,2,rep,name=pool"`
+
+	// Device references one device instance via its name in the driver's
+	// resource pool. It must be a DNS label.
+ // + // +required + Device string `json:"device" protobuf:"bytes,3,rep,name=device"` + + // Conditions contains the latest observation of the device's state. + // If the device has been configured according to the class and claim + // config references, the `Ready` condition should be True. + // + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,4,opt,name=conditions"` + + // Data contains arbitrary driver-specific data. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +optional + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,5,opt,name=data"` + + // NetworkData contains network-related information specific to the device. + // + // +optional + NetworkData *NetworkDeviceData `json:"networkData,omitempty" protobuf:"bytes,6,opt,name=networkData"` +} + +// NetworkDeviceData provides network-related details for the allocated device. +// This information may be filled by drivers or other components to configure +// or identify the device within a network context. +type NetworkDeviceData struct { + // InterfaceName specifies the name of the network interface associated with + // the allocated device. This might be the name of a physical or virtual + // network interface being configured in the pod. + // + // Must not be longer than 256 characters. + // + // +optional + InterfaceName string `json:"interfaceName,omitempty" protobuf:"bytes,1,opt,name=interfaceName"` + + // IPs lists the network addresses assigned to the device's network interface. + // This can include both IPv4 and IPv6 addresses. + // The IPs are in the CIDR notation, which includes both the address and the + // associated subnet mask. + // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6. + // + // +optional + // +listType=atomic + IPs []string `json:"ips,omitempty" protobuf:"bytes,2,opt,name=ips"` + + // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface. + // + // Must not be longer than 128 characters. + // + // +optional + HardwareAddress string `json:"hardwareAddress,omitempty" protobuf:"bytes,3,opt,name=hardwareAddress"` +} diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go new file mode 100644 index 000000000..b41609d11 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -0,0 +1,377 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! 
For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-codegen.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AllocatedDeviceStatus = map[string]string{
+	"":            "AllocatedDeviceStatus contains the status of an allocated device, if the driver chooses to report it. This may include driver-specific information.",
+	"driver":      "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+	"pool":        "This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
+	"device":      "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
+	"conditions":  "Conditions contains the latest observation of the device's state. If the device has been configured according to the class and claim config references, the `Ready` condition should be True.",
+	"data":        "Data contains arbitrary driver-specific data.\n\nThe length of the raw data must be smaller or equal to 10 Ki.",
+	"networkData": "NetworkData contains network-related information specific to the device.",
+}
+
+func (AllocatedDeviceStatus) SwaggerDoc() map[string]string {
+	return map_AllocatedDeviceStatus
+}
+
+var map_AllocationResult = map[string]string{
+	"":             "AllocationResult contains attributes of an allocated resource.",
+	"devices":      "Devices is the result of allocating devices.",
+	"nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.",
+}
+
+func (AllocationResult) SwaggerDoc() map[string]string {
+	return map_AllocationResult
+}
+
+var map_BasicDevice = map[string]string{
+	"":           "BasicDevice defines one device instance.",
+	"attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
+	"capacity":   "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
+}
+
+func (BasicDevice) SwaggerDoc() map[string]string {
+	return map_BasicDevice
+}
+
+var map_CELDeviceSelector = map[string]string{
+	"":           "CELDeviceSelector contains a CEL expression for selecting a device.",
+	"expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n   (e.g.
device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)\n\nThe length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps.", +} + +func (CELDeviceSelector) SwaggerDoc() map[string]string { + return map_CELDeviceSelector +} + +var map_Device = map[string]string{ + "": "Device represents one individual hardware instance that can be selected based on its attributes. Besides the name, exactly one field must be set.", + "name": "Name is unique identifier among all devices managed by the driver in the pool. It must be a DNS label.", + "basic": "Basic defines one device instance.", +} + +func (Device) SwaggerDoc() map[string]string { + return map_Device +} + +var map_DeviceAllocationConfiguration = map[string]string{ + "": "DeviceAllocationConfiguration gets embedded in an AllocationResult.", + "source": "Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, its applies to all requests.", +} + +func (DeviceAllocationConfiguration) SwaggerDoc() map[string]string { + return map_DeviceAllocationConfiguration +} + +var map_DeviceAllocationResult = map[string]string{ + "": "DeviceAllocationResult is the result of allocating devices.", + "results": "Results lists all allocated devices.", + "config": "This field is a combination of all the claim and class configuration parameters. Drivers can distinguish between those based on a flag.\n\nThis includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. 
They can silently ignore unknown configuration parameters.", +} + +func (DeviceAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceAllocationResult +} + +var map_DeviceAttribute = map[string]string{ + "": "DeviceAttribute must have exactly one field set.", + "int": "IntValue is a number.", + "bool": "BoolValue is a true/false value.", + "string": "StringValue is a string. Must not be longer than 64 characters.", + "version": "VersionValue is a semantic version according to semver.org spec 2.0.0. Must not be longer than 64 characters.", +} + +func (DeviceAttribute) SwaggerDoc() map[string]string { + return map_DeviceAttribute +} + +var map_DeviceClaim = map[string]string{ + "": "DeviceClaim defines how to request devices with a ResourceClaim.", + "requests": "Requests represent individual requests for distinct devices which must all be satisfied. If empty, nothing needs to be allocated.", + "constraints": "These constraints must be satisfied by the set of devices that get allocated for the claim.", + "config": "This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim.", +} + +func (DeviceClaim) SwaggerDoc() map[string]string { + return map_DeviceClaim +} + +var map_DeviceClaimConfiguration = map[string]string{ + "": "DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, it applies to all requests.", +} + +func (DeviceClaimConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClaimConfiguration +} + +var map_DeviceClass = map[string]string{ + "": "DeviceClass is a vendor- or admin-provided resource that contains device configuration and selectors. It can be referenced in the device requests of a claim to apply these presets. Cluster scoped.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec defines what can be allocated and how to configure it.\n\nThis is mutable. Consumers have to be prepared for classes changing at any time, either because they get updated or replaced. Claim allocations are done once based on whatever was set in classes at the time of allocation.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (DeviceClass) SwaggerDoc() map[string]string { + return map_DeviceClass +} + +var map_DeviceClassConfiguration = map[string]string{ + "": "DeviceClassConfiguration is used in DeviceClass.", +} + +func (DeviceClassConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClassConfiguration +} + +var map_DeviceClassList = map[string]string{ + "": "DeviceClassList is a collection of classes.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource classes.", +} + +func (DeviceClassList) SwaggerDoc() map[string]string { + return map_DeviceClassList +} + +var map_DeviceClassSpec = map[string]string{ + "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", + "selectors": "Each selector must be satisfied by a device which is claimed via this class.", + "config": "Config defines configuration parameters that apply to each device that is claimed via this class. 
Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", +} + +func (DeviceClassSpec) SwaggerDoc() map[string]string { + return map_DeviceClassSpec +} + +var map_DeviceConfiguration = map[string]string{ + "": "DeviceConfiguration must have exactly one field set. It gets embedded inline in some other structs which have other fields, so field names must not conflict with those.", + "opaque": "Opaque provides driver-specific configuration parameters.", +} + +func (DeviceConfiguration) SwaggerDoc() map[string]string { + return map_DeviceConfiguration +} + +var map_DeviceConstraint = map[string]string{ + "": "DeviceConstraint must have exactly one field set besides Requests.", + "requests": "Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim.", + "matchAttribute": "MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices.\n\nFor example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen.\n\nMust include the domain qualifier.", +} + +func (DeviceConstraint) SwaggerDoc() map[string]string { + return map_DeviceConstraint +} + +var map_DeviceRequest = map[string]string{ + "": "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. Clients must check that it is indeed set. It's absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.", + "name": "Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim.\n\nMust be a DNS label.", + "deviceClassName": "DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request.\n\nA class is required. Which classes are available depends on the cluster.\n\nAdministrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.", + "selectors": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.", + "allocationMode": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. 
The exact number is provided in the\n    count field.\n\n- All: This request is for all of the matching devices in a pool.\n  Allocation will fail if some devices are already allocated,\n  unless adminAccess is requested.\n\nIf AllocationMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.",
+	"count":           "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.",
+	"adminAccess":     "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.",
+}
+
+func (DeviceRequest) SwaggerDoc() map[string]string {
+	return map_DeviceRequest
+}
+
+var map_DeviceRequestAllocationResult = map[string]string{
+	"":            "DeviceRequestAllocationResult contains the allocation result for one request.",
+	"request":     "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.",
+	"driver":      "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+	"pool":        "This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
+	"device":      "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
+	"adminAccess": "AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.",
+}
+
+func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string {
+	return map_DeviceRequestAllocationResult
+}
+
+var map_DeviceSelector = map[string]string{
+	"":    "DeviceSelector must have exactly one field set.",
+	"cel": "CEL contains a CEL expression for selecting a device.",
+}
+
+func (DeviceSelector) SwaggerDoc() map[string]string {
+	return map_DeviceSelector
+}
+
+var map_NetworkDeviceData = map[string]string{
+	"":              "NetworkDeviceData provides network-related details for the allocated device. This information may be filled by drivers or other components to configure or identify the device within a network context.",
+	"interfaceName": "InterfaceName specifies the name of the network interface associated with the allocated device. This might be the name of a physical or virtual network interface being configured in the pod.\n\nMust not be longer than 256 characters.",
+	"ips":           "IPs lists the network addresses assigned to the device's network interface.
This can include both IPv4 and IPv6 addresses. The IPs are in the CIDR notation, which includes both the address and the associated subnet mask. e.g.: \"192.0.2.5/24\" for IPv4 and \"2001:db8::5/64\" for IPv6.", + "hardwareAddress": "HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.\n\nMust not be longer than 128 characters.", +} + +func (NetworkDeviceData) SwaggerDoc() map[string]string { + return map_NetworkDeviceData +} + +var map_OpaqueDeviceConfiguration = map[string]string{ + "": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.", + "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.\n\nThe length of the raw data must be smaller or equal to 10 Ki.", +} + +func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string { + return map_OpaqueDeviceConfiguration +} + +var map_ResourceClaim = map[string]string{ + "": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes what is being requested and how to configure it. The spec is immutable.", + "status": "Status describes whether the claim is ready to use and what has been allocated.", +} + +func (ResourceClaim) SwaggerDoc() map[string]string { + return map_ResourceClaim +} + +var map_ResourceClaimConsumerReference = map[string]string{ + "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", + "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. 
This matches the group in the APIVersion that is used when creating the resources.", + "resource": "Resource is the type of resource being referenced, for example \"pods\".", + "name": "Name is the name of resource being referenced.", + "uid": "UID identifies exactly one incarnation of the resource.", +} + +func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string { + return map_ResourceClaimConsumerReference +} + +var map_ResourceClaimList = map[string]string{ + "": "ResourceClaimList is a collection of claims.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claims.", +} + +func (ResourceClaimList) SwaggerDoc() map[string]string { + return map_ResourceClaimList +} + +var map_ResourceClaimSpec = map[string]string{ + "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", + "devices": "Devices defines how to request devices.", +} + +func (ResourceClaimSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimSpec +} + +var map_ResourceClaimStatus = map[string]string{ + "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", + "allocation": "Allocation is set once the claim has been allocated successfully.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", + "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.", +} + +func (ResourceClaimStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimStatus +} + +var map_ResourceClaimTemplate = map[string]string{ + "": "ResourceClaimTemplate is used to produce ResourceClaim objects.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. 
A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.",
+}
+
+func (ResourceClaimTemplate) SwaggerDoc() map[string]string {
+	return map_ResourceClaimTemplate
+}
+
+var map_ResourceClaimTemplateList = map[string]string{
+	"":         "ResourceClaimTemplateList is a collection of claim templates.",
+	"metadata": "Standard list metadata",
+	"items":    "Items is the list of resource claim templates.",
+}
+
+func (ResourceClaimTemplateList) SwaggerDoc() map[string]string {
+	return map_ResourceClaimTemplateList
+}
+
+var map_ResourceClaimTemplateSpec = map[string]string{
+	"":         "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.",
+	"metadata": "ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim when creating it. No other fields are allowed and will be rejected during validation.",
+	"spec":     "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.",
+}
+
+func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string {
+	return map_ResourceClaimTemplateSpec
+}
+
+var map_ResourcePool = map[string]string{
+	"":                   "ResourcePool describes the pool that ResourceSlices belong to.",
+	"name":               "Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required.\n\nIt must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable.",
+	"generation":         "Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted.\n\nCombined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state.",
+	"resourceSliceCount": "ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero.\n\nConsumers can use this to check whether they have seen all ResourceSlices belonging to the same pool.",
+}
+
+func (ResourcePool) SwaggerDoc() map[string]string {
+	return map_ResourcePool
+}
+
+var map_ResourceSlice = map[string]string{
+	"":         "ResourceSlice represents one or more resources in a pool of similar resources, managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many ResourceSlices comprise a pool is determined by the driver.\n\nAt the moment, the only supported resources are devices with attributes and capacities. Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. The ResourceSlice in which a device gets published may change over time. The unique identifier for a device is the tuple <driver name>, <pool name>, <device name>.\n\nWhenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number and updates all ResourceSlices with that new number and new resource definitions.
A consumer must only use ResourceSlices with the highest generation number and ignore all others.\n\nWhen allocating all resources in a pool matching certain criteria or when looking for the best solution among several different alternatives, a consumer should check the number of ResourceSlices in a pool (included in each ResourceSlice) to determine whether its view of a pool is complete and if not, should wait until the driver has completed updating the pool.\n\nFor resources that are not local to a node, the node name is not set. Instead, the driver may use a node selector to specify where the devices are available.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Contains the information published by the driver.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (ResourceSlice) SwaggerDoc() map[string]string { + return map_ResourceSlice +} + +var map_ResourceSliceList = map[string]string{ + "": "ResourceSliceList is a collection of ResourceSlices.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource ResourceSlices.", +} + +func (ResourceSliceList) SwaggerDoc() map[string]string { + return map_ResourceSliceList +} + +var map_ResourceSliceSpec = map[string]string{ + "": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.", + "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.", + "pool": "Pool describes the pool that this ResourceSlice belongs to.", + "nodeName": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set. This field is immutable.", + "nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "allNodes": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.", +} + +func (ResourceSliceSpec) SwaggerDoc() map[string]string { + return map_ResourceSliceSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go new file mode 100644 index 000000000..07ba47b59 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go @@ -0,0 +1,866 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + corev1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocatedDeviceStatus) DeepCopyInto(out *AllocatedDeviceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Data.DeepCopyInto(&out.Data) + if in.NetworkData != nil { + in, out := &in.NetworkData, &out.NetworkData + *out = new(NetworkDeviceData) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocatedDeviceStatus. +func (in *AllocatedDeviceStatus) DeepCopy() *AllocatedDeviceStatus { + if in == nil { + return nil + } + out := new(AllocatedDeviceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { + *out = *in + in.Devices.DeepCopyInto(&out.Devices) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(corev1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult. +func (in *AllocationResult) DeepCopy() *AllocationResult { + if in == nil { + return nil + } + out := new(AllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicDevice) DeepCopyInto(out *BasicDevice) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[QualifiedName]DeviceAttribute, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(map[QualifiedName]resource.Quantity, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicDevice. +func (in *BasicDevice) DeepCopy() *BasicDevice { + if in == nil { + return nil + } + out := new(BasicDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CELDeviceSelector) DeepCopyInto(out *CELDeviceSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CELDeviceSelector. 
+func (in *CELDeviceSelector) DeepCopy() *CELDeviceSelector { + if in == nil { + return nil + } + out := new(CELDeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Device) DeepCopyInto(out *Device) { + *out = *in + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = new(BasicDevice) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. +func (in *Device) DeepCopy() *Device { + if in == nil { + return nil + } + out := new(Device) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAllocationConfiguration) DeepCopyInto(out *DeviceAllocationConfiguration) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationConfiguration. +func (in *DeviceAllocationConfiguration) DeepCopy() *DeviceAllocationConfiguration { + if in == nil { + return nil + } + out := new(DeviceAllocationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) { + *out = *in + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]DeviceRequestAllocationResult, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceAllocationConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationResult. +func (in *DeviceAllocationResult) DeepCopy() *DeviceAllocationResult { + if in == nil { + return nil + } + out := new(DeviceAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAttribute) DeepCopyInto(out *DeviceAttribute) { + *out = *in + if in.IntValue != nil { + in, out := &in.IntValue, &out.IntValue + *out = new(int64) + **out = **in + } + if in.BoolValue != nil { + in, out := &in.BoolValue, &out.BoolValue + *out = new(bool) + **out = **in + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } + if in.VersionValue != nil { + in, out := &in.VersionValue, &out.VersionValue + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAttribute. +func (in *DeviceAttribute) DeepCopy() *DeviceAttribute { + if in == nil { + return nil + } + out := new(DeviceAttribute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceClaim) DeepCopyInto(out *DeviceClaim) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]DeviceRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Constraints != nil { + in, out := &in.Constraints, &out.Constraints + *out = make([]DeviceConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClaimConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaim. +func (in *DeviceClaim) DeepCopy() *DeviceClaim { + if in == nil { + return nil + } + out := new(DeviceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClaimConfiguration) DeepCopyInto(out *DeviceClaimConfiguration) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaimConfiguration. +func (in *DeviceClaimConfiguration) DeepCopy() *DeviceClaimConfiguration { + if in == nil { + return nil + } + out := new(DeviceClaimConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClass) DeepCopyInto(out *DeviceClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClass. +func (in *DeviceClass) DeepCopy() *DeviceClass { + if in == nil { + return nil + } + out := new(DeviceClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassConfiguration) DeepCopyInto(out *DeviceClassConfiguration) { + *out = *in + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassConfiguration. +func (in *DeviceClassConfiguration) DeepCopy() *DeviceClassConfiguration { + if in == nil { + return nil + } + out := new(DeviceClassConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassList) DeepCopyInto(out *DeviceClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeviceClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassList. 
+func (in *DeviceClassList) DeepCopy() *DeviceClassList { + if in == nil { + return nil + } + out := new(DeviceClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClassConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassSpec. +func (in *DeviceClassSpec) DeepCopy() *DeviceClassSpec { + if in == nil { + return nil + } + out := new(DeviceClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConfiguration) DeepCopyInto(out *DeviceConfiguration) { + *out = *in + if in.Opaque != nil { + in, out := &in.Opaque, &out.Opaque + *out = new(OpaqueDeviceConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfiguration. +func (in *DeviceConfiguration) DeepCopy() *DeviceConfiguration { + if in == nil { + return nil + } + out := new(DeviceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConstraint) DeepCopyInto(out *DeviceConstraint) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MatchAttribute != nil { + in, out := &in.MatchAttribute, &out.MatchAttribute + *out = new(FullyQualifiedName) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConstraint. +func (in *DeviceConstraint) DeepCopy() *DeviceConstraint { + if in == nil { + return nil + } + out := new(DeviceConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminAccess != nil { + in, out := &in.AdminAccess, &out.AdminAccess + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequest. +func (in *DeviceRequest) DeepCopy() *DeviceRequest { + if in == nil { + return nil + } + out := new(DeviceRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) { + *out = *in + if in.AdminAccess != nil { + in, out := &in.AdminAccess, &out.AdminAccess + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequestAllocationResult. +func (in *DeviceRequestAllocationResult) DeepCopy() *DeviceRequestAllocationResult { + if in == nil { + return nil + } + out := new(DeviceRequestAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceSelector) DeepCopyInto(out *DeviceSelector) { + *out = *in + if in.CEL != nil { + in, out := &in.CEL, &out.CEL + *out = new(CELDeviceSelector) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSelector. +func (in *DeviceSelector) DeepCopy() *DeviceSelector { + if in == nil { + return nil + } + out := new(DeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDeviceData) DeepCopyInto(out *NetworkDeviceData) { + *out = *in + if in.IPs != nil { + in, out := &in.IPs, &out.IPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceData. +func (in *NetworkDeviceData) DeepCopy() *NetworkDeviceData { + if in == nil { + return nil + } + out := new(NetworkDeviceData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) { + *out = *in + in.Parameters.DeepCopyInto(&out.Parameters) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration. +func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration { + if in == nil { + return nil + } + out := new(OpaqueDeviceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim. +func (in *ResourceClaim) DeepCopy() *ResourceClaim { + if in == nil { + return nil + } + out := new(ResourceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference. 
+func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference { + if in == nil { + return nil + } + out := new(ResourceClaimConsumerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList. +func (in *ResourceClaimList) DeepCopy() *ResourceClaimList { + if in == nil { + return nil + } + out := new(ResourceClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { + *out = *in + in.Devices.DeepCopyInto(&out.Devices) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec. +func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec { + if in == nil { + return nil + } + out := new(ResourceClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) { + *out = *in + if in.Allocation != nil { + in, out := &in.Allocation, &out.Allocation + *out = new(AllocationResult) + (*in).DeepCopyInto(*out) + } + if in.ReservedFor != nil { + in, out := &in.ReservedFor, &out.ReservedFor + *out = make([]ResourceClaimConsumerReference, len(*in)) + copy(*out, *in) + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]AllocatedDeviceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus. +func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus { + if in == nil { + return nil + } + out := new(ResourceClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate. +func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate { + if in == nil { + return nil + } + out := new(ResourceClaimTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaimTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList. +func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec. +func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePool) DeepCopyInto(out *ResourcePool) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool. +func (in *ResourcePool) DeepCopy() *ResourcePool { + if in == nil { + return nil + } + out := new(ResourcePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice. +func (in *ResourceSlice) DeepCopy() *ResourceSlice { + if in == nil { + return nil + } + out := new(ResourceSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
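Editor's note: the DeepCopyObject wrappers in this file exist so that the top-level kinds satisfy runtime.Object and can be copied polymorphically, for example by informer caches. A hedged sketch of why that matters, using a stand-in Object interface and a fabricated type rather than the real k8s.io/apimachinery API:

package main

import "fmt"

// Object is a stand-in for runtime.Object: anything that can copy itself
// without the caller knowing its concrete type.
type Object interface {
	DeepCopyObject() Object
}

type slice struct{ Name string }

func (in *slice) DeepCopyObject() Object {
	if in == nil {
		return nil
	}
	out := *in
	return &out
}

// snapshot copies every object in a cache before handing it to callers,
// which is the kind of use DeepCopyObject is generated for.
func snapshot(objs []Object) []Object {
	out := make([]Object, 0, len(objs))
	for _, o := range objs {
		out = append(out, o.DeepCopyObject())
	}
	return out
}

func main() {
	cache := []Object{&slice{Name: "gpu-pool"}}
	copies := snapshot(cache)
	copies[0].(*slice).Name = "edited"
	fmt.Println(cache[0].(*slice).Name) // gpu-pool: the cache is unaffected
}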
+func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList. +func (in *ResourceSliceList) DeepCopy() *ResourceSliceList { + if in == nil { + return nil + } + out := new(ResourceSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) { + *out = *in + out.Pool = in.Pool + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(corev1.NodeSelector) + (*in).DeepCopyInto(*out) + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]Device, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceSpec. +func (in *ResourceSliceSpec) DeepCopy() *ResourceSliceSpec { + if in == nil { + return nil + } + out := new(ResourceSliceSpec) + in.DeepCopyInto(out) + return out +} diff --git a/constraint/vendor/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..9f57ab670 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,218 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeviceClass) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *DeviceClass) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *DeviceClass) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "DeviceClass"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeviceClass) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeviceClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeviceClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *DeviceClassList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "DeviceClassList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeviceClassList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaim) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaim) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
+func (in *ResourceClaim) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaim"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaim) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *ResourceClaimList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimTemplate) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimTemplate) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
+func (in *ResourceClaimTemplate) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimTemplate"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimTemplate) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimTemplateList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimTemplateList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *ResourceClaimTemplateList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceClaimTemplateList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimTemplateList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceSlice) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceSlice) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
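Editor's note: the APILifecycle* accessors in this file encode "introduced in 1.31, deprecated in 1.34, removed in 1.37" for every v1alpha3 kind, with the v1beta1 group/version/kind named as the replacement. A hedged sketch of how such metadata could be consumed; the interface and helper below are illustrative, not part of the vendored API.

package main

import "fmt"

// lifecycle captures the accessors emitted by prerelease-lifecycle-gen.
type lifecycle interface {
	APILifecycleIntroduced() (major, minor int)
	APILifecycleDeprecated() (major, minor int)
	APILifecycleRemoved() (major, minor int)
}

// fakeClaim hard-codes the values generated above for the v1alpha3 kinds.
type fakeClaim struct{}

func (fakeClaim) APILifecycleIntroduced() (int, int) { return 1, 31 }
func (fakeClaim) APILifecycleDeprecated() (int, int) { return 1, 34 }
func (fakeClaim) APILifecycleRemoved() (int, int)    { return 1, 37 }

// deprecatedAt reports whether the API is deprecated in the given release.
func deprecatedAt(l lifecycle, major, minor int) bool {
	dMaj, dMin := l.APILifecycleDeprecated()
	return major > dMaj || (major == dMaj && minor >= dMin)
}

func main() {
	fmt.Println(deprecatedAt(fakeClaim{}, 1, 33)) // false
	fmt.Println(deprecatedAt(fakeClaim{}, 1, 34)) // true
}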
+func (in *ResourceSlice) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceSlice"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceSlice) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceSliceList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceSliceList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *ResourceSliceList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "resource.k8s.io", Version: "v1beta1", Kind: "ResourceSliceList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceSliceList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/constraint/vendor/k8s.io/api/resource/v1beta1/doc.go b/constraint/vendor/k8s.io/api/resource/v1beta1/doc.go new file mode 100644 index 000000000..88c35c6ca --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:prerelease-lifecycle-gen=true +// +groupName=resource.k8s.io + +// Package v1beta1 is the v1beta1 version of the resource API. 
+package v1beta1 // import "k8s.io/api/resource/v1beta1" diff --git a/constraint/vendor/k8s.io/api/resource/v1beta1/generated.pb.go b/constraint/vendor/k8s.io/api/resource/v1beta1/generated.pb.go new file mode 100644 index 000000000..df4e68f30 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1beta1/generated.pb.go @@ -0,0 +1,8655 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/resource/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} } +func (*AllocatedDeviceStatus) ProtoMessage() {} +func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{0} +} +func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src) +} +func (m *AllocatedDeviceStatus) XXX_Size() int { + return m.Size() +} +func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo + +func (m *AllocationResult) Reset() { *m = AllocationResult{} } +func (*AllocationResult) ProtoMessage() {} +func (*AllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{1} +} +func (m *AllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocationResult.Merge(m, src) +} +func (m *AllocationResult) XXX_Size() int { + return m.Size() +} +func (m *AllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_AllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocationResult proto.InternalMessageInfo + +func (m *BasicDevice) Reset() { *m = BasicDevice{} } +func (*BasicDevice) ProtoMessage() {} +func (*BasicDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{2} +} +func (m *BasicDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BasicDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicDevice.Merge(m, src) +} +func (m *BasicDevice) XXX_Size() int { + return m.Size() +} +func (m *BasicDevice) XXX_DiscardUnknown() { + xxx_messageInfo_BasicDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_BasicDevice proto.InternalMessageInfo + +func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} } +func (*CELDeviceSelector) ProtoMessage() {} +func (*CELDeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{3} +} +func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CELDeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_CELDeviceSelector.Merge(m, src) +} +func (m *CELDeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *CELDeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo + 
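Editor's note: every generated XXX_Marshal above sizes a buffer with Size() and then lets MarshalToSizedBuffer fill it from the end backwards, so the encoding is done in a single allocation. A hedged, self-contained sketch of that calling convention; the msg type is fabricated and its "encoding" is a raw byte copy rather than real protobuf wire format.

package main

import "fmt"

// msg stands in for a generated message; its encoding is just its payload.
type msg struct{ payload []byte }

func (m *msg) Size() int { return len(m.payload) }

// MarshalToSizedBuffer writes the encoding into the tail of b and returns how
// many bytes it wrote, matching the contract used by the generated code.
func (m *msg) MarshalToSizedBuffer(b []byte) (int, error) {
	i := len(b)
	i -= len(m.payload)
	copy(b[i:], m.payload)
	return len(b) - i, nil
}

// Marshal allocates exactly Size() bytes up front and fills them in one pass.
func (m *msg) Marshal() ([]byte, error) {
	b := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(b)
	if err != nil {
		return nil, err
	}
	return b[len(b)-n:], nil
}

func main() {
	out, _ := (&msg{payload: []byte("resource.k8s.io")}).Marshal()
	fmt.Printf("%q\n", out)
}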
+func (m *Device) Reset() { *m = Device{} } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{4} +} +func (m *Device) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) +} +func (m *Device) XXX_Size() int { + return m.Size() +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} } +func (*DeviceAllocationConfiguration) ProtoMessage() {} +func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{5} +} +func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src) +} +func (m *DeviceAllocationConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo + +func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} } +func (*DeviceAllocationResult) ProtoMessage() {} +func (*DeviceAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{6} +} +func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAllocationResult.Merge(m, src) +} +func (m *DeviceAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo + +func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} } +func (*DeviceAttribute) ProtoMessage() {} +func (*DeviceAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{7} +} +func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceAttribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceAttribute.Merge(m, src) +} +func (m *DeviceAttribute) XXX_Size() int { + return m.Size() +} +func (m *DeviceAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceAttribute.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo + 
+func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} } +func (*DeviceCapacity) ProtoMessage() {} +func (*DeviceCapacity) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{8} +} +func (m *DeviceCapacity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceCapacity) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceCapacity.Merge(m, src) +} +func (m *DeviceCapacity) XXX_Size() int { + return m.Size() +} +func (m *DeviceCapacity) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceCapacity.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceCapacity proto.InternalMessageInfo + +func (m *DeviceClaim) Reset() { *m = DeviceClaim{} } +func (*DeviceClaim) ProtoMessage() {} +func (*DeviceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{9} +} +func (m *DeviceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaim.Merge(m, src) +} +func (m *DeviceClaim) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo + +func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} } +func (*DeviceClaimConfiguration) ProtoMessage() {} +func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{10} +} +func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src) +} +func (m *DeviceClaimConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo + +func (m *DeviceClass) Reset() { *m = DeviceClass{} } +func (*DeviceClass) ProtoMessage() {} +func (*DeviceClass) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{11} +} +func (m *DeviceClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClass.Merge(m, src) +} +func (m *DeviceClass) XXX_Size() int { + return m.Size() +} +func (m *DeviceClass) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClass.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClass proto.InternalMessageInfo + +func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} } +func (*DeviceClassConfiguration) ProtoMessage() {} +func 
(*DeviceClassConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{12} +} +func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassConfiguration.Merge(m, src) +} +func (m *DeviceClassConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo + +func (m *DeviceClassList) Reset() { *m = DeviceClassList{} } +func (*DeviceClassList) ProtoMessage() {} +func (*DeviceClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{13} +} +func (m *DeviceClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassList.Merge(m, src) +} +func (m *DeviceClassList) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassList) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo + +func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} } +func (*DeviceClassSpec) ProtoMessage() {} +func (*DeviceClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{14} +} +func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceClassSpec.Merge(m, src) +} +func (m *DeviceClassSpec) XXX_Size() int { + return m.Size() +} +func (m *DeviceClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo + +func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} } +func (*DeviceConfiguration) ProtoMessage() {} +func (*DeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{15} +} +func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConfiguration.Merge(m, src) +} +func (m *DeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *DeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo + +func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} } +func (*DeviceConstraint) ProtoMessage() {} +func 
(*DeviceConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{16} +} +func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceConstraint.Merge(m, src) +} +func (m *DeviceConstraint) XXX_Size() int { + return m.Size() +} +func (m *DeviceConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo + +func (m *DeviceRequest) Reset() { *m = DeviceRequest{} } +func (*DeviceRequest) ProtoMessage() {} +func (*DeviceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{17} +} +func (m *DeviceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequest.Merge(m, src) +} +func (m *DeviceRequest) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo + +func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} } +func (*DeviceRequestAllocationResult) ProtoMessage() {} +func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{18} +} +func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src) +} +func (m *DeviceRequestAllocationResult) XXX_Size() int { + return m.Size() +} +func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo + +func (m *DeviceSelector) Reset() { *m = DeviceSelector{} } +func (*DeviceSelector) ProtoMessage() {} +func (*DeviceSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{19} +} +func (m *DeviceSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeviceSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeviceSelector.Merge(m, src) +} +func (m *DeviceSelector) XXX_Size() int { + return m.Size() +} +func (m *DeviceSelector) XXX_DiscardUnknown() { + xxx_messageInfo_DeviceSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo + +func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} } +func (*NetworkDeviceData) ProtoMessage() {} 
+func (*NetworkDeviceData) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{20} +} +func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkDeviceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkDeviceData.Merge(m, src) +} +func (m *NetworkDeviceData) XXX_Size() int { + return m.Size() +} +func (m *NetworkDeviceData) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo + +func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} } +func (*OpaqueDeviceConfiguration) ProtoMessage() {} +func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{21} +} +func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src) +} +func (m *OpaqueDeviceConfiguration) XXX_Size() int { + return m.Size() +} +func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo + +func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } +func (*ResourceClaim) ProtoMessage() {} +func (*ResourceClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{22} +} +func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaim) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaim.Merge(m, src) +} +func (m *ResourceClaim) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaim) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo + +func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } +func (*ResourceClaimConsumerReference) ProtoMessage() {} +func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{23} +} +func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src) +} +func (m *ResourceClaimConsumerReference) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo + +func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } +func (*ResourceClaimList) ProtoMessage() {} +func (*ResourceClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{24} +} +func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimList.Merge(m, src) +} +func (m *ResourceClaimList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo + +func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } +func (*ResourceClaimSpec) ProtoMessage() {} +func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{25} +} +func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimSpec.Merge(m, src) +} +func (m *ResourceClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo + +func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } +func (*ResourceClaimStatus) ProtoMessage() {} +func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{26} +} +func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimStatus.Merge(m, src) +} +func (m *ResourceClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo + +func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } +func (*ResourceClaimTemplate) ProtoMessage() {} +func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{27} +} +func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplate.Merge(m, src) +} +func (m *ResourceClaimTemplate) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplate) XXX_DiscardUnknown() { + 
xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo + +func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } +func (*ResourceClaimTemplateList) ProtoMessage() {} +func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{28} +} +func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src) +} +func (m *ResourceClaimTemplateList) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo + +func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } +func (*ResourceClaimTemplateSpec) ProtoMessage() {} +func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{29} +} +func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src) +} +func (m *ResourceClaimTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo + +func (m *ResourcePool) Reset() { *m = ResourcePool{} } +func (*ResourcePool) ProtoMessage() {} +func (*ResourcePool) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{30} +} +func (m *ResourcePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourcePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourcePool.Merge(m, src) +} +func (m *ResourcePool) XXX_Size() int { + return m.Size() +} +func (m *ResourcePool) XXX_DiscardUnknown() { + xxx_messageInfo_ResourcePool.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourcePool proto.InternalMessageInfo + +func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } +func (*ResourceSlice) ProtoMessage() {} +func (*ResourceSlice) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{31} +} +func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSlice) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSlice.Merge(m, src) +} +func (m *ResourceSlice) XXX_Size() int { + return m.Size() 
+} +func (m *ResourceSlice) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSlice.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo + +func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } +func (*ResourceSliceList) ProtoMessage() {} +func (*ResourceSliceList) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{32} +} +func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceList.Merge(m, src) +} +func (m *ResourceSliceList) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo + +func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} } +func (*ResourceSliceSpec) ProtoMessage() {} +func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_ba331e3ec6484c27, []int{33} +} +func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSliceSpec.Merge(m, src) +} +func (m *ResourceSliceSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceSliceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1beta1.AllocatedDeviceStatus") + proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1beta1.AllocationResult") + proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1beta1.BasicDevice") + proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1beta1.BasicDevice.AttributesEntry") + proto.RegisterMapType((map[QualifiedName]DeviceCapacity)(nil), "k8s.io.api.resource.v1beta1.BasicDevice.CapacityEntry") + proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1beta1.CELDeviceSelector") + proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1beta1.Device") + proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceAllocationConfiguration") + proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1beta1.DeviceAllocationResult") + proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1beta1.DeviceAttribute") + proto.RegisterType((*DeviceCapacity)(nil), "k8s.io.api.resource.v1beta1.DeviceCapacity") + proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1beta1.DeviceClaim") + proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceClaimConfiguration") + proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1beta1.DeviceClass") + proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceClassConfiguration") + proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1beta1.DeviceClassList") + 
proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1beta1.DeviceClassSpec") + proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceConfiguration") + proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1beta1.DeviceConstraint") + proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1beta1.DeviceRequest") + proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1beta1.DeviceRequestAllocationResult") + proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1beta1.DeviceSelector") + proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1beta1.NetworkDeviceData") + proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1beta1.OpaqueDeviceConfiguration") + proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1beta1.ResourceClaim") + proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimConsumerReference") + proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimList") + proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimSpec") + proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimStatus") + proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplate") + proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplateList") + proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplateSpec") + proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1beta1.ResourcePool") + proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1beta1.ResourceSlice") + proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1beta1.ResourceSliceList") + proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceSliceSpec") +} + +func init() { + proto.RegisterFile("k8s.io/api/resource/v1beta1/generated.proto", fileDescriptor_ba331e3ec6484c27) +} + +var fileDescriptor_ba331e3ec6484c27 = []byte{ + // 2051 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0x4b, 0x8f, 0x1b, 0x49, + 0x79, 0xda, 0xed, 0x79, 0x7d, 0x9e, 0x57, 0x2a, 0x64, 0x71, 0x26, 0xc2, 0x9e, 0x74, 0x24, 0xf0, + 0x66, 0xb3, 0xed, 0x8d, 0x81, 0x28, 0xca, 0x5e, 0x70, 0xcf, 0xcc, 0x06, 0x43, 0x32, 0x99, 0xad, + 0x61, 0x43, 0xb4, 0x6c, 0x10, 0x35, 0xed, 0x9a, 0x99, 0x66, 0xec, 0x6e, 0xa7, 0xbb, 0x7a, 0xb2, + 0x73, 0x40, 0xa0, 0x3d, 0xaf, 0x10, 0x77, 0xc4, 0x85, 0x03, 0x12, 0x12, 0x42, 0xfc, 0x02, 0x90, + 0x40, 0x88, 0x88, 0x03, 0xac, 0xe0, 0xb2, 0xe2, 0x60, 0x88, 0xf7, 0x07, 0x70, 0xcf, 0x09, 0x55, + 0x75, 0xf5, 0xd3, 0x6e, 0xd3, 0x83, 0x96, 0x51, 0xf6, 0xe6, 0xfe, 0xde, 0xf5, 0xbd, 0xab, 0x0c, + 0xaf, 0x1d, 0xdf, 0xf6, 0x74, 0xcb, 0x69, 0x92, 0x81, 0xd5, 0x74, 0xa9, 0xe7, 0xf8, 0xae, 0x49, + 0x9b, 0x27, 0x37, 0xf7, 0x29, 0x23, 0x37, 0x9b, 0x87, 0xd4, 0xa6, 0x2e, 0x61, 0xb4, 0xab, 0x0f, + 0x5c, 0x87, 0x39, 0xe8, 0x4a, 0x40, 0xac, 0x93, 0x81, 0xa5, 0x87, 0xc4, 0xba, 0x24, 0x5e, 0x7f, + 0xfd, 0xd0, 0x62, 0x47, 0xfe, 0xbe, 0x6e, 0x3a, 0xfd, 0xe6, 0xa1, 0x73, 0xe8, 0x34, 0x05, 0xcf, + 0xbe, 0x7f, 0x20, 0xbe, 0xc4, 0x87, 0xf8, 0x15, 0xc8, 0x5a, 0xd7, 0x12, 0x8a, 0x4d, 0xc7, 0xe5, + 0x4a, 0xb3, 0xfa, 0xd6, 0xbf, 0x12, 0xd3, 0xf4, 0x89, 0x79, 0x64, 0xd9, 0xd4, 0x3d, 0x6d, 0x0e, + 0x8e, 0x0f, 0xd3, 0xd6, 0x9e, 0x85, 0xcb, 0x6b, 
0xf6, 0x29, 0x23, 0x93, 0x74, 0x35, 0xf3, 0xb8, + 0x5c, 0xdf, 0x66, 0x56, 0x7f, 0x5c, 0xcd, 0xad, 0xff, 0xc6, 0xe0, 0x99, 0x47, 0xb4, 0x4f, 0xb2, + 0x7c, 0xda, 0xcf, 0x55, 0xb8, 0xd4, 0xee, 0xf5, 0x1c, 0x93, 0xc3, 0xb6, 0xe8, 0x89, 0x65, 0xd2, + 0x3d, 0x46, 0x98, 0xef, 0xa1, 0x2f, 0xc2, 0x5c, 0xd7, 0xb5, 0x4e, 0xa8, 0x5b, 0x55, 0x36, 0x94, + 0xc6, 0xa2, 0xb1, 0xf2, 0x6c, 0x58, 0x9f, 0x19, 0x0d, 0xeb, 0x73, 0x5b, 0x02, 0x8a, 0x25, 0x16, + 0x6d, 0x40, 0x79, 0xe0, 0x38, 0xbd, 0x6a, 0x49, 0x50, 0x2d, 0x49, 0xaa, 0xf2, 0xae, 0xe3, 0xf4, + 0xb0, 0xc0, 0x08, 0x49, 0x42, 0x72, 0x55, 0xcd, 0x48, 0x12, 0x50, 0x2c, 0xb1, 0xc8, 0x04, 0x30, + 0x1d, 0xbb, 0x6b, 0x31, 0xcb, 0xb1, 0xbd, 0x6a, 0x79, 0x43, 0x6d, 0x54, 0x5a, 0x4d, 0x3d, 0x8e, + 0x72, 0x74, 0x30, 0x7d, 0x70, 0x7c, 0xc8, 0x01, 0x9e, 0xce, 0xfd, 0xa7, 0x9f, 0xdc, 0xd4, 0x37, + 0x43, 0x3e, 0x03, 0x49, 0xe1, 0x10, 0x81, 0x3c, 0x9c, 0x10, 0x8b, 0x1e, 0x40, 0xb9, 0x4b, 0x18, + 0xa9, 0xce, 0x6e, 0x28, 0x8d, 0x4a, 0xeb, 0xf5, 0x5c, 0xf1, 0xd2, 0x6f, 0x3a, 0x26, 0x4f, 0xb7, + 0xdf, 0x67, 0xd4, 0xf6, 0xb8, 0xf0, 0xe8, 0x74, 0x5b, 0x84, 0x11, 0x2c, 0x04, 0x21, 0x02, 0x15, + 0x9b, 0xb2, 0xa7, 0x8e, 0x7b, 0xcc, 0x81, 0xd5, 0x39, 0x21, 0x57, 0xd7, 0xa7, 0x24, 0xa7, 0xbe, + 0x23, 0xe9, 0xc5, 0xb1, 0x39, 0x97, 0xb1, 0x3a, 0x1a, 0xd6, 0x2b, 0x3b, 0xb1, 0x18, 0x9c, 0x94, + 0xa9, 0xfd, 0x59, 0x81, 0x35, 0x19, 0x24, 0xcb, 0xb1, 0x31, 0xf5, 0xfc, 0x1e, 0x43, 0xdf, 0x85, + 0xf9, 0xc0, 0x6f, 0x9e, 0x08, 0x50, 0xa5, 0xf5, 0xe5, 0xa9, 0x3a, 0x03, 0x65, 0x59, 0x29, 0xc6, + 0xaa, 0x3c, 0xd1, 0x7c, 0x80, 0xf7, 0x70, 0x28, 0x14, 0x3d, 0x84, 0x25, 0xdb, 0xe9, 0xd2, 0x3d, + 0xda, 0xa3, 0x26, 0x73, 0x5c, 0x11, 0xbb, 0x4a, 0x6b, 0x23, 0xa9, 0x84, 0x57, 0x0a, 0xf7, 0xfe, + 0x4e, 0x82, 0xce, 0x58, 0x1b, 0x0d, 0xeb, 0x4b, 0x49, 0x08, 0x4e, 0xc9, 0xd1, 0xfe, 0xa1, 0x42, + 0xc5, 0x20, 0x9e, 0x65, 0x06, 0x1a, 0xd1, 0x0f, 0x00, 0x08, 0x63, 0xae, 0xb5, 0xef, 0x33, 0x71, + 0x14, 0x1e, 0xf5, 0xdb, 0x53, 0x8f, 0x92, 0xe0, 0xd6, 0xdb, 0x11, 0xeb, 0xb6, 0xcd, 0xdc, 0x53, + 0xe3, 0x5a, 0x18, 0xfe, 0x18, 0xf1, 0xc1, 0x3f, 0xeb, 0xcb, 0x6f, 0xfb, 0xa4, 0x67, 0x1d, 0x58, + 0xb4, 0xbb, 0x43, 0xfa, 0x14, 0x27, 0x14, 0x22, 0x1f, 0x16, 0x4c, 0x32, 0x20, 0xa6, 0xc5, 0x4e, + 0xab, 0x25, 0xa1, 0xfc, 0x56, 0x61, 0xe5, 0x9b, 0x92, 0x31, 0x50, 0x7d, 0x55, 0xaa, 0x5e, 0x08, + 0xc1, 0xe3, 0x8a, 0x23, 0x55, 0xeb, 0xc7, 0xb0, 0x9a, 0x31, 0x1d, 0xad, 0x81, 0x7a, 0x4c, 0x4f, + 0x83, 0x6a, 0xc3, 0xfc, 0x27, 0x32, 0x60, 0xf6, 0x84, 0xf4, 0x7c, 0x2a, 0x6a, 0xab, 0xd2, 0xba, + 0x51, 0x24, 0xc0, 0xa1, 0x50, 0x1c, 0xb0, 0xde, 0x29, 0xdd, 0x56, 0xd6, 0x8f, 0x60, 0x39, 0x65, + 0xea, 0x04, 0x55, 0xed, 0xb4, 0xaa, 0xd7, 0x0a, 0xa8, 0x0a, 0x45, 0x26, 0x34, 0x69, 0x77, 0xe1, + 0xc2, 0xe6, 0xf6, 0x3d, 0xd9, 0x47, 0x64, 0xc4, 0x51, 0x0b, 0x80, 0xbe, 0x3f, 0x70, 0xa9, 0xc7, + 0x6b, 0x48, 0x76, 0x93, 0xa8, 0x4c, 0xb7, 0x23, 0x0c, 0x4e, 0x50, 0x69, 0x3e, 0xc8, 0xee, 0xc0, + 0xfb, 0x8b, 0x4d, 0xfa, 0x54, 0xf2, 0x45, 0x15, 0x28, 0xfc, 0x29, 0x30, 0xa8, 0x03, 0xb3, 0xfb, + 0x3c, 0x2a, 0xd2, 0xf6, 0x46, 0xd1, 0xf8, 0x19, 0x8b, 0xa3, 0x61, 0x7d, 0x56, 0x00, 0x70, 0x20, + 0x41, 0xfb, 0xb0, 0x04, 0x5f, 0xc8, 0x56, 0xca, 0xa6, 0x63, 0x1f, 0x58, 0x87, 0xbe, 0x2b, 0x3e, + 0xd0, 0xd7, 0x60, 0x2e, 0x90, 0x28, 0x0d, 0x6a, 0x84, 0xcd, 0x6c, 0x4f, 0x40, 0x5f, 0x0c, 0xeb, + 0xaf, 0x64, 0x59, 0x03, 0x0c, 0x96, 0x7c, 0xa8, 0x01, 0x0b, 0x2e, 0x7d, 0xe2, 0x53, 0x8f, 0x79, + 0x22, 0xe3, 0x16, 0x8d, 0x25, 0x9e, 0x35, 0x58, 0xc2, 0x70, 0x84, 0x45, 0x3f, 0x84, 0x8b, 0x41, + 0x35, 0xa6, 0x4c, 0x90, 0x95, 0xf8, 0x46, 0x91, 0x10, 0x25, 0xf9, 0x8c, 
0x2b, 0xd2, 0xd4, 0x8b, + 0x13, 0x90, 0x78, 0x92, 0x26, 0xed, 0x13, 0x05, 0x5e, 0x99, 0xdc, 0x38, 0x10, 0x85, 0x79, 0x57, + 0xfc, 0x0a, 0x6b, 0xf6, 0x4e, 0x01, 0x7b, 0xe4, 0x19, 0xf3, 0xbb, 0x50, 0xf0, 0xed, 0xe1, 0x50, + 0x36, 0xda, 0x87, 0x39, 0x53, 0x98, 0x24, 0x8b, 0xf3, 0xce, 0x99, 0x9a, 0x5c, 0xfa, 0xfc, 0xd1, + 0xdc, 0x09, 0xc0, 0x58, 0x4a, 0xd6, 0x7e, 0xa9, 0xc0, 0x6a, 0xa6, 0x7a, 0x50, 0x0d, 0x54, 0xcb, + 0x66, 0x22, 0xa3, 0xd4, 0x20, 0x3e, 0x1d, 0x9b, 0x3d, 0xe4, 0x79, 0x8e, 0x39, 0x02, 0x5d, 0x85, + 0xf2, 0x3e, 0x9f, 0x7a, 0x3c, 0x16, 0x0b, 0xc6, 0xf2, 0x68, 0x58, 0x5f, 0x34, 0x1c, 0xa7, 0x17, + 0x50, 0x08, 0x14, 0xfa, 0x12, 0xcc, 0x79, 0xcc, 0xb5, 0xec, 0xc3, 0x6a, 0x59, 0x64, 0x8a, 0xe8, + 0xf1, 0x7b, 0x02, 0x12, 0x90, 0x49, 0x34, 0xba, 0x0e, 0xf3, 0x27, 0xd4, 0x15, 0xc5, 0x31, 0x2b, + 0x28, 0x45, 0x0b, 0x7d, 0x18, 0x80, 0x02, 0xd2, 0x90, 0x40, 0xa3, 0xb0, 0x92, 0xae, 0x3e, 0xb4, + 0x17, 0x56, 0xae, 0x32, 0x36, 0x79, 0xc6, 0x06, 0x66, 0xec, 0xb1, 0xb7, 0x7d, 0x62, 0x33, 0x8b, + 0x9d, 0x1a, 0xcb, 0xd2, 0x29, 0xb3, 0x81, 0xa2, 0x40, 0x96, 0xf6, 0xab, 0x12, 0x54, 0xa4, 0x9e, + 0x1e, 0xb1, 0xfa, 0xe8, 0x51, 0x22, 0x67, 0x83, 0x70, 0x5f, 0x2f, 0x1e, 0x6e, 0x63, 0x2d, 0xec, + 0x8c, 0x13, 0x72, 0xbc, 0x0b, 0x15, 0xd3, 0xb1, 0x3d, 0xe6, 0x12, 0xcb, 0x96, 0x05, 0x91, 0x1e, + 0xcb, 0x53, 0x72, 0x5b, 0x72, 0x19, 0x17, 0xa5, 0xfc, 0x4a, 0x0c, 0xf3, 0x70, 0x52, 0x2c, 0x7a, + 0x1c, 0xa5, 0x91, 0x2a, 0x14, 0x7c, 0xb5, 0x88, 0x02, 0x7e, 0xf2, 0x62, 0x19, 0xf4, 0x47, 0x05, + 0xaa, 0x79, 0x4c, 0xa9, 0x7a, 0x57, 0xfe, 0x97, 0x7a, 0x2f, 0x9d, 0x5b, 0xbd, 0xff, 0x4e, 0x49, + 0x84, 0xdd, 0xf3, 0xd0, 0xf7, 0x60, 0x81, 0x6f, 0x58, 0x62, 0x61, 0x52, 0xc6, 0xac, 0x98, 0xb2, + 0x8f, 0x3d, 0xd8, 0xff, 0x3e, 0x35, 0xd9, 0x7d, 0xca, 0x48, 0xdc, 0xe9, 0x63, 0x18, 0x8e, 0xa4, + 0xa2, 0x1d, 0x28, 0x7b, 0x03, 0x6a, 0x9e, 0x61, 0xc2, 0x09, 0xcb, 0xf6, 0x06, 0xd4, 0x8c, 0x67, + 0x01, 0xff, 0xc2, 0x42, 0x8e, 0xf6, 0xd3, 0x64, 0x24, 0x3c, 0x2f, 0x1d, 0x89, 0x1c, 0xff, 0x2a, + 0xe7, 0xe6, 0xdf, 0xdf, 0x46, 0x9d, 0x46, 0x58, 0x77, 0xcf, 0xf2, 0x18, 0x7a, 0x6f, 0xcc, 0xc7, + 0x7a, 0x31, 0x1f, 0x73, 0x6e, 0xe1, 0xe1, 0xa8, 0xbc, 0x42, 0x48, 0xc2, 0xbf, 0xf7, 0x61, 0xd6, + 0x62, 0xb4, 0x1f, 0x16, 0x56, 0xa3, 0xa8, 0x83, 0xe3, 0xbe, 0xd0, 0xe1, 0xec, 0x38, 0x90, 0xa2, + 0xfd, 0x25, 0x7d, 0x00, 0xee, 0x78, 0xf4, 0x1e, 0x2c, 0x7a, 0x72, 0xd4, 0x87, 0xcd, 0xa1, 0xc8, + 0xfa, 0x10, 0x2d, 0x8c, 0x17, 0xa4, 0xa6, 0xc5, 0x10, 0xe2, 0xe1, 0x58, 0x60, 0xa2, 0x72, 0x4b, + 0x67, 0xa9, 0xdc, 0x4c, 0xe8, 0x73, 0x2b, 0xf7, 0x09, 0x4c, 0x8a, 0x1e, 0x7a, 0x17, 0xe6, 0x9c, + 0x01, 0x79, 0x12, 0x75, 0xd5, 0xe9, 0x3b, 0xe1, 0x03, 0x41, 0x3a, 0x29, 0x45, 0x80, 0xab, 0x0c, + 0xd0, 0x58, 0x4a, 0xd4, 0x7e, 0xac, 0xc0, 0x5a, 0xb6, 0x85, 0x9d, 0xa1, 0x49, 0xec, 0xc2, 0x4a, + 0x9f, 0x30, 0xf3, 0x28, 0x9a, 0x55, 0xf2, 0xe6, 0xd5, 0x18, 0x0d, 0xeb, 0x2b, 0xf7, 0x53, 0x98, + 0x17, 0xc3, 0x3a, 0x7a, 0xcb, 0xef, 0xf5, 0x4e, 0xd3, 0x5b, 0x68, 0x86, 0x5f, 0xfb, 0x40, 0x85, + 0xe5, 0x54, 0xc3, 0x2e, 0xb0, 0x73, 0xb5, 0x61, 0xb5, 0x1b, 0xfb, 0x9a, 0x23, 0xa4, 0x19, 0x9f, + 0x97, 0xc4, 0xc9, 0x34, 0x11, 0x7c, 0x59, 0xfa, 0x74, 0xde, 0xa8, 0x9f, 0x76, 0xde, 0x3c, 0x84, + 0x15, 0x12, 0xed, 0x01, 0xf7, 0x9d, 0x2e, 0x95, 0x53, 0x58, 0x97, 0x5c, 0x2b, 0xed, 0x14, 0xf6, + 0xc5, 0xb0, 0xfe, 0xb9, 0xec, 0xf6, 0xc0, 0xe1, 0x38, 0x23, 0x05, 0x5d, 0x83, 0x59, 0xd3, 0xf1, + 0x6d, 0x26, 0x46, 0xb5, 0x1a, 0x97, 0xc9, 0x26, 0x07, 0xe2, 0x00, 0x87, 0x6e, 0x42, 0x85, 0x74, + 0xfb, 0x96, 0xdd, 0x36, 0x4d, 0xea, 0x79, 0xe2, 0x4e, 0xb8, 0x10, 0xcc, 0xff, 0x76, 0x0c, 0xc6, + 
0x49, 0x1a, 0xed, 0xdf, 0x4a, 0xb8, 0x79, 0xe6, 0x2c, 0x49, 0xe8, 0x55, 0xbe, 0x71, 0x09, 0x94, + 0x8c, 0x4b, 0x62, 0x6b, 0x12, 0x60, 0x1c, 0xe2, 0x13, 0x77, 0xf7, 0x52, 0xa1, 0xbb, 0xbb, 0x5a, + 0xe0, 0xee, 0x5e, 0x9e, 0x7a, 0x77, 0xcf, 0x9c, 0x78, 0xb6, 0xc0, 0x89, 0xbf, 0x13, 0xae, 0x32, + 0xd1, 0x45, 0xa1, 0x03, 0xaa, 0x49, 0x7b, 0x13, 0xba, 0xe0, 0x78, 0x2e, 0x8c, 0xdd, 0x32, 0x8c, + 0xf9, 0xd1, 0xb0, 0xae, 0x6e, 0x6e, 0xdf, 0xc3, 0x5c, 0x86, 0xf6, 0x6b, 0x05, 0x2e, 0x8c, 0x5d, + 0xb3, 0xd1, 0x9b, 0xb0, 0x6c, 0xd9, 0x8c, 0xba, 0x07, 0xc4, 0xa4, 0x3b, 0x71, 0x82, 0x5f, 0x92, + 0x87, 0x5a, 0xee, 0x24, 0x91, 0x38, 0x4d, 0x8b, 0x2e, 0x83, 0x6a, 0x0d, 0xc2, 0x95, 0x5d, 0x68, + 0xeb, 0xec, 0x7a, 0x98, 0xc3, 0x78, 0x35, 0x1c, 0x11, 0xb7, 0xfb, 0x94, 0xb8, 0xb4, 0xdd, 0xed, + 0xf2, 0x3b, 0x8c, 0x74, 0x69, 0x54, 0x0d, 0x5f, 0x4f, 0xa3, 0x71, 0x96, 0x5e, 0xfb, 0x85, 0x02, + 0x97, 0x73, 0xfb, 0x48, 0xe1, 0xc7, 0x18, 0x02, 0x30, 0x20, 0x2e, 0xe9, 0x53, 0x46, 0x5d, 0x4f, + 0x0e, 0xd5, 0x33, 0xbe, 0x71, 0x44, 0xf3, 0x7a, 0x37, 0x12, 0x84, 0x13, 0x42, 0xb5, 0x9f, 0x95, + 0x60, 0x19, 0xcb, 0x70, 0x04, 0xcb, 0xe1, 0xff, 0x7f, 0x4b, 0xd8, 0x4d, 0x6d, 0x09, 0xd3, 0x33, + 0x23, 0x65, 0x5b, 0xde, 0x9e, 0x80, 0x1e, 0xf1, 0xe5, 0x9c, 0x30, 0xdf, 0x2b, 0x74, 0x9b, 0x4a, + 0xcb, 0x14, 0x7c, 0x71, 0x08, 0x82, 0x6f, 0x2c, 0xe5, 0x69, 0x23, 0x05, 0x6a, 0x29, 0x7a, 0xde, + 0xe5, 0xfd, 0x3e, 0x75, 0x31, 0x3d, 0xa0, 0x2e, 0xb5, 0x4d, 0x8a, 0x6e, 0xc0, 0x02, 0x19, 0x58, + 0x77, 0x5d, 0xc7, 0x1f, 0xc8, 0x78, 0x46, 0x23, 0xbc, 0xbd, 0xdb, 0x11, 0x70, 0x1c, 0x51, 0x70, + 0xea, 0xd0, 0x20, 0x99, 0x55, 0x89, 0x7d, 0x3a, 0x80, 0xe3, 0x88, 0x22, 0x6a, 0xdd, 0xe5, 0xdc, + 0xd6, 0x6d, 0x80, 0xea, 0x5b, 0x5d, 0x79, 0xd5, 0x78, 0x43, 0x12, 0xa8, 0xef, 0x74, 0xb6, 0x5e, + 0x0c, 0xeb, 0x57, 0xf3, 0x9e, 0x11, 0xd9, 0xe9, 0x80, 0x7a, 0xfa, 0x3b, 0x9d, 0x2d, 0xcc, 0x99, + 0xb5, 0xdf, 0x2b, 0x70, 0x21, 0x75, 0xc8, 0x73, 0x58, 0x65, 0x1e, 0xa4, 0x57, 0x99, 0xeb, 0xc5, + 0x23, 0x96, 0xb3, 0xcc, 0x1c, 0x65, 0xce, 0x20, 0xb6, 0x99, 0xbd, 0xec, 0xb3, 0x5a, 0xa3, 0xe8, + 0x55, 0x21, 0xff, 0x2d, 0x4d, 0xfb, 0x53, 0x09, 0x2e, 0x4e, 0xc8, 0x21, 0xf4, 0x18, 0x20, 0x1e, + 0x2f, 0x52, 0xdf, 0xf4, 0xbb, 0xcf, 0xd8, 0xd5, 0x79, 0x45, 0x3c, 0x76, 0xc5, 0xd0, 0x84, 0x40, + 0xe4, 0x42, 0xc5, 0xa5, 0x1e, 0x75, 0x4f, 0x68, 0xf7, 0x2d, 0xc7, 0x95, 0x7e, 0x7b, 0xb3, 0xb8, + 0xdf, 0xc6, 0x32, 0x37, 0xbe, 0x69, 0xe1, 0x58, 0x2e, 0x4e, 0x2a, 0x41, 0x8f, 0x63, 0xff, 0x05, + 0x2f, 0xb8, 0xad, 0x22, 0xe7, 0x49, 0xbf, 0x3d, 0x4f, 0xf1, 0xe4, 0xdf, 0x15, 0xb8, 0x94, 0xb2, + 0xf1, 0x5b, 0xb4, 0x3f, 0xe8, 0x11, 0x46, 0xcf, 0xa1, 0x0b, 0x3d, 0x4a, 0x75, 0xa1, 0x5b, 0xc5, + 0xfd, 0x18, 0xda, 0x98, 0x7b, 0x6b, 0xf9, 0x9b, 0x02, 0x97, 0x27, 0x72, 0x9c, 0x43, 0x59, 0x7d, + 0x3b, 0x5d, 0x56, 0xad, 0xb3, 0x1f, 0x2b, 0xa7, 0xbc, 0xfe, 0x9a, 0x77, 0x28, 0x51, 0x67, 0x9f, + 0xc1, 0xa1, 0xa1, 0xfd, 0x46, 0x81, 0xa5, 0x90, 0x92, 0xef, 0x48, 0x05, 0xf6, 0xe4, 0x16, 0x80, + 0xfc, 0xcb, 0x25, 0xbc, 0xc9, 0xab, 0xb1, 0xd9, 0x77, 0x23, 0x0c, 0x4e, 0x50, 0xa1, 0x6f, 0x00, + 0x0a, 0x0d, 0xdc, 0xeb, 0x89, 0x55, 0x80, 0xef, 0x9b, 0xaa, 0xe0, 0x5d, 0x97, 0xbc, 0x08, 0x8f, + 0x51, 0xe0, 0x09, 0x5c, 0xda, 0x1f, 0x94, 0x78, 0x5a, 0x0b, 0xf0, 0x4b, 0xea, 0x78, 0x61, 0x5b, + 0xae, 0xe3, 0x93, 0xe3, 0x46, 0x50, 0xbe, 0xac, 0xe3, 0x46, 0x18, 0x97, 0x53, 0x0f, 0x1f, 0xaa, + 0x99, 0x43, 0x88, 0x3a, 0x28, 0xba, 0xd9, 0x7d, 0x33, 0xf1, 0x37, 0x5b, 0xa5, 0xf5, 0x6a, 0x21, + 0x6b, 0x78, 0x8e, 0x4e, 0xdc, 0xea, 0x6f, 0xc0, 0x82, 0xed, 0x74, 0x83, 0x15, 0x38, 0xb3, 0x52, + 0xec, 0x48, 0x38, 0x8e, 
0x28, 0xc6, 0xfe, 0x09, 0x2a, 0x7f, 0x3a, 0xff, 0x04, 0x89, 0x35, 0xa8, + 0xd7, 0xe3, 0x04, 0xe1, 0x85, 0x21, 0x5e, 0x83, 0x24, 0x1c, 0x47, 0x14, 0x68, 0x27, 0x1e, 0x2c, + 0x73, 0x22, 0x22, 0xd7, 0x0a, 0x0c, 0xe6, 0xfc, 0x49, 0x62, 0xb4, 0x9f, 0x3d, 0xaf, 0xcd, 0x7c, + 0xf4, 0xbc, 0x36, 0xf3, 0xf1, 0xf3, 0xda, 0xcc, 0x8f, 0x46, 0x35, 0xe5, 0xd9, 0xa8, 0xa6, 0x7c, + 0x34, 0xaa, 0x29, 0x1f, 0x8f, 0x6a, 0xca, 0xbf, 0x46, 0x35, 0xe5, 0x27, 0x9f, 0xd4, 0x66, 0xde, + 0xbd, 0x32, 0xe5, 0x1f, 0xe9, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x26, 0xe2, 0x5c, 0xf8, 0xaf, + 0x1e, 0x00, 0x00, +} + +func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocatedDeviceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocatedDeviceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NetworkData != nil { + { + size, err := m.NetworkData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x1a + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BasicDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BasicDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Attributes) > 0 { + keysForAttributes := make([]string, 0, len(m.Attributes)) + for k := range m.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAttributes[iNdEx]) + copy(dAtA[i:], keysForAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CELDeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CELDeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Device) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Device) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Basic != nil { + { + size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceAttribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceAttribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VersionValue != nil { + i -= len(*m.VersionValue) + copy(dAtA[i:], *m.VersionValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VersionValue))) + i-- + dAtA[i] = 0x2a + } + if m.StringValue != nil { + i -= len(*m.StringValue) + copy(dAtA[i:], *m.StringValue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StringValue))) + i-- + dAtA[i] = 0x22 + } + if m.BoolValue != nil { + i-- + if *m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.IntValue != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntValue)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *DeviceCapacity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceCapacity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceCapacity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := 
m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClaimConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClaimConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClaimConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DeviceConfiguration.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Config) > 0 { + for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Config[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Opaque != nil { + { + size, err := m.Opaque.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeviceConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MatchAttribute != nil { + i -= len(*m.MatchAttribute) + copy(dAtA[i:], *m.MatchAttribute) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchAttribute))) + i-- + dAtA[i] = 0x12 + } + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Requests[iNdEx]) + copy(dAtA[i:], m.Requests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Requests[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeviceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x28 + i -= len(m.AllocationMode) + copy(dAtA[i:], m.AllocationMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllocationMode))) + i-- + dAtA[i] = 0x22 + if len(m.Selectors) > 0 { + for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DeviceClassName) + copy(dAtA[i:], m.DeviceClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeviceClassName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceRequestAllocationResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceRequestAllocationResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AdminAccess != nil { + i-- + if *m.AdminAccess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Device) + copy(dAtA[i:], m.Device) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Device))) + i-- + dAtA[i] = 0x22 + i -= len(m.Pool) + copy(dAtA[i:], m.Pool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Pool))) + i-- + dAtA[i] = 0x1a + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0x12 + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeviceSelector) Marshal() (dAtA []byte, err error) { 
+ size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeviceSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeviceSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CEL != nil { + { + size, err := m.CEL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkDeviceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkDeviceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkDeviceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.HardwareAddress) + copy(dAtA[i:], m.HardwareAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HardwareAddress))) + i-- + dAtA[i] = 0x1a + if len(m.IPs) > 0 { + for iNdEx := len(m.IPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IPs[iNdEx]) + copy(dAtA[i:], m.IPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.InterfaceName) + copy(dAtA[i:], m.InterfaceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InterfaceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OpaqueDeviceConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpaqueDeviceConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimConsumerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimConsumerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimConsumerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ReservedFor) > 0 { + for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReservedFor[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Allocation != nil { + { + size, err := m.Allocation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceClaimTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceClaimTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceClaimTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourcePool) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourcePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourcePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceSliceCount)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSlice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSlice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSliceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceSliceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSliceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSliceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i-- + if m.AllNodes { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.NodeSelector != nil { + { + size, err := 
m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllocatedDeviceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NetworkData != nil { + l = m.NetworkData.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BasicDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *CELDeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Device) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Basic != nil { + l = m.Basic.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceAllocationConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Source) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceAllocationResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceAttribute) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IntValue != nil { + n += 1 + sovGenerated(uint64(*m.IntValue)) + } + if m.BoolValue != nil { + n += 2 + } + if m.StringValue != nil { + l = len(*m.StringValue) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VersionValue != nil { + l = len(*m.VersionValue) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceCapacity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClaimConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DeviceConfiguration.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeviceClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceClassSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Config) > 0 { + for _, e := range m.Config { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Opaque != nil { + l = m.Opaque.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, s := range m.Requests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchAttribute != nil { + l = len(*m.MatchAttribute) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeviceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeviceClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Selectors) > 0 { + for _, e := range m.Selectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.AllocationMode) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + if m.AdminAccess != nil { + n += 2 + } + return n +} + +func (m *DeviceRequestAllocationResult) Size() (n int) { + if m == nil { + return 0 + } 
+ var l int + _ = l + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Pool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Device) + n += 1 + l + sovGenerated(uint64(l)) + if m.AdminAccess != nil { + n += 2 + } + return n +} + +func (m *DeviceSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CEL != nil { + l = m.CEL.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkDeviceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.InterfaceName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.IPs) > 0 { + for _, s := range m.IPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.HardwareAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OpaqueDeviceConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Parameters.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimConsumerReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Devices.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Allocation != nil { + l = m.Allocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ReservedFor) > 0 { + for _, e := range m.ReservedFor { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceClaimTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceClaimTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourcePool) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + n += 1 + sovGenerated(uint64(m.ResourceSliceCount)) + return n +} + +func (m *ResourceSlice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceSliceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceSliceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Pool.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllocatedDeviceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&AllocatedDeviceStatus{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `NetworkData:` + strings.Replace(this.NetworkData.String(), "NetworkDeviceData", "NetworkDeviceData", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllocationResult{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BasicDevice) String() string { + if this == nil { + return "nil" + } + keysForAttributes := make([]string, 0, len(this.Attributes)) + for k := range this.Attributes { + keysForAttributes = append(keysForAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes) + mapStringForAttributes := "map[QualifiedName]DeviceAttribute{" + for _, k := range keysForAttributes { + mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)]) + } + mapStringForAttributes += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "map[QualifiedName]DeviceCapacity{" + for _, k := range keysForCapacity { + mapStringForCapacity += 
fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&BasicDevice{`, + `Attributes:` + mapStringForAttributes + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *CELDeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CELDeviceSelector{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *Device) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Device{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Basic:` + strings.Replace(this.Basic.String(), "BasicDevice", "BasicDevice", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAllocationConfiguration{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAllocationResult) String() string { + if this == nil { + return "nil" + } + repeatedStringForResults := "[]DeviceRequestAllocationResult{" + for _, f := range this.Results { + repeatedStringForResults += strings.Replace(strings.Replace(f.String(), "DeviceRequestAllocationResult", "DeviceRequestAllocationResult", 1), `&`, ``, 1) + "," + } + repeatedStringForResults += "}" + repeatedStringForConfig := "[]DeviceAllocationConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceAllocationConfiguration", "DeviceAllocationConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceAllocationResult{`, + `Results:` + repeatedStringForResults + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceAttribute) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceAttribute{`, + `IntValue:` + valueToStringGenerated(this.IntValue) + `,`, + `BoolValue:` + valueToStringGenerated(this.BoolValue) + `,`, + `StringValue:` + valueToStringGenerated(this.StringValue) + `,`, + `VersionValue:` + valueToStringGenerated(this.VersionValue) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceCapacity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceCapacity{`, + `Value:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaim) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequests := "[]DeviceRequest{" + for _, f := range this.Requests { + repeatedStringForRequests += strings.Replace(strings.Replace(f.String(), "DeviceRequest", "DeviceRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForRequests += "}" + repeatedStringForConstraints := "[]DeviceConstraint{" + for _, f := range this.Constraints { + repeatedStringForConstraints += strings.Replace(strings.Replace(f.String(), "DeviceConstraint", "DeviceConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForConstraints += "}" + repeatedStringForConfig := "[]DeviceClaimConfiguration{" + for _, f := range this.Config { + 
repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClaimConfiguration", "DeviceClaimConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClaim{`, + `Requests:` + repeatedStringForRequests + `,`, + `Constraints:` + repeatedStringForConstraints + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClaimConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClaimConfiguration{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceClassSpec", "DeviceClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceClassConfiguration{`, + `DeviceConfiguration:` + strings.Replace(strings.Replace(this.DeviceConfiguration.String(), "DeviceConfiguration", "DeviceConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DeviceClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeviceClass", "DeviceClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeviceClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeviceClassSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + repeatedStringForConfig := "[]DeviceClassConfiguration{" + for _, f := range this.Config { + repeatedStringForConfig += strings.Replace(strings.Replace(f.String(), "DeviceClassConfiguration", "DeviceClassConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForConfig += "}" + s := strings.Join([]string{`&DeviceClassSpec{`, + `Selectors:` + repeatedStringForSelectors + `,`, + `Config:` + repeatedStringForConfig + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceConfiguration{`, + `Opaque:` + strings.Replace(this.Opaque.String(), "OpaqueDeviceConfiguration", "OpaqueDeviceConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceConstraint{`, + `Requests:` + fmt.Sprintf("%v", this.Requests) + `,`, + `MatchAttribute:` + valueToStringGenerated(this.MatchAttribute) + `,`, + `}`, + }, "") + return 
s +} +func (this *DeviceRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]DeviceSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&DeviceRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DeviceClassName:` + fmt.Sprintf("%v", this.DeviceClassName) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `AllocationMode:` + fmt.Sprintf("%v", this.AllocationMode) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceRequestAllocationResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceRequestAllocationResult{`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + fmt.Sprintf("%v", this.Pool) + `,`, + `Device:` + fmt.Sprintf("%v", this.Device) + `,`, + `AdminAccess:` + valueToStringGenerated(this.AdminAccess) + `,`, + `}`, + }, "") + return s +} +func (this *DeviceSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeviceSelector{`, + `CEL:` + strings.Replace(this.CEL.String(), "CELDeviceSelector", "CELDeviceSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkDeviceData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkDeviceData{`, + `InterfaceName:` + fmt.Sprintf("%v", this.InterfaceName) + `,`, + `IPs:` + fmt.Sprintf("%v", this.IPs) + `,`, + `HardwareAddress:` + fmt.Sprintf("%v", this.HardwareAddress) + `,`, + `}`, + }, "") + return s +} +func (this *OpaqueDeviceConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OpaqueDeviceConfiguration{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceClaimStatus", "ResourceClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimConsumerReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimConsumerReference{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" 
+ s := strings.Join([]string{`&ResourceClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimSpec{`, + `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForReservedFor := "[]ResourceClaimConsumerReference{" + for _, f := range this.ReservedFor { + repeatedStringForReservedFor += strings.Replace(strings.Replace(f.String(), "ResourceClaimConsumerReference", "ResourceClaimConsumerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForReservedFor += "}" + repeatedStringForDevices := "[]AllocatedDeviceStatus{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "AllocatedDeviceStatus", "AllocatedDeviceStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += "}" + s := strings.Join([]string{`&ResourceClaimStatus{`, + `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, + `ReservedFor:` + repeatedStringForReservedFor + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimTemplateSpec", "ResourceClaimTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceClaimTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceClaimTemplate", "ResourceClaimTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceClaimTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceClaimTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceClaimTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceClaimSpec", "ResourceClaimSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourcePool) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourcePool{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `ResourceSliceCount:` + fmt.Sprintf("%v", this.ResourceSliceCount) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSlice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceSlice{`, 
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceSliceSpec", "ResourceSliceSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceSlice{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceSlice", "ResourceSlice", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceSliceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceSliceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDevices := "[]Device{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(strings.Replace(f.String(), "Device", "Device", 1), `&`, ``, 1) + "," + } + repeatedStringForDevices += "}" + s := strings.Join([]string{`&ResourceSliceSpec{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `Pool:` + strings.Replace(strings.Replace(this.Pool.String(), "ResourcePool", "ResourcePool", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `AllNodes:` + fmt.Sprintf("%v", this.AllNodes) + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllocatedDeviceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocatedDeviceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocatedDeviceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NetworkData == nil { + m.NetworkData = &NetworkDeviceData{} + } + if err := m.NetworkData.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { 
+ return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BasicDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BasicDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BasicDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
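Every Unmarshal above begins each field the same way: decode one varint (the tag), then split it into a field number (wire >> 3) and a wire type (wire & 0x7), where type 0 is a varint scalar, type 2 is length-delimited (strings, nested messages, maps), and type 4 (end-group) is rejected. A minimal standalone sketch of that split, with illustrative names:

// Illustrative sketch, not part of the vendored file: the tag decoding that every
// generated Unmarshal above performs before dispatching on the field number.
package main

import (
	"errors"
	"fmt"
)

// readVarint decodes a base-128 varint starting at data[i] and returns the
// value together with the index of the next unread byte.
func readVarint(data []byte, i int) (uint64, int, error) {
	var x uint64
	for shift := uint(0); shift < 64; shift += 7 {
		if i >= len(data) {
			return 0, i, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return x, i, nil
		}
	}
	return 0, i, errors.New("varint overflow")
}

func main() {
	// 0x12 is the tag for field 2 with wire type 2 (length-delimited),
	// e.g. the Pool field of AllocatedDeviceStatus above.
	wire, _, _ := readVarint([]byte{0x12}, 0)
	fmt.Println(wire>>3, wire&0x7) // prints: 2 2
}

Unknown field numbers fall through to the default branch, where skipGenerated advances past the payload, so decoders built from an older schema tolerate fields added later.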
} + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[QualifiedName]DeviceAttribute) + } + var mapkey QualifiedName + mapvalue := &DeviceAttribute{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceAttribute{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(map[QualifiedName]DeviceCapacity) + } + var mapkey QualifiedName + mapvalue := &DeviceCapacity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = QualifiedName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &DeviceCapacity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[QualifiedName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CELDeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CELDeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CELDeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } 
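The Attributes and Capacity cases above show how a protobuf map field is laid out on the wire: a repeated entry message whose key is field 1 and whose value is field 2, decoded here into mapkey/mapvalue and then stored with m.Attributes[mapkey] = *mapvalue. A hand-rolled sketch of one such entry; it is standalone and illustrative, assumes the field numbers seen in the decoder above, and keeps both lengths under 128 bytes so each length varint is a single byte:

// Illustrative sketch, not part of the vendored file: builds one wire-format map
// entry, with the key in field 1 and the value message in field 2, both length-delimited.
package main

import "fmt"

func encodeEntry(key string, value []byte) []byte {
	var out []byte
	out = append(out, 0x0A, byte(len(key)))   // tag 0x0A = field 1, wire type 2; then key length
	out = append(out, key...)
	out = append(out, 0x12, byte(len(value))) // tag 0x12 = field 2, wire type 2; then value length
	return append(out, value...)
}

func main() {
	// The value bytes encode a DeviceAttribute with StringValue "1.0"
	// (field 4, wire type 2, length 3), matching the decoder above.
	entry := encodeEntry("driverVersion", []byte{0x22, 0x03, '1', '.', '0'})
	fmt.Printf("% x\n", entry)
}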
+ if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Device) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Device: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Basic == nil { + m.Basic = &BasicDevice{} + } + if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = AllocationConfigSource(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, DeviceRequestAllocationResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceAllocationConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceAttribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceAttribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceAttribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntValue = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BoolValue = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StringValue = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field VersionValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.VersionValue = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceCapacity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceCapacity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceCapacity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, DeviceRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, DeviceConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClaimConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClaimConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClaimConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClaimConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeviceConfiguration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DeviceClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config, DeviceClassConfiguration{}) + if err := m.Config[len(m.Config)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Opaque", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Opaque == nil { + m.Opaque = &OpaqueDeviceConfiguration{} + } + if err := m.Opaque.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchAttribute", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FullyQualifiedName(dAtA[iNdEx:postIndex]) + m.MatchAttribute = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeviceClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, DeviceSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllocationMode", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllocationMode = DeviceAllocationMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceRequestAllocationResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceRequestAllocationResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Device = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AdminAccess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AdminAccess = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeviceSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeviceSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeviceSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CEL", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CEL == nil { + m.CEL = &CELDeviceSelector{} + } + if err := m.CEL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkDeviceData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkDeviceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkDeviceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InterfaceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InterfaceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPs = append(m.IPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HardwareAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HardwareAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 
0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpaqueDeviceConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimConsumerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimConsumerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Devices.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: ResourceClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocation == nil { + m.Allocation = &AllocationResult{} + } + if err := m.Allocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReservedFor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReservedFor = append(m.ReservedFor, ResourceClaimConsumerReference{}) + if err := m.ReservedFor[len(m.ReservedFor)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, AllocatedDeviceStatus{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceClaimTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceClaimTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceClaimTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourcePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
ResourcePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourcePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSliceCount", wireType) + } + m.ResourceSliceCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResourceSliceCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSlice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSlice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSlice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceSlice{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSliceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSliceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSliceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllNodes", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllNodes = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, Device{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/constraint/vendor/k8s.io/api/resource/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/resource/v1beta1/generated.proto new file mode 100644 index 000000000..4ea13e033 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1beta1/generated.proto @@ -0,0 +1,892 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.resource.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/resource/v1beta1"; + +// AllocatedDeviceStatus contains the status of an allocated device, if the +// driver chooses to report it. This may include driver-specific information. +message AllocatedDeviceStatus { + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 1; + + // This name together with the driver name and the device name field + // identify which device was allocated (`<driver name>/<pool name>/<device name>`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + optional string pool = 2; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + optional string device = 3; + + // Conditions contains the latest observation of the device's state. + // If the device has been configured according to the class and claim + // config references, the `Ready` condition should be True. + // + // +optional + // +listType=map + // +listMapKey=type + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 4; + + // Data contains arbitrary driver-specific data. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +optional + optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 5; + + // NetworkData contains network-related information specific to the device. + // + // +optional + optional NetworkDeviceData networkData = 6; +} + +// AllocationResult contains attributes of an allocated resource. +message AllocationResult { + // Devices is the result of allocating devices. + // + // +optional + optional DeviceAllocationResult devices = 1; + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. + // + // +optional + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3; +} + +// BasicDevice defines one device instance. +message BasicDevice { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map<string, DeviceAttribute> attributes = 1; + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + map<string, DeviceCapacity> capacity = 2; +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +message CELDeviceSelector { + // Expression is a CEL expression which evaluates a single device.
It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // The length of the expression must be smaller or equal to 10 Ki. The + // cost of evaluating it is also limited based on the estimated number + // of logical steps. + // + // +required + optional string expression = 1; +} + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +message Device { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + optional string name = 1; + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + optional BasicDevice basic = 2; +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. +message DeviceAllocationConfiguration { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. + // + // +required + optional string source = 1; + + // Requests lists the names of requests where the configuration applies. + // If empty, its applies to all requests. 
+ // + // +optional + // +listType=atomic + repeated string requests = 2; + + optional DeviceConfiguration deviceConfiguration = 3; +} + +// DeviceAllocationResult is the result of allocating devices. +message DeviceAllocationResult { + // Results lists all allocated devices. + // + // +optional + // +listType=atomic + repeated DeviceRequestAllocationResult results = 1; + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + repeated DeviceAllocationConfiguration config = 2; +} + +// DeviceAttribute must have exactly one field set. +message DeviceAttribute { + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + optional int64 int = 2; + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + optional bool bool = 3; + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string string = 4; + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + optional string version = 5; +} + +// DeviceCapacity describes a quantity associated with a device. +message DeviceCapacity { + // Value defines how much of a certain device capacity is available. + // + // +required + optional .k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +message DeviceClaim { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + repeated DeviceRequest requests = 1; + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + repeated DeviceConstraint constraints = 2; + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + repeated DeviceClaimConfiguration config = 3; +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +message DeviceClaimConfiguration { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + optional DeviceConfiguration deviceConfiguration = 2; +} + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message DeviceClass { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. 
Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + optional DeviceClassSpec spec = 2; +} + +// DeviceClassConfiguration is used in DeviceClass. +message DeviceClassConfiguration { + optional DeviceConfiguration deviceConfiguration = 1; +} + +// DeviceClassList is a collection of classes. +message DeviceClassList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource classes. + repeated DeviceClass items = 2; +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +message DeviceClassSpec { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 1; + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + repeated DeviceClassConfiguration config = 2; +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +message DeviceConfiguration { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + optional OpaqueDeviceConfiguration opaque = 1; +} + +// DeviceConstraint must have exactly one field set besides Requests. +message DeviceConstraint { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. + // + // +optional + // +listType=atomic + repeated string requests = 1; + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + optional string matchAttribute = 2; +} + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. 
+message DeviceRequest { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + optional string name = 1; + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + optional string deviceClassName = 2; + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + repeated DeviceSelector selectors = 3; + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. + // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + optional string allocationMode = 4; + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. + // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + optional int64 count = 5; + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + optional bool adminAccess = 6; +} + +// DeviceRequestAllocationResult contains the allocation result for one request. +message DeviceRequestAllocationResult { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + optional string request = 1; + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. 
+ // + // +required + optional string driver = 2; + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + optional string pool = 3; + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + optional string device = 4; + + // AdminAccess indicates that this device was allocated for + // administrative access. See the corresponding request field + // for a definition of mode. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + optional bool adminAccess = 5; +} + +// DeviceSelector must have exactly one field set. +message DeviceSelector { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + optional CELDeviceSelector cel = 1; +} + +// NetworkDeviceData provides network-related details for the allocated device. +// This information may be filled by drivers or other components to configure +// or identify the device within a network context. +message NetworkDeviceData { + // InterfaceName specifies the name of the network interface associated with + // the allocated device. This might be the name of a physical or virtual + // network interface being configured in the pod. + // + // Must not be longer than 256 characters. + // + // +optional + optional string interfaceName = 1; + + // IPs lists the network addresses assigned to the device's network interface. + // This can include both IPv4 and IPv6 addresses. + // The IPs are in the CIDR notation, which includes both the address and the + // associated subnet mask. + // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6. + // + // +optional + // +listType=atomic + repeated string ips = 2; + + // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface. + // + // Must not be longer than 128 characters. + // + // +optional + optional string hardwareAddress = 3; +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +message OpaqueDeviceConfiguration { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + optional string driver = 1; + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +required + optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2; +} + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. 
For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaim { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + optional ResourceClaimSpec spec = 2; + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + optional ResourceClaimStatus status = 3; +} + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +message ResourceClaimConsumerReference { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + optional string apiGroup = 1; + + // Resource is the type of resource being referenced, for example "pods". + // +required + optional string resource = 3; + + // Name is the name of resource being referenced. + // +required + optional string name = 4; + + // UID identifies exactly one incarnation of the resource. + // +required + optional string uid = 5; +} + +// ResourceClaimList is a collection of claims. +message ResourceClaimList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claims. + repeated ResourceClaim items = 2; +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +message ResourceClaimSpec { + // Devices defines how to request devices. + // + // +optional + optional DeviceClaim devices = 1; +} + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. +message ResourceClaimStatus { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + optional AllocationResult allocation = 1; + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. + // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 256 such reservations. This may get increased in + // the future, but not reduced. 
+ // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + repeated ResourceClaimConsumerReference reservedFor = 2; + + // Devices contains the status of each device allocated for this + // claim, as reported by the driver. This can include driver-specific + // information. Entries are owned by their respective drivers. + // + // +optional + // +listType=map + // +listMapKey=driver + // +listMapKey=device + // +listMapKey=pool + // +featureGate=DRAResourceClaimDeviceStatus + repeated AllocatedDeviceStatus devices = 4; +} + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceClaimTemplate { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + optional ResourceClaimTemplateSpec spec = 2; +} + +// ResourceClaimTemplateList is a collection of claim templates. +message ResourceClaimTemplateList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource claim templates. + repeated ResourceClaimTemplate items = 2; +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +message ResourceClaimTemplateSpec { + // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + optional ResourceClaimSpec spec = 2; +} + +// ResourcePool describes the pool that ResourceSlices belong to. +message ResourcePool { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + optional string name = 1; + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. + // + // +required + optional int64 generation = 2; + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. 
+ // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. + // + // +required + optional int64 resourceSliceCount = 3; +} + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. +// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple , , . +// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +message ResourceSlice { + // Standard object metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + optional ResourceSliceSpec spec = 2; +} + +// ResourceSliceList is a collection of ResourceSlices. +message ResourceSliceList { + // Standard list metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of resource ResourceSlices. + repeated ResourceSlice items = 2; +} + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +message ResourceSliceSpec { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable. + // + // +required + optional string driver = 1; + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + optional ResourcePool pool = 2; + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. 
+ // + // +optional + // +oneOf=NodeSelection + optional string nodeName = 3; + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 4; + + // AllNodes indicates that all nodes have access to the resources in the pool. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + optional bool allNodes = 5; + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. + // + // +optional + // +listType=atomic + repeated Device devices = 6; +} + diff --git a/constraint/vendor/k8s.io/api/resource/v1beta1/register.go b/constraint/vendor/k8s.io/api/resource/v1beta1/register.go new file mode 100644 index 000000000..ce0a1d930 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1beta1/register.go @@ -0,0 +1,60 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "resource.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DeviceClass{}, + &DeviceClassList{}, + &ResourceClaim{}, + &ResourceClaimList{}, + &ResourceClaimTemplate{}, + &ResourceClaimTemplateList{}, + &ResourceSlice{}, + &ResourceSliceList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/constraint/vendor/k8s.io/api/resource/v1beta1/types.go b/constraint/vendor/k8s.io/api/resource/v1beta1/types.go new file mode 100644 index 000000000..ca79c5a66 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1beta1/types.go @@ -0,0 +1,1084 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation" +) + +const ( + // Finalizer is the finalizer that gets set for claims + // which were allocated through a builtin controller. + // Reserved for use by Kubernetes, DRA driver controllers must + // use their own finalizer. + Finalizer = "resource.kubernetes.io/delete-protection" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceSlice represents one or more resources in a pool of similar resources, +// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many +// ResourceSlices comprise a pool is determined by the driver. +// +// At the moment, the only supported resources are devices with attributes and capacities. +// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. +// The ResourceSlice in which a device gets published may change over time. The unique identifier +// for a device is the tuple , , . +// +// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number +// and updates all ResourceSlices with that new number and new resource definitions. A consumer +// must only use ResourceSlices with the highest generation number and ignore all others. +// +// When allocating all resources in a pool matching certain criteria or when +// looking for the best solution among several different alternatives, a +// consumer should check the number of ResourceSlices in a pool (included in +// each ResourceSlice) to determine whether its view of a pool is complete and +// if not, should wait until the driver has completed updating the pool. +// +// For resources that are not local to a node, the node name is not set. Instead, +// the driver may use a node selector to specify where the devices are available. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceSlice struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Contains the information published by the driver. + // + // Changing the spec automatically increments the metadata.generation number. + Spec ResourceSliceSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +const ( + // ResourceSliceSelectorNodeName can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.NodeName]. + ResourceSliceSelectorNodeName = "spec.nodeName" + // ResourceSliceSelectorDriver can be used in a [metav1.ListOptions] + // field selector to filter based on [ResourceSliceSpec.Driver]. 
+ ResourceSliceSelectorDriver = "spec.driver" +) + +// ResourceSliceSpec contains the information published by the driver in one ResourceSlice. +type ResourceSliceSpec struct { + // Driver identifies the DRA driver providing the capacity information. + // A field selector can be used to list only ResourceSlice + // objects with a certain driver name. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. This field is immutable. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Pool describes the pool that this ResourceSlice belongs to. + // + // +required + Pool ResourcePool `json:"pool" protobuf:"bytes,2,name=pool"` + + // NodeName identifies the node which provides the resources in this pool. + // A field selector can be used to list only ResourceSlice + // objects belonging to a certain node. + // + // This field can be used to limit access from nodes to ResourceSlices with + // the same node name. It also indicates to autoscalers that adding + // new nodes of the same type as some old node might also make new + // resources available. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // This field is immutable. + // + // +optional + // +oneOf=NodeSelection + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,3,opt,name=nodeName"` + + // NodeSelector defines which nodes have access to the resources in the pool, + // when that pool is not limited to a single node. + // + // Must use exactly one term. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,4,opt,name=nodeSelector"` + + // AllNodes indicates that all nodes have access to the resources in the pool. + // + // Exactly one of NodeName, NodeSelector and AllNodes must be set. + // + // +optional + // +oneOf=NodeSelection + AllNodes bool `json:"allNodes,omitempty" protobuf:"bytes,5,opt,name=allNodes"` + + // Devices lists some or all of the devices in this pool. + // + // Must not have more than 128 entries. + // + // +optional + // +listType=atomic + Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"` +} + +// ResourcePool describes the pool that ResourceSlices belong to. +type ResourcePool struct { + // Name is used to identify the pool. For node-local devices, this + // is often the node name, but this is not required. + // + // It must not be longer than 253 characters and must consist of one or more DNS sub-domains + // separated by slashes. This field is immutable. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Generation tracks the change in a pool over time. Whenever a driver + // changes something about one or more of the resources in a pool, it + // must change the generation in all ResourceSlices which are part of + // that pool. Consumers of ResourceSlices should only consider + // resources from the pool with the highest generation number. The + // generation may be reset by drivers, which should be fine for + // consumers, assuming that all ResourceSlices in a pool are updated to + // match or deleted. + // + // Combined with ResourceSliceCount, this mechanism enables consumers to + // detect pools which are comprised of multiple ResourceSlices and are + // in an incomplete state. 
+ // + // +required + Generation int64 `json:"generation" protobuf:"bytes,2,name=generation"` + + // ResourceSliceCount is the total number of ResourceSlices in the pool at this + // generation number. Must be greater than zero. + // + // Consumers can use this to check whether they have seen all ResourceSlices + // belonging to the same pool. + // + // +required + ResourceSliceCount int64 `json:"resourceSliceCount" protobuf:"bytes,3,name=resourceSliceCount"` +} + +const ResourceSliceMaxSharedCapacity = 128 +const ResourceSliceMaxDevices = 128 +const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name. + +// Device represents one individual hardware instance that can be selected based +// on its attributes. Besides the name, exactly one field must be set. +type Device struct { + // Name is unique identifier among all devices managed by + // the driver in the pool. It must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // Basic defines one device instance. + // + // +optional + // +oneOf=deviceType + Basic *BasicDevice `json:"basic,omitempty" protobuf:"bytes,2,opt,name=basic"` +} + +// BasicDevice defines one device instance. +type BasicDevice struct { + // Attributes defines the set of attributes for this device. + // The name of each attribute must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Attributes map[QualifiedName]DeviceAttribute `json:"attributes,omitempty" protobuf:"bytes,1,rep,name=attributes"` + + // Capacity defines the set of capacities for this device. + // The name of each capacity must be unique in that set. + // + // The maximum number of attributes and capacities combined is 32. + // + // +optional + Capacity map[QualifiedName]DeviceCapacity `json:"capacity,omitempty" protobuf:"bytes,2,rep,name=capacity"` +} + +// DeviceCapacity describes a quantity associated with a device. +type DeviceCapacity struct { + // Value defines how much of a certain device capacity is available. + // + // +required + Value resource.Quantity `json:"value" protobuf:"bytes,1,rep,name=value"` + + // potential future addition: fields which define how to "consume" + // capacity (= share a single device between different consumers). +} + +// Limit for the sum of the number of entries in both attributes and capacity. +const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32 + +// QualifiedName is the name of a device attribute or capacity. +// +// Attributes and capacities are defined either by the owner of the specific +// driver (usually the vendor) or by some 3rd party (e.g. the Kubernetes +// project). Because they are sometimes compared across devices, a given name +// is expected to mean the same thing and have the same type on all devices. +// +// Names must be either a C identifier (e.g. "theName") or a DNS subdomain +// followed by a slash ("/") followed by a C identifier +// (e.g. "dra.example.com/theName"). Names which do not include the +// domain prefix are assumed to be part of the driver's domain. Attributes +// or capacities defined by 3rd parties must include the domain prefix. +// +// The maximum length for the DNS subdomain is 63 characters (same as +// for driver names) and the maximum length of the C identifier +// is 32. +type QualifiedName string + +// FullyQualifiedName is a QualifiedName where the domain is set. 
+type FullyQualifiedName string + +// DeviceMaxDomainLength is the maximum length of the domain prefix in a fully-qualified name. +const DeviceMaxDomainLength = 63 + +// DeviceMaxIDLength is the maximum length of the identifier in a device attribute or capacity name (`/`). +const DeviceMaxIDLength = 32 + +// DeviceAttribute must have exactly one field set. +type DeviceAttribute struct { + // The Go field names below have a Value suffix to avoid a conflict between the + // field "String" and the corresponding method. That method is required. + // The Kubernetes API is defined without that suffix to keep it more natural. + + // IntValue is a number. + // + // +optional + // +oneOf=ValueType + IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"` + + // BoolValue is a true/false value. + // + // +optional + // +oneOf=ValueType + BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"` + + // StringValue is a string. Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"` + + // VersionValue is a semantic version according to semver.org spec 2.0.0. + // Must not be longer than 64 characters. + // + // +optional + // +oneOf=ValueType + VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"` +} + +// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value. +const DeviceAttributeMaxValueLength = 64 + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceSliceList is a collection of ResourceSlices. +type ResourceSliceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource ResourceSlices. + Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceClaim describes a request for access to resources in the cluster, +// for use by workloads. For example, if a workload needs an accelerator device +// with specific properties, this is how that request is expressed. The status +// stanza tracks whether this claim has been satisfied and what specific +// resources have been allocated. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes what is being requested and how to configure it. + // The spec is immutable. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` + + // Status describes whether the claim is ready to use and what has been allocated. + // +optional + Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. +type ResourceClaimSpec struct { + // Devices defines how to request devices. + // + // +optional + Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"` + + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. 
May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"` +} + +// DeviceClaim defines how to request devices with a ResourceClaim. +type DeviceClaim struct { + // Requests represent individual requests for distinct devices which + // must all be satisfied. If empty, nothing needs to be allocated. + // + // +optional + // +listType=atomic + Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"` + + // These constraints must be satisfied by the set of devices that get + // allocated for the claim. + // + // +optional + // +listType=atomic + Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"` + + // This field holds configuration for multiple potential drivers which + // could satisfy requests in this claim. It is ignored while allocating + // the claim. + // + // +optional + // +listType=atomic + Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"` + + // Potential future extension, ignored by older schedulers. This is + // fine because scoring allows users to define a preference, without + // making it a hard requirement. + // + // Score *SomeScoringStruct +} + +const ( + DeviceRequestsMaxSize = AllocationResultsMaxSize + DeviceConstraintsMaxSize = 32 + DeviceConfigMaxSize = 32 +) + +// DeviceRequest is a request for devices required for a claim. +// This is typically a request for a single resource like a device, but can +// also ask for several identical devices. +// +// A DeviceClassName is currently required. Clients must check that it is +// indeed set. It's absence indicates that something changed in a way that +// is not supported by the client yet, in which case it must refuse to +// handle the request. +type DeviceRequest struct { + // Name can be used to reference this request in a pod.spec.containers[].resources.claims + // entry and in a constraint of the claim. + // + // Must be a DNS label. + // + // +required + Name string `json:"name" protobuf:"bytes,1,name=name"` + + // DeviceClassName references a specific DeviceClass, which can define + // additional configuration and selectors to be inherited by this + // request. + // + // A class is required. Which classes are available depends on the cluster. + // + // Administrators may use this to restrict which devices may get + // requested by only installing classes with selectors for permitted + // devices. If users are free to request anything without restrictions, + // then administrators can create an empty DeviceClass for users + // to reference. + // + // +required + DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"` + + // Selectors define criteria which must be satisfied by a specific + // device in order for that device to be considered for this + // request. All selectors must be satisfied for a device to be + // considered. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"` + + // AllocationMode and its related fields define how devices are allocated + // to satisfy this request. Supported values are: + // + // - ExactCount: This request is for a specific number of devices. + // This is the default. The exact number is provided in the + // count field. + // + // - All: This request is for all of the matching devices in a pool. 
+ // Allocation will fail if some devices are already allocated, + // unless adminAccess is requested. + // + // If AlloctionMode is not specified, the default mode is ExactCount. If + // the mode is ExactCount and count is not specified, the default count is + // one. Any other requests must specify this field. + // + // More modes may get added in the future. Clients must refuse to handle + // requests with unknown modes. + // + // +optional + AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,4,opt,name=allocationMode"` + + // Count is used only when the count mode is "ExactCount". Must be greater than zero. + // If AllocationMode is ExactCount and this field is not specified, the default is one. + // + // +optional + // +oneOf=AllocationMode + Count int64 `json:"count,omitempty" protobuf:"bytes,5,opt,name=count"` + + // AdminAccess indicates that this is a claim for administrative access + // to the device(s). Claims with AdminAccess are expected to be used for + // monitoring or other management services for a device. They ignore + // all ordinary claims to the device with respect to access modes and + // any resource allocations. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + AdminAccess *bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"` +} + +const ( + DeviceSelectorsMaxSize = 32 +) + +type DeviceAllocationMode string + +// Valid [DeviceRequest.CountMode] values. +const ( + DeviceAllocationModeExactCount = DeviceAllocationMode("ExactCount") + DeviceAllocationModeAll = DeviceAllocationMode("All") +) + +// DeviceSelector must have exactly one field set. +type DeviceSelector struct { + // CEL contains a CEL expression for selecting a device. + // + // +optional + // +oneOf=SelectorType + CEL *CELDeviceSelector `json:"cel,omitempty" protobuf:"bytes,1,opt,name=cel"` +} + +// CELDeviceSelector contains a CEL expression for selecting a device. +type CELDeviceSelector struct { + // Expression is a CEL expression which evaluates a single device. It + // must evaluate to true when the device under consideration satisfies + // the desired criteria, and false when it does not. Any other result + // is an error and causes allocation of devices to abort. + // + // The expression's input is an object named "device", which carries + // the following properties: + // - driver (string): the name of the driver which defines this device. + // - attributes (map[string]object): the device's attributes, grouped by prefix + // (e.g. device.attributes["dra.example.com"] evaluates to an object with all + // of the attributes which were prefixed by "dra.example.com". + // - capacity (map[string]object): the device's capacities, grouped by prefix. + // + // Example: Consider a device with driver="dra.example.com", which exposes + // two attributes named "model" and "ext.example.com/family" and which + // exposes one capacity named "modules". This input to this expression + // would have the following fields: + // + // device.driver + // device.attributes["dra.example.com"].model + // device.attributes["ext.example.com"].family + // device.capacity["dra.example.com"].modules + // + // The device.driver field can be used to check for a specific driver, + // either as a high-level precondition (i.e. 
you only want to consider + // devices from this driver) or as part of a multi-clause expression + // that is meant to consider devices from different drivers. + // + // The value type of each attribute is defined by the device + // definition, and users who write these expressions must consult the + // documentation for their specific drivers. The value type of each + // capacity is Quantity. + // + // If an unknown prefix is used as a lookup in either device.attributes + // or device.capacity, an empty map will be returned. Any reference to + // an unknown field will cause an evaluation error and allocation to + // abort. + // + // A robust expression should check for the existence of attributes + // before referencing them. + // + // For ease of use, the cel.bind() function is enabled, and can be used + // to simplify expressions that access multiple attributes with the + // same domain. For example: + // + // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool) + // + // The length of the expression must be smaller or equal to 10 Ki. The + // cost of evaluating it is also limited based on the estimated number + // of logical steps. + // + // +required + Expression string `json:"expression" protobuf:"bytes,1,name=expression"` +} + +// CELSelectorExpressionMaxCost specifies the cost limit for a single CEL selector +// evaluation. +// +// There is no overall budget for selecting a device, so the actual time +// required for that is proportional to the number of CEL selectors and how +// often they need to be evaluated, which can vary depending on several factors +// (number of devices, cluster utilization, additional constraints). +// +// Validation against this limit and [CELSelectorExpressionMaxLength] happens +// only when setting an expression for the first time or when changing it. If +// the limits are changed in a future Kubernetes release, existing users are +// guaranteed that existing expressions will continue to be valid. +// +// However, the kube-scheduler also applies this cost limit at runtime, so it +// could happen that a valid expression fails at runtime after an up- or +// downgrade. This can also happen without version skew when the cost estimate +// underestimated the actual cost. That this might happen is the reason why +// kube-scheduler enforces the runtime limit instead of relying on validation. +// +// According to +// https://github.com/kubernetes/kubernetes/blob/4aeaf1e99e82da8334c0d6dddd848a194cd44b4f/staging/src/k8s.io/apiserver/pkg/apis/cel/config.go#L20-L22, +// this gives roughly 0.1 second for each expression evaluation. +// However, this depends on how fast the machine is. +const CELSelectorExpressionMaxCost = 1000000 + +// CELSelectorExpressionMaxLength is the maximum length of a CEL selector expression string. +const CELSelectorExpressionMaxLength = 10 * 1024 + +// DeviceConstraint must have exactly one field set besides Requests. +type DeviceConstraint struct { + // Requests is a list of the one or more requests in this claim which + // must co-satisfy this constraint. If a request is fulfilled by + // multiple devices, then all of the devices must satisfy the + // constraint. If this is not specified, this constraint applies to all + // requests in this claim. 
+ // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + // MatchAttribute requires that all devices in question have this + // attribute and that its type and value are the same across those + // devices. + // + // For example, if you specified "dra.example.com/numa" (a hypothetical example!), + // then only devices in the same NUMA node will be chosen. A device which + // does not have that attribute will not be chosen. All devices should + // use a value of the same type for this attribute because that is part of + // its specification, but if one device doesn't, then it also will not be + // chosen. + // + // Must include the domain qualifier. + // + // +optional + // +oneOf=ConstraintType + MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"` + + // Potential future extension, not part of the current design: + // A CEL expression which compares different devices and returns + // true if they match. + // + // Because it would be part of a one-of, old schedulers will not + // accidentally ignore this additional, for them unknown match + // criteria. + // + // MatchExpression string +} + +// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim. +type DeviceClaimConfiguration struct { + // Requests lists the names of requests where the configuration applies. + // If empty, it applies to all requests. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"` +} + +// DeviceConfiguration must have exactly one field set. It gets embedded +// inline in some other structs which have other fields, so field names must +// not conflict with those. +type DeviceConfiguration struct { + // Opaque provides driver-specific configuration parameters. + // + // +optional + // +oneOf=ConfigurationType + Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"` +} + +// OpaqueDeviceConfiguration contains configuration parameters for a driver +// in a format defined by the driver vendor. +type OpaqueDeviceConfiguration struct { + // Driver is used to determine which kubelet plugin needs + // to be passed these configuration parameters. + // + // An admission policy provided by the driver developer could use this + // to decide whether it needs to validate them. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,name=driver"` + + // Parameters can contain arbitrary data. It is the responsibility of + // the driver developer to handle validation and versioning. Typically this + // includes self-identification and a version ("kind" + "apiVersion" for + // Kubernetes types), with conversion between different versions. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +required + Parameters runtime.RawExtension `json:"parameters" protobuf:"bytes,2,name=parameters"` +} + +// OpaqueParametersMaxLength is the maximum length of the raw data in an +// [OpaqueDeviceConfiguration.Parameters] field. +const OpaqueParametersMaxLength = 10 * 1024 + +// ResourceClaimStatus tracks whether the resource has been allocated and what +// the result of that was. 
+type ResourceClaimStatus struct { + // Allocation is set once the claim has been allocated successfully. + // + // +optional + Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"` + + // ReservedFor indicates which entities are currently allowed to use + // the claim. A Pod which references a ResourceClaim which is not + // reserved for that Pod will not be started. A claim that is in + // use or might be in use because it has been reserved must not get + // deallocated. + // + // In a cluster with multiple scheduler instances, two pods might get + // scheduled concurrently by different schedulers. When they reference + // the same ResourceClaim which already has reached its maximum number + // of consumers, only one pod can be scheduled. + // + // Both schedulers try to add their pod to the claim.status.reservedFor + // field, but only the update that reaches the API server first gets + // stored. The other one fails with an error and the scheduler + // which issued it knows that it must put the pod back into the queue, + // waiting for the ResourceClaim to become usable again. + // + // There can be at most 256 such reservations. This may get increased in + // the future, but not reduced. + // + // +optional + // +listType=map + // +listMapKey=uid + // +patchStrategy=merge + // +patchMergeKey=uid + ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"` + + // DeallocationRequested is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"` + + // Devices contains the status of each device allocated for this + // claim, as reported by the driver. This can include driver-specific + // information. Entries are owned by their respective drivers. + // + // +optional + // +listType=map + // +listMapKey=driver + // +listMapKey=device + // +listMapKey=pool + // +featureGate=DRAResourceClaimDeviceStatus + Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"` +} + +// ResourceClaimReservedForMaxSize is the maximum number of entries in +// claim.status.reservedFor. +const ResourceClaimReservedForMaxSize = 256 + +// ResourceClaimConsumerReference contains enough information to let you +// locate the consumer of a ResourceClaim. The user must be a resource in the same +// namespace as the ResourceClaim. +type ResourceClaimConsumerReference struct { + // APIGroup is the group for the resource being referenced. It is + // empty for the core API. This matches the group in the APIVersion + // that is used when creating the resources. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"` + // Resource is the type of resource being referenced, for example "pods". + // +required + Resource string `json:"resource" protobuf:"bytes,3,name=resource"` + // Name is the name of resource being referenced. + // +required + Name string `json:"name" protobuf:"bytes,4,name=name"` + // UID identifies exactly one incarnation of the resource. + // +required + UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"` +} + +// AllocationResult contains attributes of an allocated resource. +type AllocationResult struct { + // Devices is the result of allocating devices. 
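The reservedFor bookkeeping described above is an ordinary optimistic-concurrency update: a scheduler appends its consumer reference and lets the API server's conflict handling arbitrate races. A minimal sketch with a made-up pod name and UID, respecting ResourceClaimReservedForMaxSize:

package main

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/types"
)

// reserveForPod appends a consumer reference for a Pod to the claim status,
// mirroring what a scheduler does before binding the pod. The caller is
// expected to send the update to the API server; a conflict there means
// another scheduler won the race and the pod goes back into the queue.
func reserveForPod(claim *resourcev1beta1.ResourceClaim, podName string, podUID types.UID) error {
	if len(claim.Status.ReservedFor) >= resourcev1beta1.ResourceClaimReservedForMaxSize {
		return fmt.Errorf("claim %s already has the maximum of %d consumers",
			claim.Name, resourcev1beta1.ResourceClaimReservedForMaxSize)
	}
	claim.Status.ReservedFor = append(claim.Status.ReservedFor,
		resourcev1beta1.ResourceClaimConsumerReference{
			// APIGroup stays empty because Pod is in the core API group.
			Resource: "pods",
			Name:     podName,
			UID:      podUID,
		})
	return nil
}

func main() {
	claim := &resourcev1beta1.ResourceClaim{}
	claim.Name = "example-claim" // hypothetical claim name
	if err := reserveForPod(claim, "example-pod", types.UID("1234-abcd")); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(len(claim.Status.ReservedFor), "consumer(s) recorded")
}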
+ // + // +optional + Devices DeviceAllocationResult `json:"devices,omitempty" protobuf:"bytes,1,opt,name=devices"` + + // NodeSelector defines where the allocated resources are available. If + // unset, they are available everywhere. + // + // +optional + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"` + + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"` +} + +// DeviceAllocationResult is the result of allocating devices. +type DeviceAllocationResult struct { + // Results lists all allocated devices. + // + // +optional + // +listType=atomic + Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"` + + // This field is a combination of all the claim and class configuration parameters. + // Drivers can distinguish between those based on a flag. + // + // This includes configuration parameters for drivers which have no allocated + // devices in the result because it is up to the drivers which configuration + // parameters they support. They can silently ignore unknown configuration + // parameters. + // + // +optional + // +listType=atomic + Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` +} + +// AllocationResultsMaxSize represents the maximum number of +// entries in allocation.devices.results. +const AllocationResultsMaxSize = 32 + +// DeviceRequestAllocationResult contains the allocation result for one request. +type DeviceRequestAllocationResult struct { + // Request is the name of the request in the claim which caused this + // device to be allocated. Multiple devices may have been allocated + // per request. + // + // +required + Request string `json:"request" protobuf:"bytes,1,name=request"` + + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,2,name=driver"` + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). + // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + Pool string `json:"pool" protobuf:"bytes,3,name=pool"` + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + Device string `json:"device" protobuf:"bytes,4,name=device"` + + // AdminAccess indicates that this device was allocated for + // administrative access. See the corresponding request field + // for a definition of mode. + // + // This is an alpha field and requires enabling the DRAAdminAccess + // feature gate. Admin access is disabled if this field is unset or + // set to false, otherwise it is enabled. + // + // +optional + // +featureGate=DRAAdminAccess + AdminAccess *bool `json:"adminAccess" protobuf:"bytes,5,name=adminAccess"` +} + +// DeviceAllocationConfiguration gets embedded in an AllocationResult. 
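A consumer of these results typically walks allocation.devices.results to find out which driver/pool/device tuples back a particular request. A small sketch of that lookup, with hypothetical request, driver, and pool names:

package main

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
)

// devicesForRequest returns the driver/pool/device identifiers allocated for
// one named request. A single request may be satisfied by several devices.
func devicesForRequest(alloc *resourcev1beta1.AllocationResult, request string) []string {
	var ids []string
	for _, r := range alloc.Devices.Results {
		if r.Request == request {
			ids = append(ids, r.Driver+"/"+r.Pool+"/"+r.Device)
		}
	}
	return ids
}

func main() {
	alloc := &resourcev1beta1.AllocationResult{
		Devices: resourcev1beta1.DeviceAllocationResult{
			Results: []resourcev1beta1.DeviceRequestAllocationResult{
				{Request: "gpu", Driver: "gpu.example.com", Pool: "node-1", Device: "gpu-0"},
			},
		},
	}
	fmt.Println(devicesForRequest(alloc, "gpu")) // [gpu.example.com/node-1/gpu-0]
}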
+type DeviceAllocationConfiguration struct { + // Source records whether the configuration comes from a class and thus + // is not something that a normal user would have been able to set + // or from a claim. + // + // +required + Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"` + + // Requests lists the names of requests where the configuration applies. + // If empty, its applies to all requests. + // + // +optional + // +listType=atomic + Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"` + + DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"` +} + +type AllocationConfigSource string + +// Valid [DeviceAllocationConfiguration.Source] values. +const ( + AllocationConfigSourceClass = "FromClass" + AllocationConfigSourceClaim = "FromClaim" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceClaimList is a collection of claims. +type ResourceClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claims. + Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// DeviceClass is a vendor- or admin-provided resource that contains +// device configuration and selectors. It can be referenced in +// the device requests of a claim to apply these presets. +// Cluster scoped. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type DeviceClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines what can be allocated and how to configure it. + // + // This is mutable. Consumers have to be prepared for classes changing + // at any time, either because they get updated or replaced. Claim + // allocations are done once based on whatever was set in classes at + // the time of allocation. + // + // Changing the spec automatically increments the metadata.generation number. + Spec DeviceClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated +// and how to configure it. +type DeviceClassSpec struct { + // Each selector must be satisfied by a device which is claimed via this class. + // + // +optional + // +listType=atomic + Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"` + + // Config defines configuration parameters that apply to each device that is claimed via this class. + // Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor + // configuration applies to exactly one driver. + // + // They are passed to the driver, but are not considered while allocating the claim. + // + // +optional + // +listType=atomic + Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` + + // SuitableNodes is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. 
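Putting the class plumbing together: a vendor or admin might publish a DeviceClass that preselects its own devices and attaches class-level configuration, and at allocation time that configuration is surfaced in the result with source FromClass. A hedged sketch; the class name, driver name, CEL expression, and parameter payload are placeholders.

package main

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	class := resourcev1beta1.DeviceClass{
		ObjectMeta: metav1.ObjectMeta{Name: "example-gpu-class"}, // hypothetical
		Spec: resourcev1beta1.DeviceClassSpec{
			// Every device claimed via this class must come from this driver.
			Selectors: []resourcev1beta1.DeviceSelector{{
				CEL: &resourcev1beta1.CELDeviceSelector{
					Expression: `device.driver == "gpu.example.com"`,
				},
			}},
			// Class-level defaults, passed to the driver for each allocated device.
			Config: []resourcev1beta1.DeviceClassConfiguration{{
				DeviceConfiguration: resourcev1beta1.DeviceConfiguration{
					Opaque: &resourcev1beta1.OpaqueDeviceConfiguration{
						Driver:     "gpu.example.com",
						Parameters: runtime.RawExtension{Raw: []byte(`{"apiVersion":"gpu.example.com/v1","kind":"GpuConfig","defaultMode":"exclusive"}`)},
					},
				},
			}},
		},
	}

	// In an AllocationResult, configuration copied from a class is marked with
	// Source == AllocationConfigSourceClass ("FromClass") so drivers can tell
	// it apart from claim-level configuration.
	fmt.Println(class.Name, resourcev1beta1.AllocationConfigSourceClass)
}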
+ // SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` +} + +// DeviceClassConfiguration is used in DeviceClass. +type DeviceClassConfiguration struct { + DeviceConfiguration `json:",inline" protobuf:"bytes,1,opt,name=deviceConfiguration"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// DeviceClassList is a collection of classes. +type DeviceClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource classes. + Items []DeviceClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceClaimTemplate is used to produce ResourceClaim objects. +// +// This is an alpha type and requires enabling the DynamicResourceAllocation +// feature gate. +type ResourceClaimTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Describes the ResourceClaim that is to be generated. + // + // This field is immutable. A ResourceClaim will get created by the + // control plane for a Pod when needed and then not get updated + // anymore. + Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim. +type ResourceClaimTemplateSpec struct { + // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim + // when creating it. No other fields are allowed and will be rejected during + // validation. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec for the ResourceClaim. The entire content is copied unchanged + // into the ResourceClaim that gets created from this template. The + // same fields as in a ResourceClaim are also valid here. + Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.32 + +// ResourceClaimTemplateList is a collection of claim templates. +type ResourceClaimTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of resource claim templates. + Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// AllocatedDeviceStatus contains the status of an allocated device, if the +// driver chooses to report it. This may include driver-specific information. +type AllocatedDeviceStatus struct { + // Driver specifies the name of the DRA driver whose kubelet + // plugin should be invoked to process the allocation once the claim is + // needed on a node. + // + // Must be a DNS subdomain and should end with a DNS domain owned by the + // vendor of the driver. + // + // +required + Driver string `json:"driver" protobuf:"bytes,1,rep,name=driver"` + + // This name together with the driver name and the device name field + // identify which device was allocated (`//`). 
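The template/claim split above means workload authors write the claim spec once inside a ResourceClaimTemplate and let the control plane stamp out per-Pod ResourceClaims. A hedged sketch of such a template, reusing the hypothetical request and class names from the earlier examples:

package main

import (
	"fmt"

	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	template := resourcev1beta1.ResourceClaimTemplate{
		ObjectMeta: metav1.ObjectMeta{Name: "gpu-claim-template", Namespace: "default"}, // hypothetical
		Spec: resourcev1beta1.ResourceClaimTemplateSpec{
			// Labels and annotations set here are copied into every generated ResourceClaim.
			ObjectMeta: metav1.ObjectMeta{
				Labels: map[string]string{"app": "example"},
			},
			// The spec is copied unchanged into the generated ResourceClaim.
			Spec: resourcev1beta1.ResourceClaimSpec{
				Devices: resourcev1beta1.DeviceClaim{
					Requests: []resourcev1beta1.DeviceRequest{{
						Name:            "gpu",
						DeviceClassName: "example-gpu-class",
					}},
				},
			},
		},
	}
	fmt.Println(template.Name, "requests:", len(template.Spec.Spec.Devices.Requests))
}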
+ // + // Must not be longer than 253 characters and may contain one or more + // DNS sub-domains separated by slashes. + // + // +required + Pool string `json:"pool" protobuf:"bytes,2,rep,name=pool"` + + // Device references one device instance via its name in the driver's + // resource pool. It must be a DNS label. + // + // +required + Device string `json:"device" protobuf:"bytes,3,rep,name=device"` + + // Conditions contains the latest observation of the device's state. + // If the device has been configured according to the class and claim + // config references, the `Ready` condition should be True. + // + // +optional + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,4,opt,name=conditions"` + + // Data contains arbitrary driver-specific data. + // + // The length of the raw data must be smaller or equal to 10 Ki. + // + // +optional + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,5,opt,name=data"` + + // NetworkData contains network-related information specific to the device. + // + // +optional + NetworkData *NetworkDeviceData `json:"networkData,omitempty" protobuf:"bytes,6,opt,name=networkData"` +} + +// NetworkDeviceData provides network-related details for the allocated device. +// This information may be filled by drivers or other components to configure +// or identify the device within a network context. +type NetworkDeviceData struct { + // InterfaceName specifies the name of the network interface associated with + // the allocated device. This might be the name of a physical or virtual + // network interface being configured in the pod. + // + // Must not be longer than 256 characters. + // + // +optional + InterfaceName string `json:"interfaceName,omitempty" protobuf:"bytes,1,opt,name=interfaceName"` + + // IPs lists the network addresses assigned to the device's network interface. + // This can include both IPv4 and IPv6 addresses. + // The IPs are in the CIDR notation, which includes both the address and the + // associated subnet mask. + // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6. + // + // +optional + // +listType=atomic + IPs []string `json:"ips,omitempty" protobuf:"bytes,2,opt,name=ips"` + + // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface. + // + // Must not be longer than 128 characters. + // + // +optional + HardwareAddress string `json:"hardwareAddress,omitempty" protobuf:"bytes,3,opt,name=hardwareAddress"` +} diff --git a/constraint/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..4ecc35d08 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,386 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllocatedDeviceStatus = map[string]string{ + "": "AllocatedDeviceStatus contains the status of an allocated device, if the driver chooses to report it. This may include driver-specific information.", + "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", + "device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.", + "conditions": "Conditions contains the latest observation of the device's state. If the device has been configured according to the class and claim config references, the `Ready` condition should be True.", + "data": "Data contains arbitrary driver-specific data.\n\nThe length of the raw data must be smaller or equal to 10 Ki.", + "networkData": "NetworkData contains network-related information specific to the device.", +} + +func (AllocatedDeviceStatus) SwaggerDoc() map[string]string { + return map_AllocatedDeviceStatus +} + +var map_AllocationResult = map[string]string{ + "": "AllocationResult contains attributes of an allocated resource.", + "devices": "Devices is the result of allocating devices.", + "nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.", +} + +func (AllocationResult) SwaggerDoc() map[string]string { + return map_AllocationResult +} + +var map_BasicDevice = map[string]string{ + "": "BasicDevice defines one device instance.", + "attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", + "capacity": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.", +} + +func (BasicDevice) SwaggerDoc() map[string]string { + return map_BasicDevice +} + +var map_CELDeviceSelector = map[string]string{ + "": "CELDeviceSelector contains a CEL expression for selecting a device.", + "expression": "Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. 
Any other result is an error and causes allocation of devices to abort.\n\nThe expression's input is an object named \"device\", which carries the following properties:\n - driver (string): the name of the driver which defines this device.\n - attributes (map[string]object): the device's attributes, grouped by prefix\n (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all\n of the attributes which were prefixed by \"dra.example.com\".\n - capacity (map[string]object): the device's capacities, grouped by prefix.\n\nExample: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields:\n\n device.driver\n device.attributes[\"dra.example.com\"].model\n device.attributes[\"ext.example.com\"].family\n device.capacity[\"dra.example.com\"].modules\n\nThe device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers.\n\nThe value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity.\n\nIf an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort.\n\nA robust expression should check for the existence of attributes before referencing them.\n\nFor ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example:\n\n cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool)\n\nThe length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps.", +} + +func (CELDeviceSelector) SwaggerDoc() map[string]string { + return map_CELDeviceSelector +} + +var map_Device = map[string]string{ + "": "Device represents one individual hardware instance that can be selected based on its attributes. Besides the name, exactly one field must be set.", + "name": "Name is unique identifier among all devices managed by the driver in the pool. It must be a DNS label.", + "basic": "Basic defines one device instance.", +} + +func (Device) SwaggerDoc() map[string]string { + return map_Device +} + +var map_DeviceAllocationConfiguration = map[string]string{ + "": "DeviceAllocationConfiguration gets embedded in an AllocationResult.", + "source": "Source records whether the configuration comes from a class and thus is not something that a normal user would have been able to set or from a claim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, its applies to all requests.", +} + +func (DeviceAllocationConfiguration) SwaggerDoc() map[string]string { + return map_DeviceAllocationConfiguration +} + +var map_DeviceAllocationResult = map[string]string{ + "": "DeviceAllocationResult is the result of allocating devices.", + "results": "Results lists all allocated devices.", + "config": "This field is a combination of all the claim and class configuration parameters. 
Drivers can distinguish between those based on a flag.\n\nThis includes configuration parameters for drivers which have no allocated devices in the result because it is up to the drivers which configuration parameters they support. They can silently ignore unknown configuration parameters.", +} + +func (DeviceAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceAllocationResult +} + +var map_DeviceAttribute = map[string]string{ + "": "DeviceAttribute must have exactly one field set.", + "int": "IntValue is a number.", + "bool": "BoolValue is a true/false value.", + "string": "StringValue is a string. Must not be longer than 64 characters.", + "version": "VersionValue is a semantic version according to semver.org spec 2.0.0. Must not be longer than 64 characters.", +} + +func (DeviceAttribute) SwaggerDoc() map[string]string { + return map_DeviceAttribute +} + +var map_DeviceCapacity = map[string]string{ + "": "DeviceCapacity describes a quantity associated with a device.", + "value": "Value defines how much of a certain device capacity is available.", +} + +func (DeviceCapacity) SwaggerDoc() map[string]string { + return map_DeviceCapacity +} + +var map_DeviceClaim = map[string]string{ + "": "DeviceClaim defines how to request devices with a ResourceClaim.", + "requests": "Requests represent individual requests for distinct devices which must all be satisfied. If empty, nothing needs to be allocated.", + "constraints": "These constraints must be satisfied by the set of devices that get allocated for the claim.", + "config": "This field holds configuration for multiple potential drivers which could satisfy requests in this claim. It is ignored while allocating the claim.", +} + +func (DeviceClaim) SwaggerDoc() map[string]string { + return map_DeviceClaim +} + +var map_DeviceClaimConfiguration = map[string]string{ + "": "DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.", + "requests": "Requests lists the names of requests where the configuration applies. If empty, it applies to all requests.", +} + +func (DeviceClaimConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClaimConfiguration +} + +var map_DeviceClass = map[string]string{ + "": "DeviceClass is a vendor- or admin-provided resource that contains device configuration and selectors. It can be referenced in the device requests of a claim to apply these presets. Cluster scoped.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec defines what can be allocated and how to configure it.\n\nThis is mutable. Consumers have to be prepared for classes changing at any time, either because they get updated or replaced. 
Claim allocations are done once based on whatever was set in classes at the time of allocation.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (DeviceClass) SwaggerDoc() map[string]string { + return map_DeviceClass +} + +var map_DeviceClassConfiguration = map[string]string{ + "": "DeviceClassConfiguration is used in DeviceClass.", +} + +func (DeviceClassConfiguration) SwaggerDoc() map[string]string { + return map_DeviceClassConfiguration +} + +var map_DeviceClassList = map[string]string{ + "": "DeviceClassList is a collection of classes.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource classes.", +} + +func (DeviceClassList) SwaggerDoc() map[string]string { + return map_DeviceClassList +} + +var map_DeviceClassSpec = map[string]string{ + "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", + "selectors": "Each selector must be satisfied by a device which is claimed via this class.", + "config": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", +} + +func (DeviceClassSpec) SwaggerDoc() map[string]string { + return map_DeviceClassSpec +} + +var map_DeviceConfiguration = map[string]string{ + "": "DeviceConfiguration must have exactly one field set. It gets embedded inline in some other structs which have other fields, so field names must not conflict with those.", + "opaque": "Opaque provides driver-specific configuration parameters.", +} + +func (DeviceConfiguration) SwaggerDoc() map[string]string { + return map_DeviceConfiguration +} + +var map_DeviceConstraint = map[string]string{ + "": "DeviceConstraint must have exactly one field set besides Requests.", + "requests": "Requests is a list of the one or more requests in this claim which must co-satisfy this constraint. If a request is fulfilled by multiple devices, then all of the devices must satisfy the constraint. If this is not specified, this constraint applies to all requests in this claim.", + "matchAttribute": "MatchAttribute requires that all devices in question have this attribute and that its type and value are the same across those devices.\n\nFor example, if you specified \"dra.example.com/numa\" (a hypothetical example!), then only devices in the same NUMA node will be chosen. A device which does not have that attribute will not be chosen. All devices should use a value of the same type for this attribute because that is part of its specification, but if one device doesn't, then it also will not be chosen.\n\nMust include the domain qualifier.", +} + +func (DeviceConstraint) SwaggerDoc() map[string]string { + return map_DeviceConstraint +} + +var map_DeviceRequest = map[string]string{ + "": "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. Clients must check that it is indeed set. 
It's absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.", + "name": "Name can be used to reference this request in a pod.spec.containers[].resources.claims entry and in a constraint of the claim.\n\nMust be a DNS label.", + "deviceClassName": "DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request.\n\nA class is required. Which classes are available depends on the cluster.\n\nAdministrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.", + "selectors": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.", + "allocationMode": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.", + "count": "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.", + "adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.", +} + +func (DeviceRequest) SwaggerDoc() map[string]string { + return map_DeviceRequest +} + +var map_DeviceRequestAllocationResult = map[string]string{ + "": "DeviceRequestAllocationResult contains the allocation result for one request.", + "request": "Request is the name of the request in the claim which caused this device to be allocated. Multiple devices may have been allocated per request.", + "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.", + "device": "Device references one device instance via its name in the driver's resource pool. 
It must be a DNS label.", + "adminAccess": "AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.", +} + +func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string { + return map_DeviceRequestAllocationResult +} + +var map_DeviceSelector = map[string]string{ + "": "DeviceSelector must have exactly one field set.", + "cel": "CEL contains a CEL expression for selecting a device.", +} + +func (DeviceSelector) SwaggerDoc() map[string]string { + return map_DeviceSelector +} + +var map_NetworkDeviceData = map[string]string{ + "": "NetworkDeviceData provides network-related details for the allocated device. This information may be filled by drivers or other components to configure or identify the device within a network context.", + "interfaceName": "InterfaceName specifies the name of the network interface associated with the allocated device. This might be the name of a physical or virtual network interface being configured in the pod.\n\nMust not be longer than 256 characters.", + "ips": "IPs lists the network addresses assigned to the device's network interface. This can include both IPv4 and IPv6 addresses. The IPs are in the CIDR notation, which includes both the address and the associated subnet mask. e.g.: \"192.0.2.5/24\" for IPv4 and \"2001:db8::5/64\" for IPv6.", + "hardwareAddress": "HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.\n\nMust not be longer than 128 characters.", +} + +func (NetworkDeviceData) SwaggerDoc() map[string]string { + return map_NetworkDeviceData +} + +var map_OpaqueDeviceConfiguration = map[string]string{ + "": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.", + "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.", + "parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.\n\nThe length of the raw data must be smaller or equal to 10 Ki.", +} + +func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string { + return map_OpaqueDeviceConfiguration +} + +var map_ResourceClaim = map[string]string{ + "": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Spec describes what is being requested and how to configure it. 
The spec is immutable.", + "status": "Status describes whether the claim is ready to use and what has been allocated.", +} + +func (ResourceClaim) SwaggerDoc() map[string]string { + return map_ResourceClaim +} + +var map_ResourceClaimConsumerReference = map[string]string{ + "": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", + "apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "resource": "Resource is the type of resource being referenced, for example \"pods\".", + "name": "Name is the name of resource being referenced.", + "uid": "UID identifies exactly one incarnation of the resource.", +} + +func (ResourceClaimConsumerReference) SwaggerDoc() map[string]string { + return map_ResourceClaimConsumerReference +} + +var map_ResourceClaimList = map[string]string{ + "": "ResourceClaimList is a collection of claims.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claims.", +} + +func (ResourceClaimList) SwaggerDoc() map[string]string { + return map_ResourceClaimList +} + +var map_ResourceClaimSpec = map[string]string{ + "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", + "devices": "Devices defines how to request devices.", +} + +func (ResourceClaimSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimSpec +} + +var map_ResourceClaimStatus = map[string]string{ + "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", + "allocation": "Allocation is set once the claim has been allocated successfully.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.", + "devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.", +} + +func (ResourceClaimStatus) SwaggerDoc() map[string]string { + return map_ResourceClaimStatus +} + +var map_ResourceClaimTemplate = map[string]string{ + "": "ResourceClaimTemplate is used to produce ResourceClaim objects.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. 
A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", +} + +func (ResourceClaimTemplate) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplate +} + +var map_ResourceClaimTemplateList = map[string]string{ + "": "ResourceClaimTemplateList is a collection of claim templates.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource claim templates.", +} + +func (ResourceClaimTemplateList) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateList +} + +var map_ResourceClaimTemplateSpec = map[string]string{ + "": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", + "metadata": "ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim when creating it. No other fields are allowed and will be rejected during validation.", + "spec": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.", +} + +func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string { + return map_ResourceClaimTemplateSpec +} + +var map_ResourcePool = map[string]string{ + "": "ResourcePool describes the pool that ResourceSlices belong to.", + "name": "Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required.\n\nIt must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable.", + "generation": "Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted.\n\nCombined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state.", + "resourceSliceCount": "ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero.\n\nConsumers can use this to check whether they have seen all ResourceSlices belonging to the same pool.", +} + +func (ResourcePool) SwaggerDoc() map[string]string { + return map_ResourcePool +} + +var map_ResourceSlice = map[string]string{ + "": "ResourceSlice represents one or more resources in a pool of similar resources, managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many ResourceSlices comprise a pool is determined by the driver.\n\nAt the moment, the only supported resources are devices with attributes and capacities. Each device in a given pool, regardless of how many ResourceSlices, must have a unique name. The ResourceSlice in which a device gets published may change over time. The unique identifier for a device is the tuple , , .\n\nWhenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number and updates all ResourceSlices with that new number and new resource definitions. 
A consumer must only use ResourceSlices with the highest generation number and ignore all others.\n\nWhen allocating all resources in a pool matching certain criteria or when looking for the best solution among several different alternatives, a consumer should check the number of ResourceSlices in a pool (included in each ResourceSlice) to determine whether its view of a pool is complete and if not, should wait until the driver has completed updating the pool.\n\nFor resources that are not local to a node, the node name is not set. Instead, the driver may use a node selector to specify where the devices are available.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "metadata": "Standard object metadata", + "spec": "Contains the information published by the driver.\n\nChanging the spec automatically increments the metadata.generation number.", +} + +func (ResourceSlice) SwaggerDoc() map[string]string { + return map_ResourceSlice +} + +var map_ResourceSliceList = map[string]string{ + "": "ResourceSliceList is a collection of ResourceSlices.", + "metadata": "Standard list metadata", + "items": "Items is the list of resource ResourceSlices.", +} + +func (ResourceSliceList) SwaggerDoc() map[string]string { + return map_ResourceSliceList +} + +var map_ResourceSliceSpec = map[string]string{ + "": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.", + "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.", + "pool": "Pool describes the pool that this ResourceSlice belongs to.", + "nodeName": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set. This field is immutable.", + "nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "allNodes": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector and AllNodes must be set.", + "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.", +} + +func (ResourceSliceSpec) SwaggerDoc() map[string]string { + return map_ResourceSliceSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/constraint/vendor/k8s.io/api/resource/v1beta1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/resource/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..3be61333f --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,882 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocatedDeviceStatus) DeepCopyInto(out *AllocatedDeviceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Data.DeepCopyInto(&out.Data) + if in.NetworkData != nil { + in, out := &in.NetworkData, &out.NetworkData + *out = new(NetworkDeviceData) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocatedDeviceStatus. +func (in *AllocatedDeviceStatus) DeepCopy() *AllocatedDeviceStatus { + if in == nil { + return nil + } + out := new(AllocatedDeviceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllocationResult) DeepCopyInto(out *AllocationResult) { + *out = *in + in.Devices.DeepCopyInto(&out.Devices) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(corev1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResult. +func (in *AllocationResult) DeepCopy() *AllocationResult { + if in == nil { + return nil + } + out := new(AllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicDevice) DeepCopyInto(out *BasicDevice) { + *out = *in + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = make(map[QualifiedName]DeviceAttribute, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(map[QualifiedName]DeviceCapacity, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicDevice. +func (in *BasicDevice) DeepCopy() *BasicDevice { + if in == nil { + return nil + } + out := new(BasicDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CELDeviceSelector) DeepCopyInto(out *CELDeviceSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CELDeviceSelector. 
+func (in *CELDeviceSelector) DeepCopy() *CELDeviceSelector { + if in == nil { + return nil + } + out := new(CELDeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Device) DeepCopyInto(out *Device) { + *out = *in + if in.Basic != nil { + in, out := &in.Basic, &out.Basic + *out = new(BasicDevice) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. +func (in *Device) DeepCopy() *Device { + if in == nil { + return nil + } + out := new(Device) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAllocationConfiguration) DeepCopyInto(out *DeviceAllocationConfiguration) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationConfiguration. +func (in *DeviceAllocationConfiguration) DeepCopy() *DeviceAllocationConfiguration { + if in == nil { + return nil + } + out := new(DeviceAllocationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAllocationResult) DeepCopyInto(out *DeviceAllocationResult) { + *out = *in + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]DeviceRequestAllocationResult, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceAllocationConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAllocationResult. +func (in *DeviceAllocationResult) DeepCopy() *DeviceAllocationResult { + if in == nil { + return nil + } + out := new(DeviceAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceAttribute) DeepCopyInto(out *DeviceAttribute) { + *out = *in + if in.IntValue != nil { + in, out := &in.IntValue, &out.IntValue + *out = new(int64) + **out = **in + } + if in.BoolValue != nil { + in, out := &in.BoolValue, &out.BoolValue + *out = new(bool) + **out = **in + } + if in.StringValue != nil { + in, out := &in.StringValue, &out.StringValue + *out = new(string) + **out = **in + } + if in.VersionValue != nil { + in, out := &in.VersionValue, &out.VersionValue + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceAttribute. +func (in *DeviceAttribute) DeepCopy() *DeviceAttribute { + if in == nil { + return nil + } + out := new(DeviceAttribute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceCapacity) DeepCopyInto(out *DeviceCapacity) { + *out = *in + out.Value = in.Value.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceCapacity. +func (in *DeviceCapacity) DeepCopy() *DeviceCapacity { + if in == nil { + return nil + } + out := new(DeviceCapacity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClaim) DeepCopyInto(out *DeviceClaim) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]DeviceRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Constraints != nil { + in, out := &in.Constraints, &out.Constraints + *out = make([]DeviceConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClaimConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaim. +func (in *DeviceClaim) DeepCopy() *DeviceClaim { + if in == nil { + return nil + } + out := new(DeviceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClaimConfiguration) DeepCopyInto(out *DeviceClaimConfiguration) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClaimConfiguration. +func (in *DeviceClaimConfiguration) DeepCopy() *DeviceClaimConfiguration { + if in == nil { + return nil + } + out := new(DeviceClaimConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClass) DeepCopyInto(out *DeviceClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClass. +func (in *DeviceClass) DeepCopy() *DeviceClass { + if in == nil { + return nil + } + out := new(DeviceClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassConfiguration) DeepCopyInto(out *DeviceClassConfiguration) { + *out = *in + in.DeviceConfiguration.DeepCopyInto(&out.DeviceConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassConfiguration. 
+func (in *DeviceClassConfiguration) DeepCopy() *DeviceClassConfiguration { + if in == nil { + return nil + } + out := new(DeviceClassConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassList) DeepCopyInto(out *DeviceClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeviceClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassList. +func (in *DeviceClassList) DeepCopy() *DeviceClassList { + if in == nil { + return nil + } + out := new(DeviceClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeviceClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make([]DeviceClassConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceClassSpec. +func (in *DeviceClassSpec) DeepCopy() *DeviceClassSpec { + if in == nil { + return nil + } + out := new(DeviceClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConfiguration) DeepCopyInto(out *DeviceConfiguration) { + *out = *in + if in.Opaque != nil { + in, out := &in.Opaque, &out.Opaque + *out = new(OpaqueDeviceConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConfiguration. +func (in *DeviceConfiguration) DeepCopy() *DeviceConfiguration { + if in == nil { + return nil + } + out := new(DeviceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceConstraint) DeepCopyInto(out *DeviceConstraint) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MatchAttribute != nil { + in, out := &in.MatchAttribute, &out.MatchAttribute + *out = new(FullyQualifiedName) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceConstraint. +func (in *DeviceConstraint) DeepCopy() *DeviceConstraint { + if in == nil { + return nil + } + out := new(DeviceConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeviceRequest) DeepCopyInto(out *DeviceRequest) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]DeviceSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdminAccess != nil { + in, out := &in.AdminAccess, &out.AdminAccess + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequest. +func (in *DeviceRequest) DeepCopy() *DeviceRequest { + if in == nil { + return nil + } + out := new(DeviceRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceRequestAllocationResult) DeepCopyInto(out *DeviceRequestAllocationResult) { + *out = *in + if in.AdminAccess != nil { + in, out := &in.AdminAccess, &out.AdminAccess + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceRequestAllocationResult. +func (in *DeviceRequestAllocationResult) DeepCopy() *DeviceRequestAllocationResult { + if in == nil { + return nil + } + out := new(DeviceRequestAllocationResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeviceSelector) DeepCopyInto(out *DeviceSelector) { + *out = *in + if in.CEL != nil { + in, out := &in.CEL, &out.CEL + *out = new(CELDeviceSelector) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceSelector. +func (in *DeviceSelector) DeepCopy() *DeviceSelector { + if in == nil { + return nil + } + out := new(DeviceSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDeviceData) DeepCopyInto(out *NetworkDeviceData) { + *out = *in + if in.IPs != nil { + in, out := &in.IPs, &out.IPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceData. +func (in *NetworkDeviceData) DeepCopy() *NetworkDeviceData { + if in == nil { + return nil + } + out := new(NetworkDeviceData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpaqueDeviceConfiguration) DeepCopyInto(out *OpaqueDeviceConfiguration) { + *out = *in + in.Parameters.DeepCopyInto(&out.Parameters) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpaqueDeviceConfiguration. +func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration { + if in == nil { + return nil + } + out := new(OpaqueDeviceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim. 
+func (in *ResourceClaim) DeepCopy() *ResourceClaim { + if in == nil { + return nil + } + out := new(ResourceClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimConsumerReference) DeepCopyInto(out *ResourceClaimConsumerReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimConsumerReference. +func (in *ResourceClaimConsumerReference) DeepCopy() *ResourceClaimConsumerReference { + if in == nil { + return nil + } + out := new(ResourceClaimConsumerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimList) DeepCopyInto(out *ResourceClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimList. +func (in *ResourceClaimList) DeepCopy() *ResourceClaimList { + if in == nil { + return nil + } + out := new(ResourceClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { + *out = *in + in.Devices.DeepCopyInto(&out.Devices) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSpec. +func (in *ResourceClaimSpec) DeepCopy() *ResourceClaimSpec { + if in == nil { + return nil + } + out := new(ResourceClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimStatus) DeepCopyInto(out *ResourceClaimStatus) { + *out = *in + if in.Allocation != nil { + in, out := &in.Allocation, &out.Allocation + *out = new(AllocationResult) + (*in).DeepCopyInto(*out) + } + if in.ReservedFor != nil { + in, out := &in.ReservedFor, &out.ReservedFor + *out = make([]ResourceClaimConsumerReference, len(*in)) + copy(*out, *in) + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]AllocatedDeviceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimStatus. +func (in *ResourceClaimStatus) DeepCopy() *ResourceClaimStatus { + if in == nil { + return nil + } + out := new(ResourceClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceClaimTemplate) DeepCopyInto(out *ResourceClaimTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplate. +func (in *ResourceClaimTemplate) DeepCopy() *ResourceClaimTemplate { + if in == nil { + return nil + } + out := new(ResourceClaimTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateList) DeepCopyInto(out *ResourceClaimTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceClaimTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateList. +func (in *ResourceClaimTemplateList) DeepCopy() *ResourceClaimTemplateList { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceClaimTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceClaimTemplateSpec) DeepCopyInto(out *ResourceClaimTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimTemplateSpec. +func (in *ResourceClaimTemplateSpec) DeepCopy() *ResourceClaimTemplateSpec { + if in == nil { + return nil + } + out := new(ResourceClaimTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePool) DeepCopyInto(out *ResourcePool) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePool. +func (in *ResourcePool) DeepCopy() *ResourcePool { + if in == nil { + return nil + } + out := new(ResourcePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice. +func (in *ResourceSlice) DeepCopy() *ResourceSlice { + if in == nil { + return nil + } + out := new(ResourceSlice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ResourceSlice) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList. +func (in *ResourceSliceList) DeepCopy() *ResourceSliceList { + if in == nil { + return nil + } + out := new(ResourceSliceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceSliceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSliceSpec) DeepCopyInto(out *ResourceSliceSpec) { + *out = *in + out.Pool = in.Pool + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(corev1.NodeSelector) + (*in).DeepCopyInto(*out) + } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]Device, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceSpec. +func (in *ResourceSliceSpec) DeepCopy() *ResourceSliceSpec { + if in == nil { + return nil + } + out := new(ResourceSliceSpec) + in.DeepCopyInto(out) + return out +} diff --git a/constraint/vendor/k8s.io/api/resource/v1beta1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/resource/v1beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..b79111b81 --- /dev/null +++ b/constraint/vendor/k8s.io/api/resource/v1beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,166 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeviceClass) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeviceClass) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeviceClass) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeviceClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeviceClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeviceClassList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaim) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaim) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaim) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimTemplate) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimTemplate) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimTemplate) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceClaimTemplateList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceClaimTemplateList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceClaimTemplateList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceSlice) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceSlice) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceSlice) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ResourceSliceList) APILifecycleIntroduced() (major, minor int) { + return 1, 32 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ResourceSliceList) APILifecycleDeprecated() (major, minor int) { + return 1, 35 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ResourceSliceList) APILifecycleRemoved() (major, minor int) { + return 1, 38 +} diff --git a/constraint/vendor/k8s.io/api/scheduling/v1/doc.go b/constraint/vendor/k8s.io/api/scheduling/v1/doc.go index 76c4da002..ee3c66847 100644 --- a/constraint/vendor/k8s.io/api/scheduling/v1/doc.go +++ b/constraint/vendor/k8s.io/api/scheduling/v1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true - +// +k8s:prerelease-lifecycle-gen=true // +groupName=scheduling.k8s.io package v1 // import "k8s.io/api/scheduling/v1" diff --git a/constraint/vendor/k8s.io/api/scheduling/v1/generated.proto b/constraint/vendor/k8s.io/api/scheduling/v1/generated.proto index c1a27e8ba..374e68238 100644 --- a/constraint/vendor/k8s.io/api/scheduling/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -35,7 +35,7 @@ message PriorityClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. 
@@ -66,7 +66,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/constraint/vendor/k8s.io/api/scheduling/v1/types.go b/constraint/vendor/k8s.io/api/scheduling/v1/types.go index 146bae40d..019dbcd00 100644 --- a/constraint/vendor/k8s.io/api/scheduling/v1/types.go +++ b/constraint/vendor/k8s.io/api/scheduling/v1/types.go @@ -24,6 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. @@ -59,6 +60,7 @@ type PriorityClass struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.14 // PriorityClassList is a collection of priority classes. type PriorityClassList struct { diff --git a/constraint/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..a4a432a64 --- /dev/null +++ b/constraint/vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,34 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityClass) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 14 +} diff --git a/constraint/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/constraint/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index f0878fb16..e42dccc68 100644 --- a/constraint/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/constraint/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -36,7 +36,7 @@ message PriorityClass { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. @@ -67,7 +67,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/constraint/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 43878184d..7f77b0175 100644 --- a/constraint/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -36,7 +36,7 @@ message PriorityClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // value represents the integer value of this priority class. This is the actual priority that pods // receive when they have the name of this class in their pod spec. @@ -67,7 +67,7 @@ message PriorityClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of PriorityClasses repeated PriorityClass items = 2; diff --git a/constraint/vendor/k8s.io/api/storage/v1/doc.go b/constraint/vendor/k8s.io/api/storage/v1/doc.go index 75a6489da..e2310dac2 100644 --- a/constraint/vendor/k8s.io/api/storage/v1/doc.go +++ b/constraint/vendor/k8s.io/api/storage/v1/doc.go @@ -18,5 +18,6 @@ limitations under the License. // +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true package v1 // import "k8s.io/api/storage/v1" diff --git a/constraint/vendor/k8s.io/api/storage/v1/generated.proto b/constraint/vendor/k8s.io/api/storage/v1/generated.proto index 06bbe3d5c..dfc309bb4 100644 --- a/constraint/vendor/k8s.io/api/storage/v1/generated.proto +++ b/constraint/vendor/k8s.io/api/storage/v1/generated.proto @@ -44,7 +44,7 @@ message CSIDriver { // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents the specification of the CSI Driver. 
optional CSIDriverSpec spec = 2; @@ -55,7 +55,7 @@ message CSIDriverList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIDriver repeated CSIDriver items = 2; @@ -226,7 +226,7 @@ message CSIDriverSpec { message CSINode { // Standard object's metadata. // metadata.name must be the Kubernetes node name. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification of CSINode optional CSINodeSpec spec = 2; @@ -275,7 +275,7 @@ message CSINodeList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSINode repeated CSINode items = 2; @@ -327,7 +327,7 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is @@ -336,7 +336,7 @@ message CSIStorageCapacity { // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass @@ -356,7 +356,7 @@ message CSIStorageCapacity { // unavailable. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the @@ -370,7 +370,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. @@ -378,7 +378,7 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIStorageCapacity objects. repeated CSIStorageCapacity items = 2; @@ -393,7 +393,7 @@ message StorageClass { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // provisioner indicates the type of the provisioner. optional string provisioner = 2; @@ -431,7 +431,7 @@ message StorageClass { // This field is only honored by servers that enable the VolumeScheduling feature. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; + repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } // StorageClassList is a collection of storage classes. @@ -439,7 +439,7 @@ message StorageClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of StorageClasses repeated StorageClass items = 2; @@ -466,7 +466,7 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. @@ -484,15 +484,15 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. +// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { // persistentVolumeName represents the name of the persistent volume to attach. @@ -506,7 +506,7 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is beta-level and is only // honored by servers that enabled the CSIMigration feature. // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -554,7 +554,7 @@ message VolumeAttachmentStatus { message VolumeError { // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // message represents the error encountered during Attach or Detach operation. 
// This string may be logged, so it should not contain sensitive diff --git a/constraint/vendor/k8s.io/api/storage/v1/types.go b/constraint/vendor/k8s.io/api/storage/v1/types.go index a94c7f44c..3936dc83b 100644 --- a/constraint/vendor/k8s.io/api/storage/v1/types.go +++ b/constraint/vendor/k8s.io/api/storage/v1/types.go @@ -25,6 +25,7 @@ import ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. @@ -79,6 +80,7 @@ type StorageClass struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.6 // StorageClassList is a collection of storage classes. type StorageClassList struct { @@ -112,6 +114,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.13 // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. @@ -137,6 +140,7 @@ type VolumeAttachment struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.13 // VolumeAttachmentList is a collection of VolumeAttachment objects. type VolumeAttachmentList struct { @@ -165,8 +169,8 @@ type VolumeAttachmentSpec struct { } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. +// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { // persistentVolumeName represents the name of the persistent volume to attach. @@ -227,6 +231,7 @@ type VolumeError struct { // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 // CSIDriver captures information about a Container Storage Interface (CSI) // volume driver deployed on the cluster. @@ -251,6 +256,7 @@ type CSIDriver struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 // CSIDriverList is a collection of CSIDriver objects. type CSIDriverList struct { @@ -427,7 +433,7 @@ const ( // ReadWriteOnceWithFSTypeFSGroupPolicy indicates that each volume will be examined // to determine if the volume ownership and permissions // should be modified. If a fstype is defined and the volume's access mode - // contains ReadWriteOnce, then the defined fsGroup will be applied. + // contains ReadWriteOnce or ReadWriteOncePod, then the defined fsGroup will be applied. // This mode should be defined if it's expected that the // fsGroup may need to be modified depending on the pod's SecurityPolicy. // This is the default behavior if no other FSGroupPolicy is defined. @@ -491,6 +497,7 @@ const ( // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.17 // CSINode holds information about all CSI drivers installed on a node. // CSI drivers do not need to create the CSINode object directly. 
As long as @@ -572,6 +579,7 @@ type VolumeNodeResources struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.17 // CSINodeList is a collection of CSINode objects. type CSINodeList struct { @@ -588,6 +596,7 @@ type CSINodeList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.24 // CSIStorageCapacity stores the result of one CSI GetCapacity call. // For a given StorageClass, this describes the available capacity in a @@ -673,6 +682,7 @@ type CSIStorageCapacity struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.24 // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. type CSIStorageCapacityList struct { diff --git a/constraint/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 89b1cbb20..eee18bd18 100644 --- a/constraint/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -185,7 +185,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in the future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } diff --git a/constraint/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..a44c1181a --- /dev/null +++ b/constraint/vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,82 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIDriver) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIDriverList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSINode) APILifecycleIntroduced() (major, minor int) { + return 1, 17 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSINodeList) APILifecycleIntroduced() (major, minor int) { + return 1, 17 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIStorageCapacity) APILifecycleIntroduced() (major, minor int) { + return 1, 24 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CSIStorageCapacityList) APILifecycleIntroduced() (major, minor int) { + return 1, 24 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageClass) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *StorageClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 6 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttachment) APILifecycleIntroduced() (major, minor int) { + return 1, 13 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *VolumeAttachmentList) APILifecycleIntroduced() (major, minor int) { + return 1, 13 +} diff --git a/constraint/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/constraint/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 93aefd933..79acbebd8 100644 --- a/constraint/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/constraint/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -65,7 +65,7 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is @@ -74,7 +74,7 @@ message CSIStorageCapacity { // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass @@ -94,7 +94,7 @@ message CSIStorageCapacity { // unavailable. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the @@ -108,7 +108,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. @@ -116,7 +116,7 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIStorageCapacity objects. repeated CSIStorageCapacity items = 2; @@ -130,7 +130,7 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. @@ -148,15 +148,15 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. 
+// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { // persistentVolumeName represents the name of the persistent volume to attach. @@ -170,7 +170,7 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is alpha-level and is only // honored by servers that enabled the CSIMigration feature. // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -221,7 +221,7 @@ message VolumeAttributesClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Name of the CSI driver // This field is immutable. @@ -248,7 +248,7 @@ message VolumeAttributesClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttributesClass objects. repeated VolumeAttributesClass items = 2; @@ -258,7 +258,7 @@ message VolumeAttributesClassList { message VolumeError { // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // message represents the error encountered during Attach or Detach operation. // This string maybe logged, so it should not contain sensitive diff --git a/constraint/vendor/k8s.io/api/storage/v1alpha1/types.go b/constraint/vendor/k8s.io/api/storage/v1alpha1/types.go index 1fbf65f81..7ef7353eb 100644 --- a/constraint/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/constraint/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -84,8 +84,8 @@ type VolumeAttachmentSpec struct { } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. +// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { // persistentVolumeName represents the name of the persistent volume to attach. diff --git a/constraint/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index ac87dbdca..e44f37b2d 100644 --- a/constraint/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -72,7 +72,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in the future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } diff --git a/constraint/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/constraint/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index c503ec651..446a40c48 100644 --- a/constraint/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/constraint/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -524,10 +524,66 @@ func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo +func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} } +func (*VolumeAttributesClass) ProtoMessage() {} +func (*VolumeAttributesClass) Descriptor() ([]byte, []int) { + return fileDescriptor_73e4f72503e71065, []int{17} +} +func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClass.Merge(m, src) +} +func (m *VolumeAttributesClass) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClass) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo + +func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} } +func (*VolumeAttributesClassList) ProtoMessage() {} +func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_73e4f72503e71065, []int{18} +} +func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttributesClassList.Merge(m, src) +} +func (m *VolumeAttributesClassList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttributesClassList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo + func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_73e4f72503e71065, []int{17} + return fileDescriptor_73e4f72503e71065, []int{19} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +611,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } func (*VolumeNodeResources) ProtoMessage() {} func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_73e4f72503e71065, []int{18} + return fileDescriptor_73e4f72503e71065, []int{20} } func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -600,6 +656,9 @@ func init() { 
proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass.ParametersEntry") + proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClassList") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources") } @@ -609,111 +668,115 @@ func init() { } var fileDescriptor_73e4f72503e71065 = []byte{ - // 1655 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xcf, 0xc6, 0xce, 0xdb, 0x38, 0x69, 0x92, 0x49, 0xda, 0xbf, 0xff, 0x3e, 0xd8, 0x91, 0x11, - 0x34, 0xad, 0xca, 0xba, 0x0d, 0xa5, 0xaa, 0x2a, 0x55, 0x22, 0x9b, 0x04, 0xea, 0x36, 0x4e, 0xd3, - 0x71, 0x54, 0x55, 0x15, 0x07, 0xc6, 0xeb, 0x89, 0x33, 0x8d, 0xf7, 0xa5, 0x3b, 0xe3, 0x10, 0x73, - 0x82, 0x0b, 0x67, 0xc4, 0x81, 0x4f, 0xc0, 0x57, 0x00, 0x09, 0x2e, 0x1c, 0xa9, 0x84, 0x84, 0x2a, - 0x2e, 0xf4, 0x64, 0x51, 0xf3, 0x11, 0x90, 0x38, 0x44, 0x1c, 0xd0, 0xcc, 0x8e, 0xbd, 0x6f, 0x76, - 0x93, 0x70, 0xf0, 0xcd, 0xf3, 0xbc, 0xfc, 0x9e, 0x67, 0xe6, 0x79, 0x5d, 0x83, 0xab, 0x87, 0xb7, - 0x99, 0x4e, 0x9d, 0x12, 0x76, 0x69, 0x89, 0x71, 0xc7, 0xc3, 0x0d, 0x52, 0x3a, 0xba, 0x51, 0x23, - 0x1c, 0xdf, 0x28, 0x35, 0x88, 0x4d, 0x3c, 0xcc, 0x49, 0x5d, 0x77, 0x3d, 0x87, 0x3b, 0x30, 0xe7, - 0xcb, 0xea, 0xd8, 0xa5, 0xba, 0x92, 0xd5, 0x95, 0x6c, 0xee, 0xdd, 0x06, 0xe5, 0x07, 0xad, 0x9a, - 0x6e, 0x3a, 0x56, 0xa9, 0xe1, 0x34, 0x9c, 0x92, 0x54, 0xa9, 0xb5, 0xf6, 0xe5, 0x49, 0x1e, 0xe4, - 0x2f, 0x1f, 0x2a, 0x57, 0x0c, 0x99, 0x35, 0x1d, 0x4f, 0xd8, 0x8c, 0x9b, 0xcb, 0xdd, 0x0c, 0x64, - 0x2c, 0x6c, 0x1e, 0x50, 0x9b, 0x78, 0xed, 0x92, 0x7b, 0xd8, 0x90, 0x4a, 0x1e, 0x61, 0x4e, 0xcb, - 0x33, 0xc9, 0xb9, 0xb4, 0x58, 0xc9, 0x22, 0x1c, 0x0f, 0xb2, 0x55, 0x1a, 0xa6, 0xe5, 0xb5, 0x6c, - 0x4e, 0xad, 0xa4, 0x99, 0x5b, 0xa7, 0x29, 0x30, 0xf3, 0x80, 0x58, 0x38, 0xae, 0x57, 0xfc, 0x51, - 0x03, 0x33, 0x1b, 0xd5, 0xf2, 0xa6, 0x47, 0x8f, 0x88, 0x07, 0x3f, 0x01, 0xd3, 0xc2, 0xa3, 0x3a, - 0xe6, 0x38, 0xab, 0xad, 0x68, 0xab, 0x99, 0xb5, 0xeb, 0x7a, 0xf0, 0xc8, 0x7d, 0x60, 0xdd, 0x3d, - 0x6c, 0x08, 0x02, 0xd3, 0x85, 0xb4, 0x7e, 0x74, 0x43, 0x7f, 0x58, 0x7b, 0x46, 0x4c, 0x5e, 0x21, - 0x1c, 0x1b, 0xf0, 0x45, 0xa7, 0x30, 0xd6, 0xed, 0x14, 0x40, 0x40, 0x43, 0x7d, 0x54, 0xf8, 0x00, - 0xa4, 0x99, 0x4b, 0xcc, 0xec, 0xb8, 0x44, 0xbf, 0xa2, 0x0f, 0x0f, 0xa1, 0xde, 0x77, 0xab, 0xea, - 0x12, 0xd3, 0x98, 0x55, 0xb0, 0x69, 0x71, 0x42, 0x12, 0xa4, 0xf8, 0x83, 0x06, 0xe6, 0xfa, 0x52, - 0xdb, 0x94, 0x71, 0xf8, 0x71, 0xe2, 0x02, 0xfa, 0xd9, 0x2e, 0x20, 0xb4, 0xa5, 0xfb, 0x0b, 0xca, - 0xce, 0x74, 0x8f, 0x12, 0x72, 0xfe, 0x3e, 0x98, 0xa0, 0x9c, 0x58, 0x2c, 0x3b, 0xbe, 0x92, 0x5a, - 0xcd, 0xac, 0xbd, 0x7d, 0x26, 0xef, 0x8d, 0x39, 0x85, 0x38, 0x51, 0x16, 0xba, 0xc8, 0x87, 0x28, - 0xfe, 0x9e, 0x0e, 0xf9, 0x2e, 0xee, 0x04, 0xef, 0x80, 0x0b, 0x98, 0x73, 0x6c, 0x1e, 0x20, 0xf2, - 0xbc, 0x45, 0x3d, 0x52, 0x97, 0x37, 0x98, 0x36, 0x60, 0xb7, 0x53, 0xb8, 0xb0, 0x1e, 0xe1, 0xa0, - 0x98, 0xa4, 0xd0, 0x75, 
0x9d, 0x7a, 0xd9, 0xde, 0x77, 0x1e, 0xda, 0x15, 0xa7, 0x65, 0x73, 0xf9, - 0xc0, 0x4a, 0x77, 0x37, 0xc2, 0x41, 0x31, 0x49, 0x68, 0x82, 0xe5, 0x23, 0xa7, 0xd9, 0xb2, 0xc8, - 0x36, 0xdd, 0x27, 0x66, 0xdb, 0x6c, 0x92, 0x8a, 0x53, 0x27, 0x2c, 0x9b, 0x5a, 0x49, 0xad, 0xce, - 0x18, 0xa5, 0x6e, 0xa7, 0xb0, 0xfc, 0x78, 0x00, 0xff, 0xa4, 0x53, 0x58, 0x1a, 0x40, 0x47, 0x03, - 0xc1, 0xe0, 0x5d, 0x30, 0xaf, 0x5e, 0x68, 0x03, 0xbb, 0xd8, 0xa4, 0xbc, 0x9d, 0x4d, 0x4b, 0x0f, - 0x97, 0xba, 0x9d, 0xc2, 0x7c, 0x35, 0xca, 0x42, 0x71, 0x59, 0x78, 0x0f, 0xcc, 0xed, 0xb3, 0x8f, - 0x3c, 0xa7, 0xe5, 0xee, 0x3a, 0x4d, 0x6a, 0xb6, 0xb3, 0x13, 0x2b, 0xda, 0xea, 0x8c, 0x51, 0xec, - 0x76, 0x0a, 0x73, 0x1f, 0x56, 0x43, 0x8c, 0x93, 0x38, 0x01, 0x45, 0x15, 0x21, 0x01, 0x73, 0xdc, - 0x39, 0x24, 0xb6, 0x78, 0x3a, 0xc2, 0x38, 0xcb, 0x4e, 0xca, 0x58, 0xae, 0xbe, 0x29, 0x96, 0x7b, - 0x21, 0x05, 0xe3, 0xa2, 0x0a, 0xe7, 0x5c, 0x98, 0xca, 0x50, 0x14, 0x15, 0x6e, 0x80, 0x45, 0xcf, - 0x0f, 0x0e, 0x43, 0xc4, 0x6d, 0xd5, 0x9a, 0x94, 0x1d, 0x64, 0xa7, 0xe4, 0x8d, 0x2f, 0x76, 0x3b, - 0x85, 0x45, 0x14, 0x67, 0xa2, 0xa4, 0x3c, 0xbc, 0x09, 0x66, 0x19, 0xd9, 0xa6, 0x76, 0xeb, 0xd8, - 0x8f, 0xe9, 0xb4, 0xd4, 0x5f, 0xe8, 0x76, 0x0a, 0xb3, 0xd5, 0xad, 0x80, 0x8e, 0x22, 0x52, 0xc5, - 0xef, 0x35, 0x30, 0xb5, 0x51, 0x2d, 0xef, 0x38, 0x75, 0x32, 0x82, 0x82, 0x2e, 0x47, 0x0a, 0xfa, - 0xf2, 0x29, 0x25, 0x21, 0x9c, 0x1a, 0x5a, 0xce, 0x7f, 0xf9, 0xe5, 0x2c, 0x64, 0x54, 0x3f, 0x5a, - 0x01, 0x69, 0x1b, 0x5b, 0x44, 0xba, 0x3e, 0x13, 0xe8, 0xec, 0x60, 0x8b, 0x20, 0xc9, 0x81, 0xef, - 0x80, 0x49, 0xdb, 0xa9, 0x93, 0xf2, 0xa6, 0x74, 0x60, 0xc6, 0xb8, 0xa0, 0x64, 0x26, 0x77, 0x24, - 0x15, 0x29, 0xae, 0x78, 0x4a, 0xee, 0xb8, 0x4e, 0xd3, 0x69, 0xb4, 0x1f, 0x90, 0x76, 0x2f, 0xb9, - 0xe5, 0x53, 0xee, 0x85, 0xe8, 0x28, 0x22, 0x05, 0x6b, 0x20, 0x83, 0x9b, 0x4d, 0xc7, 0xc4, 0x1c, - 0xd7, 0x9a, 0x44, 0x66, 0x6c, 0x66, 0xad, 0xf4, 0xa6, 0x3b, 0xfa, 0x15, 0x21, 0x8c, 0x23, 0x35, - 0x11, 0x98, 0x31, 0xdf, 0xed, 0x14, 0x32, 0xeb, 0x01, 0x0e, 0x0a, 0x83, 0x16, 0xbf, 0xd3, 0x40, - 0x46, 0xdd, 0x7a, 0x04, 0x2d, 0xec, 0x5e, 0xb4, 0x85, 0xbd, 0x75, 0x86, 0x78, 0x0d, 0x69, 0x60, - 0x66, 0xdf, 0x6d, 0xd9, 0xbd, 0xf6, 0xc0, 0x54, 0x5d, 0x06, 0x8d, 0x65, 0x35, 0x09, 0x7d, 0xe5, - 0x0c, 0xd0, 0xaa, 0x43, 0xce, 0x2b, 0x03, 0x53, 0xfe, 0x99, 0xa1, 0x1e, 0x54, 0xf1, 0xef, 0x14, - 0x80, 0x1b, 0xd5, 0x72, 0xac, 0x3f, 0x8c, 0x20, 0xad, 0x29, 0x98, 0x15, 0x99, 0xd3, 0xcb, 0x0d, - 0x95, 0xde, 0xef, 0x9d, 0x31, 0x12, 0xb8, 0x46, 0x9a, 0x55, 0xd2, 0x24, 0x26, 0x77, 0x3c, 0x3f, - 0xc9, 0x76, 0x42, 0x60, 0x28, 0x02, 0x0d, 0x37, 0xc1, 0x42, 0xaf, 0xdd, 0x35, 0x31, 0x63, 0x22, - 0xb9, 0xb3, 0x29, 0x99, 0xcc, 0x59, 0xe5, 0xe2, 0x42, 0x35, 0xc6, 0x47, 0x09, 0x0d, 0xf8, 0x04, - 0x4c, 0x9b, 0xe1, 0xce, 0x7a, 0x4a, 0xda, 0xe8, 0xbd, 0x85, 0x45, 0x7f, 0xd4, 0xc2, 0x36, 0xa7, - 0xbc, 0x6d, 0xcc, 0x8a, 0x94, 0xe9, 0xb7, 0xe0, 0x3e, 0x1a, 0x64, 0x60, 0xd1, 0xc2, 0xc7, 0xd4, - 0x6a, 0x59, 0x7e, 0x72, 0x57, 0xe9, 0x67, 0x44, 0xf6, 0xdf, 0xf3, 0x9b, 0x90, 0xad, 0xaf, 0x12, - 0x07, 0x43, 0x49, 0xfc, 0xe2, 0x2f, 0x1a, 0xb8, 0x94, 0x0c, 0xfc, 0x08, 0x0a, 0xa4, 0x1a, 0x2d, - 0x10, 0xfd, 0x94, 0x2c, 0x8e, 0x39, 0x38, 0xa4, 0x56, 0xbe, 0x9e, 0x04, 0xb3, 0xe1, 0x18, 0x8e, - 0x20, 0x81, 0xdf, 0x07, 0x19, 0xd7, 0x73, 0x8e, 0x28, 0xa3, 0x8e, 0x4d, 0x3c, 0xd5, 0x1d, 0x97, - 0x94, 0x4a, 0x66, 0x37, 0x60, 0xa1, 0xb0, 0x1c, 0x6c, 0x02, 0xe0, 0x62, 0x0f, 0x5b, 0x84, 0x8b, - 0x4a, 0x4e, 0xc9, 0x37, 0xb8, 0xfd, 0xa6, 0x37, 0x08, 0x5f, 0x4b, 0xdf, 0xed, 0xab, 0x6e, 0xd9, - 0xdc, 0x6b, 0x07, 0x2e, 0x06, 0x0c, 0x14, 0xc2, 
0x87, 0x87, 0x60, 0xce, 0x23, 0x66, 0x13, 0x53, - 0x4b, 0x8d, 0xf5, 0xb4, 0x74, 0x73, 0x4b, 0x8c, 0x57, 0x14, 0x66, 0x9c, 0x74, 0x0a, 0xd7, 0x93, - 0x2b, 0xba, 0xbe, 0x4b, 0x3c, 0x46, 0x19, 0x27, 0x36, 0xf7, 0x53, 0x27, 0xa2, 0x83, 0xa2, 0xd8, - 0x62, 0x04, 0x58, 0x62, 0x40, 0x3e, 0x74, 0x39, 0x75, 0x6c, 0x96, 0x9d, 0x08, 0x46, 0x40, 0x25, - 0x44, 0x47, 0x11, 0x29, 0xb8, 0x0d, 0x96, 0x45, 0xb7, 0xfe, 0xd4, 0x37, 0xb0, 0x75, 0xec, 0x62, - 0x5b, 0x3c, 0x55, 0x76, 0x52, 0xce, 0xe2, 0xac, 0xd8, 0x8e, 0xd6, 0x07, 0xf0, 0xd1, 0x40, 0x2d, - 0xf8, 0x04, 0x2c, 0xfa, 0xeb, 0x91, 0x41, 0xed, 0x3a, 0xb5, 0x1b, 0x62, 0x39, 0x92, 0x6b, 0xc1, - 0x8c, 0x71, 0x55, 0xd4, 0xc6, 0xe3, 0x38, 0xf3, 0x64, 0x10, 0x11, 0x25, 0x41, 0xe0, 0x73, 0xb0, - 0x28, 0x2d, 0x92, 0xba, 0x6a, 0x2c, 0x94, 0xb0, 0xec, 0x74, 0x72, 0xb7, 0x11, 0x4f, 0x27, 0x12, - 0xa9, 0xd7, 0x7e, 0x7a, 0x6d, 0x6a, 0x8f, 0x78, 0x96, 0xf1, 0x7f, 0x15, 0xaf, 0xc5, 0xf5, 0x38, - 0x14, 0x4a, 0xa2, 0xe7, 0xee, 0x82, 0xf9, 0x58, 0xc0, 0xe1, 0x02, 0x48, 0x1d, 0x92, 0xb6, 0x3f, - 0xaf, 0x91, 0xf8, 0x09, 0x97, 0xc1, 0xc4, 0x11, 0x6e, 0xb6, 0x88, 0x9f, 0x81, 0xc8, 0x3f, 0xdc, - 0x19, 0xbf, 0xad, 0x15, 0x7f, 0xd2, 0x40, 0xa4, 0xb1, 0x8d, 0xa0, 0xb8, 0x2b, 0xd1, 0xe2, 0x5e, - 0x3d, 0x6b, 0x62, 0x0f, 0x29, 0xeb, 0x2f, 0x34, 0x30, 0x1b, 0xde, 0x02, 0xe1, 0x35, 0x30, 0x8d, - 0x5b, 0x75, 0x4a, 0x6c, 0xb3, 0xb7, 0xb3, 0xf4, 0xbd, 0x59, 0x57, 0x74, 0xd4, 0x97, 0x10, 0x3b, - 0x22, 0x39, 0x76, 0xa9, 0x87, 0x45, 0xa6, 0x55, 0x89, 0xe9, 0xd8, 0x75, 0x26, 0x9f, 0x29, 0xe5, - 0x37, 0xca, 0xad, 0x38, 0x13, 0x25, 0xe5, 0x8b, 0xdf, 0x8e, 0x83, 0x05, 0x3f, 0x41, 0xfc, 0x4f, - 0x04, 0x8b, 0xd8, 0x7c, 0x04, 0xed, 0x05, 0x45, 0xd6, 0xbe, 0xeb, 0xa7, 0xaf, 0x44, 0x81, 0x77, - 0xc3, 0xf6, 0x3f, 0xf8, 0x14, 0x4c, 0x32, 0x8e, 0x79, 0x8b, 0xc9, 0xf1, 0x97, 0x59, 0x5b, 0x3b, - 0x17, 0xaa, 0xd4, 0x0c, 0xf6, 0x3f, 0xff, 0x8c, 0x14, 0x62, 0xf1, 0x67, 0x0d, 0x2c, 0xc7, 0x55, - 0x46, 0x90, 0x70, 0x8f, 0xa2, 0x09, 0x77, 0xed, 0x3c, 0x37, 0x1a, 0x92, 0x74, 0xbf, 0x69, 0xe0, - 0x52, 0xe2, 0xf2, 0x72, 0xce, 0x8a, 0x5e, 0xe5, 0xc6, 0x3a, 0xe2, 0x4e, 0xb0, 0x3e, 0xcb, 0x5e, - 0xb5, 0x3b, 0x80, 0x8f, 0x06, 0x6a, 0xc1, 0x67, 0x60, 0x81, 0xda, 0x4d, 0x6a, 0x13, 0x35, 0x96, - 0x83, 0x70, 0x0f, 0x6c, 0x28, 0x71, 0x64, 0x19, 0xe6, 0x65, 0xb1, 0xbd, 0x94, 0x63, 0x28, 0x28, - 0x81, 0x5b, 0xfc, 0x75, 0x40, 0x78, 0xe4, 0x5a, 0x29, 0x2a, 0x4a, 0x52, 0x88, 0x97, 0xa8, 0x28, - 0x45, 0x47, 0x7d, 0x09, 0x99, 0x41, 0xf2, 0x29, 0x94, 0xa3, 0xe7, 0xcb, 0x20, 0xa9, 0x19, 0xca, - 0x20, 0x79, 0x46, 0x0a, 0x51, 0x78, 0x22, 0xd6, 0xb6, 0xd0, 0x7a, 0xd6, 0xf7, 0x64, 0x47, 0xd1, - 0x51, 0x5f, 0xa2, 0xf8, 0x4f, 0x6a, 0x40, 0x94, 0x64, 0x2a, 0x86, 0xae, 0xd4, 0xfb, 0xc2, 0x8f, - 0x5f, 0xa9, 0xde, 0xbf, 0x52, 0x1d, 0x7e, 0xa3, 0x01, 0x88, 0xfb, 0x10, 0x95, 0x5e, 0xaa, 0xfa, - 0xf9, 0x74, 0xff, 0xfc, 0x15, 0xa2, 0xaf, 0x27, 0xc0, 0xfc, 0x59, 0x9d, 0x53, 0x4e, 0xc0, 0xa4, - 0x00, 0x1a, 0xe0, 0x01, 0xa4, 0x20, 0xe3, 0x53, 0xb7, 0x3c, 0xcf, 0xf1, 0x54, 0xc9, 0x5e, 0x3e, - 0xdd, 0x21, 0x29, 0x6e, 0xe4, 0xe5, 0x37, 0x51, 0xa0, 0x7f, 0xd2, 0x29, 0x64, 0x42, 0x7c, 0x14, - 0xc6, 0x16, 0xa6, 0xea, 0x24, 0x30, 0x95, 0xfe, 0x0f, 0xa6, 0x36, 0xc9, 0x70, 0x53, 0x21, 0xec, - 0xdc, 0x16, 0xf8, 0xdf, 0x90, 0x07, 0x3a, 0xd7, 0x6c, 0xfb, 0x52, 0x03, 0x61, 0x1b, 0x70, 0x1b, - 0xa4, 0x39, 0x55, 0x95, 0x98, 0x59, 0xbb, 0x7a, 0xb6, 0x0e, 0xb3, 0x47, 0x2d, 0x12, 0x34, 0x4a, - 0x71, 0x42, 0x12, 0x05, 0x5e, 0x01, 0x53, 0x16, 0x61, 0x0c, 0x37, 0x94, 0xe5, 0xe0, 0x03, 0xaa, - 0xe2, 0x93, 0x51, 0x8f, 0x5f, 0xbc, 0x05, 0x96, 0x06, 0x7c, 0x92, 0xc2, 
0x02, 0x98, 0x30, 0xe5, - 0x5f, 0x0a, 0xc2, 0xa1, 0x09, 0x63, 0x46, 0x74, 0x99, 0x0d, 0xf9, 0x5f, 0x82, 0x4f, 0x37, 0x3e, - 0x78, 0xf1, 0x3a, 0x3f, 0xf6, 0xf2, 0x75, 0x7e, 0xec, 0xd5, 0xeb, 0xfc, 0xd8, 0xe7, 0xdd, 0xbc, - 0xf6, 0xa2, 0x9b, 0xd7, 0x5e, 0x76, 0xf3, 0xda, 0xab, 0x6e, 0x5e, 0xfb, 0xa3, 0x9b, 0xd7, 0xbe, - 0xfa, 0x33, 0x3f, 0xf6, 0x34, 0x37, 0xfc, 0xdf, 0xda, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x12, - 0x41, 0x18, 0xc9, 0xca, 0x15, 0x00, 0x00, + // 1728 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x23, 0x49, + 0x15, 0x4f, 0xc7, 0xce, 0x57, 0x39, 0x99, 0x24, 0x35, 0x99, 0xc5, 0xeb, 0x83, 0x1d, 0x19, 0xc1, + 0x66, 0x46, 0x4b, 0x7b, 0x12, 0x96, 0xd5, 0x68, 0xa5, 0x95, 0x48, 0x27, 0x81, 0xf5, 0x6e, 0x9c, + 0xc9, 0x96, 0xa3, 0xd1, 0x6a, 0xc5, 0x81, 0x72, 0xbb, 0xe2, 0xd4, 0xc6, 0xfd, 0xb1, 0x5d, 0xd5, + 0x21, 0xe6, 0x04, 0x17, 0xce, 0x88, 0x03, 0x7f, 0x01, 0xff, 0x02, 0x48, 0x70, 0xe1, 0xc8, 0x48, + 0x48, 0x68, 0xe1, 0xc2, 0x9e, 0x2c, 0xc6, 0xf3, 0x27, 0x20, 0x71, 0x88, 0x38, 0xa0, 0xaa, 0x2e, + 0xf7, 0xb7, 0x27, 0x36, 0x2b, 0xf9, 0xe6, 0x7a, 0x1f, 0xbf, 0x7a, 0x55, 0xef, 0xf7, 0x5e, 0xbd, + 0x36, 0x78, 0x72, 0xfd, 0x8c, 0xe9, 0xd4, 0x69, 0x60, 0x97, 0x36, 0x18, 0x77, 0x3c, 0xdc, 0x23, + 0x8d, 0x9b, 0xfd, 0x0e, 0xe1, 0x78, 0xbf, 0xd1, 0x23, 0x36, 0xf1, 0x30, 0x27, 0x5d, 0xdd, 0xf5, + 0x1c, 0xee, 0xc0, 0x4a, 0x60, 0xab, 0x63, 0x97, 0xea, 0xca, 0x56, 0x57, 0xb6, 0x95, 0xef, 0xf5, + 0x28, 0xbf, 0xf2, 0x3b, 0xba, 0xe9, 0x58, 0x8d, 0x9e, 0xd3, 0x73, 0x1a, 0xd2, 0xa5, 0xe3, 0x5f, + 0xca, 0x95, 0x5c, 0xc8, 0x5f, 0x01, 0x54, 0xa5, 0x1e, 0xdb, 0xd6, 0x74, 0x3c, 0xb1, 0x67, 0x7a, + 0xbb, 0xca, 0x7b, 0x91, 0x8d, 0x85, 0xcd, 0x2b, 0x6a, 0x13, 0x6f, 0xd0, 0x70, 0xaf, 0x7b, 0xd2, + 0xc9, 0x23, 0xcc, 0xf1, 0x3d, 0x93, 0xcc, 0xe4, 0xc5, 0x1a, 0x16, 0xe1, 0x38, 0x6f, 0xaf, 0xc6, + 0x24, 0x2f, 0xcf, 0xb7, 0x39, 0xb5, 0xb2, 0xdb, 0xbc, 0x7f, 0x9f, 0x03, 0x33, 0xaf, 0x88, 0x85, + 0xd3, 0x7e, 0xf5, 0x3f, 0x69, 0x60, 0xed, 0xa8, 0xdd, 0x3c, 0xf6, 0xe8, 0x0d, 0xf1, 0xe0, 0x4f, + 0xc1, 0xaa, 0x88, 0xa8, 0x8b, 0x39, 0x2e, 0x6b, 0xbb, 0xda, 0x5e, 0xe9, 0xe0, 0xa9, 0x1e, 0x5d, + 0x72, 0x08, 0xac, 0xbb, 0xd7, 0x3d, 0x21, 0x60, 0xba, 0xb0, 0xd6, 0x6f, 0xf6, 0xf5, 0xe7, 0x9d, + 0x2f, 0x88, 0xc9, 0x5b, 0x84, 0x63, 0x03, 0xbe, 0x1c, 0xd6, 0x16, 0x46, 0xc3, 0x1a, 0x88, 0x64, + 0x28, 0x44, 0x85, 0x9f, 0x80, 0x22, 0x73, 0x89, 0x59, 0x5e, 0x94, 0xe8, 0x8f, 0xf5, 0xc9, 0x29, + 0xd4, 0xc3, 0xb0, 0xda, 0x2e, 0x31, 0x8d, 0x75, 0x05, 0x5b, 0x14, 0x2b, 0x24, 0x41, 0xea, 0x7f, + 0xd4, 0xc0, 0x46, 0x68, 0x75, 0x4a, 0x19, 0x87, 0x3f, 0xc9, 0x1c, 0x40, 0x9f, 0xee, 0x00, 0xc2, + 0x5b, 0x86, 0xbf, 0xa5, 0xf6, 0x59, 0x1d, 0x4b, 0x62, 0xc1, 0x7f, 0x0c, 0x96, 0x28, 0x27, 0x16, + 0x2b, 0x2f, 0xee, 0x16, 0xf6, 0x4a, 0x07, 0xdf, 0x99, 0x2a, 0x7a, 0x63, 0x43, 0x21, 0x2e, 0x35, + 0x85, 0x2f, 0x0a, 0x20, 0xea, 0xff, 0x2c, 0xc6, 0x62, 0x17, 0x67, 0x82, 0x1f, 0x80, 0x07, 0x98, + 0x73, 0x6c, 0x5e, 0x21, 0xf2, 0xa5, 0x4f, 0x3d, 0xd2, 0x95, 0x27, 0x58, 0x35, 0xe0, 0x68, 0x58, + 0x7b, 0x70, 0x98, 0xd0, 0xa0, 0x94, 0xa5, 0xf0, 0x75, 0x9d, 0x6e, 0xd3, 0xbe, 0x74, 0x9e, 0xdb, + 0x2d, 0xc7, 0xb7, 0xb9, 0xbc, 0x60, 0xe5, 0x7b, 0x9e, 0xd0, 0xa0, 0x94, 0x25, 0x34, 0xc1, 0xce, + 0x8d, 0xd3, 0xf7, 0x2d, 0x72, 0x4a, 0x2f, 0x89, 0x39, 0x30, 0xfb, 0xa4, 0xe5, 0x74, 0x09, 0x2b, + 0x17, 0x76, 0x0b, 0x7b, 0x6b, 0x46, 0x63, 0x34, 0xac, 0xed, 0xbc, 0xc8, 0xd1, 0xdf, 0x0d, 0x6b, + 0x0f, 0x73, 0xe4, 0x28, 0x17, 0x0c, 0x7e, 0x08, 0x36, 0xd5, 0x0d, 0x1d, 0x61, 0x17, 0x9b, 0x94, + 0x0f, 
0xca, 0x45, 0x19, 0xe1, 0xc3, 0xd1, 0xb0, 0xb6, 0xd9, 0x4e, 0xaa, 0x50, 0xda, 0x16, 0x7e, + 0x04, 0x36, 0x2e, 0xd9, 0x8f, 0x3d, 0xc7, 0x77, 0xcf, 0x9d, 0x3e, 0x35, 0x07, 0xe5, 0xa5, 0x5d, + 0x6d, 0x6f, 0xcd, 0xa8, 0x8f, 0x86, 0xb5, 0x8d, 0x1f, 0xb5, 0x63, 0x8a, 0xbb, 0xb4, 0x00, 0x25, + 0x1d, 0x21, 0x01, 0x1b, 0xdc, 0xb9, 0x26, 0xb6, 0xb8, 0x3a, 0xc2, 0x38, 0x2b, 0x2f, 0xcb, 0x5c, + 0xee, 0xbd, 0x29, 0x97, 0x17, 0x31, 0x07, 0xe3, 0x91, 0x4a, 0xe7, 0x46, 0x5c, 0xca, 0x50, 0x12, + 0x15, 0x1e, 0x81, 0x6d, 0x2f, 0x48, 0x0e, 0x43, 0xc4, 0xf5, 0x3b, 0x7d, 0xca, 0xae, 0xca, 0x2b, + 0xf2, 0xc4, 0x8f, 0x46, 0xc3, 0xda, 0x36, 0x4a, 0x2b, 0x51, 0xd6, 0x1e, 0xbe, 0x07, 0xd6, 0x19, + 0x39, 0xa5, 0xb6, 0x7f, 0x1b, 0xe4, 0x74, 0x55, 0xfa, 0x6f, 0x8d, 0x86, 0xb5, 0xf5, 0xf6, 0x49, + 0x24, 0x47, 0x09, 0xab, 0xfa, 0x1f, 0x34, 0xb0, 0x72, 0xd4, 0x6e, 0x9e, 0x39, 0x5d, 0x32, 0x87, + 0x82, 0x6e, 0x26, 0x0a, 0xfa, 0x9d, 0x7b, 0x4a, 0x42, 0x04, 0x35, 0xb1, 0x9c, 0xff, 0x1d, 0x94, + 0xb3, 0xb0, 0x51, 0xfd, 0x68, 0x17, 0x14, 0x6d, 0x6c, 0x11, 0x19, 0xfa, 0x5a, 0xe4, 0x73, 0x86, + 0x2d, 0x82, 0xa4, 0x06, 0x7e, 0x17, 0x2c, 0xdb, 0x4e, 0x97, 0x34, 0x8f, 0x65, 0x00, 0x6b, 0xc6, + 0x03, 0x65, 0xb3, 0x7c, 0x26, 0xa5, 0x48, 0x69, 0xc5, 0x55, 0x72, 0xc7, 0x75, 0xfa, 0x4e, 0x6f, + 0xf0, 0x09, 0x19, 0x8c, 0xc9, 0x2d, 0xaf, 0xf2, 0x22, 0x26, 0x47, 0x09, 0x2b, 0xd8, 0x01, 0x25, + 0xdc, 0xef, 0x3b, 0x26, 0xe6, 0xb8, 0xd3, 0x27, 0x92, 0xb1, 0xa5, 0x83, 0xc6, 0x9b, 0xce, 0x18, + 0x54, 0x84, 0xd8, 0x1c, 0xa9, 0x17, 0x81, 0x19, 0x9b, 0xa3, 0x61, 0xad, 0x74, 0x18, 0xe1, 0xa0, + 0x38, 0x68, 0xfd, 0xf7, 0x1a, 0x28, 0xa9, 0x53, 0xcf, 0xa1, 0x85, 0x7d, 0x94, 0x6c, 0x61, 0xdf, + 0x9e, 0x22, 0x5f, 0x13, 0x1a, 0x98, 0x19, 0x86, 0x2d, 0xbb, 0xd7, 0x05, 0x58, 0xe9, 0xca, 0xa4, + 0xb1, 0xb2, 0x26, 0xa1, 0x1f, 0x4f, 0x01, 0xad, 0x3a, 0xe4, 0xa6, 0xda, 0x60, 0x25, 0x58, 0x33, + 0x34, 0x86, 0xaa, 0xff, 0xa7, 0x00, 0xe0, 0x51, 0xbb, 0x99, 0xea, 0x0f, 0x73, 0xa0, 0x35, 0x05, + 0xeb, 0x82, 0x39, 0x63, 0x6e, 0x28, 0x7a, 0x7f, 0x7f, 0xca, 0x4c, 0xe0, 0x0e, 0xe9, 0xb7, 0x49, + 0x9f, 0x98, 0xdc, 0xf1, 0x02, 0x92, 0x9d, 0xc5, 0xc0, 0x50, 0x02, 0x1a, 0x1e, 0x83, 0xad, 0x71, + 0xbb, 0xeb, 0x63, 0xc6, 0x04, 0xb9, 0xcb, 0x05, 0x49, 0xe6, 0xb2, 0x0a, 0x71, 0xab, 0x9d, 0xd2, + 0xa3, 0x8c, 0x07, 0xfc, 0x0c, 0xac, 0x9a, 0xf1, 0xce, 0x7a, 0x0f, 0x6d, 0xf4, 0xf1, 0xc0, 0xa2, + 0x7f, 0xea, 0x63, 0x9b, 0x53, 0x3e, 0x30, 0xd6, 0x05, 0x65, 0xc2, 0x16, 0x1c, 0xa2, 0x41, 0x06, + 0xb6, 0x2d, 0x7c, 0x4b, 0x2d, 0xdf, 0x0a, 0xc8, 0xdd, 0xa6, 0x3f, 0x27, 0xb2, 0xff, 0xce, 0xbe, + 0x85, 0x6c, 0x7d, 0xad, 0x34, 0x18, 0xca, 0xe2, 0xd7, 0xff, 0xaa, 0x81, 0xb7, 0xb2, 0x89, 0x9f, + 0x43, 0x81, 0xb4, 0x93, 0x05, 0xa2, 0xdf, 0xc3, 0xe2, 0x54, 0x80, 0x13, 0x6a, 0xe5, 0x37, 0xcb, + 0x60, 0x3d, 0x9e, 0xc3, 0x39, 0x10, 0xf8, 0x07, 0xa0, 0xe4, 0x7a, 0xce, 0x0d, 0x65, 0xd4, 0xb1, + 0x89, 0xa7, 0xba, 0xe3, 0x43, 0xe5, 0x52, 0x3a, 0x8f, 0x54, 0x28, 0x6e, 0x07, 0xfb, 0x00, 0xb8, + 0xd8, 0xc3, 0x16, 0xe1, 0xa2, 0x92, 0x0b, 0xf2, 0x0e, 0x9e, 0xbd, 0xe9, 0x0e, 0xe2, 0xc7, 0xd2, + 0xcf, 0x43, 0xd7, 0x13, 0x9b, 0x7b, 0x83, 0x28, 0xc4, 0x48, 0x81, 0x62, 0xf8, 0xf0, 0x1a, 0x6c, + 0x78, 0xc4, 0xec, 0x63, 0x6a, 0xa9, 0x67, 0xbd, 0x28, 0xc3, 0x3c, 0x11, 0xcf, 0x2b, 0x8a, 0x2b, + 0xee, 0x86, 0xb5, 0xa7, 0xd9, 0x11, 0x5d, 0x3f, 0x27, 0x1e, 0xa3, 0x8c, 0x13, 0x9b, 0x07, 0xd4, + 0x49, 0xf8, 0xa0, 0x24, 0xb6, 0x78, 0x02, 0x2c, 0xf1, 0x40, 0x3e, 0x77, 0x39, 0x75, 0x6c, 0x56, + 0x5e, 0x8a, 0x9e, 0x80, 0x56, 0x4c, 0x8e, 0x12, 0x56, 0xf0, 0x14, 0xec, 0x88, 0x6e, 0xfd, 0xb3, + 0x60, 0x83, 0x93, 0x5b, 0x17, 
0xdb, 0xe2, 0xaa, 0xca, 0xcb, 0xf2, 0x2d, 0x2e, 0x8b, 0xe9, 0xe8, + 0x30, 0x47, 0x8f, 0x72, 0xbd, 0xe0, 0x67, 0x60, 0x3b, 0x18, 0x8f, 0x0c, 0x6a, 0x77, 0xa9, 0xdd, + 0x13, 0xc3, 0x91, 0x1c, 0x0b, 0xd6, 0x8c, 0x27, 0xa2, 0x36, 0x5e, 0xa4, 0x95, 0x77, 0x79, 0x42, + 0x94, 0x05, 0x81, 0x5f, 0x82, 0x6d, 0xb9, 0x23, 0xe9, 0xaa, 0xc6, 0x42, 0x09, 0x2b, 0xaf, 0x66, + 0x67, 0x1b, 0x71, 0x75, 0x82, 0x48, 0xe3, 0xf6, 0x33, 0x6e, 0x53, 0x17, 0xc4, 0xb3, 0x8c, 0xb7, + 0x55, 0xbe, 0xb6, 0x0f, 0xd3, 0x50, 0x28, 0x8b, 0x5e, 0xf9, 0x10, 0x6c, 0xa6, 0x12, 0x0e, 0xb7, + 0x40, 0xe1, 0x9a, 0x0c, 0x82, 0xf7, 0x1a, 0x89, 0x9f, 0x70, 0x07, 0x2c, 0xdd, 0xe0, 0xbe, 0x4f, + 0x02, 0x06, 0xa2, 0x60, 0xf1, 0xc1, 0xe2, 0x33, 0xad, 0xfe, 0x67, 0x0d, 0x24, 0x1a, 0xdb, 0x1c, + 0x8a, 0xbb, 0x95, 0x2c, 0xee, 0xbd, 0x69, 0x89, 0x3d, 0xa1, 0xac, 0x7f, 0xa9, 0x81, 0xf5, 0xf8, + 0x14, 0x08, 0xdf, 0x05, 0xab, 0xd8, 0xef, 0x52, 0x62, 0x9b, 0xe3, 0x99, 0x25, 0x8c, 0xe6, 0x50, + 0xc9, 0x51, 0x68, 0x21, 0x66, 0x44, 0x72, 0xeb, 0x52, 0x0f, 0x0b, 0xa6, 0xb5, 0x89, 0xe9, 0xd8, + 0x5d, 0x26, 0xaf, 0xa9, 0x10, 0x34, 0xca, 0x93, 0xb4, 0x12, 0x65, 0xed, 0xeb, 0xbf, 0x5b, 0x04, + 0x5b, 0x01, 0x41, 0x82, 0x4f, 0x04, 0x8b, 0xd8, 0x7c, 0x0e, 0xed, 0x05, 0x25, 0xc6, 0xbe, 0xa7, + 0xf7, 0x8f, 0x44, 0x51, 0x74, 0x93, 0xe6, 0x3f, 0xf8, 0x39, 0x58, 0x66, 0x1c, 0x73, 0x9f, 0xc9, + 0xe7, 0xaf, 0x74, 0x70, 0x30, 0x13, 0xaa, 0xf4, 0x8c, 0xe6, 0xbf, 0x60, 0x8d, 0x14, 0x62, 0xfd, + 0x2f, 0x1a, 0xd8, 0x49, 0xbb, 0xcc, 0x81, 0x70, 0x9f, 0x26, 0x09, 0xf7, 0xee, 0x2c, 0x27, 0x9a, + 0x40, 0xba, 0x7f, 0x68, 0xe0, 0xad, 0xcc, 0xe1, 0xe5, 0x3b, 0x2b, 0x7a, 0x95, 0x9b, 0xea, 0x88, + 0x67, 0xd1, 0xf8, 0x2c, 0x7b, 0xd5, 0x79, 0x8e, 0x1e, 0xe5, 0x7a, 0xc1, 0x2f, 0xc0, 0x16, 0xb5, + 0xfb, 0xd4, 0x26, 0xea, 0x59, 0x8e, 0xd2, 0x9d, 0xdb, 0x50, 0xd2, 0xc8, 0x32, 0xcd, 0x3b, 0x62, + 0x7a, 0x69, 0xa6, 0x50, 0x50, 0x06, 0xb7, 0xfe, 0xb7, 0x9c, 0xf4, 0xc8, 0xb1, 0x52, 0x54, 0x94, + 0x94, 0x10, 0x2f, 0x53, 0x51, 0x4a, 0x8e, 0x42, 0x0b, 0xc9, 0x20, 0x79, 0x15, 0x2a, 0xd0, 0xd9, + 0x18, 0x24, 0x3d, 0x63, 0x0c, 0x92, 0x6b, 0xa4, 0x10, 0x45, 0x24, 0x62, 0x6c, 0x8b, 0x8d, 0x67, + 0x61, 0x24, 0x67, 0x4a, 0x8e, 0x42, 0x8b, 0xfa, 0x7f, 0x0b, 0x39, 0x59, 0x92, 0x54, 0x8c, 0x1d, + 0x69, 0xfc, 0x85, 0x9f, 0x3e, 0x52, 0x37, 0x3c, 0x52, 0x17, 0xfe, 0x56, 0x03, 0x10, 0x87, 0x10, + 0xad, 0x31, 0x55, 0x03, 0x3e, 0x7d, 0x3c, 0x7b, 0x85, 0xe8, 0x87, 0x19, 0xb0, 0xe0, 0xad, 0xae, + 0xa8, 0x20, 0x60, 0xd6, 0x00, 0xe5, 0x44, 0x00, 0x29, 0x28, 0x05, 0xd2, 0x13, 0xcf, 0x73, 0x3c, + 0x55, 0xb2, 0xef, 0xdc, 0x1f, 0x90, 0x34, 0x37, 0xaa, 0xf2, 0x9b, 0x28, 0xf2, 0xbf, 0x1b, 0xd6, + 0x4a, 0x31, 0x3d, 0x8a, 0x63, 0x8b, 0xad, 0xba, 0x24, 0xda, 0xaa, 0xf8, 0x7f, 0x6c, 0x75, 0x4c, + 0x26, 0x6f, 0x15, 0xc3, 0xae, 0x9c, 0x80, 0x6f, 0x4d, 0xb8, 0xa0, 0x99, 0xde, 0xb6, 0xd7, 0x8b, + 0xe0, 0x51, 0x78, 0xff, 0x1e, 0xed, 0xf8, 0x9c, 0xb0, 0x79, 0x4d, 0x7e, 0x07, 0x00, 0x04, 0x9f, + 0x4f, 0x92, 0xaa, 0xc1, 0xe0, 0x17, 0x7a, 0x1c, 0x87, 0x1a, 0x14, 0xb3, 0x82, 0x7e, 0xce, 0xd8, + 0x77, 0x38, 0x15, 0xb9, 0xe2, 0x87, 0x9b, 0x75, 0xfe, 0xfb, 0xa6, 0x13, 0xc4, 0xdf, 0x35, 0xf0, + 0x76, 0x6e, 0x20, 0x73, 0xe8, 0xec, 0x2f, 0x92, 0x9d, 0x7d, 0x7f, 0xe6, 0xcb, 0x9a, 0xd0, 0xde, + 0x7f, 0xa5, 0x81, 0x38, 0x3b, 0xe1, 0x29, 0x28, 0x72, 0xaa, 0x7a, 0x78, 0xe9, 0xe0, 0xc9, 0x74, + 0x27, 0xb8, 0xa0, 0x16, 0x89, 0x9e, 0x58, 0xb1, 0x42, 0x12, 0x05, 0x3e, 0x06, 0x2b, 0x16, 0x61, + 0x0c, 0xf7, 0xc6, 0xc4, 0x08, 0x3f, 0xbd, 0x5b, 0x81, 0x18, 0x8d, 0xf5, 0xf5, 0xf7, 0xc1, 0xc3, + 0x9c, 0x3f, 0x33, 0x60, 0x0d, 0x2c, 0x99, 0xf2, 0xcf, 
0x28, 0x11, 0xd0, 0x92, 0xb1, 0x26, 0x0e, + 0x70, 0x24, 0xff, 0x85, 0x0a, 0xe4, 0xc6, 0x0f, 0x5f, 0xbe, 0xaa, 0x2e, 0x7c, 0xf5, 0xaa, 0xba, + 0xf0, 0xf5, 0xab, 0xea, 0xc2, 0x2f, 0x46, 0x55, 0xed, 0xe5, 0xa8, 0xaa, 0x7d, 0x35, 0xaa, 0x6a, + 0x5f, 0x8f, 0xaa, 0xda, 0xbf, 0x46, 0x55, 0xed, 0xd7, 0xaf, 0xab, 0x0b, 0x9f, 0x57, 0x26, 0xff, + 0xcf, 0xff, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x30, 0xdb, 0x24, 0x04, 0x18, 0x00, 0x00, } func (m *CSIDriver) Marshal() (dAtA []byte, err error) { @@ -1664,6 +1727,115 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *VolumeAttributesClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.DriverName) + copy(dAtA[i:], m.DriverName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttributesClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttributesClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttributesClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2086,6 +2258,44 @@ func (m *VolumeAttachmentStatus) Size() (n int) { return n } +func (m *VolumeAttributesClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DriverName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *VolumeAttributesClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *VolumeError) Size() (n int) { if m == nil { return 0 @@ -2384,6 +2594,44 @@ func (this *VolumeAttachmentStatus) String() string { }, "") return s } +func (this *VolumeAttributesClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&VolumeAttributesClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttributesClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttributesClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttributesClass", "VolumeAttributesClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttributesClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *VolumeError) String() string { if this == nil { return "nil" @@ -5154,6 +5402,365 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } return nil } +func (m *VolumeAttributesClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DriverName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttributesClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttributesClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttributesClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttributesClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/constraint/vendor/k8s.io/api/storage/v1beta1/generated.proto b/constraint/vendor/k8s.io/api/storage/v1beta1/generated.proto index f6e619d05..64dcc8262 100644 --- a/constraint/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -47,7 +47,7 @@ message CSIDriver { // an 
alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and // alphanumerics between. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents the specification of the CSI Driver. optional CSIDriverSpec spec = 2; @@ -58,7 +58,7 @@ message CSIDriverList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIDriver repeated CSIDriver items = 2; @@ -229,7 +229,7 @@ message CSIDriverSpec { // CSINode has an OwnerReference that points to the corresponding node object. message CSINode { // metadata.name must be the Kubernetes node name. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification of CSINode optional CSINodeSpec spec = 2; @@ -277,7 +277,7 @@ message CSINodeList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSINode repeated CSINode items = 2; @@ -329,7 +329,7 @@ message CSIStorageCapacity { // // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // nodeTopology defines which nodes have access to the storage // for which capacity was reported. If not set, the storage is @@ -338,7 +338,7 @@ message CSIStorageCapacity { // immutable. // // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector nodeTopology = 2; // storageClassName represents the name of the StorageClass that the reported capacity applies to. // It must meet the same requirements as the name of a StorageClass @@ -358,7 +358,7 @@ message CSIStorageCapacity { // unavailable. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity capacity = 4; // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse // for a GetCapacityRequest with topology and parameters that match the @@ -372,7 +372,7 @@ message CSIStorageCapacity { // API is ResourceRequirements.Requests in a volume claim. // // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; + optional .k8s.io.apimachinery.pkg.api.resource.Quantity maximumVolumeSize = 5; } // CSIStorageCapacityList is a collection of CSIStorageCapacity objects. 
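Note: the gogo-generated Marshal, MarshalToSizedBuffer, Size, String, and Unmarshal methods added above for VolumeAttributesClass follow the same pattern as the other storage/v1beta1 types, so the new type round-trips through the proto path like any existing one. A minimal sketch of that round trip using the vendored types; the class name, driver name, and parameter key below are illustrative placeholders, not values taken from this change:

```go
package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	in := storagev1beta1.VolumeAttributesClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gold"},   // hypothetical class name
		DriverName: "example.csi.vendor.io",           // hypothetical CSI driver
		Parameters: map[string]string{"iops": "3000"}, // opaque, driver-defined key/value
	}

	// Marshal uses the gogo-generated proto encoder added in this diff.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal exercises the matching decoder, including the Parameters map entries.
	var out storagev1beta1.VolumeAttributesClass
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.DriverName, out.Parameters["iops"])
}
```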
@@ -380,7 +380,7 @@ message CSIStorageCapacityList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of CSIStorageCapacity objects. repeated CSIStorageCapacity items = 2; @@ -395,7 +395,7 @@ message StorageClass { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // provisioner indicates the type of the provisioner. optional string provisioner = 2; @@ -433,7 +433,7 @@ message StorageClass { // This field is only honored by servers that enable the VolumeScheduling feature. // +optional // +listType=atomic - repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; + repeated .k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; } // StorageClassList is a collection of storage classes. @@ -441,7 +441,7 @@ message StorageClassList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of StorageClasses repeated StorageClass items = 2; @@ -468,7 +468,7 @@ message VolumeAttachment { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec represents specification of the desired attach/detach volume behavior. // Populated by the Kubernetes system. @@ -486,15 +486,15 @@ message VolumeAttachmentList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items is the list of VolumeAttachments repeated VolumeAttachment items = 2; } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. +// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. message VolumeAttachmentSource { // persistentVolumeName represents the name of the persistent volume to attach. @@ -508,7 +508,7 @@ message VolumeAttachmentSource { // PersistentVolumeSpec. This field is beta-level and is only // honored by servers that enabled the CSIMigration feature. // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; + optional .k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. 
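Note: the VolumeAttachmentSource documentation in the hunks above states that exactly one member can be set. A minimal sketch of the common case, pointing the source at a PersistentVolume by name and leaving the inline spec unset; all names below are hypothetical:

```go
package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvName := "pv-0001" // hypothetical PersistentVolume name

	va := storagev1beta1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "csi-attach-pv-0001-node-a"},
		Spec: storagev1beta1.VolumeAttachmentSpec{
			Attacher: "example.csi.vendor.io", // hypothetical external attacher / CSI driver name
			NodeName: "node-a",
			Source: storagev1beta1.VolumeAttachmentSource{
				// Exactly one source member is set; InlineVolumeSpec stays nil.
				PersistentVolumeName: &pvName,
			},
		},
	}
	fmt.Println(va.Spec.Attacher, *va.Spec.Source.PersistentVolumeName)
}
```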
@@ -552,11 +552,51 @@ message VolumeAttachmentStatus { optional VolumeError detachError = 4; } +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +message VolumeAttributesClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Name of the CSI driver + // This field is immutable. + optional string driverName = 2; + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + map parameters = 3; +} + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +message VolumeAttributesClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of VolumeAttributesClass objects. + repeated VolumeAttributesClass items = 2; +} + // VolumeError captures an error encountered during a volume operation. message VolumeError { // time represents the time the error was encountered. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // message represents the error encountered during Attach or Detach operation. // This string may be logged, so it should not contain sensitive diff --git a/constraint/vendor/k8s.io/api/storage/v1beta1/register.go b/constraint/vendor/k8s.io/api/storage/v1beta1/register.go index a281d0f26..e2214ef2f 100644 --- a/constraint/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/constraint/vendor/k8s.io/api/storage/v1beta1/register.go @@ -58,6 +58,9 @@ func addKnownTypes(scheme *runtime.Scheme) error { &CSIStorageCapacity{}, &CSIStorageCapacityList{}, + + &VolumeAttributesClass{}, + &VolumeAttributesClassList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/constraint/vendor/k8s.io/api/storage/v1beta1/types.go b/constraint/vendor/k8s.io/api/storage/v1beta1/types.go index 9333a28b8..d9b6b7685 100644 --- a/constraint/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/constraint/vendor/k8s.io/api/storage/v1beta1/types.go @@ -176,8 +176,8 @@ type VolumeAttachmentSpec struct { } // VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. 
+// Right now only PersistentVolumes can be attached via external attacher, +// in the future we may allow also inline volumes in pods. // Exactly one member can be set. type VolumeAttachmentSource struct { // persistentVolumeName represents the name of the persistent volume to attach. @@ -714,3 +714,55 @@ type CSIStorageCapacityList struct { // items is the list of CSIStorageCapacity objects. Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClass represents a specification of mutable volume attributes +// defined by the CSI driver. The class can be specified during dynamic provisioning +// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. +type VolumeAttributesClass struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Name of the CSI driver + // This field is immutable. + DriverName string `json:"driverName" protobuf:"bytes,2,opt,name=driverName"` + + // parameters hold volume attributes defined by the CSI driver. These values + // are opaque to the Kubernetes and are passed directly to the CSI driver. + // The underlying storage provider supports changing these attributes on an + // existing volume, however the parameters field itself is immutable. To + // invoke a volume update, a new VolumeAttributesClass should be created with + // new parameters, and the PersistentVolumeClaim should be updated to reference + // the new VolumeAttributesClass. + // + // This field is required and must contain at least one key/value pair. + // The keys cannot be empty, and the maximum number of parameters is 512, with + // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, + // the target PersistentVolumeClaim will be set to an "Infeasible" state in the + // modifyVolumeStatus field. + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// +k8s:prerelease-lifecycle-gen:introduced=1.31 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. +type VolumeAttributesClassList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of VolumeAttributesClass objects. 
+ Items []VolumeAttributesClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/constraint/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 6d9d23306..58da44fc8 100644 --- a/constraint/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -185,7 +185,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in the future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.", } @@ -216,6 +216,27 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { return map_VolumeAttachmentStatus } +var map_VolumeAttributesClass = map[string]string{ + "": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "driverName": "Name of the CSI driver This field is immutable.", + "parameters": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. 
If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", +} + +func (VolumeAttributesClass) SwaggerDoc() map[string]string { + return map_VolumeAttributesClass +} + +var map_VolumeAttributesClassList = map[string]string{ + "": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of VolumeAttributesClass objects.", +} + +func (VolumeAttributesClassList) SwaggerDoc() map[string]string { + return map_VolumeAttributesClassList +} + var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "time represents the time the error was encountered.", diff --git a/constraint/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index f0450182b..d87aa6b90 100644 --- a/constraint/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -579,6 +579,72 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass. +func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass { + if in == nil { + return nil + } + out := new(VolumeAttributesClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttributesClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList. +func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList { + if in == nil { + return nil + } + out := new(VolumeAttributesClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VolumeError) DeepCopyInto(out *VolumeError) { *out = *in diff --git a/constraint/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go index c5d23e7d4..4be57dc0d 100644 --- a/constraint/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/constraint/vendor/k8s.io/api/storage/v1beta1/zz_generated.prerelease-lifecycle.go @@ -264,3 +264,39 @@ func (in *VolumeAttachmentList) APILifecycleReplacement() schema.GroupVersionKin func (in *VolumeAttachmentList) APILifecycleRemoved() (major, minor int) { return 1, 22 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClass) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClass) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClass) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *VolumeAttributesClassList) APILifecycleIntroduced() (major, minor int) { + return 1, 31 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleDeprecated() (major, minor int) { + return 1, 34 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *VolumeAttributesClassList) APILifecycleRemoved() (major, minor int) { + return 1, 37 +} diff --git a/constraint/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto b/constraint/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto index fc8a3346e..341e0bc5c 100644 --- a/constraint/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto +++ b/constraint/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto @@ -51,7 +51,7 @@ message MigrationCondition { // The last time this condition was updated. 
// +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3; // The reason for the condition's last transition. // +optional @@ -68,7 +68,7 @@ message StorageVersionMigration { // Standard object metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the migration. // +optional @@ -84,7 +84,7 @@ message StorageVersionMigrationList { // Standard list metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of StorageVersionMigration // +patchMergeKey=type diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index 8c4e147f0..61efeae69 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -144,7 +144,6 @@ type JSONSchemaProps struct { XMapType *string // x-kubernetes-validations -kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. // +patchMergeKey=rule // +patchStrategy=merge // +listType=map diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go index 09d4872f8..c7be07a14 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc.go @@ -19,6 +19,7 @@ limitations under the License. // +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=apiextensions.k8s.io // Package v1 is the v1 version of the API. diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto index 2ad78822f..1bbd0ce13 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto @@ -41,7 +41,7 @@ message ConversionRequest { // objects is the list of custom resource objects to be converted. // +listType=atomic - repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; } // ConversionResponse describes a conversion response. @@ -55,14 +55,14 @@ message ConversionResponse { // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). 
// The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. // +listType=atomic - repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` // will be used to construct an error message for the end user. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; } // ConversionReview describes a conversion request/response. @@ -125,7 +125,7 @@ message CustomResourceDefinition { // Standard object's metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec describes how the user wants the resources to appear optional CustomResourceDefinitionSpec spec = 2; @@ -146,7 +146,7 @@ message CustomResourceDefinitionCondition { // lastTransitionTime last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is a unique, one-word, CamelCase reason for the condition's last transition. // +optional @@ -162,7 +162,7 @@ message CustomResourceDefinitionList { // Standard object's metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items list individual CustomResourceDefinition objects repeated CustomResourceDefinition items = 2; @@ -573,7 +573,6 @@ message JSONSchemaProps { optional string xKubernetesMapType = 43; // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. // +patchMergeKey=rule // +patchStrategy=merge // +listType=map diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go index 12cc2f6f2..6ade24a82 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go @@ -20,12 +20,42 @@ import ( "bytes" "errors" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/util/json" ) var jsTrue = []byte("true") var jsFalse = []byte("false") +// The CBOR parsing related constants and functions below are not exported so they can be +// easily removed at a future date when the CBOR library provides equivalent functionality. 
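The CBOR constants and the cborType helper introduced just below never decode the payload; they dispatch on the initial byte alone, because RFC 8949 packs the major type into the top three bits of that byte and encodes false/true/null as the fixed simple values 0xf4/0xf5/0xf6. A standalone sketch of the same dispatch, independent of the vendored package:

package main

import "fmt"

// majorType mirrors the unexported helper below: the top three bits of a CBOR
// item's initial byte select its major type (RFC 8949, section 3.1).
func majorType(b byte) int { return int(b >> 5) }

func main() {
    fmt.Println(majorType(0xa1)) // 5: map with one key/value pair
    fmt.Println(majorType(0x83)) // 4: array of three items
    fmt.Println(majorType(0x65)) // 3: text string of length five
    fmt.Println(majorType(0xf5)) // 7: simple value (0xf5 true, 0xf4 false, 0xf6 null)
}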
+ +type cborMajorType int + +const ( + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.1 + cborUnsignedInteger cborMajorType = 0 + cborNegativeInteger cborMajorType = 1 + cborByteString cborMajorType = 2 + cborTextString cborMajorType = 3 + cborArray cborMajorType = 4 + cborMap cborMajorType = 5 + cborTag cborMajorType = 6 + cborOther cborMajorType = 7 +) + +const ( + // from https://www.rfc-editor.org/rfc/rfc8949.html#name-jump-table-for-initial-byte. + // additionally, see https://www.rfc-editor.org/rfc/rfc8949.html#section-3.3-5. + cborFalseValue = 0xf4 + cborTrueValue = 0xf5 + cborNullValue = 0xf6 +) + +func cborType(b byte) cborMajorType { + return cborMajorType(b >> 5) +} + func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { if s.Schema != nil { return json.Marshal(s.Schema) @@ -59,6 +89,39 @@ func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrBool) MarshalCBOR() ([]byte, error) { + if s.Schema != nil { + return cbor.Marshal(s.Schema) + } + return cbor.Marshal(s.Allows) +} + +func (s *JSONSchemaPropsOrBool) UnmarshalCBOR(data []byte) error { + switch { + case len(data) == 0: + // ideally we would avoid modifying *s here, but we are matching the behavior of UnmarshalJSON + *s = JSONSchemaPropsOrBool{} + return nil + case cborType(data[0]) == cborMap: + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrBool{Allows: true, Schema: &p} + return nil + case data[0] == cborTrueValue: + *s = JSONSchemaPropsOrBool{Allows: true} + return nil + case data[0] == cborFalseValue: + *s = JSONSchemaPropsOrBool{Allows: false} + return nil + default: + // ideally, this case would not also capture a null input value, + // but we are matching the behavior of the UnmarshalJSON + return errors.New("boolean or JSON schema expected") + } +} + func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { if len(s.Property) > 0 { return json.Marshal(s.Property) @@ -91,6 +154,40 @@ func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrStringArray) MarshalCBOR() ([]byte, error) { + if len(s.Property) > 0 { + return cbor.Marshal(s.Property) + } + if s.Schema != nil { + return cbor.Marshal(s.Schema) + } + return cbor.Marshal(nil) +} + +func (s *JSONSchemaPropsOrStringArray) UnmarshalCBOR(data []byte) error { + if len(data) > 0 && cborType(data[0]) == cborArray { + var a []string + if err := cbor.Unmarshal(data, &a); err != nil { + return err + } + *s = JSONSchemaPropsOrStringArray{Property: a} + return nil + } + if len(data) > 0 && cborType(data[0]) == cborMap { + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrStringArray{Schema: &p} + return nil + } + // At this point we either have: empty data, a null value, or an + // unexpected type. In order to match the behavior of the existing + // UnmarshalJSON, no error is returned and *s is overwritten here. 
+ *s = JSONSchemaPropsOrStringArray{} + return nil +} + func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { if len(s.JSONSchemas) > 0 { return json.Marshal(s.JSONSchemas) @@ -120,6 +217,37 @@ func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrArray) MarshalCBOR() ([]byte, error) { + if len(s.JSONSchemas) > 0 { + return cbor.Marshal(s.JSONSchemas) + } + return cbor.Marshal(s.Schema) +} + +func (s *JSONSchemaPropsOrArray) UnmarshalCBOR(data []byte) error { + if len(data) > 0 && cborType(data[0]) == cborMap { + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrArray{Schema: &p} + return nil + } + if len(data) > 0 && cborType(data[0]) == cborArray { + var a []JSONSchemaProps + if err := cbor.Unmarshal(data, &a); err != nil { + return err + } + *s = JSONSchemaPropsOrArray{JSONSchemas: a} + return nil + } + // At this point we either have: empty data, a null value, or an + // unexpected type. In order to match the behavior of the existing + // UnmarshalJSON, no error is returned and *s is overwritten here. + *s = JSONSchemaPropsOrArray{} + return nil +} + func (s JSON) MarshalJSON() ([]byte, error) { if len(s.Raw) > 0 { return s.Raw, nil @@ -130,7 +258,38 @@ func (s JSON) MarshalJSON() ([]byte, error) { func (s *JSON) UnmarshalJSON(data []byte) error { if len(data) > 0 && !bytes.Equal(data, nullLiteral) { - s.Raw = data + s.Raw = append(s.Raw[0:0], data...) + } + return nil +} + +func (s JSON) MarshalCBOR() ([]byte, error) { + // Note that non-semantic whitespace is lost during the transcoding performed here. + // We do not forsee this to be a problem given the current known uses of this type. + // Other limitations that arise when roundtripping JSON via dynamic clients also apply + // here, for example: insignificant whitespace handling, number handling, and map key ordering. + if len(s.Raw) == 0 { + return []byte{cborNullValue}, nil + } + var u any + if err := json.Unmarshal(s.Raw, &u); err != nil { + return nil, err + } + return cbor.Marshal(u) +} + +func (s *JSON) UnmarshalCBOR(data []byte) error { + if len(data) == 0 || data[0] == cborNullValue { + return nil + } + var u any + if err := cbor.Unmarshal(data, &u); err != nil { + return err + } + raw, err := json.Marshal(u) + if err != nil { + return err } + s.Raw = raw return nil } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go index e1d1e0be3..212cea6f6 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go @@ -383,6 +383,7 @@ const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s. // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format // <.spec.name>.<.spec.group>. @@ -401,6 +402,7 @@ type CustomResourceDefinition struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // CustomResourceDefinitionList is a list of CustomResourceDefinition objects. 
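A small behavioral fix rides along with the CBOR additions in marshal.go above: JSON.UnmarshalJSON now copies the incoming bytes via append(s.Raw[0:0], data...) instead of retaining the caller's slice, so a decoder that reuses its buffer can no longer rewrite Raw after the fact. A self-contained sketch of the difference, using a stand-in type rather than the vendored one:

package main

import "fmt"

// rawHolder stands in for the vendored JSON type; only the slice handling matters here.
type rawHolder struct{ Raw []byte }

func (r *rawHolder) setAliased(data []byte) { r.Raw = data }                       // old behavior
func (r *rawHolder) setCopied(data []byte)  { r.Raw = append(r.Raw[:0], data...) } // new behavior

func main() {
    buf := []byte(`{"a":1}`)

    var aliased, copied rawHolder
    aliased.setAliased(buf)
    copied.setCopied(buf)

    // Simulate the caller reusing its buffer for the next document.
    copy(buf, `{"b":2}`)

    fmt.Println(string(aliased.Raw)) // {"b":2} (silently changed)
    fmt.Println(string(copied.Raw))  // {"a":1} (unaffected)
}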
type CustomResourceDefinitionList struct { @@ -469,6 +471,7 @@ type CustomResourceSubresourceScale struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.16 // ConversionReview describes a conversion request/response. type ConversionReview struct { diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go index 5dbdf576b..197bd1b7a 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go @@ -189,7 +189,6 @@ type JSONSchemaProps struct { XMapType *string `json:"x-kubernetes-map-type,omitempty" protobuf:"bytes,43,opt,name=xKubernetesMapType"` // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. // +patchMergeKey=rule // +patchStrategy=merge // +listType=map diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.prerelease-lifecycle.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..e3acc247c --- /dev/null +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,40 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ConversionReview) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *CustomResourceDefinition) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
+func (in *CustomResourceDefinitionList) APILifecycleIntroduced() (major, minor int) { + return 1, 16 +} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 62d7a33dc..f9e560034 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -41,7 +41,7 @@ message ConversionRequest { // objects is the list of custom resource objects to be converted. // +listType=atomic - repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; } // ConversionResponse describes a conversion response. @@ -55,14 +55,14 @@ message ConversionResponse { // must also have the same size as the input list with the same objects in the same order (equal kind, metadata.uid, metadata.name and metadata.namespace). // The webhook is allowed to mutate labels and annotations. Any other change to the metadata is silently ignored. // +listType=atomic - repeated k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension convertedObjects = 2; // result contains the result of conversion with extra details if the conversion failed. `result.status` determines if // the conversion failed or succeeded. The `result.status` field is required and represents the success or failure of the // conversion. A successful conversion must set `result.status` to `Success`. A failed conversion must set // `result.status` to `Failure` and provide more details in `result.message` and return http status 200. The `result.message` // will be used to construct an error message for the end user. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status result = 3; } // ConversionReview describes a conversion request/response. @@ -138,7 +138,7 @@ message CustomResourceDefinition { // Standard object's metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec describes how the user wants the resources to appear optional CustomResourceDefinitionSpec spec = 2; @@ -159,7 +159,7 @@ message CustomResourceDefinitionCondition { // lastTransitionTime last time the condition transitioned from one status to another. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; // reason is a unique, one-word, CamelCase reason for the condition's last transition. 
// +optional @@ -175,7 +175,7 @@ message CustomResourceDefinitionList { // Standard object's metadata // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // items list individual CustomResourceDefinition objects repeated CustomResourceDefinition items = 2; @@ -634,7 +634,6 @@ message JSONSchemaProps { optional string xKubernetesMapType = 43; // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. // +patchMergeKey=rule // +patchStrategy=merge // +listType=map diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go index 44941d82e..5e6e82532 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go @@ -20,12 +20,40 @@ import ( "bytes" "errors" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/util/json" ) var jsTrue = []byte("true") var jsFalse = []byte("false") +// The CBOR parsing related constants and functions below are not exported so they can be +// easily removed at a future date when the CBOR library provides equivalent functionality. + +type cborMajorType int + +const ( + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.1 + cborUnsignedInteger cborMajorType = 0 + cborNegativeInteger cborMajorType = 1 + cborByteString cborMajorType = 2 + cborTextString cborMajorType = 3 + cborArray cborMajorType = 4 + cborMap cborMajorType = 5 + cborTag cborMajorType = 6 + cborOther cborMajorType = 7 +) + +const ( + cborFalseValue = 0xf4 + cborTrueValue = 0xf5 + cborNullValue = 0xf6 +) + +func cborType(b byte) cborMajorType { + return cborMajorType(b >> 5) +} + func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) { if s.Schema != nil { return json.Marshal(s.Schema) @@ -59,6 +87,39 @@ func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrBool) MarshalCBOR() ([]byte, error) { + if s.Schema != nil { + return cbor.Marshal(s.Schema) + } + return cbor.Marshal(s.Allows) +} + +func (s *JSONSchemaPropsOrBool) UnmarshalCBOR(data []byte) error { + switch { + case len(data) == 0: + // ideally we would avoid modifying *s here, but we are matching the behavior of UnmarshalJSON + *s = JSONSchemaPropsOrBool{} + return nil + case cborType(data[0]) == cborMap: + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrBool{Allows: true, Schema: &p} + return nil + case data[0] == cborTrueValue: + *s = JSONSchemaPropsOrBool{Allows: true} + return nil + case data[0] == cborFalseValue: + *s = JSONSchemaPropsOrBool{Allows: false} + return nil + default: + // ideally, this case would not also capture a null input value, + // but we are matching the behavior of the UnmarshalJSON + return errors.New("boolean or JSON schema expected") + } +} + func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) { if len(s.Property) > 0 { return json.Marshal(s.Property) @@ -91,6 +152,40 @@ func (s 
*JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrStringArray) MarshalCBOR() ([]byte, error) { + if len(s.Property) > 0 { + return cbor.Marshal(s.Property) + } + if s.Schema != nil { + return cbor.Marshal(s.Schema) + } + return cbor.Marshal(nil) +} + +func (s *JSONSchemaPropsOrStringArray) UnmarshalCBOR(data []byte) error { + if len(data) > 0 && cborType(data[0]) == cborArray { + var a []string + if err := cbor.Unmarshal(data, &a); err != nil { + return err + } + *s = JSONSchemaPropsOrStringArray{Property: a} + return nil + } + if len(data) > 0 && cborType(data[0]) == cborMap { + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrStringArray{Schema: &p} + return nil + } + // At this point we either have: empty data, a null value, or an + // unexpected type. In order to match the behavior of the existing + // UnmarshalJSON, no error is returned and *s is overwritten here. + *s = JSONSchemaPropsOrStringArray{} + return nil +} + func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) { if len(s.JSONSchemas) > 0 { return json.Marshal(s.JSONSchemas) @@ -120,6 +215,37 @@ func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error { return nil } +func (s JSONSchemaPropsOrArray) MarshalCBOR() ([]byte, error) { + if len(s.JSONSchemas) > 0 { + return cbor.Marshal(s.JSONSchemas) + } + return cbor.Marshal(s.Schema) +} + +func (s *JSONSchemaPropsOrArray) UnmarshalCBOR(data []byte) error { + if len(data) > 0 && cborType(data[0]) == cborMap { + var p JSONSchemaProps + if err := cbor.Unmarshal(data, &p); err != nil { + return err + } + *s = JSONSchemaPropsOrArray{Schema: &p} + return nil + } + if len(data) > 0 && cborType(data[0]) == cborArray { + var a []JSONSchemaProps + if err := cbor.Unmarshal(data, &a); err != nil { + return err + } + *s = JSONSchemaPropsOrArray{JSONSchemas: a} + return nil + } + // At this point we either have: empty data, a null value, or an + // unexpected type. In order to match the behavior of the existing + // UnmarshalJSON, no error is returned and *s is overwritten here. + *s = JSONSchemaPropsOrArray{} + return nil +} + func (s JSON) MarshalJSON() ([]byte, error) { if len(s.Raw) > 0 { return s.Raw, nil @@ -130,7 +256,38 @@ func (s JSON) MarshalJSON() ([]byte, error) { func (s *JSON) UnmarshalJSON(data []byte) error { if len(data) > 0 && !bytes.Equal(data, nullLiteral) { - s.Raw = data + s.Raw = append(s.Raw[0:0], data...) + } + return nil +} + +func (s JSON) MarshalCBOR() ([]byte, error) { + // Note that non-semantic whitespace is lost during the transcoding performed here. + // We do not forsee this to be a problem given the current known uses of this type. + // Other limitations that arise when roundtripping JSON via dynamic clients also apply + // here, for example: insignificant whitespace handling, number handling, and map key ordering. 
+ if len(s.Raw) == 0 { + return []byte{cborNullValue}, nil + } + var u any + if err := json.Unmarshal(s.Raw, &u); err != nil { + return nil, err + } + return cbor.Marshal(u) +} + +func (s *JSON) UnmarshalCBOR(data []byte) error { + if len(data) == 0 || data[0] == cborNullValue { + return nil + } + var u any + if err := cbor.Unmarshal(data, &u); err != nil { + return err + } + raw, err := json.Marshal(u) + if err != nil { + return err } + s.Raw = raw return nil } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index 86013e39f..3ed584dd9 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -189,7 +189,6 @@ type JSONSchemaProps struct { XMapType *string `json:"x-kubernetes-map-type,omitempty" protobuf:"bytes,43,opt,name=xKubernetesMapType"` // x-kubernetes-validations describes a list of validation rules written in the CEL expression language. - // This field is an alpha-level. Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled. // +patchMergeKey=rule // +patchStrategy=merge // +listType=map diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go index 7133776fb..fa3a1cbe9 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go @@ -28,7 +28,6 @@ import ( "unicode/utf8" celgo "github.com/google/cel-go/cel" - "k8s.io/apiextensions-apiserver/pkg/apihelpers" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -94,6 +93,8 @@ func ValidateCustomResourceDefinition(ctx context.Context, obj *apiextensions.Cu requireMapListKeysMapSetValidation: true, // strictCost is always true to enforce cost limits. celEnvironmentSet: environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true), + // allowInvalidCABundle is set to true since the CRD is not established yet. + allowInvalidCABundle: true, } allErrs := genericvalidation.ValidateObjectMeta(&obj.ObjectMeta, false, nameValidationFn, field.NewPath("metadata")) @@ -140,6 +141,9 @@ type validationOptions struct { suppressPerExpressionCost bool celEnvironmentSet *environment.EnvSet + // allowInvalidCABundle allows an invalid conversion webhook CABundle on update only if the existing CABundle is invalid. + // An invalid CABundle is also permitted on create and before a CRD is in an Established=True condition. 
+ allowInvalidCABundle bool } type preexistingExpressions struct { @@ -179,7 +183,7 @@ func findPreexistingExpressionsInSchema(schema *apiextensions.JSONSchemaProps, e for _, v := range s.XValidations { expressions.rules.Insert(v.Rule) if len(v.MessageExpression) > 0 { - expressions.messageExpressions.Insert(v.Rule) + expressions.messageExpressions.Insert(v.MessageExpression) } } return false @@ -233,7 +237,8 @@ func ValidateCustomResourceDefinitionUpdate(ctx context.Context, obj, oldObj *ap preexistingExpressions: findPreexistingExpressions(&oldObj.Spec), versionsWithUnchangedSchemas: findVersionsWithUnchangedSchemas(obj, oldObj), // strictCost is always true to enforce cost limits. - celEnvironmentSet: environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true), + celEnvironmentSet: environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true), + allowInvalidCABundle: allowInvalidCABundle(oldObj), } return validateCustomResourceDefinitionUpdate(ctx, obj, oldObj, opts) } @@ -485,7 +490,7 @@ func validateCustomResourceDefinitionSpec(ctx context.Context, spec *apiextensio if (spec.Conversion != nil && spec.Conversion.Strategy != apiextensions.NoneConverter) && (spec.PreserveUnknownFields == nil || *spec.PreserveUnknownFields) { allErrs = append(allErrs, field.Invalid(fldPath.Child("conversion").Child("strategy"), spec.Conversion.Strategy, "must be None if spec.preserveUnknownFields is true")) } - allErrs = append(allErrs, validateCustomResourceConversion(spec.Conversion, opts.requireRecognizedConversionReviewVersion, fldPath.Child("conversion"))...) + allErrs = append(allErrs, validateCustomResourceConversion(spec.Conversion, opts.requireRecognizedConversionReviewVersion, fldPath.Child("conversion"), opts)...) return allErrs } @@ -545,6 +550,20 @@ func validateConversionReviewVersions(versions []string, requireRecognizedVersio return allErrs } +// Allows invalid CA Bundle to be specified only if the existing CABundle is invalid +// or if the CRD is not established yet. +func allowInvalidCABundle(oldCRD *apiextensions.CustomResourceDefinition) bool { + if !apiextensions.IsCRDConditionTrue(oldCRD, apiextensions.Established) { + return true + } + oldConversion := oldCRD.Spec.Conversion + if oldConversion == nil || oldConversion.WebhookClientConfig == nil || + len(oldConversion.WebhookClientConfig.CABundle) == 0 { + return false + } + return len(webhook.ValidateCABundle(field.NewPath("caBundle"), oldConversion.WebhookClientConfig.CABundle)) > 0 +} + // hasValidConversionReviewVersion return true if there is a valid version or if the list is empty. 
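The allowInvalidCABundle plumbing above ratchets rather than hard-fails: strict caBundle validation is skipped on create, while the CRD is not yet Established, and when the previously stored bundle was itself invalid, so only CRDs that already carried a good bundle are held to it on update. As a rough illustration of what an "invalid" bundle means in this context (a hedged sketch, not the vendored webhook.ValidateCABundle):

package main

import (
    "crypto/x509"
    "encoding/pem"
    "errors"
    "fmt"
)

// checkCABundle sketches the kind of check a CA bundle validator performs:
// every PEM block must parse as an X.509 certificate and at least one
// certificate must be present. It is illustrative only.
func checkCABundle(bundle []byte) error {
    if len(bundle) == 0 {
        return errors.New("caBundle is empty")
    }
    parsed := 0
    rest := bundle
    for {
        var block *pem.Block
        block, rest = pem.Decode(rest)
        if block == nil {
            break
        }
        if _, err := x509.ParseCertificate(block.Bytes); err != nil {
            return fmt.Errorf("caBundle contains an unparseable certificate: %w", err)
        }
        parsed++
    }
    if parsed == 0 {
        return errors.New("caBundle contains no PEM certificates")
    }
    return nil
}

func main() {
    fmt.Println(checkCABundle([]byte("not a certificate"))) // caBundle contains no PEM certificates
}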
func hasValidConversionReviewVersionOrEmpty(versions []string) bool { if len(versions) < 1 { @@ -558,12 +577,7 @@ func hasValidConversionReviewVersionOrEmpty(versions []string) bool { return false } -// ValidateCustomResourceConversion statically validates -func ValidateCustomResourceConversion(conversion *apiextensions.CustomResourceConversion, fldPath *field.Path) field.ErrorList { - return validateCustomResourceConversion(conversion, true, fldPath) -} - -func validateCustomResourceConversion(conversion *apiextensions.CustomResourceConversion, requireRecognizedVersion bool, fldPath *field.Path) field.ErrorList { +func validateCustomResourceConversion(conversion *apiextensions.CustomResourceConversion, requireRecognizedVersion bool, fldPath *field.Path, opts validationOptions) field.ErrorList { allErrs := field.ErrorList{} if conversion == nil { return allErrs @@ -582,6 +596,9 @@ func validateCustomResourceConversion(conversion *apiextensions.CustomResourceCo case cc.Service != nil: allErrs = append(allErrs, webhook.ValidateWebhookService(fldPath.Child("webhookClientConfig").Child("service"), cc.Service.Name, cc.Service.Namespace, cc.Service.Path, cc.Service.Port)...) } + if len(cc.CABundle) > 0 && !opts.allowInvalidCABundle { + allErrs = append(allErrs, webhook.ValidateCABundle(fldPath.Child("webhookClientConfig").Child("caBundle"), cc.CABundle)...) + } } allErrs = append(allErrs, validateConversionReviewVersions(conversion.ConversionReviewVersions, requireRecognizedVersion, fldPath.Child("conversionReviewVersions"))...) } else { diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go index 4f065a18f..1f61a87fa 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go @@ -17,6 +17,7 @@ limitations under the License. 
package cel import ( + "errors" "fmt" "strings" "time" @@ -122,10 +123,13 @@ func Compile(s *schema.Structural, declType *apiservercel.DeclType, perCallLimit metrics.Metrics.ObserveCompilation(time.Since(t)) }() - if len(s.Extensions.XValidations) == 0 { + if len(s.XValidations) == 0 { return nil, nil } - celRules := s.Extensions.XValidations + if declType == nil { + return nil, errors.New("failed to convert to declType for CEL validation rules") + } + celRules := s.XValidations oldSelfEnvSet, optionalOldSelfEnvSet, err := prepareEnvSet(baseEnvSet, declType) if err != nil { diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go index 0bc109a73..a66ab4293 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go @@ -62,6 +62,9 @@ func (s *Structural) Pattern() string { } func (s *Structural) Items() common.Schema { + if s.Structural.Items == nil { + return nil + } return &Structural{Structural: s.Structural.Items} } @@ -279,11 +282,18 @@ func nestedValueValidationToStructural(nvv *schema.NestedValueValidation) *Struc newProperties[k] = *nestedValueValidationToStructural(&v).Structural } + var newAdditionalProperties *schema.StructuralOrBool + if nvv.AdditionalProperties != nil { + newAdditionalProperties = &schema.StructuralOrBool{Structural: nestedValueValidationToStructural(nvv.AdditionalProperties).Structural} + } + return &Structural{ Structural: &schema.Structural{ - Items: newItems, - Properties: newProperties, - ValueValidation: &nvv.ValueValidation, + Items: newItems, + Properties: newProperties, + AdditionalProperties: newAdditionalProperties, + ValueValidation: &nvv.ValueValidation, + ValidationExtensions: nvv.ValidationExtensions, }, } } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/schemas.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/schemas.go index 6b49e67a4..f1c5ad9e9 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/schemas.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/schemas.go @@ -49,9 +49,11 @@ func WithTypeAndObjectMeta(s *schema.Structural) *schema.Structural { return s } result := &schema.Structural{ - Generic: s.Generic, - Extensions: s.Extensions, - ValueValidation: s.ValueValidation, + AdditionalProperties: s.AdditionalProperties, + Generic: s.Generic, + Extensions: s.Extensions, + ValueValidation: s.ValueValidation, + ValidationExtensions: s.ValidationExtensions, } props := make(map[string]schema.Structural, len(s.Properties)) for k, prop := range s.Properties { diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go index d9b595805..575fd5e2e 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go @@ -51,12 +51,15 @@ import ( // Validator parallels the structure of schema.Structural and includes the compiled CEL programs // for the x-kubernetes-validations of each schema node. 
type Validator struct { - Items *Validator - Properties map[string]Validator - + Items *Validator + Properties map[string]Validator + AllOfValidators []*Validator AdditionalProperties *Validator - compiledRules []CompilationResult + Schema *schema.Structural + + uncompiledRules []apiextensions.ValidationRule + compiledRules []CompilationResult // Program compilation is pre-checked at CRD creation/update time, so we don't expect compilation to fail // they are recompiled and added to this type, and it does, it is an internal bug. @@ -82,27 +85,47 @@ func NewValidator(s *schema.Structural, isResourceRoot bool, perCallLimit uint64 if !hasXValidations(s) { return nil } - return validator(s, isResourceRoot, model.SchemaDeclType(s, isResourceRoot), perCallLimit) + return validator(s, s, isResourceRoot, model.SchemaDeclType(s, isResourceRoot), perCallLimit) } // validator creates a Validator for all x-kubernetes-validations at the level of the provided schema and lower and // returns the Validator if any x-kubernetes-validations exist in the schema, or nil if no x-kubernetes-validations // exist. declType is expected to be a CEL DeclType corresponding to the structural schema. // perCallLimit was added for testing purpose only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input. -func validator(s *schema.Structural, isResourceRoot bool, declType *cel.DeclType, perCallLimit uint64) *Validator { - // strictCost is always true to enforce cost limits. - compiledRules, err := Compile(s, declType, perCallLimit, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true), StoredExpressionsEnvLoader()) +func validator(validationSchema, nodeSchema *schema.Structural, isResourceRoot bool, declType *cel.DeclType, perCallLimit uint64) *Validator { + compilationSchema := *nodeSchema + compilationSchema.XValidations = validationSchema.XValidations + compiledRules, err := Compile(&compilationSchema, declType, perCallLimit, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true), StoredExpressionsEnvLoader()) + var itemsValidator, additionalPropertiesValidator *Validator var propertiesValidators map[string]Validator - if s.Items != nil { - itemsValidator = validator(s.Items, s.Items.XEmbeddedResource, declType.ElemType, perCallLimit) + var allOfValidators []*Validator + var elemType *cel.DeclType + if declType != nil { + elemType = declType.ElemType + } else { + elemType = declType } - if len(s.Properties) > 0 { - propertiesValidators = make(map[string]Validator, len(s.Properties)) - for k, p := range s.Properties { - prop := p + + if validationSchema.Items != nil && nodeSchema.Items != nil { + itemsValidator = validator(validationSchema.Items, nodeSchema.Items, nodeSchema.Items.XEmbeddedResource, elemType, perCallLimit) + } + + if len(validationSchema.Properties) > 0 { + propertiesValidators = make(map[string]Validator, len(validationSchema.Properties)) + for k, validationProperty := range validationSchema.Properties { + nodeProperty, ok := nodeSchema.Properties[k] + if !ok { + // Can only add value validations for fields that are on the + // structural spine of the schema. 
+ continue + } + var fieldType *cel.DeclType if escapedPropName, ok := cel.Escape(k); ok { + if declType == nil { + continue + } if f, ok := declType.Fields[escapedPropName]; ok { fieldType = f.Type } else { @@ -112,20 +135,32 @@ func validator(s *schema.Structural, isResourceRoot bool, declType *cel.DeclType } else { // field may be absent from declType if the property name is unescapable, in which case we should convert // the field value type to a DeclType. - fieldType = model.SchemaDeclType(&prop, prop.XEmbeddedResource) + fieldType = model.SchemaDeclType(&nodeProperty, nodeProperty.XEmbeddedResource) if fieldType == nil { continue } } - if p := validator(&prop, prop.XEmbeddedResource, fieldType, perCallLimit); p != nil { + if p := validator(&validationProperty, &nodeProperty, nodeProperty.XEmbeddedResource, fieldType, perCallLimit); p != nil { propertiesValidators[k] = *p } } } - if s.AdditionalProperties != nil && s.AdditionalProperties.Structural != nil { - additionalPropertiesValidator = validator(s.AdditionalProperties.Structural, s.AdditionalProperties.Structural.XEmbeddedResource, declType.ElemType, perCallLimit) + if validationSchema.AdditionalProperties != nil && validationSchema.AdditionalProperties.Structural != nil && + nodeSchema.AdditionalProperties != nil && nodeSchema.AdditionalProperties.Structural != nil { + additionalPropertiesValidator = validator(validationSchema.AdditionalProperties.Structural, nodeSchema.AdditionalProperties.Structural, nodeSchema.AdditionalProperties.Structural.XEmbeddedResource, elemType, perCallLimit) } - if len(compiledRules) > 0 || err != nil || itemsValidator != nil || additionalPropertiesValidator != nil || len(propertiesValidators) > 0 { + + if validationSchema.ValueValidation != nil && len(validationSchema.ValueValidation.AllOf) > 0 { + allOfValidators = make([]*Validator, 0, len(validationSchema.ValueValidation.AllOf)) + for _, allOf := range validationSchema.ValueValidation.AllOf { + allOfValidator := validator(nestedToStructural(&allOf), nodeSchema, isResourceRoot, declType, perCallLimit) + if allOfValidator != nil { + allOfValidators = append(allOfValidators, allOfValidator) + } + } + } + + if len(compiledRules) > 0 || err != nil || itemsValidator != nil || additionalPropertiesValidator != nil || len(propertiesValidators) > 0 || len(allOfValidators) > 0 { activationFactory := validationActivationWithoutOldSelf for _, rule := range compiledRules { if rule.UsesOldSelf { @@ -136,12 +171,15 @@ func validator(s *schema.Structural, isResourceRoot bool, declType *cel.DeclType return &Validator{ compiledRules: compiledRules, + uncompiledRules: validationSchema.XValidations, compilationErr: err, isResourceRoot: isResourceRoot, Items: itemsValidator, AdditionalProperties: additionalPropertiesValidator, Properties: propertiesValidators, + AllOfValidators: allOfValidators, celActivationFactory: activationFactory, + Schema: nodeSchema, } } @@ -164,13 +202,13 @@ func WithRatcheting(correlation *common.CorrelatedObject) Option { // If the validation rules exceed the costBudget, subsequent evaluations will be skipped, the list of errs returned will not be empty, and a negative remainingBudget will be returned. 
// Most callers can ignore the returned remainingBudget value unless another validate call is going to be made // context is passed for supporting context cancellation during cel validation -func (s *Validator) Validate(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj interface{}, costBudget int64, opts ...Option) (errs field.ErrorList, remainingBudget int64) { +func (s *Validator) Validate(ctx context.Context, fldPath *field.Path, _ *schema.Structural, obj, oldObj interface{}, costBudget int64, opts ...Option) (errs field.ErrorList, remainingBudget int64) { opt := options{} for _, o := range opts { o(&opt) } - return s.validate(ctx, fldPath, sts, obj, oldObj, opt.ratchetingOptions, costBudget) + return s.validate(ctx, fldPath, obj, oldObj, opt.ratchetingOptions, costBudget) } // ratchetingOptions stores the current correlation object and the nearest @@ -234,7 +272,36 @@ func (r ratchetingOptions) index(idx int) ratchetingOptions { return ratchetingOptions{currentCorrelation: r.currentCorrelation.Index(idx), nearestParentCorrelation: r.currentCorrelation} } -func (s *Validator) validate(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { +func nestedToStructural(nested *schema.NestedValueValidation) *schema.Structural { + if nested == nil { + return nil + } + + structuralConversion := &schema.Structural{ + ValueValidation: &nested.ValueValidation, + ValidationExtensions: nested.ValidationExtensions, + Generic: nested.ForbiddenGenerics, + Extensions: nested.ForbiddenExtensions, + Items: nestedToStructural(nested.Items), + } + + if len(nested.Properties) > 0 { + structuralConversion.Properties = make(map[string]schema.Structural, len(nested.Properties)) + for k, v := range nested.Properties { + structuralConversion.Properties[k] = *nestedToStructural(&v) + } + } + + if nested.AdditionalProperties != nil { + structuralConversion.AdditionalProperties = &schema.StructuralOrBool{ + Structural: nestedToStructural(nested.AdditionalProperties), + } + } + + return structuralConversion +} + +func (s *Validator) validate(ctx context.Context, fldPath *field.Path, obj, oldObj interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { t := time.Now() defer func() { metrics.Metrics.ObserveEvaluation(time.Since(t)) @@ -244,23 +311,37 @@ func (s *Validator) validate(ctx context.Context, fldPath *field.Path, sts *sche return nil, remainingBudget } - errs, remainingBudget = s.validateExpressions(ctx, fldPath, sts, obj, oldObj, correlation, remainingBudget) + errs, remainingBudget = s.validateExpressions(ctx, fldPath, obj, oldObj, correlation, remainingBudget) if remainingBudget < 0 { return errs, remainingBudget } + // If the schema has allOf, recurse through those elements to see if there + // are any validation rules that need to be evaluated. + for _, allOfValidator := range s.AllOfValidators { + var allOfErrs field.ErrorList + // Pass options with nil currentCorrelation to mirror schema ratcheting + // behavior which does not ratchet allOf errors. This may change in the + // future for allOf. + allOfErrs, remainingBudget = allOfValidator.validate(ctx, fldPath, obj, oldObj, ratchetingOptions{nearestParentCorrelation: correlation.nearestParentCorrelation}, remainingBudget) + errs = append(errs, allOfErrs...) 
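The validator changes above (the validationSchema/nodeSchema split and the new AllOfValidators recursion) only change where x-kubernetes-validations rules are discovered; each rule is still an ordinary CEL expression evaluated against self and, for transition rules, oldSelf. A minimal cel-go sketch of that style of evaluation, outside the apiextensions machinery and with an invented rule:

package main

import (
    "fmt"

    "github.com/google/cel-go/cel"
)

func main() {
    // Declare `self` as a dynamic value, roughly the shape a custom resource
    // field has when a validation rule runs against it.
    env, err := cel.NewEnv(cel.Variable("self", cel.DynType))
    if err != nil {
        panic(err)
    }

    // An invented rule of the kind x-kubernetes-validations carries.
    ast, iss := env.Compile(`self.replicas <= self.maxReplicas`)
    if iss != nil && iss.Err() != nil {
        panic(iss.Err())
    }

    prg, err := env.Program(ast)
    if err != nil {
        panic(err)
    }

    out, _, err := prg.Eval(map[string]any{
        "self": map[string]any{"replicas": 2, "maxReplicas": 3},
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(out.Value()) // true
}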
+ if remainingBudget < 0 { + return errs, remainingBudget + } + } + switch obj := obj.(type) { case []interface{}: oldArray, _ := oldObj.([]interface{}) var arrayErrs field.ErrorList - arrayErrs, remainingBudget = s.validateArray(ctx, fldPath, sts, obj, oldArray, correlation, remainingBudget) + arrayErrs, remainingBudget = s.validateArray(ctx, fldPath, obj, oldArray, correlation, remainingBudget) errs = append(errs, arrayErrs...) return errs, remainingBudget case map[string]interface{}: oldMap, _ := oldObj.(map[string]interface{}) var mapErrs field.ErrorList - mapErrs, remainingBudget = s.validateMap(ctx, fldPath, sts, obj, oldMap, correlation, remainingBudget) + mapErrs, remainingBudget = s.validateMap(ctx, fldPath, obj, oldMap, correlation, remainingBudget) errs = append(errs, mapErrs...) return errs, remainingBudget } @@ -268,7 +349,9 @@ func (s *Validator) validate(ctx context.Context, fldPath *field.Path, sts *sche return errs, remainingBudget } -func (s *Validator) validateExpressions(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { +func (s *Validator) validateExpressions(ctx context.Context, fldPath *field.Path, obj, oldObj interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { + sts := s.Schema + // guard against oldObj being a non-nil interface with a nil value if oldObj != nil { v := reflect.ValueOf(oldObj) @@ -303,7 +386,7 @@ func (s *Validator) validateExpressions(ctx context.Context, fldPath *field.Path } activation, optionalOldSelfActivation := s.celActivationFactory(sts, obj, oldObj) for i, compiled := range s.compiledRules { - rule := sts.XValidations[i] + rule := s.uncompiledRules[i] if compiled.Error != nil { errs = append(errs, field.Invalid(fldPath, sts.Type, fmt.Sprintf("rule compile error: %v", compiled.Error))) continue @@ -720,7 +803,7 @@ func (a *validationActivation) Parent() interpreter.Activation { return nil } -func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj map[string]interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { +func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, obj, oldObj map[string]interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { remainingBudget = costBudget if remainingBudget < 0 { return errs, remainingBudget @@ -729,9 +812,9 @@ func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, sts *s return nil, remainingBudget } - correlatable := MapIsCorrelatable(sts.XMapType) + correlatable := MapIsCorrelatable(s.Schema.XMapType) - if s.AdditionalProperties != nil && sts.AdditionalProperties != nil && sts.AdditionalProperties.Structural != nil { + if s.AdditionalProperties != nil { for k, v := range obj { var oldV interface{} if correlatable { @@ -739,25 +822,24 @@ func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, sts *s } var err field.ErrorList - err, remainingBudget = s.AdditionalProperties.validate(ctx, fldPath.Key(k), sts.AdditionalProperties.Structural, v, oldV, correlation.key(k), remainingBudget) + err, remainingBudget = s.AdditionalProperties.validate(ctx, fldPath.Key(k), v, oldV, correlation.key(k), remainingBudget) errs = append(errs, err...) 
if remainingBudget < 0 { return errs, remainingBudget } } } - if s.Properties != nil && sts.Properties != nil { + if s.Properties != nil { for k, v := range obj { - stsProp, stsOk := sts.Properties[k] sub, ok := s.Properties[k] - if ok && stsOk { + if ok { var oldV interface{} if correlatable { oldV = oldObj[k] // +k8s:verify-mutation:reason=clone } var err field.ErrorList - err, remainingBudget = sub.validate(ctx, fldPath.Child(k), &stsProp, v, oldV, correlation.key(k), remainingBudget) + err, remainingBudget = sub.validate(ctx, fldPath.Child(k), v, oldV, correlation.key(k), remainingBudget) errs = append(errs, err...) if remainingBudget < 0 { return errs, remainingBudget @@ -769,19 +851,19 @@ func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, sts *s return errs, remainingBudget } -func (s *Validator) validateArray(ctx context.Context, fldPath *field.Path, sts *schema.Structural, obj, oldObj []interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { +func (s *Validator) validateArray(ctx context.Context, fldPath *field.Path, obj, oldObj []interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) { remainingBudget = costBudget if remainingBudget < 0 { return errs, remainingBudget } - if s.Items != nil && sts.Items != nil { + if s.Items != nil { // only map-type lists support self-oldSelf correlation for cel rules. if this isn't a // map-type list, then makeMapList returns an implementation that always returns nil - correlatableOldItems := makeMapList(sts, oldObj) + correlatableOldItems := makeMapList(s.Schema, oldObj) for i := range obj { var err field.ErrorList - err, remainingBudget = s.Items.validate(ctx, fldPath.Index(i), sts.Items, obj[i], correlatableOldItems.Get(obj[i]), correlation.index(i), remainingBudget) + err, remainingBudget = s.Items.validate(ctx, fldPath.Index(i), obj[i], correlatableOldItems.Get(obj[i]), correlation.index(i), remainingBudget) errs = append(errs, err...) if remainingBudget < 0 { return errs, remainingBudget diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go index 08e222f0d..65bf3cef4 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go @@ -22,17 +22,24 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" ) -// validateStructuralCompleteness checks that every specified field or array in s is also specified -// outside of value validation. -func validateStructuralCompleteness(s *Structural, fldPath *field.Path) field.ErrorList { +// validateStructuralCompleteness checks that all value validations in s have +// a structural counterpart so that every value validation applies to a value +// with a known schema: +// - validations for specific properties must have that property (or additionalProperties under an option) structurally defined +// - additionalProperties validations must have additionalProperties defined in the structural portion of the schema corresponding to that node +// - Items validations must have also have a corresponding items structurally +// +// The "structural" portion of the schema refers to all nodes in the +// schema traversible without following any NestedValueValidations. 
+func validateStructuralCompleteness(s *Structural, fldPath *field.Path, opts ValidationOptions) field.ErrorList { if s == nil { return nil } - return validateValueValidationCompleteness(s.ValueValidation, s, fldPath, fldPath) + return validateValueValidationCompleteness(s.ValueValidation, s, fldPath, fldPath, opts) } -func validateValueValidationCompleteness(v *ValueValidation, s *Structural, sPath, vPath *field.Path) field.ErrorList { +func validateValueValidationCompleteness(v *ValueValidation, s *Structural, sPath, vPath *field.Path, opts ValidationOptions) field.ErrorList { if v == nil { return nil } @@ -42,21 +49,21 @@ func validateValueValidationCompleteness(v *ValueValidation, s *Structural, sPat allErrs := field.ErrorList{} - allErrs = append(allErrs, validateNestedValueValidationCompleteness(v.Not, s, sPath, vPath.Child("not"))...) + allErrs = append(allErrs, validateNestedValueValidationCompleteness(v.Not, s, sPath, vPath.Child("not"), opts)...) for i := range v.AllOf { - allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.AllOf[i], s, sPath, vPath.Child("allOf").Index(i))...) + allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.AllOf[i], s, sPath, vPath.Child("allOf").Index(i), opts)...) } for i := range v.AnyOf { - allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.AnyOf[i], s, sPath, vPath.Child("anyOf").Index(i))...) + allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.AnyOf[i], s, sPath, vPath.Child("anyOf").Index(i), opts)...) } for i := range v.OneOf { - allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.OneOf[i], s, sPath, vPath.Child("oneOf").Index(i))...) + allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.OneOf[i], s, sPath, vPath.Child("oneOf").Index(i), opts)...) } return allErrs } -func validateNestedValueValidationCompleteness(v *NestedValueValidation, s *Structural, sPath, vPath *field.Path) field.ErrorList { +func validateNestedValueValidationCompleteness(v *NestedValueValidation, s *Structural, sPath, vPath *field.Path, opts ValidationOptions) field.ErrorList { if v == nil { return nil } @@ -66,17 +73,34 @@ func validateNestedValueValidationCompleteness(v *NestedValueValidation, s *Stru allErrs := field.ErrorList{} - allErrs = append(allErrs, validateValueValidationCompleteness(&v.ValueValidation, s, sPath, vPath)...) - allErrs = append(allErrs, validateNestedValueValidationCompleteness(v.Items, s.Items, sPath.Child("items"), vPath.Child("items"))...) + allErrs = append(allErrs, validateValueValidationCompleteness(&v.ValueValidation, s, sPath, vPath, opts)...) + allErrs = append(allErrs, validateNestedValueValidationCompleteness(v.Items, s.Items, sPath.Child("items"), vPath.Child("items"), opts)...) 
+ + var sAdditionalPropertiesSchema *Structural + if s.AdditionalProperties != nil { + sAdditionalPropertiesSchema = s.AdditionalProperties.Structural + } + for k, vFld := range v.Properties { if sFld, ok := s.Properties[k]; !ok { - allErrs = append(allErrs, field.Required(sPath.Child("properties").Key(k), fmt.Sprintf("because it is defined in %s", vPath.Child("properties").Key(k)))) + if sAdditionalPropertiesSchema == nil || !opts.AllowValidationPropertiesWithAdditionalProperties { + allErrs = append(allErrs, field.Required(sPath.Child("properties").Key(k), fmt.Sprintf("because it is defined in %s", vPath.Child("properties").Key(k)))) + } else { + // Allow validations on specific properties if there exists an + // additionalProperties structural schema specified instead of + // direct properties + // NOTE: This does not allow `additionalProperties: true` structural + // schema to be combined with specific property validations. + allErrs = append(allErrs, validateNestedValueValidationCompleteness(&vFld, sAdditionalPropertiesSchema, sPath.Child("additionalProperties"), vPath.Child("properties").Key(k), opts)...) + } } else { - allErrs = append(allErrs, validateNestedValueValidationCompleteness(&vFld, &sFld, sPath.Child("properties").Key(k), vPath.Child("properties").Key(k))...) + allErrs = append(allErrs, validateNestedValueValidationCompleteness(&vFld, &sFld, sPath.Child("properties").Key(k), vPath.Child("properties").Key(k), opts)...) } } - // don't check additionalProperties as this is not allowed (and checked during validation) + if v.AdditionalProperties != nil && opts.AllowNestedAdditionalProperties { + allErrs = append(allErrs, validateNestedValueValidationCompleteness(v.AdditionalProperties, sAdditionalPropertiesSchema, sPath.Child("additionalProperties"), vPath.Child("additionalProperties"), opts)...) 
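The branch above is the core of AllowValidationPropertiesWithAdditionalProperties: a value validation for a named property may be satisfied by an additionalProperties schema rather than a structurally declared property. A minimal sketch of a schema shape this admits, built only from the exported types of this vendored schema package; the property names and the MinLength constraint are illustrative assumptions, not taken from this change:

package main

import (
	"fmt"

	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	minLen := int64(1)
	// "labels" is declared structurally only via additionalProperties, while an
	// allOf branch constrains the specific key "app". Without the option this is
	// rejected with a field.Required error at ...properties[app]; with it, the
	// "app" validation is checked against the additionalProperties schema instead.
	root := &structuralschema.Structural{
		Generic: structuralschema.Generic{Type: "object"},
		Properties: map[string]structuralschema.Structural{
			"labels": {
				Generic: structuralschema.Generic{Type: "object"},
				AdditionalProperties: &structuralschema.StructuralOrBool{
					Bool:       true,
					Structural: &structuralschema.Structural{Generic: structuralschema.Generic{Type: "string"}},
				},
				ValueValidation: &structuralschema.ValueValidation{
					AllOf: []structuralschema.NestedValueValidation{{
						Properties: map[string]structuralschema.NestedValueValidation{
							"app": {ValueValidation: structuralschema.ValueValidation{MinLength: &minLen}},
						},
					}},
				},
			},
		},
	}

	errs := structuralschema.ValidateStructuralWithOptions(field.NewPath("openAPIV3Schema"), root, structuralschema.ValidationOptions{
		AllowValidationPropertiesWithAdditionalProperties: true,
	})
	fmt.Println(errs) // expected to be empty; without the option, a completeness error is reported
}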
+ } return allErrs } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go index 9ec23b332..78350c135 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go @@ -18,6 +18,7 @@ package schema import ( "fmt" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) @@ -62,10 +63,16 @@ func NewStructural(s *apiextensions.JSONSchemaProps) (*Structural, error) { return nil, err } + vx, err := newValidationExtensions(s) + if err != nil { + return nil, err + } + ss := &Structural{ - Generic: *g, - Extensions: *x, - ValueValidation: vv, + Generic: *g, + Extensions: *x, + ValueValidation: vv, + ValidationExtensions: *vx, } if s.Items != nil { @@ -91,6 +98,18 @@ func NewStructural(s *apiextensions.JSONSchemaProps) (*Structural, error) { } } + if s.AdditionalProperties != nil { + if s.AdditionalProperties.Schema != nil { + additionalPropertiesSchema, err := NewStructural(s.AdditionalProperties.Schema) + if err != nil { + return nil, err + } + ss.AdditionalProperties = &StructuralOrBool{Structural: additionalPropertiesSchema, Bool: true} + } else { + ss.AdditionalProperties = &StructuralOrBool{Bool: s.AdditionalProperties.Allows} + } + } + return ss, nil } @@ -108,18 +127,6 @@ func newGenerics(s *apiextensions.JSONSchemaProps) (*Generic, error) { g.Default = JSON{interface{}(*s.Default)} } - if s.AdditionalProperties != nil { - if s.AdditionalProperties.Schema != nil { - ss, err := NewStructural(s.AdditionalProperties.Schema) - if err != nil { - return nil, err - } - g.AdditionalProperties = &StructuralOrBool{Structural: ss, Bool: true} - } else { - g.AdditionalProperties = &StructuralOrBool{Bool: s.AdditionalProperties.Allows} - } - } - return g, nil } @@ -205,10 +212,16 @@ func newNestedValueValidation(s *apiextensions.JSONSchemaProps) (*NestedValueVal return nil, err } + vx, err := newValidationExtensions(s) + if err != nil { + return nil, err + } + v := &NestedValueValidation{ - ValueValidation: *vv, - ForbiddenGenerics: *g, - ForbiddenExtensions: *x, + ValueValidation: *vv, + ValidationExtensions: *vx, + ForbiddenGenerics: *g, + ForbiddenExtensions: *x, } if s.Items != nil { @@ -232,6 +245,18 @@ func newNestedValueValidation(s *apiextensions.JSONSchemaProps) (*NestedValueVal v.Properties[k] = *nvv } } + if s.AdditionalProperties != nil { + if s.AdditionalProperties.Schema != nil { + additionalPropertiesSchema, err := newNestedValueValidation(s.AdditionalProperties.Schema) + if err != nil { + return nil, err + } + v.AdditionalProperties = additionalPropertiesSchema + } else if s.AdditionalProperties.Allows { + v.AdditionalProperties = &NestedValueValidation{} + } + + } return v, nil } @@ -248,9 +273,6 @@ func newExtensions(s *apiextensions.JSONSchemaProps) (*Extensions, error) { XListType: s.XListType, XMapType: s.XMapType, } - if err := apiextensionsv1.Convert_apiextensions_ValidationRules_To_v1_ValidationRules(&s.XValidations, &ret.XValidations, nil); err != nil { - return nil, err - } if s.XPreserveUnknownFields != nil { if !*s.XPreserveUnknownFields { @@ -262,6 +284,19 @@ func newExtensions(s *apiextensions.JSONSchemaProps) (*Extensions, error) { return ret, nil } +func newValidationExtensions(s *apiextensions.JSONSchemaProps) (*ValidationExtensions, error) { + if s == nil { 
+ return nil, nil + } + + ret := &ValidationExtensions{} + if err := apiextensionsv1.Convert_apiextensions_ValidationRules_To_v1_ValidationRules(&s.XValidations, &ret.XValidations, nil); err != nil { + return nil, err + } + + return ret, nil +} + // validateUnsupportedFields checks that those fields rejected by validation are actually unset. func validateUnsupportedFields(s *apiextensions.JSONSchemaProps) error { if len(s.ID) > 0 { diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/kubeopenapi.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/kubeopenapi.go index 23bffbfb1..df78ba77e 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/kubeopenapi.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/kubeopenapi.go @@ -37,10 +37,16 @@ func (s *Structural) ToKubeOpenAPI() *spec.Schema { ret.Properties[k] = *v.ToKubeOpenAPI() } } + if s.AdditionalProperties != nil { + ret.AdditionalProperties = &spec.SchemaOrBool{ + Allows: s.AdditionalProperties.Bool, + Schema: s.AdditionalProperties.Structural.ToKubeOpenAPI(), + } + } s.Generic.toKubeOpenAPI(ret) s.Extensions.toKubeOpenAPI(ret) s.ValueValidation.toKubeOpenAPI(ret) - + s.ValidationExtensions.toKubeOpenAPI(ret) return ret } @@ -53,12 +59,6 @@ func (g *Generic) toKubeOpenAPI(ret *spec.Schema) { ret.Type = spec.StringOrArray{g.Type} } ret.Nullable = g.Nullable - if g.AdditionalProperties != nil { - ret.AdditionalProperties = &spec.SchemaOrBool{ - Allows: g.AdditionalProperties.Bool, - Schema: g.AdditionalProperties.Structural.ToKubeOpenAPI(), - } - } ret.Description = g.Description ret.Title = g.Title ret.Default = g.Default.Object @@ -87,6 +87,13 @@ func (x *Extensions) toKubeOpenAPI(ret *spec.Schema) { if x.XMapType != nil { ret.VendorExtensible.AddExtension("x-kubernetes-map-type", *x.XMapType) } +} + +func (x *ValidationExtensions) toKubeOpenAPI(ret *spec.Schema) { + if x == nil { + return + } + if len(x.XValidations) > 0 { ret.VendorExtensible.AddExtension("x-kubernetes-validations", x.XValidations) } @@ -138,6 +145,7 @@ func (vv *NestedValueValidation) toKubeOpenAPI() *spec.Schema { ret := &spec.Schema{} vv.ValueValidation.toKubeOpenAPI(ret) + vv.ValidationExtensions.toKubeOpenAPI(ret) if vv.Items != nil { ret.Items = &spec.SchemaOrArray{Schema: vv.Items.toKubeOpenAPI()} } @@ -149,6 +157,5 @@ func (vv *NestedValueValidation) toKubeOpenAPI() *spec.Schema { } vv.ForbiddenGenerics.toKubeOpenAPI(ret) // normally empty. Exception: int-or-string vv.ForbiddenExtensions.toKubeOpenAPI(ret) // shouldn't do anything - return ret } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go index 234998d90..5688c2ac4 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go @@ -25,11 +25,13 @@ import ( // Structural represents a structural schema. type Structural struct { - Items *Structural - Properties map[string]Structural + Items *Structural + Properties map[string]Structural + AdditionalProperties *StructuralOrBool Generic Extensions + ValidationExtensions ValueValidation *ValueValidation } @@ -51,11 +53,10 @@ type Generic struct { // It can be object, array, number, integer, boolean, string. 
// It is optional only if x-kubernetes-preserve-unknown-fields // or x-kubernetes-int-or-string is true. - Type string - Title string - Default JSON - AdditionalProperties *StructuralOrBool - Nullable bool + Type string + Title string + Default JSON + Nullable bool } // +k8s:deepcopy-gen=true @@ -128,7 +129,13 @@ type Extensions struct { // Atomic maps will be entirely replaced when updated. // +optional XMapType *string +} + +// +k8s:deepcopy-gen=true + +// ValidationExtensions contains the Kubernetes OpenAPI v3 extensions that are +// used for validation rather than structure. +type ValidationExtensions struct { // x-kubernetes-validations describes a list of validation rules for expression validation. // Use the v1 struct since this gets serialized as an extension. XValidations apiextensionsv1.ValidationRules @@ -166,9 +173,11 @@ type ValueValidation struct { // under a logical junctor, and catch all structs for generic and vendor extensions schema fields. type NestedValueValidation struct { ValueValidation + ValidationExtensions - Items *NestedValueValidation - Properties map[string]NestedValueValidation + Items *NestedValueValidation + Properties map[string]NestedValueValidation + AdditionalProperties *NestedValueValidation // Anything set in the following will make the scheme // non-structural, with the exception of these two patterns if diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go index f7f29e70c..a9caddb38 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go @@ -42,6 +42,22 @@ const ( fieldLevel ) +type ValidationOptions struct { + // AllowNestedAdditionalProperties allows additionalProperties to be specified in + // nested contexts (allOf, anyOf, oneOf, not). + AllowNestedAdditionalProperties bool + + // AllowNestedXValidations allows x-kubernetes-validations to be specified in + // nested contexts (allOf, anyOf, oneOf, not). + AllowNestedXValidations bool + + // AllowValidationPropertiesWithAdditionalProperties allows + // value validations on specific properties that are structurally + // defined by additionalProperties, i.e. additionalProperties in the main structural + // schema, but properties within an allOf. + AllowValidationPropertiesWithAdditionalProperties bool +} + // ValidateStructural checks that s is a structural schema with the invariants: // // * structurality: both `ForbiddenGenerics` and `ForbiddenExtensions` only have zero values, with the two exceptions for IntOrString. @@ -61,10 +77,21 @@ const ( // * metadata at the root can only restrict the name and generateName, and not be specified at all in nested contexts. // * additionalProperties at the root is not allowed. func ValidateStructural(fldPath *field.Path, s *Structural) field.ErrorList { + return ValidateStructuralWithOptions(fldPath, s, ValidationOptions{ + // This would widen the schema for CRDs if set to true, so the first few releases will still
But it can still be used by libraries and + // declarative validation for native types + AllowNestedAdditionalProperties: false, + AllowNestedXValidations: false, + AllowValidationPropertiesWithAdditionalProperties: false, + }) +} + +func ValidateStructuralWithOptions(fldPath *field.Path, s *Structural, opts ValidationOptions) field.ErrorList { allErrs := field.ErrorList{} - allErrs = append(allErrs, validateStructuralInvariants(s, rootLevel, fldPath)...) - allErrs = append(allErrs, validateStructuralCompleteness(s, fldPath)...) + allErrs = append(allErrs, validateStructuralInvariants(s, rootLevel, fldPath, opts)...) + allErrs = append(allErrs, validateStructuralCompleteness(s, fldPath, opts)...) // sort error messages. Otherwise, the errors slice will change every time due to // maps in the types and randomized iteration. @@ -76,7 +103,7 @@ func ValidateStructural(fldPath *field.Path, s *Structural) field.ErrorList { } // validateStructuralInvariants checks the invariants of a structural schema. -func validateStructuralInvariants(s *Structural, lvl level, fldPath *field.Path) field.ErrorList { +func validateStructuralInvariants(s *Structural, lvl level, fldPath *field.Path, opts ValidationOptions) field.ErrorList { if s == nil { return nil } @@ -86,11 +113,21 @@ func validateStructuralInvariants(s *Structural, lvl level, fldPath *field.Path) if s.Type == "array" && s.Items == nil { allErrs = append(allErrs, field.Required(fldPath.Child("items"), "must be specified")) } - allErrs = append(allErrs, validateStructuralInvariants(s.Items, itemLevel, fldPath.Child("items"))...) + allErrs = append(allErrs, validateStructuralInvariants(s.Items, itemLevel, fldPath.Child("items"), opts)...) for k, v := range s.Properties { - allErrs = append(allErrs, validateStructuralInvariants(&v, fieldLevel, fldPath.Child("properties").Key(k))...) + allErrs = append(allErrs, validateStructuralInvariants(&v, fieldLevel, fldPath.Child("properties").Key(k), opts)...) + } + + if s.AdditionalProperties != nil { + if lvl == rootLevel { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("additionalProperties"), "must not be used at the root")) + } + if s.AdditionalProperties.Structural != nil { + allErrs = append(allErrs, validateStructuralInvariants(s.AdditionalProperties.Structural, fieldLevel, fldPath.Child("additionalProperties"), opts)...) + } } + allErrs = append(allErrs, validateGeneric(&s.Generic, lvl, fldPath)...) allErrs = append(allErrs, validateExtensions(&s.Extensions, fldPath)...) @@ -106,7 +143,7 @@ func validateStructuralInvariants(s *Structural, lvl level, fldPath *field.Path) skipAnyOf := isIntOrStringAnyOfPattern(s) skipFirstAllOfAnyOf := isIntOrStringAllOfPattern(s) - allErrs = append(allErrs, validateValueValidation(s.ValueValidation, skipAnyOf, skipFirstAllOfAnyOf, lvl, fldPath)...) + allErrs = append(allErrs, validateValueValidation(s.ValueValidation, skipAnyOf, skipFirstAllOfAnyOf, lvl, fldPath, opts)...) checkMetadata := (lvl == rootLevel) || s.XEmbeddedResource @@ -207,18 +244,7 @@ func validateGeneric(g *Generic, lvl level, fldPath *field.Path) field.ErrorList return nil } - allErrs := field.ErrorList{} - - if g.AdditionalProperties != nil { - if lvl == rootLevel { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("additionalProperties"), "must not be used at the root")) - } - if g.AdditionalProperties.Structural != nil { - allErrs = append(allErrs, validateStructuralInvariants(g.AdditionalProperties.Structural, fieldLevel, fldPath.Child("additionalProperties"))...) 
- } - } - - return allErrs + return nil } // validateExtensions checks Kubernetes vendor extensions of a structural schema. @@ -236,16 +262,23 @@ func validateExtensions(x *Extensions, fldPath *field.Path) field.ErrorList { } // validateValueValidation checks the value validation in a structural schema. -func validateValueValidation(v *ValueValidation, skipAnyOf, skipFirstAllOfAnyOf bool, lvl level, fldPath *field.Path) field.ErrorList { +func validateValueValidation(v *ValueValidation, skipAnyOf, skipFirstAllOfAnyOf bool, lvl level, fldPath *field.Path, opts ValidationOptions) field.ErrorList { if v == nil { return nil } allErrs := field.ErrorList{} + // We still unconditionally forbid XValidations in quantifiers, the only + // quantifier that is allowed to have right now is AllOf. This is due to + // implementation constraints - the SchemaValidator would need to become + // aware of CEL to properly implement the other quantifiers. + optsWithCELDisabled := opts + optsWithCELDisabled.AllowNestedXValidations = false + if !skipAnyOf { for i := range v.AnyOf { - allErrs = append(allErrs, validateNestedValueValidation(&v.AnyOf[i], false, false, lvl, fldPath.Child("anyOf").Index(i))...) + allErrs = append(allErrs, validateNestedValueValidation(&v.AnyOf[i], false, false, lvl, fldPath.Child("anyOf").Index(i), optsWithCELDisabled)...) } } @@ -254,14 +287,14 @@ func validateValueValidation(v *ValueValidation, skipAnyOf, skipFirstAllOfAnyOf if skipFirstAllOfAnyOf && i == 0 { skipAnyOf = true } - allErrs = append(allErrs, validateNestedValueValidation(&v.AllOf[i], skipAnyOf, false, lvl, fldPath.Child("allOf").Index(i))...) + allErrs = append(allErrs, validateNestedValueValidation(&v.AllOf[i], skipAnyOf, false, lvl, fldPath.Child("allOf").Index(i), opts)...) } for i := range v.OneOf { - allErrs = append(allErrs, validateNestedValueValidation(&v.OneOf[i], false, false, lvl, fldPath.Child("oneOf").Index(i))...) + allErrs = append(allErrs, validateNestedValueValidation(&v.OneOf[i], false, false, lvl, fldPath.Child("oneOf").Index(i), optsWithCELDisabled)...) } - allErrs = append(allErrs, validateNestedValueValidation(v.Not, false, false, lvl, fldPath.Child("not"))...) + allErrs = append(allErrs, validateNestedValueValidation(v.Not, false, false, lvl, fldPath.Child("not"), optsWithCELDisabled)...) if len(v.Pattern) > 0 { if _, err := regexp.Compile(v.Pattern); err != nil { @@ -273,25 +306,27 @@ func validateValueValidation(v *ValueValidation, skipAnyOf, skipFirstAllOfAnyOf } // validateNestedValueValidation checks the nested value validation under a logic junctor in a structural schema. -func validateNestedValueValidation(v *NestedValueValidation, skipAnyOf, skipAllOfAnyOf bool, lvl level, fldPath *field.Path) field.ErrorList { +func validateNestedValueValidation(v *NestedValueValidation, skipAnyOf, skipAllOfAnyOf bool, lvl level, fldPath *field.Path, opts ValidationOptions) field.ErrorList { if v == nil { return nil } allErrs := field.ErrorList{} - allErrs = append(allErrs, validateValueValidation(&v.ValueValidation, skipAnyOf, skipAllOfAnyOf, lvl, fldPath)...) - allErrs = append(allErrs, validateNestedValueValidation(v.Items, false, false, lvl, fldPath.Child("items"))...) + allErrs = append(allErrs, validateValueValidation(&v.ValueValidation, skipAnyOf, skipAllOfAnyOf, lvl, fldPath, opts)...) + allErrs = append(allErrs, validateNestedValueValidation(v.Items, false, false, lvl, fldPath.Child("items"), opts)...) 
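In the quantifier handling above, opts is passed through unchanged only for allOf; anyOf, oneOf, and not always receive the copy with AllowNestedXValidations cleared, so CEL rules nested under those quantifiers stay forbidden even when the option is set. A minimal sketch of the observable difference, using the package's exported entry points; the rule text and property names are illustrative assumptions, not taken from this change:

package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// A CEL rule placed inside an allOf branch instead of directly on the node.
	nested := structuralschema.NestedValueValidation{
		ValidationExtensions: structuralschema.ValidationExtensions{
			XValidations: apiextensionsv1.ValidationRules{{Rule: "self.replicas <= self.maxReplicas"}},
		},
	}
	s := &structuralschema.Structural{
		Generic: structuralschema.Generic{Type: "object"},
		Properties: map[string]structuralschema.Structural{
			"replicas":    {Generic: structuralschema.Generic{Type: "integer"}},
			"maxReplicas": {Generic: structuralschema.Generic{Type: "integer"}},
		},
		ValueValidation: &structuralschema.ValueValidation{
			AllOf: []structuralschema.NestedValueValidation{nested},
		},
	}

	fldPath := field.NewPath("openAPIV3Schema")

	// Default options: rejected with a "must be empty to be structural" error on
	// allOf[0].x-kubernetes-validations.
	fmt.Println(structuralschema.ValidateStructural(fldPath, s))

	// With the option enabled the allOf placement is accepted; the same rule under
	// anyOf, oneOf, or not would still be rejected because those branches are
	// validated with AllowNestedXValidations cleared.
	fmt.Println(structuralschema.ValidateStructuralWithOptions(fldPath, s, structuralschema.ValidationOptions{AllowNestedXValidations: true}))
}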
for k, fld := range v.Properties { - allErrs = append(allErrs, validateNestedValueValidation(&fld, false, false, fieldLevel, fldPath.Child("properties").Key(k))...) + allErrs = append(allErrs, validateNestedValueValidation(&fld, false, false, fieldLevel, fldPath.Child("properties").Key(k), opts)...) } if len(v.ForbiddenGenerics.Type) > 0 { allErrs = append(allErrs, field.Forbidden(fldPath.Child("type"), "must be empty to be structural")) } - if v.ForbiddenGenerics.AdditionalProperties != nil { + if v.AdditionalProperties != nil && !opts.AllowNestedAdditionalProperties { allErrs = append(allErrs, field.Forbidden(fldPath.Child("additionalProperties"), "must be undefined to be structural")) + } else { + allErrs = append(allErrs, validateNestedValueValidation(v.AdditionalProperties, false, false, lvl, fldPath.Child("additionalProperties"), opts)...) } if v.ForbiddenGenerics.Default.Object != nil { allErrs = append(allErrs, field.Forbidden(fldPath.Child("default"), "must be undefined to be structural")) @@ -324,7 +359,7 @@ func validateNestedValueValidation(v *NestedValueValidation, skipAnyOf, skipAllO if v.ForbiddenExtensions.XMapType != nil { allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-map-type"), "must be undefined to be structural")) } - if len(v.ForbiddenExtensions.XValidations) > 0 { + if len(v.ValidationExtensions.XValidations) > 0 && !opts.AllowNestedXValidations { allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-validations"), "must be empty to be structural")) } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go index 1f4267dde..37eb72ed0 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go @@ -50,8 +50,8 @@ func (m *Visitor) visitStructural(s *Structural) bool { s.Properties[k] = v } } - if s.Generic.AdditionalProperties != nil && s.Generic.AdditionalProperties.Structural != nil { - m.visitStructural(s.Generic.AdditionalProperties.Structural) + if s.AdditionalProperties != nil && s.AdditionalProperties.Structural != nil { + m.visitStructural(s.AdditionalProperties.Structural) } if s.ValueValidation != nil { for i := range s.ValueValidation.AllOf { @@ -86,8 +86,8 @@ func (m *Visitor) visitNestedValueValidation(vv *NestedValueValidation) bool { vv.Properties[k] = v } } - if vv.ForbiddenGenerics.AdditionalProperties != nil && vv.ForbiddenGenerics.AdditionalProperties.Structural != nil { - m.visitStructural(vv.ForbiddenGenerics.AdditionalProperties.Structural) + if vv.AdditionalProperties != nil { + m.visitNestedValueValidation(vv.AdditionalProperties) } for i := range vv.ValueValidation.AllOf { m.visitNestedValueValidation(&vv.ValueValidation.AllOf[i]) diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go index b764e13e7..9cec0262b 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go @@ -43,13 +43,6 @@ func (in *Extensions) DeepCopyInto(out *Extensions) { *out = new(string) **out = **in } - if in.XValidations != nil { - in, out := &in.XValidations, &out.XValidations - *out = 
make(v1.ValidationRules, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } return } @@ -67,11 +60,6 @@ func (in *Extensions) DeepCopy() *Extensions { func (in *Generic) DeepCopyInto(out *Generic) { *out = *in out.Default = in.Default.DeepCopy() - if in.AdditionalProperties != nil { - in, out := &in.AdditionalProperties, &out.AdditionalProperties - *out = new(StructuralOrBool) - (*in).DeepCopyInto(*out) - } return } @@ -89,6 +77,7 @@ func (in *Generic) DeepCopy() *Generic { func (in *NestedValueValidation) DeepCopyInto(out *NestedValueValidation) { *out = *in in.ValueValidation.DeepCopyInto(&out.ValueValidation) + in.ValidationExtensions.DeepCopyInto(&out.ValidationExtensions) if in.Items != nil { in, out := &in.Items, &out.Items *out = new(NestedValueValidation) @@ -101,6 +90,11 @@ func (in *NestedValueValidation) DeepCopyInto(out *NestedValueValidation) { (*out)[key] = *val.DeepCopy() } } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(NestedValueValidation) + (*in).DeepCopyInto(*out) + } in.ForbiddenGenerics.DeepCopyInto(&out.ForbiddenGenerics) in.ForbiddenExtensions.DeepCopyInto(&out.ForbiddenExtensions) return @@ -131,8 +125,14 @@ func (in *Structural) DeepCopyInto(out *Structural) { (*out)[key] = *val.DeepCopy() } } + if in.AdditionalProperties != nil { + in, out := &in.AdditionalProperties, &out.AdditionalProperties + *out = new(StructuralOrBool) + (*in).DeepCopyInto(*out) + } in.Generic.DeepCopyInto(&out.Generic) in.Extensions.DeepCopyInto(&out.Extensions) + in.ValidationExtensions.DeepCopyInto(&out.ValidationExtensions) if in.ValueValidation != nil { in, out := &in.ValueValidation, &out.ValueValidation *out = new(ValueValidation) @@ -172,6 +172,29 @@ func (in *StructuralOrBool) DeepCopy() *StructuralOrBool { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidationExtensions) DeepCopyInto(out *ValidationExtensions) { + *out = *in + if in.XValidations != nil { + in, out := &in.XValidations, &out.XValidations + *out = make(v1.ValidationRules, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationExtensions. +func (in *ValidationExtensions) DeepCopy() *ValidationExtensions { + if in == nil { + return nil + } + out := new(ValidationExtensions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ValueValidation) DeepCopyInto(out *ValueValidation) { *out = *in diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go index 7304018fb..85f1ffca7 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go @@ -182,15 +182,11 @@ func kubeOpenAPIResultToFieldErrors(fldPath *field.Path, result *validate.Result allErrs = append(allErrs, field.NotSupported(errPath, err.Value, values)) case openapierrors.TooLongFailCode: - value := interface{}("") - if err.Value != nil { - value = err.Value - } max := int64(-1) if i, ok := err.Valid.(int64); ok { max = i } - allErrs = append(allErrs, field.TooLongMaxLength(errPath, value, int(max))) + allErrs = append(allErrs, field.TooLong(errPath, "" /*unused*/, int(max))) case openapierrors.MaxItemsFailCode: actual := int64(-1) diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go index b83de1c41..eb77daba3 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcecolumndefinition.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CustomResourceColumnDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceColumnDefinition type for use +// CustomResourceColumnDefinitionApplyConfiguration represents a declarative configuration of the CustomResourceColumnDefinition type for use // with apply. type CustomResourceColumnDefinitionApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -29,7 +29,7 @@ type CustomResourceColumnDefinitionApplyConfiguration struct { JSONPath *string `json:"jsonPath,omitempty"` } -// CustomResourceColumnDefinitionApplyConfiguration constructs an declarative configuration of the CustomResourceColumnDefinition type for use with +// CustomResourceColumnDefinitionApplyConfiguration constructs a declarative configuration of the CustomResourceColumnDefinition type for use with // apply. func CustomResourceColumnDefinition() *CustomResourceColumnDefinitionApplyConfiguration { return &CustomResourceColumnDefinitionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go index 8705d1a21..25e43cc00 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourceconversion.go @@ -19,17 +19,17 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -// CustomResourceConversionApplyConfiguration represents an declarative configuration of the CustomResourceConversion type for use +// CustomResourceConversionApplyConfiguration represents a declarative configuration of the CustomResourceConversion type for use // with apply. type CustomResourceConversionApplyConfiguration struct { - Strategy *v1.ConversionStrategyType `json:"strategy,omitempty"` - Webhook *WebhookConversionApplyConfiguration `json:"webhook,omitempty"` + Strategy *apiextensionsv1.ConversionStrategyType `json:"strategy,omitempty"` + Webhook *WebhookConversionApplyConfiguration `json:"webhook,omitempty"` } -// CustomResourceConversionApplyConfiguration constructs an declarative configuration of the CustomResourceConversion type for use with +// CustomResourceConversionApplyConfiguration constructs a declarative configuration of the CustomResourceConversion type for use with // apply. func CustomResourceConversion() *CustomResourceConversionApplyConfiguration { return &CustomResourceConversionApplyConfiguration{} @@ -38,7 +38,7 @@ func CustomResourceConversion() *CustomResourceConversionApplyConfiguration { // WithStrategy sets the Strategy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Strategy field is set to the value of the last call. -func (b *CustomResourceConversionApplyConfiguration) WithStrategy(value v1.ConversionStrategyType) *CustomResourceConversionApplyConfiguration { +func (b *CustomResourceConversionApplyConfiguration) WithStrategy(value apiextensionsv1.ConversionStrategyType) *CustomResourceConversionApplyConfiguration { b.Strategy = &value return b } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go index f6b6edb7f..110620d65 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinition.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CustomResourceDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceDefinition type for use +// CustomResourceDefinitionApplyConfiguration represents a declarative configuration of the CustomResourceDefinition type for use // with apply. 
type CustomResourceDefinitionApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CustomResourceDefinitionSpecApplyConfiguration `json:"spec,omitempty"` - Status *CustomResourceDefinitionStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CustomResourceDefinitionSpecApplyConfiguration `json:"spec,omitempty"` + Status *CustomResourceDefinitionStatusApplyConfiguration `json:"status,omitempty"` } -// CustomResourceDefinition constructs an declarative configuration of the CustomResourceDefinition type for use with +// CustomResourceDefinition constructs a declarative configuration of the CustomResourceDefinition type for use with // apply. func CustomResourceDefinition(name string) *CustomResourceDefinitionApplyConfiguration { b := &CustomResourceDefinitionApplyConfiguration{} @@ -47,7 +47,7 @@ func CustomResourceDefinition(name string) *CustomResourceDefinitionApplyConfigu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithKind(value string) *CustomResourceDefinitionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -55,7 +55,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithKind(value string) *Cus // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithAPIVersion(value string) *CustomResourceDefinitionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -64,7 +64,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithAPIVersion(value string // If called multiple times, the Name field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithName(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -73,7 +73,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithName(value string) *Cus // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithGenerateName(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -82,7 +82,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithGenerateName(value stri // If called multiple times, the Namespace field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithNamespace(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -91,7 +91,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. 
func (b *CustomResourceDefinitionApplyConfiguration) WithUID(value types.UID) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -100,7 +100,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithUID(value types.UID) *C // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithResourceVersion(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -109,25 +109,25 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithResourceVersion(value s // If called multiple times, the Generation field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithGeneration(value int64) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *CustomResourceDefinitionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { +func (b *CustomResourceDefinitionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { +func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -136,7 +136,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -146,11 +146,11 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionGracePeriodSeco // overwriting an existing map entries in Labels field with the same key. 
func (b *CustomResourceDefinitionApplyConfiguration) WithLabels(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -161,11 +161,11 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithLabels(entries map[stri // overwriting an existing map entries in Annotations field with the same key. func (b *CustomResourceDefinitionApplyConfiguration) WithAnnotations(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -173,13 +173,13 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithAnnotations(entries map // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CustomResourceDefinitionApplyConfiguration { +func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -190,14 +190,14 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values func (b *CustomResourceDefinitionApplyConfiguration) WithFinalizers(values ...string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CustomResourceDefinitionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -216,3 +216,9 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithStatus(value *CustomRes b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CustomResourceDefinitionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go index 2cf9dd4e5..228120520 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitioncondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CustomResourceDefinitionConditionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionCondition type for use +// CustomResourceDefinitionConditionApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionCondition type for use // with apply. type CustomResourceDefinitionConditionApplyConfiguration struct { - Type *v1.CustomResourceDefinitionConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *apiextensionsv1.CustomResourceDefinitionConditionType `json:"type,omitempty"` + Status *apiextensionsv1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// CustomResourceDefinitionConditionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionCondition type for use with +// CustomResourceDefinitionConditionApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionCondition type for use with // apply. func CustomResourceDefinitionCondition() *CustomResourceDefinitionConditionApplyConfiguration { return &CustomResourceDefinitionConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func CustomResourceDefinitionCondition() *CustomResourceDefinitionConditionApply // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value v1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionApplyConfiguration { +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value apiextensionsv1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value v1. // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Status field is set to the value of the last call. -func (b *CustomResourceDefinitionConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *CustomResourceDefinitionConditionApplyConfiguration { +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithStatus(value apiextensionsv1.ConditionStatus) *CustomResourceDefinitionConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go index 06b7a4042..ca0c02f0e 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionnames.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CustomResourceDefinitionNamesApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionNames type for use +// CustomResourceDefinitionNamesApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionNames type for use // with apply. type CustomResourceDefinitionNamesApplyConfiguration struct { Plural *string `json:"plural,omitempty"` @@ -29,7 +29,7 @@ type CustomResourceDefinitionNamesApplyConfiguration struct { Categories []string `json:"categories,omitempty"` } -// CustomResourceDefinitionNamesApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionNames type for use with +// CustomResourceDefinitionNamesApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionNames type for use with // apply. func CustomResourceDefinitionNames() *CustomResourceDefinitionNamesApplyConfiguration { return &CustomResourceDefinitionNamesApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go index 0f52e4b16..9d0573f44 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionspec.go @@ -22,7 +22,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -// CustomResourceDefinitionSpecApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionSpec type for use +// CustomResourceDefinitionSpecApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionSpec type for use // with apply. 
type CustomResourceDefinitionSpecApplyConfiguration struct { Group *string `json:"group,omitempty"` @@ -33,7 +33,7 @@ type CustomResourceDefinitionSpecApplyConfiguration struct { PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty"` } -// CustomResourceDefinitionSpecApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionSpec type for use with +// CustomResourceDefinitionSpecApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionSpec type for use with // apply. func CustomResourceDefinitionSpec() *CustomResourceDefinitionSpecApplyConfiguration { return &CustomResourceDefinitionSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go index a30fb726b..4fd09be5a 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CustomResourceDefinitionStatusApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionStatus type for use +// CustomResourceDefinitionStatusApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionStatus type for use // with apply. type CustomResourceDefinitionStatusApplyConfiguration struct { Conditions []CustomResourceDefinitionConditionApplyConfiguration `json:"conditions,omitempty"` @@ -26,7 +26,7 @@ type CustomResourceDefinitionStatusApplyConfiguration struct { StoredVersions []string `json:"storedVersions,omitempty"` } -// CustomResourceDefinitionStatusApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionStatus type for use with +// CustomResourceDefinitionStatusApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionStatus type for use with // apply. func CustomResourceDefinitionStatus() *CustomResourceDefinitionStatusApplyConfiguration { return &CustomResourceDefinitionStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go index aaf2a139c..f96ba88f4 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcedefinitionversion.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CustomResourceDefinitionVersionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionVersion type for use +// CustomResourceDefinitionVersionApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionVersion type for use // with apply. 
type CustomResourceDefinitionVersionApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type CustomResourceDefinitionVersionApplyConfiguration struct { SelectableFields []SelectableFieldApplyConfiguration `json:"selectableFields,omitempty"` } -// CustomResourceDefinitionVersionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionVersion type for use with +// CustomResourceDefinitionVersionApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionVersion type for use with // apply. func CustomResourceDefinitionVersion() *CustomResourceDefinitionVersionApplyConfiguration { return &CustomResourceDefinitionVersionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go index e91ede178..f8d5be3c7 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresources.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -// CustomResourceSubresourcesApplyConfiguration represents an declarative configuration of the CustomResourceSubresources type for use +// CustomResourceSubresourcesApplyConfiguration represents a declarative configuration of the CustomResourceSubresources type for use // with apply. type CustomResourceSubresourcesApplyConfiguration struct { - Status *v1.CustomResourceSubresourceStatus `json:"status,omitempty"` + Status *apiextensionsv1.CustomResourceSubresourceStatus `json:"status,omitempty"` Scale *CustomResourceSubresourceScaleApplyConfiguration `json:"scale,omitempty"` } -// CustomResourceSubresourcesApplyConfiguration constructs an declarative configuration of the CustomResourceSubresources type for use with +// CustomResourceSubresourcesApplyConfiguration constructs a declarative configuration of the CustomResourceSubresources type for use with // apply. func CustomResourceSubresources() *CustomResourceSubresourcesApplyConfiguration { return &CustomResourceSubresourcesApplyConfiguration{} @@ -38,7 +38,7 @@ func CustomResourceSubresources() *CustomResourceSubresourcesApplyConfiguration // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *CustomResourceSubresourcesApplyConfiguration) WithStatus(value v1.CustomResourceSubresourceStatus) *CustomResourceSubresourcesApplyConfiguration { +func (b *CustomResourceSubresourcesApplyConfiguration) WithStatus(value apiextensionsv1.CustomResourceSubresourceStatus) *CustomResourceSubresourcesApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go index 8159cec2a..7859675fd 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcesubresourcescale.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CustomResourceSubresourceScaleApplyConfiguration represents an declarative configuration of the CustomResourceSubresourceScale type for use +// CustomResourceSubresourceScaleApplyConfiguration represents a declarative configuration of the CustomResourceSubresourceScale type for use // with apply. type CustomResourceSubresourceScaleApplyConfiguration struct { SpecReplicasPath *string `json:"specReplicasPath,omitempty"` @@ -26,7 +26,7 @@ type CustomResourceSubresourceScaleApplyConfiguration struct { LabelSelectorPath *string `json:"labelSelectorPath,omitempty"` } -// CustomResourceSubresourceScaleApplyConfiguration constructs an declarative configuration of the CustomResourceSubresourceScale type for use with +// CustomResourceSubresourceScaleApplyConfiguration constructs a declarative configuration of the CustomResourceSubresourceScale type for use with // apply. func CustomResourceSubresourceScale() *CustomResourceSubresourceScaleApplyConfiguration { return &CustomResourceSubresourceScaleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go index 2e0bcbcb5..6a8cf17d5 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/customresourcevalidation.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// CustomResourceValidationApplyConfiguration represents an declarative configuration of the CustomResourceValidation type for use +// CustomResourceValidationApplyConfiguration represents a declarative configuration of the CustomResourceValidation type for use // with apply. type CustomResourceValidationApplyConfiguration struct { OpenAPIV3Schema *JSONSchemaPropsApplyConfiguration `json:"openAPIV3Schema,omitempty"` } -// CustomResourceValidationApplyConfiguration constructs an declarative configuration of the CustomResourceValidation type for use with +// CustomResourceValidationApplyConfiguration constructs a declarative configuration of the CustomResourceValidation type for use with // apply. 
func CustomResourceValidation() *CustomResourceValidationApplyConfiguration { return &CustomResourceValidationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go index 61856a15c..761a957a0 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/externaldocumentation.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ExternalDocumentationApplyConfiguration represents an declarative configuration of the ExternalDocumentation type for use +// ExternalDocumentationApplyConfiguration represents a declarative configuration of the ExternalDocumentation type for use // with apply. type ExternalDocumentationApplyConfiguration struct { Description *string `json:"description,omitempty"` URL *string `json:"url,omitempty"` } -// ExternalDocumentationApplyConfiguration constructs an declarative configuration of the ExternalDocumentation type for use with +// ExternalDocumentationApplyConfiguration constructs a declarative configuration of the ExternalDocumentation type for use with // apply. func ExternalDocumentation() *ExternalDocumentationApplyConfiguration { return &ExternalDocumentationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go index 730203add..d6595ce1d 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/jsonschemaprops.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -// JSONSchemaPropsApplyConfiguration represents an declarative configuration of the JSONSchemaProps type for use +// JSONSchemaPropsApplyConfiguration represents a declarative configuration of the JSONSchemaProps type for use // with apply. 
type JSONSchemaPropsApplyConfiguration struct { ID *string `json:"id,omitempty"` - Schema *v1.JSONSchemaURL `json:"$schema,omitempty"` + Schema *apiextensionsv1.JSONSchemaURL `json:"$schema,omitempty"` Ref *string `json:"$ref,omitempty"` Description *string `json:"description,omitempty"` Type *string `json:"type,omitempty"` Format *string `json:"format,omitempty"` Title *string `json:"title,omitempty"` - Default *v1.JSON `json:"default,omitempty"` + Default *apiextensionsv1.JSON `json:"default,omitempty"` Maximum *float64 `json:"maximum,omitempty"` ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"` Minimum *float64 `json:"minimum,omitempty"` @@ -44,23 +44,23 @@ type JSONSchemaPropsApplyConfiguration struct { MinItems *int64 `json:"minItems,omitempty"` UniqueItems *bool `json:"uniqueItems,omitempty"` MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []v1.JSON `json:"enum,omitempty"` + Enum []apiextensionsv1.JSON `json:"enum,omitempty"` MaxProperties *int64 `json:"maxProperties,omitempty"` MinProperties *int64 `json:"minProperties,omitempty"` Required []string `json:"required,omitempty"` - Items *v1.JSONSchemaPropsOrArray `json:"items,omitempty"` + Items *apiextensionsv1.JSONSchemaPropsOrArray `json:"items,omitempty"` AllOf []JSONSchemaPropsApplyConfiguration `json:"allOf,omitempty"` OneOf []JSONSchemaPropsApplyConfiguration `json:"oneOf,omitempty"` AnyOf []JSONSchemaPropsApplyConfiguration `json:"anyOf,omitempty"` Not *JSONSchemaPropsApplyConfiguration `json:"not,omitempty"` Properties map[string]JSONSchemaPropsApplyConfiguration `json:"properties,omitempty"` - AdditionalProperties *v1.JSONSchemaPropsOrBool `json:"additionalProperties,omitempty"` + AdditionalProperties *apiextensionsv1.JSONSchemaPropsOrBool `json:"additionalProperties,omitempty"` PatternProperties map[string]JSONSchemaPropsApplyConfiguration `json:"patternProperties,omitempty"` - Dependencies *v1.JSONSchemaDependencies `json:"dependencies,omitempty"` - AdditionalItems *v1.JSONSchemaPropsOrBool `json:"additionalItems,omitempty"` - Definitions *v1.JSONSchemaDefinitions `json:"definitions,omitempty"` + Dependencies *apiextensionsv1.JSONSchemaDependencies `json:"dependencies,omitempty"` + AdditionalItems *apiextensionsv1.JSONSchemaPropsOrBool `json:"additionalItems,omitempty"` + Definitions *apiextensionsv1.JSONSchemaDefinitions `json:"definitions,omitempty"` ExternalDocs *ExternalDocumentationApplyConfiguration `json:"externalDocs,omitempty"` - Example *v1.JSON `json:"example,omitempty"` + Example *apiextensionsv1.JSON `json:"example,omitempty"` Nullable *bool `json:"nullable,omitempty"` XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty"` XEmbeddedResource *bool `json:"x-kubernetes-embedded-resource,omitempty"` @@ -68,10 +68,10 @@ type JSONSchemaPropsApplyConfiguration struct { XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty"` XListType *string `json:"x-kubernetes-list-type,omitempty"` XMapType *string `json:"x-kubernetes-map-type,omitempty"` - XValidations *v1.ValidationRules `json:"x-kubernetes-validations,omitempty"` + XValidations *apiextensionsv1.ValidationRules `json:"x-kubernetes-validations,omitempty"` } -// JSONSchemaPropsApplyConfiguration constructs an declarative configuration of the JSONSchemaProps type for use with +// JSONSchemaPropsApplyConfiguration constructs a declarative configuration of the JSONSchemaProps type for use with // apply. 
func JSONSchemaProps() *JSONSchemaPropsApplyConfiguration { return &JSONSchemaPropsApplyConfiguration{} @@ -88,7 +88,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithID(value string) *JSONSchemaProp // WithSchema sets the Schema field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Schema field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithSchema(value v1.JSONSchemaURL) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithSchema(value apiextensionsv1.JSONSchemaURL) *JSONSchemaPropsApplyConfiguration { b.Schema = &value return b } @@ -136,7 +136,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithTitle(value string) *JSONSchemaP // WithDefault sets the Default field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Default field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithDefault(value v1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDefault(value apiextensionsv1.JSON) *JSONSchemaPropsApplyConfiguration { b.Default = &value return b } @@ -232,7 +232,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithMultipleOf(value float64) *JSONS // WithEnum adds the given value to the Enum field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Enum field. -func (b *JSONSchemaPropsApplyConfiguration) WithEnum(values ...v1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithEnum(values ...apiextensionsv1.JSON) *JSONSchemaPropsApplyConfiguration { for i := range values { b.Enum = append(b.Enum, values[i]) } @@ -268,7 +268,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithRequired(values ...string) *JSON // WithItems sets the Items field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Items field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithItems(value v1.JSONSchemaPropsOrArray) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithItems(value apiextensionsv1.JSONSchemaPropsOrArray) *JSONSchemaPropsApplyConfiguration { b.Items = &value return b } @@ -337,7 +337,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithProperties(entries map[string]JS // WithAdditionalProperties sets the AdditionalProperties field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AdditionalProperties field is set to the value of the last call. 
-func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalProperties(value v1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalProperties(value apiextensionsv1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { b.AdditionalProperties = &value return b } @@ -359,7 +359,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithPatternProperties(entries map[st // WithDependencies sets the Dependencies field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Dependencies field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value v1.JSONSchemaDependencies) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value apiextensionsv1.JSONSchemaDependencies) *JSONSchemaPropsApplyConfiguration { b.Dependencies = &value return b } @@ -367,7 +367,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value v1.JSONSchema // WithAdditionalItems sets the AdditionalItems field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AdditionalItems field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value v1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value apiextensionsv1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { b.AdditionalItems = &value return b } @@ -375,7 +375,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value v1.JSONSch // WithDefinitions sets the Definitions field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Definitions field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithDefinitions(value v1.JSONSchemaDefinitions) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDefinitions(value apiextensionsv1.JSONSchemaDefinitions) *JSONSchemaPropsApplyConfiguration { b.Definitions = &value return b } @@ -391,7 +391,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithExternalDocs(value *ExternalDocu // WithExample sets the Example field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Example field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithExample(value v1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithExample(value apiextensionsv1.JSON) *JSONSchemaPropsApplyConfiguration { b.Example = &value return b } @@ -457,7 +457,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithXMapType(value string) *JSONSche // WithXValidations sets the XValidations field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the XValidations field is set to the value of the last call. 
-func (b *JSONSchemaPropsApplyConfiguration) WithXValidations(value v1.ValidationRules) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithXValidations(value apiextensionsv1.ValidationRules) *JSONSchemaPropsApplyConfiguration { b.XValidations = &value return b } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go index 876dfa71c..33f655a76 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/selectablefield.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// SelectableFieldApplyConfiguration represents an declarative configuration of the SelectableField type for use +// SelectableFieldApplyConfiguration represents a declarative configuration of the SelectableField type for use // with apply. type SelectableFieldApplyConfiguration struct { JSONPath *string `json:"jsonPath,omitempty"` } -// SelectableFieldApplyConfiguration constructs an declarative configuration of the SelectableField type for use with +// SelectableFieldApplyConfiguration constructs a declarative configuration of the SelectableField type for use with // apply. func SelectableField() *SelectableFieldApplyConfiguration { return &SelectableFieldApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go index 2cd55d9ea..239780664 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go index c0eb0b51b..5ee414928 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/validationrule.go @@ -19,21 +19,21 @@ limitations under the License. 
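Editor's note (illustrative aside, not part of the vendored patch): the regenerated v1 JSONSchemaProps apply configuration above keeps the same chained "With" builder surface; only the import alias for JSON-valued fields moved from `v1` to `apiextensionsv1`. A minimal usage sketch under that assumption follows. `WithType` is not shown in this excerpt but follows the same generated pattern, and the property name `replicas` is made up for the example.

```go
package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	applyv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1"
)

func main() {
	// Build an openAPIV3Schema fragment by chaining "With" calls; each call
	// returns the receiver, so the schema can be declared in one expression.
	schema := applyv1.JSONSchemaProps().
		WithType("object").
		WithRequired("replicas").
		WithProperties(map[string]applyv1.JSONSchemaPropsApplyConfiguration{
			// JSON-valued fields (default, enum, example) now take the
			// apiextensionsv1 types, matching the alias rename in this diff.
			"replicas": *applyv1.JSONSchemaProps().
				WithType("integer").
				WithDefault(apiextensionsv1.JSON{Raw: []byte("1")}),
		})

	fmt.Println(*schema.Type, *schema.Properties["replicas"].Type)
}
```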
package v1 import ( - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -// ValidationRuleApplyConfiguration represents an declarative configuration of the ValidationRule type for use +// ValidationRuleApplyConfiguration represents a declarative configuration of the ValidationRule type for use // with apply. type ValidationRuleApplyConfiguration struct { - Rule *string `json:"rule,omitempty"` - Message *string `json:"message,omitempty"` - MessageExpression *string `json:"messageExpression,omitempty"` - Reason *v1.FieldValueErrorReason `json:"reason,omitempty"` - FieldPath *string `json:"fieldPath,omitempty"` - OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` + Rule *string `json:"rule,omitempty"` + Message *string `json:"message,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` + Reason *apiextensionsv1.FieldValueErrorReason `json:"reason,omitempty"` + FieldPath *string `json:"fieldPath,omitempty"` + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` } -// ValidationRuleApplyConfiguration constructs an declarative configuration of the ValidationRule type for use with +// ValidationRuleApplyConfiguration constructs a declarative configuration of the ValidationRule type for use with // apply. func ValidationRule() *ValidationRuleApplyConfiguration { return &ValidationRuleApplyConfiguration{} @@ -66,7 +66,7 @@ func (b *ValidationRuleApplyConfiguration) WithMessageExpression(value string) * // WithReason sets the Reason field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Reason field is set to the value of the last call. -func (b *ValidationRuleApplyConfiguration) WithReason(value v1.FieldValueErrorReason) *ValidationRuleApplyConfiguration { +func (b *ValidationRuleApplyConfiguration) WithReason(value apiextensionsv1.FieldValueErrorReason) *ValidationRuleApplyConfiguration { b.Reason = &value return b } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go index aa358ae20..77f2227b9 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. 
func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go index 2af1b70ba..884fbc5fa 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1/webhookconversion.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// WebhookConversionApplyConfiguration represents an declarative configuration of the WebhookConversion type for use +// WebhookConversionApplyConfiguration represents a declarative configuration of the WebhookConversion type for use // with apply. type WebhookConversionApplyConfiguration struct { ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"` ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty"` } -// WebhookConversionApplyConfiguration constructs an declarative configuration of the WebhookConversion type for use with +// WebhookConversionApplyConfiguration constructs a declarative configuration of the WebhookConversion type for use with // apply. func WebhookConversion() *WebhookConversionApplyConfiguration { return &WebhookConversionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go index e47317843..9ee2318d1 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcecolumndefinition.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CustomResourceColumnDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceColumnDefinition type for use +// CustomResourceColumnDefinitionApplyConfiguration represents a declarative configuration of the CustomResourceColumnDefinition type for use // with apply. type CustomResourceColumnDefinitionApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -29,7 +29,7 @@ type CustomResourceColumnDefinitionApplyConfiguration struct { JSONPath *string `json:"JSONPath,omitempty"` } -// CustomResourceColumnDefinitionApplyConfiguration constructs an declarative configuration of the CustomResourceColumnDefinition type for use with +// CustomResourceColumnDefinitionApplyConfiguration constructs a declarative configuration of the CustomResourceColumnDefinition type for use with // apply. 
func CustomResourceColumnDefinition() *CustomResourceColumnDefinitionApplyConfiguration { return &CustomResourceColumnDefinitionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go index d9825f85a..f652c96d5 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourceconversion.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) -// CustomResourceConversionApplyConfiguration represents an declarative configuration of the CustomResourceConversion type for use +// CustomResourceConversionApplyConfiguration represents a declarative configuration of the CustomResourceConversion type for use // with apply. type CustomResourceConversionApplyConfiguration struct { - Strategy *v1beta1.ConversionStrategyType `json:"strategy,omitempty"` - WebhookClientConfig *WebhookClientConfigApplyConfiguration `json:"webhookClientConfig,omitempty"` - ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty"` + Strategy *apiextensionsv1beta1.ConversionStrategyType `json:"strategy,omitempty"` + WebhookClientConfig *WebhookClientConfigApplyConfiguration `json:"webhookClientConfig,omitempty"` + ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty"` } -// CustomResourceConversionApplyConfiguration constructs an declarative configuration of the CustomResourceConversion type for use with +// CustomResourceConversionApplyConfiguration constructs a declarative configuration of the CustomResourceConversion type for use with // apply. func CustomResourceConversion() *CustomResourceConversionApplyConfiguration { return &CustomResourceConversionApplyConfiguration{} @@ -39,7 +39,7 @@ func CustomResourceConversion() *CustomResourceConversionApplyConfiguration { // WithStrategy sets the Strategy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Strategy field is set to the value of the last call. 
-func (b *CustomResourceConversionApplyConfiguration) WithStrategy(value v1beta1.ConversionStrategyType) *CustomResourceConversionApplyConfiguration { +func (b *CustomResourceConversionApplyConfiguration) WithStrategy(value apiextensionsv1beta1.ConversionStrategyType) *CustomResourceConversionApplyConfiguration { b.Strategy = &value return b } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go index 9117748c7..d56cff21f 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinition.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CustomResourceDefinitionApplyConfiguration represents an declarative configuration of the CustomResourceDefinition type for use +// CustomResourceDefinitionApplyConfiguration represents a declarative configuration of the CustomResourceDefinition type for use // with apply. type CustomResourceDefinitionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -33,7 +33,7 @@ type CustomResourceDefinitionApplyConfiguration struct { Status *CustomResourceDefinitionStatusApplyConfiguration `json:"status,omitempty"` } -// CustomResourceDefinition constructs an declarative configuration of the CustomResourceDefinition type for use with +// CustomResourceDefinition constructs a declarative configuration of the CustomResourceDefinition type for use with // apply. func CustomResourceDefinition(name string) *CustomResourceDefinitionApplyConfiguration { b := &CustomResourceDefinitionApplyConfiguration{} @@ -47,7 +47,7 @@ func CustomResourceDefinition(name string) *CustomResourceDefinitionApplyConfigu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithKind(value string) *CustomResourceDefinitionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -55,7 +55,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithKind(value string) *Cus // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithAPIVersion(value string) *CustomResourceDefinitionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -64,7 +64,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithAPIVersion(value string // If called multiple times, the Name field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithName(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -73,7 +73,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithName(value string) *Cus // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *CustomResourceDefinitionApplyConfiguration) WithGenerateName(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -82,7 +82,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithGenerateName(value stri // If called multiple times, the Namespace field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithNamespace(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -91,7 +91,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithUID(value types.UID) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -100,7 +100,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithUID(value types.UID) *C // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithResourceVersion(value string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -109,7 +109,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithResourceVersion(value s // If called multiple times, the Generation field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithGeneration(value int64) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -118,7 +118,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithGeneration(value int64) // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -127,7 +127,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithCreationTimestamp(value // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -136,7 +136,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -146,11 +146,11 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithDeletionGracePeriodSeco // overwriting an existing map entries in Labels field with the same key. func (b *CustomResourceDefinitionApplyConfiguration) WithLabels(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -161,11 +161,11 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithLabels(entries map[stri // overwriting an existing map entries in Annotations field with the same key. func (b *CustomResourceDefinitionApplyConfiguration) WithAnnotations(entries map[string]string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -179,7 +179,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -190,7 +190,7 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithOwnerReferences(values func (b *CustomResourceDefinitionApplyConfiguration) WithFinalizers(values ...string) *CustomResourceDefinitionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -216,3 +216,9 @@ func (b *CustomResourceDefinitionApplyConfiguration) WithStatus(value *CustomRes b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CustomResourceDefinitionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go index cf2400c9c..4d19e1b5b 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitioncondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CustomResourceDefinitionConditionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionCondition type for use +// CustomResourceDefinitionConditionApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionCondition type for use // with apply. type CustomResourceDefinitionConditionApplyConfiguration struct { - Type *v1beta1.CustomResourceDefinitionConditionType `json:"type,omitempty"` - Status *v1beta1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *apiextensionsv1beta1.CustomResourceDefinitionConditionType `json:"type,omitempty"` + Status *apiextensionsv1beta1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// CustomResourceDefinitionConditionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionCondition type for use with +// CustomResourceDefinitionConditionApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionCondition type for use with // apply. func CustomResourceDefinitionCondition() *CustomResourceDefinitionConditionApplyConfiguration { return &CustomResourceDefinitionConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func CustomResourceDefinitionCondition() *CustomResourceDefinitionConditionApply // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
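Editor's note (illustrative aside, not part of the vendored patch): two things change in the v1beta1 CustomResourceDefinition builder above. The generated setters now spell out the embedded structs (for example `b.Kind` becomes `b.TypeMetaApplyConfiguration.Kind`), which is behavior-neutral because the promoted field and the qualified path name the same storage, and a `GetName` accessor is added. A short sketch of both, using only calls shown in this diff:

```go
package main

import (
	"fmt"

	applyv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1"
)

func main() {
	// The generated constructor seeds ObjectMeta.Name (and kind/apiVersion);
	// WithLabels writes through the embedded ObjectMetaApplyConfiguration,
	// exactly as the rewritten setter shows.
	crd := applyv1beta1.CustomResourceDefinition("widgets.example.com").
		WithLabels(map[string]string{"app": "demo"})

	// New in this diff: GetName reads ObjectMetaApplyConfiguration.Name back
	// without reaching into the embedded struct by hand.
	fmt.Println(*crd.GetName()) // widgets.example.com

	// The qualified-field rewrite is cosmetic: the promoted field and the
	// explicit path refer to the same field.
	fmt.Println(crd.Kind == crd.TypeMetaApplyConfiguration.Kind) // true
}
```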
-func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value v1beta1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionApplyConfiguration { +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value apiextensionsv1beta1.CustomResourceDefinitionConditionType) *CustomResourceDefinitionConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *CustomResourceDefinitionConditionApplyConfiguration) WithType(value v1b // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *CustomResourceDefinitionConditionApplyConfiguration) WithStatus(value v1beta1.ConditionStatus) *CustomResourceDefinitionConditionApplyConfiguration { +func (b *CustomResourceDefinitionConditionApplyConfiguration) WithStatus(value apiextensionsv1beta1.ConditionStatus) *CustomResourceDefinitionConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go index a20200723..44b49bcbb 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionnames.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CustomResourceDefinitionNamesApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionNames type for use +// CustomResourceDefinitionNamesApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionNames type for use // with apply. type CustomResourceDefinitionNamesApplyConfiguration struct { Plural *string `json:"plural,omitempty"` @@ -29,7 +29,7 @@ type CustomResourceDefinitionNamesApplyConfiguration struct { Categories []string `json:"categories,omitempty"` } -// CustomResourceDefinitionNamesApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionNames type for use with +// CustomResourceDefinitionNamesApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionNames type for use with // apply. 
func CustomResourceDefinitionNames() *CustomResourceDefinitionNamesApplyConfiguration { return &CustomResourceDefinitionNamesApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go index 49f4e433c..5046882ae 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionspec.go @@ -22,7 +22,7 @@ import ( apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) -// CustomResourceDefinitionSpecApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionSpec type for use +// CustomResourceDefinitionSpecApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionSpec type for use // with apply. type CustomResourceDefinitionSpecApplyConfiguration struct { Group *string `json:"group,omitempty"` @@ -38,7 +38,7 @@ type CustomResourceDefinitionSpecApplyConfiguration struct { PreserveUnknownFields *bool `json:"preserveUnknownFields,omitempty"` } -// CustomResourceDefinitionSpecApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionSpec type for use with +// CustomResourceDefinitionSpecApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionSpec type for use with // apply. func CustomResourceDefinitionSpec() *CustomResourceDefinitionSpecApplyConfiguration { return &CustomResourceDefinitionSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go index 79b2ebdab..2c9c5e23c 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CustomResourceDefinitionStatusApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionStatus type for use +// CustomResourceDefinitionStatusApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionStatus type for use // with apply. type CustomResourceDefinitionStatusApplyConfiguration struct { Conditions []CustomResourceDefinitionConditionApplyConfiguration `json:"conditions,omitempty"` @@ -26,7 +26,7 @@ type CustomResourceDefinitionStatusApplyConfiguration struct { StoredVersions []string `json:"storedVersions,omitempty"` } -// CustomResourceDefinitionStatusApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionStatus type for use with +// CustomResourceDefinitionStatusApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionStatus type for use with // apply. 
func CustomResourceDefinitionStatus() *CustomResourceDefinitionStatusApplyConfiguration { return &CustomResourceDefinitionStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go index e110a1ec5..19ac26b03 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcedefinitionversion.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CustomResourceDefinitionVersionApplyConfiguration represents an declarative configuration of the CustomResourceDefinitionVersion type for use +// CustomResourceDefinitionVersionApplyConfiguration represents a declarative configuration of the CustomResourceDefinitionVersion type for use // with apply. type CustomResourceDefinitionVersionApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,7 +32,7 @@ type CustomResourceDefinitionVersionApplyConfiguration struct { SelectableFields []SelectableFieldApplyConfiguration `json:"selectableFields,omitempty"` } -// CustomResourceDefinitionVersionApplyConfiguration constructs an declarative configuration of the CustomResourceDefinitionVersion type for use with +// CustomResourceDefinitionVersionApplyConfiguration constructs a declarative configuration of the CustomResourceDefinitionVersion type for use with // apply. func CustomResourceDefinitionVersion() *CustomResourceDefinitionVersionApplyConfiguration { return &CustomResourceDefinitionVersionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go index a62f8a205..3ee82a037 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresources.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) -// CustomResourceSubresourcesApplyConfiguration represents an declarative configuration of the CustomResourceSubresources type for use +// CustomResourceSubresourcesApplyConfiguration represents a declarative configuration of the CustomResourceSubresources type for use // with apply. 
type CustomResourceSubresourcesApplyConfiguration struct { - Status *v1beta1.CustomResourceSubresourceStatus `json:"status,omitempty"` - Scale *CustomResourceSubresourceScaleApplyConfiguration `json:"scale,omitempty"` + Status *apiextensionsv1beta1.CustomResourceSubresourceStatus `json:"status,omitempty"` + Scale *CustomResourceSubresourceScaleApplyConfiguration `json:"scale,omitempty"` } -// CustomResourceSubresourcesApplyConfiguration constructs an declarative configuration of the CustomResourceSubresources type for use with +// CustomResourceSubresourcesApplyConfiguration constructs a declarative configuration of the CustomResourceSubresources type for use with // apply. func CustomResourceSubresources() *CustomResourceSubresourcesApplyConfiguration { return &CustomResourceSubresourcesApplyConfiguration{} @@ -38,7 +38,7 @@ func CustomResourceSubresources() *CustomResourceSubresourcesApplyConfiguration // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *CustomResourceSubresourcesApplyConfiguration) WithStatus(value v1beta1.CustomResourceSubresourceStatus) *CustomResourceSubresourcesApplyConfiguration { +func (b *CustomResourceSubresourcesApplyConfiguration) WithStatus(value apiextensionsv1beta1.CustomResourceSubresourceStatus) *CustomResourceSubresourcesApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go index 72934ce97..b94d0e668 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcesubresourcescale.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CustomResourceSubresourceScaleApplyConfiguration represents an declarative configuration of the CustomResourceSubresourceScale type for use +// CustomResourceSubresourceScaleApplyConfiguration represents a declarative configuration of the CustomResourceSubresourceScale type for use // with apply. type CustomResourceSubresourceScaleApplyConfiguration struct { SpecReplicasPath *string `json:"specReplicasPath,omitempty"` @@ -26,7 +26,7 @@ type CustomResourceSubresourceScaleApplyConfiguration struct { LabelSelectorPath *string `json:"labelSelectorPath,omitempty"` } -// CustomResourceSubresourceScaleApplyConfiguration constructs an declarative configuration of the CustomResourceSubresourceScale type for use with +// CustomResourceSubresourceScaleApplyConfiguration constructs a declarative configuration of the CustomResourceSubresourceScale type for use with // apply. 
func CustomResourceSubresourceScale() *CustomResourceSubresourceScaleApplyConfiguration { return &CustomResourceSubresourceScaleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go index 9f65653da..a5cf3c096 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/customresourcevalidation.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// CustomResourceValidationApplyConfiguration represents an declarative configuration of the CustomResourceValidation type for use +// CustomResourceValidationApplyConfiguration represents a declarative configuration of the CustomResourceValidation type for use // with apply. type CustomResourceValidationApplyConfiguration struct { OpenAPIV3Schema *JSONSchemaPropsApplyConfiguration `json:"openAPIV3Schema,omitempty"` } -// CustomResourceValidationApplyConfiguration constructs an declarative configuration of the CustomResourceValidation type for use with +// CustomResourceValidationApplyConfiguration constructs a declarative configuration of the CustomResourceValidation type for use with // apply. func CustomResourceValidation() *CustomResourceValidationApplyConfiguration { return &CustomResourceValidationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go index 360f6d969..5140d66ce 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/externaldocumentation.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ExternalDocumentationApplyConfiguration represents an declarative configuration of the ExternalDocumentation type for use +// ExternalDocumentationApplyConfiguration represents a declarative configuration of the ExternalDocumentation type for use // with apply. type ExternalDocumentationApplyConfiguration struct { Description *string `json:"description,omitempty"` URL *string `json:"url,omitempty"` } -// ExternalDocumentationApplyConfiguration constructs an declarative configuration of the ExternalDocumentation type for use with +// ExternalDocumentationApplyConfiguration constructs a declarative configuration of the ExternalDocumentation type for use with // apply. 
func ExternalDocumentation() *ExternalDocumentationApplyConfiguration { return &ExternalDocumentationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go index 158b5750d..b90b9281c 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/jsonschemaprops.go @@ -19,20 +19,20 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) -// JSONSchemaPropsApplyConfiguration represents an declarative configuration of the JSONSchemaProps type for use +// JSONSchemaPropsApplyConfiguration represents a declarative configuration of the JSONSchemaProps type for use // with apply. type JSONSchemaPropsApplyConfiguration struct { ID *string `json:"id,omitempty"` - Schema *v1beta1.JSONSchemaURL `json:"$schema,omitempty"` + Schema *apiextensionsv1beta1.JSONSchemaURL `json:"$schema,omitempty"` Ref *string `json:"$ref,omitempty"` Description *string `json:"description,omitempty"` Type *string `json:"type,omitempty"` Format *string `json:"format,omitempty"` Title *string `json:"title,omitempty"` - Default *v1beta1.JSON `json:"default,omitempty"` + Default *apiextensionsv1beta1.JSON `json:"default,omitempty"` Maximum *float64 `json:"maximum,omitempty"` ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"` Minimum *float64 `json:"minimum,omitempty"` @@ -44,23 +44,23 @@ type JSONSchemaPropsApplyConfiguration struct { MinItems *int64 `json:"minItems,omitempty"` UniqueItems *bool `json:"uniqueItems,omitempty"` MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []v1beta1.JSON `json:"enum,omitempty"` + Enum []apiextensionsv1beta1.JSON `json:"enum,omitempty"` MaxProperties *int64 `json:"maxProperties,omitempty"` MinProperties *int64 `json:"minProperties,omitempty"` Required []string `json:"required,omitempty"` - Items *v1beta1.JSONSchemaPropsOrArray `json:"items,omitempty"` + Items *apiextensionsv1beta1.JSONSchemaPropsOrArray `json:"items,omitempty"` AllOf []JSONSchemaPropsApplyConfiguration `json:"allOf,omitempty"` OneOf []JSONSchemaPropsApplyConfiguration `json:"oneOf,omitempty"` AnyOf []JSONSchemaPropsApplyConfiguration `json:"anyOf,omitempty"` Not *JSONSchemaPropsApplyConfiguration `json:"not,omitempty"` Properties map[string]JSONSchemaPropsApplyConfiguration `json:"properties,omitempty"` - AdditionalProperties *v1beta1.JSONSchemaPropsOrBool `json:"additionalProperties,omitempty"` + AdditionalProperties *apiextensionsv1beta1.JSONSchemaPropsOrBool `json:"additionalProperties,omitempty"` PatternProperties map[string]JSONSchemaPropsApplyConfiguration `json:"patternProperties,omitempty"` - Dependencies *v1beta1.JSONSchemaDependencies `json:"dependencies,omitempty"` - AdditionalItems *v1beta1.JSONSchemaPropsOrBool `json:"additionalItems,omitempty"` - Definitions *v1beta1.JSONSchemaDefinitions `json:"definitions,omitempty"` + Dependencies *apiextensionsv1beta1.JSONSchemaDependencies `json:"dependencies,omitempty"` + AdditionalItems *apiextensionsv1beta1.JSONSchemaPropsOrBool `json:"additionalItems,omitempty"` + Definitions 
*apiextensionsv1beta1.JSONSchemaDefinitions `json:"definitions,omitempty"` ExternalDocs *ExternalDocumentationApplyConfiguration `json:"externalDocs,omitempty"` - Example *v1beta1.JSON `json:"example,omitempty"` + Example *apiextensionsv1beta1.JSON `json:"example,omitempty"` Nullable *bool `json:"nullable,omitempty"` XPreserveUnknownFields *bool `json:"x-kubernetes-preserve-unknown-fields,omitempty"` XEmbeddedResource *bool `json:"x-kubernetes-embedded-resource,omitempty"` @@ -68,10 +68,10 @@ type JSONSchemaPropsApplyConfiguration struct { XListMapKeys []string `json:"x-kubernetes-list-map-keys,omitempty"` XListType *string `json:"x-kubernetes-list-type,omitempty"` XMapType *string `json:"x-kubernetes-map-type,omitempty"` - XValidations *v1beta1.ValidationRules `json:"x-kubernetes-validations,omitempty"` + XValidations *apiextensionsv1beta1.ValidationRules `json:"x-kubernetes-validations,omitempty"` } -// JSONSchemaPropsApplyConfiguration constructs an declarative configuration of the JSONSchemaProps type for use with +// JSONSchemaPropsApplyConfiguration constructs a declarative configuration of the JSONSchemaProps type for use with // apply. func JSONSchemaProps() *JSONSchemaPropsApplyConfiguration { return &JSONSchemaPropsApplyConfiguration{} @@ -88,7 +88,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithID(value string) *JSONSchemaProp // WithSchema sets the Schema field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Schema field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithSchema(value v1beta1.JSONSchemaURL) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithSchema(value apiextensionsv1beta1.JSONSchemaURL) *JSONSchemaPropsApplyConfiguration { b.Schema = &value return b } @@ -136,7 +136,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithTitle(value string) *JSONSchemaP // WithDefault sets the Default field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Default field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithDefault(value v1beta1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDefault(value apiextensionsv1beta1.JSON) *JSONSchemaPropsApplyConfiguration { b.Default = &value return b } @@ -232,7 +232,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithMultipleOf(value float64) *JSONS // WithEnum adds the given value to the Enum field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Enum field. -func (b *JSONSchemaPropsApplyConfiguration) WithEnum(values ...v1beta1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithEnum(values ...apiextensionsv1beta1.JSON) *JSONSchemaPropsApplyConfiguration { for i := range values { b.Enum = append(b.Enum, values[i]) } @@ -268,7 +268,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithRequired(values ...string) *JSON // WithItems sets the Items field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Items field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithItems(value v1beta1.JSONSchemaPropsOrArray) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithItems(value apiextensionsv1beta1.JSONSchemaPropsOrArray) *JSONSchemaPropsApplyConfiguration { b.Items = &value return b } @@ -337,7 +337,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithProperties(entries map[string]JS // WithAdditionalProperties sets the AdditionalProperties field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AdditionalProperties field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalProperties(value v1beta1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalProperties(value apiextensionsv1beta1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { b.AdditionalProperties = &value return b } @@ -359,7 +359,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithPatternProperties(entries map[st // WithDependencies sets the Dependencies field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Dependencies field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value v1beta1.JSONSchemaDependencies) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value apiextensionsv1beta1.JSONSchemaDependencies) *JSONSchemaPropsApplyConfiguration { b.Dependencies = &value return b } @@ -367,7 +367,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithDependencies(value v1beta1.JSONS // WithAdditionalItems sets the AdditionalItems field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AdditionalItems field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value v1beta1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value apiextensionsv1beta1.JSONSchemaPropsOrBool) *JSONSchemaPropsApplyConfiguration { b.AdditionalItems = &value return b } @@ -375,7 +375,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithAdditionalItems(value v1beta1.JS // WithDefinitions sets the Definitions field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Definitions field is set to the value of the last call. 
-func (b *JSONSchemaPropsApplyConfiguration) WithDefinitions(value v1beta1.JSONSchemaDefinitions) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithDefinitions(value apiextensionsv1beta1.JSONSchemaDefinitions) *JSONSchemaPropsApplyConfiguration { b.Definitions = &value return b } @@ -391,7 +391,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithExternalDocs(value *ExternalDocu // WithExample sets the Example field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Example field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithExample(value v1beta1.JSON) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithExample(value apiextensionsv1beta1.JSON) *JSONSchemaPropsApplyConfiguration { b.Example = &value return b } @@ -457,7 +457,7 @@ func (b *JSONSchemaPropsApplyConfiguration) WithXMapType(value string) *JSONSche // WithXValidations sets the XValidations field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the XValidations field is set to the value of the last call. -func (b *JSONSchemaPropsApplyConfiguration) WithXValidations(value v1beta1.ValidationRules) *JSONSchemaPropsApplyConfiguration { +func (b *JSONSchemaPropsApplyConfiguration) WithXValidations(value apiextensionsv1beta1.ValidationRules) *JSONSchemaPropsApplyConfiguration { b.XValidations = &value return b } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go index 8729d9586..1a372e6fa 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/selectablefield.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// SelectableFieldApplyConfiguration represents an declarative configuration of the SelectableField type for use +// SelectableFieldApplyConfiguration represents a declarative configuration of the SelectableField type for use // with apply. type SelectableFieldApplyConfiguration struct { JSONPath *string `json:"jsonPath,omitempty"` } -// SelectableFieldApplyConfiguration constructs an declarative configuration of the SelectableField type for use with +// SelectableFieldApplyConfiguration constructs a declarative configuration of the SelectableField type for use with // apply. func SelectableField() *SelectableFieldApplyConfiguration { return &SelectableFieldApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go index c21b57490..70cc6b5b2 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go index 1b0df078b..c9b3da89b 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/validationrule.go @@ -19,21 +19,21 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" ) -// ValidationRuleApplyConfiguration represents an declarative configuration of the ValidationRule type for use +// ValidationRuleApplyConfiguration represents a declarative configuration of the ValidationRule type for use // with apply. type ValidationRuleApplyConfiguration struct { - Rule *string `json:"rule,omitempty"` - Message *string `json:"message,omitempty"` - MessageExpression *string `json:"messageExpression,omitempty"` - Reason *v1beta1.FieldValueErrorReason `json:"reason,omitempty"` - FieldPath *string `json:"fieldPath,omitempty"` - OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` + Rule *string `json:"rule,omitempty"` + Message *string `json:"message,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` + Reason *apiextensionsv1beta1.FieldValueErrorReason `json:"reason,omitempty"` + FieldPath *string `json:"fieldPath,omitempty"` + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` } -// ValidationRuleApplyConfiguration constructs an declarative configuration of the ValidationRule type for use with +// ValidationRuleApplyConfiguration constructs a declarative configuration of the ValidationRule type for use with // apply. func ValidationRule() *ValidationRuleApplyConfiguration { return &ValidationRuleApplyConfiguration{} @@ -66,7 +66,7 @@ func (b *ValidationRuleApplyConfiguration) WithMessageExpression(value string) * // WithReason sets the Reason field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Reason field is set to the value of the last call. 
-func (b *ValidationRuleApplyConfiguration) WithReason(value v1beta1.FieldValueErrorReason) *ValidationRuleApplyConfiguration { +func (b *ValidationRuleApplyConfiguration) WithReason(value apiextensionsv1beta1.FieldValueErrorReason) *ValidationRuleApplyConfiguration { b.Reason = &value return b } diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go index 490f9d5f3..76ff71b4a 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go index 449285e17..93dd79d63 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go @@ -19,8 +19,8 @@ limitations under the License. package clientset import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go index 0bdc44c40..cd766a2dc 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/apiextensions_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *ApiextensionsV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := apiextensionsv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go index 3949426cb..1197071d0 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/customresourcedefinition.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + applyconfigurationapiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1" scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. @@ -41,203 +38,40 @@ type CustomResourceDefinitionsGetter interface { // CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. type CustomResourceDefinitionInterface interface { - Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (*v1.CustomResourceDefinition, error) - Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) - UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error) + Create(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts metav1.CreateOptions) (*apiextensionsv1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts metav1.UpdateOptions) (*apiextensionsv1.CustomResourceDefinition, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinition, opts metav1.UpdateOptions) (*apiextensionsv1.CustomResourceDefinition, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CustomResourceDefinition, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CustomResourceDefinitionList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiextensionsv1.CustomResourceDefinition, error) + List(ctx context.Context, opts metav1.ListOptions) (*apiextensionsv1.CustomResourceDefinitionList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) - Apply(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) - ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apiextensionsv1.CustomResourceDefinition, err error) + Apply(ctx context.Context, customResourceDefinition *applyconfigurationapiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *apiextensionsv1.CustomResourceDefinition, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, customResourceDefinition *applyconfigurationapiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *apiextensionsv1.CustomResourceDefinition, err error) CustomResourceDefinitionExpansion } // customResourceDefinitions implements CustomResourceDefinitionInterface type customResourceDefinitions struct { - client rest.Interface + *gentype.ClientWithListAndApply[*apiextensionsv1.CustomResourceDefinition, *apiextensionsv1.CustomResourceDefinitionList, *applyconfigurationapiextensionsv1.CustomResourceDefinitionApplyConfiguration] } // newCustomResourceDefinitions returns a CustomResourceDefinitions func newCustomResourceDefinitions(c *ApiextensionsV1Client) *customResourceDefinitions { return &customResourceDefinitions{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*apiextensionsv1.CustomResourceDefinition, *apiextensionsv1.CustomResourceDefinitionList, *applyconfigurationapiextensionsv1.CustomResourceDefinitionApplyConfiguration]( + "customresourcedefinitions", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *apiextensionsv1.CustomResourceDefinition { return &apiextensionsv1.CustomResourceDefinition{} }, + func() *apiextensionsv1.CustomResourceDefinitionList { + return &apiextensionsv1.CustomResourceDefinitionList{} + }, + gentype.PrefersProtobuf[*apiextensionsv1.CustomResourceDefinition](), + ), } } - -// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. 
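The hunk above moves the generated v1 CustomResourceDefinition client off its hand-rolled REST plumbing and onto client-go's generic gentype.ClientWithListAndApply helper; the exported CustomResourceDefinitionInterface keeps the same method set, so existing callers should not need changes. A minimal caller-side sketch under that assumption — the in-cluster config and the CRD name are illustrative and not part of this diff:

package main

import (
	"context"
	"fmt"

	clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes the program runs inside a cluster; any *rest.Config works the same way.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Same call shape as before the gentype rewrite; only the client internals changed.
	crd, err := cs.ApiextensionsV1().CustomResourceDefinitions().Get(
		context.TODO(), "constrainttemplates.templates.gatekeeper.sh", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(crd.Spec.Group)
}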
-func (c *customResourceDefinitions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CustomResourceDefinition, err error) { - result = &v1.CustomResourceDefinition{} - err = c.client.Get(). - Resource("customresourcedefinitions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. -func (c *customResourceDefinitions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CustomResourceDefinitionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CustomResourceDefinitionList{} - err = c.client.Get(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested customResourceDefinitions. -func (c *customResourceDefinitions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (result *v1.CustomResourceDefinition, err error) { - result = &v1.CustomResourceDefinition{} - err = c.client.Post(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { - result = &v1.CustomResourceDefinition{} - err = c.client.Put(). - Resource("customresourcedefinitions"). - Name(customResourceDefinition.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) { - result = &v1.CustomResourceDefinition{} - err = c.client.Put(). - Resource("customresourcedefinitions"). - Name(customResourceDefinition.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. 
-func (c *customResourceDefinitions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("customresourcedefinitions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("customresourcedefinitions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched customResourceDefinition. -func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) { - result = &v1.CustomResourceDefinition{} - err = c.client.Patch(pt). - Resource("customresourcedefinitions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied customResourceDefinition. -func (c *customResourceDefinitions) Apply(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) { - if customResourceDefinition == nil { - return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(customResourceDefinition) - if err != nil { - return nil, err - } - name := customResourceDefinition.Name - if name == nil { - return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") - } - result = &v1.CustomResourceDefinition{} - err = c.client.Patch(types.ApplyPatchType). - Resource("customresourcedefinitions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *customResourceDefinitions) ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1.CustomResourceDefinitionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CustomResourceDefinition, err error) { - if customResourceDefinition == nil { - return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(customResourceDefinition) - if err != nil { - return nil, err - } - - name := customResourceDefinition.Name - if name == nil { - return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") - } - - result = &v1.CustomResourceDefinition{} - err = c.client.Patch(types.ApplyPatchType). - Resource("customresourcedefinitions"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go index 657ce2ca8..e45f25d58 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *ApiextensionsV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := apiextensionsv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go index 0e5b482a3..e7ea4e971 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + applyconfigurationapiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/applyconfiguration/apiextensions/v1beta1" scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. @@ -41,203 +38,42 @@ type CustomResourceDefinitionsGetter interface { // CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. 
type CustomResourceDefinitionInterface interface { - Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (*v1beta1.CustomResourceDefinition, error) - Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) - UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*v1beta1.CustomResourceDefinition, error) + Create(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinition, opts v1.CreateOptions) (*apiextensionsv1beta1.CustomResourceDefinition, error) + Update(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*apiextensionsv1beta1.CustomResourceDefinition, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (*apiextensionsv1beta1.CustomResourceDefinition, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CustomResourceDefinition, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CustomResourceDefinitionList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*apiextensionsv1beta1.CustomResourceDefinition, error) + List(ctx context.Context, opts v1.ListOptions) (*apiextensionsv1beta1.CustomResourceDefinitionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) - Apply(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) - ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *apiextensionsv1beta1.CustomResourceDefinition, err error) + Apply(ctx context.Context, customResourceDefinition *applyconfigurationapiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *apiextensionsv1beta1.CustomResourceDefinition, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, customResourceDefinition *applyconfigurationapiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *apiextensionsv1beta1.CustomResourceDefinition, err error) CustomResourceDefinitionExpansion } // customResourceDefinitions implements CustomResourceDefinitionInterface type customResourceDefinitions struct { - client rest.Interface + *gentype.ClientWithListAndApply[*apiextensionsv1beta1.CustomResourceDefinition, *apiextensionsv1beta1.CustomResourceDefinitionList, *applyconfigurationapiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration] } // newCustomResourceDefinitions returns a CustomResourceDefinitions func newCustomResourceDefinitions(c *ApiextensionsV1beta1Client) *customResourceDefinitions { return &customResourceDefinitions{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*apiextensionsv1beta1.CustomResourceDefinition, *apiextensionsv1beta1.CustomResourceDefinitionList, *applyconfigurationapiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration]( + "customresourcedefinitions", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *apiextensionsv1beta1.CustomResourceDefinition { + return &apiextensionsv1beta1.CustomResourceDefinition{} + }, + func() *apiextensionsv1beta1.CustomResourceDefinitionList { + return &apiextensionsv1beta1.CustomResourceDefinitionList{} + }, + gentype.PrefersProtobuf[*apiextensionsv1beta1.CustomResourceDefinition](), + ), } } - -// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. -func (c *customResourceDefinitions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CustomResourceDefinition, err error) { - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Get(). - Resource("customresourcedefinitions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. -func (c *customResourceDefinitions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CustomResourceDefinitionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CustomResourceDefinitionList{} - err = c.client.Get(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested customResourceDefinitions. -func (c *customResourceDefinitions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. 
-func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.CreateOptions) (result *v1beta1.CustomResourceDefinition, err error) { - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Post(). - Resource("customresourcedefinitions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. -func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Put(). - Resource("customresourcedefinitions"). - Name(customResourceDefinition.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *customResourceDefinitions) UpdateStatus(ctx context.Context, customResourceDefinition *v1beta1.CustomResourceDefinition, opts v1.UpdateOptions) (result *v1beta1.CustomResourceDefinition, err error) { - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Put(). - Resource("customresourcedefinitions"). - Name(customResourceDefinition.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(customResourceDefinition). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. -func (c *customResourceDefinitions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("customresourcedefinitions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("customresourcedefinitions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched customResourceDefinition. -func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) { - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Patch(pt). - Resource("customresourcedefinitions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied customResourceDefinition. 
-func (c *customResourceDefinitions) Apply(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) { - if customResourceDefinition == nil { - return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(customResourceDefinition) - if err != nil { - return nil, err - } - name := customResourceDefinition.Name - if name == nil { - return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") - } - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Patch(types.ApplyPatchType). - Resource("customresourcedefinitions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *customResourceDefinitions) ApplyStatus(ctx context.Context, customResourceDefinition *apiextensionsv1beta1.CustomResourceDefinitionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CustomResourceDefinition, err error) { - if customResourceDefinition == nil { - return nil, fmt.Errorf("customResourceDefinition provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(customResourceDefinition) - if err != nil { - return nil, err - } - - name := customResourceDefinition.Name - if name == nil { - return nil, fmt.Errorf("customResourceDefinition.Name must be provided to Apply") - } - - result = &v1beta1.CustomResourceDefinition{} - err = c.client.Patch(types.ApplyPatchType). - Resource("customresourcedefinitions"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go index 7773f3d14..c562263f3 100644 --- a/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go +++ b/constraint/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go @@ -17,6 +17,8 @@ limitations under the License. package features import ( + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/version" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/featuregate" ) @@ -25,18 +27,15 @@ const ( // Every feature gate should add method here following this template: // // // owner: @username - // // alpha: v1.4 // MyFeature() bool // owner: @alexzielenski - // alpha: v1.28 // // Ignores errors raised on unchanged fields of Custom Resources // across UPDATE/PATCH requests. CRDValidationRatcheting featuregate.Feature = "CRDValidationRatcheting" // owner: @jpbetz - // alpha: v1.30 // // CustomResourceDefinitions may include SelectableFields to declare which fields // may be used as field selectors. @@ -44,13 +43,23 @@ const ( ) func init() { - utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates) + runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates)) } -// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. -// To add a new feature, define a key for it above and add it here. 
The features will be +// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs. +// To add a new feature, define a key for it above and add it below. The features will be // available throughout Kubernetes binaries. -var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ - CRDValidationRatcheting: {Default: true, PreRelease: featuregate.Beta}, - CustomResourceFieldSelectors: {Default: false, PreRelease: featuregate.Alpha}, +// To support n-3 compatibility version, features may only be removed 3 releases after graduation. +// +// Entries are alphabetized. +var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{ + CRDValidationRatcheting: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + CustomResourceFieldSelectors: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA}, + }, } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/constraint/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index 57e0e71f6..6a3ab8f24 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -54,6 +54,7 @@ var knownReasons = map[metav1.StatusReason]struct{}{ metav1.StatusReasonGone: {}, metav1.StatusReasonInvalid: {}, metav1.StatusReasonServerTimeout: {}, + metav1.StatusReasonStoreReadError: {}, metav1.StatusReasonTimeout: {}, metav1.StatusReasonTooManyRequests: {}, metav1.StatusReasonBadRequest: {}, @@ -775,6 +776,12 @@ func IsUnexpectedObjectError(err error) bool { return err != nil && (ok || errors.As(err, &uoe)) } +// IsStoreReadError determines if err is due to either failure to transform the +// data from the storage, or failure to decode the object appropriately. +func IsStoreReadError(err error) bool { + return ReasonForError(err) == metav1.StatusReasonStoreReadError +} + // SuggestsClientDelay returns true if this error suggests a client delay as well as the // suggested seconds to wait, or false if the error does not imply a wait. It does not // address whether the error *should* be retried, since some errors (like a 3xx) may diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/constraint/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS index 1e1330fff..3bd8bf535 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ b/constraint/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -10,5 +10,6 @@ reviewers: - mikedanese - liggitt - janetkuo - - ncdc - dims +emeritus_reviewers: + - ncdc diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go b/constraint/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go new file mode 100644 index 000000000..72c6438cb --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go @@ -0,0 +1,165 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
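The kube_features.go hunk just above switches the apiextensions gates from a flat FeatureSpec map to featuregate.VersionedSpecs registered via AddVersioned, so each gate now carries its per-release alpha/beta/GA history. Reading a gate is unchanged for consumers; a small sketch of that read path, where the gate name comes from the hunk and everything else is illustrative:

package main

import (
	"fmt"

	apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
	// Versioned specs only change how defaults are declared per release;
	// asking whether a gate is enabled is the same call as before.
	enabled := utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CRDValidationRatcheting)
	fmt.Println("CRDValidationRatcheting enabled:", enabled)
}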
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testrestmapper + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// TestOnlyStaticRESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: +// 1. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy +// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, +// all other groups alphabetical. +// +// TODO callers of this method should be updated to build their own specific restmapper based on their scheme for their tests +// TODO the things being tested are related to whether various cases are handled, not tied to the particular types being checked. +func TestOnlyStaticRESTMapper(scheme *runtime.Scheme, versionPatterns ...schema.GroupVersion) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + unionedGroups := sets.NewString() + for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() { + if !unionedGroups.Has(enabledVersion.Group) { + unionedGroups.Insert(enabledVersion.Group) + unionMapper = append(unionMapper, newRESTMapper(enabledVersion.Group, scheme)) + } + } + + if len(versionPatterns) != 0 { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + for _, versionPriority := range versionPatterns { + resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) + } + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} + } + + prioritizedGroups := []string{"", "extensions", "metrics"} + resourcePriority, kindPriority := prioritiesForGroups(scheme, prioritizedGroups...) + + prioritizedGroupsSet := sets.NewString(prioritizedGroups...) + remainingGroups := sets.String{} + for _, enabledVersion := range scheme.PrioritizedVersionsAllGroups() { + if !prioritizedGroupsSet.Has(enabledVersion.Group) { + remainingGroups.Insert(enabledVersion.Group) + } + } + + remainingResourcePriority, remainingKindPriority := prioritiesForGroups(scheme, remainingGroups.List()...) + resourcePriority = append(resourcePriority, remainingResourcePriority...) + kindPriority = append(kindPriority, remainingKindPriority...) + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} +} + +// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, +// then any non-preferred version of the group second. 
+func prioritiesForGroups(scheme *runtime.Scheme, groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + + for _, group := range groups { + availableVersions := scheme.PrioritizedVersionsForGroup(group) + if len(availableVersions) > 0 { + resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) + } + } + for _, group := range groups { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) + kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) + } + + return resourcePriority, kindPriority +} + +func newRESTMapper(group string, scheme *runtime.Scheme) meta.RESTMapper { + mapper := meta.NewDefaultRESTMapper(scheme.PrioritizedVersionsForGroup(group)) + for _, gv := range scheme.PrioritizedVersionsForGroup(group) { + for kind := range scheme.KnownTypes(gv) { + if ignoredKinds.Has(kind) { + continue + } + scope := meta.RESTScopeNamespace + if rootScopedKinds[gv.WithKind(kind).GroupKind()] { + scope = meta.RESTScopeRoot + } + mapper.Add(gv.WithKind(kind), scope) + } + } + + return mapper +} + +// hardcoded is good enough for the test we're running +var rootScopedKinds = map[schema.GroupKind]bool{ + {Group: "admission.k8s.io", Kind: "AdmissionReview"}: true, + + {Group: "admissionregistration.k8s.io", Kind: "ValidatingWebhookConfiguration"}: true, + {Group: "admissionregistration.k8s.io", Kind: "MutatingWebhookConfiguration"}: true, + + {Group: "authentication.k8s.io", Kind: "TokenReview"}: true, + + {Group: "authorization.k8s.io", Kind: "SubjectAccessReview"}: true, + {Group: "authorization.k8s.io", Kind: "SelfSubjectAccessReview"}: true, + {Group: "authorization.k8s.io", Kind: "SelfSubjectRulesReview"}: true, + + {Group: "certificates.k8s.io", Kind: "CertificateSigningRequest"}: true, + + {Group: "", Kind: "Node"}: true, + {Group: "", Kind: "Namespace"}: true, + {Group: "", Kind: "PersistentVolume"}: true, + {Group: "", Kind: "ComponentStatus"}: true, + + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRole"}: true, + {Group: "rbac.authorization.k8s.io", Kind: "ClusterRoleBinding"}: true, + + {Group: "scheduling.k8s.io", Kind: "PriorityClass"}: true, + + {Group: "storage.k8s.io", Kind: "StorageClass"}: true, + {Group: "storage.k8s.io", Kind: "VolumeAttachment"}: true, + + {Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}: true, + + {Group: "apiserver.k8s.io", Kind: "AdmissionConfiguration"}: true, + + {Group: "audit.k8s.io", Kind: "Event"}: true, + {Group: "audit.k8s.io", Kind: "Policy"}: true, + + {Group: "apiregistration.k8s.io", Kind: "APIService"}: true, + + {Group: "metrics.k8s.io", Kind: "NodeMetrics"}: true, + + {Group: "wardle.example.com", Kind: "Fischer"}: true, +} + +// hardcoded is good enough for the test we're running +var ignoredKinds = sets.NewString( + "ListOptions", + "DeleteOptions", + "Status", + "PodLogOptions", + "PodExecOptions", + "PodAttachOptions", + "PodPortForwardOptions", + "PodProxyOptions", + "NodeProxyOptions", + "ServiceProxyOptions", +) diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/constraint/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 69f1bc336..d0aada9dd 100644 --- 
a/constraint/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -20,11 +20,13 @@ import ( "bytes" "errors" "fmt" - "math" + math "math" "math/big" "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + inf "gopkg.in/inf.v0" ) @@ -458,9 +460,10 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { } } -// AsApproximateFloat64 returns a float64 representation of the quantity which may -// lose precision. If the value of the quantity is outside the range of a float64 -// +Inf/-Inf will be returned. +// AsApproximateFloat64 returns a float64 representation of the quantity which +// may lose precision. If precision matter more than performance, see +// AsFloat64Slow. If the value of the quantity is outside the range of a +// float64 +Inf/-Inf will be returned. func (q *Quantity) AsApproximateFloat64() float64 { var base float64 var exponent int @@ -478,6 +481,36 @@ func (q *Quantity) AsApproximateFloat64() float64 { return base * math.Pow10(exponent) } +// AsFloat64Slow returns a float64 representation of the quantity. This is +// more precise than AsApproximateFloat64 but significantly slower. If the +// value of the quantity is outside the range of a float64 +Inf/-Inf will be +// returned. +func (q *Quantity) AsFloat64Slow() float64 { + infDec := q.AsDec() + + var absScale int64 + if infDec.Scale() < 0 { + absScale = int64(-infDec.Scale()) + } else { + absScale = int64(infDec.Scale()) + } + pow10AbsScale := big.NewInt(10) + pow10AbsScale = pow10AbsScale.Exp(pow10AbsScale, big.NewInt(absScale), nil) + + var resultBigFloat *big.Float + if infDec.Scale() < 0 { + resultBigInt := new(big.Int).Mul(infDec.UnscaledBig(), pow10AbsScale) + resultBigFloat = new(big.Float).SetInt(resultBigInt) + } else { + pow10AbsScaleFloat := new(big.Float).SetInt(pow10AbsScale) + resultBigFloat = new(big.Float).SetInt(infDec.UnscaledBig()) + resultBigFloat = resultBigFloat.Quo(resultBigFloat, pow10AbsScaleFloat) + } + + result, _ := resultBigFloat.Float64() + return result +} + // AsInt64 returns a representation of the current value as an int64 if a fast conversion // is possible. If false is returned, callers must use the inf.Dec form of this quantity. func (q *Quantity) AsInt64() (int64, bool) { @@ -683,6 +716,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) { return result, nil } +func (q Quantity) MarshalCBOR() ([]byte, error) { + // The call to String() should never return the string "" because the receiver's + // address will never be nil. + return cbor.Marshal(q.String()) +} + // ToUnstructured implements the value.UnstructuredConverter interface. func (q Quantity) ToUnstructured() interface{} { return q.String() @@ -711,6 +750,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error { return nil } +func (q *Quantity) UnmarshalCBOR(value []byte) error { + var s *string + if err := cbor.Unmarshal(value, &s); err != nil { + return err + } + + if s == nil { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + + parsed, err := ParseQuantity(strings.TrimSpace(*s)) + if err != nil { + return err + } + + *q = parsed + return nil +} + // NewDecimalQuantity returns a new Quantity representing the given // value in the given format. 
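The quantity.go hunk above adds AsFloat64Slow next to AsApproximateFloat64 (exact big-decimal arithmetic instead of base*10^exponent) along with CBOR marshalling for Quantity. A quick sketch of the two float conversions, assuming nothing beyond the resource package itself; the sample value is arbitrary:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Arbitrary sample quantity; "m" is the milli suffix.
	q := resource.MustParse("9007199254740993m")

	// Existing fast path: float64(value) * math.Pow10(exponent), two rounding steps.
	fmt.Println(q.AsApproximateFloat64())

	// New in this hunk: exact big-decimal math, rounded to float64 once at the end.
	// For most quantities the two agree; they can differ near float64's precision limits.
	fmt.Println(q.AsFloat64Slow())
}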
func NewDecimalQuantity(b inf.Dec, format Format) *Quantity { diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/constraint/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go index 593d7ba8c..54a2883a3 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go @@ -50,7 +50,7 @@ func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) fie } } if err := ValidateAnnotationsSize(annotations); err != nil { - allErrs = append(allErrs, field.TooLong(fldPath, "", TotalAnnotationSizeLimitB)) + allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, TotalAnnotationSizeLimitB)) } return allErrs } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go index 472a9aeb2..585d7f44b 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go @@ -24,16 +24,16 @@ import ( ) // Scheme is the registry for any type that adheres to the meta API spec. -var scheme = runtime.NewScheme() +var Scheme = runtime.NewScheme() // Codecs provides access to encoding and decoding for the scheme. -var Codecs = serializer.NewCodecFactory(scheme) +var Codecs = serializer.NewCodecFactory(Scheme) // ParameterCodec handles versioning of objects that are converted to query parameters. -var ParameterCodec = runtime.NewParameterCodec(scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) // Unlike other API groups, meta internal knows about all meta external versions, but keeps // the logic for conversion private. func init() { - utilruntime.Must(internalversion.AddToScheme(scheme)) + utilruntime.Must(internalversion.AddToScheme(Scheme)) } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go new file mode 100644 index 000000000..2734a8f3b --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go @@ -0,0 +1,76 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateListOptions returns all validation errors found while validating the ListOptions. 
+func ValidateListOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList { + if options.Watch { + return validateWatchOptions(options, isWatchListFeatureEnabled) + } + allErrs := field.ErrorList{} + if match := options.ResourceVersionMatch; len(match) > 0 { + if len(options.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden unless resourceVersion is provided")) + } + if len(options.Continue) > 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided")) + } + if match != metav1.ResourceVersionMatchExact && match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchExact), string(metav1.ResourceVersionMatchNotOlderThan), ""})) + } + if match == metav1.ResourceVersionMatchExact && options.ResourceVersion == "0" { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch \"exact\" is forbidden for resourceVersion \"0\"")) + } + } + if options.SendInitialEvents != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for list")) + } + return allErrs +} + +func validateWatchOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList { + allErrs := field.ErrorList{} + match := options.ResourceVersionMatch + if options.SendInitialEvents != nil { + if match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), fmt.Sprintf("sendInitialEvents requires setting resourceVersionMatch to %s", metav1.ResourceVersionMatchNotOlderThan))) + } + if !isWatchListFeatureEnabled { + allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for watch unless the WatchList feature gate is enabled")) + } + } + if len(match) > 0 { + if options.SendInitialEvents == nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden for watch unless sendInitialEvents is provided")) + } + if match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchNotOlderThan)})) + } + if len(options.Continue) > 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided")) + } + } + return allErrs +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index e7e5c152d..ec414a84b 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -11,6 +11,7 @@ reviewers: - luxas - janetkuo - justinsb - - ncdc - soltysh - dims +emeritus_reviewers: + - ncdc diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go index 15b45ffa8..5005beb12 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go +++ 
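(Editorial aside, not part of the patch.) The new internalversion validation package added above rejects inconsistent combinations of resourceVersion, resourceVersionMatch, continue, and sendInitialEvents, delegating watch requests to validateWatchOptions. A rough sketch of a caller, assuming the vendored package path introduced in this change; the second argument stands for the WatchList feature gate:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
	"k8s.io/apimachinery/pkg/apis/meta/internalversion/validation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// resourceVersionMatch without a resourceVersion is one of the
	// combinations the validator rejects.
	opts := &internalversion.ListOptions{
		ResourceVersionMatch: metav1.ResourceVersionMatchExact,
	}

	// false: treat the WatchList feature gate as disabled.
	for _, err := range validation.ValidateListOptions(opts, false) {
		fmt.Println(err)
	}
}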
b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -18,6 +18,7 @@ package v1 import ( "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" ) // IsControlledBy checks if the object has a controllerRef set to the given owner @@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference { return nil } cp := *ref + cp.Controller = ptr.To(*ref.Controller) + if ref.BlockOwnerDeletion != nil { + cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion) + } return &cp } -// GetControllerOf returns a pointer to the controllerRef if controllee has a controller +// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller func GetControllerOfNoCopy(controllee Object) *OwnerReference { refs := controllee.GetOwnerReferences() for i := range refs { @@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference { // NewControllerRef creates an OwnerReference pointing to the given owner. func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { - blockOwnerDeletion := true - isController := true return &OwnerReference{ APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind, Name: owner.GetName(), UID: owner.GetUID(), - BlockOwnerDeletion: &blockOwnerDeletion, - Controller: &isController, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), } } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 75b88890f..9ee6c0591 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -329,10 +329,38 @@ func (m *Duration) XXX_DiscardUnknown() { var xxx_messageInfo_Duration proto.InternalMessageInfo +func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} } +func (*FieldSelectorRequirement) ProtoMessage() {} +func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_a8431b6e0aeeb761, []int{10} +} +func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldSelectorRequirement.Merge(m, src) +} +func (m *FieldSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *FieldSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo + func (m *FieldsV1) Reset() { *m = FieldsV1{} } func (*FieldsV1) ProtoMessage() {} func (*FieldsV1) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{10} + return fileDescriptor_a8431b6e0aeeb761, []int{11} } func (m *FieldsV1) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +388,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo func (m *GetOptions) Reset() { *m = GetOptions{} } func (*GetOptions) ProtoMessage() {} func (*GetOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{11} + return fileDescriptor_a8431b6e0aeeb761, []int{12} } func (m *GetOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +416,7 @@ var 
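(Editorial aside, not part of the patch.) Stepping back to the controller_ref.go hunk earlier in this file: GetControllerOf now deep-copies the Controller and BlockOwnerDeletion pointers via k8s.io/utils/ptr instead of aliasing them, and NewControllerRef builds those fields with ptr.To(true). A small illustrative sketch of the resulting behavior, with hypothetical owner/child metadata:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	owner := &metav1.ObjectMeta{Name: "parent", UID: "9b6f1d"}
	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}

	// Controller and BlockOwnerDeletion are now set via ptr.To(true).
	ref := metav1.NewControllerRef(owner, gvk)

	child := &metav1.ObjectMeta{
		Name:            "child",
		OwnerReferences: []metav1.OwnerReference{*ref},
	}

	// GetControllerOf returns a deep copy: mutating it no longer
	// touches the OwnerReference stored on the child object.
	got := metav1.GetControllerOf(child)
	*got.Controller = false
	fmt.Println(*child.OwnerReferences[0].Controller) // still true
}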
xxx_messageInfo_GetOptions proto.InternalMessageInfo func (m *GroupKind) Reset() { *m = GroupKind{} } func (*GroupKind) ProtoMessage() {} func (*GroupKind) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{12} + return fileDescriptor_a8431b6e0aeeb761, []int{13} } func (m *GroupKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +444,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo func (m *GroupResource) Reset() { *m = GroupResource{} } func (*GroupResource) ProtoMessage() {} func (*GroupResource) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{13} + return fileDescriptor_a8431b6e0aeeb761, []int{14} } func (m *GroupResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +472,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo func (m *GroupVersion) Reset() { *m = GroupVersion{} } func (*GroupVersion) ProtoMessage() {} func (*GroupVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{14} + return fileDescriptor_a8431b6e0aeeb761, []int{15} } func (m *GroupVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{15} + return fileDescriptor_a8431b6e0aeeb761, []int{16} } func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } func (*GroupVersionKind) ProtoMessage() {} func (*GroupVersionKind) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{16} + return fileDescriptor_a8431b6e0aeeb761, []int{17} } func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } func (*GroupVersionResource) ProtoMessage() {} func (*GroupVersionResource) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{17} + return fileDescriptor_a8431b6e0aeeb761, []int{18} } func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +584,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo func (m *LabelSelector) Reset() { *m = LabelSelector{} } func (*LabelSelector) ProtoMessage() {} func (*LabelSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{18} + return fileDescriptor_a8431b6e0aeeb761, []int{19} } func (m *LabelSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{19} + return fileDescriptor_a8431b6e0aeeb761, []int{20} } func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +640,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo func (m *List) Reset() { *m 
= List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{20} + return fileDescriptor_a8431b6e0aeeb761, []int{21} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +668,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *ListMeta) Reset() { *m = ListMeta{} } func (*ListMeta) ProtoMessage() {} func (*ListMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{21} + return fileDescriptor_a8431b6e0aeeb761, []int{22} } func (m *ListMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +696,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} func (*ListOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{22} + return fileDescriptor_a8431b6e0aeeb761, []int{23} } func (m *ListOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +724,7 @@ var xxx_messageInfo_ListOptions proto.InternalMessageInfo func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } func (*ManagedFieldsEntry) ProtoMessage() {} func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{23} + return fileDescriptor_a8431b6e0aeeb761, []int{24} } func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +752,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo func (m *MicroTime) Reset() { *m = MicroTime{} } func (*MicroTime) ProtoMessage() {} func (*MicroTime) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{24} + return fileDescriptor_a8431b6e0aeeb761, []int{25} } func (m *MicroTime) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MicroTime.Unmarshal(m, b) @@ -747,7 +775,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} func (*ObjectMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{25} + return fileDescriptor_a8431b6e0aeeb761, []int{26} } func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -775,7 +803,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} func (*OwnerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{26} + return fileDescriptor_a8431b6e0aeeb761, []int{27} } func (m *OwnerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -803,7 +831,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } func (*PartialObjectMetadata) ProtoMessage() {} func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{27} + return fileDescriptor_a8431b6e0aeeb761, []int{28} } func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{28} + return fileDescriptor_a8431b6e0aeeb761, 
[]int{29} } func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -859,7 +887,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo func (m *Patch) Reset() { *m = Patch{} } func (*Patch) ProtoMessage() {} func (*Patch) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{29} + return fileDescriptor_a8431b6e0aeeb761, []int{30} } func (m *Patch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -887,7 +915,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo func (m *PatchOptions) Reset() { *m = PatchOptions{} } func (*PatchOptions) ProtoMessage() {} func (*PatchOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{30} + return fileDescriptor_a8431b6e0aeeb761, []int{31} } func (m *PatchOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -915,7 +943,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{31} + return fileDescriptor_a8431b6e0aeeb761, []int{32} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -943,7 +971,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} func (*RootPaths) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{32} + return fileDescriptor_a8431b6e0aeeb761, []int{33} } func (m *RootPaths) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -971,7 +999,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{33} + return fileDescriptor_a8431b6e0aeeb761, []int{34} } func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -999,7 +1027,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{34} + return fileDescriptor_a8431b6e0aeeb761, []int{35} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1027,7 +1055,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} func (*StatusCause) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{35} + return fileDescriptor_a8431b6e0aeeb761, []int{36} } func (m *StatusCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} func (*StatusDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{36} + return fileDescriptor_a8431b6e0aeeb761, []int{37} } func (m *StatusDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1083,7 +1111,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo func (m *TableOptions) Reset() { *m = TableOptions{} } func (*TableOptions) ProtoMessage() {} func 
(*TableOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{37} + return fileDescriptor_a8431b6e0aeeb761, []int{38} } func (m *TableOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1111,7 +1139,7 @@ var xxx_messageInfo_TableOptions proto.InternalMessageInfo func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} func (*Time) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{38} + return fileDescriptor_a8431b6e0aeeb761, []int{39} } func (m *Time) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Time.Unmarshal(m, b) @@ -1134,7 +1162,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{39} + return fileDescriptor_a8431b6e0aeeb761, []int{40} } func (m *Timestamp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1162,7 +1190,7 @@ var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} func (*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{40} + return fileDescriptor_a8431b6e0aeeb761, []int{41} } func (m *TypeMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1190,7 +1218,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } func (*UpdateOptions) ProtoMessage() {} func (*UpdateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{41} + return fileDescriptor_a8431b6e0aeeb761, []int{42} } func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1218,7 +1246,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} func (*Verbs) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{42} + return fileDescriptor_a8431b6e0aeeb761, []int{43} } func (m *Verbs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1246,7 +1274,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} func (*WatchEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_a8431b6e0aeeb761, []int{43} + return fileDescriptor_a8431b6e0aeeb761, []int{44} } func (m *WatchEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1282,6 +1310,7 @@ func init() { proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions") proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement") proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") @@ -1326,186 +1355,190 @@ func init() { } var fileDescriptor_a8431b6e0aeeb761 = []byte{ - // 2853 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47, - 0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 
0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44, - 0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d, - 0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a, - 0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88, - 0x13, 0x77, 0x90, 0xc8, 0x31, 0x88, 0x4b, 0x24, 0xd0, 0x28, 0x31, 0x07, 0x8e, 0x88, 0xab, 0x85, - 0x04, 0xaa, 0x47, 0x77, 0x57, 0xcf, 0x63, 0xdd, 0x93, 0x5d, 0x22, 0x6e, 0xd3, 0xdf, 0xbb, 0xaa, - 0xbe, 0xfa, 0xea, 0x7b, 0x0c, 0x3c, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0x7f, 0xdd, 0xea, 0x3a, 0x1d, - 0xab, 0x75, 0xe4, 0x78, 0x84, 0xf6, 0xd7, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0xeb, 0x1d, 0x12, 0x58, - 0xeb, 0x27, 0x57, 0xd6, 0xdb, 0xc4, 0x23, 0xd4, 0x0a, 0x88, 0x5d, 0xef, 0x52, 0x3f, 0xf0, 0xd1, - 0x63, 0x92, 0xab, 0xae, 0x73, 0xd5, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0x75, 0xce, 0x55, 0x3f, 0xb9, - 0xb2, 0xf2, 0x54, 0xdb, 0x09, 0x8e, 0x7a, 0x07, 0xf5, 0x96, 0xdf, 0x59, 0x6f, 0xfb, 0x6d, 0x7f, - 0x5d, 0x30, 0x1f, 0xf4, 0x0e, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 0xd0, 0x95, 0xf5, 0x49, 0xa6, - 0xd0, 0x9e, 0x17, 0x38, 0x1d, 0x32, 0x6c, 0xc5, 0xca, 0xb3, 0xe7, 0x31, 0xb0, 0xd6, 0x11, 0xe9, - 0x58, 0xc3, 0x7c, 0xe6, 0x9f, 0xb2, 0x50, 0xd8, 0xd8, 0xdb, 0xbe, 0x49, 0xfd, 0x5e, 0x17, 0xad, - 0x41, 0xce, 0xb3, 0x3a, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x8b, 0x8d, 0xf2, 0x07, 0x83, 0xda, 0xcc, - 0xe9, 0xa0, 0x96, 0x7b, 0xd5, 0xea, 0x10, 0x2c, 0x30, 0xc8, 0x85, 0xc2, 0x09, 0xa1, 0xcc, 0xf1, - 0x3d, 0x56, 0xcd, 0xac, 0x65, 0x2f, 0x97, 0xae, 0xbe, 0x58, 0x4f, 0xb3, 0xfe, 0xba, 0x50, 0x70, - 0x57, 0xb2, 0x6e, 0xf9, 0xb4, 0xe9, 0xb0, 0x96, 0x7f, 0x42, 0x68, 0xbf, 0xb1, 0xa8, 0xb4, 0x14, - 0x14, 0x92, 0xe1, 0x48, 0x03, 0xfa, 0x91, 0x01, 0x8b, 0x5d, 0x4a, 0x0e, 0x09, 0xa5, 0xc4, 0x56, - 0xf8, 0x6a, 0x76, 0xcd, 0x78, 0x08, 0x6a, 0xab, 0x4a, 0xed, 0xe2, 0xde, 0x90, 0x7c, 0x3c, 0xa2, - 0x11, 0xfd, 0xda, 0x80, 0x15, 0x46, 0xe8, 0x09, 0xa1, 0x1b, 0xb6, 0x4d, 0x09, 0x63, 0x8d, 0xfe, - 0xa6, 0xeb, 0x10, 0x2f, 0xd8, 0xdc, 0x6e, 0x62, 0x56, 0xcd, 0x89, 0x7d, 0xf8, 0x7a, 0x3a, 0x83, - 0xf6, 0x27, 0xc9, 0x69, 0x98, 0xca, 0xa2, 0x95, 0x89, 0x24, 0x0c, 0xdf, 0xc7, 0x0c, 0xf3, 0x10, - 0xca, 0xe1, 0x41, 0xde, 0x72, 0x58, 0x80, 0xee, 0xc2, 0x6c, 0x9b, 0x7f, 0xb0, 0xaa, 0x21, 0x0c, - 0xac, 0xa7, 0x33, 0x30, 0x94, 0xd1, 0x98, 0x57, 0xf6, 0xcc, 0x8a, 0x4f, 0x86, 0x95, 0x34, 0xf3, - 0x67, 0x39, 0x28, 0x6d, 0xec, 0x6d, 0x63, 0xc2, 0xfc, 0x1e, 0x6d, 0x91, 0x14, 0x4e, 0x73, 0x0d, - 0xca, 0xcc, 0xf1, 0xda, 0x3d, 0xd7, 0xa2, 0x1c, 0x5a, 0x9d, 0x15, 0x94, 0xcb, 0x8a, 0xb2, 0xbc, - 0xaf, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xd7, 0x6a, 0x11, 0xbb, 0x9a, 0x59, - 0x33, 0x2e, 0x17, 0x1a, 0x48, 0xf1, 0xc1, 0xab, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x51, 0xc8, 0x0b, - 0x4b, 0xab, 0x05, 0xa1, 0xa6, 0xa2, 0xc8, 0xf3, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x13, 0x30, 0xa7, - 0xbc, 0xac, 0x5a, 0x14, 0x64, 0x0b, 0x8a, 0x6c, 0x2e, 0x74, 0x83, 0x10, 0xcf, 0xd7, 0x77, 0xec, - 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x38, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, 0xfc, 0x09, - 0xa1, 0x07, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0x8d, 0x22, 0x37, - 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x9f, 0x06, 0x62, 0x79, 0xd5, 0xfc, 0x5a, - 0xf6, 0x72, 0xb1, 0x31, 0xcf, 0xd7, 0xbb, 0x1f, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x5b, 0x56, 0x40, - 0xda, 0x3e, 0x75, 0x08, 0xab, 0xce, 0xc5, 0xf4, 0x9b, 0x11, 0x14, 0x6b, 0x14, 0xe8, 0x65, 0x40, - 0x2c, 0xf0, 0xa9, 0xd5, 0x26, 0x6a, 0xa9, 0x2f, 0x59, 0xec, 0xa8, 
0x0a, 0x62, 0x75, 0x2b, 0x6a, - 0x75, 0x68, 0x7f, 0x84, 0x02, 0x8f, 0xe1, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef, - 0xae, 0x41, 0xb9, 0xad, 0xdd, 0x3a, 0xe5, 0x17, 0xd1, 0x69, 0xeb, 0x37, 0x12, 0x27, 0x28, 0x11, - 0x81, 0x22, 0x55, 0x92, 0xc2, 0xe8, 0x72, 0x25, 0xb5, 0xd3, 0x86, 0x36, 0xc4, 0x9a, 0x34, 0x20, - 0xc3, 0xb1, 0x64, 0xf3, 0x1f, 0x86, 0x70, 0xe0, 0x30, 0xde, 0xa0, 0xcb, 0x5a, 0x4c, 0x33, 0xc4, - 0xf6, 0x95, 0x27, 0xc4, 0xa3, 0x73, 0x02, 0x41, 0xe6, 0xff, 0x22, 0x10, 0x5c, 0x2f, 0xfc, 0xf2, - 0xbd, 0xda, 0xcc, 0xdb, 0x7f, 0x5b, 0x9b, 0x31, 0x7f, 0x61, 0x40, 0x79, 0xa3, 0xdb, 0x75, 0xfb, - 0xbb, 0xdd, 0x40, 0x2c, 0xc0, 0x84, 0x59, 0x9b, 0xf6, 0x71, 0xcf, 0x53, 0x0b, 0x05, 0x7e, 0xbf, - 0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0x43, 0x9f, 0xb6, 0x88, 0xba, 0x6e, 0xd1, 0xfd, 0xd9, - 0xe2, 0x40, 0x2c, 0x71, 0xfc, 0x90, 0x0f, 0x1d, 0xe2, 0xda, 0x3b, 0x96, 0x67, 0xb5, 0x09, 0x55, - 0x97, 0x23, 0xda, 0xfa, 0x2d, 0x0d, 0x87, 0x13, 0x94, 0xe6, 0x7f, 0x32, 0x50, 0xdc, 0xf4, 0x3d, - 0xdb, 0x09, 0xd4, 0xe5, 0x0a, 0xfa, 0xdd, 0x91, 0xe0, 0x71, 0xbb, 0xdf, 0x25, 0x58, 0x60, 0xd0, - 0x73, 0x30, 0xcb, 0x02, 0x2b, 0xe8, 0x31, 0x61, 0x4f, 0xb1, 0xf1, 0x48, 0x18, 0x96, 0xf6, 0x05, - 0xf4, 0x6c, 0x50, 0x5b, 0x88, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x03, 0xb1, 0x51, - 0xf6, 0x4d, 0xf9, 0xec, 0x85, 0xef, 0x47, 0x36, 0xf6, 0xf4, 0xdd, 0x11, 0x0a, 0x3c, 0x86, 0x0b, - 0x9d, 0x00, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x87, 0xa8, 0x0b, - 0xff, 0xa5, 0x74, 0x27, 0xce, 0x39, 0x62, 0xbd, 0xb7, 0x46, 0xa4, 0xe1, 0x31, 0x1a, 0xd0, 0xe3, - 0x30, 0x4b, 0x89, 0xc5, 0x7c, 0xaf, 0x9a, 0x17, 0xcb, 0x8f, 0xa2, 0x32, 0x16, 0x50, 0xac, 0xb0, - 0x3c, 0xa0, 0x75, 0x08, 0x63, 0x56, 0x3b, 0x0c, 0xaf, 0x51, 0x40, 0xdb, 0x91, 0x60, 0x1c, 0xe2, - 0xcd, 0xdf, 0x1a, 0x50, 0xd9, 0xa4, 0xc4, 0x0a, 0xc8, 0x34, 0x6e, 0xf1, 0xa9, 0x4f, 0x1c, 0x6d, - 0xc0, 0x82, 0xf8, 0xbe, 0x6b, 0xb9, 0x8e, 0x2d, 0xcf, 0x20, 0x27, 0x98, 0x3f, 0xaf, 0x98, 0x17, - 0xb6, 0x92, 0x68, 0x3c, 0x4c, 0x6f, 0xfe, 0x24, 0x0b, 0x95, 0x26, 0x71, 0x49, 0x6c, 0xf2, 0x16, - 0xa0, 0x36, 0xb5, 0x5a, 0x64, 0x8f, 0x50, 0xc7, 0xb7, 0xf7, 0x49, 0xcb, 0xf7, 0x6c, 0x26, 0xdc, - 0x28, 0xdb, 0xf8, 0x1c, 0xdf, 0xdf, 0x9b, 0x23, 0x58, 0x3c, 0x86, 0x03, 0xb9, 0x50, 0xe9, 0x52, - 0xf1, 0x5b, 0xec, 0xb9, 0xf4, 0xb2, 0xd2, 0xd5, 0xaf, 0xa4, 0x3b, 0xd2, 0x3d, 0x9d, 0xb5, 0xb1, - 0x74, 0x3a, 0xa8, 0x55, 0x12, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x8b, 0x3e, 0xed, 0x1e, 0x59, - 0x5e, 0x93, 0x74, 0x89, 0x67, 0x13, 0x2f, 0x60, 0x62, 0x23, 0x0b, 0x8d, 0x65, 0x9e, 0x8b, 0xec, - 0x0e, 0xe1, 0xf0, 0x08, 0x35, 0x7a, 0x0d, 0x96, 0xba, 0xd4, 0xef, 0x5a, 0x6d, 0xb1, 0x31, 0x7b, - 0xbe, 0xeb, 0xb4, 0xfa, 0x6a, 0x3b, 0x9f, 0x3c, 0x1d, 0xd4, 0x96, 0xf6, 0x86, 0x91, 0x67, 0x83, - 0xda, 0x05, 0xb1, 0x75, 0x1c, 0x12, 0x23, 0xf1, 0xa8, 0x18, 0xcd, 0x0d, 0xf2, 0x93, 0xdc, 0xc0, - 0xdc, 0x86, 0x42, 0xb3, 0xa7, 0xee, 0xc4, 0x0b, 0x50, 0xb0, 0xd5, 0x6f, 0xb5, 0xf3, 0xe1, 0xe5, - 0x8c, 0x68, 0xce, 0x06, 0xb5, 0x0a, 0x4f, 0x3f, 0xeb, 0x21, 0x00, 0x47, 0x2c, 0xe6, 0xe3, 0x50, - 0x10, 0x07, 0xcf, 0xee, 0x5e, 0x41, 0x8b, 0x90, 0xc5, 0xd6, 0x3d, 0x21, 0xa5, 0x8c, 0xf9, 0x4f, - 0x2d, 0x8a, 0xed, 0x02, 0xdc, 0x24, 0x41, 0x78, 0xf0, 0x1b, 0xb0, 0x10, 0x86, 0xf2, 0xe4, 0x0b, - 0x13, 0x79, 0x13, 0x4e, 0xa2, 0xf1, 0x30, 0xbd, 0xf9, 0x3a, 0x14, 0xc5, 0x2b, 0xc4, 0x9f, 0xf0, - 0x38, 0x5d, 0x30, 0xee, 0x93, 0x2e, 0x84, 0x39, 0x40, 0x66, 0x52, 0x0e, 0xa0, 0x99, 0xeb, 0x42, - 0x45, 0xf2, 0x86, 0x09, 0x52, 0x2a, 0x0d, 0x4f, 0x42, 0x21, 0x34, 0x53, 0x69, 0x89, 0x12, 
0xe3, - 0x50, 0x10, 0x8e, 0x28, 0x34, 0x6d, 0x47, 0x90, 0x78, 0x51, 0xd3, 0x29, 0xd3, 0xb2, 0x9f, 0xcc, - 0xfd, 0xb3, 0x1f, 0x4d, 0xd3, 0x0f, 0xa1, 0x3a, 0x29, 0x9b, 0x7e, 0x80, 0x37, 0x3f, 0xbd, 0x29, - 0xe6, 0x3b, 0x06, 0x2c, 0xea, 0x92, 0xd2, 0x1f, 0x5f, 0x7a, 0x25, 0xe7, 0x67, 0x7b, 0xda, 0x8e, - 0xfc, 0xca, 0x80, 0xe5, 0xc4, 0xd2, 0xa6, 0x3a, 0xf1, 0x29, 0x8c, 0xd2, 0x9d, 0x23, 0x3b, 0x85, - 0x73, 0xfc, 0x25, 0x03, 0x95, 0x5b, 0xd6, 0x01, 0x71, 0xf7, 0x89, 0x4b, 0x5a, 0x81, 0x4f, 0xd1, - 0x0f, 0xa0, 0xd4, 0xb1, 0x82, 0xd6, 0x91, 0x80, 0x86, 0x95, 0x41, 0x33, 0x5d, 0xb0, 0x4b, 0x48, - 0xaa, 0xef, 0xc4, 0x62, 0x6e, 0x78, 0x01, 0xed, 0x37, 0x2e, 0x28, 0x93, 0x4a, 0x1a, 0x06, 0xeb, - 0xda, 0x44, 0x39, 0x27, 0xbe, 0x6f, 0xbc, 0xd5, 0xe5, 0x69, 0xcb, 0xf4, 0x55, 0x64, 0xc2, 0x04, - 0x4c, 0xde, 0xec, 0x39, 0x94, 0x74, 0x88, 0x17, 0xc4, 0xe5, 0xdc, 0xce, 0x90, 0x7c, 0x3c, 0xa2, - 0x71, 0xe5, 0x45, 0x58, 0x1c, 0x36, 0x9e, 0xc7, 0x9f, 0x63, 0xd2, 0x97, 0xe7, 0x85, 0xf9, 0x4f, - 0xb4, 0x0c, 0xf9, 0x13, 0xcb, 0xed, 0xa9, 0xdb, 0x88, 0xe5, 0xc7, 0xf5, 0xcc, 0x35, 0xc3, 0xfc, - 0x8d, 0x01, 0xd5, 0x49, 0x86, 0xa0, 0x2f, 0x6a, 0x82, 0x1a, 0x25, 0x65, 0x55, 0xf6, 0x15, 0xd2, - 0x97, 0x52, 0x6f, 0x40, 0xc1, 0xef, 0xf2, 0x9c, 0xc2, 0xa7, 0xea, 0xd4, 0x9f, 0x08, 0x4f, 0x72, - 0x57, 0xc1, 0xcf, 0x06, 0xb5, 0x8b, 0x09, 0xf1, 0x21, 0x02, 0x47, 0xac, 0x3c, 0x52, 0x0b, 0x7b, - 0xf8, 0xeb, 0x11, 0x45, 0xea, 0xbb, 0x02, 0x82, 0x15, 0xc6, 0xfc, 0xbd, 0x01, 0x39, 0x91, 0x90, - 0xbf, 0x0e, 0x05, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0xa5, 0x20, 0xe7, 0xde, 0x21, - 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xef, 0x04, 0xa4, 0x13, 0x1e, 0xe4, - 0x53, 0x13, 0x45, 0xab, 0x46, 0x44, 0x1d, 0x5b, 0xf7, 0x6e, 0xbc, 0x15, 0x10, 0x8f, 0x1f, 0x46, - 0x7c, 0x35, 0xb6, 0xb9, 0x0c, 0x2c, 0x45, 0x99, 0xff, 0x32, 0x20, 0x52, 0xc5, 0x9d, 0x9f, 0x11, - 0xf7, 0xf0, 0x96, 0xe3, 0x1d, 0xab, 0x6d, 0x8d, 0xcc, 0xd9, 0x57, 0x70, 0x1c, 0x51, 0x8c, 0x7b, - 0x1e, 0x32, 0xd3, 0x3d, 0x0f, 0x5c, 0x61, 0xcb, 0xf7, 0x02, 0xc7, 0xeb, 0x8d, 0xdc, 0xb6, 0x4d, - 0x05, 0xc7, 0x11, 0x05, 0x4f, 0x44, 0x28, 0xe9, 0x58, 0x8e, 0xe7, 0x78, 0x6d, 0xbe, 0x88, 0x4d, - 0xbf, 0xe7, 0x05, 0xe2, 0x45, 0x56, 0x89, 0x08, 0x1e, 0xc1, 0xe2, 0x31, 0x1c, 0xe6, 0xbf, 0x73, - 0x50, 0xe2, 0x6b, 0x0e, 0xdf, 0xb9, 0xe7, 0xa1, 0xe2, 0xea, 0x5e, 0xa0, 0xd6, 0x7e, 0x51, 0x99, - 0x92, 0xbc, 0xd7, 0x38, 0x49, 0xcb, 0x99, 0x45, 0x0a, 0x15, 0x31, 0x67, 0x92, 0xcc, 0x5b, 0x3a, - 0x12, 0x27, 0x69, 0x79, 0xf4, 0xba, 0xc7, 0xef, 0x87, 0xca, 0x4c, 0xa2, 0x23, 0xfa, 0x26, 0x07, - 0x62, 0x89, 0x43, 0x3b, 0x70, 0xc1, 0x72, 0x5d, 0xff, 0x9e, 0x00, 0x36, 0x7c, 0xff, 0xb8, 0x63, - 0xd1, 0x63, 0x26, 0x8a, 0xe9, 0x42, 0xe3, 0x0b, 0x8a, 0xe5, 0xc2, 0xc6, 0x28, 0x09, 0x1e, 0xc7, - 0x37, 0xee, 0xd8, 0x72, 0x53, 0x1e, 0xdb, 0x11, 0x2c, 0x0f, 0x81, 0xc4, 0x2d, 0x57, 0x95, 0xed, - 0x33, 0x4a, 0xce, 0x32, 0x1e, 0x43, 0x73, 0x36, 0x01, 0x8e, 0xc7, 0x4a, 0x44, 0xd7, 0x61, 0x9e, - 0x7b, 0xb2, 0xdf, 0x0b, 0xc2, 0xbc, 0x33, 0x2f, 0x8e, 0x1b, 0x9d, 0x0e, 0x6a, 0xf3, 0xb7, 0x13, - 0x18, 0x3c, 0x44, 0xc9, 0x37, 0xd7, 0x75, 0x3a, 0x4e, 0x50, 0x9d, 0x13, 0x2c, 0xd1, 0xe6, 0xde, - 0xe2, 0x40, 0x2c, 0x71, 0x09, 0x0f, 0x2c, 0x9c, 0xeb, 0x81, 0x9b, 0xb0, 0xc4, 0x88, 0x67, 0x6f, - 0x7b, 0x4e, 0xe0, 0x58, 0xee, 0x8d, 0x13, 0x91, 0x55, 0x96, 0xc4, 0x41, 0x5c, 0xe4, 0x29, 0xe1, - 0xfe, 0x30, 0x12, 0x8f, 0xd2, 0x9b, 0x7f, 0xce, 0x02, 0x92, 0x09, 0xbb, 0x2d, 0x93, 0x32, 0x19, - 0x17, 0x79, 0x59, 0xa1, 0x12, 0x7e, 0x63, 0xa8, 0xac, 0x50, 0xb9, 0x7e, 0x88, 0x47, 0x3b, 0x50, - 0x94, 0xf1, 0x29, 
0xbe, 0x73, 0xeb, 0x8a, 0xb8, 0xb8, 0x1b, 0x22, 0xce, 0x06, 0xb5, 0x95, 0x84, - 0x9a, 0x08, 0x23, 0x4a, 0xbe, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1d, 0xbd, 0xe9, 0x57, 0x8c, - 0x5b, 0x3f, 0x71, 0xf9, 0x8e, 0x35, 0x2a, 0xf4, 0x12, 0xe4, 0x82, 0x4f, 0x57, 0x96, 0x15, 0x44, - 0xd5, 0xc9, 0x8b, 0x30, 0x21, 0x81, 0x6b, 0x17, 0x97, 0x82, 0x71, 0xb3, 0x54, 0x45, 0x15, 0x69, - 0xdf, 0x8a, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xc2, 0xa1, 0xca, 0x67, 0xc5, 0xe9, 0xa6, 0x8e, - 0xb3, 0x61, 0x16, 0x2c, 0xfb, 0x0e, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x89, 0xf5, 0x0e, - 0xa2, 0x14, 0x40, 0xba, 0x44, 0xf4, 0xde, 0xee, 0xc7, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x42, 0x71, - 0xc7, 0x69, 0x51, 0x5f, 0x14, 0x92, 0x4f, 0xc0, 0x1c, 0x4b, 0x54, 0x49, 0xd1, 0x49, 0x86, 0xae, - 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x5a, 0x28, 0x1f, 0xfb, 0xe8, 0xab, 0x1c, 0x88, - 0x25, 0xee, 0xfa, 0x32, 0xcf, 0x32, 0x7e, 0xfa, 0x7e, 0x6d, 0xe6, 0xdd, 0xf7, 0x6b, 0x33, 0xef, - 0xbd, 0xaf, 0x32, 0x8e, 0x3f, 0x00, 0xc0, 0xee, 0xc1, 0xf7, 0x48, 0x4b, 0xc6, 0xee, 0x54, 0xbd, - 0xc1, 0xb0, 0x25, 0x2d, 0x7a, 0x83, 0x99, 0xa1, 0xcc, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, 0x87, - 0x62, 0xd4, 0xf5, 0x53, 0xfe, 0xb1, 0x14, 0xfa, 0x5b, 0xd4, 0x1a, 0xc4, 0x31, 0x4d, 0xe2, 0x21, - 0xc9, 0x9d, 0xfb, 0x90, 0x34, 0x20, 0xdb, 0x73, 0x6c, 0x55, 0x75, 0x3f, 0x1d, 0x3e, 0xe4, 0x77, - 0xb6, 0x9b, 0x67, 0x83, 0xda, 0x23, 0x93, 0x9a, 0xed, 0x41, 0xbf, 0x4b, 0x58, 0xfd, 0xce, 0x76, - 0x13, 0x73, 0xe6, 0x71, 0x51, 0x6d, 0x76, 0xca, 0xa8, 0x76, 0x15, 0xa0, 0x1d, 0xf7, 0x2e, 0x64, - 0xd0, 0x88, 0x1c, 0x51, 0xeb, 0x59, 0x68, 0x54, 0x88, 0xc1, 0x52, 0x8b, 0xd7, 0xf7, 0xaa, 0x87, - 0xc0, 0x02, 0xab, 0x23, 0xbb, 0xa1, 0xd3, 0xdd, 0x89, 0x4b, 0x4a, 0xcd, 0xd2, 0xe6, 0xb0, 0x30, - 0x3c, 0x2a, 0x1f, 0xf9, 0xb0, 0x64, 0xab, 0x32, 0x33, 0x56, 0x5a, 0x9c, 0x5a, 0xa9, 0x88, 0x58, - 0xcd, 0x61, 0x41, 0x78, 0x54, 0x36, 0xfa, 0x2e, 0xac, 0x84, 0xc0, 0xd1, 0x5a, 0x5f, 0x44, 0xfd, - 0x6c, 0x63, 0xf5, 0x74, 0x50, 0x5b, 0x69, 0x4e, 0xa4, 0xc2, 0xf7, 0x91, 0x80, 0x6c, 0x98, 0x75, - 0x65, 0x96, 0x5c, 0x12, 0x99, 0xcd, 0xd7, 0xd2, 0xad, 0x22, 0xf6, 0xfe, 0xba, 0x9e, 0x1d, 0x47, - 0x7d, 0x1b, 0x95, 0x18, 0x2b, 0xd9, 0xe8, 0x2d, 0x28, 0x59, 0x9e, 0xe7, 0x07, 0x96, 0xec, 0x3e, - 0x94, 0x85, 0xaa, 0x8d, 0xa9, 0x55, 0x6d, 0xc4, 0x32, 0x86, 0xb2, 0x71, 0x0d, 0x83, 0x75, 0x55, - 0xe8, 0x1e, 0x2c, 0xf8, 0xf7, 0x3c, 0x42, 0x31, 0x39, 0x24, 0x94, 0x78, 0x2d, 0xc2, 0xaa, 0x15, - 0xa1, 0xfd, 0x99, 0x94, 0xda, 0x13, 0xcc, 0xb1, 0x4b, 0x27, 0xe1, 0x0c, 0x0f, 0x6b, 0x41, 0x75, - 0x1e, 0x5b, 0x3d, 0xcb, 0x75, 0xbe, 0x4f, 0x28, 0xab, 0xce, 0xc7, 0x0d, 0xeb, 0xad, 0x08, 0x8a, - 0x35, 0x0a, 0xd4, 0x83, 0x4a, 0x47, 0x7f, 0x32, 0xaa, 0x4b, 0xc2, 0xcc, 0x6b, 0xe9, 0xcc, 0x1c, - 0x7d, 0xd4, 0xe2, 0x34, 0x28, 0x81, 0xc3, 0x49, 0x2d, 0x2b, 0xcf, 0x41, 0xe9, 0x53, 0x56, 0x08, - 0xbc, 0xc2, 0x18, 0x3e, 0x90, 0xa9, 0x2a, 0x8c, 0x3f, 0x66, 0x60, 0x3e, 0xb9, 0x8d, 0x43, 0xcf, - 0x61, 0x3e, 0xd5, 0x73, 0x18, 0xd6, 0xb2, 0xc6, 0xc4, 0xc9, 0x45, 0x18, 0x9f, 0xb3, 0x13, 0xe3, - 0xb3, 0x0a, 0x83, 0xb9, 0x07, 0x09, 0x83, 0x75, 0x00, 0x9e, 0xac, 0x50, 0xdf, 0x75, 0x09, 0x15, - 0x11, 0xb0, 0xa0, 0x26, 0x14, 0x11, 0x14, 0x6b, 0x14, 0x3c, 0xa5, 0x3e, 0x70, 0xfd, 0xd6, 0xb1, - 0xd8, 0x82, 0xf0, 0xf6, 0x8a, 0xd8, 0x57, 0x90, 0x29, 0x75, 0x63, 0x04, 0x8b, 0xc7, 0x70, 0x98, - 0x7d, 0xb8, 0xb8, 0x67, 0x51, 0x9e, 0xe4, 0xc4, 0x37, 0x45, 0xd4, 0x2c, 0x6f, 0x8c, 0x54, 0x44, - 0x4f, 0x4f, 0x7b, 0xe3, 0xe2, 0xcd, 0x8f, 0x61, 0x71, 0x55, 0x64, 0xfe, 0xd5, 0x80, 0x4b, 0x63, - 0x75, 0x7f, 0x06, 0x15, 0xd9, 0x1b, 0xc9, 
0x8a, 0xec, 0xf9, 0x94, 0xad, 0xcc, 0x71, 0xd6, 0x4e, - 0xa8, 0xcf, 0xe6, 0x20, 0xbf, 0xc7, 0x33, 0x61, 0xf3, 0x43, 0x03, 0xca, 0xe2, 0xd7, 0x34, 0x9d, - 0xe4, 0x5a, 0x72, 0xc0, 0x50, 0x7c, 0x78, 0xc3, 0x85, 0x87, 0xd1, 0x6a, 0x7e, 0xc7, 0x80, 0x64, - 0x0f, 0x17, 0xbd, 0x28, 0xaf, 0x80, 0x11, 0x35, 0x59, 0xa7, 0x74, 0xff, 0x17, 0x26, 0x95, 0xa4, - 0x17, 0x52, 0x75, 0x2b, 0x9f, 0x84, 0x22, 0xf6, 0xfd, 0x60, 0xcf, 0x0a, 0x8e, 0x18, 0xdf, 0xbb, - 0x2e, 0xff, 0xa1, 0xb6, 0x57, 0xec, 0x9d, 0xc0, 0x60, 0x09, 0x37, 0x7f, 0x6e, 0xc0, 0xa5, 0x89, - 0x73, 0x23, 0x1e, 0x45, 0x5a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, 0x78, - 0x2d, 0x99, 0x18, 0x36, 0x0d, 0xd7, 0x92, 0x09, 0x6d, 0x38, 0x49, 0x6b, 0xfe, 0x33, 0x03, 0x6a, - 0x50, 0xf3, 0x3f, 0x76, 0xfa, 0xc7, 0x87, 0xc6, 0x44, 0xf3, 0xc9, 0x31, 0x51, 0x34, 0x13, 0xd2, - 0xe6, 0x24, 0xd9, 0xfb, 0xcf, 0x49, 0xd0, 0xb3, 0xd1, 0xe8, 0x45, 0xfa, 0xd0, 0x6a, 0x72, 0xf4, - 0x72, 0x36, 0xa8, 0x95, 0x95, 0xf0, 0xe4, 0x28, 0xe6, 0x35, 0x98, 0xb3, 0x49, 0x60, 0x39, 0xae, - 0xac, 0x0b, 0x53, 0x0f, 0x13, 0xa4, 0xb0, 0xa6, 0x64, 0x6d, 0x94, 0xb8, 0x4d, 0xea, 0x03, 0x87, - 0x02, 0x79, 0xc0, 0x6e, 0xf9, 0xb6, 0xac, 0x48, 0xf2, 0x71, 0xc0, 0xde, 0xf4, 0x6d, 0x82, 0x05, - 0xc6, 0x7c, 0xd7, 0x80, 0x92, 0x94, 0xb4, 0x69, 0xf5, 0x18, 0x41, 0x57, 0xa2, 0x55, 0xc8, 0xe3, - 0xbe, 0xa4, 0xcf, 0xd8, 0xce, 0x06, 0xb5, 0xa2, 0x20, 0x13, 0xc5, 0xcc, 0x98, 0x59, 0x52, 0xe6, - 0x9c, 0x3d, 0x7a, 0x14, 0xf2, 0xe2, 0x02, 0xa9, 0xcd, 0x8c, 0x87, 0x85, 0x1c, 0x88, 0x25, 0xce, - 0xfc, 0x38, 0x03, 0x95, 0xc4, 0xe2, 0x52, 0xd4, 0x05, 0x51, 0x0b, 0x35, 0x93, 0xa2, 0x2d, 0x3f, - 0x79, 0x34, 0xaf, 0x9e, 0xaf, 0xd9, 0x07, 0x79, 0xbe, 0xbe, 0x0d, 0xb3, 0x2d, 0xbe, 0x47, 0xe1, - 0x3f, 0x3d, 0xae, 0x4c, 0x73, 0x9c, 0x62, 0x77, 0x63, 0x6f, 0x14, 0x9f, 0x0c, 0x2b, 0x81, 0xe8, - 0x26, 0x2c, 0x51, 0x12, 0xd0, 0xfe, 0xc6, 0x61, 0x40, 0xa8, 0xde, 0x4c, 0xc8, 0xc7, 0xd9, 0x37, - 0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0xe6, 0x01, 0x94, 0x6f, 0x5b, 0x07, 0x6e, 0x34, 0x1e, 0xc3, 0x50, - 0x71, 0xbc, 0x96, 0xdb, 0xb3, 0x89, 0x0c, 0xe8, 0x61, 0xf4, 0x0a, 0x2f, 0xed, 0xb6, 0x8e, 0x3c, - 0x1b, 0xd4, 0x2e, 0x24, 0x00, 0x72, 0x1e, 0x84, 0x93, 0x22, 0x4c, 0x17, 0x72, 0x9f, 0x61, 0x25, - 0xf9, 0x1d, 0x28, 0xc6, 0xb9, 0xfe, 0x43, 0x56, 0x69, 0xbe, 0x01, 0x05, 0xee, 0xf1, 0x61, 0x8d, - 0x7a, 0x4e, 0x96, 0x94, 0xcc, 0xbd, 0x32, 0x69, 0x72, 0x2f, 0x31, 0x64, 0xbd, 0xd3, 0xb5, 0x1f, - 0x70, 0xc8, 0x9a, 0x79, 0x90, 0x97, 0x2f, 0x3b, 0xe5, 0xcb, 0x77, 0x15, 0xe4, 0x1f, 0x51, 0xf8, - 0x23, 0x23, 0x13, 0x08, 0xed, 0x91, 0xd1, 0xdf, 0x7f, 0x6d, 0xc2, 0xf0, 0x63, 0x03, 0x40, 0xb4, - 0xf2, 0x44, 0x1b, 0x29, 0xc5, 0x38, 0xff, 0x0e, 0xcc, 0xfa, 0xd2, 0x23, 0xe5, 0xa0, 0x75, 0xca, - 0x7e, 0x71, 0x74, 0x91, 0xa4, 0x4f, 0x62, 0x25, 0xac, 0xf1, 0xf2, 0x07, 0x9f, 0xac, 0xce, 0x7c, - 0xf8, 0xc9, 0xea, 0xcc, 0x47, 0x9f, 0xac, 0xce, 0xbc, 0x7d, 0xba, 0x6a, 0x7c, 0x70, 0xba, 0x6a, - 0x7c, 0x78, 0xba, 0x6a, 0x7c, 0x74, 0xba, 0x6a, 0x7c, 0x7c, 0xba, 0x6a, 0xbc, 0xfb, 0xf7, 0xd5, - 0x99, 0xd7, 0x1e, 0x4b, 0xf3, 0x07, 0xbf, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x82, 0xff, - 0xd4, 0x07, 0x28, 0x00, 0x00, + // 2928 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0x4d, 0x6c, 0x24, 0x47, + 0xd5, 0xee, 0xf9, 0xb1, 0x67, 0xde, 0x78, 0xfc, 0x53, 0xeb, 0xfd, 0xbe, 0x59, 0x23, 0x3c, 0x4e, + 0x27, 0x8a, 0x36, 0x90, 0x8c, 0x77, 0x97, 0x25, 0xda, 0x6c, 0x48, 0xc0, 0xe3, 0x59, 0x6f, 0x9c, + 0xac, 0x63, 0xab, 0xbc, 0xbb, 0x81, 0x10, 0xa1, 0x94, 0xa7, 0xcb, 0xe3, 0xc6, 0x3d, 
0xdd, 0x93, + 0xaa, 0x1e, 0x6f, 0x06, 0x0e, 0xe4, 0x00, 0x12, 0x48, 0x28, 0x0a, 0x37, 0x4e, 0x28, 0x11, 0x9c, + 0x38, 0x21, 0x4e, 0xdc, 0x41, 0x22, 0xc7, 0x20, 0x2e, 0x91, 0x40, 0xa3, 0xac, 0x39, 0x70, 0x44, + 0x5c, 0x2d, 0x24, 0x50, 0xfd, 0xf4, 0xdf, 0xfc, 0xac, 0x7b, 0x76, 0x97, 0x88, 0xdb, 0xf4, 0xfb, + 0xaf, 0xaa, 0xf7, 0x5e, 0xbd, 0xf7, 0x6a, 0xe0, 0xea, 0xd1, 0x35, 0x5e, 0xb3, 0xbd, 0x35, 0xd2, + 0xb1, 0xdb, 0xa4, 0x79, 0x68, 0xbb, 0x94, 0xf5, 0xd6, 0x3a, 0x47, 0x2d, 0x01, 0xe0, 0x6b, 0x6d, + 0xea, 0x93, 0xb5, 0xe3, 0xcb, 0x6b, 0x2d, 0xea, 0x52, 0x46, 0x7c, 0x6a, 0xd5, 0x3a, 0xcc, 0xf3, + 0x3d, 0xf4, 0x94, 0xe2, 0xaa, 0xc5, 0xb9, 0x6a, 0x9d, 0xa3, 0x96, 0x00, 0xf0, 0x9a, 0xe0, 0xaa, + 0x1d, 0x5f, 0x5e, 0x7e, 0xae, 0x65, 0xfb, 0x87, 0xdd, 0xfd, 0x5a, 0xd3, 0x6b, 0xaf, 0xb5, 0xbc, + 0x96, 0xb7, 0x26, 0x99, 0xf7, 0xbb, 0x07, 0xf2, 0x4b, 0x7e, 0xc8, 0x5f, 0x4a, 0xe8, 0xf2, 0xda, + 0x38, 0x53, 0x58, 0xd7, 0xf5, 0xed, 0x36, 0x1d, 0xb4, 0x62, 0xf9, 0xf9, 0xb3, 0x18, 0x78, 0xf3, + 0x90, 0xb6, 0xc9, 0x20, 0x9f, 0xf9, 0xc7, 0x2c, 0x14, 0xd6, 0x77, 0xb7, 0x6e, 0x32, 0xaf, 0xdb, + 0x41, 0xab, 0x90, 0x73, 0x49, 0x9b, 0x56, 0x8c, 0x55, 0xe3, 0x62, 0xb1, 0x3e, 0xfb, 0x71, 0xbf, + 0x3a, 0x75, 0xd2, 0xaf, 0xe6, 0x5e, 0x27, 0x6d, 0x8a, 0x25, 0x06, 0x39, 0x50, 0x38, 0xa6, 0x8c, + 0xdb, 0x9e, 0xcb, 0x2b, 0x99, 0xd5, 0xec, 0xc5, 0xd2, 0x95, 0x97, 0x6b, 0x69, 0xd6, 0x5f, 0x93, + 0x0a, 0xee, 0x2a, 0xd6, 0x4d, 0x8f, 0x35, 0x6c, 0xde, 0xf4, 0x8e, 0x29, 0xeb, 0xd5, 0x17, 0xb4, + 0x96, 0x82, 0x46, 0x72, 0x1c, 0x6a, 0x40, 0x3f, 0x34, 0x60, 0xa1, 0xc3, 0xe8, 0x01, 0x65, 0x8c, + 0x5a, 0x1a, 0x5f, 0xc9, 0xae, 0x1a, 0x8f, 0x41, 0x6d, 0x45, 0xab, 0x5d, 0xd8, 0x1d, 0x90, 0x8f, + 0x87, 0x34, 0xa2, 0x5f, 0x1a, 0xb0, 0xcc, 0x29, 0x3b, 0xa6, 0x6c, 0xdd, 0xb2, 0x18, 0xe5, 0xbc, + 0xde, 0xdb, 0x70, 0x6c, 0xea, 0xfa, 0x1b, 0x5b, 0x0d, 0xcc, 0x2b, 0x39, 0xb9, 0x0f, 0x5f, 0x4f, + 0x67, 0xd0, 0xde, 0x38, 0x39, 0x75, 0x53, 0x5b, 0xb4, 0x3c, 0x96, 0x84, 0xe3, 0x07, 0x98, 0x61, + 0x1e, 0xc0, 0x6c, 0x70, 0x90, 0xb7, 0x6c, 0xee, 0xa3, 0xbb, 0x30, 0xdd, 0x12, 0x1f, 0xbc, 0x62, + 0x48, 0x03, 0x6b, 0xe9, 0x0c, 0x0c, 0x64, 0xd4, 0xe7, 0xb4, 0x3d, 0xd3, 0xf2, 0x93, 0x63, 0x2d, + 0xcd, 0xfc, 0x49, 0x0e, 0x4a, 0xeb, 0xbb, 0x5b, 0x98, 0x72, 0xaf, 0xcb, 0x9a, 0x34, 0x85, 0xd3, + 0x5c, 0x83, 0x59, 0x6e, 0xbb, 0xad, 0xae, 0x43, 0x98, 0x80, 0x56, 0xa6, 0x25, 0xe5, 0x92, 0xa6, + 0x9c, 0xdd, 0x8b, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x01, 0x10, 0x12, 0x78, 0x87, 0x34, 0xa9, 0x55, + 0xc9, 0xac, 0x1a, 0x17, 0x0b, 0x75, 0xa4, 0xf9, 0xe0, 0xf5, 0x10, 0x83, 0x63, 0x54, 0xe8, 0x49, + 0xc8, 0x4b, 0x4b, 0x2b, 0x05, 0xa9, 0xa6, 0xac, 0xc9, 0xf3, 0x72, 0x19, 0x58, 0xe1, 0xd0, 0x33, + 0x30, 0xa3, 0xbd, 0xac, 0x52, 0x94, 0x64, 0xf3, 0x9a, 0x6c, 0x26, 0x70, 0x83, 0x00, 0x2f, 0xd6, + 0x77, 0x64, 0xbb, 0x96, 0xf4, 0xbb, 0xd8, 0xfa, 0x5e, 0xb3, 0x5d, 0x0b, 0x4b, 0x0c, 0xba, 0x05, + 0xf9, 0x63, 0xca, 0xf6, 0x85, 0x27, 0x08, 0xd7, 0xfc, 0x72, 0xba, 0x8d, 0xbe, 0x2b, 0x58, 0xea, + 0x45, 0x61, 0x9a, 0xfc, 0x89, 0x95, 0x10, 0x54, 0x03, 0xe0, 0x87, 0x1e, 0xf3, 0xe5, 0xf2, 0x2a, + 0xf9, 0xd5, 0xec, 0xc5, 0x62, 0x7d, 0x4e, 0xac, 0x77, 0x2f, 0x84, 0xe2, 0x18, 0x85, 0xa0, 0x6f, + 0x12, 0x9f, 0xb6, 0x3c, 0x66, 0x53, 0x5e, 0x99, 0x89, 0xe8, 0x37, 0x42, 0x28, 0x8e, 0x51, 0xa0, + 0x57, 0x01, 0x71, 0xdf, 0x63, 0xa4, 0x45, 0xf5, 0x52, 0x5f, 0x21, 0xfc, 0xb0, 0x02, 0x72, 0x75, + 0xcb, 0x7a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x6b, 0xc0, 0x7c, 0xcc, 0x17, + 0xa4, 0xdf, 0x5d, 0x83, 0xd9, 0x56, 0x2c, 0xea, 0xb4, 0x5f, 0x84, 0xa7, 0x1d, 0x8f, 0x48, 0x9c, + 0xa0, 0x44, 
0x14, 0x8a, 0x4c, 0x4b, 0x0a, 0xb2, 0xcb, 0xe5, 0xd4, 0x4e, 0x1b, 0xd8, 0x10, 0x69, + 0x8a, 0x01, 0x39, 0x8e, 0x24, 0x9b, 0x7f, 0x37, 0xa4, 0x03, 0x07, 0xf9, 0x06, 0x5d, 0x8c, 0xe5, + 0x34, 0x43, 0x6e, 0xdf, 0xec, 0x98, 0x7c, 0x74, 0x46, 0x22, 0xc8, 0xfc, 0x4f, 0x24, 0x82, 0xeb, + 0x85, 0x9f, 0x7f, 0x58, 0x9d, 0x7a, 0xef, 0xaf, 0xab, 0x53, 0xe6, 0xcf, 0x0c, 0x98, 0x5d, 0xef, + 0x74, 0x9c, 0xde, 0x4e, 0xc7, 0x97, 0x0b, 0x30, 0x61, 0xda, 0x62, 0x3d, 0xdc, 0x75, 0xf5, 0x42, + 0x41, 0xc4, 0x77, 0x43, 0x42, 0xb0, 0xc6, 0x88, 0xf8, 0x39, 0xf0, 0x58, 0x93, 0xea, 0x70, 0x0b, + 0xe3, 0x67, 0x53, 0x00, 0xb1, 0xc2, 0x89, 0x43, 0x3e, 0xb0, 0xa9, 0x63, 0x6d, 0x13, 0x97, 0xb4, + 0x28, 0xd3, 0xc1, 0x11, 0x6e, 0xfd, 0x66, 0x0c, 0x87, 0x13, 0x94, 0xe6, 0xbf, 0x33, 0x50, 0xdc, + 0xf0, 0x5c, 0xcb, 0xf6, 0x75, 0x70, 0xf9, 0xbd, 0xce, 0x50, 0xf2, 0xb8, 0xdd, 0xeb, 0x50, 0x2c, + 0x31, 0xe8, 0x05, 0x98, 0xe6, 0x3e, 0xf1, 0xbb, 0x5c, 0xda, 0x53, 0xac, 0x3f, 0x11, 0xa4, 0xa5, + 0x3d, 0x09, 0x3d, 0xed, 0x57, 0xe7, 0x43, 0x71, 0x0a, 0x84, 0x35, 0x83, 0xf0, 0x74, 0x6f, 0x5f, + 0x6e, 0x94, 0x75, 0x53, 0x5d, 0x7b, 0xc1, 0xfd, 0x91, 0x8d, 0x3c, 0x7d, 0x67, 0x88, 0x02, 0x8f, + 0xe0, 0x42, 0xc7, 0x80, 0x1c, 0xc2, 0xfd, 0xdb, 0x8c, 0xb8, 0x5c, 0xea, 0xba, 0x6d, 0xb7, 0xa9, + 0x0e, 0xf8, 0x2f, 0xa5, 0x3b, 0x71, 0xc1, 0x11, 0xe9, 0xbd, 0x35, 0x24, 0x0d, 0x8f, 0xd0, 0x80, + 0x9e, 0x86, 0x69, 0x46, 0x09, 0xf7, 0xdc, 0x4a, 0x5e, 0x2e, 0x3f, 0xcc, 0xca, 0x58, 0x42, 0xb1, + 0xc6, 0x8a, 0x84, 0xd6, 0xa6, 0x9c, 0x93, 0x56, 0x90, 0x5e, 0xc3, 0x84, 0xb6, 0xad, 0xc0, 0x38, + 0xc0, 0x9b, 0xbf, 0x31, 0xa0, 0xbc, 0xc1, 0x28, 0xf1, 0xe9, 0x24, 0x6e, 0xf1, 0xd0, 0x27, 0x8e, + 0xd6, 0x61, 0x5e, 0x7e, 0xdf, 0x25, 0x8e, 0x6d, 0xa9, 0x33, 0xc8, 0x49, 0xe6, 0xff, 0xd7, 0xcc, + 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x9d, 0x83, 0x72, 0x83, 0x3a, 0x34, 0x32, 0x79, + 0x13, 0x50, 0x8b, 0x91, 0x26, 0xdd, 0xa5, 0xcc, 0xf6, 0xac, 0x3d, 0xda, 0xf4, 0x5c, 0x8b, 0x4b, + 0x37, 0xca, 0xd6, 0xff, 0x4f, 0xec, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x1c, 0x28, 0x77, + 0x98, 0xfc, 0x2d, 0xf7, 0x5c, 0x79, 0x59, 0xe9, 0xca, 0x57, 0xd2, 0x1d, 0xe9, 0x6e, 0x9c, 0xb5, + 0xbe, 0x78, 0xd2, 0xaf, 0x96, 0x13, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x0b, 0x1e, 0xeb, 0x1c, + 0x12, 0xb7, 0x41, 0x3b, 0xd4, 0xb5, 0xa8, 0xeb, 0x73, 0xb9, 0x91, 0x85, 0xfa, 0x92, 0xa8, 0x45, + 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x09, 0x8b, 0x1d, 0xe6, 0x75, 0x48, 0x4b, 0x6e, 0xcc, + 0xae, 0xe7, 0xd8, 0xcd, 0x9e, 0xde, 0xce, 0x67, 0x4f, 0xfa, 0xd5, 0xc5, 0xdd, 0x41, 0xe4, 0x69, + 0xbf, 0x7a, 0x4e, 0x6e, 0x9d, 0x80, 0x44, 0x48, 0x3c, 0x2c, 0x26, 0xe6, 0x06, 0xf9, 0xb1, 0x6e, + 0xf0, 0xa1, 0x01, 0x97, 0xec, 0x96, 0xeb, 0x31, 0x2a, 0xae, 0x08, 0x8a, 0x29, 0xb1, 0x6e, 0x30, + 0xe6, 0xb1, 0x37, 0x6c, 0xff, 0x70, 0xc3, 0xe9, 0x72, 0x9f, 0xb2, 0x3a, 0xa3, 0xe4, 0xc8, 0x76, + 0x5b, 0xbb, 0x9e, 0x4f, 0x5d, 0xdf, 0x26, 0x8e, 0xf4, 0xc8, 0x42, 0xfd, 0xea, 0x49, 0xbf, 0x7a, + 0x69, 0x6b, 0x42, 0x5e, 0x3c, 0xb1, 0x36, 0x73, 0x0b, 0x0a, 0x8d, 0xae, 0x0e, 0xdb, 0x97, 0xa0, + 0x60, 0xe9, 0xdf, 0xda, 0x39, 0x82, 0xfc, 0x11, 0xd2, 0x9c, 0xf6, 0xab, 0x65, 0x51, 0x21, 0xd7, + 0x02, 0x00, 0x0e, 0x59, 0xcc, 0x5f, 0x19, 0x50, 0x91, 0xce, 0xb9, 0x47, 0x1d, 0xda, 0xf4, 0x3d, + 0x86, 0xe9, 0x3b, 0x5d, 0x9b, 0xd1, 0x36, 0x75, 0x7d, 0xf4, 0x45, 0xc8, 0x1e, 0xd1, 0x9e, 0x4e, + 0x5d, 0x25, 0x2d, 0x36, 0xfb, 0x1a, 0xed, 0x61, 0x01, 0x47, 0x37, 0xa0, 0xe0, 0x75, 0x44, 0xfa, + 0xf0, 0x98, 0x4e, 0x5d, 0xcf, 0x04, 0xaa, 0x77, 0x34, 0xfc, 0xb4, 0x5f, 0x3d, 0x9f, 0x10, 0x1f, + 0x20, 0x70, 0xc8, 0x2a, 0x0e, 0xe5, 
0x98, 0x38, 0x5d, 0x2a, 0x1c, 0x25, 0x3c, 0x94, 0xbb, 0x12, + 0x82, 0x35, 0xc6, 0x7c, 0x1a, 0x0a, 0x52, 0x0c, 0xbf, 0x7b, 0x19, 0x2d, 0x40, 0x16, 0x93, 0x7b, + 0xd2, 0xaa, 0x59, 0x2c, 0x7e, 0xc6, 0xee, 0x83, 0x1d, 0x80, 0x9b, 0xd4, 0x0f, 0x42, 0x68, 0x1d, + 0xe6, 0x83, 0x4b, 0x31, 0x79, 0x57, 0x87, 0x71, 0x89, 0x93, 0x68, 0x3c, 0x48, 0x6f, 0xbe, 0x05, + 0x45, 0x79, 0x9f, 0x8b, 0x62, 0x28, 0x2a, 0xbc, 0x8c, 0x07, 0x14, 0x5e, 0x41, 0x35, 0x95, 0x19, + 0x57, 0x4d, 0xc5, 0xcc, 0x75, 0xa0, 0xac, 0x78, 0x83, 0x52, 0x33, 0x95, 0x86, 0x67, 0xa1, 0x10, + 0x98, 0xa9, 0xb5, 0x84, 0x2d, 0x46, 0x20, 0x08, 0x87, 0x14, 0x31, 0x6d, 0x87, 0x90, 0xa8, 0x4d, + 0xd2, 0x29, 0x8b, 0xd5, 0x91, 0x99, 0x07, 0xd7, 0x91, 0x31, 0x4d, 0x3f, 0x80, 0xca, 0xb8, 0xbe, + 0xe4, 0x11, 0xaa, 0xa7, 0xf4, 0xa6, 0x98, 0xef, 0x1b, 0xb0, 0x10, 0x97, 0x94, 0xfe, 0xf8, 0xd2, + 0x2b, 0x39, 0xbb, 0x6e, 0x8e, 0xed, 0xc8, 0x2f, 0x0c, 0x58, 0x4a, 0x2c, 0x6d, 0xa2, 0x13, 0x9f, + 0xc0, 0xa8, 0xb8, 0x73, 0x64, 0x27, 0x70, 0x8e, 0x3f, 0x67, 0xa0, 0x7c, 0x8b, 0xec, 0x53, 0x27, + 0x88, 0x54, 0xf4, 0x7d, 0x28, 0xb5, 0x89, 0xdf, 0x3c, 0x94, 0xd0, 0xa0, 0xc7, 0x6a, 0xa4, 0xbb, + 0x36, 0x12, 0x92, 0x6a, 0xdb, 0x91, 0x98, 0x1b, 0xae, 0xcf, 0x7a, 0xf5, 0x73, 0xda, 0xa4, 0x52, + 0x0c, 0x83, 0xe3, 0xda, 0x64, 0x63, 0x2c, 0xbf, 0x6f, 0xbc, 0xdb, 0x11, 0x05, 0xe0, 0xe4, 0xfd, + 0x78, 0xc2, 0x84, 0x58, 0x56, 0x8b, 0x1a, 0xe3, 0xed, 0x01, 0xf9, 0x78, 0x48, 0xe3, 0xf2, 0xcb, + 0xb0, 0x30, 0x68, 0xbc, 0xc8, 0x3f, 0x61, 0x56, 0x54, 0x89, 0x70, 0x09, 0xf2, 0x32, 0x4f, 0xa9, + 0xc3, 0xc1, 0xea, 0xe3, 0x7a, 0xe6, 0x9a, 0x21, 0xd3, 0xeb, 0x38, 0x43, 0x1e, 0x53, 0x7a, 0x4d, + 0x88, 0x7f, 0xc8, 0xf4, 0xfa, 0x3b, 0x03, 0x72, 0xb2, 0xb5, 0x79, 0x0b, 0x0a, 0x62, 0xff, 0x2c, + 0xe2, 0x13, 0x69, 0x57, 0xea, 0xa6, 0x5a, 0x70, 0x6f, 0x53, 0x9f, 0x44, 0xde, 0x16, 0x40, 0x70, + 0x28, 0x11, 0x61, 0xc8, 0xdb, 0x3e, 0x6d, 0x07, 0x07, 0xf9, 0xdc, 0x58, 0xd1, 0x7a, 0xa4, 0x53, + 0xc3, 0xe4, 0xde, 0x8d, 0x77, 0x7d, 0xea, 0x8a, 0xc3, 0x88, 0x42, 0x63, 0x4b, 0xc8, 0xc0, 0x4a, + 0x94, 0xf9, 0x4f, 0x03, 0x42, 0x55, 0xc2, 0xf9, 0x39, 0x75, 0x0e, 0x6e, 0xd9, 0xee, 0x91, 0xde, + 0xd6, 0xd0, 0x9c, 0x3d, 0x0d, 0xc7, 0x21, 0xc5, 0xa8, 0xeb, 0x21, 0x33, 0xd9, 0xf5, 0x20, 0x14, + 0x36, 0x3d, 0xd7, 0xb7, 0xdd, 0xee, 0x50, 0xb4, 0x6d, 0x68, 0x38, 0x0e, 0x29, 0x44, 0x49, 0xc7, + 0x68, 0x9b, 0xd8, 0xae, 0xed, 0xb6, 0xc4, 0x22, 0x36, 0xbc, 0xae, 0xeb, 0xcb, 0xda, 0x46, 0x97, + 0x74, 0x78, 0x08, 0x8b, 0x47, 0x70, 0x98, 0xff, 0xca, 0x41, 0x49, 0xac, 0x39, 0xb8, 0xe7, 0x5e, + 0x84, 0xb2, 0x13, 0xf7, 0x02, 0xbd, 0xf6, 0xf3, 0xda, 0x94, 0x64, 0x5c, 0xe3, 0x24, 0xad, 0x60, + 0x3e, 0x88, 0xdf, 0xd0, 0x7a, 0x0f, 0x42, 0xe6, 0x64, 0x75, 0x90, 0xa4, 0x15, 0xd9, 0xeb, 0x9e, + 0x88, 0x0f, 0x5d, 0xe3, 0x85, 0x47, 0xf4, 0x86, 0x00, 0x62, 0x85, 0x43, 0xdb, 0x70, 0x8e, 0x38, + 0x8e, 0x77, 0x4f, 0x02, 0xeb, 0x9e, 0x77, 0xd4, 0x26, 0xec, 0x88, 0xcb, 0xb1, 0x44, 0xa1, 0xfe, + 0x05, 0xcd, 0x72, 0x6e, 0x7d, 0x98, 0x04, 0x8f, 0xe2, 0x1b, 0x75, 0x6c, 0xb9, 0x09, 0x8f, 0xed, + 0x10, 0x96, 0x06, 0x40, 0x32, 0xca, 0xf5, 0x8c, 0xe0, 0xaa, 0x96, 0xb3, 0x84, 0x47, 0xd0, 0x9c, + 0x8e, 0x81, 0xe3, 0x91, 0x12, 0xd1, 0x75, 0x98, 0x13, 0x9e, 0xec, 0x75, 0xfd, 0xa0, 0x82, 0xcf, + 0xcb, 0xe3, 0x46, 0x27, 0xfd, 0xea, 0xdc, 0xed, 0x04, 0x06, 0x0f, 0x50, 0x8a, 0xcd, 0x75, 0xec, + 0xb6, 0xed, 0x57, 0x66, 0x24, 0x4b, 0xb8, 0xb9, 0xb7, 0x04, 0x10, 0x2b, 0x5c, 0xc2, 0x03, 0x0b, + 0x67, 0x7a, 0xe0, 0x06, 0x2c, 0x72, 0xea, 0x5a, 0x5b, 0xae, 0x2d, 0x0a, 0xc9, 0x1b, 0xc7, 0xb2, + 0x3e, 0x2f, 0xc9, 0x83, 0x38, 0x2f, 0x8a, 0xeb, 0xbd, 0x41, 
0x24, 0x1e, 0xa6, 0x37, 0xff, 0x94, + 0x05, 0xa4, 0x5a, 0x1f, 0x4b, 0x15, 0x65, 0x2a, 0x2f, 0x8a, 0x06, 0x4d, 0xb7, 0x4e, 0xc6, 0x40, + 0x83, 0xa6, 0xbb, 0xa6, 0x00, 0x8f, 0xb6, 0xa1, 0xa8, 0xf2, 0x53, 0x14, 0x73, 0x6b, 0x9a, 0xb8, + 0xb8, 0x13, 0x20, 0x4e, 0xfb, 0xd5, 0xe5, 0x84, 0x9a, 0x10, 0x23, 0x9b, 0xe7, 0x48, 0x02, 0xba, + 0x02, 0x40, 0x3a, 0x76, 0x7c, 0x7c, 0x5a, 0x8c, 0x86, 0x68, 0xd1, 0x20, 0x04, 0xc7, 0xa8, 0xd0, + 0x2b, 0x90, 0xf3, 0x1f, 0xae, 0xc1, 0x2d, 0xc8, 0xfe, 0x5d, 0xb4, 0xb3, 0x52, 0x82, 0xd0, 0x2e, + 0x83, 0x82, 0x0b, 0xb3, 0x74, 0x6f, 0x1a, 0x6a, 0xdf, 0x0c, 0x31, 0x38, 0x46, 0x85, 0xbe, 0x09, + 0x85, 0x03, 0x5d, 0xcf, 0xca, 0xd3, 0x4d, 0x9d, 0x67, 0x83, 0x2a, 0x58, 0x4d, 0x70, 0x82, 0x2f, + 0x1c, 0x4a, 0x43, 0x5f, 0x85, 0x12, 0xef, 0xee, 0x87, 0x25, 0x80, 0x72, 0x89, 0xf0, 0xbe, 0xdd, + 0x8b, 0x50, 0x38, 0x4e, 0x67, 0xbe, 0x03, 0xc5, 0x6d, 0xbb, 0xc9, 0x3c, 0xd9, 0x92, 0x3f, 0x03, + 0x33, 0x3c, 0xd1, 0x6f, 0x86, 0x27, 0x19, 0xb8, 0x6a, 0x80, 0x17, 0x3e, 0xea, 0x12, 0xd7, 0x53, + 0x5d, 0x65, 0x3e, 0xf2, 0xd1, 0xd7, 0x05, 0x10, 0x2b, 0xdc, 0xf5, 0x25, 0x51, 0x65, 0xfc, 0xf8, + 0xa3, 0xea, 0xd4, 0x07, 0x1f, 0x55, 0xa7, 0x3e, 0xfc, 0x48, 0x57, 0x1c, 0xbf, 0x07, 0x80, 0x9d, + 0xfd, 0xef, 0xd2, 0xa6, 0xca, 0xdd, 0xa9, 0xa6, 0xac, 0xc1, 0x70, 0x5f, 0x4e, 0x59, 0x33, 0x03, + 0x95, 0x63, 0x0c, 0x87, 0x13, 0x94, 0x68, 0x0d, 0x8a, 0xe1, 0xfc, 0x54, 0xfb, 0xc7, 0x62, 0xe0, + 0x6f, 0xe1, 0x90, 0x15, 0x47, 0x34, 0x89, 0x8b, 0x24, 0x77, 0xe6, 0x45, 0x52, 0x87, 0x6c, 0xd7, + 0xb6, 0xf4, 0xfc, 0xe2, 0x52, 0x70, 0x91, 0xdf, 0xd9, 0x6a, 0x9c, 0xf6, 0xab, 0x4f, 0x8c, 0x7b, + 0xb6, 0xf0, 0x7b, 0x1d, 0xca, 0x6b, 0x77, 0xb6, 0x1a, 0x58, 0x30, 0x8f, 0xca, 0x6a, 0xd3, 0x13, + 0x66, 0xb5, 0x2b, 0x00, 0xad, 0x68, 0x0a, 0xa4, 0x92, 0x46, 0xe8, 0x88, 0xb1, 0xe9, 0x4f, 0x8c, + 0x0a, 0x71, 0x58, 0x6c, 0x32, 0x4a, 0x82, 0x69, 0x0c, 0xf7, 0x49, 0x5b, 0xcd, 0x95, 0x27, 0x8b, + 0x89, 0x0b, 0x5a, 0xcd, 0xe2, 0xc6, 0xa0, 0x30, 0x3c, 0x2c, 0x1f, 0x79, 0xb0, 0x68, 0xe9, 0x86, + 0x3d, 0x52, 0x5a, 0x9c, 0x58, 0xa9, 0xcc, 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x0e, + 0x2c, 0x07, 0xc0, 0xe1, 0xa9, 0x89, 0xcc, 0xfa, 0xd9, 0xfa, 0xca, 0x49, 0xbf, 0xba, 0xdc, 0x18, + 0x4b, 0x85, 0x1f, 0x20, 0x01, 0x59, 0x30, 0xed, 0xa8, 0x2a, 0xb9, 0x24, 0x2b, 0x9b, 0xaf, 0xa5, + 0x5b, 0x45, 0xe4, 0xfd, 0xb5, 0x78, 0x75, 0x1c, 0x4e, 0xc0, 0x74, 0x61, 0xac, 0x65, 0xa3, 0x77, + 0xa1, 0x44, 0x5c, 0xd7, 0xf3, 0x89, 0x9a, 0xe3, 0xcc, 0x4a, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, + 0x64, 0x0c, 0x54, 0xe3, 0x31, 0x0c, 0x8e, 0xab, 0x42, 0xf7, 0x60, 0xde, 0xbb, 0xe7, 0x52, 0x86, + 0xe9, 0x01, 0x65, 0xd4, 0x6d, 0x52, 0x5e, 0x29, 0x4b, 0xed, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, + 0x2e, 0x9d, 0x84, 0x73, 0x3c, 0xa8, 0x05, 0xd5, 0x44, 0x6e, 0x75, 0x89, 0x63, 0x7f, 0x8f, 0x32, + 0x5e, 0x99, 0x8b, 0x46, 0xff, 0x9b, 0x21, 0x14, 0xc7, 0x28, 0x50, 0x17, 0xca, 0xed, 0xf8, 0x95, + 0x51, 0x59, 0x94, 0x66, 0x5e, 0x4b, 0x67, 0xe6, 0xf0, 0xa5, 0x16, 0x95, 0x41, 0x09, 0x1c, 0x4e, + 0x6a, 0x59, 0x7e, 0x01, 0x4a, 0x0f, 0xd9, 0x21, 0x88, 0x0e, 0x63, 0xf0, 0x40, 0x26, 0xea, 0x30, + 0xfe, 0x90, 0x81, 0xb9, 0xe4, 0x36, 0x0e, 0x5c, 0x87, 0xf9, 0x54, 0xd7, 0x61, 0xd0, 0xcb, 0x1a, + 0x63, 0xdf, 0x80, 0x82, 0xfc, 0x9c, 0x1d, 0x9b, 0x9f, 0x75, 0x1a, 0xcc, 0x3d, 0x4a, 0x1a, 0xac, + 0x01, 0x88, 0x62, 0x85, 0x79, 0x8e, 0x43, 0x99, 0x1e, 0xab, 0xa9, 0xb7, 0x9e, 0x10, 0x8a, 0x63, + 0x14, 0xa2, 0xa4, 0xde, 0x77, 0xbc, 0xe6, 0x91, 0xdc, 0x82, 0x20, 0x7a, 0x65, 0xee, 0x2b, 0xa8, + 0x92, 0xba, 0x3e, 0x84, 0xc5, 0x23, 0x38, 0xcc, 0x1e, 0x9c, 0xdf, 0x25, 0x4c, 0x14, 
0x39, 0x51, + 0xa4, 0xc8, 0x9e, 0xe5, 0xed, 0xa1, 0x8e, 0xe8, 0xd2, 0xa4, 0x11, 0x17, 0x6d, 0x7e, 0x04, 0x8b, + 0xba, 0x22, 0xf3, 0x2f, 0x06, 0x5c, 0x18, 0xa9, 0xfb, 0x73, 0xe8, 0xc8, 0xde, 0x4e, 0x76, 0x64, + 0x2f, 0xa6, 0x1c, 0x0a, 0x8f, 0xb2, 0x76, 0x4c, 0x7f, 0x36, 0x03, 0xf9, 0x5d, 0x51, 0x09, 0x9b, + 0x9f, 0x18, 0x30, 0x2b, 0x7f, 0x4d, 0x32, 0x93, 0xaf, 0x26, 0x9f, 0x6a, 0x8a, 0x8f, 0xef, 0x99, + 0xe6, 0x71, 0x0c, 0xed, 0xdf, 0x37, 0x20, 0x39, 0x0d, 0x47, 0x2f, 0xab, 0x10, 0x30, 0xc2, 0x71, + 0xf5, 0x84, 0xee, 0xff, 0xd2, 0xb8, 0x96, 0xf4, 0x5c, 0xaa, 0x69, 0xe5, 0xb3, 0x50, 0xc4, 0x9e, + 0xe7, 0xef, 0x12, 0xff, 0x90, 0x8b, 0xbd, 0xeb, 0x88, 0x1f, 0x7a, 0x7b, 0xe5, 0xde, 0x49, 0x0c, + 0x56, 0x70, 0xf3, 0xa7, 0x06, 0x5c, 0x18, 0xfb, 0x02, 0x27, 0xb2, 0x48, 0x33, 0xfc, 0xd2, 0x2b, + 0x0a, 0x1d, 0x39, 0xa2, 0xc3, 0x31, 0x2a, 0xd1, 0x4b, 0x26, 0x9e, 0xed, 0x06, 0x7b, 0xc9, 0x84, + 0x36, 0x9c, 0xa4, 0x35, 0xff, 0x91, 0x01, 0xfd, 0xe4, 0xf5, 0x5f, 0x76, 0xfa, 0xa7, 0x07, 0x1e, + 0xdc, 0xe6, 0x92, 0x0f, 0x6e, 0xe1, 0xeb, 0x5a, 0xec, 0xc5, 0x29, 0xfb, 0xe0, 0x17, 0x27, 0xf4, + 0x7c, 0xf8, 0x88, 0xa5, 0x7c, 0x68, 0x25, 0xf9, 0x88, 0x75, 0xda, 0xaf, 0xce, 0x6a, 0xe1, 0xc9, + 0x47, 0xad, 0x37, 0x61, 0xc6, 0xa2, 0x3e, 0xb1, 0x1d, 0xd5, 0x17, 0xa6, 0x7e, 0x96, 0x51, 0xc2, + 0x1a, 0x8a, 0xb5, 0x5e, 0x12, 0x36, 0xe9, 0x0f, 0x1c, 0x08, 0x14, 0x09, 0xbb, 0xe9, 0x59, 0xaa, + 0x23, 0xc9, 0x47, 0x09, 0x7b, 0xc3, 0xb3, 0x28, 0x96, 0x18, 0xf3, 0x03, 0x03, 0x4a, 0x4a, 0xd2, + 0x06, 0xe9, 0x72, 0x8a, 0x2e, 0x87, 0xab, 0x50, 0xc7, 0x7d, 0x21, 0xfe, 0x5a, 0x79, 0xda, 0xaf, + 0x16, 0x25, 0x99, 0x6c, 0x66, 0x46, 0xbc, 0xca, 0x65, 0xce, 0xd8, 0xa3, 0x27, 0x21, 0x2f, 0x03, + 0x48, 0x6f, 0x66, 0xf4, 0xec, 0x2a, 0x80, 0x58, 0xe1, 0xcc, 0xcf, 0x32, 0x50, 0x4e, 0x2c, 0x2e, + 0x45, 0x5f, 0x10, 0x8e, 0x50, 0x33, 0x29, 0xc6, 0xf2, 0xe3, 0xff, 0xe4, 0xa0, 0xaf, 0xaf, 0xe9, + 0x47, 0xb9, 0xbe, 0xbe, 0x05, 0xd3, 0x4d, 0xb1, 0x47, 0xc1, 0x7f, 0x66, 0x2e, 0x4f, 0x72, 0x9c, + 0x72, 0x77, 0x23, 0x6f, 0x94, 0x9f, 0x1c, 0x6b, 0x81, 0xe8, 0x26, 0x2c, 0x32, 0xea, 0xb3, 0xde, + 0xfa, 0x81, 0x4f, 0x59, 0x7c, 0x98, 0x90, 0x8f, 0xaa, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, + 0x7d, 0x98, 0xbd, 0x4d, 0xf6, 0x9d, 0xf0, 0xa1, 0x11, 0x43, 0xd9, 0x76, 0x9b, 0x4e, 0xd7, 0xa2, + 0x2a, 0xa1, 0x07, 0xd9, 0x2b, 0x08, 0xda, 0xad, 0x38, 0xf2, 0xb4, 0x5f, 0x3d, 0x97, 0x00, 0xa8, + 0x97, 0x35, 0x9c, 0x14, 0x61, 0x3a, 0x90, 0xfb, 0x1c, 0x3b, 0xc9, 0x6f, 0x43, 0x31, 0xaa, 0xf5, + 0x1f, 0xb3, 0x4a, 0xf3, 0x6d, 0x28, 0x08, 0x8f, 0x0f, 0x7a, 0xd4, 0x33, 0xaa, 0xa4, 0x64, 0xed, + 0x95, 0x49, 0x53, 0x7b, 0xc9, 0xe7, 0xea, 0x3b, 0x1d, 0xeb, 0x11, 0x9f, 0xab, 0x33, 0x8f, 0x72, + 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x80, 0xfa, 0x4b, 0x8f, 0xb8, 0x64, 0x54, 0x01, 0x11, 0xbb, + 0x64, 0xe2, 0xf7, 0x7f, 0xec, 0x85, 0xe1, 0x47, 0x06, 0x80, 0x1c, 0xe5, 0xc9, 0x31, 0x52, 0x8a, + 0x3f, 0x46, 0xdc, 0x81, 0x69, 0x4f, 0x79, 0xa4, 0x7a, 0xb2, 0x9e, 0x70, 0x5e, 0x1c, 0x06, 0x92, + 0xf2, 0x49, 0xac, 0x85, 0xd5, 0x5f, 0xfd, 0xf8, 0xfe, 0xca, 0xd4, 0x27, 0xf7, 0x57, 0xa6, 0x3e, + 0xbd, 0xbf, 0x32, 0xf5, 0xde, 0xc9, 0x8a, 0xf1, 0xf1, 0xc9, 0x8a, 0xf1, 0xc9, 0xc9, 0x8a, 0xf1, + 0xe9, 0xc9, 0x8a, 0xf1, 0xd9, 0xc9, 0x8a, 0xf1, 0xc1, 0xdf, 0x56, 0xa6, 0xde, 0x7c, 0x2a, 0xcd, + 0x5f, 0x25, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xda, 0x63, 0x4c, 0x51, 0x29, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -1953,6 +1986,16 @@ func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if 
m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil { + i-- + if *m.IgnoreStoreReadErrorWithClusterBreakingPotential { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } if len(m.DryRun) > 0 { for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.DryRun[iNdEx]) @@ -2025,6 +2068,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *FieldsV1) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3701,6 +3786,9 @@ func (m *DeleteOptions) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil { + n += 2 + } return n } @@ -3714,6 +3802,25 @@ func (m *Duration) Size() (n int) { return n } +func (m *FieldSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *FieldsV1) Size() (n int) { if m == nil { return 0 @@ -4415,6 +4522,7 @@ func (this *DeleteOptions) String() string { `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `IgnoreStoreReadErrorWithClusterBreakingPotential:` + valueToStringGenerated(this.IgnoreStoreReadErrorWithClusterBreakingPotential) + `,`, `}`, }, "") return s @@ -4429,6 +4537,18 @@ func (this *Duration) String() string { }, "") return s } +func (this *FieldSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} func (this *GetOptions) String() string { if this == nil { return "nil" @@ -6353,6 +6473,27 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreStoreReadErrorWithClusterBreakingPotential", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.IgnoreStoreReadErrorWithClusterBreakingPotential = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6443,6 +6584,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } return nil } +func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} func (m *FieldsV1) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 2b95700f7..865d3e7ca 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -315,6 +315,21 @@ message DeleteOptions { // +optional // +listType=atomic repeated string dryRun = 5; + + // if set to true, it will trigger an unsafe deletion of the resource in + // case the normal deletion flow fails with a corrupt object error. + // A resource is considered corrupt if it can not be retrieved from + // the underlying storage successfully because of a) its data can + // not be transformed e.g. decryption failure, or b) it fails + // to decode into an object. + // NOTE: unsafe deletion ignores finalizer constraints, skips + // precondition checks, and removes the object from the storage. + // WARNING: This may potentially break the cluster if the workload + // associated with the resource being unsafe-deleted relies on normal + // deletion flow. Use only if you REALLY know what you are doing. + // The default value is false, and the user must opt in to enable it + // +optional + optional bool ignoreStoreReadErrorWithClusterBreakingPotential = 6; } // Duration is a wrapper around time.Duration which supports correct @@ -324,6 +339,25 @@ message Duration { optional int64 duration = 1; } +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message FieldSelectorRequirement { + // key is the field selector key that the requirement applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + optional string operator = 2; + + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + repeated string values = 3; +} + // FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. // // Each key is either a '.' representing the field itself, and will always map to an empty set, @@ -460,7 +494,7 @@ message List { optional ListMeta metadata = 1; // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // ListMeta describes metadata that synthetic resources must have, including lists and @@ -1209,6 +1243,6 @@ message WatchEvent { // * If Type is Deleted: the state of the object immediately before deletion. // * If Type is Error: *Status is recommended; other types may make sense // depending on context. 
- optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index 592dcb8a7..c748071ed 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -24,8 +24,10 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" + utiljson "k8s.io/apimachinery/pkg/util/json" ) // LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements @@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) { if f.Raw == nil { return []byte("null"), nil } + if f.getContentType() == fieldsV1InvalidOrValidCBORObject { + var u map[string]interface{} + if err := cbor.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err) + } + return utiljson.Marshal(u) + } return f.Raw, nil } // UnmarshalJSON implements json.Unmarshaler func (f *FieldsV1) UnmarshalJSON(b []byte) error { if f == nil { - return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer") } if !bytes.Equal(b, []byte("null")) { f.Raw = append(f.Raw[0:0], b...) @@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error { var _ json.Marshaler = FieldsV1{} var _ json.Unmarshaler = &FieldsV1{} + +func (f FieldsV1) MarshalCBOR() ([]byte, error) { + if f.Raw == nil { + return cbor.Marshal(nil) + } + if f.getContentType() == fieldsV1InvalidOrValidJSONObject { + var u map[string]interface{} + if err := utiljson.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err) + } + return cbor.Marshal(u) + } + return f.Raw, nil +} + +var cborNull = []byte{0xf6} + +func (f *FieldsV1) UnmarshalCBOR(b []byte) error { + if f == nil { + return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(b, cborNull) { + f.Raw = append(f.Raw[0:0], b...) + } + return nil +} + +const ( + // fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw + // bytes don't represent an allowable value in any supported encoding. + fieldsV1InvalidOrEmpty = iota + + // fieldsV1InvalidOrValidJSONObject indicates that a FieldV1 either contains raw bytes that + // are a valid JSON encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidJSONObject + + // fieldsV1InvalidOrValidCBORObject indicates that a FieldV1 either contains raw bytes that + // are a valid CBOR encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidCBORObject +) + +// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject, +// fieldsV1InvalidOrValidCBORObject based on the value of Raw. +// +// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map) +// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty +// and represents an allowable value, then the initial byte unambiguously distinguishes a +// JSON-encoded value from a CBOR-encoded value. 
+// +// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first +// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid +// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map", +// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or +// 0xc6...0xdb). The two sets of valid initial bytes don't intersect. +func (f FieldsV1) getContentType() int { + if len(f.Raw) > 0 { + p := f.Raw[0] + switch p { + case 'n', '{', '\t', '\r', '\n', ' ': + return fieldsV1InvalidOrValidJSONObject + case 0xf6: // null + return fieldsV1InvalidOrValidCBORObject + default: + if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ { + return fieldsV1InvalidOrValidCBORObject + } + } + } + return fieldsV1InvalidOrEmpty +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 8eb37f436..9f302b3f3 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" @@ -129,6 +131,25 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error { return nil } +func (t *MicroTime) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(RFC3339Micro, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *MicroTime) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -160,6 +181,13 @@ func (t MicroTime) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(RFC3339Micro)) } +func (t MicroTime) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + return cbor.Marshal(t.UTC().Format(RFC3339Micro)) +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. 
// diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 421770d43..0333cfdb3 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) // Time is a wrapper around time.Time which supports correct @@ -116,6 +118,25 @@ func (t *Time) UnmarshalJSON(b []byte) error { return nil } +func (t *Time) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(time.RFC3339, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *Time) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -151,6 +172,14 @@ func (t Time) MarshalJSON() ([]byte, error) { return buf, nil } +func (t Time) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + + return cbor.Marshal(t.UTC().Format(time.RFC3339)) +} + // ToUnstructured implements the value.UnstructuredConverter interface. func (t Time) ToUnstructured() interface{} { if t.IsZero() { diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 9695ba50b..4cf3f4795 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -439,6 +439,20 @@ const ( // // The annotation is added to a "Bookmark" event. InitialEventsAnnotationKey = "k8s.io/initial-events-end" + + // InitialEventsListBlueprintAnnotationKey is the name of the key + // where an empty, versioned list is encoded in the requested format + // (e.g., protobuf, JSON, CBOR), then base64-encoded and stored as a string. + // + // This encoding matches the request encoding format, which may be + // protobuf, JSON, CBOR, or others, depending on what the client requested. + // This ensures that the reconstructed list can be processed through the + // same decoder chain that would handle a standard LIST call response. + // + // The annotation is added to a "Bookmark" event and is used by clients + // to guarantee the format consistency when reconstructing + // the list during WatchList processing. + InitialEventsListBlueprintAnnotationKey = "kubernetes.io/initial-events-list-blueprint" ) // resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch @@ -546,6 +560,21 @@ type DeleteOptions struct { // +optional // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` + + // if set to true, it will trigger an unsafe deletion of the resource in + // case the normal deletion flow fails with a corrupt object error. + // A resource is considered corrupt if it can not be retrieved from + // the underlying storage successfully because of a) its data can + // not be transformed e.g. decryption failure, or b) it fails + // to decode into an object. + // NOTE: unsafe deletion ignores finalizer constraints, skips + // precondition checks, and removes the object from the storage. 
+ // WARNING: This may potentially break the cluster if the workload + // associated with the resource being unsafe-deleted relies on normal + // deletion flow. Use only if you REALLY know what you are doing. + // The default value is false, and the user must opt in to enable it + // +optional + IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty" protobuf:"varint,6,opt,name=ignoreStoreReadErrorWithClusterBreakingPotential"` } const ( @@ -902,6 +931,22 @@ const ( // Status code 500 StatusReasonServerTimeout StatusReason = "ServerTimeout" + // StatusReasonStoreReadError means that the server encountered an error while + // retrieving resources from the backend object store. + // This may be due to backend database error, or because processing of the read + // resource failed. + // Details: + // "kind" string - the kind attribute of the resource being acted on. + // "name" string - the prefix where the reading error(s) occurred + // "causes" []StatusCause + // - (optional): + // - "type" CauseType - CauseTypeUnexpectedServerResponse + // - "message" string - the error message from the store backend + // - "field" string - the full path with the key of the resource that failed reading + // + // Status code 500 + StatusReasonStoreReadError StatusReason = "StorageReadError" + // StatusReasonTimeout means that the request could not be completed within the given time. // Clients can get this response only when they specified a timeout param in the request, // or if the server cannot complete the operation within a reasonable amount of time. @@ -1278,6 +1323,33 @@ const ( LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" ) +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type FieldSelectorRequirement struct { + // key is the field selector key that the requirement applies to. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + Operator FieldSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=FieldSelectorOperator"` + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A field selector operator is the set of operators that can be used in a selector requirement. +type FieldSelectorOperator string + +const ( + FieldSelectorOpIn FieldSelectorOperator = "In" + FieldSelectorOpNotIn FieldSelectorOperator = "NotIn" + FieldSelectorOpExists FieldSelectorOperator = "Exists" + FieldSelectorOpDoesNotExist FieldSelectorOperator = "DoesNotExist" +) + // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource // that the fieldset applies to. 
type ManagedFieldsEntry struct { diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index b736e8371..405496d3d 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -129,12 +129,24 @@ var map_DeleteOptions = map[string]string{ "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "ignoreStoreReadErrorWithClusterBreakingPotential": "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it", } func (DeleteOptions) SwaggerDoc() map[string]string { return map_DeleteOptions } +var map_FieldSelectorRequirement = map[string]string{ + "": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the field selector key that the requirement applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.", +} + +func (FieldSelectorRequirement) SwaggerDoc() map[string]string { + return map_FieldSelectorRequirement +} + var map_FieldsV1 = map[string]string{ "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. 
The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 0f58d66c0..71f7b163a 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -20,6 +20,7 @@ import ( gojson "encoding/json" "fmt" "io" + "math/big" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -125,6 +126,29 @@ func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, err return i, true, nil } +// NestedNumberAsFloat64 returns the float64 value of a nested field. If the field's value is a +// float64, it is returned. If the field's value is an int64 that can be losslessly converted to +// float64, it will be converted and returned. Returns false if value is not found and an error if +// not a float64 or an int64 that can be accurately represented as a float64. +func NestedNumberAsFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return 0, found, err + } + switch x := val.(type) { + case int64: + f, accuracy := big.NewInt(x).Float64() + if accuracy != big.Exact { + return 0, false, fmt.Errorf("%v accessor error: int64 value %v cannot be losslessly converted to float64", jsonPath(fields), x) + } + return f, true, nil + case float64: + return x, true, nil + default: + return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64 or int64", jsonPath(fields), val, val) + } +} + // NestedStringSlice returns a copy of []string value of a nested field. // Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice. 
func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) { diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 40d289f37..5e36a91ee 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -450,10 +450,14 @@ func (u *Unstructured) SetFinalizers(finalizers []string) { } func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry { - items, found, err := NestedSlice(u.Object, "metadata", "managedFields") + v, found, err := NestedFieldNoCopy(u.Object, "metadata", "managedFields") if !found || err != nil { return nil } + items, ok := v.([]interface{}) + if !ok { + return nil + } managedFields := []metav1.ManagedFieldsEntry{} for _, item := range items { m, ok := item.(map[string]interface{}) diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index a0f709ad8..b1eb1bbfc 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -26,12 +26,18 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" + + "k8s.io/utils/ptr" ) // LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validate options type LabelSelectorValidationOptions struct { // Allow invalid label value in selector AllowInvalidLabelValueInSelector bool + + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool } // LabelSelectorHasInvalidLabelValue returns true if the given selector contains an invalid label value in a match expression. @@ -79,7 +85,9 @@ func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts L allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) } default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + } } allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) if !opts.AllowInvalidLabelValueInSelector { @@ -113,6 +121,39 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi return allErrs } +// FieldSelectorValidationOptions is a struct that can be passed to ValidateFieldSelectorRequirement to record the validate options +type FieldSelectorValidationOptions struct { + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool +} + +// ValidateLabelSelectorRequirement validates the requirement according to the opts and returns any validation errors. 
+func ValidateFieldSelectorRequirement(requirement metav1.FieldSelectorRequirement, opts FieldSelectorValidationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(requirement.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "must be specified")) + } + + switch requirement.Operator { + case metav1.FieldSelectorOpIn, metav1.FieldSelectorOpNotIn: + if len(requirement.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case metav1.FieldSelectorOpExists, metav1.FieldSelectorOpDoesNotExist: + if len(requirement.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), requirement.Operator, "not a valid selector operator")) + } + } + + return allErrs +} + func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { allErrs := field.ErrorList{} //lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed @@ -126,6 +167,7 @@ func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"})) } allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateIgnoreStoreReadError(field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), options)...) return allErrs } @@ -147,15 +189,16 @@ func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList { func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList { allErrs := field.ErrorList{} - if patchType != types.ApplyPatchType { - if options.Force != nil { - allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch")) - } - } else { + switch patchType { + case types.ApplyYAMLPatchType, types.ApplyCBORPatchType: if options.FieldManager == "" { // This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers. allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch")) } + default: + if options.Force != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch")) + } } allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) @@ -173,7 +216,7 @@ func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorL // considered as not set and is defaulted by the rest of the process // (unless apply is used, in which case it is required). if len(fieldManager) > FieldManagerMaxLength { - allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength)) + allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, FieldManagerMaxLength)) } // Verify that all characters are printable. 
for i, r := range fieldManager { @@ -238,7 +281,7 @@ func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *fiel allErrs = append(allErrs, ValidateFieldManager(fields.Manager, fldPath.Child("manager"))...) if len(fields.Subresource) > MaxSubresourceNameLength { - allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), fields.Subresource, MaxSubresourceNameLength)) + allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), "" /*unused*/, MaxSubresourceNameLength)) } } return allErrs @@ -295,12 +338,12 @@ func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.Er allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr)) } if len(condition.Reason) > maxReasonLen { - allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen)) + allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), "" /*unused*/, maxReasonLen)) } } if len(condition.Message) > maxMessageLen { - allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen)) + allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), "" /*unused*/, maxMessageLen)) } return allErrs @@ -318,3 +361,31 @@ func isValidConditionReason(value string) []string { } return nil } + +// ValidateIgnoreStoreReadError validates that delete options are valid when +// ignoreStoreReadErrorWithClusterBreakingPotential is enabled +func ValidateIgnoreStoreReadError(fldPath *field.Path, options *metav1.DeleteOptions) field.ErrorList { + allErrs := field.ErrorList{} + if enabled := ptr.Deref[bool](options.IgnoreStoreReadErrorWithClusterBreakingPotential, false); !enabled { + return allErrs + } + + if len(options.DryRun) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .dryRun")) + } + if options.PropagationPolicy != nil { + allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .propagationPolicy")) + } + //nolint:staticcheck // Keep validation for deprecated OrphanDependents option until it's being removed + if options.OrphanDependents != nil { + allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .orphanDependents")) + } + if options.GracePeriodSeconds != nil { + allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .gracePeriodSeconds")) + } + if options.Preconditions != nil { + allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .preconditions")) + } + + return allErrs +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go index afe01ed5a..82e272240 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -339,6 +339,13 @@ func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptio } else { out.DryRun = nil } + if values, ok := map[string][]string(*in)["ignoreStoreReadErrorWithClusterBreakingPotential"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.IgnoreStoreReadErrorWithClusterBreakingPotential, s); err != nil { + return err + } + } else { + out.IgnoreStoreReadErrorWithClusterBreakingPotential = nil + } return nil } diff --git 
a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 7d29c504a..6b0d0dfee 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -290,6 +290,11 @@ func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.IgnoreStoreReadErrorWithClusterBreakingPotential != nil { + in, out := &in.IgnoreStoreReadErrorWithClusterBreakingPotential, &out.IgnoreStoreReadErrorWithClusterBreakingPotential + *out = new(bool) + **out = **in + } return } @@ -327,6 +332,27 @@ func (in *Duration) DeepCopy() *Duration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldSelectorRequirement) DeepCopyInto(out *FieldSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorRequirement. +func (in *FieldSelectorRequirement) DeepCopy() *FieldSelectorRequirement { + if in == nil { + return nil + } + out := new(FieldSelectorRequirement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { *out = *in diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index d14d42591..fcec55354 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/constraint/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -33,9 +33,9 @@ message PartialObjectMetadataList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; // items contains each of the included items. - repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/constraint/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 5e6014240..fafa81a3d 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -18,6 +18,7 @@ package labels import ( "fmt" + "slices" "sort" "strconv" "strings" @@ -27,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/klog/v2" - stringslices "k8s.io/utils/strings/slices" ) var ( @@ -45,6 +45,19 @@ var ( // Requirements is AND of all requirements. type Requirements []Requirement +func (r Requirements) String() string { + var sb strings.Builder + + for i, requirement := range r { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(requirement.String()) + } + + return sb.String() +} + // Selector represents a label selector. 
type Selector interface { // Matches returns true if this selector matches the given set of labels. @@ -285,6 +298,13 @@ func (r *Requirement) Values() sets.String { return ret } +// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting. +func (r *Requirement) ValuesUnsorted() []string { + ret := make([]string, 0, len(r.strValues)) + ret = append(ret, r.strValues...) + return ret +} + // Equal checks the equality of requirement. func (r Requirement) Equal(x Requirement) bool { if r.key != x.key { @@ -293,7 +313,7 @@ func (r Requirement) Equal(x Requirement) bool { if r.operator != x.operator { return false } - return stringslices.Equal(r.strValues, x.strValues) + return slices.Equal(r.strValues, x.strValues) } // Empty returns true if the internalSelector doesn't restrict selection space diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/extension.go index 9056397fa..60c000bcb 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/extension.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -18,16 +18,77 @@ package runtime import ( "bytes" - "encoding/json" "errors" + "fmt" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + "k8s.io/apimachinery/pkg/util/json" ) +// RawExtension intentionally avoids implementing value.UnstructuredConverter for now because the +// signature of ToUnstructured does not allow returning an error value in cases where the conversion +// is not possible (content type is unrecognized or bytes don't match content type). +func rawToUnstructured(raw []byte, contentType string) (interface{}, error) { + switch contentType { + case ContentTypeJSON: + var u interface{} + if err := json.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as JSON: %w", err) + } + return u, nil + case ContentTypeCBOR: + var u interface{} + if err := cbor.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as CBOR: %w", err) + } + return u, nil + default: + return nil, fmt.Errorf("cannot convert RawExtension with unrecognized content type to unstructured") + } +} + +func (re RawExtension) guessContentType() string { + switch { + case bytes.HasPrefix(re.Raw, cborSelfDescribed): + return ContentTypeCBOR + case len(re.Raw) > 0: + switch re.Raw[0] { + case '\t', '\r', '\n', ' ', '{', '[', 'n', 't', 'f', '"', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + // Prefixes for the four whitespace characters, objects, arrays, strings, numbers, true, false, and null. + return ContentTypeJSON + } + } + return "" +} + func (re *RawExtension) UnmarshalJSON(in []byte) error { if re == nil { return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") } - if !bytes.Equal(in, []byte("null")) { - re.Raw = append(re.Raw[0:0], in...) + if bytes.Equal(in, []byte("null")) { + return nil + } + re.Raw = append(re.Raw[0:0], in...) + return nil +} + +var ( + cborNull = []byte{0xf6} + cborSelfDescribed = []byte{0xd9, 0xd9, 0xf7} +) + +func (re *RawExtension) UnmarshalCBOR(in []byte) error { + if re == nil { + return errors.New("runtime.RawExtension: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(in, cborNull) { + if !bytes.HasPrefix(in, cborSelfDescribed) { + // The self-described CBOR tag doesn't change the interpretation of the data + // item it encloses, but it is useful as a magic number. 
Its encoding is + // also what is used to implement the CBOR RecognizingDecoder. + re.Raw = append(re.Raw[:0], cborSelfDescribed...) + } + re.Raw = append(re.Raw, in...) } return nil } @@ -46,6 +107,35 @@ func (re RawExtension) MarshalJSON() ([]byte, error) { } return []byte("null"), nil } - // TODO: Check whether ContentType is actually JSON before returning it. - return re.Raw, nil + + contentType := re.guessContentType() + if contentType == ContentTypeJSON { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return json.Marshal(u) +} + +func (re RawExtension) MarshalCBOR() ([]byte, error) { + if re.Raw == nil { + if re.Object != nil { + return cbor.Marshal(re.Object) + } + return cbor.Marshal(nil) + } + + contentType := re.guessContentType() + if contentType == ContentTypeCBOR { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return cbor.Marshal(u) } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index cc0a77bba..395dfdbd0 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -284,3 +284,21 @@ func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error { func (e *encoderWithAllocator) Identifier() Identifier { return e.encoder.Identifier() } + +type nondeterministicEncoderToEncoderAdapter struct { + NondeterministicEncoder +} + +func (e nondeterministicEncoderToEncoderAdapter) Encode(obj Object, w io.Writer) error { + return e.EncodeNondeterministic(obj, w) +} + +// UseNondeterministicEncoding returns an Encoder that encodes objects using the provided Encoder's +// EncodeNondeterministic method if it implements NondeterministicEncoder, otherwise it returns the +// provided Encoder as-is. +func UseNondeterministicEncoding(encoder Encoder) Encoder { + if nondeterministic, ok := encoder.(NondeterministicEncoder); ok { + return nondeterministicEncoderToEncoderAdapter{nondeterministic} + } + return encoder +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index e89ea8939..2703300cd 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -69,6 +69,19 @@ type Encoder interface { Identifier() Identifier } +// NondeterministicEncoder is implemented by Encoders that can serialize objects more efficiently in +// cases where the output does not need to be deterministic. +type NondeterministicEncoder interface { + Encoder + + // EncodeNondeterministic writes an object to the stream. Unlike the Encode method of + // Encoder, EncodeNondeterministic does not guarantee that any two invocations will write + // the same sequence of bytes to the io.Writer. Any differences will not be significant to a + // generic decoder. For example, map entries and struct fields might be encoded in any + // order. + EncodeNondeterministic(Object, io.Writer) error +} + // MemoryAllocator is responsible for allocating memory. // By encapsulating memory allocation into its own interface, we can reuse the memory // across many operations in places we know it can significantly improve the performance. 
diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go new file mode 100644 index 000000000..4d069a903 --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go @@ -0,0 +1,389 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cbor + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + util "k8s.io/apimachinery/pkg/util/runtime" + + "github.com/fxamacker/cbor/v2" +) + +type metaFactory interface { + // Interpret should return the version and kind of the wire-format of the object. + Interpret(data []byte) (*schema.GroupVersionKind, error) +} + +type defaultMetaFactory struct{} + +func (mf *defaultMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) { + var tm metav1.TypeMeta + // The input is expected to include additional map keys besides apiVersion and kind, so use + // lax mode for decoding into TypeMeta. + if err := modes.DecodeLax.Unmarshal(data, &tm); err != nil { + return nil, fmt.Errorf("unable to determine group/version/kind: %w", err) + } + actual := tm.GetObjectKind().GroupVersionKind() + return &actual, nil +} + +type Serializer interface { + runtime.Serializer + runtime.NondeterministicEncoder + recognizer.RecognizingDecoder + + // NewSerializer returns a value of this interface type rather than exporting the serializer + // type and returning one of those because the zero value of serializer isn't ready to + // use. Users aren't intended to implement cbor.Serializer themselves, and this unexported + // interface method is here to prevent that (https://go.dev/blog/module-compatibility). + private() +} + +var _ Serializer = &serializer{} + +type options struct { + strict bool + transcode bool +} + +type Option func(*options) + +// Strict configures a serializer to return a strict decoding error when it encounters map keys that +// do not correspond to a field in the target object of a decode operation. This option is disabled +// by default. +func Strict(s bool) Option { + return func(opts *options) { + opts.strict = s + } +} + +// Transcode configures a serializer to transcode the "raw" bytes of a decoded runtime.RawExtension +// or metav1.FieldsV1 object to JSON. This is enabled by default to support existing programs that +// depend on the assumption that objects of either type contain valid JSON. 
+func Transcode(s bool) Option { + return func(opts *options) { + opts.transcode = s + } +} + +type serializer struct { + metaFactory metaFactory + creater runtime.ObjectCreater + typer runtime.ObjectTyper + options options +} + +func (serializer) private() {} + +// NewSerializer creates and returns a serializer configured with the provided options. The default +// options are equivalent to explicitly passing Strict(false) and Transcode(true). +func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, options ...Option) Serializer { + return newSerializer(&defaultMetaFactory{}, creater, typer, options...) +} + +func newSerializer(metaFactory metaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options ...Option) *serializer { + s := &serializer{ + metaFactory: metaFactory, + creater: creater, + typer: typer, + } + s.options.transcode = true + for _, o := range options { + o(&s.options) + } + return s +} + +func (s *serializer) Identifier() runtime.Identifier { + return "cbor" +} + +// Encode writes a CBOR representation of the given object. +// +// Because the CBOR data item written by a call to Encode is always enclosed in the "self-described +// CBOR" tag, its encoded form always has the prefix 0xd9d9f7. This prefix is suitable for use as a +// "magic number" for distinguishing encoded CBOR from other protocols. +// +// The default serialization behavior for any given object replicates the behavior of the JSON +// serializer as far as it is necessary to allow the CBOR serializer to be used as a drop-in +// replacement for the JSON serializer, with limited exceptions. For example, the distinction +// between integers and floating-point numbers is preserved in CBOR due to its distinct +// representations for each type. +// +// Objects implementing runtime.Unstructured will have their unstructured content encoded rather +// than following the default behavior for their dynamic type. +func (s *serializer) Encode(obj runtime.Object, w io.Writer) error { + return s.encode(modes.Encode, obj, w) +} + +func (s *serializer) EncodeNondeterministic(obj runtime.Object, w io.Writer) error { + return s.encode(modes.EncodeNondeterministic, obj, w) +} + +func (s *serializer) encode(mode modes.EncMode, obj runtime.Object, w io.Writer) error { + var v interface{} = obj + if u, ok := obj.(runtime.Unstructured); ok { + v = u.UnstructuredContent() + } + + if err := modes.RejectCustomMarshalers(v); err != nil { + return err + } + + if _, err := w.Write(selfDescribedCBOR); err != nil { + return err + } + + return mode.MarshalTo(v, w) +} + +// gvkWithDefaults returns group kind and version defaulting from provided default +func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { + if len(actual.Kind) == 0 { + actual.Kind = defaultGVK.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = defaultGVK.Group + actual.Version = defaultGVK.Version + } + if len(actual.Version) == 0 && actual.Group == defaultGVK.Group { + actual.Version = defaultGVK.Version + } + return actual +} + +// diagnose returns the diagnostic encoding of a well-formed CBOR data item. +func diagnose(data []byte) string { + diag, err := modes.Diagnostic.Diagnose(data) + if err != nil { + // Since the input must already be well-formed CBOR, converting it to diagnostic + // notation should not fail. 
+ util.HandleError(err) + + return hex.EncodeToString(data) + } + return diag +} + +// unmarshal unmarshals CBOR data from the provided byte slice into a Go object. If the decoder is +// configured to report strict errors, the first error return value may be a non-nil strict decoding +// error. If the last error return value is non-nil, then the unmarshal failed entirely and the +// state of the destination object should not be relied on. +func (s *serializer) unmarshal(data []byte, into interface{}) (strict, lax error) { + if u, ok := into.(runtime.Unstructured); ok { + var content map[string]interface{} + defer func() { + switch u := u.(type) { + case *unstructured.UnstructuredList: + // UnstructuredList's implementation of SetUnstructuredContent + // produces different objects than those produced by a decode using + // UnstructuredJSONScheme: + // + // 1. SetUnstructuredContent retains the "items" key in the list's + // Object field. It is omitted from Object when decoding with + // UnstructuredJSONScheme. + // 2. SetUnstructuredContent does not populate "apiVersion" and + // "kind" on each entry of its Items + // field. UnstructuredJSONScheme does, inferring the singular + // Kind from the list Kind. + // 3. SetUnstructuredContent ignores entries of "items" that are + // not JSON objects or are objects without + // "kind". UnstructuredJSONScheme returns an error in either + // case. + // + // UnstructuredJSONScheme's behavior is replicated here. + var items []interface{} + if uncast, present := content["items"]; present { + var cast bool + items, cast = uncast.([]interface{}) + if !cast { + strict, lax = nil, fmt.Errorf("items field of UnstructuredList must be encoded as an array or null if present") + return + } + } + apiVersion, _ := content["apiVersion"].(string) + kind, _ := content["kind"].(string) + kind = strings.TrimSuffix(kind, "List") + var unstructureds []unstructured.Unstructured + if len(items) > 0 { + unstructureds = make([]unstructured.Unstructured, len(items)) + } + for i := range items { + object, cast := items[i].(map[string]interface{}) + if !cast { + strict, lax = nil, fmt.Errorf("elements of the items field of UnstructuredList must be encoded as a map") + return + } + + // As in UnstructuredJSONScheme, only set the heuristic + // singular GVK when both "apiVersion" and "kind" are either + // missing, non-string, or empty. + object["apiVersion"], _ = object["apiVersion"].(string) + object["kind"], _ = object["kind"].(string) + if object["apiVersion"] == "" && object["kind"] == "" { + object["apiVersion"] = apiVersion + object["kind"] = kind + } + + if object["kind"] == "" { + strict, lax = nil, runtime.NewMissingKindErr(diagnose(data)) + return + } + if object["apiVersion"] == "" { + strict, lax = nil, runtime.NewMissingVersionErr(diagnose(data)) + return + } + + unstructureds[i].Object = object + } + delete(content, "items") + u.Object = content + u.Items = unstructureds + default: + u.SetUnstructuredContent(content) + } + }() + into = &content + } else if err := modes.RejectCustomMarshalers(into); err != nil { + return nil, err + } + + if !s.options.strict { + return nil, modes.DecodeLax.Unmarshal(data, into) + } + + err := modes.Decode.Unmarshal(data, into) + // TODO: UnknownFieldError is ambiguous. It only provides the index of the first problematic + // map entry encountered and does not indicate which map the index refers to. 
+ var unknownField *cbor.UnknownFieldError + if errors.As(err, &unknownField) { + // Unlike JSON, there are no strict errors in CBOR for duplicate map keys. CBOR maps + // with duplicate keys are considered invalid according to the spec and are rejected + // entirely. + return runtime.NewStrictDecodingError([]error{unknownField}), modes.DecodeLax.Unmarshal(data, into) + } + return nil, err +} + +func (s *serializer) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + // A preliminary pass over the input to obtain the actual GVK is redundant on a successful + // decode into Unstructured. + if _, ok := into.(runtime.Unstructured); ok { + if _, unmarshalErr := s.unmarshal(data, into); unmarshalErr != nil { + actual, interpretErr := s.metaFactory.Interpret(data) + if interpretErr != nil { + return nil, nil, interpretErr + } + + if gvk != nil { + *actual = gvkWithDefaults(*actual, *gvk) + } + + return nil, actual, unmarshalErr + } + + actual := into.GetObjectKind().GroupVersionKind() + if len(actual.Kind) == 0 { + return nil, &actual, runtime.NewMissingKindErr(diagnose(data)) + } + if len(actual.Version) == 0 { + return nil, &actual, runtime.NewMissingVersionErr(diagnose(data)) + } + + return into, &actual, nil + } + + actual, err := s.metaFactory.Interpret(data) + if err != nil { + return nil, nil, err + } + + if gvk != nil { + *actual = gvkWithDefaults(*actual, *gvk) + } + + if into != nil { + types, _, err := s.typer.ObjectKinds(into) + if err != nil { + return nil, actual, err + } + *actual = gvkWithDefaults(*actual, types[0]) + } + + if len(actual.Kind) == 0 { + return nil, actual, runtime.NewMissingKindErr(diagnose(data)) + } + if len(actual.Version) == 0 { + return nil, actual, runtime.NewMissingVersionErr(diagnose(data)) + } + + obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into) + if err != nil { + return nil, actual, err + } + + strict, err := s.unmarshal(data, obj) + if err != nil { + return nil, actual, err + } + + if s.options.transcode { + if err := transcodeRawTypes(obj); err != nil { + return nil, actual, err + } + } + + return obj, actual, strict +} + +// selfDescribedCBOR is the CBOR encoding of the head of tag number 55799. This tag, specified in +// RFC 8949 Section 3.4.6 "Self-Described CBOR", encloses all output from the encoder, has no +// special semantics, and is used as a magic number to recognize CBOR-encoded data items. +// +// See https://www.rfc-editor.org/rfc/rfc8949.html#name-self-described-cbor. +var selfDescribedCBOR = []byte{0xd9, 0xd9, 0xf7} + +func (s *serializer) RecognizesData(data []byte) (ok, unknown bool, err error) { + return bytes.HasPrefix(data, selfDescribedCBOR), false, nil +} + +// NewSerializerInfo returns a default SerializerInfo for CBOR using the given creater and typer. 
+func NewSerializerInfo(creater runtime.ObjectCreater, typer runtime.ObjectTyper) runtime.SerializerInfo { + return runtime.SerializerInfo{ + MediaType: "application/cbor", + MediaTypeType: "application", + MediaTypeSubType: "cbor", + Serializer: NewSerializer(creater, typer), + StrictSerializer: NewSerializer(creater, typer, Strict(true)), + StreamSerializer: &runtime.StreamSerializerInfo{ + Framer: NewFramer(), + Serializer: NewSerializer(creater, typer, Transcode(false)), + }, + } +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go new file mode 100644 index 000000000..a71a487f9 --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go @@ -0,0 +1,61 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package direct provides functions for marshaling and unmarshaling between arbitrary Go values and +// CBOR data, with behavior that is compatible with that of the CBOR serializer. In particular, +// types that implement cbor.Marshaler and cbor.Unmarshaler should use these functions. +package direct + +import ( + "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes" +) + +// Marshal serializes a value to CBOR. If there is more than one way to encode the value, it will +// make the same choice as the CBOR implementation of runtime.Serializer. +// +// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its +// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler, +// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless +// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be +// removed in favor of automatic transcoding to CBOR. +func Marshal(src interface{}) ([]byte, error) { + if err := modes.RejectCustomMarshalers(src); err != nil { + return nil, err + } + return modes.Encode.Marshal(src) +} + +// Unmarshal deserializes from CBOR into an addressable value. If there is more than one way to +// unmarshal a value, it will make the same choice as the CBOR implementation of runtime.Serializer. +// +// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its +// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler, +// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless +// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be +// removed in favor of automatic transcoding to CBOR. 
+func Unmarshal(src []byte, dst interface{}) error { + if err := modes.RejectCustomMarshalers(dst); err != nil { + return err + } + return modes.Decode.Unmarshal(src, dst) +} + +// Diagnose accepts well-formed CBOR bytes and returns a string representing the same data item in +// human-readable diagnostic notation (RFC 8949 Section 8). The diagnostic notation is not meant to +// be parsed. +func Diagnose(src []byte) (string, error) { + return modes.Diagnostic.Diagnose(src) +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go new file mode 100644 index 000000000..28a733c67 --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go @@ -0,0 +1,90 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cbor + +import ( + "io" + + "k8s.io/apimachinery/pkg/runtime" + + "github.com/fxamacker/cbor/v2" +) + +// NewFramer returns a runtime.Framer based on RFC 8742 CBOR Sequences. Each frame contains exactly +// one encoded CBOR data item. +func NewFramer() runtime.Framer { + return framer{} +} + +var _ runtime.Framer = framer{} + +type framer struct{} + +func (framer) NewFrameReader(rc io.ReadCloser) io.ReadCloser { + return &frameReader{ + decoder: cbor.NewDecoder(rc), + closer: rc, + } +} + +func (framer) NewFrameWriter(w io.Writer) io.Writer { + // Each data item in a CBOR sequence is self-delimiting (like JSON objects). + return w +} + +type frameReader struct { + decoder *cbor.Decoder + closer io.Closer + + overflow []byte +} + +func (fr *frameReader) Read(dst []byte) (int, error) { + if len(fr.overflow) > 0 { + // We read a frame that was too large for the destination slice in a previous call + // to Read and have bytes left over. + n := copy(dst, fr.overflow) + if n < len(fr.overflow) { + fr.overflow = fr.overflow[n:] + return n, io.ErrShortBuffer + } + fr.overflow = nil + return n, nil + } + + // The Reader contract allows implementations to use all of dst[0:len(dst)] as scratch + // space, even if n < len(dst), but it does not allow implementations to use + // dst[len(dst):cap(dst)]. Slicing it up-front allows us to append to it without worrying + // about overwriting dst[len(dst):cap(dst)]. + m := cbor.RawMessage(dst[0:0:len(dst)]) + if err := fr.decoder.Decode(&m); err != nil { + return 0, err + } + + if len(m) > len(dst) { + // The frame was too big, m has a newly-allocated underlying array to accommodate + // it. 
+ fr.overflow = m[len(dst):] + return copy(dst, m), io.ErrShortBuffer + } + + return len(m), nil +} + +func (fr *frameReader) Close() error { + return fr.closer.Close() +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go new file mode 100644 index 000000000..f14cbd6b5 --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go @@ -0,0 +1,65 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "bytes" + "sync" +) + +var buffers = BufferProvider{p: new(sync.Pool)} + +type buffer struct { + bytes.Buffer +} + +type pool interface { + Get() interface{} + Put(interface{}) +} + +type BufferProvider struct { + p pool +} + +func (b *BufferProvider) Get() *buffer { + if buf, ok := b.p.Get().(*buffer); ok { + return buf + } + return &buffer{} +} + +func (b *BufferProvider) Put(buf *buffer) { + if buf.Cap() > 3*1024*1024 /* Default MaxRequestBodyBytes */ { + // Objects in a sync.Pool are assumed to be fungible. This is not a good assumption + // for pools of *bytes.Buffer because a *bytes.Buffer's underlying array grows as + // needed to accommodate writes. In Kubernetes, apiservers tend to encode "small" + // objects very frequently and much larger objects (especially large lists) only + // occasionally. Under steady load, pooled buffers tend to be borrowed frequently + // enough to prevent them from being released. Over time, each buffer is used to + // encode a large object and its capacity increases accordingly. The result is that + // practically all buffers in the pool retain much more capacity than needed to + // encode most objects. + + // As a basic mitigation for the worst case, buffers with more capacity than the + // default max request body size are never returned to the pool. + // TODO: Optimize for higher buffer utilization. + return + } + buf.Reset() + b.p.Put(buf) +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go new file mode 100644 index 000000000..858529e95 --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go @@ -0,0 +1,422 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package modes
+
+import (
+	"encoding"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+// Returns a non-nil error if and only if the argument's type (or one of its component types, for
+// composite types) implements json.Marshaler or encoding.TextMarshaler without also implementing
+// cbor.Marshaler and likewise for the respective Unmarshaler interfaces.
+//
+// This is a temporary, graduation-blocking restriction and will be removed in favor of automatic
+// transcoding between CBOR and JSON/text for these types. This restriction allows CBOR to be
+// exercised for in-tree and unstructured types while mitigating the risk of mangling out-of-tree
+// types in client programs.
+func RejectCustomMarshalers(v interface{}) error {
+	if v == nil {
+		return nil
+	}
+	rv := reflect.ValueOf(v)
+	if err := marshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil {
+		return fmt.Errorf("unable to serialize %T: %w", v, err)
+	}
+	if err := unmarshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil {
+		return fmt.Errorf("unable to serialize %T: %w", v, err)
+	}
+	return nil
+}
+
+// Recursion depth is limited as a basic mitigation against cyclic objects. Objects created by the
+// decoder shouldn't be able to contain cycles, but practically any object can be passed to the
+// encoder.
+var errMaxDepthExceeded = errors.New("object depth exceeds limit (possible cycle?)")
+
+// The JSON encoder begins detecting cycles after depth 1000. Use a generous limit here, knowing
+// that it might be exceeded by deeply nested acyclic objects. The limit will be removed along with
+// the rest of this mechanism.
+const maxDepth = 2048
+
+var marshalerCache = checkers{
+	cborInterface: reflect.TypeFor[cbor.Marshaler](),
+	nonCBORInterfaces: []reflect.Type{
+		reflect.TypeFor[json.Marshaler](),
+		reflect.TypeFor[encoding.TextMarshaler](),
+	},
+}
+
+var unmarshalerCache = checkers{
+	cborInterface: reflect.TypeFor[cbor.Unmarshaler](),
+	nonCBORInterfaces: []reflect.Type{
+		reflect.TypeFor[json.Unmarshaler](),
+		reflect.TypeFor[encoding.TextUnmarshaler](),
+	},
+	assumeAddressableValues: true,
+}
+
+// checker wraps a function for dynamically checking a value of a specific type for custom JSON
+// behaviors not matched by a custom CBOR behavior.
+type checker struct {
+	// check returns a non-nil error if the given value might be marshalled to or from CBOR
+	// using the default behavior for its kind, but marshalled to or from JSON using custom
+	// behavior.
+	check func(rv reflect.Value, depth int) error
+
+	// safe returns true if all values of this type are safe from mismatched custom marshalers.
+	safe func() bool
+}
+
+// TODO: stale
+// Having a single addressable checker for comparisons lets us prune and collapse parts of the
+// object traversal that are statically known to be safe. Depending on the type, it may be
+// unnecessary to inspect each value of that type. For example, no value of the built-in type bool
+// can implement json.Marshaler (a named type whose underlying type is bool could, but it is a
+// distinct type from bool).
+var noop = checker{ + safe: func() bool { + return true + }, + check: func(rv reflect.Value, depth int) error { + return nil + }, +} + +type checkers struct { + m sync.Map // reflect.Type => *checker + + cborInterface reflect.Type + nonCBORInterfaces []reflect.Type + + assumeAddressableValues bool +} + +func (cache *checkers) getChecker(rt reflect.Type) checker { + if ptr, ok := cache.m.Load(rt); ok { + return *ptr.(*checker) + } + + return cache.getCheckerInternal(rt, nil) +} + +// linked list node representing the path from a composite type to an element type +type path struct { + Type reflect.Type + Parent *path +} + +func (p path) cyclic(rt reflect.Type) bool { + for ancestor := &p; ancestor != nil; ancestor = ancestor.Parent { + if ancestor.Type == rt { + return true + } + } + return false +} + +func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c checker) { + // Store a placeholder cache entry first to handle cyclic types. + var wg sync.WaitGroup + wg.Add(1) + defer wg.Done() + c = checker{ + safe: func() bool { + wg.Wait() + return c.safe() + }, + check: func(rv reflect.Value, depth int) error { + wg.Wait() + return c.check(rv, depth) + }, + } + if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded { + // Someone else stored an entry for this type, use it. + return *actual.(*checker) + } + + // Take a nonreflective path for the unstructured container types. They're common and + // usually nested inside one another. + switch rt { + case reflect.TypeFor[map[string]interface{}](), reflect.TypeFor[[]interface{}](): + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + return checkUnstructuredValue(cache, rv.Interface(), depth) + }, + } + } + + // It's possible that one of the relevant interfaces is implemented on a type with a pointer + // receiver, but that a particular value of that type is not addressable. For example: + // + // func (Foo) MarshalText() ([]byte, error) { ... } + // func (*Foo) MarshalCBOR() ([]byte, error) { ... } + // + // Both methods are in the method set of *Foo, but the method set of Foo contains only + // MarshalText. + // + // Both the unmarshaler and marshaler checks assume that methods implementing a JSON or text + // interface with a pointer receiver are always accessible. Only the unmarshaler check + // assumes that CBOR methods with pointer receivers are accessible. 
+ + if rt.Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if rt.Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", rt, unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + if cache.assumeAddressableValues && reflect.PointerTo(rt).Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if reflect.PointerTo(rt).Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", reflect.PointerTo(rt), unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + self := &path{Type: rt, Parent: parent} + + switch rt.Kind() { + case reflect.Array: + ce := cache.getCheckerInternal(rt.Elem(), self) + rtlen := rt.Len() + if rtlen == 0 || (!self.cyclic(rt.Elem()) && ce.safe()) { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rtlen; i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Interface: + // All interface values have to be checked because their dynamic type might + // implement one of the interesting interfaces or be composed of another type that + // does. + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + // Unpacking interfaces must count against recursion depth, + // consider this cycle: + // > var i interface{} + // > var p *interface{} = &i + // > i = p + // > rv := reflect.ValueOf(i) + // > for { + // > rv = rv.Elem() + // > } + if depth <= 0 { + return errMaxDepthExceeded + } + rv = rv.Elem() + return cache.getChecker(rv.Type()).check(rv, depth-1) + }, + } + + case reflect.Map: + rtk := rt.Key() + ck := cache.getCheckerInternal(rtk, self) + rte := rt.Elem() + ce := cache.getCheckerInternal(rte, self) + if !self.cyclic(rtk) && !self.cyclic(rte) && ck.safe() && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + iter := rv.MapRange() + rvk := reflect.New(rtk).Elem() + rve := reflect.New(rte).Elem() + for iter.Next() { + rvk.SetIterKey(iter) + if err := ck.check(rvk, depth-1); err != nil { + return err + } + rve.SetIterValue(iter) + if err := ce.check(rve, depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Pointer: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + if depth <= 0 { + return errMaxDepthExceeded + } + return ce.check(rv.Elem(), depth-1) + }, + } + + case reflect.Slice: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rv.Len(); i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return 
err + } + } + return nil + }, + } + + case reflect.Struct: + type field struct { + Index int + Checker checker + } + var fields []field + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + cf := cache.getCheckerInternal(f.Type, self) + if !self.cyclic(f.Type) && cf.safe() { + continue + } + fields = append(fields, field{Index: i, Checker: cf}) + } + if len(fields) == 0 { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for _, fi := range fields { + if err := fi.Checker.check(rv.Field(fi.Index), depth-1); err != nil { + return err + } + } + return nil + }, + } + + default: + // Not a serializable composite type (funcs and channels are composite types but are + // rejected by JSON and CBOR serialization). + return noop + + } +} + +func checkUnstructuredValue(cache *checkers, v interface{}, depth int) error { + switch v := v.(type) { + case nil, bool, int64, float64, string: + return nil + case []interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + case map[string]interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + default: + // Unmarshaling an unstructured doesn't use other dynamic types, but nothing + // prevents inserting values with arbitrary dynamic types into unstructured content, + // as long as they can be marshalled. + rv := reflect.ValueOf(v) + return cache.getChecker(rv.Type()).check(rv, depth) + } +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go new file mode 100644 index 000000000..895b0deff --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go @@ -0,0 +1,158 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "reflect" + + "github.com/fxamacker/cbor/v2" +) + +var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry { + var opts []func(*cbor.SimpleValueRegistry) error + for sv := 0; sv <= 255; sv++ { + // Reject simple values 0-19, 23, and 32-255. The simple values 24-31 are reserved + // and considered ill-formed by the CBOR specification. We only accept false (20), + // true (21), and null (22). + switch sv { + case 20: // false + case 21: // true + case 22: // null + case 24, 25, 26, 27, 28, 29, 30, 31: // reserved + default: + opts = append(opts, cbor.WithRejectedSimpleValue(cbor.SimpleValue(sv))) + } + } + simpleValues, err := cbor.NewSimpleValueRegistryFromDefaults(opts...) 
+ if err != nil { + panic(err) + } + return simpleValues +}() + +var Decode cbor.DecMode = func() cbor.DecMode { + decode, err := cbor.DecOptions{ + // Maps with duplicate keys are well-formed but invalid according to the CBOR spec + // and never acceptable. Unlike the JSON serializer, inputs containing duplicate map + // keys are rejected outright and not surfaced as a strict decoding error. + DupMapKey: cbor.DupMapKeyEnforcedAPF, + + // For JSON parity, decoding an RFC3339 string into time.Time needs to be accepted + // with or without tagging. If a tag number is present, it must be valid. + TimeTag: cbor.DecTagOptional, + + // Observed depth up to 16 in fuzzed batch/v1 CronJobList. JSON implementation limit + // is 10000. + MaxNestedLevels: 64, + + MaxArrayElements: 1024, + MaxMapPairs: 1024, + + // Indefinite-length sequences aren't produced by this serializer, but other + // implementations can. + IndefLength: cbor.IndefLengthAllowed, + + // Accept inputs that contain CBOR tags. + TagsMd: cbor.TagsAllowed, + + // Decode type 0 (unsigned integer) as int64. + // TODO: IntDecConvertSignedOrFail errors on overflow, JSON will try to fall back to float64. + IntDec: cbor.IntDecConvertSignedOrFail, + + // Disable producing map[cbor.ByteString]interface{}, which is not acceptable for + // decodes into interface{}. + MapKeyByteString: cbor.MapKeyByteStringForbidden, + + // Error on map keys that don't map to a field in the destination struct. + ExtraReturnErrors: cbor.ExtraDecErrorUnknownField, + + // Decode maps into concrete type map[string]interface{} when the destination is an + // interface{}. + DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)), + + // A CBOR text string whose content is not a valid UTF-8 sequence is well-formed but + // invalid according to the CBOR spec. Reject invalid inputs. Encoders are + // responsible for ensuring that all text strings they produce contain valid UTF-8 + // sequences and may use the byte string major type to encode strings that have not + // been validated. + UTF8: cbor.UTF8RejectInvalid, + + // Never make a case-insensitive match between a map key and a struct field. + FieldNameMatching: cbor.FieldNameMatchingCaseSensitive, + + // Produce string concrete values when decoding a CBOR byte string into interface{}. + DefaultByteStringType: reflect.TypeOf(""), + + // Allow CBOR byte strings to be decoded into string destination values. If a byte + // string is enclosed in an "expected later encoding" tag + // (https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), then the text + // encoding indicated by that tag (e.g. base64) will be applied to the contents of + // the byte string. + ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding, + + // Allow CBOR byte strings to match struct fields when appearing as a map key. + FieldNameByteString: cbor.FieldNameByteStringAllowed, + + // When decoding an unrecognized tag to interface{}, return the decoded tag content + // instead of the default, a cbor.Tag representing a (number, content) pair. + UnrecognizedTagToAny: cbor.UnrecognizedTagContentToAny, + + // Decode time tags to interface{} as strings containing RFC 3339 timestamps. + TimeTagToAny: cbor.TimeTagToRFC3339Nano, + + // For parity with JSON, strings can be decoded into time.Time if they are RFC 3339 + // timestamps. + ByteStringToTime: cbor.ByteStringToTimeAllowed, + + // Reject NaN and infinite floating-point values since they don't have a JSON + // representation (RFC 8259 Section 6). 
+		NaN: cbor.NaNDecodeForbidden,
+		Inf: cbor.InfDecodeForbidden,
+
+		// When unmarshaling a byte string into a []byte, assume that the byte string
+		// contains base64-encoded bytes, unless explicitly counterindicated by an "expected
+		// later encoding" tag. This is consistent with the behavior of unmarshaling a JSON
+		// text into a []byte.
+		ByteStringExpectedFormat: cbor.ByteStringExpectedBase64,
+
+		// Reject the arbitrary-precision integer tags because they can't be faithfully
+		// roundtripped through the allowable Unstructured types.
+		BignumTag: cbor.BignumTagForbidden,
+
+		// Reject anything other than the simple values true, false, and null.
+		SimpleValues: simpleValues,
+
+		// Disable default recognition of types implementing encoding.BinaryUnmarshaler,
+		// which is not recognized for JSON decoding.
+		BinaryUnmarshaler: cbor.BinaryUnmarshalerNone,
+	}.DecMode()
+	if err != nil {
+		panic(err)
+	}
+	return decode
+}()
+
+// DecodeLax is derived from Decode, but does not complain about unknown fields in the input.
+var DecodeLax cbor.DecMode = func() cbor.DecMode {
+	opts := Decode.DecOptions()
+	opts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit
+	dm, err := opts.DecMode()
+	if err != nil {
+		panic(err)
+	}
+	return dm
+}()
diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go
new file mode 100644
index 000000000..61f3f145f
--- /dev/null
+++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package modes
+
+import (
+	"github.com/fxamacker/cbor/v2"
+)
+
+var Diagnostic cbor.DiagMode = func() cbor.DiagMode {
+	opts := Decode.DecOptions()
+	diagnostic, err := cbor.DiagOptions{
+		ByteStringText: true,
+
+		MaxNestedLevels:  opts.MaxNestedLevels,
+		MaxArrayElements: opts.MaxArrayElements,
+		MaxMapPairs:      opts.MaxMapPairs,
+	}.DiagMode()
+	if err != nil {
+		panic(err)
+	}
+	return diagnostic
+}()
diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
new file mode 100644
index 000000000..5fae14151
--- /dev/null
+++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "io" + + "github.com/fxamacker/cbor/v2" +) + +var Encode = EncMode{ + delegate: func() cbor.UserBufferEncMode { + encode, err := cbor.EncOptions{ + // Map keys need to be sorted to have deterministic output, and this is the order + // defined in RFC 8949 4.2.1 "Core Deterministic Encoding Requirements". + Sort: cbor.SortBytewiseLexical, + + // CBOR supports distinct types for IEEE-754 float16, float32, and float64. Store + // floats in the smallest width that preserves value so that equivalent float32 and + // float64 values encode to identical bytes, as they do in a JSON + // encoding. Satisfies one of the "Core Deterministic Encoding Requirements". + ShortestFloat: cbor.ShortestFloat16, + + // Error on attempt to encode NaN and infinite values. This is what the JSON + // serializer does. + NaNConvert: cbor.NaNConvertReject, + InfConvert: cbor.InfConvertReject, + + // Error on attempt to encode math/big.Int values, which can't be faithfully + // roundtripped through Unstructured in general (the dynamic numeric types allowed + // in Unstructured are limited to float64 and int64). + BigIntConvert: cbor.BigIntConvertReject, + + // MarshalJSON for time.Time writes RFC3339 with nanos. + Time: cbor.TimeRFC3339Nano, + + // The decoder must be able to accept RFC3339 strings with or without tag 0 (e.g. by + // the end of time.Time -> JSON -> Unstructured -> CBOR, the CBOR encoder has no + // reliable way of knowing that a particular string originated from serializing a + // time.Time), so producing tag 0 has little use. + TimeTag: cbor.EncTagNone, + + // Indefinite-length items have multiple encodings and aren't being used anyway, so + // disable to avoid an opportunity for nondeterminism. + IndefLength: cbor.IndefLengthForbidden, + + // Preserve distinction between nil and empty for slices and maps. + NilContainers: cbor.NilContainerAsNull, + + // OK to produce tags. + TagsMd: cbor.TagsAllowed, + + // Use the same definition of "empty" as encoding/json. + OmitEmpty: cbor.OmitEmptyGoValue, + + // The CBOR types text string and byte string are structurally equivalent, with the + // semantic difference that a text string whose content is an invalid UTF-8 sequence + // is itself invalid. We reject all invalid text strings at decode time and do not + // validate or sanitize all Go strings at encode time. Encoding Go strings to the + // byte string type is comparable to the existing Protobuf behavior and cheaply + // ensures that the output is valid CBOR. + String: cbor.StringToByteString, + + // Encode struct field names to the byte string type rather than the text string + // type. + FieldName: cbor.FieldNameToByteString, + + // Marshal Go byte arrays to CBOR arrays of integers (as in JSON) instead of byte + // strings. + ByteArray: cbor.ByteArrayToArray, + + // Marshal []byte to CBOR byte string enclosed in tag 22 (expected later base64 + // encoding, https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), to + // interoperate with the existing JSON behavior. This indicates to the decoder that, + // when decoding into a string (or unstructured), the resulting value should be the + // base64 encoding of the original bytes. No base64 encoding or decoding needs to be + // performed for []byte-to-CBOR-to-[]byte roundtrips. 
+ ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64, + + // Disable default recognition of types implementing encoding.BinaryMarshaler, which + // is not recognized for JSON encoding. + BinaryMarshaler: cbor.BinaryMarshalerNone, + }.UserBufferEncMode() + if err != nil { + panic(err) + } + return encode + }(), +} + +var EncodeNondeterministic = EncMode{ + delegate: func() cbor.UserBufferEncMode { + opts := Encode.options() + opts.Sort = cbor.SortFastShuffle + em, err := opts.UserBufferEncMode() + if err != nil { + panic(err) + } + return em + }(), +} + +type EncMode struct { + delegate cbor.UserBufferEncMode +} + +func (em EncMode) options() cbor.EncOptions { + return em.delegate.EncOptions() +} + +func (em EncMode) MarshalTo(v interface{}, w io.Writer) error { + if buf, ok := w.(*buffer); ok { + return em.delegate.MarshalToBuffer(v, &buf.Buffer) + } + + buf := buffers.Get() + defer buffers.Put(buf) + if err := em.delegate.MarshalToBuffer(v, &buf.Buffer); err != nil { + return err + } + + if _, err := io.Copy(w, buf); err != nil { + return err + } + + return nil +} + +func (em EncMode) Marshal(v interface{}) ([]byte, error) { + buf := buffers.Get() + defer buffers.Put(buf) + + if err := em.MarshalTo(v, &buf.Buffer); err != nil { + return nil, err + } + + clone := make([]byte, buf.Len()) + copy(clone, buf.Bytes()) + + return clone, nil +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go new file mode 100644 index 000000000..09d1340f9 --- /dev/null +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go @@ -0,0 +1,236 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cbor + +import ( + "fmt" + "reflect" + "sync" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var sharedTranscoders transcoders + +var rawTypeTranscodeFuncs = map[reflect.Type]func(reflect.Value) error{ + reflect.TypeFor[runtime.RawExtension](): func(rv reflect.Value) error { + if !rv.CanAddr() { + return nil + } + re := rv.Addr().Interface().(*runtime.RawExtension) + if re.Raw == nil { + // When Raw is nil it encodes to null. Don't change nil Raw values during + // transcoding, they would have unmarshalled from JSON as nil too. + return nil + } + j, err := re.MarshalJSON() + if err != nil { + return fmt.Errorf("failed to transcode RawExtension to JSON: %w", err) + } + re.Raw = j + return nil + }, + reflect.TypeFor[metav1.FieldsV1](): func(rv reflect.Value) error { + if !rv.CanAddr() { + return nil + } + fields := rv.Addr().Interface().(*metav1.FieldsV1) + if fields.Raw == nil { + // When Raw is nil it encodes to null. Don't change nil Raw values during + // transcoding, they would have unmarshalled from JSON as nil too. 
+ return nil + } + j, err := fields.MarshalJSON() + if err != nil { + return fmt.Errorf("failed to transcode FieldsV1 to JSON: %w", err) + } + fields.Raw = j + return nil + }, +} + +func transcodeRawTypes(v interface{}) error { + if v == nil { + return nil + } + + rv := reflect.ValueOf(v) + return sharedTranscoders.getTranscoder(rv.Type()).fn(rv) +} + +type transcoder struct { + fn func(rv reflect.Value) error +} + +var noop = transcoder{ + fn: func(reflect.Value) error { + return nil + }, +} + +type transcoders struct { + lock sync.RWMutex + m map[reflect.Type]**transcoder +} + +func (ts *transcoders) getTranscoder(rt reflect.Type) transcoder { + ts.lock.RLock() + tpp, ok := ts.m[rt] + ts.lock.RUnlock() + if ok { + return **tpp + } + + ts.lock.Lock() + defer ts.lock.Unlock() + tp := ts.getTranscoderLocked(rt) + return *tp +} + +func (ts *transcoders) getTranscoderLocked(rt reflect.Type) *transcoder { + if tpp, ok := ts.m[rt]; ok { + // A transcoder for this type was cached while waiting to acquire the lock. + return *tpp + } + + // Cache the transcoder now, before populating fn, so that circular references between types + // don't overflow the call stack. + t := new(transcoder) + if ts.m == nil { + ts.m = make(map[reflect.Type]**transcoder) + } + ts.m[rt] = &t + + for rawType, fn := range rawTypeTranscodeFuncs { + if rt == rawType { + t = &transcoder{fn: fn} + return t + } + } + + switch rt.Kind() { + case reflect.Array: + te := ts.getTranscoderLocked(rt.Elem()) + rtlen := rt.Len() + if rtlen == 0 || te == &noop { + t = &noop + break + } + t.fn = func(rv reflect.Value) error { + for i := 0; i < rtlen; i++ { + if err := te.fn(rv.Index(i)); err != nil { + return err + } + } + return nil + } + case reflect.Interface: + // Any interface value might have a dynamic type involving RawExtension. It needs to + // be checked. + t.fn = func(rv reflect.Value) error { + if rv.IsNil() { + return nil + } + rv = rv.Elem() + // The interface element's type is dynamic so its transcoder can't be + // determined statically. 
+ return ts.getTranscoder(rv.Type()).fn(rv) + } + case reflect.Map: + rtk := rt.Key() + tk := ts.getTranscoderLocked(rtk) + rte := rt.Elem() + te := ts.getTranscoderLocked(rte) + if tk == &noop && te == &noop { + t = &noop + break + } + t.fn = func(rv reflect.Value) error { + iter := rv.MapRange() + rvk := reflect.New(rtk).Elem() + rve := reflect.New(rte).Elem() + for iter.Next() { + rvk.SetIterKey(iter) + if err := tk.fn(rvk); err != nil { + return err + } + rve.SetIterValue(iter) + if err := te.fn(rve); err != nil { + return err + } + } + return nil + } + case reflect.Pointer: + te := ts.getTranscoderLocked(rt.Elem()) + if te == &noop { + t = &noop + break + } + t.fn = func(rv reflect.Value) error { + if rv.IsNil() { + return nil + } + return te.fn(rv.Elem()) + } + case reflect.Slice: + te := ts.getTranscoderLocked(rt.Elem()) + if te == &noop { + t = &noop + break + } + t.fn = func(rv reflect.Value) error { + for i := 0; i < rv.Len(); i++ { + if err := te.fn(rv.Index(i)); err != nil { + return err + } + } + return nil + } + case reflect.Struct: + type fieldTranscoder struct { + Index int + Transcoder *transcoder + } + var fieldTranscoders []fieldTranscoder + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + tf := ts.getTranscoderLocked(f.Type) + if tf == &noop { + continue + } + fieldTranscoders = append(fieldTranscoders, fieldTranscoder{Index: i, Transcoder: tf}) + } + if len(fieldTranscoders) == 0 { + t = &noop + break + } + t.fn = func(rv reflect.Value) error { + for _, ft := range fieldTranscoders { + if err := ft.Transcoder.fn(rv.Field(ft.Index)); err != nil { + return err + } + } + return nil + } + default: + t = &noop + } + + return t +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index ff9820842..77bb30745 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -17,9 +17,6 @@ limitations under the License. 
package serializer import ( - "mime" - "strings" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" @@ -28,41 +25,26 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer/versioning" ) -// serializerExtensions are for serializers that are conditionally compiled in -var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){} - -type serializerType struct { - AcceptContentTypes []string - ContentType string - FileExtensions []string - // EncodesAsText should be true if this content type can be represented safely in UTF-8 - EncodesAsText bool - - Serializer runtime.Serializer - PrettySerializer runtime.Serializer - StrictSerializer runtime.Serializer - - AcceptStreamContentTypes []string - StreamContentType string - - Framer runtime.Framer - StreamSerializer runtime.Serializer -} - -func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType { +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []runtime.SerializerInfo { jsonSerializer := json.NewSerializerWithOptions( mf, scheme, scheme, json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict}, ) - jsonSerializerType := serializerType{ - AcceptContentTypes: []string{runtime.ContentTypeJSON}, - ContentType: runtime.ContentTypeJSON, - FileExtensions: []string{"json"}, - EncodesAsText: true, - Serializer: jsonSerializer, - - Framer: json.Framer, - StreamSerializer: jsonSerializer, + jsonSerializerType := runtime.SerializerInfo{ + MediaType: runtime.ContentTypeJSON, + MediaTypeType: "application", + MediaTypeSubType: "json", + EncodesAsText: true, + Serializer: jsonSerializer, + StrictSerializer: json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: true}, + ), + StreamSerializer: &runtime.StreamSerializerInfo{ + EncodesAsText: true, + Serializer: jsonSerializer, + Framer: json.Framer, + }, } if options.Pretty { jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions( @@ -71,12 +53,6 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option ) } - strictJSONSerializer := json.NewSerializerWithOptions( - mf, scheme, scheme, - json.SerializerOptions{Yaml: false, Pretty: false, Strict: true}, - ) - jsonSerializerType.StrictSerializer = strictJSONSerializer - yamlSerializer := json.NewSerializerWithOptions( mf, scheme, scheme, json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict}, @@ -88,35 +64,35 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option protoSerializer := protobuf.NewSerializer(scheme, scheme) protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) - serializers := []serializerType{ + serializers := []runtime.SerializerInfo{ jsonSerializerType, { - AcceptContentTypes: []string{runtime.ContentTypeYAML}, - ContentType: runtime.ContentTypeYAML, - FileExtensions: []string{"yaml"}, - EncodesAsText: true, - Serializer: yamlSerializer, - StrictSerializer: strictYAMLSerializer, + MediaType: runtime.ContentTypeYAML, + MediaTypeType: "application", + MediaTypeSubType: "yaml", + EncodesAsText: true, + Serializer: yamlSerializer, + StrictSerializer: strictYAMLSerializer, }, { - AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, - ContentType: runtime.ContentTypeProtobuf, - FileExtensions: []string{"pb"}, - Serializer: protoSerializer, + MediaType: 
runtime.ContentTypeProtobuf, + MediaTypeType: "application", + MediaTypeSubType: "vnd.kubernetes.protobuf", + Serializer: protoSerializer, // note, strict decoding is unsupported for protobuf, // fall back to regular serializing StrictSerializer: protoSerializer, - - Framer: protobuf.LengthDelimitedFramer, - StreamSerializer: protoRawSerializer, + StreamSerializer: &runtime.StreamSerializerInfo{ + Serializer: protoRawSerializer, + Framer: protobuf.LengthDelimitedFramer, + }, }, } - for _, fn := range serializerExtensions { - if serializer, ok := fn(scheme); ok { - serializers = append(serializers, serializer) - } + for _, f := range options.serializers { + serializers = append(serializers, f(scheme, scheme)) } + return serializers } @@ -136,6 +112,8 @@ type CodecFactoryOptions struct { Strict bool // Pretty includes a pretty serializer along with the non-pretty one Pretty bool + + serializers []func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo } // CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it. @@ -162,6 +140,13 @@ func DisableStrict(options *CodecFactoryOptions) { options.Strict = false } +// WithSerializer configures a serializer to be supported in addition to the default serializers. +func WithSerializer(f func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo) CodecFactoryOptionsMutator { + return func(options *CodecFactoryOptions) { + options.serializers = append(options.serializers, f) + } +} + // NewCodecFactory provides methods for retrieving serializers for the supported wire formats // and conversion wrappers to define preferred internal and external versions. In the future, // as the internal version is used less, callers may instead use a defaulting serializer and @@ -184,7 +169,7 @@ func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMuta } // newCodecFactory is a helper for testing that allows a different metafactory to be specified. 
-func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory { +func newCodecFactory(scheme *runtime.Scheme, serializers []runtime.SerializerInfo) CodecFactory { decoders := make([]runtime.Decoder, 0, len(serializers)) var accepts []runtime.SerializerInfo alreadyAccepted := make(map[string]struct{}) @@ -192,38 +177,20 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec var legacySerializer runtime.Serializer for _, d := range serializers { decoders = append(decoders, d.Serializer) - for _, mediaType := range d.AcceptContentTypes { - if _, ok := alreadyAccepted[mediaType]; ok { - continue - } - alreadyAccepted[mediaType] = struct{}{} - info := runtime.SerializerInfo{ - MediaType: d.ContentType, - EncodesAsText: d.EncodesAsText, - Serializer: d.Serializer, - PrettySerializer: d.PrettySerializer, - StrictSerializer: d.StrictSerializer, - } - - mediaType, _, err := mime.ParseMediaType(info.MediaType) - if err != nil { - panic(err) - } - parts := strings.SplitN(mediaType, "/", 2) - info.MediaTypeType = parts[0] - info.MediaTypeSubType = parts[1] - - if d.StreamSerializer != nil { - info.StreamSerializer = &runtime.StreamSerializerInfo{ - Serializer: d.StreamSerializer, - EncodesAsText: d.EncodesAsText, - Framer: d.Framer, - } - } - accepts = append(accepts, info) - if mediaType == runtime.ContentTypeJSON { - legacySerializer = d.Serializer - } + if _, ok := alreadyAccepted[d.MediaType]; ok { + continue + } + alreadyAccepted[d.MediaType] = struct{}{} + + acceptedSerializerShallowCopy := d + if d.StreamSerializer != nil { + cloned := *d.StreamSerializer + acceptedSerializerShallowCopy.StreamSerializer = &cloned + } + accepts = append(accepts, acceptedSerializerShallowCopy) + + if d.MediaType == runtime.ContentTypeJSON { + legacySerializer = d.Serializer } } if legacySerializer == nil { diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/types.go index ce77c7910..ca7b7cc2d 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -43,9 +43,11 @@ type TypeMeta struct { } const ( - ContentTypeJSON string = "application/json" - ContentTypeYAML string = "application/yaml" - ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" + ContentTypeJSON string = "application/json" + ContentTypeYAML string = "application/yaml" + ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" + ContentTypeCBOR string = "application/cbor" // RFC 8949 + ContentTypeCBORSequence string = "application/cbor-seq" // RFC 8742 ) // RawExtension is used to hold extensions in external versions. 
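Editorial note, not part of the vendored diff: the files above add a CBOR runtime.SerializerInfo (cbor.NewSerializerInfo), a WithSerializer option on the codec factory, and a runtime.ContentTypeCBOR constant, but nothing in this change wires them together. The sketch below is an illustrative guess at how a consumer might do so. The package alias cborserializer, the main wrapper, and the sample ConfigMap object are assumptions for the example; NewCodecFactory, WithSerializer, NewSerializerInfo, SupportedMediaTypes, SerializerInfoForMediaType, and ContentTypeCBOR are taken from the vendored apimachinery code shown in this diff.

package main

import (
	"bytes"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	cborserializer "k8s.io/apimachinery/pkg/runtime/serializer/cbor"
)

func main() {
	scheme := runtime.NewScheme()

	// Register the CBOR SerializerInfo alongside the default JSON/YAML/protobuf
	// serializers via the WithSerializer option introduced above.
	codecs := serializer.NewCodecFactory(scheme, serializer.WithSerializer(cborserializer.NewSerializerInfo))

	info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeCBOR)
	if !ok {
		panic("application/cbor not registered")
	}

	// Unstructured objects bypass the scheme, so nothing else needs registering here.
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "example"},
	}}

	var buf bytes.Buffer
	if err := info.Serializer.Encode(obj, &buf); err != nil {
		panic(err)
	}

	// Every encoded item begins with the self-described CBOR tag, d9 d9 f7, which is
	// what RecognizesData keys on.
	fmt.Printf("prefix: % x\n", buf.Bytes()[:3])

	decoded := &unstructured.Unstructured{}
	if _, _, err := info.Serializer.Decode(buf.Bytes(), nil, decoded); err != nil {
		panic(err)
	}
	fmt.Println("decoded name:", decoded.GetName())
}
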
diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/types/patch.go b/constraint/vendor/k8s.io/apimachinery/pkg/types/patch.go index fe8ecaaff..d338cf213 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/types/patch.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -25,5 +25,7 @@ const ( JSONPatchType PatchType = "application/json-patch+json" MergePatchType PatchType = "application/merge-patch+json" StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" - ApplyPatchType PatchType = "application/apply-patch+yaml" + ApplyPatchType PatchType = ApplyYAMLPatchType + ApplyYAMLPatchType PatchType = "application/apply-patch+yaml" + ApplyCBORPatchType PatchType = "application/apply-patch+cbor" ) diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 9b3c9c8d5..1ab8fd396 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -147,7 +147,6 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see // data written to data, or be larger than data and a different array. - n := len(data) m := json.RawMessage(data[:0]) if err := r.decoder.Decode(&m); err != nil { return 0, err @@ -156,12 +155,19 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // If capacity of data is less than length of the message, decoder will allocate a new slice // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. - if len(m) > n { - //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. - data = append(data[0:0], m[:n]...) - r.remaining = m[n:] - return n, io.ErrShortBuffer + if len(m) > cap(data) { + copy(data, m) + r.remaining = m[len(data):] + return len(data), io.ErrShortBuffer } + + if len(m) > len(data) { + // The bytes beyond len(data) were stored in data's underlying array, which we do + // not own after this function returns. + r.remaining = append([]byte(nil), m[len(data):]...) + return len(data), io.ErrShortBuffer + } + return len(m), nil } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index f358c794d..5fd2e16c8 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/klog/v2" ) @@ -92,6 +93,20 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error { return json.Unmarshal(value, &intstr.IntVal) } +func (intstr *IntOrString) UnmarshalCBOR(value []byte) error { + if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil { + intstr.Type = String + return nil + } + + if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil { + return err + } + + intstr.Type = Int + return nil +} + // String returns the string value, or the Itoa of the int value. 
func (intstr *IntOrString) String() string { if intstr == nil { @@ -126,6 +141,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } +func (intstr IntOrString) MarshalCBOR() ([]byte, error) { + switch intstr.Type { + case Int: + return cbor.Marshal(intstr.IntVal) + case String: + return cbor.Marshal(intstr.StrVal) + default: + return nil, fmt.Errorf("impossible IntOrString.Type") + } +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. // diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go index 978ffb3c3..de540c82f 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go @@ -19,11 +19,12 @@ package managedfields import ( "fmt" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/managedfields/internal" - "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) // FieldManager updates the managed fields and merges applied @@ -32,7 +33,7 @@ type FieldManager = internal.FieldManager // NewDefaultFieldManager creates a new FieldManager that merges apply requests // and update managed fields for other types of requests. -func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) { +func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (*FieldManager, error) { f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) if err != nil { return nil, fmt.Errorf("failed to create field manager: %v", err) @@ -43,7 +44,7 @@ func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime // NewDefaultCRDFieldManager creates a new FieldManager specifically for // CRDs. This allows for the possibility of fields which are not defined // in models, as well as having no models defined at all. 
-func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) { +func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (_ *FieldManager, err error) { f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields) if err != nil { return nil, fmt.Errorf("failed to create field manager: %v", err) diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go index 786ad991c..3fe36edc9 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -19,13 +19,14 @@ package internal import ( "fmt" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/structured-merge-diff/v4/fieldpath" - "sigs.k8s.io/structured-merge-diff/v4/merge" - "sigs.k8s.io/structured-merge-diff/v4/typed" ) type structuredMergeManager struct { @@ -41,7 +42,7 @@ var _ Manager = &structuredMergeManager{} // NewStructuredMergeManager creates a new Manager that merges apply requests // and update managed fields for other types of requests. -func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) { +func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (Manager, error) { if typeConverter == nil { return nil, fmt.Errorf("typeconverter must not be nil") } @@ -52,8 +53,8 @@ func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runt groupVersion: gv, hubVersion: hub, updater: merge.Updater{ - Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s - IgnoredFields: resetFields, + Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s + IgnoreFilter: resetFields, }, }, nil } @@ -61,7 +62,7 @@ func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runt // NewCRDStructuredMergeManager creates a new Manager specifically for // CRDs. This allows for the possibility of fields which are not defined // in models, as well as having no models defined at all. 
-func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) { +func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (_ Manager, err error) { return &structuredMergeManager{ typeConverter: typeConverter, objectConverter: objectConverter, @@ -69,8 +70,8 @@ func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter r groupVersion: gv, hubVersion: hub, updater: merge.Updater{ - Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), - IgnoredFields: resetFields, + Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), + IgnoreFilter: resetFields, }, }, nil } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 3674914f7..df374949d 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -17,6 +17,7 @@ limitations under the License. package runtime import ( + "context" "fmt" "net/http" "runtime" @@ -35,7 +36,7 @@ var ( ) // PanicHandlers is a list of functions which will be invoked when a panic happens. -var PanicHandlers = []func(interface{}){logPanic} +var PanicHandlers = []func(context.Context, interface{}){logPanic} // HandleCrash simply catches a crash and logs an error. Meant to be called via // defer. Additional context-specific handlers can be provided, and will be @@ -43,23 +44,54 @@ var PanicHandlers = []func(interface{}){logPanic} // handlers and logging the panic message. // // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// Contextual logging: HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { - for _, fn := range PanicHandlers { - fn(r) - } - for _, fn := range additionalHandlers { - fn(r) - } - if ReallyCrash { - // Actually proceed to panic. - panic(r) + additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers)) + for i, handler := range additionalHandlers { + handler := handler // capture loop variable + additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) { + handler(r) + } } + + handleCrash(context.Background(), r, additionalHandlersWithContext...) + } +} + +// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. +// +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// The context is used to determine how to log. +func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) { + if r := recover(); r != nil { + handleCrash(ctx, r, additionalHandlers...) 
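Reviewer note: PanicHandlers and HandleCrash now run through a context-aware path, and HandleCrashWithContext is the variant to prefer in code that supports contextual logging. A small usage sketch, assuming the package is imported as utilruntime; note the process still re-panics after the handlers run because ReallyCrash defaults to true, so this program intentionally exits with the original panic.

```go
package main

import (
	"context"
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func doWork(ctx context.Context) {
	// The contextual variant: the panic is logged through the logger found in
	// ctx (klog.FromContext), the extra handler runs, and then the panic is
	// re-raised so the process still crashes loudly.
	defer utilruntime.HandleCrashWithContext(ctx, func(_ context.Context, r interface{}) {
		fmt.Println("releasing resources after panic:", r)
	})
	panic("boom")
}

func main() {
	doWork(context.Background())
}
```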
+ } +} + +// handleCrash is the common implementation of HandleCrash and HandleCrash. +// Having those call a common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) { + for _, fn := range PanicHandlers { + fn(ctx, r) + } + for _, fn := range additionalHandlers { + fn(ctx, r) + } + if ReallyCrash { + // Actually proceed to panic. + panic(r) } } // logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). -func logPanic(r interface{}) { +func logPanic(ctx context.Context, r interface{}) { if r == http.ErrAbortHandler { // honor the http.ErrAbortHandler sentinel panic value: // ErrAbortHandler is a sentinel panic value to abort a handler. @@ -73,10 +105,20 @@ func logPanic(r interface{}) { const size = 64 << 10 stacktrace := make([]byte, size) stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + + // We don't really know how many call frames to skip because the Go + // panic handler is between us and the code where the panic occurred. + // If it's one function (as in Go 1.21), then skipping four levels + // gets us to the function which called the `defer HandleCrashWithontext(...)`. + logger := klog.FromContext(ctx).WithCallDepth(4) + + // For backwards compatibility, conversion to string + // is handled here instead of defering to the logging + // backend. if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace)) } else { - klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace)) } } @@ -84,35 +126,76 @@ func logPanic(r interface{}) { // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function // should be packaged up into a testable and reusable object. -var ErrorHandlers = []func(error){ +var ErrorHandlers = []ErrorHandler{ logError, - (&rudimentaryErrorBackoff{ - lastErrorTime: time.Now(), - // 1ms was the number folks were able to stomach as a global rate limit. - // If you need to log errors more than 1000 times a second you - // should probably consider fixing your code instead. :) - minPeriod: time.Millisecond, - }).OnError, + func(_ context.Context, _ error, _ string, _ ...interface{}) { + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError() + }, } +type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{}) + // HandlerError is a method to invoke when a non-user facing piece of code cannot // return an error and needs to indicate it has been ignored. Invoking this method // is preferable to logging the error - the default behavior is to log but the // errors may be sent to a remote server for analysis. +// +// Contextual logging: HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging. func HandleError(err error) { // this is sometimes called with a nil error. 
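Reviewer note: ErrorHandlers entries are now of type ErrorHandler, i.e. func(ctx, err, msg, keysAndValues...), instead of func(error). A sketch of registering a custom handler against the new signature; the handler body and message are illustrative.

```go
package main

import (
	"context"
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
	// Custom handlers now receive the context, a message and structured
	// key/value pairs in addition to the error itself.
	utilruntime.ErrorHandlers = append(utilruntime.ErrorHandlers,
		func(_ context.Context, err error, msg string, keysAndValues ...interface{}) {
			fmt.Println("custom handler:", msg, err, keysAndValues)
		})

	// HandleError still takes only an error; internally it forwards
	// context.Background() and the fixed "Unhandled Error" message to every
	// registered handler.
	utilruntime.HandleError(fmt.Errorf("informer cache sync failed"))
}
```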
We probably shouldn't fail and should do nothing instead if err == nil { return } + handleError(context.Background(), err, "Unhandled Error") +} + +// HandlerErrorWithContext is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. The context is used to +// determine how to log the error. +// +// If contextual logging is enabled, the default log output is equivalent to +// +// logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...) +// +// Without contextual logging, it is equivalent to: +// +// klog.ErrorS(err, msg, keysAndValues...) +// +// In contrast to HandleError, passing nil for the error is still going to +// trigger a log entry. Don't construct a new error or wrap an error +// with fmt.Errorf. Instead, add additional information via the mssage +// and key/value pairs. +// +// This variant should be used instead of HandleError because it supports +// structured, contextual logging. +func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + handleError(ctx, err, msg, keysAndValues...) +} + +// handleError is the common implementation of HandleError and HandleErrorWithContext. +// Using this common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { for _, fn := range ErrorHandlers { - fn(err) + fn(ctx, err, msg, keysAndValues...) } } -// logError prints an error with the call stack of the location it was reported -func logError(err error) { - klog.ErrorDepth(2, err) +// logError prints an error with the call stack of the location it was reported. +// It expects to be called as -> HandleError[WithContext] -> handleError -> logError. +func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + logger := klog.FromContext(ctx).WithCallDepth(3) + logger = klog.LoggerWithName(logger, "UnhandledError") + logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs. } type rudimentaryErrorBackoff struct { @@ -125,7 +208,7 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. -func (r *rudimentaryErrorBackoff) OnError(error) { +func (r *rudimentaryErrorBackoff) OnError() { now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() d := now.Sub(r.lastErrorTime) diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/sets/set.go index b76129a1c..cd961c8c5 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/sets/set.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/sets/set.go @@ -68,14 +68,8 @@ func (s Set[T]) Delete(items ...T) Set[T] { // Clear empties the set. // It is preferable to replace the set with a newly constructed set, // but not all callers can do that (when there are other references to the map). -// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN -// can't be cleared because NaN can't be removed. 
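Reviewer note: HandleErrorWithContext is the contextual counterpart defined above; unlike HandleError it logs even when err is nil and expects extra detail as key/value pairs rather than a wrapped error. Minimal sketch; the message and keys are illustrative.

```go
package main

import (
	"context"
	"errors"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
	ctx := context.Background()

	// Preferred form in code that supports contextual logging: pass the raw
	// error plus key/value pairs instead of wrapping it with fmt.Errorf.
	utilruntime.HandleErrorWithContext(ctx, errors.New("connection refused"),
		"Failed to list objects", "resource", "pods", "namespace", "default")
}
```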
-// For sets containing items of a type that is reflexive for ==, -// this is optimized to a single call to runtime.mapclear(). func (s Set[T]) Clear() Set[T] { - for key := range s { - delete(s, key) - } + clear(s) return s } diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 920c113bb..6825a808e 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1361,6 +1361,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me // original. Otherwise, check if we want to preserve it or skip it. // Preserving the null value is useful when we want to send an explicit // delete to the API server. + // In some cases, this may lead to inconsistent behavior with create. + // ref: https://github.com/kubernetes/kubernetes/issues/123304 + // To avoid breaking compatibility, + // we made corresponding changes on the client side to ensure that the create and patch behaviors are idempotent. if patchV == nil { delete(original, k) if mergeOptions.IgnoreUnmatchedNulls { diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go deleted file mode 100644 index 1fa351aab..000000000 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package uuid - -import ( - "github.com/google/uuid" - - "k8s.io/apimachinery/pkg/types" -) - -func NewUUID() types.UID { - return types.UID(uuid.New().String()) -} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index bc387d011..f1634bc0d 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -220,26 +220,24 @@ func Forbidden(field *Path, detail string) *Error { return &Error{ErrorTypeForbidden, field.String(), "", detail} } -// TooLong returns a *Error indicating "too long". This is used to -// report that the given value is too long. This is similar to -// Invalid, but the returned error will not include the too-long -// value. +// TooLong returns a *Error indicating "too long". This is used to report that +// the given value is too long. This is similar to Invalid, but the returned +// error will not include the too-long value. If maxLength is negative, it will +// be included in the message. The value argument is not used. func TooLong(field *Path, value interface{}, maxLength int) *Error { - return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} -} - -// TooLongMaxLength returns a *Error indicating "too long". 
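Reviewer note: the mergeMap comment added above documents that an explicit null in a strategic merge patch deletes the key from the original object. A small sketch of that behaviour through the public StrategicMergePatch entry point, using a throwaway struct type; the field names are illustrative.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

// demoSpec stands in for an API type so the patcher can look up field metadata.
type demoSpec struct {
	Replicas *int32 `json:"replicas,omitempty"`
	Paused   bool   `json:"paused,omitempty"`
}

func main() {
	original := []byte(`{"replicas":3,"paused":true}`)
	// An explicit null asks the server to delete the field.
	patch := []byte(`{"replicas":null}`)

	merged, err := strategicpatch.StrategicMergePatch(original, patch, demoSpec{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged)) // {"paused":true}
}
```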
This is used to -// report that the given value is too long. This is similar to -// Invalid, but the returned error will not include the too-long -// value. If maxLength is negative, no max length will be included in the message. -func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error { var msg string if maxLength >= 0 { - msg = fmt.Sprintf("may not be longer than %d", maxLength) + msg = fmt.Sprintf("may not be more than %d bytes", maxLength) } else { msg = "value is too long" } - return &Error{ErrorTypeTooLong, field.String(), value, msg} + return &Error{ErrorTypeTooLong, field.String(), "", msg} +} + +// TooLongMaxLength returns a *Error indicating "too long". +// Deprecated: Use TooLong instead. +func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error { + return TooLong(field, "", maxLength) } // TooMany returns a *Error indicating "too many". This is used to diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index b32644902..9bc393cf5 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -175,6 +175,8 @@ func IsValidLabelValue(value string) []string { } const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123LabelFmtWithUnderscore string = "_?[a-z0-9]([-_a-z0-9]*[a-z0-9])?" + const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" // DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) @@ -204,10 +206,14 @@ func IsDNS1123Label(value string) []string { const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" +const dns1123SubdomainFmtWithUnderscore string = dns1123LabelFmtWithUnderscore + "(\\." + dns1123LabelFmtWithUnderscore + ")*" +const dns1123SubdomainErrorMsgFG string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '_', '-' or '.', and must start and end with an alphanumeric character" + // DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) const DNS1123SubdomainMaxLength int = 253 var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") +var dns1123SubdomainRegexpWithUnderscore = regexp.MustCompile("^" + dns1123SubdomainFmtWithUnderscore + "$") // IsDNS1123Subdomain tests for a string that conforms to the definition of a // subdomain in DNS (RFC 1123). @@ -222,6 +228,19 @@ func IsDNS1123Subdomain(value string) []string { return errs } +// IsDNS1123SubdomainWithUnderscore tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123), but allows the use of an underscore in the string +func IsDNS1123SubdomainWithUnderscore(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !dns1123SubdomainRegexpWithUnderscore.MatchString(value) { + errs = append(errs, RegexError(dns1123SubdomainErrorMsgFG, dns1123SubdomainFmt, "example.com")) + } + return errs +} + const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" 
const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/constraint/vendor/k8s.io/apimachinery/pkg/util/version/version.go index 2292ba137..b7812ff2d 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -23,6 +23,8 @@ import ( "regexp" "strconv" "strings" + + apimachineryversion "k8s.io/apimachinery/pkg/version" ) // Version is an opaque representation of a version number @@ -31,6 +33,7 @@ type Version struct { semver bool preRelease string buildMetadata string + info apimachineryversion.Info } var ( @@ -145,6 +148,43 @@ func MustParseGeneric(str string) *Version { return v } +// Parse tries to do ParseSemantic first to keep more information. +// If ParseSemantic fails, it would just do ParseGeneric. +func Parse(str string) (*Version, error) { + v, err := parse(str, true) + if err != nil { + return parse(str, false) + } + return v, err +} + +// MustParse is like Parse except that it panics on error +func MustParse(str string) *Version { + v, err := Parse(str) + if err != nil { + panic(err) + } + return v +} + +// ParseMajorMinor parses a "generic" version string and returns a version with the major and minor version. +func ParseMajorMinor(str string) (*Version, error) { + v, err := ParseGeneric(str) + if err != nil { + return nil, err + } + return MajorMinor(v.Major(), v.Minor()), nil +} + +// MustParseMajorMinor is like ParseMajorMinor except that it panics on error +func MustParseMajorMinor(str string) *Version { + v, err := ParseMajorMinor(str) + if err != nil { + panic(err) + } + return v +} + // ParseSemantic parses a version string that exactly obeys the syntax and semantics of // the "Semantic Versioning" specification (http://semver.org/) (although it ignores // leading and trailing whitespace, and allows the version to be preceded by "v"). For @@ -215,6 +255,32 @@ func (v *Version) WithMinor(minor uint) *Version { return &result } +// SubtractMinor returns the version with offset from the original minor, with the same major and no patch. +// If -offset >= current minor, the minor would be 0. +func (v *Version) OffsetMinor(offset int) *Version { + var minor uint + if offset >= 0 { + minor = v.Minor() + uint(offset) + } else { + diff := uint(-offset) + if diff < v.Minor() { + minor = v.Minor() - diff + } + } + return MajorMinor(v.Major(), minor) +} + +// SubtractMinor returns the version diff minor versions back, with the same major and no patch. +// If diff >= current minor, the minor would be 0. +func (v *Version) SubtractMinor(diff uint) *Version { + return v.OffsetMinor(-int(diff)) +} + +// AddMinor returns the version diff minor versions forward, with the same major and no patch. 
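Reviewer note: the version package gains Parse/MustParse (semantic parse first, generic fallback), ParseMajorMinor, and the minor-offset helpers. A short sketch of the arithmetic helpers; clamping at minor 0 matches the OffsetMinor implementation above.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	v := version.MustParse("v1.32.1")

	// SubtractMinor/AddMinor return a major.minor version with no patch,
	// clamping the minor at 0 when the offset is too large.
	fmt.Println(v.SubtractMinor(3))  // 1.29
	fmt.Println(v.AddMinor(2))       // 1.34
	fmt.Println(v.SubtractMinor(40)) // 1.0

	mm := version.MustParseMajorMinor("1.32.7")
	fmt.Println(mm) // 1.32
}
```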
+func (v *Version) AddMinor(diff uint) *Version { + return v.OffsetMinor(int(diff)) +} + // WithPatch returns copy of the version object with requested patch number func (v *Version) WithPatch(patch uint) *Version { result := *v @@ -224,6 +290,9 @@ func (v *Version) WithPatch(patch uint) *Version { // WithPreRelease returns copy of the version object with requested prerelease func (v *Version) WithPreRelease(preRelease string) *Version { + if len(preRelease) == 0 { + return v + } result := *v result.components = []uint{v.Major(), v.Minor(), v.Patch()} result.preRelease = preRelease @@ -345,6 +414,17 @@ func onlyZeros(array []uint) bool { return true } +// EqualTo tests if a version is equal to a given version. +func (v *Version) EqualTo(other *Version) bool { + if v == nil { + return other == nil + } + if other == nil { + return false + } + return v.compareInternal(other) == 0 +} + // AtLeast tests if a version is at least equal to a given minimum version. If both // Versions are Semantic Versions, this will use the Semantic Version comparison // algorithm. Otherwise, it will compare only the numeric components, with non-present @@ -360,6 +440,11 @@ func (v *Version) LessThan(other *Version) bool { return v.compareInternal(other) == -1 } +// GreaterThan tests if a version is greater than a given version. +func (v *Version) GreaterThan(other *Version) bool { + return v.compareInternal(other) == 1 +} + // Compare compares v against a version string (which will be parsed as either Semantic // or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if // it is greater than other, or 0 if they are equal. @@ -370,3 +455,30 @@ func (v *Version) Compare(other string) (int, error) { } return v.compareInternal(ov), nil } + +// WithInfo returns copy of the version object with requested info +func (v *Version) WithInfo(info apimachineryversion.Info) *Version { + result := *v + result.info = info + return &result +} + +func (v *Version) Info() *apimachineryversion.Info { + if v == nil { + return nil + } + // in case info is empty, or the major and minor in info is different from the actual major and minor + v.info.Major = itoa(v.Major()) + v.info.Minor = itoa(v.Minor()) + if v.info.GitVersion == "" { + v.info.GitVersion = v.String() + } + return &v.info +} + +func itoa(i uint) string { + if i == 0 { + return "" + } + return strconv.Itoa(int(i)) +} diff --git a/constraint/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/constraint/vendor/k8s.io/apimachinery/pkg/watch/watch.go index b6c7bbfa8..ce37fd8c1 100644 --- a/constraint/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/constraint/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -27,13 +27,25 @@ import ( // Interface can be implemented by anything that knows how to watch and report changes. type Interface interface { - // Stop stops watching. Will close the channel returned by ResultChan(). Releases - // any resources used by the watch. + // Stop tells the producer that the consumer is done watching, so the + // producer should stop sending events and close the result channel. The + // consumer should keep watching for events until the result channel is + // closed. + // + // Because some implementations may create channels when constructed, Stop + // must always be called, even if the consumer has not yet called + // ResultChan(). + // + // Only the consumer should call Stop(), not the producer. 
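Reviewer note: EqualTo and GreaterThan round out the existing AtLeast/LessThan comparisons (and WithInfo/Info attach an apimachinery version.Info to the parsed version). A tiny sketch of the comparison helpers.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	a := version.MustParse("1.32.1")
	b := version.MustParse("1.31.5")

	fmt.Println(a.GreaterThan(b))                        // true
	fmt.Println(a.EqualTo(version.MustParse("v1.32.1"))) // true, the leading "v" is ignored
	fmt.Println(b.AtLeast(a))                            // false
}
```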
If the producer + // errors and needs to stop the watch prematurely, it should instead send + // an error event and close the result channel. Stop() - // ResultChan returns a chan which will receive all the events. If an error occurs - // or Stop() is called, the implementation will close this channel and - // release any resources used by the watch. + // ResultChan returns a channel which will receive events from the event + // producer. If an error occurs or Stop() is called, the producer must + // close this channel and release any resources used by the watch. + // Closing the result channel tells the consumer that no more events will be + // sent. ResultChan() <-chan Event } @@ -322,3 +334,21 @@ func (pw *ProxyWatcher) ResultChan() <-chan Event { func (pw *ProxyWatcher) StopChan() <-chan struct{} { return pw.stopCh } + +// MockWatcher implements watch.Interface with mockable functions. +type MockWatcher struct { + StopFunc func() + ResultChanFunc func() <-chan Event +} + +var _ Interface = &MockWatcher{} + +// Stop calls StopFunc +func (mw MockWatcher) Stop() { + mw.StopFunc() +} + +// ResultChan calls ResultChanFunc +func (mw MockWatcher) ResultChan() <-chan Event { + return mw.ResultChanFunc() +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/attributes.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/attributes.go deleted file mode 100644 index 1d291f6b2..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/attributes.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -import ( - "fmt" - "strings" - "sync" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation" - auditinternal "k8s.io/apiserver/pkg/apis/audit" - "k8s.io/apiserver/pkg/authentication/user" -) - -type attributesRecord struct { - kind schema.GroupVersionKind - namespace string - name string - resource schema.GroupVersionResource - subresource string - operation Operation - options runtime.Object - dryRun bool - object runtime.Object - oldObject runtime.Object - userInfo user.Info - - // other elements are always accessed in single goroutine. - // But ValidatingAdmissionWebhook add annotations concurrently. 
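Reviewer note: the rewritten watch.Interface contract above (the consumer calls Stop and drains ResultChan until the producer closes it) is what the new MockWatcher makes easy to exercise in tests. A minimal sketch; the canned event channel is illustrative.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	// A canned, already-closed event stream standing in for a real producer.
	events := make(chan watch.Event, 1)
	events <- watch.Event{Type: watch.Added}
	close(events)

	// MockWatcher hands back that channel while still honouring the contract
	// spelled out above: the consumer calls Stop and reads until close.
	w := watch.MockWatcher{
		StopFunc:       func() {},
		ResultChanFunc: func() <-chan watch.Event { return events },
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		fmt.Println("got event:", ev.Type)
	}
}
```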
- annotations map[string]annotation - annotationsLock sync.RWMutex - - reinvocationContext ReinvocationContext -} - -type annotation struct { - level auditinternal.Level - value string -} - -func NewAttributesRecord(object runtime.Object, oldObject runtime.Object, kind schema.GroupVersionKind, namespace, name string, resource schema.GroupVersionResource, subresource string, operation Operation, operationOptions runtime.Object, dryRun bool, userInfo user.Info) Attributes { - return &attributesRecord{ - kind: kind, - namespace: namespace, - name: name, - resource: resource, - subresource: subresource, - operation: operation, - options: operationOptions, - dryRun: dryRun, - object: object, - oldObject: oldObject, - userInfo: userInfo, - reinvocationContext: &reinvocationContext{}, - } -} - -func (record *attributesRecord) GetKind() schema.GroupVersionKind { - return record.kind -} - -func (record *attributesRecord) GetNamespace() string { - return record.namespace -} - -func (record *attributesRecord) GetName() string { - return record.name -} - -func (record *attributesRecord) GetResource() schema.GroupVersionResource { - return record.resource -} - -func (record *attributesRecord) GetSubresource() string { - return record.subresource -} - -func (record *attributesRecord) GetOperation() Operation { - return record.operation -} - -func (record *attributesRecord) GetOperationOptions() runtime.Object { - return record.options -} - -func (record *attributesRecord) IsDryRun() bool { - return record.dryRun -} - -func (record *attributesRecord) GetObject() runtime.Object { - return record.object -} - -func (record *attributesRecord) GetOldObject() runtime.Object { - return record.oldObject -} - -func (record *attributesRecord) GetUserInfo() user.Info { - return record.userInfo -} - -// getAnnotations implements privateAnnotationsGetter.It's a private method used -// by WithAudit decorator. 
-func (record *attributesRecord) getAnnotations(maxLevel auditinternal.Level) map[string]string { - record.annotationsLock.RLock() - defer record.annotationsLock.RUnlock() - - if record.annotations == nil { - return nil - } - cp := make(map[string]string, len(record.annotations)) - for key, value := range record.annotations { - if value.level.Less(maxLevel) || value.level == maxLevel { - cp[key] = value.value - } - } - return cp -} - -// AddAnnotation adds an annotation to attributesRecord with Metadata audit level -func (record *attributesRecord) AddAnnotation(key, value string) error { - return record.AddAnnotationWithLevel(key, value, auditinternal.LevelMetadata) -} - -func (record *attributesRecord) AddAnnotationWithLevel(key, value string, level auditinternal.Level) error { - if err := checkKeyFormat(key); err != nil { - return err - } - if level.Less(auditinternal.LevelMetadata) { - return fmt.Errorf("admission annotations are not allowed to be set at audit level lower than Metadata, key: %q, level: %s", key, level) - } - record.annotationsLock.Lock() - defer record.annotationsLock.Unlock() - - if record.annotations == nil { - record.annotations = make(map[string]annotation) - } - annotation := annotation{level: level, value: value} - if v, ok := record.annotations[key]; ok && v != annotation { - return fmt.Errorf("admission annotations are not allowd to be overwritten, key:%q, old value: %v, new value: %v", key, record.annotations[key], annotation) - } - record.annotations[key] = annotation - return nil -} - -func (record *attributesRecord) GetReinvocationContext() ReinvocationContext { - return record.reinvocationContext -} - -type reinvocationContext struct { - // isReinvoke is true when admission plugins are being reinvoked - isReinvoke bool - // reinvokeRequested is true when an admission plugin requested a re-invocation of the chain - reinvokeRequested bool - // values stores reinvoke context values per plugin. - values map[string]interface{} -} - -func (rc *reinvocationContext) IsReinvoke() bool { - return rc.isReinvoke -} - -func (rc *reinvocationContext) SetIsReinvoke() { - rc.isReinvoke = true -} - -func (rc *reinvocationContext) ShouldReinvoke() bool { - return rc.reinvokeRequested -} - -func (rc *reinvocationContext) SetShouldReinvoke() { - rc.reinvokeRequested = true -} - -func (rc *reinvocationContext) SetValue(plugin string, v interface{}) { - if rc.values == nil { - rc.values = map[string]interface{}{} - } - rc.values[plugin] = v -} - -func (rc *reinvocationContext) Value(plugin string) interface{} { - return rc.values[plugin] -} - -func checkKeyFormat(key string) error { - parts := strings.Split(key, "/") - if len(parts) != 2 { - return fmt.Errorf("annotation key has invalid format, the right format is a DNS subdomain prefix and '/' and key name. (e.g. 'podsecuritypolicy.admission.k8s.io/admit-policy')") - } - if msgs := validation.IsQualifiedName(key); len(msgs) != 0 { - return fmt.Errorf("annotation key has invalid format %s. A qualified name like 'podsecuritypolicy.admission.k8s.io/admit-policy' is required.", strings.Join(msgs, ",")) - } - return nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/audit.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/audit.go deleted file mode 100644 index 7c0993f09..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/audit.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -import ( - "context" - "fmt" - - "k8s.io/apiserver/pkg/audit" -) - -// auditHandler logs annotations set by other admission handlers -type auditHandler struct { - Interface -} - -var _ Interface = &auditHandler{} -var _ MutationInterface = &auditHandler{} -var _ ValidationInterface = &auditHandler{} - -// WithAudit is a decorator for a admission phase. It saves annotations -// of attribute into the audit event. Attributes passed to the Admit and -// Validate function must be instance of privateAnnotationsGetter or -// AnnotationsGetter, otherwise an error is returned. -func WithAudit(i Interface) Interface { - if i == nil { - return i - } - return &auditHandler{Interface: i} -} - -func (handler *auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { - if !handler.Interface.Handles(a.GetOperation()) { - return nil - } - if err := ensureAnnotationGetter(a); err != nil { - return err - } - var err error - if mutator, ok := handler.Interface.(MutationInterface); ok { - err = mutator.Admit(ctx, a, o) - handler.logAnnotations(ctx, a) - } - return err -} - -func (handler *auditHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { - if !handler.Interface.Handles(a.GetOperation()) { - return nil - } - if err := ensureAnnotationGetter(a); err != nil { - return err - } - var err error - if validator, ok := handler.Interface.(ValidationInterface); ok { - err = validator.Validate(ctx, a, o) - handler.logAnnotations(ctx, a) - } - return err -} - -func ensureAnnotationGetter(a Attributes) error { - _, okPrivate := a.(privateAnnotationsGetter) - _, okPublic := a.(AnnotationsGetter) - if okPrivate || okPublic { - return nil - } - return fmt.Errorf("attributes must be an instance of privateAnnotationsGetter or AnnotationsGetter") -} - -func (handler *auditHandler) logAnnotations(ctx context.Context, a Attributes) { - ae := audit.AuditEventFrom(ctx) - if ae == nil { - return - } - - var annotations map[string]string - switch a := a.(type) { - case privateAnnotationsGetter: - annotations = a.getAnnotations(ae.Level) - case AnnotationsGetter: - annotations = a.GetAnnotations(ae.Level) - default: - // this will never happen, because we have already checked it in ensureAnnotationGetter - } - - audit.AddAuditAnnotationsMap(ctx, annotations) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/chain.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/chain.go deleted file mode 100644 index f2af01ef3..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/chain.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -import "context" - -// chainAdmissionHandler is an instance of admission.NamedHandler that performs admission control using -// a chain of admission handlers -type chainAdmissionHandler []Interface - -// NewChainHandler creates a new chain handler from an array of handlers. Used for testing. -func NewChainHandler(handlers ...Interface) chainAdmissionHandler { - return chainAdmissionHandler(handlers) -} - -// Admit performs an admission control check using a chain of handlers, and returns immediately on first error -func (admissionHandler chainAdmissionHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { - for _, handler := range admissionHandler { - if !handler.Handles(a.GetOperation()) { - continue - } - if mutator, ok := handler.(MutationInterface); ok { - err := mutator.Admit(ctx, a, o) - if err != nil { - return err - } - } - } - return nil -} - -// Validate performs an admission control check using a chain of handlers, and returns immediately on first error -func (admissionHandler chainAdmissionHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { - for _, handler := range admissionHandler { - if !handler.Handles(a.GetOperation()) { - continue - } - if validator, ok := handler.(ValidationInterface); ok { - err := validator.Validate(ctx, a, o) - if err != nil { - return err - } - } - } - return nil -} - -// Handles will return true if any of the handlers handles the given operation -func (admissionHandler chainAdmissionHandler) Handles(operation Operation) bool { - for _, handler := range admissionHandler { - if handler.Handles(operation) { - return true - } - } - return false -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/config.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/config.go deleted file mode 100644 index c0b75a983..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/config.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -import ( - "bytes" - "fmt" - "io" - "os" - "path" - "path/filepath" - - "k8s.io/klog/v2" - "sigs.k8s.io/yaml" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/apis/apiserver" - apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1" -) - -func makeAbs(path, base string) (string, error) { - if filepath.IsAbs(path) { - return path, nil - } - if len(base) == 0 || base == "." 
{ - cwd, err := os.Getwd() - if err != nil { - return "", err - } - base = cwd - } - return filepath.Join(base, path), nil -} - -// ReadAdmissionConfiguration reads the admission configuration at the specified path. -// It returns the loaded admission configuration if the input file aligns with the required syntax. -// If it does not align with the provided syntax, it returns a default configuration for the enumerated -// set of pluginNames whose config location references the specified configFilePath. -// It does this to preserve backward compatibility when admission control files were opaque. -// It returns an error if the file did not exist. -func ReadAdmissionConfiguration(pluginNames []string, configFilePath string, configScheme *runtime.Scheme) (ConfigProvider, error) { - if configFilePath == "" { - return configProvider{config: &apiserver.AdmissionConfiguration{}}, nil - } - // a file was provided, so we just read it. - data, err := os.ReadFile(configFilePath) - if err != nil { - return nil, fmt.Errorf("unable to read admission control configuration from %q [%v]", configFilePath, err) - } - codecs := serializer.NewCodecFactory(configScheme) - decoder := codecs.UniversalDecoder() - decodedObj, err := runtime.Decode(decoder, data) - // we were able to decode the file successfully - if err == nil { - decodedConfig, ok := decodedObj.(*apiserver.AdmissionConfiguration) - if !ok { - return nil, fmt.Errorf("unexpected type: %T", decodedObj) - } - baseDir := path.Dir(configFilePath) - for i := range decodedConfig.Plugins { - if decodedConfig.Plugins[i].Path == "" { - continue - } - // we update relative file paths to absolute paths - absPath, err := makeAbs(decodedConfig.Plugins[i].Path, baseDir) - if err != nil { - return nil, err - } - decodedConfig.Plugins[i].Path = absPath - } - return configProvider{ - config: decodedConfig, - }, nil - } - // we got an error where the decode wasn't related to a missing type - if !(runtime.IsMissingVersion(err) || runtime.IsMissingKind(err) || runtime.IsNotRegisteredError(err)) { - return nil, err - } - - // Only tolerate load errors if the file appears to be one of the two legacy plugin configs - unstructuredData := map[string]interface{}{} - if err2 := yaml.Unmarshal(data, &unstructuredData); err2 != nil { - return nil, err - } - _, isLegacyImagePolicy := unstructuredData["imagePolicy"] - _, isLegacyPodNodeSelector := unstructuredData["podNodeSelectorPluginConfig"] - if !isLegacyImagePolicy && !isLegacyPodNodeSelector { - return nil, err - } - - // convert the legacy format to the new admission control format - // in order to preserve backwards compatibility, we set plugins that - // previously read input from a non-versioned file configuration to the - // current input file. 
- legacyPluginsWithUnversionedConfig := sets.NewString("ImagePolicyWebhook", "PodNodeSelector") - externalConfig := &apiserverv1.AdmissionConfiguration{} - for _, pluginName := range pluginNames { - if legacyPluginsWithUnversionedConfig.Has(pluginName) { - externalConfig.Plugins = append(externalConfig.Plugins, - apiserverv1.AdmissionPluginConfiguration{ - Name: pluginName, - Path: configFilePath}) - } - } - configScheme.Default(externalConfig) - internalConfig := &apiserver.AdmissionConfiguration{} - if err := configScheme.Convert(externalConfig, internalConfig, nil); err != nil { - return nil, err - } - return configProvider{ - config: internalConfig, - }, nil -} - -type configProvider struct { - config *apiserver.AdmissionConfiguration -} - -// GetAdmissionPluginConfigurationFor returns a reader that holds the admission plugin configuration. -func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfiguration) (io.Reader, error) { - // if there is a nest object, return it directly - if pluginCfg.Configuration != nil { - return bytes.NewBuffer(pluginCfg.Configuration.Raw), nil - } - // there is nothing nested, so we delegate to path - if pluginCfg.Path != "" { - content, err := os.ReadFile(pluginCfg.Path) - if err != nil { - klog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) - return nil, err - } - return bytes.NewBuffer(content), nil - } - // there is no special config at all - return nil, nil -} - -// ConfigFor returns a reader for the specified plugin. -// If no specific configuration is present, we return a nil reader. -func (p configProvider) ConfigFor(pluginName string) (io.Reader, error) { - // there is no config, so there is no potential config - if p.config == nil { - return nil, nil - } - // look for matching plugin and get configuration - for _, pluginCfg := range p.config.Plugins { - if pluginName != pluginCfg.Name { - continue - } - pluginConfig, err := GetAdmissionPluginConfigurationFor(pluginCfg) - if err != nil { - return nil, err - } - return pluginConfig, nil - } - // there is no registered config that matches on plugin name. - return nil, nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/conversion.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/conversion.go deleted file mode 100644 index a2b313a98..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/conversion.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// VersionedAttributes is a wrapper around the original admission attributes, adding versioned -// variants of the object and old object. -type VersionedAttributes struct { - // Attributes holds the original admission attributes - Attributes - // VersionedOldObject holds Attributes.OldObject (if non-nil), converted to VersionedKind. - // It must never be mutated. 
- VersionedOldObject runtime.Object - // VersionedObject holds Attributes.Object (if non-nil), converted to VersionedKind. - // If mutated, Dirty must be set to true by the mutator. - VersionedObject runtime.Object - // VersionedKind holds the fully qualified kind - VersionedKind schema.GroupVersionKind - // Dirty indicates VersionedObject has been modified since being converted from Attributes.Object - Dirty bool -} - -// GetObject overrides the Attributes.GetObject() -func (v *VersionedAttributes) GetObject() runtime.Object { - if v.VersionedObject != nil { - return v.VersionedObject - } - return v.Attributes.GetObject() -} - -// ConvertToGVK converts object to the desired gvk. -func ConvertToGVK(obj runtime.Object, gvk schema.GroupVersionKind, o ObjectInterfaces) (runtime.Object, error) { - // Unlike other resources, custom resources do not have internal version, so - // if obj is a custom resource, it should not need conversion. - if obj.GetObjectKind().GroupVersionKind() == gvk { - return obj, nil - } - out, err := o.GetObjectCreater().New(gvk) - if err != nil { - return nil, err - } - err = o.GetObjectConvertor().Convert(obj, out, nil) - if err != nil { - return nil, err - } - // Explicitly set the GVK - out.GetObjectKind().SetGroupVersionKind(gvk) - return out, nil -} - -// NewVersionedAttributes returns versioned attributes with the old and new object (if non-nil) converted to the requested kind -func NewVersionedAttributes(attr Attributes, gvk schema.GroupVersionKind, o ObjectInterfaces) (*VersionedAttributes, error) { - // convert the old and new objects to the requested version - versionedAttr := &VersionedAttributes{ - Attributes: attr, - VersionedKind: gvk, - } - if oldObj := attr.GetOldObject(); oldObj != nil { - out, err := ConvertToGVK(oldObj, gvk, o) - if err != nil { - return nil, err - } - versionedAttr.VersionedOldObject = out - } - if obj := attr.GetObject(); obj != nil { - out, err := ConvertToGVK(obj, gvk, o) - if err != nil { - return nil, err - } - versionedAttr.VersionedObject = out - } - return versionedAttr, nil -} - -// ConvertVersionedAttributes converts VersionedObject and VersionedOldObject to the specified kind, if needed. -// If attr.VersionedKind already matches the requested kind, no conversion is performed. 
-// If conversion is required: -// * attr.VersionedObject is used as the source for the new object if Dirty=true (and is round-tripped through attr.Attributes.Object, clearing Dirty in the process) -// * attr.Attributes.Object is used as the source for the new object if Dirty=false -// * attr.Attributes.OldObject is used as the source for the old object -func ConvertVersionedAttributes(attr *VersionedAttributes, gvk schema.GroupVersionKind, o ObjectInterfaces) error { - // we already have the desired kind, we're done - if attr.VersionedKind == gvk { - return nil - } - - // convert the original old object to the desired GVK - if oldObj := attr.Attributes.GetOldObject(); oldObj != nil { - out, err := ConvertToGVK(oldObj, gvk, o) - if err != nil { - return err - } - attr.VersionedOldObject = out - } - - if attr.VersionedObject != nil { - // convert the existing versioned object to internal - if attr.Dirty { - err := o.GetObjectConvertor().Convert(attr.VersionedObject, attr.Attributes.GetObject(), nil) - if err != nil { - return err - } - } - - // and back to external - out, err := ConvertToGVK(attr.Attributes.GetObject(), gvk, o) - if err != nil { - return err - } - attr.VersionedObject = out - } - - // Remember we converted to this version - attr.VersionedKind = gvk - attr.Dirty = false - - return nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/decorator.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/decorator.go deleted file mode 100644 index a4b0b28b5..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/decorator.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -type Decorator interface { - Decorate(handler Interface, name string) Interface -} - -type DecoratorFunc func(handler Interface, name string) Interface - -func (d DecoratorFunc) Decorate(handler Interface, name string) Interface { - return d(handler, name) -} - -type Decorators []Decorator - -// Decorate applies the decorator in inside-out order, i.e. the first decorator in the slice is first applied to the given handler. -func (d Decorators) Decorate(handler Interface, name string) Interface { - result := handler - for _, d := range d { - result = d.Decorate(result, name) - } - - return result -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/errors.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/errors.go deleted file mode 100644 index 9a069a2c9..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/errors.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -import ( - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" -) - -func extractResourceName(a Attributes) (name string, resource schema.GroupResource, err error) { - resource = a.GetResource().GroupResource() - - if len(a.GetName()) > 0 { - return a.GetName(), resource, nil - } - - name = "Unknown" - obj := a.GetObject() - if obj != nil { - accessor, err := meta.Accessor(obj) - if err != nil { - // not all object have ObjectMeta. If we don't, return a name with a slash (always illegal) - return "Unknown/errorGettingName", resource, nil - } - - // this is necessary because name object name generation has not occurred yet - if len(accessor.GetName()) > 0 { - name = accessor.GetName() - } else if len(accessor.GetGenerateName()) > 0 { - name = accessor.GetGenerateName() - } - } - return name, resource, nil -} - -// NewForbidden is a utility function to return a well-formatted admission control error response -func NewForbidden(a Attributes, internalError error) error { - // do not double wrap an error of same type - if apierrors.IsForbidden(internalError) { - return internalError - } - name, resource, err := extractResourceName(a) - if err != nil { - return apierrors.NewInternalError(utilerrors.NewAggregate([]error{internalError, err})) - } - return apierrors.NewForbidden(resource, name, internalError) -} - -// NewNotFound is a utility function to return a well-formatted admission control error response -func NewNotFound(a Attributes) error { - name, resource, err := extractResourceName(a) - if err != nil { - return apierrors.NewInternalError(err) - } - return apierrors.NewNotFound(resource, name) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/handler.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/handler.go deleted file mode 100644 index d2a9e7d4c..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/handler.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -import ( - "time" - - "k8s.io/apimachinery/pkg/util/sets" -) - -const ( - // timeToWaitForReady is the amount of time to wait to let an admission controller to be ready to satisfy a request. - // this is useful when admission controllers need to warm their caches before letting requests through. - timeToWaitForReady = 10 * time.Second -) - -// ReadyFunc is a function that returns true if the admission controller is ready to handle requests. 
-type ReadyFunc func() bool - -// Handler is a base for admission control handlers that -// support a predefined set of operations -type Handler struct { - operations sets.String - readyFunc ReadyFunc -} - -// Handles returns true for methods that this handler supports -func (h *Handler) Handles(operation Operation) bool { - return h.operations.Has(string(operation)) -} - -// NewHandler creates a new base handler that handles the passed -// in operations -func NewHandler(ops ...Operation) *Handler { - operations := sets.NewString() - for _, op := range ops { - operations.Insert(string(op)) - } - return &Handler{ - operations: operations, - } -} - -// SetReadyFunc allows late registration of a ReadyFunc to know if the handler is ready to process requests. -func (h *Handler) SetReadyFunc(readyFunc ReadyFunc) { - h.readyFunc = readyFunc -} - -// WaitForReady will wait for the readyFunc (if registered) to return ready, and in case of timeout, will return false. -func (h *Handler) WaitForReady() bool { - // there is no ready func configured, so we return immediately - if h.readyFunc == nil { - return true - } - - timeout := time.After(timeToWaitForReady) - for !h.readyFunc() { - select { - case <-time.After(100 * time.Millisecond): - case <-timeout: - return h.readyFunc() - } - } - return true -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go deleted file mode 100644 index 21ee8c801..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package initializer - -import ( - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/component-base/featuregate" -) - -type pluginInitializer struct { - externalClient kubernetes.Interface - dynamicClient dynamic.Interface - externalInformers informers.SharedInformerFactory - authorizer authorizer.Authorizer - featureGates featuregate.FeatureGate - stopCh <-chan struct{} - restMapper meta.RESTMapper -} - -// New creates an instance of admission plugins initializer. -// This constructor is public with a long param list so that callers immediately know that new information can be expected -// during compilation when they update a level. 
-func New( - extClientset kubernetes.Interface, - dynamicClient dynamic.Interface, - extInformers informers.SharedInformerFactory, - authz authorizer.Authorizer, - featureGates featuregate.FeatureGate, - stopCh <-chan struct{}, - restMapper meta.RESTMapper, -) pluginInitializer { - return pluginInitializer{ - externalClient: extClientset, - dynamicClient: dynamicClient, - externalInformers: extInformers, - authorizer: authz, - featureGates: featureGates, - stopCh: stopCh, - restMapper: restMapper, - } -} - -// Initialize checks the initialization interfaces implemented by a plugin -// and provide the appropriate initialization data -func (i pluginInitializer) Initialize(plugin admission.Interface) { - // First tell the plugin about drained notification, so it can pass it to further initializations. - if wants, ok := plugin.(WantsDrainedNotification); ok { - wants.SetDrainedNotification(i.stopCh) - } - - // Second tell the plugin about enabled features, so it can decide whether to start informers or not - if wants, ok := plugin.(WantsFeatures); ok { - wants.InspectFeatureGates(i.featureGates) - } - - if wants, ok := plugin.(WantsExternalKubeClientSet); ok { - wants.SetExternalKubeClientSet(i.externalClient) - } - - if wants, ok := plugin.(WantsDynamicClient); ok { - wants.SetDynamicClient(i.dynamicClient) - } - - if wants, ok := plugin.(WantsExternalKubeInformerFactory); ok { - wants.SetExternalKubeInformerFactory(i.externalInformers) - } - - if wants, ok := plugin.(WantsAuthorizer); ok { - wants.SetAuthorizer(i.authorizer) - } - if wants, ok := plugin.(WantsRESTMapper); ok { - wants.SetRESTMapper(i.restMapper) - } -} - -var _ admission.PluginInitializer = pluginInitializer{} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go deleted file mode 100644 index 21202bd79..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package initializer - -import ( - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/apiserver/pkg/cel/openapi/resolver" - quota "k8s.io/apiserver/pkg/quota/v1" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/component-base/featuregate" -) - -// WantsExternalKubeClientSet defines a function which sets external ClientSet for admission plugins that need it -type WantsExternalKubeClientSet interface { - SetExternalKubeClientSet(kubernetes.Interface) - admission.InitializationValidator -} - -// WantsExternalKubeInformerFactory defines a function which sets InformerFactory for admission plugins that need it -type WantsExternalKubeInformerFactory interface { - SetExternalKubeInformerFactory(informers.SharedInformerFactory) - admission.InitializationValidator -} - -// WantsAuthorizer defines a function which sets Authorizer for admission plugins that need it. -type WantsAuthorizer interface { - SetAuthorizer(authorizer.Authorizer) - admission.InitializationValidator -} - -// WantsQuotaConfiguration defines a function which sets quota configuration for admission plugins that need it. -type WantsQuotaConfiguration interface { - SetQuotaConfiguration(quota.Configuration) - admission.InitializationValidator -} - -// WantsDrainedNotification defines a function which sets the notification of where the apiserver -// has already been drained for admission plugins that need it. -// After receiving that notification, Admit/Validate calls won't be called anymore. -type WantsDrainedNotification interface { - SetDrainedNotification(<-chan struct{}) - admission.InitializationValidator -} - -// WantsFeatureGate defines a function which passes the featureGates for inspection by an admission plugin. -// Admission plugins should not hold a reference to the featureGates. Instead, they should query a particular one -// and assign it to a simple bool in the admission plugin struct. -// -// func (a *admissionPlugin) InspectFeatureGates(features featuregate.FeatureGate){ -// a.myFeatureIsOn = features.Enabled("my-feature") -// } -type WantsFeatures interface { - InspectFeatureGates(featuregate.FeatureGate) - admission.InitializationValidator -} - -type WantsDynamicClient interface { - SetDynamicClient(dynamic.Interface) - admission.InitializationValidator -} - -// WantsRESTMapper defines a function which sets RESTMapper for admission plugins that need it. -type WantsRESTMapper interface { - SetRESTMapper(meta.RESTMapper) - admission.InitializationValidator -} - -// WantsSchemaResolver defines a function which sets the SchemaResolver for -// an admission plugin that needs it. -type WantsSchemaResolver interface { - SetSchemaResolver(resolver resolver.SchemaResolver) - admission.InitializationValidator -} - -// WantsExcludedAdmissionResources defines a function which sets the ExcludedAdmissionResources -// for an admission plugin that needs it. 
-type WantsExcludedAdmissionResources interface { - SetExcludedAdmissionResources(excludedAdmissionResources []schema.GroupResource) - admission.InitializationValidator -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/interfaces.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/interfaces.go deleted file mode 100644 index ba979c973..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/interfaces.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -import ( - "context" - "io" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - auditinternal "k8s.io/apiserver/pkg/apis/audit" - "k8s.io/apiserver/pkg/authentication/user" -) - -// Attributes is an interface used by AdmissionController to get information about a request -// that is used to make an admission decision. -type Attributes interface { - // GetName returns the name of the object as presented in the request. On a CREATE operation, the client - // may omit name and rely on the server to generate the name. If that is the case, this method will return - // the empty string - GetName() string - // GetNamespace is the namespace associated with the request (if any) - GetNamespace() string - // GetResource is the name of the resource being requested. This is not the kind. For example: pods - GetResource() schema.GroupVersionResource - // GetSubresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. - // For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" - // (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding". - GetSubresource() string - // GetOperation is the operation being performed - GetOperation() Operation - // GetOperationOptions is the options for the operation being performed - GetOperationOptions() runtime.Object - // IsDryRun indicates that modifications will definitely not be persisted for this request. This is to prevent - // admission controllers with side effects and a method of reconciliation from being overwhelmed. - // However, a value of false for this does not mean that the modification will be persisted, because it - // could still be rejected by a subsequent validation step. - IsDryRun() bool - // GetObject is the object from the incoming request prior to default values being applied - GetObject() runtime.Object - // GetOldObject is the existing object. Only populated for UPDATE and DELETE requests. - GetOldObject() runtime.Object - // GetKind is the type of object being manipulated. 
For example: Pod - GetKind() schema.GroupVersionKind - // GetUserInfo is information about the requesting user - GetUserInfo() user.Info - - // AddAnnotation sets annotation according to key-value pair. The key should be qualified, e.g., podsecuritypolicy.admission.k8s.io/admit-policy, where - // "podsecuritypolicy" is the name of the plugin, "admission.k8s.io" is the name of the organization, "admit-policy" is the key name. - // An error is returned if the format of key is invalid. When trying to overwrite annotation with a new value, an error is returned. - // Both ValidationInterface and MutationInterface are allowed to add Annotations. - // By default, an annotation gets logged into audit event if the request's audit level is greater or - // equal to Metadata. - AddAnnotation(key, value string) error - - // AddAnnotationWithLevel sets annotation according to key-value pair with additional intended audit level. - // An Annotation gets logged into audit event if the request's audit level is greater or equal to the - // intended audit level. - AddAnnotationWithLevel(key, value string, level auditinternal.Level) error - - // GetReinvocationContext tracks the admission request information relevant to the re-invocation policy. - GetReinvocationContext() ReinvocationContext -} - -// ObjectInterfaces is an interface used by AdmissionController to get object interfaces -// such as Converter or Defaulter. These interfaces are normally coming from Request Scope -// to handle special cases like CRDs. -type ObjectInterfaces interface { - // GetObjectCreater is the ObjectCreator appropriate for the requested object. - GetObjectCreater() runtime.ObjectCreater - // GetObjectTyper is the ObjectTyper appropriate for the requested object. - GetObjectTyper() runtime.ObjectTyper - // GetObjectDefaulter is the ObjectDefaulter appropriate for the requested object. - GetObjectDefaulter() runtime.ObjectDefaulter - // GetObjectConvertor is the ObjectConvertor appropriate for the requested object. - GetObjectConvertor() runtime.ObjectConvertor - // GetEquivalentResourceMapper is the EquivalentResourceMapper appropriate for finding equivalent resources and expected kind for the requested object. - GetEquivalentResourceMapper() runtime.EquivalentResourceMapper -} - -// privateAnnotationsGetter is a private interface which allows users to get annotations from Attributes. -type privateAnnotationsGetter interface { - getAnnotations(maxLevel auditinternal.Level) map[string]string -} - -// AnnotationsGetter allows users to get annotations from Attributes. An alternate Attribute should implement -// this interface. -type AnnotationsGetter interface { - GetAnnotations(maxLevel auditinternal.Level) map[string]string -} - -// ReinvocationContext provides access to the admission related state required to implement the re-invocation policy. -type ReinvocationContext interface { - // IsReinvoke returns true if the current admission check is a re-invocation. - IsReinvoke() bool - // SetIsReinvoke sets the current admission check as a re-invocation. - SetIsReinvoke() - // ShouldReinvoke returns true if any plugin has requested a re-invocation. - ShouldReinvoke() bool - // SetShouldReinvoke signals that a re-invocation is desired. - SetShouldReinvoke() - // AddValue set a value for a plugin name, possibly overriding a previous value. - SetValue(plugin string, v interface{}) - // Value reads a value for a webhook. 
- Value(plugin string) interface{} -} - -// Interface is an abstract, pluggable interface for Admission Control decisions. -type Interface interface { - // Handles returns true if this admission controller can handle the given operation - // where operation can be one of CREATE, UPDATE, DELETE, or CONNECT - Handles(operation Operation) bool -} - -type MutationInterface interface { - Interface - - // Admit makes an admission decision based on the request attributes. - // Context is used only for timeout/deadline/cancellation and tracing information. - Admit(ctx context.Context, a Attributes, o ObjectInterfaces) (err error) -} - -// ValidationInterface is an abstract, pluggable interface for Admission Control decisions. -type ValidationInterface interface { - Interface - - // Validate makes an admission decision based on the request attributes. It is NOT allowed to mutate - // Context is used only for timeout/deadline/cancellation and tracing information. - Validate(ctx context.Context, a Attributes, o ObjectInterfaces) (err error) -} - -// Operation is the type of resource operation being checked for admission control -type Operation string - -// Operation constants -const ( - Create Operation = "CREATE" - Update Operation = "UPDATE" - Delete Operation = "DELETE" - Connect Operation = "CONNECT" -) - -// PluginInitializer is used for initialization of shareable resources between admission plugins. -// After initialization the resources have to be set separately -type PluginInitializer interface { - Initialize(plugin Interface) -} - -// InitializationValidator holds ValidateInitialization functions, which are responsible for validation of initialized -// shared resources and should be implemented on admission plugins -type InitializationValidator interface { - ValidateInitialization() error -} - -// ConfigProvider provides a way to get configuration for an admission plugin based on its name -type ConfigProvider interface { - ConfigFor(pluginName string) (io.Reader, error) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go deleted file mode 100644 index 6c1761149..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go +++ /dev/null @@ -1,356 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import ( - "context" - "strconv" - "time" - - "k8s.io/apiserver/pkg/admission" - "k8s.io/component-base/metrics" - "k8s.io/component-base/metrics/legacyregistry" -) - -// WebhookRejectionErrorType defines different error types that happen in a webhook rejection. 
-type WebhookRejectionErrorType string - -const ( - namespace = "apiserver" - subsystem = "admission" - - // WebhookRejectionCallingWebhookError identifies a calling webhook error which causes - // a webhook admission to reject a request - WebhookRejectionCallingWebhookError WebhookRejectionErrorType = "calling_webhook_error" - // WebhookRejectionAPIServerInternalError identifies an apiserver internal error which - // causes a webhook admission to reject a request - WebhookRejectionAPIServerInternalError WebhookRejectionErrorType = "apiserver_internal_error" - // WebhookRejectionNoError identifies a webhook properly rejected a request - WebhookRejectionNoError WebhookRejectionErrorType = "no_error" -) - -var ( - latencySummaryMaxAge = 5 * time.Hour - - // Metrics provides access to all admission metrics. - Metrics = newAdmissionMetrics() -) - -// ObserverFunc is a func that emits metrics. -type ObserverFunc func(ctx context.Context, elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) - -const ( - kindWebhook = "webhook" - kindPolicy = "policy" - stepValidate = "validate" - stepAdmit = "admit" -) - -// WithControllerMetrics is a decorator for named admission handlers. -func WithControllerMetrics(i admission.Interface, name string) admission.Interface { - return WithMetrics(i, Metrics.ObserveAdmissionController, name) -} - -// WithStepMetrics is a decorator for a whole admission phase, i.e. admit or validation.admission step. -func WithStepMetrics(i admission.Interface) admission.Interface { - return WithMetrics(i, Metrics.ObserveAdmissionStep) -} - -// WithMetrics is a decorator for admission handlers with a generic observer func. -func WithMetrics(i admission.Interface, observer ObserverFunc, extraLabels ...string) admission.Interface { - return &pluginHandlerWithMetrics{ - Interface: i, - observer: observer, - extraLabels: extraLabels, - } -} - -// pluginHandlerWithMetrics decorates a admission handler with metrics. -type pluginHandlerWithMetrics struct { - admission.Interface - observer ObserverFunc - extraLabels []string -} - -// Admit performs a mutating admission control check and emit metrics. -func (p pluginHandlerWithMetrics) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { - mutatingHandler, ok := p.Interface.(admission.MutationInterface) - if !ok { - return nil - } - - start := time.Now() - err := mutatingHandler.Admit(ctx, a, o) - p.observer(ctx, time.Since(start), err != nil, a, stepAdmit, p.extraLabels...) - return err -} - -// Validate performs a non-mutating admission control check and emits metrics. -func (p pluginHandlerWithMetrics) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { - validatingHandler, ok := p.Interface.(admission.ValidationInterface) - if !ok { - return nil - } - - start := time.Now() - err := validatingHandler.Validate(ctx, a, o) - p.observer(ctx, time.Since(start), err != nil, a, stepValidate, p.extraLabels...) - return err -} - -// AdmissionMetrics instruments admission with prometheus metrics. 
-type AdmissionMetrics struct { - step *metricSet - controller *metricSet - webhook *metricSet - webhookRejection *metrics.CounterVec - webhookFailOpen *metrics.CounterVec - webhookRequest *metrics.CounterVec - matchConditionEvalErrors *metrics.CounterVec - matchConditionExclusions *metrics.CounterVec - matchConditionEvaluationSeconds *metricSet -} - -// newAdmissionMetrics create a new AdmissionMetrics, configured with default metric names. -func newAdmissionMetrics() *AdmissionMetrics { - // Admission metrics for a step of the admission flow. The entire admission flow is broken down into a series of steps - // Each step is identified by a distinct type label value. - // Use buckets ranging from 5 ms to 2.5 seconds. - step := &metricSet{ - latencies: metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "step_admission_duration_seconds", - Help: "Admission sub-step latency histogram in seconds, broken out for each operation and API resource and step type (validate or admit).", - Buckets: []float64{0.005, 0.025, 0.1, 0.5, 1.0, 2.5}, - StabilityLevel: metrics.STABLE, - }, - []string{"type", "operation", "rejected"}, - ), - - latenciesSummary: metrics.NewSummaryVec( - &metrics.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "step_admission_duration_seconds_summary", - Help: "Admission sub-step latency summary in seconds, broken out for each operation and API resource and step type (validate or admit).", - MaxAge: latencySummaryMaxAge, - StabilityLevel: metrics.ALPHA, - }, - []string{"type", "operation", "rejected"}, - ), - } - - // Built-in admission controller metrics. Each admission controller is identified by name. - // Use buckets ranging from 5 ms to 2.5 seconds. - controller := &metricSet{ - latencies: metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "controller_admission_duration_seconds", - Help: "Admission controller latency histogram in seconds, identified by name and broken out for each operation and API resource and type (validate or admit).", - Buckets: []float64{0.005, 0.025, 0.1, 0.5, 1.0, 2.5}, - StabilityLevel: metrics.STABLE, - }, - []string{"name", "type", "operation", "rejected"}, - ), - - latenciesSummary: nil, - } - - // Admission webhook metrics. Each webhook is identified by name. - // Use buckets ranging from 5 ms to 2.5 seconds (admission webhooks timeout at 30 seconds by default). - webhook := &metricSet{ - latencies: metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "webhook_admission_duration_seconds", - Help: "Admission webhook latency histogram in seconds, identified by name and broken out for each operation and API resource and type (validate or admit).", - Buckets: []float64{0.005, 0.025, 0.1, 0.5, 1.0, 2.5, 10, 25}, - StabilityLevel: metrics.STABLE, - }, - []string{"name", "type", "operation", "rejected"}, - ), - - latenciesSummary: nil, - } - - webhookRejection := metrics.NewCounterVec( - &metrics.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "webhook_rejection_count", - Help: "Admission webhook rejection count, identified by name and broken out for each admission type (validating or admit) and operation. 
Additional labels specify an error type (calling_webhook_error or apiserver_internal_error if an error occurred; no_error otherwise) and optionally a non-zero rejection code if the webhook rejects the request with an HTTP status code (honored by the apiserver when the code is greater or equal to 400). Codes greater than 600 are truncated to 600, to keep the metrics cardinality bounded.", - StabilityLevel: metrics.ALPHA, - }, - []string{"name", "type", "operation", "error_type", "rejection_code"}) - - webhookFailOpen := metrics.NewCounterVec( - &metrics.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "webhook_fail_open_count", - Help: "Admission webhook fail open count, identified by name and broken out for each admission type (validating or mutating).", - StabilityLevel: metrics.ALPHA, - }, - []string{"name", "type"}) - - webhookRequest := metrics.NewCounterVec( - &metrics.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "webhook_request_total", - Help: "Admission webhook request total, identified by name and broken out for each admission type (validating or mutating) and operation. Additional labels specify whether the request was rejected or not and an HTTP status code. Codes greater than 600 are truncated to 600, to keep the metrics cardinality bounded.", - StabilityLevel: metrics.ALPHA, - }, - []string{"name", "type", "operation", "code", "rejected"}) - - matchConditionEvalError := metrics.NewCounterVec( - &metrics.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "match_condition_evaluation_errors_total", - Help: "Admission match condition evaluation errors count, identified by name of resource containing the match condition and broken out for each kind containing matchConditions (webhook or policy), operation and admission type (validate or admit).", - StabilityLevel: metrics.ALPHA, - }, - []string{"name", "kind", "type", "operation"}) - - matchConditionExclusions := metrics.NewCounterVec( - &metrics.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "match_condition_exclusions_total", - Help: "Admission match condition evaluation exclusions count, identified by name of resource containing the match condition and broken out for each kind containing matchConditions (webhook or policy), operation and admission type (validate or admit).", - StabilityLevel: metrics.ALPHA, - }, - []string{"name", "kind", "type", "operation"}) - - matchConditionEvaluationSeconds := &metricSet{ - latencies: metrics.NewHistogramVec( - &metrics.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "match_condition_evaluation_seconds", - Help: "Admission match condition evaluation time in seconds, identified by name and broken out for each kind containing matchConditions (webhook or policy), operation and type (validate or admit).", - Buckets: []float64{0.001, 0.005, 0.01, 0.025, 0.1, 0.2, 0.25}, - StabilityLevel: metrics.ALPHA, - }, - []string{"name", "kind", "type", "operation"}, - ), - latenciesSummary: nil, - } - - step.mustRegister() - controller.mustRegister() - webhook.mustRegister() - matchConditionEvaluationSeconds.mustRegister() - legacyregistry.MustRegister(webhookRejection) - legacyregistry.MustRegister(webhookFailOpen) - legacyregistry.MustRegister(webhookRequest) - legacyregistry.MustRegister(matchConditionEvalError) - legacyregistry.MustRegister(matchConditionExclusions) - return &AdmissionMetrics{step: step, controller: controller, webhook: webhook, webhookRejection: webhookRejection, webhookFailOpen: 
webhookFailOpen, webhookRequest: webhookRequest, matchConditionEvalErrors: matchConditionEvalError, matchConditionExclusions: matchConditionExclusions, matchConditionEvaluationSeconds: matchConditionEvaluationSeconds} -} - -func (m *AdmissionMetrics) reset() { - m.step.reset() - m.controller.reset() - m.webhook.reset() -} - -// ObserveAdmissionStep records admission related metrics for a admission step, identified by step type. -func (m *AdmissionMetrics) ObserveAdmissionStep(ctx context.Context, elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { - m.step.observe(ctx, elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) -} - -// ObserveAdmissionController records admission related metrics for a built-in admission controller, identified by it's plugin handler name. -func (m *AdmissionMetrics) ObserveAdmissionController(ctx context.Context, elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { - m.controller.observe(ctx, elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) -} - -// ObserveWebhook records admission related metrics for a admission webhook. -func (m *AdmissionMetrics) ObserveWebhook(ctx context.Context, name string, elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, code int) { - // We truncate codes greater than 600 to keep the cardinality bounded. - if code > 600 { - code = 600 - } - m.webhookRequest.WithContext(ctx).WithLabelValues(name, stepType, string(attr.GetOperation()), strconv.Itoa(code), strconv.FormatBool(rejected)).Inc() - m.webhook.observe(ctx, elapsed, name, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected)) -} - -// ObserveWebhookRejection records admission related metrics for an admission webhook rejection. -func (m *AdmissionMetrics) ObserveWebhookRejection(ctx context.Context, name, stepType, operation string, errorType WebhookRejectionErrorType, rejectionCode int) { - // We truncate codes greater than 600 to keep the cardinality bounded. - // This should be rarely done by a malfunctioning webhook server. - if rejectionCode > 600 { - rejectionCode = 600 - } - m.webhookRejection.WithContext(ctx).WithLabelValues(name, stepType, operation, string(errorType), strconv.Itoa(rejectionCode)).Inc() -} - -// ObserveWebhookFailOpen records validating or mutating webhook that fail open. -func (m *AdmissionMetrics) ObserveWebhookFailOpen(ctx context.Context, name, stepType string) { - m.webhookFailOpen.WithContext(ctx).WithLabelValues(name, stepType).Inc() -} - -// ObserveMatchConditionEvalError records validating or mutating webhook that are not called due to match conditions -func (m *AdmissionMetrics) ObserveMatchConditionEvalError(ctx context.Context, name, kind, stepType, operation string) { - m.matchConditionEvalErrors.WithContext(ctx).WithLabelValues(name, kind, stepType, operation).Inc() -} - -// ObserveMatchConditionExclusion records validating or mutating webhook that are not called due to match conditions -func (m *AdmissionMetrics) ObserveMatchConditionExclusion(ctx context.Context, name, kind, stepType, operation string) { - m.matchConditionExclusions.WithContext(ctx).WithLabelValues(name, kind, stepType, operation).Inc() -} - -// ObserveMatchConditionEvaluationTime records duration of match condition evaluation process. 
-func (m *AdmissionMetrics) ObserveMatchConditionEvaluationTime(ctx context.Context, elapsed time.Duration, name, kind, stepType, operation string) { - m.matchConditionEvaluationSeconds.observe(ctx, elapsed, name, kind, stepType, operation) -} - -type metricSet struct { - latencies *metrics.HistogramVec - latenciesSummary *metrics.SummaryVec -} - -// MustRegister registers all the prometheus metrics in the metricSet. -func (m *metricSet) mustRegister() { - legacyregistry.MustRegister(m.latencies) - if m.latenciesSummary != nil { - legacyregistry.MustRegister(m.latenciesSummary) - } -} - -// Reset resets all the prometheus metrics in the metricSet. -func (m *metricSet) reset() { - m.latencies.Reset() - if m.latenciesSummary != nil { - m.latenciesSummary.Reset() - } -} - -// Observe records an observed admission event to all metrics in the metricSet. -func (m *metricSet) observe(ctx context.Context, elapsed time.Duration, labels ...string) { - elapsedSeconds := elapsed.Seconds() - m.latencies.WithContext(ctx).WithLabelValues(labels...).Observe(elapsedSeconds) - if m.latenciesSummary != nil { - m.latenciesSummary.WithContext(ctx).WithLabelValues(labels...).Observe(elapsedSeconds) - } -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/OWNERS b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/OWNERS deleted file mode 100644 index 6a637d28d..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - jpbetz - - cici37 - - alexzielenski -reviewers: - - jpbetz - - cici37 - - alexzielenski diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go deleted file mode 100644 index bb5e233d4..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go +++ /dev/null @@ -1,270 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "fmt" - - "github.com/google/cel-go/cel" - - "k8s.io/apimachinery/pkg/util/version" - celconfig "k8s.io/apiserver/pkg/apis/cel" - apiservercel "k8s.io/apiserver/pkg/cel" - "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/cel/library" -) - -const ( - ObjectVarName = "object" - OldObjectVarName = "oldObject" - ParamsVarName = "params" - RequestVarName = "request" - NamespaceVarName = "namespaceObject" - AuthorizerVarName = "authorizer" - RequestResourceAuthorizerVarName = "authorizer.requestResource" - VariableVarName = "variables" -) - -// BuildRequestType generates a DeclType for AdmissionRequest. This may be replaced with a utility that -// converts the native type definition to apiservercel.DeclType once such a utility becomes available. -// The 'uid' field is omitted since it is not needed for in-process admission review. -// The 'object' and 'oldObject' fields are omitted since they are exposed as root level CEL variables. 
-func BuildRequestType() *apiservercel.DeclType { - field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField { - return apiservercel.NewDeclField(name, declType, required, nil, nil) - } - fields := func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField { - result := make(map[string]*apiservercel.DeclField, len(fields)) - for _, f := range fields { - result[f.Name] = f - } - return result - } - gvkType := apiservercel.NewObjectType("kubernetes.GroupVersionKind", fields( - field("group", apiservercel.StringType, true), - field("version", apiservercel.StringType, true), - field("kind", apiservercel.StringType, true), - )) - gvrType := apiservercel.NewObjectType("kubernetes.GroupVersionResource", fields( - field("group", apiservercel.StringType, true), - field("version", apiservercel.StringType, true), - field("resource", apiservercel.StringType, true), - )) - userInfoType := apiservercel.NewObjectType("kubernetes.UserInfo", fields( - field("username", apiservercel.StringType, false), - field("uid", apiservercel.StringType, false), - field("groups", apiservercel.NewListType(apiservercel.StringType, -1), false), - field("extra", apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewListType(apiservercel.StringType, -1), -1), false), - )) - return apiservercel.NewObjectType("kubernetes.AdmissionRequest", fields( - field("kind", gvkType, true), - field("resource", gvrType, true), - field("subResource", apiservercel.StringType, false), - field("requestKind", gvkType, true), - field("requestResource", gvrType, true), - field("requestSubResource", apiservercel.StringType, false), - field("name", apiservercel.StringType, true), - field("namespace", apiservercel.StringType, false), - field("operation", apiservercel.StringType, true), - field("userInfo", userInfoType, true), - field("dryRun", apiservercel.BoolType, false), - field("options", apiservercel.DynType, false), - )) -} - -// BuildNamespaceType generates a DeclType for Namespace. -// Certain nested fields in Namespace (e.g. managedFields, ownerReferences etc.) are omitted in the generated DeclType -// by design. 
-func BuildNamespaceType() *apiservercel.DeclType { - field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField { - return apiservercel.NewDeclField(name, declType, required, nil, nil) - } - fields := func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField { - result := make(map[string]*apiservercel.DeclField, len(fields)) - for _, f := range fields { - result[f.Name] = f - } - return result - } - - specType := apiservercel.NewObjectType("kubernetes.NamespaceSpec", fields( - field("finalizers", apiservercel.NewListType(apiservercel.StringType, -1), true), - )) - conditionType := apiservercel.NewObjectType("kubernetes.NamespaceCondition", fields( - field("status", apiservercel.StringType, true), - field("type", apiservercel.StringType, true), - field("lastTransitionTime", apiservercel.TimestampType, true), - field("message", apiservercel.StringType, true), - field("reason", apiservercel.StringType, true), - )) - statusType := apiservercel.NewObjectType("kubernetes.NamespaceStatus", fields( - field("conditions", apiservercel.NewListType(conditionType, -1), true), - field("phase", apiservercel.StringType, true), - )) - metadataType := apiservercel.NewObjectType("kubernetes.NamespaceMetadata", fields( - field("name", apiservercel.StringType, true), - field("generateName", apiservercel.StringType, true), - field("namespace", apiservercel.StringType, true), - field("labels", apiservercel.NewMapType(apiservercel.StringType, apiservercel.StringType, -1), true), - field("annotations", apiservercel.NewMapType(apiservercel.StringType, apiservercel.StringType, -1), true), - field("UID", apiservercel.StringType, true), - field("creationTimestamp", apiservercel.TimestampType, true), - field("deletionGracePeriodSeconds", apiservercel.IntType, true), - field("deletionTimestamp", apiservercel.TimestampType, true), - field("generation", apiservercel.IntType, true), - field("resourceVersion", apiservercel.StringType, true), - field("finalizers", apiservercel.NewListType(apiservercel.StringType, -1), true), - )) - return apiservercel.NewObjectType("kubernetes.Namespace", fields( - field("metadata", metadataType, true), - field("spec", specType, true), - field("status", statusType, true), - )) -} - -// CompilationResult represents a compiled validations expression. -type CompilationResult struct { - Program cel.Program - Error *apiservercel.Error - ExpressionAccessor ExpressionAccessor - OutputType *cel.Type -} - -// Compiler provides a CEL expression compiler configured with the desired admission related CEL variables and -// environment mode. -type Compiler interface { - CompileCELExpression(expressionAccessor ExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) CompilationResult -} - -type compiler struct { - varEnvs variableDeclEnvs -} - -func NewCompiler(env *environment.EnvSet) Compiler { - return &compiler{varEnvs: mustBuildEnvs(env)} -} - -type variableDeclEnvs map[OptionalVariableDeclarations]*environment.EnvSet - -// CompileCELExpression returns a compiled CEL expression. -// perCallLimit was added for testing purpose only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input. 
-func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, options OptionalVariableDeclarations, envType environment.Type) CompilationResult { - resultError := func(errorString string, errType apiservercel.ErrorType) CompilationResult { - return CompilationResult{ - Error: &apiservercel.Error{ - Type: errType, - Detail: errorString, - }, - ExpressionAccessor: expressionAccessor, - } - } - - env, err := c.varEnvs[options].Env(envType) - if err != nil { - return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal) - } - - ast, issues := env.Compile(expressionAccessor.GetExpression()) - if issues != nil { - return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid) - } - found := false - returnTypes := expressionAccessor.ReturnTypes() - for _, returnType := range returnTypes { - if ast.OutputType() == returnType || cel.AnyType == returnType { - found = true - break - } - } - if !found { - var reason string - if len(returnTypes) == 1 { - reason = fmt.Sprintf("must evaluate to %v", returnTypes[0].String()) - } else { - reason = fmt.Sprintf("must evaluate to one of %v", returnTypes) - } - - return resultError(reason, apiservercel.ErrorTypeInvalid) - } - - _, err = cel.AstToCheckedExpr(ast) - if err != nil { - // should be impossible since env.Compile returned no issues - return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) - } - prog, err := env.Program(ast, - cel.InterruptCheckFrequency(celconfig.CheckFrequency), - ) - if err != nil { - return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal) - } - return CompilationResult{ - Program: prog, - ExpressionAccessor: expressionAccessor, - OutputType: ast.OutputType(), - } -} - -func mustBuildEnvs(baseEnv *environment.EnvSet) variableDeclEnvs { - requestType := BuildRequestType() - namespaceType := BuildNamespaceType() - envs := make(variableDeclEnvs, 8) // since the number of variable combinations is small, pre-build a environment for each - for _, hasParams := range []bool{false, true} { - for _, hasAuthorizer := range []bool{false, true} { - for _, strictCost := range []bool{false, true} { - var envOpts []cel.EnvOption - if hasParams { - envOpts = append(envOpts, cel.Variable(ParamsVarName, cel.DynType)) - } - if hasAuthorizer { - envOpts = append(envOpts, - cel.Variable(AuthorizerVarName, library.AuthorizerType), - cel.Variable(RequestResourceAuthorizerVarName, library.ResourceCheckType)) - } - envOpts = append(envOpts, - cel.Variable(ObjectVarName, cel.DynType), - cel.Variable(OldObjectVarName, cel.DynType), - cel.Variable(NamespaceVarName, namespaceType.CelType()), - cel.Variable(RequestVarName, requestType.CelType())) - - extended, err := baseEnv.Extend( - environment.VersionedOptions{ - // Feature epoch was actually 1.26, but we artificially set it to 1.0 because these - // options should always be present. 
- IntroducedVersion: version.MajorMinor(1, 0), - EnvOptions: envOpts, - DeclTypes: []*apiservercel.DeclType{ - namespaceType, - requestType, - }, - }, - ) - if err != nil { - panic(fmt.Sprintf("environment misconfigured: %v", err)) - } - if strictCost { - extended, err = extended.Extend(environment.StrictCostOpt) - if err != nil { - panic(fmt.Sprintf("environment misconfigured: %v", err)) - } - } - envs[OptionalVariableDeclarations{HasParams: hasParams, HasAuthorizer: hasAuthorizer, StrictCost: strictCost}] = extended - } - } - } - return envs -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go deleted file mode 100644 index 9c449ecda..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go +++ /dev/null @@ -1,253 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "context" - "math" - - "github.com/google/cel-go/cel" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - v1 "k8s.io/api/admission/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/version" - "k8s.io/apiserver/pkg/admission" - apiservercel "k8s.io/apiserver/pkg/cel" - "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/cel/lazy" -) - -const VariablesTypeName = "kubernetes.variables" - -type CompositedCompiler struct { - Compiler - FilterCompiler - - CompositionEnv *CompositionEnv -} - -type CompositedFilter struct { - Filter - - compositionEnv *CompositionEnv -} - -func NewCompositedCompiler(envSet *environment.EnvSet) (*CompositedCompiler, error) { - compositionContext, err := NewCompositionEnv(VariablesTypeName, envSet) - if err != nil { - return nil, err - } - return NewCompositedCompilerFromTemplate(compositionContext), nil -} - -func NewCompositedCompilerFromTemplate(context *CompositionEnv) *CompositedCompiler { - context = &CompositionEnv{ - MapType: context.MapType, - EnvSet: context.EnvSet, - CompiledVariables: map[string]CompilationResult{}, - } - compiler := NewCompiler(context.EnvSet) - filterCompiler := NewFilterCompiler(context.EnvSet) - return &CompositedCompiler{ - Compiler: compiler, - FilterCompiler: filterCompiler, - CompositionEnv: context, - } -} - -func (c *CompositedCompiler) CompileAndStoreVariables(variables []NamedExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) { - for _, v := range variables { - _ = c.CompileAndStoreVariable(v, options, mode) - } -} - -func (c *CompositedCompiler) CompileAndStoreVariable(variable NamedExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) CompilationResult { - result := c.Compiler.CompileCELExpression(variable, options, mode) - c.CompositionEnv.AddField(variable.GetName(), result.OutputType) - c.CompositionEnv.CompiledVariables[variable.GetName()] = result - return result -} - -func (c 
*CompositedCompiler) Compile(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) Filter { - filter := c.FilterCompiler.Compile(expressions, optionalDecls, envType) - return &CompositedFilter{ - Filter: filter, - compositionEnv: c.CompositionEnv, - } -} - -type CompositionEnv struct { - *environment.EnvSet - - MapType *apiservercel.DeclType - CompiledVariables map[string]CompilationResult -} - -func (c *CompositionEnv) AddField(name string, celType *cel.Type) { - c.MapType.Fields[name] = apiservercel.NewDeclField(name, convertCelTypeToDeclType(celType), true, nil, nil) -} - -func NewCompositionEnv(typeName string, baseEnvSet *environment.EnvSet) (*CompositionEnv, error) { - declType := apiservercel.NewObjectType(typeName, map[string]*apiservercel.DeclField{}) - envSet, err := baseEnvSet.Extend(environment.VersionedOptions{ - // set to 1.0 because composition is one of the fundamental components - IntroducedVersion: version.MajorMinor(1, 0), - EnvOptions: []cel.EnvOption{ - cel.Variable("variables", declType.CelType()), - }, - DeclTypes: []*apiservercel.DeclType{ - declType, - }, - }) - if err != nil { - return nil, err - } - return &CompositionEnv{ - MapType: declType, - EnvSet: envSet, - CompiledVariables: map[string]CompilationResult{}, - }, nil -} - -func (c *CompositionEnv) CreateContext(parent context.Context) CompositionContext { - return &compositionContext{ - Context: parent, - compositionEnv: c, - } -} - -type CompositionContext interface { - context.Context - Variables(activation any) ref.Val - GetAndResetCost() int64 -} - -type compositionContext struct { - context.Context - - compositionEnv *CompositionEnv - accumulatedCost int64 -} - -func (c *compositionContext) Variables(activation any) ref.Val { - lazyMap := lazy.NewMapValue(c.compositionEnv.MapType) - for name, result := range c.compositionEnv.CompiledVariables { - accessor := &variableAccessor{ - name: name, - result: result, - activation: activation, - context: c, - } - lazyMap.Append(name, accessor.Callback) - } - return lazyMap -} - -func (f *CompositedFilter) ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *v1.AdmissionRequest, optionalVars OptionalVariableBindings, namespace *corev1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) { - ctx = f.compositionEnv.CreateContext(ctx) - return f.Filter.ForInput(ctx, versionedAttr, request, optionalVars, namespace, runtimeCELCostBudget) -} - -func (c *compositionContext) reportCost(cost int64) { - c.accumulatedCost += cost -} - -func (c *compositionContext) GetAndResetCost() int64 { - cost := c.accumulatedCost - c.accumulatedCost = 0 - return cost -} - -type variableAccessor struct { - name string - result CompilationResult - activation any - context *compositionContext -} - -func (a *variableAccessor) Callback(_ *lazy.MapValue) ref.Val { - if a.result.Error != nil { - return types.NewErr("composited variable %q fails to compile: %v", a.name, a.result.Error) - } - - v, details, err := a.result.Program.ContextEval(a.context, a.activation) - if details == nil { - return types.NewErr("unable to get evaluation details of variable %q", a.name) - } - costPtr := details.ActualCost() - if costPtr == nil { - return types.NewErr("unable to calculate cost of variable %q", a.name) - } - cost := int64(*costPtr) - if *costPtr > math.MaxInt64 { - cost = math.MaxInt64 - } - a.context.reportCost(cost) - - if err != nil { - return types.NewErr("composited variable %q fails to 
evaluate: %v", a.name, err) - } - return v -} - -// convertCelTypeToDeclType converts a cel.Type to DeclType, for the use of -// the TypeProvider and the cost estimator. -// List and map types are created on-demand with their parameters converted recursively. -func convertCelTypeToDeclType(celType *cel.Type) *apiservercel.DeclType { - if celType == nil { - return apiservercel.DynType - } - switch celType { - case cel.AnyType: - return apiservercel.AnyType - case cel.BoolType: - return apiservercel.BoolType - case cel.BytesType: - return apiservercel.BytesType - case cel.DoubleType: - return apiservercel.DoubleType - case cel.DurationType: - return apiservercel.DurationType - case cel.IntType: - return apiservercel.IntType - case cel.NullType: - return apiservercel.NullType - case cel.StringType: - return apiservercel.StringType - case cel.TimestampType: - return apiservercel.TimestampType - case cel.UintType: - return apiservercel.UintType - default: - if celType.HasTrait(traits.ContainerType) && celType.HasTrait(traits.IndexerType) { - parameters := celType.Parameters() - switch len(parameters) { - case 1: - elemType := convertCelTypeToDeclType(parameters[0]) - return apiservercel.NewListType(elemType, -1) - case 2: - keyType := convertCelTypeToDeclType(parameters[0]) - valueType := convertCelTypeToDeclType(parameters[1]) - return apiservercel.NewMapType(keyType, valueType, -1) - } - } - return apiservercel.DynType - } -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go deleted file mode 100644 index 3e2a63e75..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go +++ /dev/null @@ -1,357 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "context" - "fmt" - "math" - "reflect" - "time" - - "github.com/google/cel-go/interpreter" - - admissionv1 "k8s.io/api/admission/v1" - authenticationv1 "k8s.io/api/authentication/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/cel" - "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/cel/library" -) - -// filterCompiler implement the interface FilterCompiler. -type filterCompiler struct { - compiler Compiler -} - -func NewFilterCompiler(env *environment.EnvSet) FilterCompiler { - return &filterCompiler{compiler: NewCompiler(env)} -} - -type evaluationActivation struct { - object, oldObject, params, request, namespace, authorizer, requestResourceAuthorizer, variables interface{} -} - -// ResolveName returns a value from the activation by qualified name, or false if the name -// could not be found. 
-func (a *evaluationActivation) ResolveName(name string) (interface{}, bool) { - switch name { - case ObjectVarName: - return a.object, true - case OldObjectVarName: - return a.oldObject, true - case ParamsVarName: - return a.params, true // params may be null - case RequestVarName: - return a.request, true - case NamespaceVarName: - return a.namespace, true - case AuthorizerVarName: - return a.authorizer, a.authorizer != nil - case RequestResourceAuthorizerVarName: - return a.requestResourceAuthorizer, a.requestResourceAuthorizer != nil - case VariableVarName: // variables always present - return a.variables, true - default: - return nil, false - } -} - -// Parent returns the parent of the current activation, may be nil. -// If non-nil, the parent will be searched during resolve calls. -func (a *evaluationActivation) Parent() interpreter.Activation { - return nil -} - -// Compile compiles the cel expressions defined in the ExpressionAccessors into a Filter -func (c *filterCompiler) Compile(expressionAccessors []ExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) Filter { - compilationResults := make([]CompilationResult, len(expressionAccessors)) - for i, expressionAccessor := range expressionAccessors { - if expressionAccessor == nil { - continue - } - compilationResults[i] = c.compiler.CompileCELExpression(expressionAccessor, options, mode) - } - return NewFilter(compilationResults) -} - -// filter implements the Filter interface -type filter struct { - compilationResults []CompilationResult -} - -func NewFilter(compilationResults []CompilationResult) Filter { - return &filter{ - compilationResults, - } -} - -func convertObjectToUnstructured(obj interface{}) (*unstructured.Unstructured, error) { - if obj == nil || reflect.ValueOf(obj).IsNil() { - return &unstructured.Unstructured{Object: nil}, nil - } - ret, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - if err != nil { - return nil, err - } - return &unstructured.Unstructured{Object: ret}, nil -} - -func objectToResolveVal(r runtime.Object) (interface{}, error) { - if r == nil || reflect.ValueOf(r).IsNil() { - return nil, nil - } - v, err := convertObjectToUnstructured(r) - if err != nil { - return nil, err - } - return v.Object, nil -} - -// ForInput evaluates the compiled CEL expressions converting them into CELEvaluations -// errors per evaluation are returned on the Evaluation object -// runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input. 
-func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *admissionv1.AdmissionRequest, inputs OptionalVariableBindings, namespace *v1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) { - // TODO: replace unstructured with ref.Val for CEL variables when native type support is available - evaluations := make([]EvaluationResult, len(f.compilationResults)) - var err error - - oldObjectVal, err := objectToResolveVal(versionedAttr.VersionedOldObject) - if err != nil { - return nil, -1, err - } - objectVal, err := objectToResolveVal(versionedAttr.VersionedObject) - if err != nil { - return nil, -1, err - } - var paramsVal, authorizerVal, requestResourceAuthorizerVal any - if inputs.VersionedParams != nil { - paramsVal, err = objectToResolveVal(inputs.VersionedParams) - if err != nil { - return nil, -1, err - } - } - - if inputs.Authorizer != nil { - authorizerVal = library.NewAuthorizerVal(versionedAttr.GetUserInfo(), inputs.Authorizer) - requestResourceAuthorizerVal = library.NewResourceAuthorizerVal(versionedAttr.GetUserInfo(), inputs.Authorizer, versionedAttr) - } - - requestVal, err := convertObjectToUnstructured(request) - if err != nil { - return nil, -1, err - } - namespaceVal, err := objectToResolveVal(namespace) - if err != nil { - return nil, -1, err - } - va := &evaluationActivation{ - object: objectVal, - oldObject: oldObjectVal, - params: paramsVal, - request: requestVal.Object, - namespace: namespaceVal, - authorizer: authorizerVal, - requestResourceAuthorizer: requestResourceAuthorizerVal, - } - - // composition is an optional feature that only applies for ValidatingAdmissionPolicy. - // check if the context allows composition - var compositionCtx CompositionContext - var ok bool - if compositionCtx, ok = ctx.(CompositionContext); ok { - va.variables = compositionCtx.Variables(va) - } - - remainingBudget := runtimeCELCostBudget - for i, compilationResult := range f.compilationResults { - var evaluation = &evaluations[i] - if compilationResult.ExpressionAccessor == nil { // in case of placeholder - continue - } - evaluation.ExpressionAccessor = compilationResult.ExpressionAccessor - if compilationResult.Error != nil { - evaluation.Error = &cel.Error{ - Type: cel.ErrorTypeInvalid, - Detail: fmt.Sprintf("compilation error: %v", compilationResult.Error), - } - continue - } - if compilationResult.Program == nil { - evaluation.Error = &cel.Error{ - Type: cel.ErrorTypeInternal, - Detail: fmt.Sprintf("unexpected internal error compiling expression"), - } - continue - } - t1 := time.Now() - evalResult, evalDetails, err := compilationResult.Program.ContextEval(ctx, va) - // budget may be spent due to lazy evaluation of composited variables - if compositionCtx != nil { - compositionCost := compositionCtx.GetAndResetCost() - if compositionCost > remainingBudget { - return nil, -1, &cel.Error{ - Type: cel.ErrorTypeInvalid, - Detail: fmt.Sprintf("validation failed due to running out of cost budget, no further validation rules will be run"), - } - } - remainingBudget -= compositionCost - } - elapsed := time.Since(t1) - evaluation.Elapsed = elapsed - if evalDetails == nil { - return nil, -1, &cel.Error{ - Type: cel.ErrorTypeInternal, - Detail: fmt.Sprintf("runtime cost could not be calculated for expression: %v, no further expression will be run", compilationResult.ExpressionAccessor.GetExpression()), - } - } else { - rtCost := evalDetails.ActualCost() - if rtCost == nil { - return nil, -1, &cel.Error{ - Type: 
cel.ErrorTypeInvalid, - Detail: fmt.Sprintf("runtime cost could not be calculated for expression: %v, no further expression will be run", compilationResult.ExpressionAccessor.GetExpression()), - } - } else { - if *rtCost > math.MaxInt64 || int64(*rtCost) > remainingBudget { - return nil, -1, &cel.Error{ - Type: cel.ErrorTypeInvalid, - Detail: fmt.Sprintf("validation failed due to running out of cost budget, no further validation rules will be run"), - } - } - remainingBudget -= int64(*rtCost) - } - } - if err != nil { - evaluation.Error = &cel.Error{ - Type: cel.ErrorTypeInvalid, - Detail: fmt.Sprintf("expression '%v' resulted in error: %v", compilationResult.ExpressionAccessor.GetExpression(), err), - } - } else { - evaluation.EvalResult = evalResult - } - } - - return evaluations, remainingBudget, nil -} - -// TODO: to reuse https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go#L154 -func CreateAdmissionRequest(attr admission.Attributes, equivalentGVR metav1.GroupVersionResource, equivalentKind metav1.GroupVersionKind) *admissionv1.AdmissionRequest { - // Attempting to use same logic as webhook for constructing resource - // GVK, GVR, subresource - // Use the GVK, GVR that the matcher decided was equivalent to that of the request - // https://github.com/kubernetes/kubernetes/blob/90c362b3430bcbbf8f245fadbcd521dab39f1d7c/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go#L182-L210 - gvk := equivalentKind - gvr := equivalentGVR - subresource := attr.GetSubresource() - - requestGVK := attr.GetKind() - requestGVR := attr.GetResource() - requestSubResource := attr.GetSubresource() - - aUserInfo := attr.GetUserInfo() - var userInfo authenticationv1.UserInfo - if aUserInfo != nil { - userInfo = authenticationv1.UserInfo{ - Extra: make(map[string]authenticationv1.ExtraValue), - Groups: aUserInfo.GetGroups(), - UID: aUserInfo.GetUID(), - Username: aUserInfo.GetName(), - } - // Convert the extra information in the user object - for key, val := range aUserInfo.GetExtra() { - userInfo.Extra[key] = authenticationv1.ExtraValue(val) - } - } - - dryRun := attr.IsDryRun() - - return &admissionv1.AdmissionRequest{ - Kind: metav1.GroupVersionKind{ - Group: gvk.Group, - Kind: gvk.Kind, - Version: gvk.Version, - }, - Resource: metav1.GroupVersionResource{ - Group: gvr.Group, - Resource: gvr.Resource, - Version: gvr.Version, - }, - SubResource: subresource, - RequestKind: &metav1.GroupVersionKind{ - Group: requestGVK.Group, - Kind: requestGVK.Kind, - Version: requestGVK.Version, - }, - RequestResource: &metav1.GroupVersionResource{ - Group: requestGVR.Group, - Resource: requestGVR.Resource, - Version: requestGVR.Version, - }, - RequestSubResource: requestSubResource, - Name: attr.GetName(), - Namespace: attr.GetNamespace(), - Operation: admissionv1.Operation(attr.GetOperation()), - UserInfo: userInfo, - // Leave Object and OldObject unset since we don't provide access to them via request - DryRun: &dryRun, - Options: runtime.RawExtension{ - Object: attr.GetOperationOptions(), - }, - } -} - -// CreateNamespaceObject creates a Namespace object that is suitable for the CEL evaluation. 
-// If the namespace is nil, CreateNamespaceObject returns nil -func CreateNamespaceObject(namespace *v1.Namespace) *v1.Namespace { - if namespace == nil { - return nil - } - - return &v1.Namespace{ - Status: namespace.Status, - Spec: namespace.Spec, - ObjectMeta: metav1.ObjectMeta{ - Name: namespace.Name, - GenerateName: namespace.GenerateName, - Namespace: namespace.Namespace, - UID: namespace.UID, - ResourceVersion: namespace.ResourceVersion, - Generation: namespace.Generation, - CreationTimestamp: namespace.CreationTimestamp, - DeletionTimestamp: namespace.DeletionTimestamp, - DeletionGracePeriodSeconds: namespace.DeletionGracePeriodSeconds, - Labels: namespace.Labels, - Annotations: namespace.Annotations, - Finalizers: namespace.Finalizers, - }, - } -} - -// CompilationErrors returns a list of all the errors from the compilation of the evaluator -func (e *filter) CompilationErrors() []error { - compilationErrors := []error{} - for _, result := range e.compilationResults { - if result.Error != nil { - compilationErrors = append(compilationErrors, result.Error) - } - } - return compilationErrors -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go deleted file mode 100644 index ae61dc826..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "context" - "time" - - "github.com/google/cel-go/cel" - "github.com/google/cel-go/common/types/ref" - - v1 "k8s.io/api/admission/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/apiserver/pkg/cel/environment" -) - -type ExpressionAccessor interface { - GetExpression() string - ReturnTypes() []*cel.Type -} - -// NamedExpressionAccessor extends NamedExpressionAccessor with a name. -type NamedExpressionAccessor interface { - ExpressionAccessor - - GetName() string // follows the naming convention of ExpressionAccessor -} - -// EvaluationResult contains the minimal required fields and metadata of a cel evaluation -type EvaluationResult struct { - EvalResult ref.Val - ExpressionAccessor ExpressionAccessor - Elapsed time.Duration - Error error -} - -// OptionalVariableDeclarations declares which optional CEL variables -// are declared for an expression. -type OptionalVariableDeclarations struct { - // HasParams specifies if the "params" variable is declared. - // The "params" variable may still be bound to "null" when declared. - HasParams bool - // HasAuthorizer specifies if the "authorizer" and "authorizer.requestResource" - // variables are declared. When declared, the authorizer variables are - // expected to be non-null. - HasAuthorizer bool - // StrictCost specifies if the CEL cost limitation is strict for extended libraries as well as native libraries. 
- StrictCost bool -} - -// FilterCompiler contains a function to assist with converting types and values to/from CEL-typed values. -type FilterCompiler interface { - // Compile is used for the cel expression compilation - Compile(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) Filter -} - -// OptionalVariableBindings provides expression bindings for optional CEL variables. -type OptionalVariableBindings struct { - // VersionedParams provides the "params" variable binding. This variable binding may - // be set to nil even when OptionalVariableDeclarations.HashParams is set to true. - VersionedParams runtime.Object - // Authorizer provides the authorizer used for the "authorizer" and - // "authorizer.requestResource" variable bindings. If the expression was compiled with - // OptionalVariableDeclarations.HasAuthorizer set to true this must be non-nil. - Authorizer authorizer.Authorizer -} - -// Filter contains a function to evaluate compiled CEL-typed values -// It expects the inbound object to already have been converted to the version expected -// by the underlying CEL code (which is indicated by the match criteria of a policy definition). -// versionedParams may be nil. -type Filter interface { - // ForInput converts compiled CEL-typed values into evaluated CEL-typed value. - // runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input. - // If cost budget is calculated, the filter should return the remaining budget. - ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *v1.AdmissionRequest, optionalVars OptionalVariableBindings, namespace *corev1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) - - // CompilationErrors returns a list of errors from the compilation of the evaluator - CompilationErrors() []error -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/accessor.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/accessor.go deleted file mode 100644 index 85b18612f..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/accessor.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/types" -) - -type PolicyAccessor interface { - GetName() string - GetNamespace() string - GetParamKind() *v1.ParamKind - GetMatchConstraints() *v1.MatchResources -} - -type BindingAccessor interface { - GetName() string - GetNamespace() string - - // GetPolicyName returns the name of the (Validating/Mutating)AdmissionPolicy, - // which is cluster-scoped, so namespace is usually left blank. 
- // But we leave the door open to add a namespaced vesion in the future - GetPolicyName() types.NamespacedName - GetParamRef() *v1.ParamRef - - GetMatchResources() *v1.MatchResources -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/interfaces.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/interfaces.go deleted file mode 100644 index d4dbfb0aa..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/interfaces.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "context" - - "k8s.io/apiserver/pkg/admission" -) - -// Hook represents a dynamic admission hook. The hook may be a webhook or a -// policy. For webhook, the Hook may describe how to contact the endpoint, expected -// cert, etc. For policies, the hook may describe a compiled policy-binding pair. -type Hook interface { - // All hooks are expected to contain zero or more match conditions, object - // selectors, namespace selectors to help the dispatcher decide when to apply - // the hook. - // - // Methods of matching logic is applied are specific to the hook and left up - // to the implementation. -} - -// Source can list dynamic admission plugins. -type Source[H Hook] interface { - // Hooks returns the list of currently known admission hooks. - Hooks() []H - - // Run the source. This method should be called only once at startup. - Run(ctx context.Context) error - - // HasSynced returns true if the source has completed its initial sync. - HasSynced() bool -} - -// Dispatcher dispatches evaluates an admission request against the currently -// active hooks returned by the source. -type Dispatcher[H Hook] interface { - // Dispatch a request to the policies. Dispatcher may choose not to - // call a hook, either because the rules of the hook does not match, or - // the namespaceSelector or the objectSelector of the hook does not - // match. A non-nil error means the request is rejected. - Dispatch(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces, hooks []H) error -} - -// An evaluator represents a compiled CEL expression that can be evaluated a -// given a set of inputs used by the generic PolicyHook for Mutating and -// ValidatingAdmissionPolicy. -// Mutating and Validating may have different forms of evaluators -type Evaluator interface { -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/plugin.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/plugin.go deleted file mode 100644 index ed1c621bc..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/plugin.go +++ /dev/null @@ -1,215 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "context" - "errors" - "fmt" - - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/admission/initializer" - "k8s.io/apiserver/pkg/admission/plugin/policy/matching" - "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" -) - -// H is the Hook type generated by the source and consumed by the dispatcher. -type sourceFactory[H any] func(informers.SharedInformerFactory, kubernetes.Interface, dynamic.Interface, meta.RESTMapper) Source[H] -type dispatcherFactory[H any] func(authorizer.Authorizer, *matching.Matcher) Dispatcher[H] - -// admissionResources is the list of resources related to CEL-based admission -// features. -var admissionResources = []schema.GroupResource{ - {Group: admissionregistrationv1.GroupName, Resource: "validatingadmissionpolicies"}, - {Group: admissionregistrationv1.GroupName, Resource: "validatingadmissionpolicybindings"}, - {Group: admissionregistrationv1.GroupName, Resource: "mutatingadmissionpolicies"}, - {Group: admissionregistrationv1.GroupName, Resource: "mutatingadmissionpolicybindings"}, -} - -// AdmissionPolicyManager is an abstract admission plugin with all the -// infrastructure to define Admit or Validate on-top. 
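// A stripped-down, self-contained sketch of the Source/Dispatcher wiring this
// plugin provides: the source lists the currently known hooks and the
// dispatcher evaluates a request against them. All names below are
// hypothetical simplifications, not the k8s.io/apiserver API.
package main

import (
	"context"
	"fmt"
)

type source[H any] interface {
	Hooks() []H
	HasSynced() bool
}

type dispatcher[H any] interface {
	Dispatch(ctx context.Context, request string, hooks []H) error
}

// plugin mirrors the shape of Plugin[H]: it refuses requests until the source
// has synced, then hands the active hooks to the dispatcher.
type plugin[H any] struct {
	source     source[H]
	dispatcher dispatcher[H]
}

func (p *plugin[H]) Admit(ctx context.Context, request string) error {
	if !p.source.HasSynced() {
		return fmt.Errorf("not yet ready to handle request")
	}
	return p.dispatcher.Dispatch(ctx, request, p.source.Hooks())
}

// staticSource and logDispatcher stand in for the informer-backed policy
// source and the CEL-evaluating dispatcher of the real plugin.
type staticSource[H any] struct{ hooks []H }

func (s staticSource[H]) Hooks() []H      { return s.hooks }
func (s staticSource[H]) HasSynced() bool { return true }

type logDispatcher[H any] struct{}

func (logDispatcher[H]) Dispatch(_ context.Context, request string, hooks []H) error {
	fmt.Printf("dispatching %q to %d hooks\n", request, len(hooks))
	return nil
}

func main() {
	p := &plugin[string]{
		source:     staticSource[string]{hooks: []string{"policy-a/binding-1"}},
		dispatcher: logDispatcher[string]{},
	}
	fmt.Println(p.Admit(context.Background(), "CREATE pods/nginx"))
}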
-type Plugin[H any] struct { - *admission.Handler - - sourceFactory sourceFactory[H] - dispatcherFactory dispatcherFactory[H] - - source Source[H] - dispatcher Dispatcher[H] - matcher *matching.Matcher - - informerFactory informers.SharedInformerFactory - client kubernetes.Interface - restMapper meta.RESTMapper - dynamicClient dynamic.Interface - excludedResources sets.Set[schema.GroupResource] - stopCh <-chan struct{} - authorizer authorizer.Authorizer - enabled bool -} - -var ( - _ initializer.WantsExternalKubeInformerFactory = &Plugin[any]{} - _ initializer.WantsExternalKubeClientSet = &Plugin[any]{} - _ initializer.WantsRESTMapper = &Plugin[any]{} - _ initializer.WantsDynamicClient = &Plugin[any]{} - _ initializer.WantsDrainedNotification = &Plugin[any]{} - _ initializer.WantsAuthorizer = &Plugin[any]{} - _ initializer.WantsExcludedAdmissionResources = &Plugin[any]{} - _ admission.InitializationValidator = &Plugin[any]{} -) - -func NewPlugin[H any]( - handler *admission.Handler, - sourceFactory sourceFactory[H], - dispatcherFactory dispatcherFactory[H], -) *Plugin[H] { - return &Plugin[H]{ - Handler: handler, - sourceFactory: sourceFactory, - dispatcherFactory: dispatcherFactory, - - // always exclude admission/mutating policies and bindings - excludedResources: sets.New(admissionResources...), - } -} - -func (c *Plugin[H]) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { - c.informerFactory = f -} - -func (c *Plugin[H]) SetExternalKubeClientSet(client kubernetes.Interface) { - c.client = client -} - -func (c *Plugin[H]) SetRESTMapper(mapper meta.RESTMapper) { - c.restMapper = mapper -} - -func (c *Plugin[H]) SetDynamicClient(client dynamic.Interface) { - c.dynamicClient = client -} - -func (c *Plugin[H]) SetDrainedNotification(stopCh <-chan struct{}) { - c.stopCh = stopCh -} - -func (c *Plugin[H]) SetAuthorizer(authorizer authorizer.Authorizer) { - c.authorizer = authorizer -} - -func (c *Plugin[H]) SetMatcher(matcher *matching.Matcher) { - c.matcher = matcher -} - -func (c *Plugin[H]) SetEnabled(enabled bool) { - c.enabled = enabled -} - -func (c *Plugin[H]) SetExcludedAdmissionResources(excludedResources []schema.GroupResource) { - c.excludedResources.Insert(excludedResources...) -} - -// ValidateInitialization - once clientset and informer factory are provided, creates and starts the admission controller -func (c *Plugin[H]) ValidateInitialization() error { - // By default enabled is set to false. 
It is up to types which embed this - // struct to set it to true (if feature gate is enabled, or other conditions) - if !c.enabled { - return nil - } - if c.Handler == nil { - return errors.New("missing handler") - } - if c.informerFactory == nil { - return errors.New("missing informer factory") - } - if c.client == nil { - return errors.New("missing kubernetes client") - } - if c.restMapper == nil { - return errors.New("missing rest mapper") - } - if c.dynamicClient == nil { - return errors.New("missing dynamic client") - } - if c.stopCh == nil { - return errors.New("missing stop channel") - } - if c.authorizer == nil { - return errors.New("missing authorizer") - } - - // Use default matcher - namespaceInformer := c.informerFactory.Core().V1().Namespaces() - c.matcher = matching.NewMatcher(namespaceInformer.Lister(), c.client) - - if err := c.matcher.ValidateInitialization(); err != nil { - return err - } - - c.source = c.sourceFactory(c.informerFactory, c.client, c.dynamicClient, c.restMapper) - c.dispatcher = c.dispatcherFactory(c.authorizer, c.matcher) - - pluginContext, pluginContextCancel := context.WithCancel(context.Background()) - go func() { - defer pluginContextCancel() - <-c.stopCh - }() - - go func() { - err := c.source.Run(pluginContext) - if err != nil && !errors.Is(err, context.Canceled) { - utilruntime.HandleError(fmt.Errorf("policy source context unexpectedly closed: %v", err)) - } - }() - - c.SetReadyFunc(func() bool { - return namespaceInformer.Informer().HasSynced() && c.source.HasSynced() - }) - return nil -} - -func (c *Plugin[H]) Dispatch( - ctx context.Context, - a admission.Attributes, - o admission.ObjectInterfaces, -) (err error) { - if !c.enabled { - return nil - } else if c.shouldIgnoreResource(a) { - return nil - } else if !c.WaitForReady() { - return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request")) - } - - return c.dispatcher.Dispatch(ctx, a, o, c.source.Hooks()) -} - -func (c *Plugin[H]) shouldIgnoreResource(attr admission.Attributes) bool { - gvr := attr.GetResource() - // exclusion decision ignores the version. - gr := gvr.GroupResource() - return c.excludedResources.Has(gr) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_dispatcher.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_dispatcher.go deleted file mode 100644 index 62ed7bc6c..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_dispatcher.go +++ /dev/null @@ -1,354 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package generic - -import ( - "context" - "errors" - "fmt" - "time" - - "k8s.io/api/admissionregistration/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/admission/plugin/policy/matching" - webhookgeneric "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" -) - -// A policy invocation is a single policy-binding-param tuple from a Policy Hook -// in the context of a specific request. The params have already been resolved -// and any error in configuration or setting up the invocation is stored in -// the Error field. -type PolicyInvocation[P runtime.Object, B runtime.Object, E Evaluator] struct { - // Relevant policy for this hook. - // This field is always populated - Policy P - - // Matched Kind for the request given the policy's matchconstraints - // May be empty if there was an error matching the resource - Kind schema.GroupVersionKind - - // Matched Resource for the request given the policy's matchconstraints - // May be empty if there was an error matching the resource - Resource schema.GroupVersionResource - - // Relevant binding for this hook. - // May be empty if there was an error with the policy's configuration itself - Binding B - - // Compiled policy evaluator - Evaluator E - - // Params fetched by the binding to use to evaluate the policy - Param runtime.Object - - // Error is set if there was an error with the policy or binding or its - // params, etc - Error error -} - -// dispatcherDelegate is called during a request with a pre-filtered list -// of (Policy, Binding, Param) tuples that are active and match the request. -// The dispatcher delegate is responsible for updating the object on the -// admission attributes in the case of mutation, or returning a status error in -// the case of validation. -// -// The delegate provides the "validation" or "mutation" aspect of dispatcher functionality -// (in contrast to generic.PolicyDispatcher which only selects active policies and params) -type dispatcherDelegate[P, B runtime.Object, E Evaluator] func(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces, versionedAttributes webhookgeneric.VersionedAttributeAccessor, invocations []PolicyInvocation[P, B, E]) error - -type policyDispatcher[P runtime.Object, B runtime.Object, E Evaluator] struct { - newPolicyAccessor func(P) PolicyAccessor - newBindingAccessor func(B) BindingAccessor - matcher PolicyMatcher - delegate dispatcherDelegate[P, B, E] -} - -func NewPolicyDispatcher[P runtime.Object, B runtime.Object, E Evaluator]( - newPolicyAccessor func(P) PolicyAccessor, - newBindingAccessor func(B) BindingAccessor, - matcher *matching.Matcher, - delegate dispatcherDelegate[P, B, E], -) Dispatcher[PolicyHook[P, B, E]] { - return &policyDispatcher[P, B, E]{ - newPolicyAccessor: newPolicyAccessor, - newBindingAccessor: newBindingAccessor, - matcher: NewPolicyMatcher(matcher), - delegate: delegate, - } -} - -// Dispatch implements generic.Dispatcher. It loops through all active hooks -// (policy x binding pairs) and selects those which are active for the current -// request. It then resolves all params and creates an Invocation for each -// matching policy-binding-param tuple. 
The delegate is then called with the -// list of tuples. -// -// Note: MatchConditions expressions are not evaluated here. The dispatcher delegate -// is expected to ignore the result of any policies whose match conditions dont pass. -// This may be possible to refactor so matchconditions are checked here instead. -func (d *policyDispatcher[P, B, E]) Dispatch(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces, hooks []PolicyHook[P, B, E]) error { - var relevantHooks []PolicyInvocation[P, B, E] - // Construct all the versions we need to call our webhooks - versionedAttrAccessor := &versionedAttributeAccessor{ - versionedAttrs: map[schema.GroupVersionKind]*admission.VersionedAttributes{}, - attr: a, - objectInterfaces: o, - } - - for _, hook := range hooks { - policyAccessor := d.newPolicyAccessor(hook.Policy) - matches, matchGVR, matchGVK, err := d.matcher.DefinitionMatches(a, o, policyAccessor) - if err != nil { - // There was an error evaluating if this policy matches anything. - utilruntime.HandleError(err) - relevantHooks = append(relevantHooks, PolicyInvocation[P, B, E]{ - Policy: hook.Policy, - Error: err, - }) - continue - } else if !matches { - continue - } else if hook.ConfigurationError != nil { - // The policy matches but there is a configuration error with the - // policy itself - relevantHooks = append(relevantHooks, PolicyInvocation[P, B, E]{ - Policy: hook.Policy, - Error: hook.ConfigurationError, - Resource: matchGVR, - Kind: matchGVK, - }) - utilruntime.HandleError(hook.ConfigurationError) - continue - } - - for _, binding := range hook.Bindings { - bindingAccessor := d.newBindingAccessor(binding) - matches, err = d.matcher.BindingMatches(a, o, bindingAccessor) - if err != nil { - // There was an error evaluating if this binding matches anything. - utilruntime.HandleError(err) - relevantHooks = append(relevantHooks, PolicyInvocation[P, B, E]{ - Policy: hook.Policy, - Binding: binding, - Error: err, - Resource: matchGVR, - Kind: matchGVK, - }) - continue - } else if !matches { - continue - } - - // Collect params for this binding - params, err := CollectParams( - policyAccessor.GetParamKind(), - hook.ParamInformer, - hook.ParamScope, - bindingAccessor.GetParamRef(), - a.GetNamespace(), - ) - if err != nil { - // There was an error collecting params for this binding. - utilruntime.HandleError(err) - relevantHooks = append(relevantHooks, PolicyInvocation[P, B, E]{ - Policy: hook.Policy, - Binding: binding, - Error: err, - Resource: matchGVR, - Kind: matchGVK, - }) - continue - } - - // If params is empty and there was no error, that means that - // ParamNotFoundAction is ignore, so it shouldnt be added to list - for _, param := range params { - relevantHooks = append(relevantHooks, PolicyInvocation[P, B, E]{ - Policy: hook.Policy, - Binding: binding, - Kind: matchGVK, - Resource: matchGVR, - Param: param, - Evaluator: hook.Evaluator, - }) - } - - // VersionedAttr result will be cached and reused later during parallel - // hook calls - _, err = versionedAttrAccessor.VersionedAttribute(matchGVK) - if err != nil { - return apierrors.NewInternalError(err) - } - } - - } - - if len(relevantHooks) == 0 { - // no matching hooks - return nil - } - - return d.delegate(ctx, a, o, versionedAttrAccessor, relevantHooks) -} - -// Returns params to use to evaluate a policy-binding with given param -// configuration. If the policy-binding has no param configuration, it -// returns a single-element list with a nil param. 
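// The param-resolution comment above reduces to a small decision table over
// paramKind and paramRef. A simplified, standalone sketch of that logic; the
// lookup functions and the paramRef type here are hypothetical stand-ins, not
// the real API.
package main

import (
	"errors"
	"fmt"
)

type paramRef struct {
	Name     string
	Selector string // stand-in for a label selector
	Deny     bool   // stand-in for parameterNotFoundAction: Deny
}

// collectParams mirrors the shape of CollectParams: no paramKind or no
// paramRef means "evaluate once with nil params"; otherwise resolve by name or
// by selector and honour the not-found action.
func collectParams(hasParamKind bool, ref *paramRef, getByName func(string) (any, bool), listBySelector func(string) []any) ([]any, error) {
	switch {
	case !hasParamKind, ref == nil:
		return []any{nil}, nil
	case ref.Name != "" && ref.Selector != "":
		return nil, errors.New("paramRef.name and paramRef.selector are mutually exclusive")
	case ref.Name != "":
		if p, ok := getByName(ref.Name); ok {
			return []any{p}, nil
		}
	case ref.Selector != "":
		if ps := listBySelector(ref.Selector); len(ps) > 0 {
			return ps, nil
		}
	default:
		return nil, errors.New("one of name or selector must be provided")
	}
	if ref.Deny {
		return nil, errors.New("no params found for policy binding with `Deny` parameterNotFoundAction")
	}
	return nil, nil // params not found, not-found action is ignore
}

func main() {
	get := func(name string) (any, bool) { return map[string]int{"maxReplicas": 3}, name == "limits" }
	list := func(string) []any { return nil }
	params, err := collectParams(true, &paramRef{Name: "limits"}, get, list)
	fmt.Println(params, err)
}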
-func CollectParams( - paramKind *v1.ParamKind, - paramInformer informers.GenericInformer, - paramScope meta.RESTScope, - paramRef *v1.ParamRef, - namespace string, -) ([]runtime.Object, error) { - // If definition has paramKind, paramRef is required in binding. - // If definition has no paramKind, paramRef set in binding will be ignored. - var params []runtime.Object - var paramStore cache.GenericNamespaceLister - - // Make sure the param kind is ready to use - if paramKind != nil && paramRef != nil { - if paramInformer == nil { - return nil, fmt.Errorf("paramKind kind `%v` not known", - paramKind.String()) - } - - // Set up cluster-scoped, or namespaced access to the params - // "default" if not provided, and paramKind is namespaced - paramStore = paramInformer.Lister() - if paramScope.Name() == meta.RESTScopeNameNamespace { - paramsNamespace := namespace - if len(paramRef.Namespace) > 0 { - paramsNamespace = paramRef.Namespace - } else if len(paramsNamespace) == 0 { - // You must supply namespace if your matcher can possibly - // match a cluster-scoped resource - return nil, fmt.Errorf("cannot use namespaced paramRef in policy binding that matches cluster-scoped resources") - } - - paramStore = paramInformer.Lister().ByNamespace(paramsNamespace) - } - - // If the param informer for this admission policy has not yet - // had time to perform an initial listing, don't attempt to use - // it. - timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - - if !cache.WaitForCacheSync(timeoutCtx.Done(), paramInformer.Informer().HasSynced) { - return nil, fmt.Errorf("paramKind kind `%v` not yet synced to use for admission", - paramKind.String()) - } - } - - // Find params to use with policy - switch { - case paramKind == nil: - // ParamKind is unset. Ignore any globalParamRef or namespaceParamRef - // setting. - return []runtime.Object{nil}, nil - case paramRef == nil: - // Policy ParamKind is set, but binding does not use it. - // Validate with nil params - return []runtime.Object{nil}, nil - case len(paramRef.Namespace) > 0 && paramScope.Name() == meta.RESTScopeRoot.Name(): - // Not allowed to set namespace for cluster-scoped param - return nil, fmt.Errorf("paramRef.namespace must not be provided for a cluster-scoped `paramKind`") - - case len(paramRef.Name) > 0: - if paramRef.Selector != nil { - // This should be validated, but just in case. - return nil, fmt.Errorf("paramRef.name and paramRef.selector are mutually exclusive") - } - - switch param, err := paramStore.Get(paramRef.Name); { - case err == nil: - params = []runtime.Object{param} - case apierrors.IsNotFound(err): - // Param not yet available. User may need to wait a bit - // before being able to use it for validation. 
- // - // Set params to nil to prepare for not found action - params = nil - case apierrors.IsInvalid(err): - // Param mis-configured - // require to set namespace for namespaced resource - // and unset namespace for cluster scoped resource - return nil, err - default: - // Internal error - utilruntime.HandleError(err) - return nil, err - } - case paramRef.Selector != nil: - // Select everything by default if empty name and selector - selector, err := metav1.LabelSelectorAsSelector(paramRef.Selector) - if err != nil { - // Cannot parse label selector: configuration error - return nil, err - - } - - paramList, err := paramStore.List(selector) - if err != nil { - // There was a bad internal error - utilruntime.HandleError(err) - return nil, err - } - - // Successfully grabbed params - params = paramList - default: - // Should be unreachable due to validation - return nil, fmt.Errorf("one of name or selector must be provided") - } - - // Apply fail action for params not found case - if len(params) == 0 && paramRef.ParameterNotFoundAction != nil && *paramRef.ParameterNotFoundAction == v1.DenyAction { - return nil, errors.New("no params found for policy binding with `Deny` parameterNotFoundAction") - } - - return params, nil -} - -var _ webhookgeneric.VersionedAttributeAccessor = &versionedAttributeAccessor{} - -type versionedAttributeAccessor struct { - versionedAttrs map[schema.GroupVersionKind]*admission.VersionedAttributes - attr admission.Attributes - objectInterfaces admission.ObjectInterfaces -} - -func (v *versionedAttributeAccessor) VersionedAttribute(gvk schema.GroupVersionKind) (*admission.VersionedAttributes, error) { - if val, ok := v.versionedAttrs[gvk]; ok { - return val, nil - } - versionedAttr, err := admission.NewVersionedAttributes(v.attr, gvk, v.objectInterfaces) - if err != nil { - return nil, err - } - v.versionedAttrs[gvk] = versionedAttr - return versionedAttr, nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_matcher.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_matcher.go deleted file mode 100644 index d243b0710..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_matcher.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package generic - -import ( - "fmt" - - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/admission/plugin/policy/matching" -) - -// Matcher is used for matching ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding to attributes -type PolicyMatcher interface { - admission.InitializationValidator - - // DefinitionMatches says whether this policy definition matches the provided admission - // resource request - DefinitionMatches(a admission.Attributes, o admission.ObjectInterfaces, definition PolicyAccessor) (bool, schema.GroupVersionResource, schema.GroupVersionKind, error) - - // BindingMatches says whether this policy definition matches the provided admission - // resource request - BindingMatches(a admission.Attributes, o admission.ObjectInterfaces, binding BindingAccessor) (bool, error) - - // GetNamespace retrieves the Namespace resource by the given name. The name may be empty, in which case - // GetNamespace must return nil, nil - GetNamespace(name string) (*corev1.Namespace, error) -} - -type matcher struct { - Matcher *matching.Matcher -} - -func NewPolicyMatcher(m *matching.Matcher) PolicyMatcher { - return &matcher{ - Matcher: m, - } -} - -// ValidateInitialization checks if Matcher is initialized. -func (c *matcher) ValidateInitialization() error { - return c.Matcher.ValidateInitialization() -} - -// DefinitionMatches returns whether this ValidatingAdmissionPolicy matches the provided admission resource request -func (c *matcher) DefinitionMatches(a admission.Attributes, o admission.ObjectInterfaces, definition PolicyAccessor) (bool, schema.GroupVersionResource, schema.GroupVersionKind, error) { - constraints := definition.GetMatchConstraints() - if constraints == nil { - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, fmt.Errorf("policy contained no match constraints, a required field") - } - criteria := matchCriteria{constraints: constraints} - return c.Matcher.Matches(a, o, &criteria) -} - -// BindingMatches returns whether this ValidatingAdmissionPolicyBinding matches the provided admission resource request -func (c *matcher) BindingMatches(a admission.Attributes, o admission.ObjectInterfaces, binding BindingAccessor) (bool, error) { - matchResources := binding.GetMatchResources() - if matchResources == nil { - return true, nil - } - - criteria := matchCriteria{constraints: matchResources} - isMatch, _, _, err := c.Matcher.Matches(a, o, &criteria) - return isMatch, err -} - -func (c *matcher) GetNamespace(name string) (*corev1.Namespace, error) { - return c.Matcher.GetNamespace(name) -} - -var _ matching.MatchCriteria = &matchCriteria{} - -type matchCriteria struct { - constraints *admissionregistrationv1.MatchResources -} - -// GetParsedNamespaceSelector returns the converted LabelSelector which implements labels.Selector -func (m *matchCriteria) GetParsedNamespaceSelector() (labels.Selector, error) { - return metav1.LabelSelectorAsSelector(m.constraints.NamespaceSelector) -} - -// GetParsedObjectSelector returns the converted LabelSelector which implements labels.Selector -func (m *matchCriteria) GetParsedObjectSelector() (labels.Selector, error) { - return metav1.LabelSelectorAsSelector(m.constraints.ObjectSelector) -} - -// GetMatchResources returns the matchConstraints -func (m *matchCriteria) 
GetMatchResources() admissionregistrationv1.MatchResources { - return *m.constraints -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_source.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_source.go deleted file mode 100644 index 9b2e2146a..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_source.go +++ /dev/null @@ -1,477 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "context" - goerrors "errors" - "fmt" - "sync" - "sync/atomic" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/dynamic/dynamicinformer" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" -) - -type policySource[P runtime.Object, B runtime.Object, E Evaluator] struct { - ctx context.Context - policyInformer generic.Informer[P] - bindingInformer generic.Informer[B] - restMapper meta.RESTMapper - newPolicyAccessor func(P) PolicyAccessor - newBindingAccessor func(B) BindingAccessor - - informerFactory informers.SharedInformerFactory - dynamicClient dynamic.Interface - - compiler func(P) E - - // Currently compiled list of valid/active policy-binding pairs - policies atomic.Pointer[[]PolicyHook[P, B, E]] - // Whether the cache of policies is dirty and needs to be recompiled - policiesDirty atomic.Bool - - lock sync.Mutex - compiledPolicies map[types.NamespacedName]compiledPolicyEntry[E] - - // Temporary until we use the dynamic informer factory - paramsCRDControllers map[schema.GroupVersionKind]*paramInfo -} - -type paramInfo struct { - mapping meta.RESTMapping - - // When the param is changed, or the informer is done being used, the cancel - // func should be called to stop/cleanup the original informer - cancelFunc func() - - // The lister for this param - informer informers.GenericInformer -} - -type compiledPolicyEntry[E Evaluator] struct { - policyVersion string - evaluator E -} - -type PolicyHook[P runtime.Object, B runtime.Object, E Evaluator] struct { - Policy P - Bindings []B - - // ParamInformer is the informer for the param CRD for this policy, or nil if - // there is no param or if there was a configuration error - ParamInformer informers.GenericInformer - ParamScope meta.RESTScope - - Evaluator E - ConfigurationError error -} - -var _ Source[PolicyHook[runtime.Object, runtime.Object, Evaluator]] = &policySource[runtime.Object, runtime.Object, Evaluator]{} - -func NewPolicySource[P runtime.Object, B runtime.Object, E Evaluator]( - policyInformer cache.SharedIndexInformer, - 
bindingInformer cache.SharedIndexInformer, - newPolicyAccessor func(P) PolicyAccessor, - newBindingAccessor func(B) BindingAccessor, - compiler func(P) E, - paramInformerFactory informers.SharedInformerFactory, - dynamicClient dynamic.Interface, - restMapper meta.RESTMapper, -) Source[PolicyHook[P, B, E]] { - res := &policySource[P, B, E]{ - compiler: compiler, - policyInformer: generic.NewInformer[P](policyInformer), - bindingInformer: generic.NewInformer[B](bindingInformer), - compiledPolicies: map[types.NamespacedName]compiledPolicyEntry[E]{}, - newPolicyAccessor: newPolicyAccessor, - newBindingAccessor: newBindingAccessor, - paramsCRDControllers: map[schema.GroupVersionKind]*paramInfo{}, - informerFactory: paramInformerFactory, - dynamicClient: dynamicClient, - restMapper: restMapper, - } - return res -} - -func (s *policySource[P, B, E]) Run(ctx context.Context) error { - if s.ctx != nil { - return fmt.Errorf("policy source already running") - } - - // Wait for initial cache sync of policies and informers before reconciling - // any - if !cache.WaitForNamedCacheSync(fmt.Sprintf("%T", s), ctx.Done(), s.UpstreamHasSynced) { - err := ctx.Err() - if err == nil { - err = fmt.Errorf("initial cache sync for %T failed", s) - } - return err - } - - s.ctx = ctx - - // Perform initial policy compilation after initial list has finished - s.notify() - s.refreshPolicies() - - notifyFuncs := cache.ResourceEventHandlerFuncs{ - AddFunc: func(_ interface{}) { - s.notify() - }, - UpdateFunc: func(_, _ interface{}) { - s.notify() - }, - DeleteFunc: func(_ interface{}) { - s.notify() - }, - } - handle, err := s.policyInformer.AddEventHandler(notifyFuncs) - if err != nil { - return err - } - defer func() { - if err := s.policyInformer.RemoveEventHandler(handle); err != nil { - utilruntime.HandleError(fmt.Errorf("failed to remove policy event handler: %w", err)) - } - }() - - bindingHandle, err := s.bindingInformer.AddEventHandler(notifyFuncs) - if err != nil { - return err - } - defer func() { - if err := s.bindingInformer.RemoveEventHandler(bindingHandle); err != nil { - utilruntime.HandleError(fmt.Errorf("failed to remove binding event handler: %w", err)) - } - }() - - // Start a worker that checks every second to see if policy data is dirty - // and needs to be recompiled - go func() { - // Loop every 1 second until context is cancelled, refreshing policies - wait.Until(s.refreshPolicies, 1*time.Second, ctx.Done()) - }() - - <-ctx.Done() - return nil -} - -func (s *policySource[P, B, E]) UpstreamHasSynced() bool { - return s.policyInformer.HasSynced() && s.bindingInformer.HasSynced() -} - -// HasSynced implements Source. -func (s *policySource[P, B, E]) HasSynced() bool { - // As an invariant we never store `nil` into the atomic list of processed - // policy hooks. If it is nil, then we haven't compiled all the policies - // and stored them yet. - return s.Hooks() != nil -} - -// Hooks implements Source. -func (s *policySource[P, B, E]) Hooks() []PolicyHook[P, B, E] { - res := s.policies.Load() - - // Error case should not happen since evaluation function never - // returns error - if res == nil { - // Not yet synced - return nil - } - - return *res -} - -func (s *policySource[P, B, E]) refreshPolicies() { - if !s.UpstreamHasSynced() { - return - } else if !s.policiesDirty.Swap(false) { - return - } - - // It is ok the cache gets marked dirty again between us clearing the - // flag and us calculating the policies. 
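// A minimal sketch of the dirty-flag refresh pattern used here: informer
// events mark the cache dirty, and a single worker periodically swaps the flag
// and recompiles. The same shape with only the standard library; the interval
// and refresh body are hypothetical.
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

type refresher struct {
	dirty   atomic.Bool
	refresh func()
}

func (r *refresher) notify() { r.dirty.Store(true) }

// run checks the dirty flag on an interval, mirroring the wait.Until loop in
// the policy source.
func (r *refresher) run(ctx context.Context, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// Swap returns the previous value, so a notify() racing with the
			// refresh simply schedules another pass on the next tick.
			if r.dirty.Swap(false) {
				r.refresh()
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	r := &refresher{refresh: func() { fmt.Println("recompiling policies") }}
	go r.run(ctx, time.Second)

	r.notify() // e.g. a policy or binding changed
	<-ctx.Done()
}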
The dirty flag would be marked again, - // and we'd have a no-op after comparing resource versions on the next sync. - klog.Infof("refreshing policies") - policies, err := s.calculatePolicyData() - - // Intentionally store policy list regardless of error. There may be - // an error returned if there was a configuration error in one of the policies, - // but we would still want those policies evaluated - // (for instance to return error on failaction). Or if there was an error - // listing all policies at all, we would want to wipe the list. - s.policies.Store(&policies) - - if err != nil { - // An error was generated while syncing policies. Mark it as dirty again - // so we can retry later - utilruntime.HandleError(fmt.Errorf("encountered error syncing policies: %w. Rescheduling policy sync", err)) - s.notify() - } -} - -func (s *policySource[P, B, E]) notify() { - s.policiesDirty.Store(true) -} - -// calculatePolicyData calculates the list of policies and bindings for each -// policy. If there is an error in generation, it will return the error and -// the partial list of policies that were able to be generated. Policies that -// have an error will have a non-nil ConfigurationError field, but still be -// included in the result. -// -// This function caches the result of the intermediate compilations -func (s *policySource[P, B, E]) calculatePolicyData() ([]PolicyHook[P, B, E], error) { - if !s.UpstreamHasSynced() { - return nil, fmt.Errorf("cannot calculate policy data until upstream has synced") - } - - // Fat-fingered lock that can be made more fine-tuned if required - s.lock.Lock() - defer s.lock.Unlock() - - // Create a local copy of all policies and bindings - policiesToBindings := map[types.NamespacedName][]B{} - bindingList, err := s.bindingInformer.List(labels.Everything()) - if err != nil { - // This should never happen unless types are misconfigured - // (can't use meta.accessor on them) - return nil, err - } - - // Gather a list of all active policy bindings - for _, bindingSpec := range bindingList { - bindingAccessor := s.newBindingAccessor(bindingSpec) - policyKey := bindingAccessor.GetPolicyName() - - // Add this binding to the list of bindings for this policy - policiesToBindings[policyKey] = append(policiesToBindings[policyKey], bindingSpec) - } - - result := make([]PolicyHook[P, B, E], 0, len(bindingList)) - usedParams := map[schema.GroupVersionKind]struct{}{} - var errs []error - for policyKey, bindingSpecs := range policiesToBindings { - var inf generic.NamespacedLister[P] = s.policyInformer - if len(policyKey.Namespace) > 0 { - inf = s.policyInformer.Namespaced(policyKey.Namespace) - } - policySpec, err := inf.Get(policyKey.Name) - if errors.IsNotFound(err) { - // Policy for bindings doesn't exist. This can happen if the policy - // was deleted before the binding, or the binding was created first. - // - // Just skip bindings that refer to non-existent policies - // If the policy is recreated, the cache will be marked dirty and - // this function will run again. - continue - } else if err != nil { - // This should never happen since fetching from a cache should never - // fail and this function checks that the cache was synced before - // even getting to this point. 
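// A minimal sketch of the partial-result error handling used when building the
// hook list: each failure is recorded on its own hook (so a broken policy can
// still fail closed at admission time) and the errors are joined so the caller
// knows to reschedule a sync. The hook type and compile function below are
// hypothetical simplifications.
package main

import (
	"errors"
	"fmt"
)

type hook struct {
	Name               string
	ConfigurationError error
}

// buildHooks keeps every hook it can construct, even the misconfigured ones,
// and returns the joined error alongside the partial result.
func buildHooks(names []string, compile func(string) error) ([]hook, error) {
	var errs []error
	hooks := make([]hook, 0, len(names))
	for _, n := range names {
		err := compile(n)
		hooks = append(hooks, hook{Name: n, ConfigurationError: err})
		if err != nil {
			errs = append(errs, fmt.Errorf("policy %q: %w", n, err))
		}
	}
	return hooks, errors.Join(errs...) // nil when errs is empty
}

func main() {
	compile := func(name string) error {
		if name == "bad-policy" {
			return errors.New("paramKind not found")
		}
		return nil
	}
	hooks, err := buildHooks([]string{"good-policy", "bad-policy"}, compile)
	fmt.Println(len(hooks), err)
}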
- errs = append(errs, err) - continue - } - - var parsedParamKind *schema.GroupVersionKind - policyAccessor := s.newPolicyAccessor(policySpec) - - if paramKind := policyAccessor.GetParamKind(); paramKind != nil { - groupVersion, err := schema.ParseGroupVersion(paramKind.APIVersion) - if err != nil { - errs = append(errs, fmt.Errorf("failed to parse paramKind APIVersion: %w", err)) - continue - } - parsedParamKind = &schema.GroupVersionKind{ - Group: groupVersion.Group, - Version: groupVersion.Version, - Kind: paramKind.Kind, - } - - // TEMPORARY UNTIL WE HAVE SHARED PARAM INFORMERS - usedParams[*parsedParamKind] = struct{}{} - } - - paramInformer, paramScope, configurationError := s.ensureParamsForPolicyLocked(parsedParamKind) - result = append(result, PolicyHook[P, B, E]{ - Policy: policySpec, - Bindings: bindingSpecs, - Evaluator: s.compilePolicyLocked(policySpec), - ParamInformer: paramInformer, - ParamScope: paramScope, - ConfigurationError: configurationError, - }) - - // Should queue a re-sync for policy sync error. If our shared param - // informer can notify us when CRD discovery changes we can remove this - // and just rely on the informer to notify us when the CRDs change - if configurationError != nil { - errs = append(errs, configurationError) - } - } - - // Clean up orphaned policies by replacing the old cache of compiled policies - // (the map of used policies is updated by `compilePolicy`) - for policyKey := range s.compiledPolicies { - if _, wasSeen := policiesToBindings[policyKey]; !wasSeen { - delete(s.compiledPolicies, policyKey) - } - } - - // Clean up orphaned param informers - for paramKind, info := range s.paramsCRDControllers { - if _, wasSeen := usedParams[paramKind]; !wasSeen { - info.cancelFunc() - delete(s.paramsCRDControllers, paramKind) - } - } - - err = nil - if len(errs) > 0 { - err = goerrors.Join(errs...) - } - return result, err -} - -// ensureParamsForPolicyLocked ensures that the informer for the paramKind is -// started and returns the informer and the scope of the paramKind. -// -// Must be called under write lock -func (s *policySource[P, B, E]) ensureParamsForPolicyLocked(paramSource *schema.GroupVersionKind) (informers.GenericInformer, meta.RESTScope, error) { - if paramSource == nil { - return nil, nil, nil - } else if info, ok := s.paramsCRDControllers[*paramSource]; ok { - return info.informer, info.mapping.Scope, nil - } - - mapping, err := s.restMapper.RESTMapping(schema.GroupKind{ - Group: paramSource.Group, - Kind: paramSource.Kind, - }, paramSource.Version) - - if err != nil { - // Failed to resolve. Return error so we retry again (rate limited) - // Save a record of this definition with an evaluator that unconditionally - return nil, nil, fmt.Errorf("failed to find resource referenced by paramKind: '%v'", *paramSource) - } - - // We are not watching this param. Start an informer for it. - instanceContext, instanceCancel := context.WithCancel(s.ctx) - - var informer informers.GenericInformer - - // Try to see if our provided informer factory has an informer for this type. - // We assume the informer is already started, and starts all types associated - // with it. - if genericInformer, err := s.informerFactory.ForResource(mapping.Resource); err == nil { - informer = genericInformer - - // Start the informer - s.informerFactory.Start(instanceContext.Done()) - - } else { - // Dynamic JSON informer fallback. 
- // Cannot use shared dynamic informer since it would be impossible - // to clean CRD informers properly with multiple dependents - // (cannot start ahead of time, and cannot track dependencies via stopCh) - informer = dynamicinformer.NewFilteredDynamicInformer( - s.dynamicClient, - mapping.Resource, - corev1.NamespaceAll, - // Use same interval as is used for k8s typed sharedInformerFactory - // https://github.com/kubernetes/kubernetes/blob/7e0923899fed622efbc8679cca6b000d43633e38/cmd/kube-apiserver/app/server.go#L430 - 10*time.Minute, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - nil, - ) - go informer.Informer().Run(instanceContext.Done()) - } - - klog.Infof("informer started for %v", *paramSource) - ret := ¶mInfo{ - mapping: *mapping, - cancelFunc: instanceCancel, - informer: informer, - } - s.paramsCRDControllers[*paramSource] = ret - return ret.informer, mapping.Scope, nil -} - -// For testing -func (s *policySource[P, B, E]) getParamInformer(param schema.GroupVersionKind) (informers.GenericInformer, meta.RESTScope) { - s.lock.Lock() - defer s.lock.Unlock() - - if info, ok := s.paramsCRDControllers[param]; ok { - return info.informer, info.mapping.Scope - } - - return nil, nil -} - -// compilePolicyLocked compiles the policy and returns the evaluator for it. -// If the policy has not changed since the last compilation, it will return -// the cached evaluator. -// -// Must be called under write lock -func (s *policySource[P, B, E]) compilePolicyLocked(policySpec P) E { - policyMeta, err := meta.Accessor(policySpec) - if err != nil { - // This should not happen if P, and B have ObjectMeta, but - // unfortunately there is no way to express "able to call - // meta.Accessor" as a type constraint - utilruntime.HandleError(err) - var emptyEvaluator E - return emptyEvaluator - } - key := types.NamespacedName{ - Namespace: policyMeta.GetNamespace(), - Name: policyMeta.GetName(), - } - - compiledPolicy, wasCompiled := s.compiledPolicies[key] - - // If the policy or binding has changed since it was last compiled, - // and if there is no configuration error (like a missing param CRD) - // then we recompile - if !wasCompiled || - compiledPolicy.policyVersion != policyMeta.GetResourceVersion() { - - compiledPolicy = compiledPolicyEntry[E]{ - policyVersion: policyMeta.GetResourceVersion(), - evaluator: s.compiler(policySpec), - } - s.compiledPolicies[key] = compiledPolicy - } - - return compiledPolicy.evaluator -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_test_context.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_test_context.go deleted file mode 100644 index 69b19fb2a..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/generic/policy_test_context.go +++ /dev/null @@ -1,639 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package generic - -import ( - "context" - "fmt" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - dynamicfake "k8s.io/client-go/dynamic/fake" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - clienttesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" - "k8s.io/component-base/featuregate" - - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/admission/initializer" - "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/apiserver/pkg/features" -) - -// PolicyTestContext is everything you need to unit test a policy plugin -type PolicyTestContext[P runtime.Object, B runtime.Object, E Evaluator] struct { - context.Context - Plugin *Plugin[PolicyHook[P, B, E]] - Source Source[PolicyHook[P, B, E]] - Start func() error - - scheme *runtime.Scheme - restMapper *meta.DefaultRESTMapper - policyGVR schema.GroupVersionResource - bindingGVR schema.GroupVersionResource - - policyGVK schema.GroupVersionKind - bindingGVK schema.GroupVersionKind - - nativeTracker clienttesting.ObjectTracker - policyAndBindingTracker clienttesting.ObjectTracker - unstructuredTracker clienttesting.ObjectTracker -} - -func NewPolicyTestContext[P, B runtime.Object, E Evaluator]( - newPolicyAccessor func(P) PolicyAccessor, - newBindingAccessor func(B) BindingAccessor, - compileFunc func(P) E, - dispatcher dispatcherFactory[PolicyHook[P, B, E]], - initialObjects []runtime.Object, - paramMappings []meta.RESTMapping, -) (*PolicyTestContext[P, B, E], func(), error) { - var Pexample P - var Bexample B - - // Create a fake resource and kind for the provided policy and binding types - fakePolicyGVR := schema.GroupVersionResource{ - Group: "policy.example.com", - Version: "v1", - Resource: "fakepolicies", - } - fakeBindingGVR := schema.GroupVersionResource{ - Group: "policy.example.com", - Version: "v1", - Resource: "fakebindings", - } - fakePolicyGVK := fakePolicyGVR.GroupVersion().WithKind("FakePolicy") - fakeBindingGVK := fakeBindingGVR.GroupVersion().WithKind("FakeBinding") - - policySourceTestScheme, err := func() (*runtime.Scheme, error) { - scheme := runtime.NewScheme() - - if err := fake.AddToScheme(scheme); err != nil { - return nil, err - } - - scheme.AddKnownTypeWithName(fakePolicyGVK, Pexample) - scheme.AddKnownTypeWithName(fakeBindingGVK, Bexample) - scheme.AddKnownTypeWithName(fakePolicyGVK.GroupVersion().WithKind(fakePolicyGVK.Kind+"List"), &FakeList[P]{}) - scheme.AddKnownTypeWithName(fakeBindingGVK.GroupVersion().WithKind(fakeBindingGVK.Kind+"List"), &FakeList[B]{}) - - for _, mapping := range paramMappings { - // Skip if it is in the scheme already - if scheme.Recognizes(mapping.GroupVersionKind) { - continue - } - scheme.AddKnownTypeWithName(mapping.GroupVersionKind, &unstructured.Unstructured{}) - scheme.AddKnownTypeWithName(mapping.GroupVersionKind.GroupVersion().WithKind(mapping.GroupVersionKind.Kind+"List"), &unstructured.UnstructuredList{}) - } - - return scheme, nil - }() - if err != nil { - return nil, nil, err - } - - fakeRestMapper := func() *meta.DefaultRESTMapper 
{ - res := meta.NewDefaultRESTMapper([]schema.GroupVersion{ - { - Group: "", - Version: "v1", - }, - }) - - res.Add(fakePolicyGVK, meta.RESTScopeRoot) - res.Add(fakeBindingGVK, meta.RESTScopeRoot) - res.Add(corev1.SchemeGroupVersion.WithKind("ConfigMap"), meta.RESTScopeNamespace) - - for _, mapping := range paramMappings { - res.AddSpecific(mapping.GroupVersionKind, mapping.Resource, mapping.Resource, mapping.Scope) - } - - return res - }() - - nativeClient := fake.NewSimpleClientset() - dynamicClient := dynamicfake.NewSimpleDynamicClient(policySourceTestScheme) - fakeInformerFactory := informers.NewSharedInformerFactory(nativeClient, 30*time.Second) - - // Make an object tracker specifically for our policies and bindings - policiesAndBindingsTracker := clienttesting.NewObjectTracker( - policySourceTestScheme, - serializer.NewCodecFactory(policySourceTestScheme).UniversalDecoder()) - - // Make an informer for our policies and bindings - - policyInformer := cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return policiesAndBindingsTracker.List(fakePolicyGVR, fakePolicyGVK, "") - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return policiesAndBindingsTracker.Watch(fakePolicyGVR, "") - }, - }, - Pexample, - 30*time.Second, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - ) - bindingInformer := cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - return policiesAndBindingsTracker.List(fakeBindingGVR, fakeBindingGVK, "") - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return policiesAndBindingsTracker.Watch(fakeBindingGVR, "") - }, - }, - Bexample, - 30*time.Second, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - ) - - var source Source[PolicyHook[P, B, E]] - plugin := NewPlugin[PolicyHook[P, B, E]]( - admission.NewHandler(admission.Connect, admission.Create, admission.Delete, admission.Update), - func(sif informers.SharedInformerFactory, i1 kubernetes.Interface, i2 dynamic.Interface, r meta.RESTMapper) Source[PolicyHook[P, B, E]] { - source = NewPolicySource[P, B, E]( - policyInformer, - bindingInformer, - newPolicyAccessor, - newBindingAccessor, - compileFunc, - sif, - i2, - r, - ) - return source - }, dispatcher) - plugin.SetEnabled(true) - - featureGate := featuregate.NewFeatureGate() - err = featureGate.Add(map[featuregate.Feature]featuregate.FeatureSpec{ - //!TODO: move this to validating specific tests - features.ValidatingAdmissionPolicy: { - Default: true, PreRelease: featuregate.Beta}}) - if err != nil { - return nil, nil, err - } - err = featureGate.SetFromMap(map[string]bool{string(features.ValidatingAdmissionPolicy): true}) - if err != nil { - return nil, nil, err - } - - testContext, testCancel := context.WithCancel(context.Background()) - genericInitializer := initializer.New( - nativeClient, - dynamicClient, - fakeInformerFactory, - fakeAuthorizer{}, - featureGate, - testContext.Done(), - fakeRestMapper, - ) - genericInitializer.Initialize(plugin) - plugin.SetRESTMapper(fakeRestMapper) - - if err := plugin.ValidateInitialization(); err != nil { - testCancel() - return nil, nil, err - } - - res := &PolicyTestContext[P, B, E]{ - Context: testContext, - Plugin: plugin, - Source: source, - - restMapper: fakeRestMapper, - scheme: policySourceTestScheme, - policyGVK: fakePolicyGVK, - bindingGVK: fakeBindingGVK, - policyGVR: 
fakePolicyGVR, - bindingGVR: fakeBindingGVR, - nativeTracker: nativeClient.Tracker(), - policyAndBindingTracker: policiesAndBindingsTracker, - unstructuredTracker: dynamicClient.Tracker(), - } - - for _, obj := range initialObjects { - err := res.updateOne(obj) - if err != nil { - testCancel() - return nil, nil, err - } - } - - res.Start = func() error { - fakeInformerFactory.Start(res.Done()) - go policyInformer.Run(res.Done()) - go bindingInformer.Run(res.Done()) - - if !cache.WaitForCacheSync(res.Done(), res.Source.HasSynced) { - return fmt.Errorf("timed out waiting for initial cache sync") - } - return nil - } - return res, testCancel, nil -} - -// UpdateAndWait updates the given object in the test, or creates it if it doesn't exist -// Depending upon object type, waits afterward until the object is synced -// by the policy source -// -// Be aware the UpdateAndWait will modify the ResourceVersion of the -// provided objects. -func (p *PolicyTestContext[P, B, E]) UpdateAndWait(objects ...runtime.Object) error { - return p.update(true, objects...) -} - -// Update updates the given object in the test, or creates it if it doesn't exist -// -// Be aware the Update will modify the ResourceVersion of the -// provided objects. -func (p *PolicyTestContext[P, B, E]) Update(objects ...runtime.Object) error { - return p.update(false, objects...) -} - -// Objects the given object in the test, or creates it if it doesn't exist -// Depending upon object type, waits afterward until the object is synced -// by the policy source -func (p *PolicyTestContext[P, B, E]) update(wait bool, objects ...runtime.Object) error { - for _, object := range objects { - if err := p.updateOne(object); err != nil { - return err - } - } - - if wait { - timeoutCtx, timeoutCancel := context.WithTimeout(p, 3*time.Second) - defer timeoutCancel() - - for _, object := range objects { - if err := p.WaitForReconcile(timeoutCtx, object); err != nil { - return fmt.Errorf("error waiting for reconcile of %v: %v", object, err) - } - } - } - return nil -} - -// Depending upon object type, waits afterward until the object is synced -// by the policy source. Note that policies that are not bound are skipped, -// so you should not try to wait for an unbound policy. Create both the binding -// and policy, then wait. 
-func (p *PolicyTestContext[P, B, E]) WaitForReconcile(timeoutCtx context.Context, object runtime.Object) error { - if !p.Source.HasSynced() { - return nil - } - - objectMeta, err := meta.Accessor(object) - if err != nil { - return err - } - - objectGVK, _, err := p.inferGVK(object) - if err != nil { - return err - } - - switch objectGVK { - case p.policyGVK: - return wait.PollUntilContextCancel(timeoutCtx, 100*time.Millisecond, true, func(ctx context.Context) (done bool, err error) { - policies := p.Source.Hooks() - for _, policy := range policies { - policyMeta, err := meta.Accessor(policy.Policy) - if err != nil { - return true, err - } else if policyMeta.GetName() == objectMeta.GetName() && policyMeta.GetResourceVersion() == objectMeta.GetResourceVersion() { - return true, nil - } - } - return false, nil - }) - case p.bindingGVK: - return wait.PollUntilContextCancel(timeoutCtx, 100*time.Millisecond, true, func(ctx context.Context) (done bool, err error) { - policies := p.Source.Hooks() - for _, policy := range policies { - for _, binding := range policy.Bindings { - bindingMeta, err := meta.Accessor(binding) - if err != nil { - return true, err - } else if bindingMeta.GetName() == objectMeta.GetName() && bindingMeta.GetResourceVersion() == objectMeta.GetResourceVersion() { - return true, nil - } - } - } - return false, nil - }) - - default: - // Do nothing, params are visible immediately - // Loop until one of the params is visible via get of the param informer - return wait.PollUntilContextCancel(timeoutCtx, 100*time.Millisecond, true, func(ctx context.Context) (done bool, err error) { - informer, scope := p.Source.(*policySource[P, B, E]).getParamInformer(objectGVK) - if informer == nil { - // Informer does not exist yet, keep waiting for sync - return false, nil - } - - if !cache.WaitForCacheSync(timeoutCtx.Done(), informer.Informer().HasSynced) { - return false, fmt.Errorf("timed out waiting for cache sync of param informer") - } - - var lister cache.GenericNamespaceLister = informer.Lister() - if scope == meta.RESTScopeNamespace { - lister = informer.Lister().ByNamespace(objectMeta.GetNamespace()) - } - - fetched, err := lister.Get(objectMeta.GetName()) - if err != nil { - if errors.IsNotFound(err) { - return false, nil - } - return true, err - } - - // Ensure RV matches - fetchedMeta, err := meta.Accessor(fetched) - if err != nil { - return true, err - } else if fetchedMeta.GetResourceVersion() != objectMeta.GetResourceVersion() { - return false, nil - } - - return true, nil - }) - } -} - -func (p *PolicyTestContext[P, B, E]) waitForDelete(ctx context.Context, objectGVK schema.GroupVersionKind, name types.NamespacedName) error { - srce := p.Source.(*policySource[P, B, E]) - - return wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(ctx context.Context) (done bool, err error) { - switch objectGVK { - case p.policyGVK: - for _, hook := range p.Source.Hooks() { - accessor := srce.newPolicyAccessor(hook.Policy) - if accessor.GetName() == name.Name && accessor.GetNamespace() == name.Namespace { - return false, nil - } - } - - return true, nil - case p.bindingGVK: - for _, hook := range p.Source.Hooks() { - for _, binding := range hook.Bindings { - accessor := srce.newBindingAccessor(binding) - if accessor.GetName() == name.Name && accessor.GetNamespace() == name.Namespace { - return false, nil - } - } - } - return true, nil - default: - // Do nothing, params are visible immediately - // Loop until one of the params is visible via get of the param informer - informer, 
scope := p.Source.(*policySource[P, B, E]).getParamInformer(objectGVK) - if informer == nil { - return true, nil - } - - var lister cache.GenericNamespaceLister = informer.Lister() - if scope == meta.RESTScopeNamespace { - lister = informer.Lister().ByNamespace(name.Namespace) - } - - _, err = lister.Get(name.Name) - if err != nil { - if errors.IsNotFound(err) { - return true, nil - } - return false, err - } - return false, nil - } - }) -} - -func (p *PolicyTestContext[P, B, E]) updateOne(object runtime.Object) error { - objectMeta, err := meta.Accessor(object) - if err != nil { - return err - } - objectMeta.SetResourceVersion(string(uuid.NewUUID())) - objectGVK, gvr, err := p.inferGVK(object) - if err != nil { - return err - } - - switch objectGVK { - case p.policyGVK: - err := p.policyAndBindingTracker.Update(p.policyGVR, object, objectMeta.GetNamespace()) - if errors.IsNotFound(err) { - err = p.policyAndBindingTracker.Create(p.policyGVR, object, objectMeta.GetNamespace()) - } - - return err - case p.bindingGVK: - err := p.policyAndBindingTracker.Update(p.bindingGVR, object, objectMeta.GetNamespace()) - if errors.IsNotFound(err) { - err = p.policyAndBindingTracker.Create(p.bindingGVR, object, objectMeta.GetNamespace()) - } - - return err - default: - if _, ok := object.(*unstructured.Unstructured); ok { - if err := p.unstructuredTracker.Create(gvr, object, objectMeta.GetNamespace()); err != nil { - if errors.IsAlreadyExists(err) { - return p.unstructuredTracker.Update(gvr, object, objectMeta.GetNamespace()) - } - return err - } - return nil - } else if err := p.nativeTracker.Create(gvr, object, objectMeta.GetNamespace()); err != nil { - if errors.IsAlreadyExists(err) { - return p.nativeTracker.Update(gvr, object, objectMeta.GetNamespace()) - } - } - return nil - } -} - -// Depending upon object type, waits afterward until the object is synced -// by the policy source -func (p *PolicyTestContext[P, B, E]) DeleteAndWait(object ...runtime.Object) error { - for _, object := range object { - if err := p.deleteOne(object); err != nil && !errors.IsNotFound(err) { - return err - } - } - - timeoutCtx, timeoutCancel := context.WithTimeout(p, 3*time.Second) - defer timeoutCancel() - - for _, object := range object { - accessor, err := meta.Accessor(object) - if err != nil { - return err - } - - objectGVK, _, err := p.inferGVK(object) - if err != nil { - return err - } - - if err := p.waitForDelete( - timeoutCtx, - objectGVK, - types.NamespacedName{Name: accessor.GetName(), Namespace: accessor.GetNamespace()}); err != nil { - return err - } - } - return nil -} - -func (p *PolicyTestContext[P, B, E]) deleteOne(object runtime.Object) error { - objectMeta, err := meta.Accessor(object) - if err != nil { - return err - } - objectMeta.SetResourceVersion(string(uuid.NewUUID())) - objectGVK, gvr, err := p.inferGVK(object) - if err != nil { - return err - } - - switch objectGVK { - case p.policyGVK: - return p.policyAndBindingTracker.Delete(p.policyGVR, objectMeta.GetNamespace(), objectMeta.GetName()) - case p.bindingGVK: - return p.policyAndBindingTracker.Delete(p.bindingGVR, objectMeta.GetNamespace(), objectMeta.GetName()) - default: - if _, ok := object.(*unstructured.Unstructured); ok { - return p.unstructuredTracker.Delete(gvr, objectMeta.GetNamespace(), objectMeta.GetName()) - } - return p.nativeTracker.Delete(gvr, objectMeta.GetNamespace(), objectMeta.GetName()) - } -} - -func (p *PolicyTestContext[P, B, E]) Dispatch( - new, old runtime.Object, - operation admission.Operation, -) error { - if old == 
nil && new == nil { - return fmt.Errorf("both old and new objects cannot be nil") - } - - nonNilObject := new - if nonNilObject == nil { - nonNilObject = old - } - - gvk, gvr, err := p.inferGVK(nonNilObject) - if err != nil { - return err - } - - nonNilMeta, err := meta.Accessor(nonNilObject) - if err != nil { - return err - } - - return p.Plugin.Dispatch( - p, - admission.NewAttributesRecord( - new, - old, - gvk, - nonNilMeta.GetName(), - nonNilMeta.GetNamespace(), - gvr, - "", - operation, - nil, - false, - nil, - ), admission.NewObjectInterfacesFromScheme(p.scheme)) -} - -func (p *PolicyTestContext[P, B, E]) inferGVK(object runtime.Object) (schema.GroupVersionKind, schema.GroupVersionResource, error) { - objectGVK := object.GetObjectKind().GroupVersionKind() - if objectGVK.Empty() { - // If the object doesn't have a GVK, ask the schema for preferred GVK - knownKinds, _, err := p.scheme.ObjectKinds(object) - if err != nil { - return schema.GroupVersionKind{}, schema.GroupVersionResource{}, err - } else if len(knownKinds) == 0 { - return schema.GroupVersionKind{}, schema.GroupVersionResource{}, fmt.Errorf("no known GVKs for object in schema: %T", object) - } - toTake := 0 - - // Prefer GVK if it is our fake policy or binding - for i, knownKind := range knownKinds { - if knownKind == p.policyGVK || knownKind == p.bindingGVK { - toTake = i - break - } - } - - objectGVK = knownKinds[toTake] - } - - // Make sure GVK is known to the fake rest mapper. To prevent cryptic error - mapping, err := p.restMapper.RESTMapping(objectGVK.GroupKind(), objectGVK.Version) - if err != nil { - return schema.GroupVersionKind{}, schema.GroupVersionResource{}, err - } - return objectGVK, mapping.Resource, nil -} - -type FakeList[T runtime.Object] struct { - metav1.TypeMeta - metav1.ListMeta - Items []T -} - -func (fl *FakeList[P]) DeepCopyObject() runtime.Object { - copiedItems := make([]P, len(fl.Items)) - for i, item := range fl.Items { - copiedItems[i] = item.DeepCopyObject().(P) - } - return &FakeList[P]{ - TypeMeta: fl.TypeMeta, - ListMeta: fl.ListMeta, - Items: copiedItems, - } -} - -type fakeAuthorizer struct{} - -func (f fakeAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) { - return authorizer.DecisionAllow, "", nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go deleted file mode 100644 index 4334c0dd8..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/controller.go +++ /dev/null @@ -1,293 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package generic - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/cache/synctrack" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" -) - -var _ Controller[runtime.Object] = &controller[runtime.Object]{} - -type controller[T runtime.Object] struct { - informer Informer[T] - queue workqueue.RateLimitingInterface - - // Returns an error if there was a transient error during reconciliation - // and the object should be tried again later. - reconciler func(namespace, name string, newObj T) error - - options ControllerOptions - - // must hold a func() bool or nil - notificationsDelivered atomic.Value - - hasProcessed synctrack.AsyncTracker[string] -} - -type ControllerOptions struct { - Name string - Workers uint -} - -func (c *controller[T]) Informer() Informer[T] { - return c.informer -} - -func NewController[T runtime.Object]( - informer Informer[T], - reconciler func(namepace, name string, newObj T) error, - options ControllerOptions, -) Controller[T] { - if options.Workers == 0 { - options.Workers = 2 - } - - if len(options.Name) == 0 { - options.Name = fmt.Sprintf("%T-controller", *new(T)) - } - - c := &controller[T]{ - options: options, - informer: informer, - reconciler: reconciler, - queue: nil, - } - c.hasProcessed.UpstreamHasSynced = func() bool { - f := c.notificationsDelivered.Load() - if f == nil { - return false - } - return f.(func() bool)() - } - return c -} - -// Runs the controller and returns an error explaining why running was stopped. -// Reconciliation ends as soon as the context completes. If there are events -// waiting to be processed at that itme, they will be dropped. -func (c *controller[T]) Run(ctx context.Context) error { - klog.Infof("starting %s", c.options.Name) - defer klog.Infof("stopping %s", c.options.Name) - - c.queue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), c.options.Name) - - // Forcefully shutdown workqueue. Drop any enqueued items. - // Important to do this in a `defer` at the start of `Run`. - // Otherwise, if there are any early returns without calling this, we - // would never shut down the workqueue - defer c.queue.ShutDown() - - enqueue := func(obj interface{}, isInInitialList bool) { - var key string - var err error - if key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err != nil { - utilruntime.HandleError(err) - return - } - if isInInitialList { - c.hasProcessed.Start(key) - } - - c.queue.Add(key) - } - - registration, err := c.informer.AddEventHandler(cache.ResourceEventHandlerDetailedFuncs{ - AddFunc: enqueue, - UpdateFunc: func(oldObj, newObj interface{}) { - oldMeta, err1 := meta.Accessor(oldObj) - newMeta, err2 := meta.Accessor(newObj) - - if err1 != nil || err2 != nil { - if err1 != nil { - utilruntime.HandleError(err1) - } - - if err2 != nil { - utilruntime.HandleError(err2) - } - return - } else if oldMeta.GetResourceVersion() == newMeta.GetResourceVersion() { - if len(oldMeta.GetResourceVersion()) == 0 { - klog.Warningf("%v throwing out update with empty RV. 
this is likely to happen if a test did not supply a resource version on an updated object", c.options.Name) - } - return - } - - enqueue(newObj, false) - }, - DeleteFunc: func(obj interface{}) { - // Enqueue - enqueue(obj, false) - }, - }) - - // Error might be raised if informer was started and stopped already - if err != nil { - return err - } - - c.notificationsDelivered.Store(registration.HasSynced) - - // Make sure event handler is removed from informer in case return early from - // an error - defer func() { - c.notificationsDelivered.Store(func() bool { return false }) - // Remove event handler and Handle Error here. Error should only be raised - // for improper usage of event handler API. - if err := c.informer.RemoveEventHandler(registration); err != nil { - utilruntime.HandleError(err) - } - }() - - // Wait for initial cache list to complete before beginning to reconcile - // objects. - if !cache.WaitForNamedCacheSync(c.options.Name, ctx.Done(), c.informer.HasSynced) { - // ctx cancelled during cache sync. return early - err := ctx.Err() - if err == nil { - // if context wasnt cancelled then the sync failed for another reason - err = errors.New("cache sync failed") - } - return err - } - - waitGroup := sync.WaitGroup{} - - for i := uint(0); i < c.options.Workers; i++ { - waitGroup.Add(1) - go func() { - defer waitGroup.Done() - wait.Until(c.runWorker, time.Second, ctx.Done()) - }() - } - - klog.Infof("Started %v workers for %v", c.options.Workers, c.options.Name) - - // Wait for context cancel. - <-ctx.Done() - - // Forcefully shutdown workqueue. Drop any enqueued items. - c.queue.ShutDown() - - // Workqueue shutdown signals for workers to stop. Wait for all workers to - // clean up - waitGroup.Wait() - - // Only way for workers to ever stop is for caller to cancel the context - return ctx.Err() -} - -func (c *controller[T]) HasSynced() bool { - return c.hasProcessed.HasSynced() -} - -func (c *controller[T]) runWorker() { - for { - key, shutdown := c.queue.Get() - if shutdown { - return - } - - // We wrap this block in a func so we can defer c.workqueue.Done. - err := func(obj interface{}) error { - // We call Done here so the workqueue knows we have finished - // processing this item. We also must remember to call Forget if we - // do not want this work item being re-queued. For example, we do - // not call Forget if a transient error occurs, instead the item is - // put back on the workqueue and attempted again after a back-off - // period. - defer c.queue.Done(obj) - var key string - var ok bool - // We expect strings to come off the workqueue. These are of the - // form namespace/name. We do this as the delayed nature of the - // workqueue means the items in the informer cache may actually be - // more up to date that when the item was initially put onto the - // workqueue. - if key, ok = obj.(string); !ok { - // How did an incorrectly formatted key get in the workqueue? - // Done is sufficient. (Forget resets rate limiter for the key, - // but the key is invalid so there is no point in doing that) - return fmt.Errorf("expected string in workqueue but got %#v", obj) - } - defer c.hasProcessed.Finished(key) - - if err := c.reconcile(key); err != nil { - // Put the item back on the workqueue to handle any transient errors. 
- c.queue.AddRateLimited(key) - return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) - } - // Finally, if no error occurs we Forget this item so it is allowed - // to be re-enqueued without a long rate limit - c.queue.Forget(obj) - klog.V(4).Infof("syncAdmissionPolicy(%q)", key) - return nil - }(key) - - if err != nil { - utilruntime.HandleError(err) - } - } -} - -func (c *controller[T]) reconcile(key string) error { - var newObj T - var err error - var namespace string - var name string - var lister NamespacedLister[T] - - // Convert the namespace/name string into a distinct namespace and name - namespace, name, err = cache.SplitMetaNamespaceKey(key) - if err != nil { - utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key)) - return nil - } - - if len(namespace) > 0 { - lister = c.informer.Namespaced(namespace) - } else { - lister = c.informer - } - - newObj, err = lister.Get(name) - if err != nil { - if !kerrors.IsNotFound(err) { - return err - } - - // Deleted object. Inform reconciler with empty - } - - return c.reconciler(namespace, name, newObj) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/doc.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/doc.go deleted file mode 100644 index 2acfad989..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package generic contains a typed wrapper over cache SharedIndexInformer -// and Lister (maybe eventually should have a home there?) -// -// This interface is being experimented with as an easier way to write controllers -// with a bit less boilerplate. -// -// Informer/Lister classes are thin wrappers providing a type-safe interface -// over regular interface{}-based Informers/Listers -// -// Controller[T] provides a reusable way to reconcile objects out of an informer -// using the tried and true controller design pattern found all over k8s -// codebase based upon syncFunc/reconcile -package generic diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/informer.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/informer.go deleted file mode 100644 index acb6316ec..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/informer.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/cache" -) - -var _ Informer[runtime.Object] = informer[runtime.Object]{} - -type informer[T runtime.Object] struct { - cache.SharedIndexInformer - lister[T] -} - -// Creates a generic informer around a type-erased cache.SharedIndexInformer -// It is incumbent on the caller to ensure that the generic type argument is -// consistent with the type of the objects stored inside the SharedIndexInformer -// as they will be casted. -func NewInformer[T runtime.Object](informe cache.SharedIndexInformer) Informer[T] { - return informer[T]{ - SharedIndexInformer: informe, - lister: NewLister[T](informe.GetIndexer()), - } -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/interface.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/interface.go deleted file mode 100644 index a17821e77..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/interface.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "context" - - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/cache" -) - -type Controller[T runtime.Object] interface { - // Meant to be run inside a goroutine - // Waits for and reacts to changes in whatever type the controller - // is concerned with. - // - // Returns an error always non-nil explaining why the worker stopped - Run(ctx context.Context) error - - // Retrieves the informer used to back this controller - Informer() Informer[T] - - // Returns true if the informer cache has synced, and all the objects from - // the initial list have been reconciled at least once. - HasSynced() bool -} - -type NamespacedLister[T any] interface { - // List lists all ValidationRuleSets in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []T, err error) - // Get retrieves the ValidationRuleSet from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (T, error) -} - -type Informer[T any] interface { - cache.SharedIndexInformer - Lister[T] -} - -// Lister[T] helps list Ts. -// All objects returned here must be treated as read-only. 
-type Lister[T any] interface { - NamespacedLister[T] - Namespaced(namespace string) NamespacedLister[T] -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/lister.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/lister.go deleted file mode 100644 index aa6b09032..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic/lister.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "fmt" - "net/http" - - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/cache" -) - -var _ Lister[runtime.Object] = lister[runtime.Object]{} - -type namespacedLister[T runtime.Object] struct { - indexer cache.Indexer - namespace string -} - -func (w namespacedLister[T]) List(selector labels.Selector) (ret []T, err error) { - err = cache.ListAllByNamespace(w.indexer, w.namespace, selector, func(m interface{}) { - ret = append(ret, m.(T)) - }) - return ret, err -} - -func (w namespacedLister[T]) Get(name string) (T, error) { - var result T - - obj, exists, err := w.indexer.GetByKey(w.namespace + "/" + name) - if err != nil { - return result, err - } - if !exists { - return result, &kerrors.StatusError{ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Code: http.StatusNotFound, - Reason: metav1.StatusReasonNotFound, - Message: fmt.Sprintf("%s not found", name), - }} - } - result = obj.(T) - return result, nil -} - -type lister[T runtime.Object] struct { - indexer cache.Indexer -} - -func (w lister[T]) List(selector labels.Selector) (ret []T, err error) { - err = cache.ListAll(w.indexer, selector, func(m interface{}) { - ret = append(ret, m.(T)) - }) - return ret, err -} - -func (w lister[T]) Get(name string) (T, error) { - var result T - - obj, exists, err := w.indexer.GetByKey(name) - if err != nil { - return result, err - } - if !exists { - // kerrors.StatusNotFound requires a GroupResource we cannot provide - return result, &kerrors.StatusError{ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Code: http.StatusNotFound, - Reason: metav1.StatusReasonNotFound, - Message: fmt.Sprintf("%s not found", name), - }} - } - result = obj.(T) - return result, nil -} - -func (w lister[T]) Namespaced(namespace string) NamespacedLister[T] { - return namespacedLister[T]{namespace: namespace, indexer: w.indexer} -} - -func NewLister[T runtime.Object](indexer cache.Indexer) lister[T] { - return lister[T]{indexer: indexer} -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/matching/matching.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/matching/matching.go deleted file mode 100644 index eebe76943..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/matching/matching.go +++ /dev/null @@ -1,200 +0,0 
@@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package matching - -import ( - "fmt" - - v1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/admission" - "k8s.io/client-go/kubernetes" - listersv1 "k8s.io/client-go/listers/core/v1" - - "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace" - "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object" - "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules" -) - -type MatchCriteria interface { - namespace.NamespaceSelectorProvider - object.ObjectSelectorProvider - - GetMatchResources() v1.MatchResources -} - -// Matcher decides if a request matches against matchCriteria -type Matcher struct { - namespaceMatcher *namespace.Matcher - objectMatcher *object.Matcher -} - -func (m *Matcher) GetNamespace(name string) (*corev1.Namespace, error) { - return m.namespaceMatcher.GetNamespace(name) -} - -// NewMatcher initialize the matcher with dependencies requires -func NewMatcher( - namespaceLister listersv1.NamespaceLister, - client kubernetes.Interface, -) *Matcher { - return &Matcher{ - namespaceMatcher: &namespace.Matcher{ - NamespaceLister: namespaceLister, - Client: client, - }, - objectMatcher: &object.Matcher{}, - } -} - -// ValidateInitialization verify if the matcher is ready before use -func (m *Matcher) ValidateInitialization() error { - if err := m.namespaceMatcher.Validate(); err != nil { - return fmt.Errorf("namespaceMatcher is not properly setup: %v", err) - } - return nil -} - -func (m *Matcher) Matches(attr admission.Attributes, o admission.ObjectInterfaces, criteria MatchCriteria) (bool, schema.GroupVersionResource, schema.GroupVersionKind, error) { - matches, matchNsErr := m.namespaceMatcher.MatchNamespaceSelector(criteria, attr) - // Should not return an error here for policy which do not apply to the request, even if err is an unexpected scenario. - if !matches && matchNsErr == nil { - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, nil - } - - matches, matchObjErr := m.objectMatcher.MatchObjectSelector(criteria, attr) - // Should not return an error here for policy which do not apply to the request, even if err is an unexpected scenario. 
- if !matches && matchObjErr == nil { - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, nil - } - - matchResources := criteria.GetMatchResources() - matchPolicy := matchResources.MatchPolicy - if isExcluded, _, _, err := matchesResourceRules(matchResources.ExcludeResourceRules, matchPolicy, attr, o); isExcluded || err != nil { - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, err - } - - var ( - isMatch bool - matchResource schema.GroupVersionResource - matchKind schema.GroupVersionKind - matchErr error - ) - if len(matchResources.ResourceRules) == 0 { - isMatch = true - matchKind = attr.GetKind() - matchResource = attr.GetResource() - } else { - isMatch, matchResource, matchKind, matchErr = matchesResourceRules(matchResources.ResourceRules, matchPolicy, attr, o) - } - if matchErr != nil { - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, matchErr - } - if !isMatch { - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, nil - } - - // now that we know this applies to this request otherwise, if there were selector errors, return them - if matchNsErr != nil { - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, matchNsErr - } - if matchObjErr != nil { - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, matchObjErr - } - - return true, matchResource, matchKind, nil -} - -func matchesResourceRules(namedRules []v1.NamedRuleWithOperations, matchPolicy *v1.MatchPolicyType, attr admission.Attributes, o admission.ObjectInterfaces) (bool, schema.GroupVersionResource, schema.GroupVersionKind, error) { - matchKind := attr.GetKind() - matchResource := attr.GetResource() - - for _, namedRule := range namedRules { - rule := v1.RuleWithOperations(namedRule.RuleWithOperations) - ruleMatcher := rules.Matcher{ - Rule: rule, - Attr: attr, - } - if !ruleMatcher.Matches() { - continue - } - // an empty name list always matches - if len(namedRule.ResourceNames) == 0 { - return true, matchResource, matchKind, nil - } - // TODO: GetName() can return an empty string if the user is relying on - // the API server to generate the name... 
figure out what to do for this edge case - name := attr.GetName() - for _, matchedName := range namedRule.ResourceNames { - if name == matchedName { - return true, matchResource, matchKind, nil - } - } - } - - // if match policy is undefined or exact, don't perform fuzzy matching - // note that defaulting to fuzzy matching is set by the API - if matchPolicy == nil || *matchPolicy == v1.Exact { - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, nil - } - - attrWithOverride := &attrWithResourceOverride{Attributes: attr} - equivalents := o.GetEquivalentResourceMapper().EquivalentResourcesFor(attr.GetResource(), attr.GetSubresource()) - for _, namedRule := range namedRules { - for _, equivalent := range equivalents { - if equivalent == attr.GetResource() { - // we have already checked the original resource - continue - } - attrWithOverride.resource = equivalent - rule := v1.RuleWithOperations(namedRule.RuleWithOperations) - m := rules.Matcher{ - Rule: rule, - Attr: attrWithOverride, - } - if !m.Matches() { - continue - } - matchKind = o.GetEquivalentResourceMapper().KindFor(equivalent, attr.GetSubresource()) - if matchKind.Empty() { - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, fmt.Errorf("unable to convert to %v: unknown kind", equivalent) - } - // an empty name list always matches - if len(namedRule.ResourceNames) == 0 { - return true, equivalent, matchKind, nil - } - - // TODO: GetName() can return an empty string if the user is relying on - // the API server to generate the name... figure out what to do for this edge case - name := attr.GetName() - for _, matchedName := range namedRule.ResourceNames { - if name == matchedName { - return true, equivalent, matchKind, nil - } - } - } - } - return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, nil -} - -type attrWithResourceOverride struct { - admission.Attributes - resource schema.GroupVersionResource -} - -func (a *attrWithResourceOverride) GetResource() schema.GroupVersionResource { return a.resource } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/accessor.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/accessor.go deleted file mode 100644 index 97cef0914..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/accessor.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validating - -import ( - "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apiserver/pkg/admission/plugin/policy/generic" -) - -func NewValidatingAdmissionPolicyAccessor(obj *v1.ValidatingAdmissionPolicy) generic.PolicyAccessor { - return &validatingAdmissionPolicyAccessor{ - ValidatingAdmissionPolicy: obj, - } -} - -func NewValidatingAdmissionPolicyBindingAccessor(obj *v1.ValidatingAdmissionPolicyBinding) generic.BindingAccessor { - return &validatingAdmissionPolicyBindingAccessor{ - ValidatingAdmissionPolicyBinding: obj, - } -} - -type validatingAdmissionPolicyAccessor struct { - *v1.ValidatingAdmissionPolicy -} - -func (v *validatingAdmissionPolicyAccessor) GetNamespace() string { - return v.Namespace -} - -func (v *validatingAdmissionPolicyAccessor) GetName() string { - return v.Name -} - -func (v *validatingAdmissionPolicyAccessor) GetParamKind() *v1.ParamKind { - return v.Spec.ParamKind -} - -func (v *validatingAdmissionPolicyAccessor) GetMatchConstraints() *v1.MatchResources { - return v.Spec.MatchConstraints -} - -type validatingAdmissionPolicyBindingAccessor struct { - *v1.ValidatingAdmissionPolicyBinding -} - -func (v *validatingAdmissionPolicyBindingAccessor) GetNamespace() string { - return v.Namespace -} - -func (v *validatingAdmissionPolicyBindingAccessor) GetName() string { - return v.Name -} - -func (v *validatingAdmissionPolicyBindingAccessor) GetPolicyName() types.NamespacedName { - return types.NamespacedName{ - Namespace: "", - Name: v.Spec.PolicyName, - } -} - -func (v *validatingAdmissionPolicyBindingAccessor) GetMatchResources() *v1.MatchResources { - return v.Spec.MatchResources -} - -func (v *validatingAdmissionPolicyBindingAccessor) GetParamRef() *v1.ParamRef { - return v.Spec.ParamRef -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/caching_authorizer.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/caching_authorizer.go deleted file mode 100644 index fbefd595e..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/caching_authorizer.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validating - -import ( - "context" - "encoding/json" - "sort" - "strings" - - "k8s.io/apiserver/pkg/authentication/user" - "k8s.io/apiserver/pkg/authorization/authorizer" -) - -type authzResult struct { - authorized authorizer.Decision - reason string - err error -} - -type cachingAuthorizer struct { - authorizer authorizer.Authorizer - decisions map[string]authzResult -} - -func newCachingAuthorizer(in authorizer.Authorizer) authorizer.Authorizer { - return &cachingAuthorizer{ - authorizer: in, - decisions: make(map[string]authzResult), - } -} - -// The attribute accessors known to cache key construction. If this fails to compile, the cache -// implementation may need to be updated. 
-var _ authorizer.Attributes = (interface { - GetUser() user.Info - GetVerb() string - IsReadOnly() bool - GetNamespace() string - GetResource() string - GetSubresource() string - GetName() string - GetAPIGroup() string - GetAPIVersion() string - IsResourceRequest() bool - GetPath() string -})(nil) - -// The user info accessors known to cache key construction. If this fails to compile, the cache -// implementation may need to be updated. -var _ user.Info = (interface { - GetName() string - GetUID() string - GetGroups() []string - GetExtra() map[string][]string -})(nil) - -// Authorize returns an authorization decision by delegating to another Authorizer. If an equivalent -// check has already been performed, a cached result is returned. Not safe for concurrent use. -func (ca *cachingAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) { - serializableAttributes := authorizer.AttributesRecord{ - Verb: a.GetVerb(), - Namespace: a.GetNamespace(), - APIGroup: a.GetAPIGroup(), - APIVersion: a.GetAPIVersion(), - Resource: a.GetResource(), - Subresource: a.GetSubresource(), - Name: a.GetName(), - ResourceRequest: a.IsResourceRequest(), - Path: a.GetPath(), - } - - if u := a.GetUser(); u != nil { - di := &user.DefaultInfo{ - Name: u.GetName(), - UID: u.GetUID(), - } - - // Differently-ordered groups or extras could cause otherwise-equivalent checks to - // have distinct cache keys. - if groups := u.GetGroups(); len(groups) > 0 { - di.Groups = make([]string, len(groups)) - copy(di.Groups, groups) - sort.Strings(di.Groups) - } - - if extra := u.GetExtra(); len(extra) > 0 { - di.Extra = make(map[string][]string, len(extra)) - for k, vs := range extra { - vdupe := make([]string, len(vs)) - copy(vdupe, vs) - sort.Strings(vdupe) - di.Extra[k] = vdupe - } - } - - serializableAttributes.User = di - } - - var b strings.Builder - if err := json.NewEncoder(&b).Encode(serializableAttributes); err != nil { - return authorizer.DecisionNoOpinion, "", err - } - key := b.String() - - if cached, ok := ca.decisions[key]; ok { - return cached.authorized, cached.reason, cached.err - } - - authorized, reason, err := ca.authorizer.Authorize(ctx, a) - - ca.decisions[key] = authzResult{ - authorized: authorized, - reason: reason, - err: err, - } - - return authorized, reason, err -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go deleted file mode 100644 index edf5a7886..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/dispatcher.go +++ /dev/null @@ -1,414 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validating - -import ( - "context" - "errors" - "fmt" - "strings" - - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - v1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - utiljson "k8s.io/apimachinery/pkg/util/json" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/admission/plugin/policy/generic" - celmetrics "k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics" - celconfig "k8s.io/apiserver/pkg/apis/cel" - "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/apiserver/pkg/warning" - "k8s.io/klog/v2" -) - -type dispatcher struct { - matcher generic.PolicyMatcher - authz authorizer.Authorizer -} - -var _ generic.Dispatcher[PolicyHook] = &dispatcher{} - -func NewDispatcher( - authorizer authorizer.Authorizer, - matcher generic.PolicyMatcher, -) generic.Dispatcher[PolicyHook] { - return &dispatcher{ - matcher: matcher, - authz: authorizer, - } -} - -// contains the cel PolicyDecisions along with the ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding -// that determined the decision -type policyDecisionWithMetadata struct { - PolicyDecision - Definition *admissionregistrationv1.ValidatingAdmissionPolicy - Binding *admissionregistrationv1.ValidatingAdmissionPolicyBinding -} - -// Dispatch implements generic.Dispatcher. -func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces, hooks []PolicyHook) error { - - var deniedDecisions []policyDecisionWithMetadata - - addConfigError := func(err error, definition *admissionregistrationv1.ValidatingAdmissionPolicy, binding *admissionregistrationv1.ValidatingAdmissionPolicyBinding) { - // we always default the FailurePolicy if it is unset and validate it in API level - var policy admissionregistrationv1.FailurePolicyType - if definition.Spec.FailurePolicy == nil { - policy = admissionregistrationv1.Fail - } else { - policy = *definition.Spec.FailurePolicy - } - - // apply FailurePolicy specified in ValidatingAdmissionPolicy, the default would be Fail - switch policy { - case admissionregistrationv1.Ignore: - // TODO: add metrics for ignored error here - return - case admissionregistrationv1.Fail: - var message string - if binding == nil { - message = fmt.Errorf("failed to configure policy: %w", err).Error() - } else { - message = fmt.Errorf("failed to configure binding: %w", err).Error() - } - deniedDecisions = append(deniedDecisions, policyDecisionWithMetadata{ - PolicyDecision: PolicyDecision{ - Action: ActionDeny, - Message: message, - }, - Definition: definition, - Binding: binding, - }) - default: - deniedDecisions = append(deniedDecisions, policyDecisionWithMetadata{ - PolicyDecision: PolicyDecision{ - Action: ActionDeny, - Message: fmt.Errorf("unrecognized failure policy: '%v'", policy).Error(), - }, - Definition: definition, - Binding: binding, - }) - } - } - - authz := newCachingAuthorizer(c.authz) - - for _, hook := range hooks { - // versionedAttributes will be set to non-nil inside of the loop, but - // is scoped outside of the param loop so we only convert once. We defer - // conversion so that it is only performed when we know a policy matches, - // saving the cost of converting non-matching requests. 
- var versionedAttr *admission.VersionedAttributes - - definition := hook.Policy - matches, matchResource, matchKind, err := c.matcher.DefinitionMatches(a, o, NewValidatingAdmissionPolicyAccessor(definition)) - if err != nil { - // Configuration error. - addConfigError(err, definition, nil) - continue - } - if !matches { - // Policy definition does not match request - continue - } else if hook.ConfigurationError != nil { - // Configuration error. - addConfigError(hook.ConfigurationError, definition, nil) - continue - } - - auditAnnotationCollector := newAuditAnnotationCollector() - for _, binding := range hook.Bindings { - // If the key is inside dependentBindings, there is guaranteed to - // be a bindingInfo for it - matches, err := c.matcher.BindingMatches(a, o, NewValidatingAdmissionPolicyBindingAccessor(binding)) - if err != nil { - // Configuration error. - addConfigError(err, definition, binding) - continue - } - if !matches { - continue - } - - params, err := generic.CollectParams( - hook.Policy.Spec.ParamKind, - hook.ParamInformer, - hook.ParamScope, - binding.Spec.ParamRef, - a.GetNamespace(), - ) - - if err != nil { - addConfigError(err, definition, binding) - continue - } else if versionedAttr == nil && len(params) > 0 { - // As optimization versionedAttr creation is deferred until - // first use. Since > 0 params, we will validate - va, err := admission.NewVersionedAttributes(a, matchKind, o) - if err != nil { - wrappedErr := fmt.Errorf("failed to convert object version: %w", err) - addConfigError(wrappedErr, definition, binding) - continue - } - versionedAttr = va - } - - var validationResults []ValidateResult - var namespace *v1.Namespace - namespaceName := a.GetNamespace() - - // Special case, the namespace object has the namespace of itself (maybe a bug). - // unset it if the incoming object is a namespace - if gvk := a.GetKind(); gvk.Kind == "Namespace" && gvk.Version == "v1" && gvk.Group == "" { - namespaceName = "" - } - - // if it is cluster scoped, namespaceName will be empty - // Otherwise, get the Namespace resource. - if namespaceName != "" { - namespace, err = c.matcher.GetNamespace(namespaceName) - if err != nil { - return err - } - } - - for _, param := range params { - var p runtime.Object = param - if p != nil && p.GetObjectKind().GroupVersionKind().Empty() { - // Make sure param has TypeMeta populated - // This is a simple hack to make sure typeMeta is - // available to CEL without making copies of objects, etc. 
- p = &wrappedParam{ - TypeMeta: metav1.TypeMeta{ - APIVersion: definition.Spec.ParamKind.APIVersion, - Kind: definition.Spec.ParamKind.Kind, - }, - nested: param, - } - } - - validationResults = append(validationResults, - hook.Evaluator.Validate( - ctx, - matchResource, - versionedAttr, - p, - namespace, - celconfig.RuntimeCELCostBudget, - authz, - ), - ) - } - - for _, validationResult := range validationResults { - for i, decision := range validationResult.Decisions { - switch decision.Action { - case ActionAdmit: - if decision.Evaluation == EvalError { - celmetrics.Metrics.ObserveAdmissionWithError(ctx, decision.Elapsed, definition.Name, binding.Name, "active") - } - case ActionDeny: - for _, action := range binding.Spec.ValidationActions { - switch action { - case admissionregistrationv1.Deny: - deniedDecisions = append(deniedDecisions, policyDecisionWithMetadata{ - Definition: definition, - Binding: binding, - PolicyDecision: decision, - }) - celmetrics.Metrics.ObserveRejection(ctx, decision.Elapsed, definition.Name, binding.Name, "active") - case admissionregistrationv1.Audit: - publishValidationFailureAnnotation(binding, i, decision, versionedAttr) - celmetrics.Metrics.ObserveAudit(ctx, decision.Elapsed, definition.Name, binding.Name, "active") - case admissionregistrationv1.Warn: - warning.AddWarning(ctx, "", fmt.Sprintf("Validation failed for ValidatingAdmissionPolicy '%s' with binding '%s': %s", definition.Name, binding.Name, decision.Message)) - celmetrics.Metrics.ObserveWarn(ctx, decision.Elapsed, definition.Name, binding.Name, "active") - } - } - default: - return fmt.Errorf("unrecognized evaluation decision '%s' for ValidatingAdmissionPolicyBinding '%s' with ValidatingAdmissionPolicy '%s'", - decision.Action, binding.Name, definition.Name) - } - } - - for _, auditAnnotation := range validationResult.AuditAnnotations { - switch auditAnnotation.Action { - case AuditAnnotationActionPublish: - value := auditAnnotation.Value - if len(auditAnnotation.Value) > maxAuditAnnotationValueLength { - value = value[:maxAuditAnnotationValueLength] - } - auditAnnotationCollector.add(auditAnnotation.Key, value) - case AuditAnnotationActionError: - // When failurePolicy=fail, audit annotation errors result in deny - deniedDecisions = append(deniedDecisions, policyDecisionWithMetadata{ - Definition: definition, - Binding: binding, - PolicyDecision: PolicyDecision{ - Action: ActionDeny, - Evaluation: EvalError, - Message: auditAnnotation.Error, - Elapsed: auditAnnotation.Elapsed, - }, - }) - celmetrics.Metrics.ObserveRejection(ctx, auditAnnotation.Elapsed, definition.Name, binding.Name, "active") - case AuditAnnotationActionExclude: // skip it - default: - return fmt.Errorf("unsupported AuditAnnotation Action: %s", auditAnnotation.Action) - } - } - } - } - auditAnnotationCollector.publish(definition.Name, a) - } - - if len(deniedDecisions) > 0 { - // TODO: refactor admission.NewForbidden so the name extraction is reusable but the code/reason is customizable - var message string - deniedDecision := deniedDecisions[0] - if deniedDecision.Binding != nil { - message = fmt.Sprintf("ValidatingAdmissionPolicy '%s' with binding '%s' denied request: %s", deniedDecision.Definition.Name, deniedDecision.Binding.Name, deniedDecision.Message) - } else { - message = fmt.Sprintf("ValidatingAdmissionPolicy '%s' denied request: %s", deniedDecision.Definition.Name, deniedDecision.Message) - } - err := admission.NewForbidden(a, errors.New(message)).(*k8serrors.StatusError) - reason := deniedDecision.Reason - if 
len(reason) == 0 { - reason = metav1.StatusReasonInvalid - } - err.ErrStatus.Reason = reason - err.ErrStatus.Code = reasonToCode(reason) - err.ErrStatus.Details.Causes = append(err.ErrStatus.Details.Causes, metav1.StatusCause{Message: message}) - return err - } - return nil -} - -func publishValidationFailureAnnotation(binding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, expressionIndex int, decision PolicyDecision, attributes admission.Attributes) { - key := "validation.policy.admission.k8s.io/validation_failure" - // Marshal to a list of failures since, in the future, we may need to support multiple failures - valueJSON, err := utiljson.Marshal([]ValidationFailureValue{{ - ExpressionIndex: expressionIndex, - Message: decision.Message, - ValidationActions: binding.Spec.ValidationActions, - Binding: binding.Name, - Policy: binding.Spec.PolicyName, - }}) - if err != nil { - klog.Warningf("Failed to set admission audit annotation %s for ValidatingAdmissionPolicy %s and ValidatingAdmissionPolicyBinding %s: %v", key, binding.Spec.PolicyName, binding.Name, err) - } - value := string(valueJSON) - if err := attributes.AddAnnotation(key, value); err != nil { - klog.Warningf("Failed to set admission audit annotation %s to %s for ValidatingAdmissionPolicy %s and ValidatingAdmissionPolicyBinding %s: %v", key, value, binding.Spec.PolicyName, binding.Name, err) - } -} - -const maxAuditAnnotationValueLength = 10 * 1024 - -// validationFailureValue defines the JSON format of a "validation.policy.admission.k8s.io/validation_failure" audit -// annotation value. -type ValidationFailureValue struct { - Message string `json:"message"` - Policy string `json:"policy"` - Binding string `json:"binding"` - ExpressionIndex int `json:"expressionIndex"` - ValidationActions []admissionregistrationv1.ValidationAction `json:"validationActions"` -} - -type auditAnnotationCollector struct { - annotations map[string][]string -} - -func newAuditAnnotationCollector() auditAnnotationCollector { - return auditAnnotationCollector{annotations: map[string][]string{}} -} - -func (a auditAnnotationCollector) add(key, value string) { - // If multiple bindings produces the exact same key and value for an audit annotation, - // ignore the duplicates. - for _, v := range a.annotations[key] { - if v == value { - return - } - } - a.annotations[key] = append(a.annotations[key], value) -} - -func (a auditAnnotationCollector) publish(policyName string, attributes admission.Attributes) { - for key, bindingAnnotations := range a.annotations { - var value string - if len(bindingAnnotations) == 1 { - value = bindingAnnotations[0] - } else { - // Multiple distinct values can exist when binding params are used in the valueExpression of an auditAnnotation. - // When this happens, the values are concatenated into a comma-separated list. - value = strings.Join(bindingAnnotations, ", ") - } - if err := attributes.AddAnnotation(policyName+"/"+key, value); err != nil { - klog.Warningf("Failed to set admission audit annotation %s to %s for ValidatingAdmissionPolicy %s: %v", key, value, policyName, err) - } - } -} - -// A workaround to fact that native types do not have TypeMeta populated, which -// is needed for CEL expressions to be able to access the value. 
-type wrappedParam struct { - metav1.TypeMeta - nested runtime.Object -} - -func (w *wrappedParam) MarshalJSON() ([]byte, error) { - return nil, errors.New("MarshalJSON unimplemented for wrappedParam") -} - -func (w *wrappedParam) UnmarshalJSON(data []byte) error { - return errors.New("UnmarshalJSON unimplemented for wrappedParam") -} - -func (w *wrappedParam) ToUnstructured() interface{} { - res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(w.nested) - - if err != nil { - return nil - } - - metaRes, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&w.TypeMeta) - if err != nil { - return nil - } - - for k, v := range metaRes { - res[k] = v - } - - return res -} - -func (w *wrappedParam) DeepCopyObject() runtime.Object { - return &wrappedParam{ - TypeMeta: w.TypeMeta, - nested: w.nested.DeepCopyObject(), - } -} - -func (w *wrappedParam) GetObjectKind() schema.ObjectKind { - return w -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/initializer.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/initializer.go deleted file mode 100644 index c7cb9c657..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/initializer.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validating - -import ( - "context" - - "k8s.io/apiserver/pkg/admission" -) - -type CELPolicyEvaluator interface { - admission.InitializationValidator - - Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error - HasSynced() bool - Run(stopCh <-chan struct{}) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/interface.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/interface.go deleted file mode 100644 index 97eeb9550..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/interface.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validating - -import ( - "context" - - celgo "github.com/google/cel-go/cel" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/admission/plugin/cel" - "k8s.io/apiserver/pkg/authorization/authorizer" -) - -var _ cel.ExpressionAccessor = &ValidationCondition{} - -// ValidationCondition contains the inputs needed to compile, evaluate and validate a cel expression -type ValidationCondition struct { - Expression string - Message string - Reason *metav1.StatusReason -} - -func (v *ValidationCondition) GetExpression() string { - return v.Expression -} - -func (v *ValidationCondition) ReturnTypes() []*celgo.Type { - return []*celgo.Type{celgo.BoolType} -} - -// AuditAnnotationCondition contains the inputs needed to compile, evaluate and publish a cel audit annotation -type AuditAnnotationCondition struct { - Key string - ValueExpression string -} - -func (v *AuditAnnotationCondition) GetExpression() string { - return v.ValueExpression -} - -func (v *AuditAnnotationCondition) ReturnTypes() []*celgo.Type { - return []*celgo.Type{celgo.StringType, celgo.NullType} -} - -// Variable is a named expression for composition. -type Variable struct { - Name string - Expression string -} - -func (v *Variable) GetExpression() string { - return v.Expression -} - -func (v *Variable) ReturnTypes() []*celgo.Type { - return []*celgo.Type{celgo.AnyType, celgo.DynType} -} - -func (v *Variable) GetName() string { - return v.Name -} - -// ValidateResult defines the result of a Validator.Validate operation. -type ValidateResult struct { - // Decisions specifies the outcome of the validation as well as the details about the decision. - Decisions []PolicyDecision - // AuditAnnotations specifies the audit annotations that should be recorded for the validation. - AuditAnnotations []PolicyAuditAnnotation -} - -// Validator is contains logic for converting ValidationEvaluation to PolicyDecisions -type Validator interface { - // Validate is used to take cel evaluations and convert into decisions - // runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input. - Validate(ctx context.Context, matchedResource schema.GroupVersionResource, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object, namespace *corev1.Namespace, runtimeCELCostBudget int64, authz authorizer.Authorizer) ValidateResult -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/message.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/message.go deleted file mode 100644 index 8b7d97361..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/message.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validating - -import ( - celgo "github.com/google/cel-go/cel" - "k8s.io/apiserver/pkg/admission/plugin/cel" -) - -var _ cel.ExpressionAccessor = (*MessageExpressionCondition)(nil) - -type MessageExpressionCondition struct { - MessageExpression string -} - -func (m *MessageExpressionCondition) GetExpression() string { - return m.MessageExpression -} - -func (m *MessageExpressionCondition) ReturnTypes() []*celgo.Type { - return []*celgo.Type{celgo.StringType} -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go deleted file mode 100644 index 9f8a94110..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics/metrics.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "context" - "time" - - "k8s.io/component-base/metrics" - "k8s.io/component-base/metrics/legacyregistry" -) - -const ( - metricsNamespace = "apiserver" - metricsSubsystem = "validating_admission_policy" -) - -var ( - // Metrics provides access to validation admission metrics. - Metrics = newValidationAdmissionMetrics() -) - -// ValidatingAdmissionPolicyMetrics aggregates Prometheus metrics related to validation admission control. 
-type ValidatingAdmissionPolicyMetrics struct { - policyCheck *metrics.CounterVec - policyDefinition *metrics.CounterVec - policyLatency *metrics.HistogramVec -} - -func newValidationAdmissionMetrics() *ValidatingAdmissionPolicyMetrics { - check := metrics.NewCounterVec( - &metrics.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: metricsSubsystem, - Name: "check_total", - Help: "Validation admission policy check total, labeled by policy and further identified by binding, enforcement action taken, and state.", - StabilityLevel: metrics.ALPHA, - }, - []string{"policy", "policy_binding", "enforcement_action", "state"}, - ) - definition := metrics.NewCounterVec(&metrics.CounterOpts{ - Namespace: metricsNamespace, - Subsystem: metricsSubsystem, - Name: "definition_total", - Help: "Validation admission policy count total, labeled by state and enforcement action.", - StabilityLevel: metrics.ALPHA, - }, - []string{"state", "enforcement_action"}, - ) - latency := metrics.NewHistogramVec(&metrics.HistogramOpts{ - Namespace: metricsNamespace, - Subsystem: metricsSubsystem, - Name: "check_duration_seconds", - Help: "Validation admission latency for individual validation expressions in seconds, labeled by policy and further including binding, state and enforcement action taken.", - // the bucket distribution here is based oo the benchmark suite at - // github.com/DangerOnTheRanger/cel-benchmark performed on 16-core Intel Xeon - // the lowest bucket was based around the 180ns/op figure for BenchmarkAccess, - // plus some additional leeway to account for the apiserver doing other things - // the largest bucket was chosen based on the fact that benchmarks indicate the - // same Xeon running a CEL expression close to the estimated cost limit takes - // around 760ms, so that bucket should only ever have the slowest CEL expressions - // in it - Buckets: []float64{0.0000005, 0.001, 0.01, 0.1, 1.0}, - StabilityLevel: metrics.ALPHA, - }, - []string{"policy", "policy_binding", "enforcement_action", "state"}, - ) - - legacyregistry.MustRegister(check) - legacyregistry.MustRegister(definition) - legacyregistry.MustRegister(latency) - return &ValidatingAdmissionPolicyMetrics{policyCheck: check, policyDefinition: definition, policyLatency: latency} -} - -// Reset resets all validation admission-related Prometheus metrics. -func (m *ValidatingAdmissionPolicyMetrics) Reset() { - m.policyCheck.Reset() - m.policyDefinition.Reset() - m.policyLatency.Reset() -} - -// ObserveDefinition observes a policy definition. -func (m *ValidatingAdmissionPolicyMetrics) ObserveDefinition(ctx context.Context, state, enforcementAction string) { - m.policyDefinition.WithContext(ctx).WithLabelValues(state, enforcementAction).Inc() -} - -// ObserveAdmissionWithError observes a policy validation error that was ignored due to failure policy. -func (m *ValidatingAdmissionPolicyMetrics) ObserveAdmissionWithError(ctx context.Context, elapsed time.Duration, policy, binding, state string) { - m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "allow", state).Inc() - m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "allow", state).Observe(elapsed.Seconds()) -} - -// ObserveRejection observes a policy validation error that was at least one of the reasons for a deny. 
-func (m *ValidatingAdmissionPolicyMetrics) ObserveRejection(ctx context.Context, elapsed time.Duration, policy, binding, state string) { - m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "deny", state).Inc() - m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "deny", state).Observe(elapsed.Seconds()) -} - -// ObserveAudit observes a policy validation audit annotation was published for a validation failure. -func (m *ValidatingAdmissionPolicyMetrics) ObserveAudit(ctx context.Context, elapsed time.Duration, policy, binding, state string) { - m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "audit", state).Inc() - m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "audit", state).Observe(elapsed.Seconds()) -} - -// ObserveWarn observes a policy validation warning was published for a validation failure. -func (m *ValidatingAdmissionPolicyMetrics) ObserveWarn(ctx context.Context, elapsed time.Duration, policy, binding, state string) { - m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "warn", state).Inc() - m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "warn", state).Observe(elapsed.Seconds()) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go deleted file mode 100644 index fb097737a..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/plugin.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validating - -import ( - "context" - "io" - - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/admission/initializer" - "k8s.io/apiserver/pkg/admission/plugin/cel" - "k8s.io/apiserver/pkg/admission/plugin/policy/generic" - "k8s.io/apiserver/pkg/admission/plugin/policy/matching" - "k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions" - "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/component-base/featuregate" -) - -const ( - // PluginName indicates the name of admission plug-in - PluginName = "ValidatingAdmissionPolicy" -) - -var ( - compositionEnvTemplateWithStrictCost *cel.CompositionEnv = func() *cel.CompositionEnv { - compositionEnvTemplateWithStrictCost, err := cel.NewCompositionEnv(cel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)) - if err != nil { - panic(err) - } - - return compositionEnvTemplateWithStrictCost - }() - compositionEnvTemplateWithoutStrictCost *cel.CompositionEnv = func() *cel.CompositionEnv { - compositionEnvTemplateWithoutStrictCost, err := cel.NewCompositionEnv(cel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), false)) - if err != nil { - panic(err) - } - - return compositionEnvTemplateWithoutStrictCost - }() -) - -// Register registers a plugin -func Register(plugins *admission.Plugins) { - plugins.Register(PluginName, func(configFile io.Reader) (admission.Interface, error) { - return NewPlugin(configFile), nil - }) -} - -// Plugin is an implementation of admission.Interface. -type Policy = v1.ValidatingAdmissionPolicy -type PolicyBinding = v1.ValidatingAdmissionPolicyBinding -type PolicyEvaluator = Validator -type PolicyHook = generic.PolicyHook[*Policy, *PolicyBinding, PolicyEvaluator] - -type Plugin struct { - *generic.Plugin[PolicyHook] -} - -var _ admission.Interface = &Plugin{} -var _ admission.ValidationInterface = &Plugin{} -var _ initializer.WantsFeatures = &Plugin{} -var _ initializer.WantsExcludedAdmissionResources = &Plugin{} - -func NewPlugin(_ io.Reader) *Plugin { - handler := admission.NewHandler(admission.Connect, admission.Create, admission.Delete, admission.Update) - - return &Plugin{ - Plugin: generic.NewPlugin( - handler, - func(f informers.SharedInformerFactory, client kubernetes.Interface, dynamicClient dynamic.Interface, restMapper meta.RESTMapper) generic.Source[PolicyHook] { - return generic.NewPolicySource( - f.Admissionregistration().V1().ValidatingAdmissionPolicies().Informer(), - f.Admissionregistration().V1().ValidatingAdmissionPolicyBindings().Informer(), - NewValidatingAdmissionPolicyAccessor, - NewValidatingAdmissionPolicyBindingAccessor, - compilePolicy, - f, - dynamicClient, - restMapper, - ) - }, - func(a authorizer.Authorizer, m *matching.Matcher) generic.Dispatcher[PolicyHook] { - return NewDispatcher(a, generic.NewPolicyMatcher(m)) - }, - ), - } -} - -// Validate makes an admission decision based on the request attributes. 
-func (a *Plugin) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { - return a.Plugin.Dispatch(ctx, attr, o) -} - -func (a *Plugin) InspectFeatureGates(featureGates featuregate.FeatureGate) { - a.Plugin.SetEnabled(featureGates.Enabled(features.ValidatingAdmissionPolicy)) -} - -func compilePolicy(policy *Policy) Validator { - hasParam := false - if policy.Spec.ParamKind != nil { - hasParam = true - } - strictCost := utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForVAP) - optionalVars := cel.OptionalVariableDeclarations{HasParams: hasParam, HasAuthorizer: true, StrictCost: strictCost} - expressionOptionalVars := cel.OptionalVariableDeclarations{HasParams: hasParam, HasAuthorizer: false, StrictCost: strictCost} - failurePolicy := policy.Spec.FailurePolicy - var matcher matchconditions.Matcher = nil - matchConditions := policy.Spec.MatchConditions - var compositionEnvTemplate *cel.CompositionEnv - if strictCost { - compositionEnvTemplate = compositionEnvTemplateWithStrictCost - } else { - compositionEnvTemplate = compositionEnvTemplateWithoutStrictCost - } - filterCompiler := cel.NewCompositedCompilerFromTemplate(compositionEnvTemplate) - filterCompiler.CompileAndStoreVariables(convertv1beta1Variables(policy.Spec.Variables), optionalVars, environment.StoredExpressions) - - if len(matchConditions) > 0 { - matchExpressionAccessors := make([]cel.ExpressionAccessor, len(matchConditions)) - for i := range matchConditions { - matchExpressionAccessors[i] = (*matchconditions.MatchCondition)(&matchConditions[i]) - } - matcher = matchconditions.NewMatcher(filterCompiler.Compile(matchExpressionAccessors, optionalVars, environment.StoredExpressions), failurePolicy, "policy", "validate", policy.Name) - } - res := NewValidator( - filterCompiler.Compile(convertv1Validations(policy.Spec.Validations), optionalVars, environment.StoredExpressions), - matcher, - filterCompiler.Compile(convertv1AuditAnnotations(policy.Spec.AuditAnnotations), optionalVars, environment.StoredExpressions), - filterCompiler.Compile(convertv1MessageExpressions(policy.Spec.Validations), expressionOptionalVars, environment.StoredExpressions), - failurePolicy, - ) - - return res -} - -func convertv1Validations(inputValidations []v1.Validation) []cel.ExpressionAccessor { - celExpressionAccessor := make([]cel.ExpressionAccessor, len(inputValidations)) - for i, validation := range inputValidations { - validation := ValidationCondition{ - Expression: validation.Expression, - Message: validation.Message, - Reason: validation.Reason, - } - celExpressionAccessor[i] = &validation - } - return celExpressionAccessor -} - -func convertv1MessageExpressions(inputValidations []v1.Validation) []cel.ExpressionAccessor { - celExpressionAccessor := make([]cel.ExpressionAccessor, len(inputValidations)) - for i, validation := range inputValidations { - if validation.MessageExpression != "" { - condition := MessageExpressionCondition{ - MessageExpression: validation.MessageExpression, - } - celExpressionAccessor[i] = &condition - } - } - return celExpressionAccessor -} - -func convertv1AuditAnnotations(inputValidations []v1.AuditAnnotation) []cel.ExpressionAccessor { - celExpressionAccessor := make([]cel.ExpressionAccessor, len(inputValidations)) - for i, validation := range inputValidations { - validation := AuditAnnotationCondition{ - Key: validation.Key, - ValueExpression: validation.ValueExpression, - } - celExpressionAccessor[i] = &validation - } - return celExpressionAccessor -} - -func 
convertv1beta1Variables(variables []v1.Variable) []cel.NamedExpressionAccessor { - namedExpressions := make([]cel.NamedExpressionAccessor, len(variables)) - for i, variable := range variables { - namedExpressions[i] = &Variable{Name: variable.Name, Expression: variable.Expression} - } - return namedExpressions -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/policy_decision.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/policy_decision.go deleted file mode 100644 index 3ea17038a..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/policy_decision.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validating - -import ( - "net/http" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type PolicyDecisionAction string - -const ( - ActionAdmit PolicyDecisionAction = "admit" - ActionDeny PolicyDecisionAction = "deny" -) - -type PolicyDecisionEvaluation string - -const ( - EvalAdmit PolicyDecisionEvaluation = "admit" - EvalError PolicyDecisionEvaluation = "error" - EvalDeny PolicyDecisionEvaluation = "deny" -) - -// PolicyDecision contains the action determined from a cel evaluation along with metadata such as message, reason and duration -type PolicyDecision struct { - Action PolicyDecisionAction - Evaluation PolicyDecisionEvaluation - Message string - Reason metav1.StatusReason - Elapsed time.Duration -} - -type PolicyAuditAnnotationAction string - -const ( - // AuditAnnotationActionPublish indicates that the audit annotation should be - // published with the audit event. - AuditAnnotationActionPublish PolicyAuditAnnotationAction = "publish" - // AuditAnnotationActionError indicates that the valueExpression resulted - // in an error. - AuditAnnotationActionError PolicyAuditAnnotationAction = "error" - // AuditAnnotationActionExclude indicates that the audit annotation should be excluded - // because the valueExpression evaluated to null, or because FailurePolicy is Ignore - // and the expression failed with a parse error, type check error, or runtime error. 
- AuditAnnotationActionExclude PolicyAuditAnnotationAction = "exclude" -) - -type PolicyAuditAnnotation struct { - Key string - Value string - Elapsed time.Duration - Action PolicyAuditAnnotationAction - Error string -} - -func reasonToCode(r metav1.StatusReason) int32 { - switch r { - case metav1.StatusReasonForbidden: - return http.StatusForbidden - case metav1.StatusReasonUnauthorized: - return http.StatusUnauthorized - case metav1.StatusReasonRequestEntityTooLarge: - return http.StatusRequestEntityTooLarge - case metav1.StatusReasonInvalid: - return http.StatusUnprocessableEntity - default: - // It should not reach here since we only allow above reason to be set from API level - return http.StatusUnprocessableEntity - } -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/typechecking.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/typechecking.go deleted file mode 100644 index 192be9621..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/typechecking.go +++ /dev/null @@ -1,489 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validating - -import ( - "errors" - "fmt" - "sort" - "strings" - "time" - - "github.com/google/cel-go/cel" - - "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/apimachinery/pkg/util/version" - plugincel "k8s.io/apiserver/pkg/admission/plugin/cel" - apiservercel "k8s.io/apiserver/pkg/cel" - "k8s.io/apiserver/pkg/cel/common" - "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/cel/library" - "k8s.io/apiserver/pkg/cel/openapi" - "k8s.io/apiserver/pkg/cel/openapi/resolver" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" -) - -const maxTypesToCheck = 10 - -type TypeChecker struct { - SchemaResolver resolver.SchemaResolver - RestMapper meta.RESTMapper -} - -// TypeCheckingContext holds information about the policy being type-checked. -// The struct is opaque to the caller. -type TypeCheckingContext struct { - gvks []schema.GroupVersionKind - declTypes []*apiservercel.DeclType - paramGVK schema.GroupVersionKind - paramDeclType *apiservercel.DeclType - - variables []v1.Variable -} - -type typeOverwrite struct { - object *apiservercel.DeclType - params *apiservercel.DeclType -} - -// TypeCheckingResult holds the issues found during type checking, any returned -// error, and the gvk that the type checking is performed against. -type TypeCheckingResult struct { - // GVK is the associated GVK - GVK schema.GroupVersionKind - // Issues contain machine-readable information about the typechecking result. - Issues error - // Err is the possible error that was encounter during type checking. 
- Err error -} - -// TypeCheckingResults is a collection of TypeCheckingResult -type TypeCheckingResults []*TypeCheckingResult - -func (rs TypeCheckingResults) String() string { - var messages []string - for _, r := range rs { - message := r.String() - if message != "" { - messages = append(messages, message) - } - } - return strings.Join(messages, "\n") -} - -// String converts the result to human-readable form as a string. -func (r *TypeCheckingResult) String() string { - if r.Issues == nil && r.Err == nil { - return "" - } - if r.Err != nil { - return fmt.Sprintf("%v: type checking error: %v\n", r.GVK, r.Err) - } - return fmt.Sprintf("%v: %s\n", r.GVK, r.Issues) -} - -// Check preforms the type check against the given policy, and format the result -// as []ExpressionWarning that is ready to be set in policy.Status -// The result is nil if type checking returns no warning. -// The policy object is NOT mutated. The caller should update Status accordingly -func (c *TypeChecker) Check(policy *v1.ValidatingAdmissionPolicy) []v1.ExpressionWarning { - ctx := c.CreateContext(policy) - - // warnings to return, note that the capacity is optimistically set to zero - var warnings []v1.ExpressionWarning // intentionally not setting capacity - - // check main validation expressions and their message expressions, located in spec.validations[*] - fieldRef := field.NewPath("spec", "validations") - for i, v := range policy.Spec.Validations { - results := c.CheckExpression(ctx, v.Expression) - if len(results) != 0 { - warnings = append(warnings, v1.ExpressionWarning{ - FieldRef: fieldRef.Index(i).Child("expression").String(), - Warning: results.String(), - }) - } - // Note that MessageExpression is optional - if v.MessageExpression == "" { - continue - } - results = c.CheckExpression(ctx, v.MessageExpression) - if len(results) != 0 { - warnings = append(warnings, v1.ExpressionWarning{ - FieldRef: fieldRef.Index(i).Child("messageExpression").String(), - Warning: results.String(), - }) - } - } - - return warnings -} - -// CreateContext resolves all types and their schemas from a policy definition and creates the context. -func (c *TypeChecker) CreateContext(policy *v1.ValidatingAdmissionPolicy) *TypeCheckingContext { - ctx := new(TypeCheckingContext) - allGvks := c.typesToCheck(policy) - gvks := make([]schema.GroupVersionKind, 0, len(allGvks)) - declTypes := make([]*apiservercel.DeclType, 0, len(allGvks)) - for _, gvk := range allGvks { - declType, err := c.declType(gvk) - if err != nil { - // type checking errors MUST NOT alter the behavior of the policy - // even if an error occurs. 
- if !errors.Is(err, resolver.ErrSchemaNotFound) { - // Anything except ErrSchemaNotFound is an internal error - klog.V(2).ErrorS(err, "internal error: schema resolution failure", "gvk", gvk) - } - // skip for not found or internal error - continue - } - gvks = append(gvks, gvk) - declTypes = append(declTypes, declType) - } - ctx.gvks = gvks - ctx.declTypes = declTypes - - paramsGVK := c.paramsGVK(policy) // maybe empty, correctly handled - paramsDeclType, err := c.declType(paramsGVK) - if err != nil { - if !errors.Is(err, resolver.ErrSchemaNotFound) { - klog.V(2).ErrorS(err, "internal error: cannot resolve schema for params", "gvk", paramsGVK) - } - paramsDeclType = nil - } - ctx.paramGVK = paramsGVK - ctx.paramDeclType = paramsDeclType - ctx.variables = policy.Spec.Variables - return ctx -} - -func (c *TypeChecker) compiler(ctx *TypeCheckingContext, typeOverwrite typeOverwrite) (*plugincel.CompositedCompiler, error) { - envSet, err := buildEnvSet( - /* hasParams */ ctx.paramDeclType != nil, - /* hasAuthorizer */ true, - typeOverwrite) - if err != nil { - return nil, err - } - env, err := plugincel.NewCompositionEnv(plugincel.VariablesTypeName, envSet) - if err != nil { - return nil, err - } - compiler := &plugincel.CompositedCompiler{ - Compiler: &typeCheckingCompiler{typeOverwrite: typeOverwrite, compositionEnv: env}, - CompositionEnv: env, - } - return compiler, nil -} - -// CheckExpression type checks a single expression, given the context -func (c *TypeChecker) CheckExpression(ctx *TypeCheckingContext, expression string) TypeCheckingResults { - var results TypeCheckingResults - for i, gvk := range ctx.gvks { - declType := ctx.declTypes[i] - compiler, err := c.compiler(ctx, typeOverwrite{ - object: declType, - params: ctx.paramDeclType, - }) - if err != nil { - utilruntime.HandleError(err) - continue - } - options := plugincel.OptionalVariableDeclarations{ - HasParams: ctx.paramDeclType != nil, - HasAuthorizer: true, - StrictCost: utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForVAP), - } - compiler.CompileAndStoreVariables(convertv1beta1Variables(ctx.variables), options, environment.StoredExpressions) - result := compiler.CompileCELExpression(celExpression(expression), options, environment.StoredExpressions) - if err := result.Error; err != nil { - typeCheckingResult := &TypeCheckingResult{GVK: gvk} - if err.Type == apiservercel.ErrorTypeInvalid { - typeCheckingResult.Issues = err - } else { - typeCheckingResult.Err = err - } - results = append(results, typeCheckingResult) - } - } - return results -} - -type celExpression string - -func (c celExpression) GetExpression() string { - return string(c) -} - -func (c celExpression) ReturnTypes() []*cel.Type { - return []*cel.Type{cel.AnyType} -} -func generateUniqueTypeName(kind string) string { - return fmt.Sprintf("%s%d", kind, time.Now().Nanosecond()) -} - -func (c *TypeChecker) declType(gvk schema.GroupVersionKind) (*apiservercel.DeclType, error) { - if gvk.Empty() { - return nil, nil - } - s, err := c.SchemaResolver.ResolveSchema(gvk) - if err != nil { - return nil, err - } - return common.SchemaDeclType(&openapi.Schema{Schema: s}, true).MaybeAssignTypeName(generateUniqueTypeName(gvk.Kind)), nil -} - -func (c *TypeChecker) paramsGVK(policy *v1.ValidatingAdmissionPolicy) schema.GroupVersionKind { - if policy.Spec.ParamKind == nil { - return schema.GroupVersionKind{} - } - gv, err := schema.ParseGroupVersion(policy.Spec.ParamKind.APIVersion) - if err != nil { - return schema.GroupVersionKind{} - } - return 
gv.WithKind(policy.Spec.ParamKind.Kind) -} - -// typesToCheck extracts a list of GVKs that needs type checking from the policy -// the result is sorted in the order of Group, Version, and Kind -func (c *TypeChecker) typesToCheck(p *v1.ValidatingAdmissionPolicy) []schema.GroupVersionKind { - gvks := sets.New[schema.GroupVersionKind]() - if p.Spec.MatchConstraints == nil || len(p.Spec.MatchConstraints.ResourceRules) == 0 { - return nil - } - restMapperRefreshAttempted := false // at most once per policy, refresh RESTMapper and retry resolution. - for _, rule := range p.Spec.MatchConstraints.ResourceRules { - groups := extractGroups(&rule.Rule) - if len(groups) == 0 { - continue - } - versions := extractVersions(&rule.Rule) - if len(versions) == 0 { - continue - } - resources := extractResources(&rule.Rule) - if len(resources) == 0 { - continue - } - // sort GVRs so that the loop below provides - // consistent results. - sort.Strings(groups) - sort.Strings(versions) - sort.Strings(resources) - count := 0 - for _, group := range groups { - for _, version := range versions { - for _, resource := range resources { - gvr := schema.GroupVersionResource{ - Group: group, - Version: version, - Resource: resource, - } - resolved, err := c.RestMapper.KindsFor(gvr) - if err != nil { - if restMapperRefreshAttempted { - // RESTMapper refresh happens at most once per policy - continue - } - c.tryRefreshRESTMapper() - restMapperRefreshAttempted = true - resolved, err = c.RestMapper.KindsFor(gvr) - if err != nil { - continue - } - } - for _, r := range resolved { - if !r.Empty() { - gvks.Insert(r) - count++ - // early return if maximum number of types are already - // collected - if count == maxTypesToCheck { - if gvks.Len() == 0 { - return nil - } - return sortGVKList(gvks.UnsortedList()) - } - } - } - } - } - } - } - if gvks.Len() == 0 { - return nil - } - return sortGVKList(gvks.UnsortedList()) -} - -func extractGroups(rule *v1.Rule) []string { - groups := make([]string, 0, len(rule.APIGroups)) - for _, group := range rule.APIGroups { - // give up if wildcard - if strings.ContainsAny(group, "*") { - return nil - } - groups = append(groups, group) - } - return groups -} - -func extractVersions(rule *v1.Rule) []string { - versions := make([]string, 0, len(rule.APIVersions)) - for _, version := range rule.APIVersions { - if strings.ContainsAny(version, "*") { - return nil - } - versions = append(versions, version) - } - return versions -} - -func extractResources(rule *v1.Rule) []string { - resources := make([]string, 0, len(rule.Resources)) - for _, resource := range rule.Resources { - // skip wildcard and subresources - if strings.ContainsAny(resource, "*/") { - continue - } - resources = append(resources, resource) - } - return resources -} - -// sortGVKList sorts the list by Group, Version, and Kind -// returns the list itself. -func sortGVKList(list []schema.GroupVersionKind) []schema.GroupVersionKind { - sort.Slice(list, func(i, j int) bool { - if g := strings.Compare(list[i].Group, list[j].Group); g != 0 { - return g < 0 - } - if v := strings.Compare(list[i].Version, list[j].Version); v != 0 { - return v < 0 - } - return strings.Compare(list[i].Kind, list[j].Kind) < 0 - }) - return list -} - -// tryRefreshRESTMapper refreshes the RESTMapper if it supports refreshing. 
-func (c *TypeChecker) tryRefreshRESTMapper() { - if r, ok := c.RestMapper.(meta.ResettableRESTMapper); ok { - r.Reset() - } -} - -func buildEnvSet(hasParams bool, hasAuthorizer bool, types typeOverwrite) (*environment.EnvSet, error) { - baseEnv := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForVAP)) - requestType := plugincel.BuildRequestType() - namespaceType := plugincel.BuildNamespaceType() - - var varOpts []cel.EnvOption - var declTypes []*apiservercel.DeclType - - // namespace, hand-crafted type - declTypes = append(declTypes, namespaceType) - varOpts = append(varOpts, createVariableOpts(namespaceType, plugincel.NamespaceVarName)...) - - // request, hand-crafted type - declTypes = append(declTypes, requestType) - varOpts = append(varOpts, createVariableOpts(requestType, plugincel.RequestVarName)...) - - // object and oldObject, same type, type(s) resolved from constraints - declTypes = append(declTypes, types.object) - varOpts = append(varOpts, createVariableOpts(types.object, plugincel.ObjectVarName, plugincel.OldObjectVarName)...) - - // params, defined by ParamKind - if hasParams && types.params != nil { - declTypes = append(declTypes, types.params) - varOpts = append(varOpts, createVariableOpts(types.params, plugincel.ParamsVarName)...) - } - - // authorizer, implicitly available to all expressions of a policy - if hasAuthorizer { - // we only need its structure but not the variable itself - varOpts = append(varOpts, cel.Variable("authorizer", library.AuthorizerType)) - } - - return baseEnv.Extend( - environment.VersionedOptions{ - // Feature epoch was actually 1.26, but we artificially set it to 1.0 because these - // options should always be present. - IntroducedVersion: version.MajorMinor(1, 0), - EnvOptions: varOpts, - DeclTypes: declTypes, - }, - ) -} - -// createVariableOpts creates a slice of EnvOption -// that can be used for creating a CEL env containing variables of declType. -// declType can be nil, in which case the variables will be of DynType. -func createVariableOpts(declType *apiservercel.DeclType, variables ...string) []cel.EnvOption { - opts := make([]cel.EnvOption, 0, len(variables)) - t := cel.DynType - if declType != nil { - t = declType.CelType() - } - for _, v := range variables { - opts = append(opts, cel.Variable(v, t)) - } - return opts -} - -type typeCheckingCompiler struct { - compositionEnv *plugincel.CompositionEnv - typeOverwrite typeOverwrite -} - -// CompileCELExpression compiles the given expression. 
-// The implementation is the same as that of staging/src/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go -// except that: -// - object, oldObject, and params are typed instead of Dyn -// - compiler does not enforce the output type -// - the compiler does not initialize the program -func (c *typeCheckingCompiler) CompileCELExpression(expressionAccessor plugincel.ExpressionAccessor, options plugincel.OptionalVariableDeclarations, mode environment.Type) plugincel.CompilationResult { - resultError := func(errorString string, errType apiservercel.ErrorType) plugincel.CompilationResult { - return plugincel.CompilationResult{ - Error: &apiservercel.Error{ - Type: errType, - Detail: errorString, - }, - ExpressionAccessor: expressionAccessor, - } - } - env, err := c.compositionEnv.Env(mode) - if err != nil { - return resultError(fmt.Sprintf("fail to build env: %v", err), apiservercel.ErrorTypeInternal) - } - ast, issues := env.Compile(expressionAccessor.GetExpression()) - if issues != nil { - return resultError(issues.String(), apiservercel.ErrorTypeInvalid) - } - // type checker does not require the program, however the type must still be set. - return plugincel.CompilationResult{ - OutputType: ast.OutputType(), - } -} - -var _ plugincel.Compiler = (*typeCheckingCompiler)(nil) diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go deleted file mode 100644 index da2ea1c10..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/policy/validating/validator.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validating - -import ( - "context" - "fmt" - "strings" - - celtypes "github.com/google/cel-go/common/types" - - v1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/admission/plugin/cel" - "k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions" - celconfig "k8s.io/apiserver/pkg/apis/cel" - "k8s.io/apiserver/pkg/authorization/authorizer" - apiservercel "k8s.io/apiserver/pkg/cel" - "k8s.io/klog/v2" -) - -// validator implements the Validator interface -type validator struct { - celMatcher matchconditions.Matcher - validationFilter cel.Filter - auditAnnotationFilter cel.Filter - messageFilter cel.Filter - failPolicy *v1.FailurePolicyType -} - -func NewValidator(validationFilter cel.Filter, celMatcher matchconditions.Matcher, auditAnnotationFilter, messageFilter cel.Filter, failPolicy *v1.FailurePolicyType) Validator { - return &validator{ - celMatcher: celMatcher, - validationFilter: validationFilter, - auditAnnotationFilter: auditAnnotationFilter, - messageFilter: messageFilter, - failPolicy: failPolicy, - } -} - -func policyDecisionActionForError(f v1.FailurePolicyType) PolicyDecisionAction { - if f == v1.Ignore { - return ActionAdmit - } - return ActionDeny -} - -func auditAnnotationEvaluationForError(f v1.FailurePolicyType) PolicyAuditAnnotationAction { - if f == v1.Ignore { - return AuditAnnotationActionExclude - } - return AuditAnnotationActionError -} - -// Validate takes a list of Evaluation and a failure policy and converts them into actionable PolicyDecisions -// runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input. 
- -func (v *validator) Validate(ctx context.Context, matchedResource schema.GroupVersionResource, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object, namespace *corev1.Namespace, runtimeCELCostBudget int64, authz authorizer.Authorizer) ValidateResult { - var f v1.FailurePolicyType - if v.failPolicy == nil { - f = v1.Fail - } else { - f = *v.failPolicy - } - if v.celMatcher != nil { - matchResults := v.celMatcher.Match(ctx, versionedAttr, versionedParams, authz) - if matchResults.Error != nil { - return ValidateResult{ - Decisions: []PolicyDecision{ - { - Action: policyDecisionActionForError(f), - Evaluation: EvalError, - Message: matchResults.Error.Error(), - }, - }, - } - } - - // if preconditions are not met, then do not return any validations - if !matchResults.Matches { - return ValidateResult{} - } - } - - optionalVars := cel.OptionalVariableBindings{VersionedParams: versionedParams, Authorizer: authz} - expressionOptionalVars := cel.OptionalVariableBindings{VersionedParams: versionedParams} - admissionRequest := cel.CreateAdmissionRequest(versionedAttr.Attributes, metav1.GroupVersionResource(matchedResource), metav1.GroupVersionKind(versionedAttr.VersionedKind)) - // Decide which fields are exposed - ns := cel.CreateNamespaceObject(namespace) - evalResults, remainingBudget, err := v.validationFilter.ForInput(ctx, versionedAttr, admissionRequest, optionalVars, ns, runtimeCELCostBudget) - if err != nil { - return ValidateResult{ - Decisions: []PolicyDecision{ - { - Action: policyDecisionActionForError(f), - Evaluation: EvalError, - Message: err.Error(), - }, - }, - } - } - decisions := make([]PolicyDecision, len(evalResults)) - messageResults, _, err := v.messageFilter.ForInput(ctx, versionedAttr, admissionRequest, expressionOptionalVars, ns, remainingBudget) - for i, evalResult := range evalResults { - var decision = &decisions[i] - // TODO: move this to generics - validation, ok := evalResult.ExpressionAccessor.(*ValidationCondition) - if !ok { - klog.Error("Invalid type conversion to ValidationCondition") - decision.Action = policyDecisionActionForError(f) - decision.Evaluation = EvalError - decision.Message = "Invalid type sent to validator, expected ValidationCondition" - continue - } - - var messageResult *cel.EvaluationResult - var messageError *apiservercel.Error - if len(messageResults) > i { - messageResult = &messageResults[i] - } - messageError, _ = err.(*apiservercel.Error) - if evalResult.Error != nil { - decision.Action = policyDecisionActionForError(f) - decision.Evaluation = EvalError - decision.Message = evalResult.Error.Error() - } else if messageError != nil && - (messageError.Type == apiservercel.ErrorTypeInternal || - (messageError.Type == apiservercel.ErrorTypeInvalid && - strings.HasPrefix(messageError.Detail, "validation failed due to running out of cost budget"))) { - decision.Action = policyDecisionActionForError(f) - decision.Evaluation = EvalError - decision.Message = fmt.Sprintf("failed messageExpression: %s", err) - } else if evalResult.EvalResult != celtypes.True { - decision.Action = ActionDeny - if validation.Reason == nil { - decision.Reason = metav1.StatusReasonInvalid - } else { - decision.Reason = *validation.Reason - } - // decide the failure message - var message string - // attempt to set message with messageExpression result - if messageResult != nil && messageResult.Error == nil && messageResult.EvalResult != nil { - // also fallback if the eval result is non-string (including null) or - // whitespaces. 
- if message, ok = messageResult.EvalResult.Value().(string); ok { - message = strings.TrimSpace(message) - // deny excessively long message from EvalResult - if len(message) > celconfig.MaxEvaluatedMessageExpressionSizeBytes { - klog.V(2).InfoS("excessively long message denied", "message", message) - message = "" - } - // deny message that contains newlines - if strings.ContainsAny(message, "\n") { - klog.V(2).InfoS("multi-line message denied", "message", message) - message = "" - } - } - } - if messageResult != nil && messageResult.Error != nil { - // log any error with messageExpression - klog.V(2).ErrorS(messageResult.Error, "error while evaluating messageExpression") - } - // fallback to set message to the custom message - if message == "" && len(validation.Message) > 0 { - message = strings.TrimSpace(validation.Message) - } - // fallback to use the expression to compose a message - if message == "" { - message = fmt.Sprintf("failed expression: %v", strings.TrimSpace(validation.Expression)) - } - decision.Message = message - } else { - decision.Action = ActionAdmit - decision.Evaluation = EvalAdmit - } - } - - options := cel.OptionalVariableBindings{VersionedParams: versionedParams} - auditAnnotationEvalResults, _, err := v.auditAnnotationFilter.ForInput(ctx, versionedAttr, admissionRequest, options, namespace, runtimeCELCostBudget) - if err != nil { - return ValidateResult{ - Decisions: []PolicyDecision{ - { - Action: policyDecisionActionForError(f), - Evaluation: EvalError, - Message: err.Error(), - }, - }, - } - } - - auditAnnotationResults := make([]PolicyAuditAnnotation, len(auditAnnotationEvalResults)) - for i, evalResult := range auditAnnotationEvalResults { - if evalResult.ExpressionAccessor == nil { - continue - } - var auditAnnotationResult = &auditAnnotationResults[i] - // TODO: move this to generics - validation, ok := evalResult.ExpressionAccessor.(*AuditAnnotationCondition) - if !ok { - klog.Error("Invalid type conversion to AuditAnnotationCondition") - auditAnnotationResult.Action = auditAnnotationEvaluationForError(f) - auditAnnotationResult.Error = fmt.Sprintf("Invalid type sent to validator, expected AuditAnnotationCondition but got %T", evalResult.ExpressionAccessor) - continue - } - auditAnnotationResult.Key = validation.Key - - if evalResult.Error != nil { - auditAnnotationResult.Action = auditAnnotationEvaluationForError(f) - auditAnnotationResult.Error = evalResult.Error.Error() - } else { - switch evalResult.EvalResult.Type() { - case celtypes.StringType: - value := strings.TrimSpace(evalResult.EvalResult.Value().(string)) - if len(value) == 0 { - auditAnnotationResult.Action = AuditAnnotationActionExclude - } else { - auditAnnotationResult.Action = AuditAnnotationActionPublish - auditAnnotationResult.Value = value - } - case celtypes.NullType: - auditAnnotationResult.Action = AuditAnnotationActionExclude - default: - auditAnnotationResult.Action = AuditAnnotationActionError - auditAnnotationResult.Error = fmt.Sprintf("valueExpression '%v' resulted in unsupported return type: %v. 
"+ - "Return type must be either string or null.", validation.ValueExpression, evalResult.EvalResult.Type()) - } - } - } - return ValidateResult{Decisions: decisions, AuditAnnotations: auditAnnotationResults} -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go deleted file mode 100644 index f23580cc0..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go +++ /dev/null @@ -1,388 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "sync" - - v1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apiserver/pkg/admission/plugin/cel" - "k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions" - "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace" - "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object" - "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" - webhookutil "k8s.io/apiserver/pkg/util/webhook" - "k8s.io/client-go/rest" -) - -// WebhookAccessor provides a common interface to both mutating and validating webhook types. -type WebhookAccessor interface { - // This accessor provides the methods needed to support matching against webhook - // predicates - namespace.NamespaceSelectorProvider - object.ObjectSelectorProvider - - // GetUID gets a string that uniquely identifies the webhook. - GetUID() string - - // GetConfigurationName gets the name of the webhook configuration that owns this webhook. - GetConfigurationName() string - - // GetRESTClient gets the webhook client - GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error) - - // GetCompiledMatcher gets the compiled matcher object - GetCompiledMatcher(compiler cel.FilterCompiler) matchconditions.Matcher - - // GetName gets the webhook Name field. Note that the name is scoped to the webhook - // configuration and does not provide a globally unique identity, if a unique identity is - // needed, use GetUID. - GetName() string - // GetClientConfig gets the webhook ClientConfig field. - GetClientConfig() v1.WebhookClientConfig - // GetRules gets the webhook Rules field. - GetRules() []v1.RuleWithOperations - // GetFailurePolicy gets the webhook FailurePolicy field. - GetFailurePolicy() *v1.FailurePolicyType - // GetMatchPolicy gets the webhook MatchPolicy field. - GetMatchPolicy() *v1.MatchPolicyType - // GetNamespaceSelector gets the webhook NamespaceSelector field. - GetNamespaceSelector() *metav1.LabelSelector - // GetObjectSelector gets the webhook ObjectSelector field. - GetObjectSelector() *metav1.LabelSelector - // GetSideEffects gets the webhook SideEffects field. - GetSideEffects() *v1.SideEffectClass - // GetTimeoutSeconds gets the webhook TimeoutSeconds field. 
- GetTimeoutSeconds() *int32 - // GetAdmissionReviewVersions gets the webhook AdmissionReviewVersions field. - GetAdmissionReviewVersions() []string - - // GetMatchConditions gets the webhook match conditions field. - GetMatchConditions() []v1.MatchCondition - - // GetMutatingWebhook if the accessor contains a MutatingWebhook, returns it and true, else returns false. - GetMutatingWebhook() (*v1.MutatingWebhook, bool) - // GetValidatingWebhook if the accessor contains a ValidatingWebhook, returns it and true, else returns false. - GetValidatingWebhook() (*v1.ValidatingWebhook, bool) - - // GetType returns the type of the accessor (validate or admit) - GetType() string -} - -// NewMutatingWebhookAccessor creates an accessor for a MutatingWebhook. -func NewMutatingWebhookAccessor(uid, configurationName string, h *v1.MutatingWebhook) WebhookAccessor { - return &mutatingWebhookAccessor{uid: uid, configurationName: configurationName, MutatingWebhook: h} -} - -type mutatingWebhookAccessor struct { - *v1.MutatingWebhook - uid string - configurationName string - - initObjectSelector sync.Once - objectSelector labels.Selector - objectSelectorErr error - - initNamespaceSelector sync.Once - namespaceSelector labels.Selector - namespaceSelectorErr error - - initClient sync.Once - client *rest.RESTClient - clientErr error - - compileMatcher sync.Once - compiledMatcher matchconditions.Matcher -} - -func (m *mutatingWebhookAccessor) GetUID() string { - return m.uid -} - -func (m *mutatingWebhookAccessor) GetConfigurationName() string { - return m.configurationName -} - -func (m *mutatingWebhookAccessor) GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error) { - m.initClient.Do(func() { - m.client, m.clientErr = clientManager.HookClient(hookClientConfigForWebhook(m)) - }) - return m.client, m.clientErr -} - -func (m *mutatingWebhookAccessor) GetType() string { - return "admit" -} - -func (m *mutatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler) matchconditions.Matcher { - m.compileMatcher.Do(func() { - expressions := make([]cel.ExpressionAccessor, len(m.MutatingWebhook.MatchConditions)) - for i, matchCondition := range m.MutatingWebhook.MatchConditions { - expressions[i] = &matchconditions.MatchCondition{ - Name: matchCondition.Name, - Expression: matchCondition.Expression, - } - } - strictCost := false - if utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForWebhooks) { - strictCost = true - } - m.compiledMatcher = matchconditions.NewMatcher(compiler.Compile( - expressions, - cel.OptionalVariableDeclarations{ - HasParams: false, - HasAuthorizer: true, - StrictCost: strictCost, - }, - environment.StoredExpressions, - ), m.FailurePolicy, "webhook", "admit", m.Name) - }) - return m.compiledMatcher -} - -func (m *mutatingWebhookAccessor) GetParsedNamespaceSelector() (labels.Selector, error) { - m.initNamespaceSelector.Do(func() { - m.namespaceSelector, m.namespaceSelectorErr = metav1.LabelSelectorAsSelector(m.NamespaceSelector) - }) - return m.namespaceSelector, m.namespaceSelectorErr -} - -func (m *mutatingWebhookAccessor) GetParsedObjectSelector() (labels.Selector, error) { - m.initObjectSelector.Do(func() { - m.objectSelector, m.objectSelectorErr = metav1.LabelSelectorAsSelector(m.ObjectSelector) - }) - return m.objectSelector, m.objectSelectorErr -} - -func (m *mutatingWebhookAccessor) GetName() string { - return m.Name -} - -func (m *mutatingWebhookAccessor) GetClientConfig() v1.WebhookClientConfig { - return m.ClientConfig -} - -func (m 
*mutatingWebhookAccessor) GetRules() []v1.RuleWithOperations { - return m.Rules -} - -func (m *mutatingWebhookAccessor) GetFailurePolicy() *v1.FailurePolicyType { - return m.FailurePolicy -} - -func (m *mutatingWebhookAccessor) GetMatchPolicy() *v1.MatchPolicyType { - return m.MatchPolicy -} - -func (m *mutatingWebhookAccessor) GetNamespaceSelector() *metav1.LabelSelector { - return m.NamespaceSelector -} - -func (m *mutatingWebhookAccessor) GetObjectSelector() *metav1.LabelSelector { - return m.ObjectSelector -} - -func (m *mutatingWebhookAccessor) GetSideEffects() *v1.SideEffectClass { - return m.SideEffects -} - -func (m *mutatingWebhookAccessor) GetTimeoutSeconds() *int32 { - return m.TimeoutSeconds -} - -func (m *mutatingWebhookAccessor) GetAdmissionReviewVersions() []string { - return m.AdmissionReviewVersions -} - -func (m *mutatingWebhookAccessor) GetMatchConditions() []v1.MatchCondition { - return m.MatchConditions -} - -func (m *mutatingWebhookAccessor) GetMutatingWebhook() (*v1.MutatingWebhook, bool) { - return m.MutatingWebhook, true -} - -func (m *mutatingWebhookAccessor) GetValidatingWebhook() (*v1.ValidatingWebhook, bool) { - return nil, false -} - -// NewValidatingWebhookAccessor creates an accessor for a ValidatingWebhook. -func NewValidatingWebhookAccessor(uid, configurationName string, h *v1.ValidatingWebhook) WebhookAccessor { - return &validatingWebhookAccessor{uid: uid, configurationName: configurationName, ValidatingWebhook: h} -} - -type validatingWebhookAccessor struct { - *v1.ValidatingWebhook - uid string - configurationName string - - initObjectSelector sync.Once - objectSelector labels.Selector - objectSelectorErr error - - initNamespaceSelector sync.Once - namespaceSelector labels.Selector - namespaceSelectorErr error - - initClient sync.Once - client *rest.RESTClient - clientErr error - - compileMatcher sync.Once - compiledMatcher matchconditions.Matcher -} - -func (v *validatingWebhookAccessor) GetUID() string { - return v.uid -} - -func (v *validatingWebhookAccessor) GetConfigurationName() string { - return v.configurationName -} - -func (v *validatingWebhookAccessor) GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error) { - v.initClient.Do(func() { - v.client, v.clientErr = clientManager.HookClient(hookClientConfigForWebhook(v)) - }) - return v.client, v.clientErr -} - -func (v *validatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler) matchconditions.Matcher { - v.compileMatcher.Do(func() { - expressions := make([]cel.ExpressionAccessor, len(v.ValidatingWebhook.MatchConditions)) - for i, matchCondition := range v.ValidatingWebhook.MatchConditions { - expressions[i] = &matchconditions.MatchCondition{ - Name: matchCondition.Name, - Expression: matchCondition.Expression, - } - } - strictCost := false - if utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForWebhooks) { - strictCost = true - } - v.compiledMatcher = matchconditions.NewMatcher(compiler.Compile( - expressions, - cel.OptionalVariableDeclarations{ - HasParams: false, - HasAuthorizer: true, - StrictCost: strictCost, - }, - environment.StoredExpressions, - ), v.FailurePolicy, "webhook", "validating", v.Name) - }) - return v.compiledMatcher -} - -func (v *validatingWebhookAccessor) GetParsedNamespaceSelector() (labels.Selector, error) { - v.initNamespaceSelector.Do(func() { - v.namespaceSelector, v.namespaceSelectorErr = metav1.LabelSelectorAsSelector(v.NamespaceSelector) - }) - return v.namespaceSelector, v.namespaceSelectorErr -} - 
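For orientation, a minimal sketch of how the WebhookAccessor API deleted above is typically driven, assuming the pre-removal vendored k8s.io/apiserver and k8s.io/api packages; the uid, configuration name, and webhook name below are placeholders, not values from this change:

package main

import (
	"fmt"

	v1 "k8s.io/api/admissionregistration/v1"
	"k8s.io/apiserver/pkg/admission/plugin/webhook"
)

func main() {
	// Wrap a ValidatingWebhook in the generic accessor, as the admission
	// plugin code above does, then read its configuration through the
	// type-agnostic interface.
	fail := v1.Fail
	hook := &v1.ValidatingWebhook{
		Name:                    "deny-privileged.example.com", // placeholder
		FailurePolicy:           &fail,
		AdmissionReviewVersions: []string{"v1"},
	}
	acc := webhook.NewValidatingWebhookAccessor("uid-1", "example-config", hook)

	fmt.Println(acc.GetUID(), acc.GetConfigurationName(), acc.GetName())
	if fp := acc.GetFailurePolicy(); fp != nil {
		fmt.Println("failure policy:", *fp)
	}
	// GetValidatingWebhook reports which concrete webhook type the accessor wraps.
	if vw, ok := acc.GetValidatingWebhook(); ok {
		fmt.Println("admissionReviewVersions:", vw.AdmissionReviewVersions)
	}
}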
-func (v *validatingWebhookAccessor) GetParsedObjectSelector() (labels.Selector, error) { - v.initObjectSelector.Do(func() { - v.objectSelector, v.objectSelectorErr = metav1.LabelSelectorAsSelector(v.ObjectSelector) - }) - return v.objectSelector, v.objectSelectorErr -} - -func (m *validatingWebhookAccessor) GetType() string { - return "validate" -} - -func (v *validatingWebhookAccessor) GetName() string { - return v.Name -} - -func (v *validatingWebhookAccessor) GetClientConfig() v1.WebhookClientConfig { - return v.ClientConfig -} - -func (v *validatingWebhookAccessor) GetRules() []v1.RuleWithOperations { - return v.Rules -} - -func (v *validatingWebhookAccessor) GetFailurePolicy() *v1.FailurePolicyType { - return v.FailurePolicy -} - -func (v *validatingWebhookAccessor) GetMatchPolicy() *v1.MatchPolicyType { - return v.MatchPolicy -} - -func (v *validatingWebhookAccessor) GetNamespaceSelector() *metav1.LabelSelector { - return v.NamespaceSelector -} - -func (v *validatingWebhookAccessor) GetObjectSelector() *metav1.LabelSelector { - return v.ObjectSelector -} - -func (v *validatingWebhookAccessor) GetSideEffects() *v1.SideEffectClass { - return v.SideEffects -} - -func (v *validatingWebhookAccessor) GetTimeoutSeconds() *int32 { - return v.TimeoutSeconds -} - -func (v *validatingWebhookAccessor) GetAdmissionReviewVersions() []string { - return v.AdmissionReviewVersions -} - -func (v *validatingWebhookAccessor) GetMatchConditions() []v1.MatchCondition { - return v.MatchConditions -} - -func (v *validatingWebhookAccessor) GetMutatingWebhook() (*v1.MutatingWebhook, bool) { - return nil, false -} - -func (v *validatingWebhookAccessor) GetValidatingWebhook() (*v1.ValidatingWebhook, bool) { - return v.ValidatingWebhook, true -} - -// hookClientConfigForWebhook construct a webhookutil.ClientConfig using a WebhookAccessor to access -// v1beta1.MutatingWebhook and v1beta1.ValidatingWebhook API objects. webhookutil.ClientConfig is used -// to create a HookClient and the purpose of the config struct is to share that with other packages -// that need to create a HookClient. -func hookClientConfigForWebhook(w WebhookAccessor) webhookutil.ClientConfig { - ret := webhookutil.ClientConfig{Name: w.GetName(), CABundle: w.GetClientConfig().CABundle} - if w.GetClientConfig().URL != nil { - ret.URL = *w.GetClientConfig().URL - } - if w.GetClientConfig().Service != nil { - ret.Service = &webhookutil.ClientConfigService{ - Name: w.GetClientConfig().Service.Name, - Namespace: w.GetClientConfig().Service.Namespace, - } - if w.GetClientConfig().Service.Port != nil { - ret.Service.Port = *w.GetClientConfig().Service.Port - } else { - ret.Service.Port = 443 - } - if w.GetClientConfig().Service.Path != nil { - ret.Service.Path = *w.GetClientConfig().Service.Path - } - } - return ret -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go deleted file mode 100644 index 087162e36..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package - -package webhookadmission // import "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go deleted file mode 100644 index 2f49b8976..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhookadmission - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// GroupName is the group name use in this package -const GroupName = "apiserver.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &WebhookAdmission{}, - ) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("WebhookAdmissionConfiguration"), - &WebhookAdmission{}, - ) - return nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go deleted file mode 100644 index 71ce47b1f..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhookadmission - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// WebhookAdmission provides configuration for the webhook admission controller. -type WebhookAdmission struct { - metav1.TypeMeta - - // KubeConfigFile is the path to the kubeconfig file. - KubeConfigFile string -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go deleted file mode 100644 index 58875a59f..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission -// +k8s:defaulter-gen=TypeMeta -// +groupName=apiserver.config.k8s.io - -// Package v1 is the v1 version of the API. -package v1 // import "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1" diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go deleted file mode 100644 index 4a9c0a689..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "apiserver.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("WebhookAdmissionConfiguration"), - &WebhookAdmission{}, - ) - return nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go deleted file mode 100644 index 632427d7d..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/types.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// WebhookAdmission provides configuration for the webhook admission controller. -type WebhookAdmission struct { - metav1.TypeMeta `json:",inline"` - - // KubeConfigFile is the path to the kubeconfig file. - KubeConfigFile string `json:"kubeConfigFile"` -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go deleted file mode 100644 index 4cf69291b..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.conversion.go +++ /dev/null @@ -1,68 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - webhookadmission "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*WebhookAdmission)(nil), (*webhookadmission.WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(a.(*WebhookAdmission), b.(*webhookadmission.WebhookAdmission), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*webhookadmission.WebhookAdmission)(nil), (*WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(a.(*webhookadmission.WebhookAdmission), b.(*WebhookAdmission), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { - out.KubeConfigFile = in.KubeConfigFile - return nil -} - -// Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission is an autogenerated conversion function. -func Convert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { - return autoConvert_v1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in, out, s) -} - -func autoConvert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { - out.KubeConfigFile = in.KubeConfigFile - return nil -} - -// Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission is an autogenerated conversion function. -func Convert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { - return autoConvert_webhookadmission_WebhookAdmission_To_v1_WebhookAdmission(in, out, s) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go deleted file mode 100644 index 839c1fc7a..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. -func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { - if in == nil { - return nil - } - out := new(WebhookAdmission) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WebhookAdmission) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go deleted file mode 100644 index dac177e93..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1/zz_generated.defaults.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go deleted file mode 100644 index c8ee58d5d..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission -// +k8s:defaulter-gen=TypeMeta -// +groupName=apiserver.config.k8s.io - -// Package v1alpha1 is the v1alpha1 version of the API. -package v1alpha1 // import "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1" diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go deleted file mode 100644 index 56489f780..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "apiserver.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &WebhookAdmission{}, - ) - return nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go deleted file mode 100644 index a49a6a813..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// WebhookAdmission provides configuration for the webhook admission controller. -type WebhookAdmission struct { - metav1.TypeMeta `json:",inline"` - - // KubeConfigFile is the path to the kubeconfig file. - KubeConfigFile string `json:"kubeConfigFile"` -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index 66aaecbd8..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,68 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - webhookadmission "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*WebhookAdmission)(nil), (*webhookadmission.WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(a.(*WebhookAdmission), b.(*webhookadmission.WebhookAdmission), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*webhookadmission.WebhookAdmission)(nil), (*WebhookAdmission)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(a.(*webhookadmission.WebhookAdmission), b.(*WebhookAdmission), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { - out.KubeConfigFile = in.KubeConfigFile - return nil -} - -// Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission is an autogenerated conversion function. 
-func Convert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in *WebhookAdmission, out *webhookadmission.WebhookAdmission, s conversion.Scope) error { - return autoConvert_v1alpha1_WebhookAdmission_To_webhookadmission_WebhookAdmission(in, out, s) -} - -func autoConvert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { - out.KubeConfigFile = in.KubeConfigFile - return nil -} - -// Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission is an autogenerated conversion function. -func Convert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in *webhookadmission.WebhookAdmission, out *WebhookAdmission, s conversion.Scope) error { - return autoConvert_webhookadmission_WebhookAdmission_To_v1alpha1_WebhookAdmission(in, out, s) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index f997f4aba..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. -func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { - if in == nil { - return nil - } - out := new(WebhookAdmission) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WebhookAdmission) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go deleted file mode 100644 index 5070cb91b..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go deleted file mode 100644 index 27c3ede0d..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package webhookadmission - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookAdmission) DeepCopyInto(out *WebhookAdmission) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookAdmission. -func (in *WebhookAdmission) DeepCopy() *WebhookAdmission { - if in == nil { - return nil - } - out := new(WebhookAdmission) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WebhookAdmission) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go deleted file mode 100644 index 7b845f1d1..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - "io" - "path" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission" - "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1" - "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1" -) - -var ( - scheme = runtime.NewScheme() - codecs = serializer.NewCodecFactory(scheme) -) - -func init() { - utilruntime.Must(webhookadmission.AddToScheme(scheme)) - utilruntime.Must(v1.AddToScheme(scheme)) - utilruntime.Must(v1alpha1.AddToScheme(scheme)) -} - -// LoadConfig extract the KubeConfigFile from configFile -func LoadConfig(configFile io.Reader) (string, error) { - var kubeconfigFile string - if configFile != nil { - // we have a config so parse it. - data, err := io.ReadAll(configFile) - if err != nil { - return "", err - } - decoder := codecs.UniversalDecoder() - decodedObj, err := runtime.Decode(decoder, data) - if err != nil { - return "", err - } - config, ok := decodedObj.(*webhookadmission.WebhookAdmission) - if !ok { - return "", fmt.Errorf("unexpected type: %T", decodedObj) - } - - if !path.IsAbs(config.KubeConfigFile) { - return "", field.Invalid(field.NewPath("kubeConfigFile"), config.KubeConfigFile, "must be an absolute file path") - } - - kubeconfigFile = config.KubeConfigFile - } - return kubeconfigFile, nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go deleted file mode 100644 index af33a09f4..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/interfaces.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "context" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/admission/plugin/webhook" -) - -type VersionedAttributeAccessor interface { - VersionedAttribute(gvk schema.GroupVersionKind) (*admission.VersionedAttributes, error) -} - -// Source can list dynamic webhook plugins. -type Source interface { - Webhooks() []webhook.WebhookAccessor - HasSynced() bool -} - -// WebhookInvocation describes how to call a webhook, including the resource and subresource the webhook registered for, -// and the kind that should be sent to the webhook. 
-type WebhookInvocation struct { - Webhook webhook.WebhookAccessor - Resource schema.GroupVersionResource - Subresource string - Kind schema.GroupVersionKind -} - -// Dispatcher dispatches webhook call to a list of webhooks with admission attributes as argument. -type Dispatcher interface { - // Dispatch a request to the webhooks. Dispatcher may choose not to - // call a hook, either because the rules of the hook does not match, or - // the namespaceSelector or the objectSelector of the hook does not - // match. A non-nil error means the request is rejected. - Dispatch(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces, hooks []webhook.WebhookAccessor) error -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go deleted file mode 100644 index f067b3f72..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go +++ /dev/null @@ -1,264 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "context" - "fmt" - "io" - - "k8s.io/klog/v2" - - admissionv1 "k8s.io/api/admission/v1" - admissionv1beta1 "k8s.io/api/admission/v1beta1" - v1 "k8s.io/api/admissionregistration/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/admission" - genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer" - admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" - "k8s.io/apiserver/pkg/admission/plugin/cel" - "k8s.io/apiserver/pkg/admission/plugin/webhook" - "k8s.io/apiserver/pkg/admission/plugin/webhook/config" - "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace" - "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object" - "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules" - "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" - webhookutil "k8s.io/apiserver/pkg/util/webhook" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" -) - -// Webhook is an abstract admission plugin with all the infrastructure to define Admit or Validate on-top. -type Webhook struct { - *admission.Handler - - sourceFactory sourceFactory - - hookSource Source - clientManager *webhookutil.ClientManager - namespaceMatcher *namespace.Matcher - objectMatcher *object.Matcher - dispatcher Dispatcher - filterCompiler cel.FilterCompiler - authorizer authorizer.Authorizer -} - -var ( - _ genericadmissioninit.WantsExternalKubeClientSet = &Webhook{} - _ admission.Interface = &Webhook{} -) - -type sourceFactory func(f informers.SharedInformerFactory) Source -type dispatcherFactory func(cm *webhookutil.ClientManager) Dispatcher - -// NewWebhook creates a new generic admission webhook. 
-func NewWebhook(handler *admission.Handler, configFile io.Reader, sourceFactory sourceFactory, dispatcherFactory dispatcherFactory) (*Webhook, error) { - kubeconfigFile, err := config.LoadConfig(configFile) - if err != nil { - return nil, err - } - - cm, err := webhookutil.NewClientManager( - []schema.GroupVersion{ - admissionv1beta1.SchemeGroupVersion, - admissionv1.SchemeGroupVersion, - }, - admissionv1beta1.AddToScheme, - admissionv1.AddToScheme, - ) - if err != nil { - return nil, err - } - authInfoResolver, err := webhookutil.NewDefaultAuthenticationInfoResolver(kubeconfigFile) - if err != nil { - return nil, err - } - // Set defaults which may be overridden later. - cm.SetAuthenticationInfoResolver(authInfoResolver) - cm.SetServiceResolver(webhookutil.NewDefaultServiceResolver()) - - return &Webhook{ - Handler: handler, - sourceFactory: sourceFactory, - clientManager: &cm, - namespaceMatcher: &namespace.Matcher{}, - objectMatcher: &object.Matcher{}, - dispatcher: dispatcherFactory(&cm), - filterCompiler: cel.NewFilterCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), utilfeature.DefaultFeatureGate.Enabled(features.StrictCostEnforcementForWebhooks))), - }, nil -} - -// SetAuthenticationInfoResolverWrapper sets the -// AuthenticationInfoResolverWrapper. -// TODO find a better way wire this, but keep this pull small for now. -func (a *Webhook) SetAuthenticationInfoResolverWrapper(wrapper webhookutil.AuthenticationInfoResolverWrapper) { - a.clientManager.SetAuthenticationInfoResolverWrapper(wrapper) -} - -// SetServiceResolver sets a service resolver for the webhook admission plugin. -// Passing a nil resolver does not have an effect, instead a default one will be used. -func (a *Webhook) SetServiceResolver(sr webhookutil.ServiceResolver) { - a.clientManager.SetServiceResolver(sr) -} - -// SetExternalKubeClientSet implements the WantsExternalKubeInformerFactory interface. -// It sets external ClientSet for admission plugins that need it -func (a *Webhook) SetExternalKubeClientSet(client clientset.Interface) { - a.namespaceMatcher.Client = client -} - -// SetExternalKubeInformerFactory implements the WantsExternalKubeInformerFactory interface. -func (a *Webhook) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { - namespaceInformer := f.Core().V1().Namespaces() - a.namespaceMatcher.NamespaceLister = namespaceInformer.Lister() - a.hookSource = a.sourceFactory(f) - a.SetReadyFunc(func() bool { - return namespaceInformer.Informer().HasSynced() && a.hookSource.HasSynced() - }) -} - -func (a *Webhook) SetAuthorizer(authorizer authorizer.Authorizer) { - a.authorizer = authorizer -} - -// ValidateInitialization implements the InitializationValidator interface. -func (a *Webhook) ValidateInitialization() error { - if a.hookSource == nil { - return fmt.Errorf("kubernetes client is not properly setup") - } - if err := a.namespaceMatcher.Validate(); err != nil { - return fmt.Errorf("namespaceMatcher is not properly setup: %v", err) - } - if err := a.clientManager.Validate(); err != nil { - return fmt.Errorf("clientManager is not properly setup: %v", err) - } - return nil -} - -// ShouldCallHook returns invocation details if the webhook should be called, nil if the webhook should not be called, -// or an error if an error was encountered during evaluation. 
-func (a *Webhook) ShouldCallHook(ctx context.Context, h webhook.WebhookAccessor, attr admission.Attributes, o admission.ObjectInterfaces, v VersionedAttributeAccessor) (*WebhookInvocation, *apierrors.StatusError) { - matches, matchNsErr := a.namespaceMatcher.MatchNamespaceSelector(h, attr) - // Should not return an error here for webhooks which do not apply to the request, even if err is an unexpected scenario. - if !matches && matchNsErr == nil { - return nil, nil - } - - // Should not return an error here for webhooks which do not apply to the request, even if err is an unexpected scenario. - matches, matchObjErr := a.objectMatcher.MatchObjectSelector(h, attr) - if !matches && matchObjErr == nil { - return nil, nil - } - - var invocation *WebhookInvocation - for _, r := range h.GetRules() { - m := rules.Matcher{Rule: r, Attr: attr} - if m.Matches() { - invocation = &WebhookInvocation{ - Webhook: h, - Resource: attr.GetResource(), - Subresource: attr.GetSubresource(), - Kind: attr.GetKind(), - } - break - } - } - if invocation == nil && h.GetMatchPolicy() != nil && *h.GetMatchPolicy() == v1.Equivalent { - attrWithOverride := &attrWithResourceOverride{Attributes: attr} - equivalents := o.GetEquivalentResourceMapper().EquivalentResourcesFor(attr.GetResource(), attr.GetSubresource()) - // honor earlier rules first - OuterLoop: - for _, r := range h.GetRules() { - // see if the rule matches any of the equivalent resources - for _, equivalent := range equivalents { - if equivalent == attr.GetResource() { - // exclude attr.GetResource(), which we already checked - continue - } - attrWithOverride.resource = equivalent - m := rules.Matcher{Rule: r, Attr: attrWithOverride} - if m.Matches() { - kind := o.GetEquivalentResourceMapper().KindFor(equivalent, attr.GetSubresource()) - if kind.Empty() { - return nil, apierrors.NewInternalError(fmt.Errorf("unable to convert to %v: unknown kind", equivalent)) - } - invocation = &WebhookInvocation{ - Webhook: h, - Resource: equivalent, - Subresource: attr.GetSubresource(), - Kind: kind, - } - break OuterLoop - } - } - } - } - - if invocation == nil { - return nil, nil - } - if matchNsErr != nil { - return nil, matchNsErr - } - if matchObjErr != nil { - return nil, matchObjErr - } - matchConditions := h.GetMatchConditions() - if len(matchConditions) > 0 { - versionedAttr, err := v.VersionedAttribute(invocation.Kind) - if err != nil { - return nil, apierrors.NewInternalError(err) - } - - matcher := h.GetCompiledMatcher(a.filterCompiler) - matchResult := matcher.Match(ctx, versionedAttr, nil, a.authorizer) - - if matchResult.Error != nil { - klog.Warningf("Failed evaluating match conditions, failing closed %v: %v", h.GetName(), matchResult.Error) - return nil, apierrors.NewForbidden(attr.GetResource().GroupResource(), attr.GetName(), matchResult.Error) - } else if !matchResult.Matches { - admissionmetrics.Metrics.ObserveMatchConditionExclusion(ctx, h.GetName(), "webhook", h.GetType(), string(attr.GetOperation())) - // if no match, always skip webhook - return nil, nil - } - } - - return invocation, nil -} - -type attrWithResourceOverride struct { - admission.Attributes - resource schema.GroupVersionResource -} - -func (a *attrWithResourceOverride) GetResource() schema.GroupVersionResource { return a.resource } - -// Dispatch is called by the downstream Validate or Admit methods. 
-func (a *Webhook) Dispatch(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { - if rules.IsExemptAdmissionConfigurationResource(attr) { - return nil - } - if !a.WaitForReady() { - return admission.NewForbidden(attr, fmt.Errorf("not yet ready to handle request")) - } - hooks := a.hookSource.Webhooks() - return a.dispatcher.Dispatch(ctx, attr, o, hooks) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/interface.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/interface.go deleted file mode 100644 index 094a019d1..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/interface.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package matchconditions - -import ( - "context" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/admission" - "k8s.io/apiserver/pkg/authorization/authorizer" -) - -type MatchResult struct { - Matches bool - Error error - FailedConditionName string -} - -// Matcher contains logic for converting Evaluations to bool of matches or does not match -type Matcher interface { - // Match is used to take cel evaluations and convert into decisions - Match(ctx context.Context, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object, authz authorizer.Authorizer) MatchResult -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go deleted file mode 100644 index 21dd28f6c..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package matchconditions - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/google/cel-go/cel" - celtypes "github.com/google/cel-go/common/types" - - v1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apiserver/pkg/admission" - admissionmetrics "k8s.io/apiserver/pkg/admission/metrics" - celplugin "k8s.io/apiserver/pkg/admission/plugin/cel" - celconfig "k8s.io/apiserver/pkg/apis/cel" - "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/klog/v2" -) - -var _ celplugin.ExpressionAccessor = &MatchCondition{} - -// MatchCondition contains the inputs needed to compile, evaluate and match a cel expression -type MatchCondition v1.MatchCondition - -func (v *MatchCondition) GetExpression() string { - return v.Expression -} - -func (v *MatchCondition) ReturnTypes() []*cel.Type { - return []*cel.Type{cel.BoolType} -} - -var _ Matcher = &matcher{} - -// matcher evaluates compiled cel expressions and determines if they match the given request or not -type matcher struct { - filter celplugin.Filter - failPolicy v1.FailurePolicyType - matcherType string - matcherKind string - objectName string -} - -func NewMatcher(filter celplugin.Filter, failPolicy *v1.FailurePolicyType, matcherKind, matcherType, objectName string) Matcher { - var f v1.FailurePolicyType - if failPolicy == nil { - f = v1.Fail - } else { - f = *failPolicy - } - return &matcher{ - filter: filter, - failPolicy: f, - matcherKind: matcherKind, - matcherType: matcherType, - objectName: objectName, - } -} - -func (m *matcher) Match(ctx context.Context, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object, authz authorizer.Authorizer) MatchResult { - t := time.Now() - evalResults, _, err := m.filter.ForInput(ctx, versionedAttr, celplugin.CreateAdmissionRequest(versionedAttr.Attributes, metav1.GroupVersionResource(versionedAttr.GetResource()), metav1.GroupVersionKind(versionedAttr.VersionedKind)), celplugin.OptionalVariableBindings{ - VersionedParams: versionedParams, - Authorizer: authz, - }, nil, celconfig.RuntimeCELCostBudgetMatchConditions) - - if err != nil { - admissionmetrics.Metrics.ObserveMatchConditionEvaluationTime(ctx, time.Since(t), m.objectName, m.matcherKind, m.matcherType, string(versionedAttr.GetOperation())) - // filter returning error is unexpected and not an evaluation error so not incrementing metric here - if m.failPolicy == v1.Fail { - return MatchResult{ - Error: err, - } - } else if m.failPolicy == v1.Ignore { - return MatchResult{ - Matches: false, - } - } - //TODO: add default so that if in future we add different failure types it doesn't fall through - } - - errorList := []error{} - for _, evalResult := range evalResults { - matchCondition, ok := evalResult.ExpressionAccessor.(*MatchCondition) - if !ok { - // This shouldnt happen, but if it does treat same as eval error - klog.Error("Invalid type conversion to MatchCondition") - errorList = append(errorList, errors.New(fmt.Sprintf("internal error converting ExpressionAccessor to MatchCondition"))) - continue - } - if evalResult.Error != nil { - errorList = append(errorList, evalResult.Error) - admissionmetrics.Metrics.ObserveMatchConditionEvalError(ctx, m.objectName, m.matcherKind, m.matcherType, string(versionedAttr.GetOperation())) - } - if evalResult.EvalResult == celtypes.False { - admissionmetrics.Metrics.ObserveMatchConditionEvaluationTime(ctx, time.Since(t), m.objectName, 
m.matcherKind, m.matcherType, string(versionedAttr.GetOperation())) - // If any condition false, skip calling webhook always - return MatchResult{ - Matches: false, - FailedConditionName: matchCondition.Name, - } - } - } - if len(errorList) > 0 { - admissionmetrics.Metrics.ObserveMatchConditionEvaluationTime(ctx, time.Since(t), m.objectName, m.matcherKind, m.matcherType, string(versionedAttr.GetOperation())) - // If mix of true and eval errors then resort to fail policy - if m.failPolicy == v1.Fail { - // mix of true and errors with fail policy fail should fail request without calling webhook - err = utilerrors.NewAggregate(errorList) - return MatchResult{ - Error: err, - } - } else if m.failPolicy == v1.Ignore { - // if fail policy ignore then skip call to webhook - return MatchResult{ - Matches: false, - } - } - } - // if no results eval to false, return matches true with list of any errors encountered - return MatchResult{ - Matches: true, - } -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace/doc.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace/doc.go deleted file mode 100644 index 660001dff..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package namespace defines the utilities that are used by the webhook -// plugin to decide if a webhook should be applied to an object based on its -// namespace. -package namespace // import "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace" diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace/matcher.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace/matcher.go deleted file mode 100644 index 6427bc674..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace/matcher.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package namespace - -import ( - "context" - "fmt" - - v1 "k8s.io/api/core/v1" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apiserver/pkg/admission" - clientset "k8s.io/client-go/kubernetes" - corelisters "k8s.io/client-go/listers/core/v1" -) - -type NamespaceSelectorProvider interface { - // GetNamespaceSelector gets the webhook NamespaceSelector field. - GetParsedNamespaceSelector() (labels.Selector, error) -} - -// Matcher decides if a request is exempted by the NamespaceSelector of a -// webhook configuration. -type Matcher struct { - NamespaceLister corelisters.NamespaceLister - Client clientset.Interface -} - -func (m *Matcher) GetNamespace(name string) (*v1.Namespace, error) { - return m.NamespaceLister.Get(name) -} - -// Validate checks if the Matcher has a NamespaceLister and Client. -func (m *Matcher) Validate() error { - var errs []error - if m.NamespaceLister == nil { - errs = append(errs, fmt.Errorf("the namespace matcher requires a namespaceLister")) - } - if m.Client == nil { - errs = append(errs, fmt.Errorf("the namespace matcher requires a client")) - } - return utilerrors.NewAggregate(errs) -} - -// GetNamespaceLabels gets the labels of the namespace related to the attr. -func (m *Matcher) GetNamespaceLabels(attr admission.Attributes) (map[string]string, error) { - // If the request itself is creating or updating a namespace, then get the - // labels from attr.Object, because namespaceLister doesn't have the latest - // namespace yet. - // - // However, if the request is deleting a namespace, then get the label from - // the namespace in the namespaceLister, because a delete request is not - // going to change the object, and attr.Object will be a DeleteOptions - // rather than a namespace object. - if attr.GetResource().Resource == "namespaces" && - len(attr.GetSubresource()) == 0 && - (attr.GetOperation() == admission.Create || attr.GetOperation() == admission.Update) { - accessor, err := meta.Accessor(attr.GetObject()) - if err != nil { - return nil, err - } - return accessor.GetLabels(), nil - } - - namespaceName := attr.GetNamespace() - namespace, err := m.NamespaceLister.Get(namespaceName) - if err != nil && !apierrors.IsNotFound(err) { - return nil, err - } - if apierrors.IsNotFound(err) { - // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not - namespace, err = m.Client.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - } - return namespace.Labels, nil -} - -// MatchNamespaceSelector decideds whether the request matches the -// namespaceSelctor of the webhook. Only when they match, the webhook is called. -func (m *Matcher) MatchNamespaceSelector(p NamespaceSelectorProvider, attr admission.Attributes) (bool, *apierrors.StatusError) { - namespaceName := attr.GetNamespace() - if len(namespaceName) == 0 && attr.GetResource().Resource != "namespaces" { - // If the request is about a cluster scoped resource, and it is not a - // namespace, it is never exempted. - // TODO: figure out a way selective exempt cluster scoped resources. 
- // Also update the comment in types.go - return true, nil - } - selector, err := p.GetParsedNamespaceSelector() - if err != nil { - return false, apierrors.NewInternalError(err) - } - if selector.Empty() { - return true, nil - } - - namespaceLabels, err := m.GetNamespaceLabels(attr) - // this means the namespace is not found, for backwards compatibility, - // return a 404 - if apierrors.IsNotFound(err) { - status, ok := err.(apierrors.APIStatus) - if !ok { - return false, apierrors.NewInternalError(err) - } - return false, &apierrors.StatusError{ErrStatus: status.Status()} - } - if err != nil { - return false, apierrors.NewInternalError(err) - } - return selector.Matches(labels.Set(namespaceLabels)), nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object/doc.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object/doc.go deleted file mode 100644 index 8964afa6c..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package object defines the utilities that are used by the webhook plugin to -// decide if a webhook should run, as long as either the old object or the new -// object has labels matching the webhook config's objectSelector. -package object // import "k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object" diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object/matcher.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object/matcher.go deleted file mode 100644 index aadf90732..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object/matcher.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/admission" - "k8s.io/klog/v2" -) - -type ObjectSelectorProvider interface { - // GetObjectSelector gets the webhook ObjectSelector field. - GetParsedObjectSelector() (labels.Selector, error) -} - -// Matcher decides if a request selected by the ObjectSelector. 
-type Matcher struct { -} - -func matchObject(obj runtime.Object, selector labels.Selector) bool { - if obj == nil { - return false - } - accessor, err := meta.Accessor(obj) - if err != nil { - klog.V(5).InfoS("Accessing metadata failed", "object", obj, "err", err) - return false - } - return selector.Matches(labels.Set(accessor.GetLabels())) - -} - -// MatchObjectSelector decideds whether the request matches the ObjectSelector -// of the webhook. Only when they match, the webhook is called. -func (m *Matcher) MatchObjectSelector(p ObjectSelectorProvider, attr admission.Attributes) (bool, *apierrors.StatusError) { - selector, err := p.GetParsedObjectSelector() - if err != nil { - return false, apierrors.NewInternalError(err) - } - if selector.Empty() { - return true, nil - } - return matchObject(attr.GetObject(), selector) || matchObject(attr.GetOldObject(), selector), nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules/rules.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules/rules.go deleted file mode 100644 index b926f65dc..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules/rules.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rules - -import ( - "strings" - - "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/admission" -) - -// Matcher determines if the Attr matches the Rule. -type Matcher struct { - Rule v1.RuleWithOperations - Attr admission.Attributes -} - -// Matches returns if the Attr matches the Rule. -func (r *Matcher) Matches() bool { - return r.scope() && - r.operation() && - r.group() && - r.version() && - r.resource() -} - -func exactOrWildcard(items []string, requested string) bool { - for _, item := range items { - if item == "*" { - return true - } - if item == requested { - return true - } - } - - return false -} - -var namespaceResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} - -func (r *Matcher) scope() bool { - if r.Rule.Scope == nil || *r.Rule.Scope == v1.AllScopes { - return true - } - // attr.GetNamespace() is set to the name of the namespace for requests of the namespace object itself. 
- switch *r.Rule.Scope { - case v1.NamespacedScope: - // first make sure that we are not requesting a namespace object (namespace objects are cluster-scoped) - return r.Attr.GetResource() != namespaceResource && r.Attr.GetNamespace() != metav1.NamespaceNone - case v1.ClusterScope: - // also return true if the request is for a namespace object (namespace objects are cluster-scoped) - return r.Attr.GetResource() == namespaceResource || r.Attr.GetNamespace() == metav1.NamespaceNone - default: - return false - } -} - -func (r *Matcher) group() bool { - return exactOrWildcard(r.Rule.APIGroups, r.Attr.GetResource().Group) -} - -func (r *Matcher) version() bool { - return exactOrWildcard(r.Rule.APIVersions, r.Attr.GetResource().Version) -} - -func (r *Matcher) operation() bool { - attrOp := r.Attr.GetOperation() - for _, op := range r.Rule.Operations { - if op == v1.OperationAll { - return true - } - // The constants are the same such that this is a valid cast (and this - // is tested). - if op == v1.OperationType(attrOp) { - return true - } - } - return false -} - -func splitResource(resSub string) (res, sub string) { - parts := strings.SplitN(resSub, "/", 2) - if len(parts) == 2 { - return parts[0], parts[1] - } - return parts[0], "" -} - -func (r *Matcher) resource() bool { - opRes, opSub := r.Attr.GetResource().Resource, r.Attr.GetSubresource() - for _, res := range r.Rule.Resources { - res, sub := splitResource(res) - resMatch := res == "*" || res == opRes - subMatch := sub == "*" || sub == opSub - if resMatch && subMatch { - return true - } - } - return false -} - -// IsExemptAdmissionConfigurationResource determines if an admission.Attributes object is describing -// the admission of a ValidatingWebhookConfiguration or a MutatingWebhookConfiguration or a ValidatingAdmissionPolicy or a ValidatingAdmissionPolicyBinding -func IsExemptAdmissionConfigurationResource(attr admission.Attributes) bool { - gvk := attr.GetKind() - if gvk.Group == "admissionregistration.k8s.io" { - if gvk.Kind == "ValidatingWebhookConfiguration" || gvk.Kind == "MutatingWebhookConfiguration" || gvk.Kind == "ValidatingAdmissionPolicy" || gvk.Kind == "ValidatingAdmissionPolicyBinding" { - return true - } - } - return false -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugins.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/plugins.go deleted file mode 100644 index 10a435d49..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/plugins.go +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" - - "k8s.io/klog/v2" -) - -// Factory is a function that returns an Interface for admission decisions. -// The config parameter provides an io.Reader handler to the factory in -// order to load specific configurations. If no configuration is provided -// the parameter is nil. 
-type Factory func(config io.Reader) (Interface, error) - -type Plugins struct { - lock sync.Mutex - registry map[string]Factory -} - -func NewPlugins() *Plugins { - return &Plugins{} -} - -// All registered admission options. -var ( - // PluginEnabledFn checks whether a plugin is enabled. By default, if you ask about it, it's enabled. - PluginEnabledFn = func(name string, config io.Reader) bool { - return true - } -) - -// PluginEnabledFunc is a function type that can provide an external check on whether an admission plugin may be enabled -type PluginEnabledFunc func(name string, config io.Reader) bool - -// Registered enumerates the names of all registered plugins. -func (ps *Plugins) Registered() []string { - ps.lock.Lock() - defer ps.lock.Unlock() - keys := []string{} - for k := range ps.registry { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// Register registers a plugin Factory by name. This -// is expected to happen during app startup. -func (ps *Plugins) Register(name string, plugin Factory) { - ps.lock.Lock() - defer ps.lock.Unlock() - if ps.registry != nil { - _, found := ps.registry[name] - if found { - klog.Fatalf("Admission plugin %q was registered twice", name) - } - } else { - ps.registry = map[string]Factory{} - } - - klog.V(1).InfoS("Registered admission plugin", "plugin", name) - ps.registry[name] = plugin -} - -// getPlugin creates an instance of the named plugin. It returns `false` if -// the name is not known. The error is returned only when the named provider was -// known but failed to initialize. The config parameter specifies the io.Reader -// handler of the configuration file for the cloud provider, or nil for no configuration. -func (ps *Plugins) getPlugin(name string, config io.Reader) (Interface, bool, error) { - ps.lock.Lock() - defer ps.lock.Unlock() - f, found := ps.registry[name] - if !found { - return nil, false, nil - } - - config1, config2, err := splitStream(config) - if err != nil { - return nil, true, err - } - if !PluginEnabledFn(name, config1) { - return nil, true, nil - } - - ret, err := f(config2) - return ret, true, err -} - -// splitStream reads the stream bytes and constructs two copies of it. -func splitStream(config io.Reader) (io.Reader, io.Reader, error) { - if config == nil || reflect.ValueOf(config).IsNil() { - return nil, nil, nil - } - - configBytes, err := io.ReadAll(config) - if err != nil { - return nil, nil, err - } - - return bytes.NewBuffer(configBytes), bytes.NewBuffer(configBytes), nil -} - -// NewFromPlugins returns an admission.Interface that will enforce admission control decisions of all -// the given plugins. 
-func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigProvider, pluginInitializer PluginInitializer, decorator Decorator) (Interface, error) { - handlers := []Interface{} - mutationPlugins := []string{} - validationPlugins := []string{} - for _, pluginName := range pluginNames { - pluginConfig, err := configProvider.ConfigFor(pluginName) - if err != nil { - return nil, err - } - - plugin, err := ps.InitPlugin(pluginName, pluginConfig, pluginInitializer) - if err != nil { - return nil, err - } - if plugin != nil { - if decorator != nil { - handlers = append(handlers, decorator.Decorate(plugin, pluginName)) - } else { - handlers = append(handlers, plugin) - } - - if _, ok := plugin.(MutationInterface); ok { - mutationPlugins = append(mutationPlugins, pluginName) - } - if _, ok := plugin.(ValidationInterface); ok { - validationPlugins = append(validationPlugins, pluginName) - } - } - } - if len(mutationPlugins) != 0 { - klog.Infof("Loaded %d mutating admission controller(s) successfully in the following order: %s.", len(mutationPlugins), strings.Join(mutationPlugins, ",")) - } - if len(validationPlugins) != 0 { - klog.Infof("Loaded %d validating admission controller(s) successfully in the following order: %s.", len(validationPlugins), strings.Join(validationPlugins, ",")) - } - return newReinvocationHandler(chainAdmissionHandler(handlers)), nil -} - -// InitPlugin creates an instance of the named interface. -func (ps *Plugins) InitPlugin(name string, config io.Reader, pluginInitializer PluginInitializer) (Interface, error) { - if name == "" { - klog.Info("No admission plugin specified.") - return nil, nil - } - - plugin, found, err := ps.getPlugin(name, config) - if err != nil { - return nil, fmt.Errorf("couldn't init admission plugin %q: %v", name, err) - } - if !found { - return nil, fmt.Errorf("unknown admission plugin: %s", name) - } - - pluginInitializer.Initialize(plugin) - // ensure that plugins have been properly initialized - if err := ValidateInitialization(plugin); err != nil { - return nil, fmt.Errorf("failed to initialize admission plugin %q: %v", name, err) - } - - return plugin, nil -} - -// ValidateInitialization will call the InitializationValidate function in each plugin if they implement -// the InitializationValidator interface. -func ValidateInitialization(plugin Interface) error { - if validater, ok := plugin.(InitializationValidator); ok { - err := validater.ValidateInitialization() - if err != nil { - return err - } - } - return nil -} - -type PluginInitializers []PluginInitializer - -func (pp PluginInitializers) Initialize(plugin Interface) { - for _, p := range pp { - p.Initialize(plugin) - } -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/reinvocation.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/reinvocation.go deleted file mode 100644 index f93c703a1..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/reinvocation.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -import "context" - -// newReinvocationHandler creates a handler that wraps the provided admission chain and reinvokes it -// if needed according to re-invocation policy of the webhooks. -func newReinvocationHandler(admissionChain Interface) Interface { - return &reinvoker{admissionChain} -} - -type reinvoker struct { - admissionChain Interface -} - -// Admit performs an admission control check using the wrapped admission chain, reinvoking the -// admission chain if needed according to the reinvocation policy. Plugins are expected to check -// the admission attributes' reinvocation context against their reinvocation policy to decide if -// they should re-run, and to update the reinvocation context if they perform any mutations. -func (r *reinvoker) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { - if mutator, ok := r.admissionChain.(MutationInterface); ok { - err := mutator.Admit(ctx, a, o) - if err != nil { - return err - } - s := a.GetReinvocationContext() - if s.ShouldReinvoke() { - s.SetIsReinvoke() - // Calling admit a second time will reinvoke all in-tree plugins - // as well as any webhook plugins that need to be reinvoked based on the - // reinvocation policy. - return mutator.Admit(ctx, a, o) - } - } - return nil -} - -// Validate performs an admission control check using the wrapped admission chain, and returns immediately on first error. -func (r *reinvoker) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { - if validator, ok := r.admissionChain.(ValidationInterface); ok { - return validator.Validate(ctx, a, o) - } - return nil -} - -// Handles will return true if any of the admission chain handlers handle the given operation. -func (r *reinvoker) Handles(operation Operation) bool { - return r.admissionChain.Handles(operation) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/admission/util.go b/constraint/vendor/k8s.io/apiserver/pkg/admission/util.go deleted file mode 100644 index 842932f73..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/admission/util.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package admission - -import "k8s.io/apimachinery/pkg/runtime" - -type RuntimeObjectInterfaces struct { - runtime.ObjectCreater - runtime.ObjectTyper - runtime.ObjectDefaulter - runtime.ObjectConvertor - runtime.EquivalentResourceMapper -} - -func NewObjectInterfacesFromScheme(scheme *runtime.Scheme) ObjectInterfaces { - return &RuntimeObjectInterfaces{scheme, scheme, scheme, scheme, runtime.NewEquivalentResourceRegistry()} -} - -func (r *RuntimeObjectInterfaces) GetObjectCreater() runtime.ObjectCreater { - return r.ObjectCreater -} -func (r *RuntimeObjectInterfaces) GetObjectTyper() runtime.ObjectTyper { - return r.ObjectTyper -} -func (r *RuntimeObjectInterfaces) GetObjectDefaulter() runtime.ObjectDefaulter { - return r.ObjectDefaulter -} -func (r *RuntimeObjectInterfaces) GetObjectConvertor() runtime.ObjectConvertor { - return r.ObjectConvertor -} -func (r *RuntimeObjectInterfaces) GetEquivalentResourceMapper() runtime.EquivalentResourceMapper { - return r.EquivalentResourceMapper -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go index a31b87536..a610ebc1a 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -165,6 +165,25 @@ type AuthenticationConfiguration struct { metav1.TypeMeta JWT []JWTAuthenticator + + // If present --anonymous-auth must not be set + Anonymous *AnonymousAuthConfig +} + +// AnonymousAuthConfig provides the configuration for the anonymous authenticator. +type AnonymousAuthConfig struct { + Enabled bool + + // If set, anonymous auth is only allowed if the request meets one of the + // conditions. + Conditions []AnonymousAuthCondition +} + +// AnonymousAuthCondition describes the condition under which anonymous auth +// should be enabled. +type AnonymousAuthCondition struct { + // Path for which anonymous auth is enabled. + Path string } // JWTAuthenticator provides the configuration for a single JWT authenticator. @@ -382,6 +401,13 @@ type WebhookMatchCondition struct { // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, // the contents would be converted to the v1 version before evaluating the CEL expression. // + // - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default' + // - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'. + // - 'user' is the user to test for. e.g. request.user == 'alice' + // - 'groups' is the groups to test for. e.g. ('group1' in request.groups) + // - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator. + // - 'uid' is the information about the requesting user. e.g. 
request.uid == '1' + // // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Expression string } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go index b71b53c65..46fb841a5 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go @@ -48,3 +48,12 @@ func SetDefaults_KMSConfiguration(obj *KMSConfiguration) { obj.CacheSize = &defaultCacheSize } } + +func SetDefaults_WebhookConfiguration(obj *WebhookConfiguration) { + if obj.AuthorizedTTL.Duration == 0 { + obj.AuthorizedTTL.Duration = 5 * time.Minute + } + if obj.UnauthorizedTTL.Duration == 0 { + obj.UnauthorizedTTL.Duration = 30 * time.Second + } +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go index 0de8db497..7b1b51b62 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go @@ -47,6 +47,7 @@ func init() { func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &AdmissionConfiguration{}, + &AuthorizationConfiguration{}, &EncryptionConfiguration{}, ) // also register into the v1 group as EncryptionConfig (due to a docs bug) diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go index e139dceb9..18328c558 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go @@ -48,3 +48,129 @@ type AdmissionPluginConfiguration struct { // +optional Configuration *runtime.Unknown `json:"configuration"` } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthorizationConfiguration struct { + metav1.TypeMeta + + // Authorizers is an ordered list of authorizers to + // authorize requests against. + // This is similar to the --authorization-modes kube-apiserver flag + // Must be at least one. + Authorizers []AuthorizerConfiguration `json:"authorizers"` +} + +const ( + TypeWebhook AuthorizerType = "Webhook" + FailurePolicyNoOpinion string = "NoOpinion" + FailurePolicyDeny string = "Deny" + AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" + AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" +) + +type AuthorizerType string + +type AuthorizerConfiguration struct { + // Type refers to the type of the authorizer + // "Webhook" is supported in the generic API server + // Other API servers may support additional authorizer + // types like Node, RBAC, ABAC, etc. + Type string `json:"type"` + + // Name used to describe the webhook + // This is explicitly used in monitoring machinery for metrics + // Note: Names must be DNS1123 labels like `myauthorizername` or + // subdomains like `myauthorizer.example.domain` + // Required, with no default + Name string `json:"name"` + + // Webhook defines the configuration for a Webhook authorizer + // Must be defined when Type=Webhook + // Must not be defined when Type!=Webhook + Webhook *WebhookConfiguration `json:"webhook,omitempty"` +} + +type WebhookConfiguration struct { + // The duration to cache 'authorized' responses from the webhook + // authorizer. 
+ // Same as setting `--authorization-webhook-cache-authorized-ttl` flag + // Default: 5m0s + AuthorizedTTL metav1.Duration `json:"authorizedTTL"` + // The duration to cache 'unauthorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag + // Default: 30s + UnauthorizedTTL metav1.Duration `json:"unauthorizedTTL"` + // Timeout for the webhook request + // Maximum allowed value is 30s. + // Required, no default value. + Timeout metav1.Duration `json:"timeout"` + // The API version of the authorization.k8s.io SubjectAccessReview to + // send to and expect from the webhook. + // Same as setting `--authorization-webhook-version` flag + // Valid values: v1beta1, v1 + // Required, no default value + SubjectAccessReviewVersion string `json:"subjectAccessReviewVersion"` + // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview + // version the CEL expressions are evaluated against + // Valid values: v1 + // Required, no default value + MatchConditionSubjectAccessReviewVersion string `json:"matchConditionSubjectAccessReviewVersion"` + // Controls the authorization decision when a webhook request fails to + // complete or returns a malformed response or errors evaluating + // matchConditions. + // Valid values: + // - NoOpinion: continue to subsequent authorizers to see if one of + // them allows the request + // - Deny: reject the request without consulting subsequent authorizers + // Required, with no default. + FailurePolicy string `json:"failurePolicy"` + + // ConnectionInfo defines how we talk to the webhook + ConnectionInfo WebhookConnectionInfo `json:"connectionInfo"` + + // matchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. + // 3. If at least one matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Deny, then the webhook rejects the request + // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + MatchConditions []WebhookMatchCondition `json:"matchConditions"` +} + +type WebhookConnectionInfo struct { + // Controls how the webhook should communicate with the server. + // Valid values: + // - KubeConfigFile: use the file specified in kubeConfigFile to locate the + // server. + // - InClusterConfig: use the in-cluster configuration to call the + // SubjectAccessReview API hosted by kube-apiserver. This mode is not + // allowed for kube-apiserver. + Type string `json:"type"` + + // Path to KubeConfigFile for connection info + // Required, if connectionInfo.Type is KubeConfig + KubeConfigFile *string `json:"kubeConfigFile"` +} + +type WebhookMatchCondition struct { + // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. + // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, + // the contents would be converted to the v1 version before evaluating the CEL expression. + // + // - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. 
has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default' + // - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'. + // - 'user' is the user to test for. e.g. request.user == 'alice' + // - 'groups' is the groups to test for. e.g. ('group1' in request.groups) + // - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator. + // - 'uid' is the information about the requesting user. e.g. request.uid == '1' + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + Expression string `json:"expression"` +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go index c0f218742..63083025a 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go @@ -67,6 +67,26 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*AuthorizationConfiguration)(nil), (*apiserver.AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(a.(*AuthorizationConfiguration), b.(*apiserver.AuthorizationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizationConfiguration)(nil), (*AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(a.(*apiserver.AuthorizationConfiguration), b.(*AuthorizationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuthorizerConfiguration)(nil), (*apiserver.AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(a.(*AuthorizerConfiguration), b.(*apiserver.AuthorizerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizerConfiguration)(nil), (*AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(a.(*apiserver.AuthorizerConfiguration), b.(*AuthorizerConfiguration), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*EncryptionConfiguration)(nil), (*apiserver.EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(a.(*EncryptionConfiguration), b.(*apiserver.EncryptionConfiguration), scope) }); err != nil { @@ -137,6 +157,36 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*WebhookConfiguration)(nil), (*apiserver.WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(a.(*WebhookConfiguration), b.(*apiserver.WebhookConfiguration), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*apiserver.WebhookConfiguration)(nil), (*WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(a.(*apiserver.WebhookConfiguration), b.(*WebhookConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookConnectionInfo)(nil), (*apiserver.WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(a.(*WebhookConnectionInfo), b.(*apiserver.WebhookConnectionInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConnectionInfo)(nil), (*WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(a.(*apiserver.WebhookConnectionInfo), b.(*WebhookConnectionInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookMatchCondition)(nil), (*apiserver.WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(a.(*WebhookMatchCondition), b.(*apiserver.WebhookMatchCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookMatchCondition)(nil), (*WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(a.(*apiserver.WebhookMatchCondition), b.(*WebhookMatchCondition), scope) + }); err != nil { + return err + } return nil } @@ -204,6 +254,50 @@ func Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfigu return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in, out, s) } +func autoConvert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error { + out.Authorizers = *(*[]apiserver.AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers)) + return nil +} + +// Convert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration is an autogenerated conversion function. +func Convert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error { + return autoConvert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in, out, s) +} + +func autoConvert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error { + out.Authorizers = *(*[]AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers)) + return nil +} + +// Convert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration is an autogenerated conversion function. 
+func Convert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(in, out, s) +} + +func autoConvert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error { + out.Type = apiserver.AuthorizerType(in.Type) + out.Name = in.Name + out.Webhook = (*apiserver.WebhookConfiguration)(unsafe.Pointer(in.Webhook)) + return nil +} + +// Convert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration is an autogenerated conversion function. +func Convert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error { + return autoConvert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in, out, s) +} + +func autoConvert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error { + out.Type = string(in.Type) + out.Name = in.Name + out.Webhook = (*WebhookConfiguration)(unsafe.Pointer(in.Webhook)) + return nil +} + +// Convert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration is an autogenerated conversion function. +func Convert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(in, out, s) +} + func autoConvert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(in *EncryptionConfiguration, out *apiserver.EncryptionConfiguration, s conversion.Scope) error { out.Resources = *(*[]apiserver.ResourceConfiguration)(unsafe.Pointer(&in.Resources)) return nil @@ -361,3 +455,83 @@ func autoConvert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(i func Convert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *apiserver.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error { return autoConvert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(in, out, s) } + +func autoConvert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error { + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion + out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion + out.FailurePolicy = in.FailurePolicy + if err := Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil { + return err + } + out.MatchConditions = *(*[]apiserver.WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions)) + return nil +} + +// Convert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration is an autogenerated conversion function. 
+func Convert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error { + return autoConvert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in, out, s) +} + +func autoConvert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error { + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion + out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion + out.FailurePolicy = in.FailurePolicy + if err := Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil { + return err + } + out.MatchConditions = *(*[]WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions)) + return nil +} + +// Convert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration is an autogenerated conversion function. +func Convert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(in, out, s) +} + +func autoConvert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error { + out.Type = in.Type + out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile)) + return nil +} + +// Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo is an autogenerated conversion function. +func Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error { + return autoConvert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in, out, s) +} + +func autoConvert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error { + out.Type = in.Type + out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile)) + return nil +} + +// Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo is an autogenerated conversion function. +func Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error { + return autoConvert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(in, out, s) +} + +func autoConvert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error { + out.Expression = in.Expression + return nil +} + +// Convert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition is an autogenerated conversion function. 
+func Convert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error { + return autoConvert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in, out, s) +} + +func autoConvert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error { + out.Expression = in.Expression + return nil +} + +// Convert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition is an autogenerated conversion function. +func Convert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error { + return autoConvert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(in, out, s) +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go index cbdcaa5a0..6afdbd3a2 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go @@ -100,6 +100,59 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Authorizers != nil { + in, out := &in.Authorizers, &out.Authorizers + *out = make([]AuthorizerConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration. +func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration { + if in == nil { + return nil + } + out := new(AuthorizationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) { + *out = *in + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration. +func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration { + if in == nil { + return nil + } + out := new(AuthorizerConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { *out = *in @@ -279,3 +332,65 @@ func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) { + *out = *in + out.AuthorizedTTL = in.AuthorizedTTL + out.UnauthorizedTTL = in.UnauthorizedTTL + out.Timeout = in.Timeout + in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo) + if in.MatchConditions != nil { + in, out := &in.MatchConditions, &out.MatchConditions + *out = make([]WebhookMatchCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration. +func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration { + if in == nil { + return nil + } + out := new(WebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) { + *out = *in + if in.KubeConfigFile != nil { + in, out := &in.KubeConfigFile, &out.KubeConfigFile + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo. +func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo { + if in == nil { + return nil + } + out := new(WebhookConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition. +func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition { + if in == nil { + return nil + } + out := new(WebhookMatchCondition) + in.DeepCopyInto(out) + return out +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go index 82fec0111..4c8189b13 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go @@ -29,10 +29,20 @@ import ( // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. 
func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&AuthorizationConfiguration{}, func(obj interface{}) { SetObjectDefaults_AuthorizationConfiguration(obj.(*AuthorizationConfiguration)) }) scheme.AddTypeDefaultingFunc(&EncryptionConfiguration{}, func(obj interface{}) { SetObjectDefaults_EncryptionConfiguration(obj.(*EncryptionConfiguration)) }) return nil } +func SetObjectDefaults_AuthorizationConfiguration(in *AuthorizationConfiguration) { + for i := range in.Authorizers { + a := &in.Authorizers[i] + if a.Webhook != nil { + SetDefaults_WebhookConfiguration(a.Webhook) + } + } +} + func SetObjectDefaults_EncryptionConfiguration(in *EncryptionConfiguration) { for i := range in.Resources { a := &in.Resources[i] diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go index fc75c464a..dee2c115a 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -185,6 +185,25 @@ type AuthenticationConfiguration struct { // "": "username" // } JWT []JWTAuthenticator `json:"jwt"` + + // If present --anonymous-auth must not be set + Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"` +} + +// AnonymousAuthConfig provides the configuration for the anonymous authenticator. +type AnonymousAuthConfig struct { + Enabled bool `json:"enabled"` + + // If set, anonymous auth is only allowed if the request meets one of the + // conditions. + Conditions []AnonymousAuthCondition `json:"conditions,omitempty"` +} + +// AnonymousAuthCondition describes the condition under which anonymous auth +// should be enabled. +type AnonymousAuthCondition struct { + // Path for which anonymous auth is enabled. + Path string `json:"path"` } // JWTAuthenticator provides the configuration for a single JWT authenticator. @@ -596,6 +615,13 @@ type WebhookMatchCondition struct { // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, // the contents would be converted to the v1 version before evaluating the CEL expression. // + // - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default' + // - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'. + // - 'user' is the user to test for. e.g. request.user == 'alice' + // - 'groups' is the groups to test for. e.g. ('group1' in request.groups) + // - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator. + // - 'uid' is the information about the requesting user. e.g. 
request.uid == '1' + // // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Expression string `json:"expression"` } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go index 9ee1ef8a4..3a6c66c3a 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -57,6 +57,26 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddGeneratedConversionFunc((*AnonymousAuthCondition)(nil), (*apiserver.AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(a.(*AnonymousAuthCondition), b.(*apiserver.AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthCondition)(nil), (*AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(a.(*apiserver.AnonymousAuthCondition), b.(*AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AnonymousAuthConfig)(nil), (*apiserver.AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(a.(*AnonymousAuthConfig), b.(*apiserver.AnonymousAuthConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthConfig)(nil), (*AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(a.(*apiserver.AnonymousAuthConfig), b.(*AnonymousAuthConfig), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope) }); err != nil { @@ -324,6 +344,48 @@ func Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginC return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in, out, s) } +func autoConvert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition is an autogenerated conversion function. 
+func Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error { + return autoConvert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in, out, s) +} + +func autoConvert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition is an autogenerated conversion function. +func Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error { + return autoConvert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in, out, s) +} + +func autoConvert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error { + out.Enabled = in.Enabled + out.Conditions = *(*[]apiserver.AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig is an autogenerated conversion function. +func Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error { + return autoConvert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in, out, s) +} + +func autoConvert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error { + out.Enabled = in.Enabled + out.Conditions = *(*[]AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig is an autogenerated conversion function. 
+func Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error { + return autoConvert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in, out, s) +} + func autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error { if in.JWT != nil { in, out := &in.JWT, &out.JWT @@ -336,6 +398,7 @@ func autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_Authenticatio } else { out.JWT = nil } + out.Anonymous = (*apiserver.AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous)) return nil } @@ -356,6 +419,7 @@ func autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_Authenticatio } else { out.JWT = nil } + out.Anonymous = (*AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous)) return nil } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go index e618178bf..81b652254 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -78,6 +78,43 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition. +func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition { + if in == nil { + return nil + } + out := new(AnonymousAuthCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]AnonymousAuthCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig. +func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig { + if in == nil { + return nil + } + out := new(AnonymousAuthConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { *out = *in @@ -89,6 +126,11 @@ func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfigura (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Anonymous != nil { + in, out := &in.Anonymous, &out.Anonymous + *out = new(AnonymousAuthConfig) + (*in).DeepCopyInto(*out) + } return } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go index 00a55f7a9..a0e13593b 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go @@ -156,6 +156,25 @@ type AuthenticationConfiguration struct { // "": "username" // } JWT []JWTAuthenticator `json:"jwt"` + + // If present --anonymous-auth must not be set + Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"` +} + +// AnonymousAuthConfig provides the configuration for the anonymous authenticator. +type AnonymousAuthConfig struct { + Enabled bool `json:"enabled"` + + // If set, anonymous auth is only allowed if the request meets one of the + // conditions. + Conditions []AnonymousAuthCondition `json:"conditions,omitempty"` +} + +// AnonymousAuthCondition describes the condition under which anonymous auth +// should be enabled. +type AnonymousAuthCondition struct { + // Path for which anonymous auth is enabled. + Path string `json:"path"` } // JWTAuthenticator provides the configuration for a single JWT authenticator. @@ -567,6 +586,13 @@ type WebhookMatchCondition struct { // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, // the contents would be converted to the v1 version before evaluating the CEL expression. // + // - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default' + // - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'. + // - 'user' is the user to test for. e.g. request.user == 'alice' + // - 'groups' is the groups to test for. e.g. ('group1' in request.groups) + // - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator. + // - 'uid' is the information about the requesting user. e.g. request.uid == '1' + // // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Expression string `json:"expression"` } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go index 911a331f2..30ef049d4 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go @@ -37,6 +37,26 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AnonymousAuthCondition)(nil), (*apiserver.AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(a.(*AnonymousAuthCondition), b.(*apiserver.AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthCondition)(nil), (*AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(a.(*apiserver.AnonymousAuthCondition), b.(*AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AnonymousAuthConfig)(nil), (*apiserver.AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(a.(*AnonymousAuthConfig), b.(*apiserver.AnonymousAuthConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthConfig)(nil), (*AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(a.(*apiserver.AnonymousAuthConfig), b.(*AnonymousAuthConfig), scope) + }); err != nil { + return err + } if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope) }); err != nil { @@ -260,6 +280,48 @@ func RegisterConversions(s *runtime.Scheme) error { return nil } +func autoConvert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition is an autogenerated conversion function. +func Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error { + return autoConvert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in, out, s) +} + +func autoConvert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition is an autogenerated conversion function. 
+func Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error { + return autoConvert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in, out, s) +} + +func autoConvert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error { + out.Enabled = in.Enabled + out.Conditions = *(*[]apiserver.AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig is an autogenerated conversion function. +func Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error { + return autoConvert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in, out, s) +} + +func autoConvert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error { + out.Enabled = in.Enabled + out.Conditions = *(*[]AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig is an autogenerated conversion function. +func Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error { + return autoConvert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in, out, s) +} + func autoConvert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error { if in.JWT != nil { in, out := &in.JWT, &out.JWT @@ -272,6 +334,7 @@ func autoConvert_v1beta1_AuthenticationConfiguration_To_apiserver_Authentication } else { out.JWT = nil } + out.Anonymous = (*apiserver.AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous)) return nil } @@ -292,6 +355,7 @@ func autoConvert_apiserver_AuthenticationConfiguration_To_v1beta1_Authentication } else { out.JWT = nil } + out.Anonymous = (*AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous)) return nil } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go index 7da9db927..0d78e51a9 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go @@ -25,6 +25,43 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition. +func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition { + if in == nil { + return nil + } + out := new(AnonymousAuthCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]AnonymousAuthCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig. +func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig { + if in == nil { + return nil + } + out := new(AnonymousAuthConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { *out = *in @@ -36,6 +73,11 @@ func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfigura (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Anonymous != nil { + in, out := &in.Anonymous, &out.Anonymous + *out = new(AnonymousAuthConfig) + (*in).DeepCopyInto(*out) + } return } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go index b88c47c67..6439e8220 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -100,6 +100,43 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition. +func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition { + if in == nil { + return nil + } + out := new(AnonymousAuthCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]AnonymousAuthCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig. +func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig { + if in == nil { + return nil + } + out := new(AnonymousAuthConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { *out = *in @@ -111,6 +148,11 @@ func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfigura (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Anonymous != nil { + in, out := &in.Anonymous, &out.Anonymous + *out = new(AnonymousAuthConfig) + (*in).DeepCopyInto(*out) + } return } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/constraint/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto index bda8c6953..032c58691 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto +++ b/constraint/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -48,11 +48,11 @@ message Event { optional string verb = 5; // Authenticated user information. - optional k8s.io.api.authentication.v1.UserInfo user = 6; + optional .k8s.io.api.authentication.v1.UserInfo user = 6; // Impersonated user information. // +optional - optional k8s.io.api.authentication.v1.UserInfo impersonatedUser = 7; + optional .k8s.io.api.authentication.v1.UserInfo impersonatedUser = 7; // Source IPs, from where the request originated and intermediate proxies. // The source IPs are listed from (in order): @@ -79,28 +79,28 @@ message Event { // For successful responses, this will only include the Code and StatusSuccess. // For non-status type error responses, this will be auto-populated with the error Message. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 10; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 10; // API object from the request, in JSON format. The RequestObject is recorded as-is in the request // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or // merging. It is an external versioned object type, and may not be a valid object on its own. // Omitted for non-resource requests. Only logged at Request Level and higher. // +optional - optional k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 11; + optional .k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 11; // API object returned in the response, in JSON. The ResponseObject is recorded after conversion // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged // at Response Level. // +optional - optional k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 12; + optional .k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 12; // Time the request reached the apiserver. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 13; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 13; // Time the request reached current audit stage. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 14; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 14; // Annotations is an unstructured key value map stored with an audit event that may be set by // plugins invoked in the request serving chain, including authentication, authorization and @@ -115,7 +115,7 @@ message Event { // EventList is a list of audit Events. 
message EventList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated Event items = 2; } @@ -187,7 +187,7 @@ message ObjectReference { message Policy { // ObjectMeta is included for interoperability with API infrastructure. // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules specify the audit Level a request should be recorded at. // A request may match multiple rules, in which case the FIRST matching rule is used. @@ -215,7 +215,7 @@ message Policy { // PolicyList is a list of audit Policies. message PolicyList { // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated Policy items = 2; } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go b/constraint/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go index 3e29d4e71..dd11efbde 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go @@ -17,18 +17,12 @@ limitations under the License. package serviceaccount import ( - "context" "fmt" "strings" v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/authentication/user" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - - "k8s.io/klog/v2" ) const ( @@ -36,9 +30,6 @@ const ( ServiceAccountUsernameSeparator = ":" ServiceAccountGroupPrefix = "system:serviceaccounts:" AllServiceAccountsGroup = "system:serviceaccounts" - // CredentialIDKey is the key used in a user's "extra" to specify the unique - // identifier for this identity document). - CredentialIDKey = "authentication.kubernetes.io/credential-id" // IssuedCredentialIDAuditAnnotationKey is the annotation key used in the audit event that is persisted to the // '/token' endpoint for service accounts. // This annotation indicates the generated credential identifier for the service account token being issued. @@ -156,7 +147,7 @@ func (sa *ServiceAccountInfo) UserInfo() user.Info { if info.Extra == nil { info.Extra = make(map[string][]string) } - info.Extra[CredentialIDKey] = []string{sa.CredentialID} + info.Extra[user.CredentialIDKey] = []string{sa.CredentialID} } if sa.NodeName != "" { if info.Extra == nil { @@ -172,15 +163,6 @@ func (sa *ServiceAccountInfo) UserInfo() user.Info { return info } -// CredentialIDForJTI converts a given JTI string into a credential identifier for use in a -// users 'extra' info. 
-func CredentialIDForJTI(jti string) string { - if len(jti) == 0 { - return "" - } - return "JTI=" + jti -} - // IsServiceAccountToken returns true if the secret is a valid api token for the service account func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool { if secret.Type != v1.SecretTypeServiceAccountToken { @@ -200,29 +182,3 @@ func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool { return true } - -func GetOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, name string) (*v1.ServiceAccount, error) { - sa, err := coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err == nil { - return sa, nil - } - if !apierrors.IsNotFound(err) { - return nil, err - } - - // Create the namespace if we can't verify it exists. - // Tolerate errors, since we don't know whether this component has namespace creation permissions. - if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) { - if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { - klog.Warningf("create non-exist namespace %s failed:%v", namespace, err) - } - } - - // Create the service account - sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, metav1.CreateOptions{}) - if apierrors.IsAlreadyExists(err) { - // If we're racing to init and someone else already created it, re-fetch - return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - } - return sa, err -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/authentication/user/user.go b/constraint/vendor/k8s.io/apiserver/pkg/authentication/user/user.go index 4d6ec0980..1af6f2b27 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/authentication/user/user.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/authentication/user/user.go @@ -66,8 +66,8 @@ func (i *DefaultInfo) GetExtra() map[string][]string { return i.Extra } -// well-known user and group names const ( + // well-known user and group names SystemPrivilegedGroup = "system:masters" NodesGroup = "system:nodes" MonitoringGroup = "system:monitoring" @@ -81,4 +81,8 @@ const ( KubeProxy = "system:kube-proxy" KubeControllerManager = "system:kube-controller-manager" KubeScheduler = "system:kube-scheduler" + + // CredentialIDKey is the key used in a user's "extra" to specify the unique + // identifier for this identity document). + CredentialIDKey = "authentication.kubernetes.io/credential-id" ) diff --git a/constraint/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go b/constraint/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go index 8261c5b58..2f5f65e22 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go @@ -20,6 +20,8 @@ import ( "context" "net/http" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apiserver/pkg/authentication/user" ) @@ -62,6 +64,16 @@ type Attributes interface { // GetPath returns the path of the request GetPath() string + + // ParseFieldSelector is lazy, thread-safe, and stores the parsed result and error. + // It returns an error if the field selector cannot be parsed. 
+ // The returned requirements must be treated as readonly and not modified. + GetFieldSelector() (fields.Requirements, error) + + // ParseLabelSelector is lazy, thread-safe, and stores the parsed result and error. + // It returns an error if the label selector cannot be parsed. + // The returned requirements must be treated as readonly and not modified. + GetLabelSelector() (labels.Requirements, error) } // Authorizer makes an authorization decision based on information gained by making @@ -80,7 +92,7 @@ func (f AuthorizerFunc) Authorize(ctx context.Context, a Attributes) (Decision, // RuleResolver provides a mechanism for resolving the list of rules that apply to a given user within a namespace. type RuleResolver interface { // RulesFor get the list of cluster wide rules, the list of rules in the specific namespace, incomplete status and errors. - RulesFor(user user.Info, namespace string) ([]ResourceRuleInfo, []NonResourceRuleInfo, bool, error) + RulesFor(ctx context.Context, user user.Info, namespace string) ([]ResourceRuleInfo, []NonResourceRuleInfo, bool, error) } // RequestAttributesGetter provides a function that extracts Attributes from an http.Request @@ -100,6 +112,11 @@ type AttributesRecord struct { Name string ResourceRequest bool Path string + + FieldSelectorRequirements fields.Requirements + FieldSelectorParsingErr error + LabelSelectorRequirements labels.Requirements + LabelSelectorParsingErr error } func (a AttributesRecord) GetUser() user.Info { @@ -146,6 +163,14 @@ func (a AttributesRecord) GetPath() string { return a.Path } +func (a AttributesRecord) GetFieldSelector() (fields.Requirements, error) { + return a.FieldSelectorRequirements, a.FieldSelectorParsingErr +} + +func (a AttributesRecord) GetLabelSelector() (labels.Requirements, error) { + return a.LabelSelectorRequirements, a.LabelSelectorParsingErr +} + type Decision int const ( diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/common/typeprovider.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/common/typeprovider.go new file mode 100644 index 000000000..685a585c7 --- /dev/null +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/common/typeprovider.go @@ -0,0 +1,127 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// TypeResolver resolves a type by a given name. +type TypeResolver interface { + // Resolve resolves the type by its name. + // This function returns false if the name does not refer to a known object type. + Resolve(name string) (ResolvedType, bool) +} + +// ResolvedType refers an object type that can be looked up for its fields. +type ResolvedType interface { + ref.Type + + Type() *types.Type + + // Field finds the field by the field name, or false if the field is not known. + // This function directly return a FieldType that is known to CEL to be more customizable. 
+ Field(name string) (*types.FieldType, bool) + + // FieldNames returns the field names associated with the type, if the type + // is found. + FieldNames() ([]string, bool) + + // Val creates an instance for the ResolvedType, given its fields and their values. + Val(fields map[string]ref.Val) ref.Val +} + +// ResolverTypeProvider delegates type resolution first to the TypeResolver and then +// to the underlying types.Provider for types not resolved by the TypeResolver. +type ResolverTypeProvider struct { + typeResolver TypeResolver + underlyingTypeProvider types.Provider +} + +var _ types.Provider = (*ResolverTypeProvider)(nil) + +// FindStructType returns the Type give a qualified type name, by looking it up with +// the DynamicTypeResolver and translating it to CEL Type. +// If the type is not known to the DynamicTypeResolver, the lookup falls back to the underlying +// ResolverTypeProvider instead. +func (p *ResolverTypeProvider) FindStructType(structType string) (*types.Type, bool) { + t, ok := p.typeResolver.Resolve(structType) + if ok { + return types.NewTypeTypeWithParam(t.Type()), true + } + return p.underlyingTypeProvider.FindStructType(structType) +} + +// FindStructFieldNames returns the field names associated with the type, if the type +// is found. +func (p *ResolverTypeProvider) FindStructFieldNames(structType string) ([]string, bool) { + t, ok := p.typeResolver.Resolve(structType) + if ok { + return t.FieldNames() + } + return p.underlyingTypeProvider.FindStructFieldNames(structType) +} + +// FindStructFieldType returns the field type for a checked type value. +// Returns false if the field could not be found. +func (p *ResolverTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) { + t, ok := p.typeResolver.Resolve(structType) + if ok { + return t.Field(fieldName) + } + return p.underlyingTypeProvider.FindStructFieldType(structType, fieldName) +} + +// NewValue creates a new type value from a qualified name and map of fields. +func (p *ResolverTypeProvider) NewValue(structType string, fields map[string]ref.Val) ref.Val { + t, ok := p.typeResolver.Resolve(structType) + if ok { + return t.Val(fields) + } + return p.underlyingTypeProvider.NewValue(structType, fields) +} + +func (p *ResolverTypeProvider) EnumValue(enumName string) ref.Val { + return p.underlyingTypeProvider.EnumValue(enumName) +} + +func (p *ResolverTypeProvider) FindIdent(identName string) (ref.Val, bool) { + return p.underlyingTypeProvider.FindIdent(identName) +} + +// ResolverEnvOption creates the ResolverTypeProvider with a given DynamicTypeResolver, +// and also returns the CEL ResolverEnvOption to apply it to the env. +func ResolverEnvOption(resolver TypeResolver) cel.EnvOption { + _, envOpt := NewResolverTypeProviderAndEnvOption(resolver) + return envOpt +} + +// NewResolverTypeProviderAndEnvOption creates the ResolverTypeProvider with a given DynamicTypeResolver, +// and also returns the CEL ResolverEnvOption to apply it to the env. +func NewResolverTypeProviderAndEnvOption(resolver TypeResolver) (*ResolverTypeProvider, cel.EnvOption) { + tp := &ResolverTypeProvider{typeResolver: resolver} + var envOption cel.EnvOption = func(e *cel.Env) (*cel.Env, error) { + // wrap the existing type provider (acquired from the env) + // and set new type provider for the env. 
+ tp.underlyingTypeProvider = e.CELTypeProvider() + typeProviderOption := cel.CustomTypeProvider(tp) + return typeProviderOption(e) + } + return tp, envOption +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/environment/base.go index 2cea83c2e..b210377ae 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/environment/base.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/environment/base.go @@ -20,6 +20,7 @@ import ( "fmt" "strconv" "sync" + "sync/atomic" "github.com/google/cel-go/cel" "github.com/google/cel-go/checker" @@ -30,20 +31,30 @@ import ( "k8s.io/apimachinery/pkg/util/version" celconfig "k8s.io/apiserver/pkg/apis/cel" "k8s.io/apiserver/pkg/cel/library" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/component-base/featuregate" + utilversion "k8s.io/component-base/version" ) // DefaultCompatibilityVersion returns a default compatibility version for use with EnvSet // that guarantees compatibility with CEL features/libraries/parameters understood by -// an n-1 version +// the api server min compatibility version // -// This default will be set to no more than n-1 the current Kubernetes major.minor version. +// This default will be set to no more than the current Kubernetes major.minor version. // -// Note that a default version number less than n-1 indicates a wider range of version -// compatibility than strictly required for rollback. A wide range of compatibility is -// desirable because it means that CEL expressions are portable across a wider range -// of Kubernetes versions. +// Note that a default version number less than n-1 the current Kubernetes major.minor version +// indicates a wider range of version compatibility than strictly required for rollback. +// A wide range of compatibility is desirable because it means that CEL expressions are portable +// across a wider range of Kubernetes versions. +// A default version number equal to the current Kubernetes major.minor version +// indicates fast forward CEL features that can be used when rollback is no longer needed. func DefaultCompatibilityVersion() *version.Version { - return version.MajorMinor(1, 29) + effectiveVer := featuregate.DefaultComponentGlobalsRegistry.EffectiveVersionFor(featuregate.DefaultKubeComponent) + if effectiveVer == nil { + effectiveVer = utilversion.DefaultKubeEffectiveVersion() + } + return effectiveVer.MinCompatibilityVersion() } var baseOpts = append(baseOptsWithoutStrictCost, StrictCostOpt) @@ -61,9 +72,9 @@ var baseOptsWithoutStrictCost = []VersionedOptions{ cel.EagerlyValidateDeclarations(true), cel.DefaultUTCTimeZone(true), - library.URLs(), - library.Regex(), - library.Lists(), + UnversionedLib(library.URLs), + UnversionedLib(library.Regex), + UnversionedLib(library.Lists), // cel-go v0.17.7 change the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7. // Since it is a regression fix, we apply it uniformly to all code use v0.17.7. 
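A minimal sketch, not part of the vendored change, of how the CEL environment helpers touched by this diff fit together: DefaultCompatibilityVersion is now derived from the component effective version, MustBaseEnvSet builds the base environments for that version, and EnvSet.Extend (shown further below) layers extra VersionedOptions on top, optionally gated by the new FeatureEnabled hook. The noopResolver type, the `return true` feature check, and the package main scaffolding are illustrative placeholders, not APIs from this diff.

// Illustrative sketch only; assumes the environment and common package APIs shown in this diff.
package main

import (
	"github.com/google/cel-go/cel"

	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apiserver/pkg/cel/common"
	"k8s.io/apiserver/pkg/cel/environment"
)

// noopResolver is a stand-in common.TypeResolver that never resolves a type,
// so every lookup falls through to the env's underlying CELTypeProvider via
// the ResolverTypeProvider wiring added in typeprovider.go above.
type noopResolver struct{}

func (noopResolver) Resolve(name string) (common.ResolvedType, bool) { return nil, false }

func main() {
	// Base env set pinned to the compatibility version, with strict cost enabled.
	base := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)

	// Layer an additional option on top. FeatureEnabled (new in this diff) gates the
	// option for newly written expressions; the StoredExpressions env ignores it.
	extended, err := base.Extend(environment.VersionedOptions{
		IntroducedVersion: version.MajorMinor(1, 31),
		FeatureEnabled:    func() bool { return true }, // stand-in for a real feature-gate check
		EnvOptions: []cel.EnvOption{
			common.ResolverEnvOption(noopResolver{}),
		},
	})
	if err != nil {
		panic(err)
	}
	_ = extended
}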
@@ -81,7 +92,7 @@ var baseOptsWithoutStrictCost = []VersionedOptions{ { IntroducedVersion: version.MajorMinor(1, 27), EnvOptions: []cel.EnvOption{ - library.Authz(), + UnversionedLib(library.Authz), }, }, { @@ -89,7 +100,7 @@ var baseOptsWithoutStrictCost = []VersionedOptions{ EnvOptions: []cel.EnvOption{ cel.CrossTypeNumericComparisons(true), cel.OptionalTypes(), - library.Quantity(), + UnversionedLib(library.Quantity), }, }, // add the new validator in 1.29 @@ -128,12 +139,58 @@ var baseOptsWithoutStrictCost = []VersionedOptions{ { IntroducedVersion: version.MajorMinor(1, 30), EnvOptions: []cel.EnvOption{ - library.IP(), - library.CIDR(), + UnversionedLib(library.IP), + UnversionedLib(library.CIDR), + }, + }, + // Format Library + { + IntroducedVersion: version.MajorMinor(1, 31), + EnvOptions: []cel.EnvOption{ + UnversionedLib(library.Format), + }, + }, + // Authz selectors + { + IntroducedVersion: version.MajorMinor(1, 31), + FeatureEnabled: func() bool { + enabled := utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) + authzSelectorsLibraryInit.Do(func() { + // Record the first time feature enablement was checked for this library. + // This is checked from integration tests to ensure no cached cel envs + // are constructed before feature enablement is effectively set. + authzSelectorsLibraryEnabled.Store(enabled) + // Uncomment to debug where the first initialization is coming from if needed. + // debug.PrintStack() + }) + return enabled + }, + EnvOptions: []cel.EnvOption{ + UnversionedLib(library.AuthzSelectors), + }, + }, + // Two variable comprehensions + { + IntroducedVersion: version.MajorMinor(1, 32), + EnvOptions: []cel.EnvOption{ + UnversionedLib(ext.TwoVarComprehensions), }, }, } +var ( + authzSelectorsLibraryInit sync.Once + authzSelectorsLibraryEnabled atomic.Value +) + +// AuthzSelectorsLibraryEnabled returns whether the AuthzSelectors library was enabled when it was constructed. +// If it has not been contructed yet, this returns `false, false`. +// This is solely for the benefit of the integration tests making sure feature gates get correctly parsed before AuthzSelector ever has to check for enablement. +func AuthzSelectorsLibraryEnabled() (enabled, constructed bool) { + enabled, constructed = authzSelectorsLibraryEnabled.Load().(bool) + return +} + var StrictCostOpt = VersionedOptions{ // This is to configure the cost calculation for extended libraries IntroducedVersion: version.MajorMinor(1, 0), @@ -142,6 +199,19 @@ var StrictCostOpt = VersionedOptions{ }, } +// cacheBaseEnvs controls whether calls to MustBaseEnvSet are cached. +// Defaults to true, may be disabled by calling DisableBaseEnvSetCachingForTests. +var cacheBaseEnvs = true + +// DisableBaseEnvSetCachingForTests clears and disables base env caching. +// This is only intended for unit tests exercising MustBaseEnvSet directly with different enablement options. +// It does not clear other initialization paths that may cache results of calling MustBaseEnvSet. +func DisableBaseEnvSetCachingForTests() { + cacheBaseEnvs = false + baseEnvs.Clear() + baseEnvsWithOption.Clear() +} + // MustBaseEnvSet returns the common CEL base environments for Kubernetes for Version, or panics // if the version is nil, or does not have major and minor components. 
// @@ -167,7 +237,9 @@ func MustBaseEnvSet(ver *version.Version, strictCost bool) *EnvSet { } entry, _, _ = baseEnvsSingleflight.Do(key, func() (interface{}, error) { entry := mustNewEnvSet(ver, baseOpts) - baseEnvs.Store(key, entry) + if cacheBaseEnvs { + baseEnvs.Store(key, entry) + } return entry, nil }) } else { @@ -176,7 +248,9 @@ func MustBaseEnvSet(ver *version.Version, strictCost bool) *EnvSet { } entry, _, _ = baseEnvsWithOptionSingleflight.Do(key, func() (interface{}, error) { entry := mustNewEnvSet(ver, baseOptsWithoutStrictCost) - baseEnvsWithOption.Store(key, entry) + if cacheBaseEnvs { + baseEnvsWithOption.Store(key, entry) + } return entry, nil }) } @@ -190,3 +264,20 @@ var ( baseEnvsSingleflight = &singleflight.Group{} baseEnvsWithOptionSingleflight = &singleflight.Group{} ) + +// UnversionedLib wraps library initialization calls like ext.Sets() or library.IP() +// to force compilation errors if the call evolves to include a varadic variable option. +// +// This provides automatic detection of a problem that is hard to catch in review-- +// If a CEL library used in Kubernetes is unversioned and then become versioned, and we +// fail to set a desired version, the libraries defaults to the latest version, changing +// CEL environment without controlled rollout, bypassing the entire purpose of the base +// environment. +// +// If usages of this function fail to compile: add version=1 argument to all call sites +// that fail compilation while removing the UnversionedLib wrapper. Next, review +// the changes in the library present in higher versions and, if needed, use VersionedOptions to +// the base environment to roll out to a newer version safely. +func UnversionedLib(initializer func() cel.EnvOption) cel.EnvOption { + return initializer() +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go index b47bc8e98..07b9e8f54 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go @@ -175,7 +175,15 @@ type VersionedOptions struct { // // Optional. RemovedVersion *version.Version - + // FeatureEnabled returns true if these options are enabled by feature gates, + // and returns false if these options are not enabled due to feature gates. + // + // This takes priority over IntroducedVersion / RemovedVersion for the NewExpressions environment. + // + // The StoredExpressions environment ignores this function. + // + // Optional. + FeatureEnabled func() bool // EnvOptions provides CEL EnvOptions. This may be used to add a cel.Variable, a // cel.Library, or to enable other CEL EnvOptions such as language settings. // @@ -210,7 +218,7 @@ type VersionedOptions struct { // making multiple calls to Extend. 
func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) { if len(options) > 0 { - newExprOpts, err := e.filterAndBuildOpts(e.newExpressions, e.compatibilityVersion, options) + newExprOpts, err := e.filterAndBuildOpts(e.newExpressions, e.compatibilityVersion, true, options) if err != nil { return nil, err } @@ -218,7 +226,7 @@ func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) { if err != nil { return nil, err } - storedExprOpt, err := e.filterAndBuildOpts(e.storedExpressions, version.MajorMinor(math.MaxUint, math.MaxUint), options) + storedExprOpt, err := e.filterAndBuildOpts(e.storedExpressions, version.MajorMinor(math.MaxUint, math.MaxUint), false, options) if err != nil { return nil, err } @@ -231,13 +239,26 @@ func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) { return e, nil } -func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, opts []VersionedOptions) (cel.EnvOption, error) { +func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, honorFeatureGateEnablement bool, opts []VersionedOptions) (cel.EnvOption, error) { var envOpts []cel.EnvOption var progOpts []cel.ProgramOption var declTypes []*apiservercel.DeclType for _, opt := range opts { + var allowedByFeatureGate, allowedByVersion bool + if opt.FeatureEnabled != nil && honorFeatureGateEnablement { + // Feature-gate-enabled libraries must follow compatible default feature enablement. + // Enabling alpha features in their first release enables libraries the previous API server is unaware of. + allowedByFeatureGate = opt.FeatureEnabled() + if !allowedByFeatureGate { + continue + } + } if compatVer.AtLeast(opt.IntroducedVersion) && (opt.RemovedVersion == nil || compatVer.LessThan(opt.RemovedVersion)) { + allowedByVersion = true + } + + if allowedByFeatureGate || allowedByVersion { envOpts = append(envOpts, opt.EnvOptions...) progOpts = append(progOpts, opt.ProgramOptions...) declTypes = append(declTypes, opt.DeclTypes...) @@ -246,7 +267,10 @@ func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, o if len(declTypes) > 0 { provider := apiservercel.NewDeclTypeProvider(declTypes...) - providerOpts, err := provider.EnvOptions(base.TypeProvider()) + if compatVer.AtLeast(version.MajorMinor(1, 31)) { + provider.SetRecognizeKeywordAsFieldName(true) + } + providerOpts, err := provider.EnvOptions(base.CELTypeProvider()) if err != nil { return nil, err } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/errors.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/errors.go index 907ca6ec8..d7b052fc9 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/errors.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/errors.go @@ -16,11 +16,46 @@ limitations under the License. package cel +import ( + "fmt" + + "github.com/google/cel-go/cel" +) + +// ErrInternal the basic error that occurs when the expression fails to evaluate +// due to internal reasons. Any Error that has the Type of +// ErrorInternal is considered equal to ErrInternal +var ErrInternal = fmt.Errorf("internal") + +// ErrInvalid is the basic error that occurs when the expression fails to +// evaluate but not due to internal reasons. Any Error that has the Type of +// ErrorInvalid is considered equal to ErrInvalid. +var ErrInvalid = fmt.Errorf("invalid") + +// ErrRequired is the basic error that occurs when the expression is required +// but absent. +// Any Error that has the Type of ErrorRequired is considered equal +// to ErrRequired. 
+var ErrRequired = fmt.Errorf("required") + +// ErrCompilation is the basic error that occurs when the expression fails to +// compile. Any CompilationError wraps ErrCompilation. +// ErrCompilation wraps ErrInvalid +var ErrCompilation = fmt.Errorf("%w: compilation error", ErrInvalid) + +// ErrOutOfBudget is the basic error that occurs when the expression fails due to +// exceeding budget. +var ErrOutOfBudget = fmt.Errorf("out of budget") + // Error is an implementation of the 'error' interface, which represents a // XValidation error. type Error struct { Type ErrorType Detail string + + // Cause is an optional wrapped errors that can be useful to + // programmatically retrieve detailed errors. + Cause error } var _ error = &Error{} @@ -30,7 +65,24 @@ func (v *Error) Error() string { return v.Detail } -// ErrorType is a machine readable value providing more detail about why +func (v *Error) Is(err error) bool { + switch v.Type { + case ErrorTypeRequired: + return err == ErrRequired + case ErrorTypeInvalid: + return err == ErrInvalid + case ErrorTypeInternal: + return err == ErrInternal + } + return false +} + +// Unwrap returns the wrapped Cause. +func (v *Error) Unwrap() error { + return v.Cause +} + +// ErrorType is a machine-readable value providing more detail about why // a XValidation is invalid. type ErrorType string @@ -45,3 +97,28 @@ const ( // to user input. See InternalError(). ErrorTypeInternal ErrorType = "InternalError" ) + +// CompilationError indicates an error during expression compilation. +// It wraps ErrCompilation. +type CompilationError struct { + err *Error + Issues *cel.Issues +} + +// NewCompilationError wraps a cel.Issues to indicate a compilation failure. +func NewCompilationError(issues *cel.Issues) *CompilationError { + return &CompilationError{ + Issues: issues, + err: &Error{ + Type: ErrorTypeInvalid, + Detail: fmt.Sprintf("compilation error: %s", issues), + }} +} + +func (e *CompilationError) Error() string { + return e.err.Error() +} + +func (e *CompilationError) Unwrap() []error { + return []error{e.err, ErrCompilation} +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/format.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/format.go new file mode 100644 index 000000000..31216806f --- /dev/null +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/format.go @@ -0,0 +1,73 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cel + +import ( + "fmt" + "reflect" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +var ( + FormatObject = decls.NewObjectType("kubernetes.NamedFormat") + FormatType = cel.ObjectType("kubernetes.NamedFormat") +) + +// Format provdes a CEL representation of kubernetes format +type Format struct { + Name string + ValidateFunc func(string) []string + + // Size of the regex string or estimated equivalent regex string used + // for cost estimation + MaxRegexSize int +} + +func (d Format) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { + return nil, fmt.Errorf("type conversion error from 'Format' to '%v'", typeDesc) +} + +func (d Format) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case FormatType: + return d + case types.TypeType: + return FormatType + default: + return types.NewErr("type conversion error from '%s' to '%s'", FormatType, typeVal) + } +} + +func (d Format) Equal(other ref.Val) ref.Val { + otherDur, ok := other.(Format) + if !ok { + return types.MaybeNoSuchOverloadErr(other) + } + return types.Bool(d.Name == otherDur.Name) +} + +func (d Format) Type() ref.Type { + return FormatType +} + +func (d Format) Value() interface{} { + return d +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go deleted file mode 100644 index 16183050d..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package lazy - -import ( - "fmt" - "reflect" - - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - "k8s.io/apiserver/pkg/cel" -) - -type GetFieldFunc func(*MapValue) ref.Val - -var _ ref.Val = (*MapValue)(nil) -var _ traits.Mapper = (*MapValue)(nil) - -// MapValue is a map that lazily evaluate its value when a field is first accessed. -// The map value is not designed to be thread-safe. -type MapValue struct { - typeValue *types.Type - - // values are previously evaluated values obtained from callbacks - values map[string]ref.Val - // callbacks are a map of field name to the function that returns the field Val - callbacks map[string]GetFieldFunc - // knownValues are registered names, used for iteration - knownValues []string -} - -func NewMapValue(objectType ref.Type) *MapValue { - return &MapValue{ - typeValue: types.NewTypeValue(objectType.TypeName(), traits.IndexerType|traits.FieldTesterType|traits.IterableType), - values: map[string]ref.Val{}, - callbacks: map[string]GetFieldFunc{}, - } -} - -// Append adds the given field with its name and callback. 
-func (m *MapValue) Append(name string, callback GetFieldFunc) { - m.knownValues = append(m.knownValues, name) - m.callbacks[name] = callback -} - -// Contains checks if the key is known to the map -func (m *MapValue) Contains(key ref.Val) ref.Val { - v, found := m.Find(key) - if v != nil && types.IsUnknownOrError(v) { - return v - } - return types.Bool(found) -} - -// Iterator returns an iterator to traverse the map. -func (m *MapValue) Iterator() traits.Iterator { - return &iterator{parent: m, index: 0} -} - -// Size returns the number of currently known fields -func (m *MapValue) Size() ref.Val { - return types.Int(len(m.callbacks)) -} - -// ConvertToNative returns an error because it is disallowed -func (m *MapValue) ConvertToNative(typeDesc reflect.Type) (any, error) { - return nil, fmt.Errorf("disallowed conversion from %q to %q", m.typeValue.TypeName(), typeDesc.Name()) -} - -// ConvertToType converts the map to the given type. -// Only its own type and "Type" type are allowed. -func (m *MapValue) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case m.typeValue: - return m - case types.TypeType: - return m.typeValue - } - return types.NewErr("disallowed conversion from %q to %q", m.typeValue.TypeName(), typeVal.TypeName()) -} - -// Equal returns true if the other object is the same pointer-wise. -func (m *MapValue) Equal(other ref.Val) ref.Val { - otherMap, ok := other.(*MapValue) - if !ok { - return types.MaybeNoSuchOverloadErr(other) - } - return types.Bool(m == otherMap) -} - -// Type returns its registered type. -func (m *MapValue) Type() ref.Type { - return m.typeValue -} - -// Value is not allowed. -func (m *MapValue) Value() any { - return types.NoSuchOverloadErr() -} - -// resolveField resolves the field. Calls the callback if the value is not yet stored. 
-func (m *MapValue) resolveField(name string) ref.Val { - v, seen := m.values[name] - if seen { - return v - } - f := m.callbacks[name] - v = f(m) - m.values[name] = v - return v -} - -func (m *MapValue) Find(key ref.Val) (ref.Val, bool) { - n, ok := key.(types.String) - if !ok { - return types.MaybeNoSuchOverloadErr(n), true - } - name, ok := cel.Unescape(n.Value().(string)) - if !ok { - return nil, false - } - if _, exists := m.callbacks[name]; !exists { - return nil, false - } - return m.resolveField(name), true -} - -func (m *MapValue) Get(key ref.Val) ref.Val { - v, found := m.Find(key) - if found { - return v - } - return types.ValOrErr(key, "no such key: %v", key) -} - -type iterator struct { - parent *MapValue - index int -} - -func (i *iterator) ConvertToNative(typeDesc reflect.Type) (any, error) { - return nil, fmt.Errorf("disallowed conversion to %q", typeDesc.Name()) -} - -func (i *iterator) ConvertToType(typeValue ref.Type) ref.Val { - return types.NewErr("disallowed conversion o %q", typeValue.TypeName()) -} - -func (i *iterator) Equal(other ref.Val) ref.Val { - otherIterator, ok := other.(*iterator) - if !ok { - return types.MaybeNoSuchOverloadErr(other) - } - return types.Bool(otherIterator == i) -} - -func (i *iterator) Type() ref.Type { - return types.IteratorType -} - -func (i *iterator) Value() any { - return nil -} - -func (i *iterator) HasNext() ref.Val { - return types.Bool(i.index < len(i.parent.knownValues)) -} - -func (i *iterator) Next() ref.Val { - ret := i.parent.Get(types.String(i.parent.knownValues[i.index])) - i.index++ - return ret -} - -var _ traits.Iterator = (*iterator)(nil) diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/authz.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/authz.go index df4bf0807..77332cff8 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/authz.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/authz.go @@ -22,6 +22,11 @@ import ( "reflect" "strings" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" @@ -194,6 +199,30 @@ import ( // Examples: // // authorizer.group('').resource('pods').namespace('default').check('create').error() +// +// fieldSelector +// +// Takes a string field selector, parses it to field selector requirements, and includes it in the authorization check. +// If the field selector does not parse successfully, no field selector requirements are included in the authorization check. +// Added in Kubernetes 1.31+, Authz library version 1. +// +// .fieldSelector() +// +// Examples: +// +// authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed() +// +// labelSelector (added in v1, Kubernetes 1.31+) +// +// Takes a string label selector, parses it to label selector requirements, and includes it in the authorization check. +// If the label selector does not parse successfully, no label selector requirements are included in the authorization check. +// Added in Kubernetes 1.31+, Authz library version 1. 
+// +// .labelSelector() +// +// Examples: +// +// authorizer.group('').resource('pods').labelSelector('app=example').check('list').allowed() func Authz() cel.EnvOption { return cel.Lib(authzLib) } @@ -203,7 +232,20 @@ var authzLib = &authz{} type authz struct{} func (*authz) LibraryName() string { - return "k8s.authz" + return "kubernetes.authz" +} + +func (*authz) Types() []*cel.Type { + return []*cel.Type{ + AuthorizerType, + PathCheckType, + GroupCheckType, + ResourceCheckType, + DecisionType} +} + +func (*authz) declarations() map[string][]cel.FunctionOpt { + return authzLibraryDecls } var authzLibraryDecls = map[string][]cel.FunctionOpt{ @@ -259,6 +301,74 @@ func (*authz) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } +// AuthzSelectors provides a CEL function library extension for adding fieldSelector and +// labelSelector filters to authorization checks. This requires the Authz library. +// See documentation of the Authz library for use and availability of the authorizer variable. +// +// fieldSelector +// +// Takes a string field selector, parses it to field selector requirements, and includes it in the authorization check. +// If the field selector does not parse successfully, no field selector requirements are included in the authorization check. +// Added in Kubernetes 1.31+. +// +// .fieldSelector() +// +// Examples: +// +// authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed() +// +// labelSelector +// +// Takes a string label selector, parses it to label selector requirements, and includes it in the authorization check. +// If the label selector does not parse successfully, no label selector requirements are included in the authorization check. +// Added in Kubernetes 1.31+. 
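[Editor's note, not part of the patch; the labelSelector examples continue below] A minimal sketch of how these selector functions would be wired up under an assumed standalone setup. The `authorizer` variable declaration and the library registration shown here are illustrative; real API server environments configure this through their own environment plumbing, and at evaluation time the selectors are only forwarded to the authorizer when the AuthorizeWithSelectors feature gate is enabled (see the Authorize method further down).

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"k8s.io/apiserver/pkg/cel/library"
)

func main() {
	// fieldSelector/labelSelector only compile when AuthzSelectors() is
	// registered in addition to Authz().
	env, err := cel.NewEnv(
		cel.Variable("authorizer", library.AuthorizerType),
		library.Authz(),
		library.AuthzSelectors(),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(
		`authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed()`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	fmt.Println(ast.OutputType()) // bool
}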
+// +// .labelSelector() +// +// Examples: +// +// authorizer.group('').resource('pods').labelSelector('app=example').check('list').allowed() +func AuthzSelectors() cel.EnvOption { + return cel.Lib(authzSelectorsLib) +} + +var authzSelectorsLib = &authzSelectors{} + +type authzSelectors struct{} + +func (*authzSelectors) LibraryName() string { + return "kubernetes.authzSelectors" +} + +func (*authzSelectors) Types() []*cel.Type { + return []*cel.Type{ResourceCheckType} +} + +func (*authzSelectors) declarations() map[string][]cel.FunctionOpt { + return authzSelectorsLibraryDecls +} + +var authzSelectorsLibraryDecls = map[string][]cel.FunctionOpt{ + "fieldSelector": { + cel.MemberOverload("authorizer_fieldselector", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType, + cel.BinaryBinding(resourceCheckFieldSelector))}, + "labelSelector": { + cel.MemberOverload("authorizer_labelselector", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType, + cel.BinaryBinding(resourceCheckLabelSelector))}, +} + +func (*authzSelectors) CompileOptions() []cel.EnvOption { + options := make([]cel.EnvOption, 0, len(authzSelectorsLibraryDecls)) + for name, overloads := range authzSelectorsLibraryDecls { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (*authzSelectors) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + func authorizerPath(arg1, arg2 ref.Val) ref.Val { authz, ok := arg1.(authorizerVal) if !ok { @@ -354,6 +464,38 @@ func resourceCheckSubresource(arg1, arg2 ref.Val) ref.Val { return result } +func resourceCheckFieldSelector(arg1, arg2 ref.Val) ref.Val { + resourceCheck, ok := arg1.(resourceCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + fieldSelector, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + result := resourceCheck + result.fieldSelector = fieldSelector + return result +} + +func resourceCheckLabelSelector(arg1, arg2 ref.Val) ref.Val { + resourceCheck, ok := arg1.(resourceCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + labelSelector, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + result := resourceCheck + result.labelSelector = labelSelector + return result +} + func resourceCheckNamespace(arg1, arg2 ref.Val) ref.Val { resourceCheck, ok := arg1.(resourceCheckVal) if !ok { @@ -544,11 +686,13 @@ func (g groupCheckVal) resourceCheck(resource string) resourceCheckVal { type resourceCheckVal struct { receiverOnlyObjectVal - groupCheck groupCheckVal - resource string - subresource string - namespace string - name string + groupCheck groupCheckVal + resource string + subresource string + namespace string + name string + fieldSelector string + labelSelector string } func (a resourceCheckVal) Authorize(ctx context.Context, verb string) ref.Val { @@ -563,6 +707,26 @@ func (a resourceCheckVal) Authorize(ctx context.Context, verb string) ref.Val { Verb: verb, User: a.groupCheck.authorizer.userInfo, } + + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) { + if len(a.fieldSelector) > 0 { + selector, err := fields.ParseSelector(a.fieldSelector) + if err != nil { + attr.FieldSelectorRequirements, attr.FieldSelectorParsingErr = nil, err + } else { + attr.FieldSelectorRequirements, attr.FieldSelectorParsingErr = selector.Requirements(), nil + } + } + if len(a.labelSelector) > 0 { + requirements, err := labels.ParseToRequirements(a.labelSelector) + if 
err != nil { + attr.LabelSelectorRequirements, attr.LabelSelectorParsingErr = nil, err + } else { + attr.LabelSelectorRequirements, attr.LabelSelectorParsingErr = requirements, nil + } + } + } + decision, reason, err := a.groupCheck.authorizer.authAuthorizer.Authorize(ctx, attr) return newDecision(decision, err, reason) } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go index c4259daed..2992e99e6 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go @@ -109,7 +109,15 @@ var cidrsLib = &cidrs{} type cidrs struct{} func (*cidrs) LibraryName() string { - return "net.cidr" + return "kubernetes.net.cidr" +} + +func (*cidrs) declarations() map[string][]cel.FunctionOpt { + return cidrLibraryDecls +} + +func (*cidrs) Types() []*cel.Type { + return []*cel.Type{apiservercel.CIDRType, apiservercel.IPType} } var cidrLibraryDecls = map[string][]cel.FunctionOpt{ diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/cost.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/cost.go index e3bde017b..a9e5db811 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/cost.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/cost.go @@ -17,16 +17,45 @@ limitations under the License. package library import ( - "math" - + "fmt" "github.com/google/cel-go/checker" "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + "math" + + "k8s.io/apiserver/pkg/cel" ) +// panicOnUnknown makes cost estimate functions panic on unrecognized functions. +// This is only set to true for unit tests. +var panicOnUnknown = false + +// builtInFunctions is a list of functions used in cost tests that are not handled by CostEstimator. +var knownUnhandledFunctions = map[string]bool{ + "@not_strictly_false": true, + "uint": true, + "duration": true, + "bytes": true, + "cel.@mapInsert": true, + "timestamp": true, + "strings.quote": true, + "value": true, + "_==_": true, + "_&&_": true, + "_||_": true, + "_>_": true, + "_>=_": true, + "_<_": true, + "_<=_": true, + "!_": true, + "_?_:_": true, + "_+_": true, + "_-_": true, +} + // CostEstimator implements CEL's interpretable.ActualCostEstimator and checker.CostEstimator. 
type CostEstimator struct { // SizeEstimator provides a CostEstimator.EstimateSize that this CostEstimator will delegate size estimation @@ -34,6 +63,25 @@ type CostEstimator struct { SizeEstimator checker.CostEstimator } +const ( + // shortest repeatable selector requirement that allocates a values slice is 2 characters: k, + selectorLengthToRequirementCount = float64(.5) + // the expensive parts to represent each requirement are a struct and a values slice + costPerRequirement = float64(common.ListCreateBaseCost + common.StructCreateBaseCost) +) + +// a selector consists of a list of requirements held in a slice +var baseSelectorCost = checker.CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost} + +func selectorCostEstimate(selectorLength checker.SizeEstimate) checker.CostEstimate { + parseCost := selectorLength.MultiplyByCostFactor(common.StringTraversalCostFactor) + + requirementCount := selectorLength.MultiplyByCostFactor(selectorLengthToRequirementCount) + requirementCost := requirementCount.MultiplyByCostFactor(costPerRequirement) + + return baseSelectorCost.Add(parseCost).Add(requirementCost) +} + func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, result ref.Val) *uint64 { switch function { case "check": @@ -45,13 +93,20 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re // All authorization builder and accessor functions have a nominal cost cost := uint64(1) return &cost + case "fieldSelector", "labelSelector": + // field and label selector parse is a string parse into a structured set of requirements + if len(args) >= 2 { + selectorLength := actualSize(args[1]) + cost := selectorCostEstimate(checker.SizeEstimate{Min: selectorLength, Max: selectorLength}) + return &cost.Max + } case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf": var cost uint64 if len(args) > 0 { cost += traversalCost(args[0]) // these O(n) operations all cost roughly the cost of a single traversal } return &cost - case "url", "lowerAscii", "upperAscii", "substring", "trim": + case "url", "lowerAscii", "upperAscii", "substring", "trim", "jsonpatch.escapeKey": if len(args) >= 1 { cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor)) return &cost @@ -105,7 +160,7 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re cost := uint64(math.Ceil(float64(actualSize(args[0])) * 2 * common.StringTraversalCostFactor)) return &cost } - case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast": + case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast", "isGlobalUnicast": // IP and CIDR accessors are nominal cost. 
cost := uint64(1) return &cost @@ -152,9 +207,65 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor)) return &cost } + case "validate": + if len(args) >= 2 { + format, isFormat := args[0].Value().(cel.Format) + if isFormat { + strSize := actualSize(args[1]) + + // Dont have access to underlying regex, estimate a long regexp + regexSize := format.MaxRegexSize + + // Copied from CEL implementation for regex cost + // + // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL + // Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0 + // in case where string is empty but regex is still expensive. + strCost := uint64(math.Ceil((1.0 + float64(strSize)) * common.StringTraversalCostFactor)) + // We don't know how many expressions are in the regex, just the string length (a huge + // improvement here would be to somehow get a count the number of expressions in the regex or + // how many states are in the regex state machine and use that to measure regex cost). + // For now, we're making a guess that each expression in a regex is typically at least 4 chars + // in length. + regexCost := uint64(math.Ceil(float64(regexSize) * common.RegexStringLengthCostFactor)) + cost := strCost * regexCost + return &cost + } + } + case "format.named": + // Simply dictionary lookup + cost := uint64(1) + return &cost case "sign", "asInteger", "isInteger", "asApproximateFloat", "isGreaterThan", "isLessThan", "compareTo", "add", "sub": cost := uint64(1) return &cost + case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery": + // url accessors + cost := uint64(1) + return &cost + case "_==_": + if len(args) == 2 { + unitCost := uint64(1) + lhs := args[0] + switch lhs.(type) { + case *cel.Quantity, cel.Quantity, + *cel.IP, cel.IP, + *cel.CIDR, cel.CIDR, + *cel.Format, cel.Format, // Formats have a small max size. Format takes pointer receiver. 
+ *cel.URL, cel.URL, // TODO: Computing the actual cost is expensive, and changing this would be a breaking change + *cel.Semver, cel.Semver, + *authorizerVal, authorizerVal, *pathCheckVal, pathCheckVal, *groupCheckVal, groupCheckVal, + *resourceCheckVal, resourceCheckVal, *decisionVal, decisionVal: + return &unitCost + default: + if panicOnUnknown && lhs.Type() != nil && isRegisteredType(lhs.Type().TypeName()) { + panic(fmt.Errorf("CallCost: unhandled equality for Kubernetes type %T", lhs)) + } + } + } + } + if panicOnUnknown && !knownUnhandledFunctions[function] { + panic(fmt.Errorf("CallCost: unhandled function %q or args %v", function, args)) } return nil } @@ -170,6 +281,11 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "reason", "error", "errored": // All authorization builder and accessor functions have a nominal cost return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + case "fieldSelector", "labelSelector": + // field and label selector parse is a string parse into a structured set of requirements + if len(args) == 1 { + return &checker.CallEstimate{CostEstimate: selectorCostEstimate(l.sizeEstimate(args[0]))} + } case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf": if target != nil { // Charge 1 cost for comparing each element in the list @@ -187,10 +303,10 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch return &checker.CallEstimate{CostEstimate: l.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)} } } - case "url": + case "url", "jsonpatch.escapeKey": if len(args) == 1 { sz := l.sizeEstimate(args[0]) - return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)} + return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor), ResultSize: &sz} } case "lowerAscii", "upperAscii", "substring", "trim": if target != nil { @@ -255,8 +371,10 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch // Worst case size is where is that a separator of "" is used, and each char is returned as a list element. max := sz.Max if len(args) > 1 { - if c := args[1].Expr().GetConstExpr(); c != nil { - max = uint64(c.GetInt64Value()) + if v := args[1].Expr().AsLiteral(); v != nil { + if i, ok := v.Value().(int64); ok { + max = uint64(i) + } } } // Cost is the traversal plus the construction of the result. @@ -327,7 +445,7 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch // So we double the cost of parsing the string. return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor)} } - case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast": + case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast", "isGlobalUnicast": // IP and CIDR accessors are nominal cost. 
return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} case "containsIP": @@ -373,8 +491,55 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch sz := l.sizeEstimate(args[0]) return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)} } + case "validate": + if target != nil { + sz := l.sizeEstimate(args[0]) + return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).MultiplyByCostFactor(cel.MaxNameFormatRegexSize * common.RegexStringLengthCostFactor)} + } + case "format.named": + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} case "sign", "asInteger", "isInteger", "asApproximateFloat", "isGreaterThan", "isLessThan", "compareTo", "add", "sub": return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery": + // url accessors + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + case "_==_": + if len(args) == 2 { + lhs := args[0] + rhs := args[1] + if lhs.Type().Equal(rhs.Type()) == types.True { + t := lhs.Type() + if t.Kind() == types.OpaqueKind { + switch t.TypeName() { + case cel.IPType.TypeName(), cel.CIDRType.TypeName(): + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + } + } + if t.Kind() == types.StructKind { + switch t { + case cel.QuantityType, AuthorizerType, PathCheckType, // O(1) cost equality checks + GroupCheckType, ResourceCheckType, DecisionType, cel.SemverType: + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}} + case cel.FormatType: + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: cel.MaxFormatSize}.MultiplyByCostFactor(common.StringTraversalCostFactor)} + case cel.URLType: + size := checker.SizeEstimate{Min: 1, Max: 1} + rhSize := rhs.ComputedSize() + lhSize := rhs.ComputedSize() + if rhSize != nil && lhSize != nil { + size = rhSize.Union(*lhSize) + } + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: size.Max}.MultiplyByCostFactor(common.StringTraversalCostFactor)} + } + } + if panicOnUnknown && isRegisteredType(t.TypeName()) { + panic(fmt.Errorf("EstimateCallCost: unhandled equality for Kubernetes type %v", t)) + } + } + } + } + if panicOnUnknown && !knownUnhandledFunctions[function] { + panic(fmt.Errorf("EstimateCallCost: unhandled function %q, target %v, args %v", function, target, args)) } return nil } @@ -383,6 +548,10 @@ func actualSize(value ref.Val) uint64 { if sz, ok := value.(traits.Sizer); ok { return uint64(sz.Size().(types.Int)) } + if panicOnUnknown { + // debug.PrintStack() + panic(fmt.Errorf("actualSize: non-sizer type %T", value)) + } return 1 } @@ -425,7 +594,7 @@ func (l *CostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstim type itemsNode struct { path []string t *types.Type - expr *exprpb.Expr + expr ast.Expr } func (i *itemsNode) Path() []string { @@ -436,7 +605,7 @@ func (i *itemsNode) Type() *types.Type { return i.t } -func (i *itemsNode) Expr() *exprpb.Expr { +func (i *itemsNode) Expr() ast.Expr { return i.expr } @@ -444,6 +613,8 @@ func (i *itemsNode) ComputedSize() *checker.SizeEstimate { return nil } +var _ checker.AstNode = (*itemsNode)(nil) + // traversalCost computes the cost of traversing a ref.Val as a data tree. 
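[Editor's note, not part of the patch] For illustration, the standalone helper below mirrors the selector cost model introduced by selectorCostEstimate above, using only cel-go's exported cost constants and estimate helpers: a base list allocation, a string-parse charge proportional to the selector length, and a per-requirement charge assuming roughly one requirement per two characters.

package example

import (
	"github.com/google/cel-go/checker"
	"github.com/google/cel-go/common"
)

// selectorEstimateFor returns the static cost estimate for a selector literal
// of the given length, following the same model as selectorCostEstimate.
func selectorEstimateFor(length uint64) checker.CostEstimate {
	sz := checker.SizeEstimate{Min: length, Max: length}
	// Base cost of the requirements slice itself.
	base := checker.CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
	// Parsing traverses the selector string once.
	parse := sz.MultiplyByCostFactor(common.StringTraversalCostFactor)
	// Roughly one requirement per two characters; each requirement allocates
	// a struct plus a values slice.
	reqs := sz.MultiplyByCostFactor(0.5)
	perReq := float64(common.ListCreateBaseCost + common.StructCreateBaseCost)
	return base.Add(parse).Add(reqs.MultiplyByCostFactor(perReq))
}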
func traversalCost(v ref.Val) uint64 { // TODO: This could potentially be optimized by sampling maps and lists instead of traversing. diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/format.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/format.go new file mode 100644 index 000000000..82ecffb41 --- /dev/null +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/format.go @@ -0,0 +1,279 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package library + +import ( + "fmt" + "net/url" + + "github.com/asaskevich/govalidator" + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/decls" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/util/validation" + apiservercel "k8s.io/apiserver/pkg/cel" + "k8s.io/kube-openapi/pkg/validation/strfmt" +) + +// Format provides a CEL library exposing common named Kubernetes string +// validations. Can be used in CRD ValidationRules messageExpression. +// +// Example: +// +// rule: format.dns1123label.validate(object.metadata.name).hasValue() +// messageExpression: format.dns1123label.validate(object.metadata.name).value().join("\n") +// +// format.named(name: string) -> ?Format +// +// Returns the Format with the given name, if it exists. Otherwise, optional.none +// Allowed names are: +// - `dns1123Label` +// - `dns1123Subdomain` +// - `dns1035Label` +// - `qualifiedName` +// - `dns1123LabelPrefix` +// - `dns1123SubdomainPrefix` +// - `dns1035LabelPrefix` +// - `labelValue` +// - `uri` +// - `uuid` +// - `byte` +// - `date` +// - `datetime` +// +// format.() -> Format +// +// Convenience functions for all the named formats are also available +// +// Examples: +// format.dns1123Label().validate("my-label-name") +// format.dns1123Subdomain().validate("apiextensions.k8s.io") +// format.dns1035Label().validate("my-label-name") +// format.qualifiedName().validate("apiextensions.k8s.io/v1beta1") +// format.dns1123LabelPrefix().validate("my-label-prefix-") +// format.dns1123SubdomainPrefix().validate("mysubdomain.prefix.-") +// format.dns1035LabelPrefix().validate("my-label-prefix-") +// format.uri().validate("http://example.com") +// Uses same pattern as isURL, but returns an error +// format.uuid().validate("123e4567-e89b-12d3-a456-426614174000") +// format.byte().validate("aGVsbG8=") +// format.date().validate("2021-01-01") +// format.datetime().validate("2021-01-01T00:00:00Z") +// + +// .validate(str: string) -> ?list +// +// Validates the given string against the given format. Returns optional.none +// if the string is valid, otherwise a list of validation error strings. 
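[Editor's note, not part of the patch] A minimal standalone sketch of using this library. Assumptions: the vendored import path, and that cel.OptionalTypes() must be enabled because validate() returns an optional list of error strings.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"k8s.io/apiserver/pkg/cel/library"
)

func main() {
	env, err := cel.NewEnv(cel.OptionalTypes(), library.Format())
	if err != nil {
		panic(err)
	}
	// "My_Invalid_Name" violates DNS-1123 label rules, so validate() returns
	// an optional list of error strings and hasValue() evaluates to true.
	ast, iss := env.Compile(`format.dns1123Label().validate("My_Invalid_Name").hasValue()`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]interface{}{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}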
+func Format() cel.EnvOption { + return cel.Lib(formatLib) +} + +var formatLib = &format{} + +type format struct{} + +func (*format) LibraryName() string { + return "kubernetes.format" +} + +func (*format) Types() []*cel.Type { + return []*cel.Type{apiservercel.FormatType} +} + +func (*format) declarations() map[string][]cel.FunctionOpt { + return formatLibraryDecls +} + +func ZeroArgumentFunctionBinding(binding func() ref.Val) decls.OverloadOpt { + return func(o *decls.OverloadDecl) (*decls.OverloadDecl, error) { + wrapped, err := decls.FunctionBinding(func(values ...ref.Val) ref.Val { return binding() })(o) + if err != nil { + return nil, err + } + if len(wrapped.ArgTypes()) != 0 { + return nil, fmt.Errorf("function binding must have 0 arguments") + } + return o, nil + } +} + +func (*format) CompileOptions() []cel.EnvOption { + options := make([]cel.EnvOption, 0, len(formatLibraryDecls)) + for name, overloads := range formatLibraryDecls { + options = append(options, cel.Function(name, overloads...)) + } + for name, constantValue := range ConstantFormats { + prefixedName := "format." + name + options = append(options, cel.Function(prefixedName, cel.Overload(prefixedName, []*cel.Type{}, apiservercel.FormatType, ZeroArgumentFunctionBinding(func() ref.Val { + return constantValue + })))) + } + return options +} + +func (*format) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +var ConstantFormats = map[string]apiservercel.Format{ + "dns1123Label": { + Name: "DNS1123Label", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, false) }, + MaxRegexSize: 30, + }, + "dns1123Subdomain": { + Name: "DNS1123Subdomain", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSSubdomain(s, false) }, + MaxRegexSize: 60, + }, + "dns1035Label": { + Name: "DNS1035Label", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNS1035Label(s, false) }, + MaxRegexSize: 30, + }, + "qualifiedName": { + Name: "QualifiedName", + ValidateFunc: validation.IsQualifiedName, + MaxRegexSize: 60, // uses subdomain regex + }, + + "dns1123LabelPrefix": { + Name: "DNS1123LabelPrefix", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, true) }, + MaxRegexSize: 30, + }, + "dns1123SubdomainPrefix": { + Name: "DNS1123SubdomainPrefix", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSSubdomain(s, true) }, + MaxRegexSize: 60, + }, + "dns1035LabelPrefix": { + Name: "DNS1035LabelPrefix", + ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNS1035Label(s, true) }, + MaxRegexSize: 30, + }, + "labelValue": { + Name: "LabelValue", + ValidateFunc: validation.IsValidLabelValue, + MaxRegexSize: 40, + }, + + // CRD formats + // Implementations sourced from strfmt, which kube-openapi uses as its + // format library. 
There are other CRD formats supported, but they are + // covered by other portions of the CEL library (like IP/CIDR), or their + // use is discouraged (like bsonobjectid, email, etc) + "uri": { + Name: "URI", + ValidateFunc: func(s string) []string { + // Directly call ParseRequestURI since we can get a better error message + _, err := url.ParseRequestURI(s) + if err != nil { + return []string{err.Error()} + } + return nil + }, + // Use govalidator url regex to estimate, since ParseRequestURI + // doesnt use regex + MaxRegexSize: len(govalidator.URL), + }, + "uuid": { + Name: "uuid", + ValidateFunc: func(s string) []string { + if !strfmt.Default.Validates("uuid", s) { + return []string{"does not match the UUID format"} + } + return nil + }, + MaxRegexSize: len(strfmt.UUIDPattern), + }, + "byte": { + Name: "byte", + ValidateFunc: func(s string) []string { + if !strfmt.Default.Validates("byte", s) { + return []string{"invalid base64"} + } + return nil + }, + MaxRegexSize: len(govalidator.Base64), + }, + "date": { + Name: "date", + ValidateFunc: func(s string) []string { + if !strfmt.Default.Validates("date", s) { + return []string{"invalid date"} + } + return nil + }, + // Estimated regex size for RFC3339FullDate which is + // a date format. Assume a date-time pattern is longer + // so use that to conservatively estimate this + MaxRegexSize: len(strfmt.DateTimePattern), + }, + "datetime": { + Name: "datetime", + ValidateFunc: func(s string) []string { + if !strfmt.Default.Validates("datetime", s) { + return []string{"invalid datetime"} + } + return nil + }, + MaxRegexSize: len(strfmt.DateTimePattern), + }, +} + +var formatLibraryDecls = map[string][]cel.FunctionOpt{ + "validate": { + cel.MemberOverload("format-validate", []*cel.Type{apiservercel.FormatType, cel.StringType}, cel.OptionalType(cel.ListType(cel.StringType)), cel.BinaryBinding(formatValidate)), + }, + "format.named": { + cel.Overload("format-named", []*cel.Type{cel.StringType}, cel.OptionalType(apiservercel.FormatType), cel.UnaryBinding(func(name ref.Val) ref.Val { + nameString, ok := name.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(name) + } + + f, ok := ConstantFormats[nameString] + if !ok { + return types.OptionalNone + } + return types.OptionalOf(f) + })), + }, +} + +func formatValidate(arg1, arg2 ref.Val) ref.Val { + f, ok := arg1.Value().(apiservercel.Format) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + str, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg2) + } + + res := f.ValidateFunc(str) + if len(res) == 0 { + return types.OptionalNone + } + return types.OptionalOf(types.NewStringList(types.DefaultTypeAdapter, res)) +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/ip.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/ip.go index cdfeb1daf..8edc4463a 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/ip.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/ip.go @@ -132,7 +132,15 @@ var ipLib = &ip{} type ip struct{} func (*ip) LibraryName() string { - return "net.ip" + return "kubernetes.net.ip" +} + +func (*ip) declarations() map[string][]cel.FunctionOpt { + return ipLibraryDecls +} + +func (*ip) Types() []*cel.Type { + return []*cel.Type{apiservercel.IPType} } var ipLibraryDecls = map[string][]cel.FunctionOpt{ diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/jsonpatch.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/jsonpatch.go new file mode 100644 index 000000000..bdcb6d852 --- 
/dev/null +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/jsonpatch.go @@ -0,0 +1,89 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package library + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "strings" +) + +// JSONPatch provides a CEL function library extension of JSONPatch functions. +// +// jsonpatch.escapeKey +// +// Escapes a string for use as a JSONPatch path key. +// +// jsonpatch.escapeKey() +// +// Examples: +// +// "/metadata/labels/" + jsonpatch.escapeKey('k8s.io/my~label') // returns "/metadata/labels/k8s.io~1my~0label" +func JSONPatch() cel.EnvOption { + return cel.Lib(jsonPatchLib) +} + +var jsonPatchLib = &jsonPatch{} + +type jsonPatch struct{} + +func (*jsonPatch) LibraryName() string { + return "kubernetes.jsonpatch" +} + +func (*jsonPatch) declarations() map[string][]cel.FunctionOpt { + return jsonPatchLibraryDecls +} + +func (*jsonPatch) Types() []*cel.Type { + return []*cel.Type{} +} + +var jsonPatchLibraryDecls = map[string][]cel.FunctionOpt{ + "jsonpatch.escapeKey": { + cel.Overload("string_jsonpatch_escapeKey_string", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(escape)), + }, +} + +func (*jsonPatch) CompileOptions() []cel.EnvOption { + var options []cel.EnvOption + for name, overloads := range jsonPatchLibraryDecls { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (*jsonPatch) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +var jsonPatchReplacer = strings.NewReplacer("/", "~1", "~", "~0") + +func escapeKey(k string) string { + return jsonPatchReplacer.Replace(k) +} + +func escape(arg ref.Val) ref.Val { + s, ok := arg.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + escaped := escapeKey(s) + return types.String(escaped) +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go new file mode 100644 index 000000000..dc436973e --- /dev/null +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go @@ -0,0 +1,61 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package library + +import ( + "github.com/google/cel-go/cel" +) + +// Library represents a CEL library used by kubernetes. 
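[Editor's note, not part of the patch] A hedged usage sketch for the jsonpatch library added above, under assumed standalone wiring. escapeKey applies JSON Pointer escaping (RFC 6901): "~" becomes "~0" and "/" becomes "~1".

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"k8s.io/apiserver/pkg/cel/library"
)

func main() {
	env, err := cel.NewEnv(library.JSONPatch())
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`"/metadata/labels/" + jsonpatch.escapeKey("k8s.io/my~label")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]interface{}{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // /metadata/labels/k8s.io~1my~0label
}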
+type Library interface { + // SingletonLibrary provides the library name and ensures the library can be safely registered into environments. + cel.SingletonLibrary + + // Types provides all custom types introduced by the library. + Types() []*cel.Type + + // declarations returns all function declarations provided by the library. + declarations() map[string][]cel.FunctionOpt +} + +// KnownLibraries returns all libraries used in Kubernetes. +func KnownLibraries() []Library { + return []Library{ + authzLib, + authzSelectorsLib, + listsLib, + regexLib, + urlsLib, + quantityLib, + ipLib, + cidrsLib, + formatLib, + semverLib, + jsonPatchLib, + } +} + +func isRegisteredType(typeName string) bool { + for _, lib := range KnownLibraries() { + for _, rt := range lib.Types() { + if rt.TypeName() == typeName { + return true + } + } + } + return false +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/lists.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/lists.go index 327ec93d6..1f61b1181 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/lists.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/lists.go @@ -96,7 +96,15 @@ var listsLib = &lists{} type lists struct{} func (*lists) LibraryName() string { - return "k8s.lists" + return "kubernetes.lists" +} + +func (*lists) Types() []*cel.Type { + return []*cel.Type{} +} + +func (*lists) declarations() map[string][]cel.FunctionOpt { + return listsLibraryDecls } var paramA = cel.TypeParamType("A") diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go index b4ac91c8a..236b366b4 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go @@ -143,7 +143,15 @@ var quantityLib = &quantity{} type quantity struct{} func (*quantity) LibraryName() string { - return "k8s.quantity" + return "kubernetes.quantity" +} + +func (*quantity) Types() []*cel.Type { + return []*cel.Type{apiservercel.QuantityType} +} + +func (*quantity) declarations() map[string][]cel.FunctionOpt { + return quantityLibraryDecls } var quantityLibraryDecls = map[string][]cel.FunctionOpt{ diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/regex.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/regex.go index 147a40f9b..2cf8b0037 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/regex.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/regex.go @@ -52,7 +52,15 @@ var regexLib = ®ex{} type regex struct{} func (*regex) LibraryName() string { - return "k8s.regex" + return "kubernetes.regex" +} + +func (*regex) Types() []*cel.Type { + return []*cel.Type{} +} + +func (*regex) declarations() map[string][]cel.FunctionOpt { + return regexLibraryDecls } var regexLibraryDecls = map[string][]cel.FunctionOpt{ diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/semverlib.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/semverlib.go new file mode 100644 index 000000000..d8c79ae02 --- /dev/null +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/semverlib.go @@ -0,0 +1,247 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package library + +import ( + "github.com/blang/semver/v4" + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + apiservercel "k8s.io/apiserver/pkg/cel" +) + +// Semver provides a CEL function library extension for [semver.Version]. +// +// semver +// +// Converts a string to a semantic version or results in an error if the string is not a valid semantic version. Refer +// to semver.org documentation for information on accepted patterns. +// +// semver() +// +// Examples: +// +// semver('1.0.0') // returns a Semver +// semver('0.1.0-alpha.1') // returns a Semver +// semver('200K') // error +// semver('Three') // error +// semver('Mi') // error +// +// isSemver +// +// Returns true if a string is a valid Semver. isSemver returns true if and +// only if semver does not result in error. +// +// isSemver( ) +// +// Examples: +// +// isSemver('1.0.0') // returns true +// isSemver('v1.0') // returns true (tolerant parsing) +// isSemver('hello') // returns false +// +// Conversion to Scalars: +// +// - major/minor/patch: return the major version number as int64. +// +// .major() +// +// Examples: +// +// semver("1.2.3").major() // returns 1 +// +// Comparisons +// +// - isGreaterThan: Returns true if and only if the receiver is greater than the operand +// +// - isLessThan: Returns true if and only if the receiver is less than the operand +// +// - compareTo: Compares receiver to operand and returns 0 if they are equal, 1 if the receiver is greater, or -1 if the receiver is less than the operand +// +// +// .isLessThan() +// .isGreaterThan() +// .compareTo() +// +// Examples: +// +// semver("1.2.3").compareTo(semver("1.2.3")) // returns 0 +// semver("1.2.3").compareTo(semver("2.0.0")) // returns -1 +// semver("1.2.3").compareTo(semver("0.1.2")) // returns 1 + +func SemverLib() cel.EnvOption { + return cel.Lib(semverLib) +} + +var semverLib = &semverLibType{} + +type semverLibType struct{} + +func (*semverLibType) LibraryName() string { + return "kubernetes.Semver" +} + +func (*semverLibType) Types() []*cel.Type { + return []*cel.Type{apiservercel.SemverType} +} + +func (*semverLibType) declarations() map[string][]cel.FunctionOpt { + return map[string][]cel.FunctionOpt{ + "semver": { + cel.Overload("string_to_semver", []*cel.Type{cel.StringType}, apiservercel.SemverType, cel.UnaryBinding((stringToSemver))), + }, + "isSemver": { + cel.Overload("is_semver_string", []*cel.Type{cel.StringType}, cel.BoolType, cel.UnaryBinding(isSemver)), + }, + "isGreaterThan": { + cel.MemberOverload("semver_is_greater_than", []*cel.Type{apiservercel.SemverType, apiservercel.SemverType}, cel.BoolType, cel.BinaryBinding(semverIsGreaterThan)), + }, + "isLessThan": { + cel.MemberOverload("semver_is_less_than", []*cel.Type{apiservercel.SemverType, apiservercel.SemverType}, cel.BoolType, cel.BinaryBinding(semverIsLessThan)), + }, + "compareTo": { + cel.MemberOverload("semver_compare_to", []*cel.Type{apiservercel.SemverType, apiservercel.SemverType}, cel.IntType, cel.BinaryBinding(semverCompareTo)), + }, + "major": { + cel.MemberOverload("semver_major", 
[]*cel.Type{apiservercel.SemverType}, cel.IntType, cel.UnaryBinding(semverMajor)), + }, + "minor": { + cel.MemberOverload("semver_minor", []*cel.Type{apiservercel.SemverType}, cel.IntType, cel.UnaryBinding(semverMinor)), + }, + "patch": { + cel.MemberOverload("semver_patch", []*cel.Type{apiservercel.SemverType}, cel.IntType, cel.UnaryBinding(semverPatch)), + }, + } +} + +func (s *semverLibType) CompileOptions() []cel.EnvOption { + // Defined in this function to avoid an initialization order problem. + semverLibraryDecls := s.declarations() + options := make([]cel.EnvOption, 0, len(semverLibraryDecls)) + for name, overloads := range semverLibraryDecls { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (*semverLibType) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func isSemver(arg ref.Val) ref.Val { + str, ok := arg.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + // Using semver/v4 here is okay because this function isn't + // used to validate the Kubernetes API. In the CEL base library + // we would have to use the regular expression from + // pkg/apis/resource/structured/namedresources/validation/validation.go. + _, err := semver.Parse(str) + if err != nil { + return types.Bool(false) + } + + return types.Bool(true) +} + +func stringToSemver(arg ref.Val) ref.Val { + str, ok := arg.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + // Using semver/v4 here is okay because this function isn't + // used to validate the Kubernetes API. In the CEL base library + // we would have to use the regular expression from + // pkg/apis/resource/structured/namedresources/validation/validation.go + // first before parsing. + v, err := semver.Parse(str) + if err != nil { + return types.WrapErr(err) + } + + return apiservercel.Semver{Version: v} +} + +func semverMajor(arg ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + return types.Int(v.Major) +} + +func semverMinor(arg ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + return types.Int(v.Minor) +} + +func semverPatch(arg ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + return types.Int(v.Patch) +} + +func semverIsGreaterThan(arg ref.Val, other ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + v2, ok := other.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + return types.Bool(v.Compare(v2) == 1) +} + +func semverIsLessThan(arg ref.Val, other ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + v2, ok := other.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + return types.Bool(v.Compare(v2) == -1) +} + +func semverCompareTo(arg ref.Val, other ref.Val) ref.Val { + v, ok := arg.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + v2, ok := other.Value().(semver.Version) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + return types.Int(v.Compare(v2)) +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/test.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/test.go index dcbc058a1..282d93962 100644 --- 
a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/test.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/test.go @@ -38,7 +38,7 @@ type testLib struct { } func (*testLib) LibraryName() string { - return "k8s.test" + return "kubernetes.test" } type TestOption func(*testLib) *testLib diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/urls.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/urls.go index 8f4ba85af..4b7ffb95a 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/library/urls.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/library/urls.go @@ -113,7 +113,15 @@ var urlsLib = &urls{} type urls struct{} func (*urls) LibraryName() string { - return "k8s.urls" + return "kubernetes.urls" +} + +func (*urls) Types() []*cel.Type { + return []*cel.Type{apiservercel.URLType} +} + +func (*urls) declarations() map[string][]cel.FunctionOpt { + return urlLibraryDecls } var urlLibraryDecls = map[string][]cel.FunctionOpt{ diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/limits.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/limits.go index 65c6ad5f3..14b3ec2d2 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/limits.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/limits.go @@ -47,4 +47,8 @@ const ( MinBoolSize = 4 // MinNumberSize is the length of literal 0 MinNumberSize = 1 + + // MaxFormatSize is the maximum size we allow for format strings + MaxFormatSize = 64 + MaxNameFormatRegexSize = 128 ) diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/metrics/metrics.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/metrics/metrics.go index 66b7dd8cf..02f232c17 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/metrics/metrics.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/metrics/metrics.go @@ -44,14 +44,14 @@ func newCelMetrics() *CelMetrics { Subsystem: subsystem, Name: "compilation_duration_seconds", Help: "CEL compilation time in seconds.", - StabilityLevel: metrics.ALPHA, + StabilityLevel: metrics.BETA, }), evaluationTime: metrics.NewHistogram(&metrics.HistogramOpts{ Namespace: namespace, Subsystem: subsystem, Name: "evaluation_duration_seconds", Help: "CEL evaluation time in seconds.", - StabilityLevel: metrics.ALPHA, + StabilityLevel: metrics.BETA, }), } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/combined.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/combined.go deleted file mode 100644 index eb3c37635..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/combined.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resolver - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// Combine combines the DefinitionsSchemaResolver with a secondary schema resolver. -// The resulting schema resolver uses the DefinitionsSchemaResolver for a GVK that DefinitionsSchemaResolver knows, -// and the secondary otherwise. 
-func (d *DefinitionsSchemaResolver) Combine(secondary SchemaResolver) SchemaResolver { - return &combinedSchemaResolver{definitions: d, secondary: secondary} -} - -type combinedSchemaResolver struct { - definitions *DefinitionsSchemaResolver - secondary SchemaResolver -} - -// ResolveSchema takes a GroupVersionKind (GVK) and returns the OpenAPI schema -// identified by the GVK. -// If the DefinitionsSchemaResolver knows the gvk, the DefinitionsSchemaResolver handles the resolution, -// otherwise, the secondary does. -func (r *combinedSchemaResolver) ResolveSchema(gvk schema.GroupVersionKind) (*spec.Schema, error) { - if _, ok := r.definitions.gvkToRef[gvk]; ok { - return r.definitions.ResolveSchema(gvk) - } - return r.secondary.ResolveSchema(gvk) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go deleted file mode 100644 index 12b353b0b..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resolver - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/endpoints/openapi" - "k8s.io/kube-openapi/pkg/common" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// DefinitionsSchemaResolver resolves the schema of a built-in type -// by looking up the OpenAPI definitions. -type DefinitionsSchemaResolver struct { - defs map[string]common.OpenAPIDefinition - gvkToRef map[schema.GroupVersionKind]string -} - -// NewDefinitionsSchemaResolver creates a new DefinitionsSchemaResolver. -// An example working setup: -// getDefinitions = "k8s.io/kubernetes/pkg/generated/openapi".GetOpenAPIDefinitions -// scheme = "k8s.io/client-go/kubernetes/scheme".Scheme -func NewDefinitionsSchemaResolver(getDefinitions common.GetOpenAPIDefinitions, schemes ...*runtime.Scheme) *DefinitionsSchemaResolver { - gvkToRef := make(map[schema.GroupVersionKind]string) - namer := openapi.NewDefinitionNamer(schemes...) 
- defs := getDefinitions(func(path string) spec.Ref { - return spec.MustCreateRef(path) - }) - for name := range defs { - _, e := namer.GetDefinitionName(name) - gvks := extensionsToGVKs(e) - for _, gvk := range gvks { - gvkToRef[gvk] = name - } - } - return &DefinitionsSchemaResolver{ - gvkToRef: gvkToRef, - defs: defs, - } -} - -func (d *DefinitionsSchemaResolver) ResolveSchema(gvk schema.GroupVersionKind) (*spec.Schema, error) { - ref, ok := d.gvkToRef[gvk] - if !ok { - return nil, fmt.Errorf("cannot resolve %v: %w", gvk, ErrSchemaNotFound) - } - s, err := PopulateRefs(func(ref string) (*spec.Schema, bool) { - // find the schema by the ref string, and return a deep copy - def, ok := d.defs[ref] - if !ok { - return nil, false - } - s := def.Schema - return &s, true - }, ref) - if err != nil { - return nil, err - } - return s, nil -} - -func extensionsToGVKs(extensions spec.Extensions) []schema.GroupVersionKind { - gvksAny, ok := extensions[extGVK] - if !ok { - return nil - } - gvks, ok := gvksAny.([]any) - if !ok { - return nil - } - result := make([]schema.GroupVersionKind, 0, len(gvks)) - for _, gvkAny := range gvks { - // type check the map and all fields - gvkMap, ok := gvkAny.(map[string]any) - if !ok { - return nil - } - g, ok := gvkMap["group"].(string) - if !ok { - return nil - } - v, ok := gvkMap["version"].(string) - if !ok { - return nil - } - k, ok := gvkMap["kind"].(string) - if !ok { - return nil - } - result = append(result, schema.GroupVersionKind{ - Group: g, - Version: v, - Kind: k, - }) - } - return result -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go deleted file mode 100644 index 9c6cefce8..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resolver - -import ( - "encoding/json" - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// ClientDiscoveryResolver uses client-go discovery to resolve schemas at run time. 
-type ClientDiscoveryResolver struct { - Discovery discovery.DiscoveryInterface -} - -var _ SchemaResolver = (*ClientDiscoveryResolver)(nil) - -func (r *ClientDiscoveryResolver) ResolveSchema(gvk schema.GroupVersionKind) (*spec.Schema, error) { - p, err := r.Discovery.OpenAPIV3().Paths() - if err != nil { - return nil, err - } - resourcePath := resourcePathFromGV(gvk.GroupVersion()) - c, ok := p[resourcePath] - if !ok { - return nil, fmt.Errorf("cannot resolve group version %q: %w", gvk.GroupVersion(), ErrSchemaNotFound) - } - b, err := c.Schema(runtime.ContentTypeJSON) - if err != nil { - return nil, err - } - resp := new(schemaResponse) - err = json.Unmarshal(b, resp) - if err != nil { - return nil, err - } - ref, err := resolveRef(resp, gvk) - if err != nil { - return nil, err - } - s, err := PopulateRefs(func(ref string) (*spec.Schema, bool) { - s, ok := resp.Components.Schemas[strings.TrimPrefix(ref, refPrefix)] - return s, ok - }, ref) - if err != nil { - return nil, err - } - return s, nil -} - -func resolveRef(resp *schemaResponse, gvk schema.GroupVersionKind) (string, error) { - for ref, s := range resp.Components.Schemas { - var gvks []schema.GroupVersionKind - err := s.Extensions.GetObject(extGVK, &gvks) - if err != nil { - return "", err - } - for _, g := range gvks { - if g == gvk { - return ref, nil - } - } - } - return "", fmt.Errorf("cannot resolve group version kind %q: %w", gvk, ErrSchemaNotFound) -} - -func resourcePathFromGV(gv schema.GroupVersion) string { - var resourcePath string - if len(gv.Group) == 0 { - resourcePath = fmt.Sprintf("api/%s", gv.Version) - } else { - resourcePath = fmt.Sprintf("apis/%s/%s", gv.Group, gv.Version) - } - return resourcePath -} - -type schemaResponse struct { - Components struct { - Schemas map[string]*spec.Schema `json:"schemas"` - } `json:"components"` -} - -const refPrefix = "#/components/schemas/" - -const extGVK = "x-kubernetes-group-version-kind" diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go deleted file mode 100644 index 56e2a4bbd..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resolver - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// PopulateRefs recursively replaces Refs in the schema with the referred one. -// schemaOf is the callback to find the corresponding schema by the ref. -// This function will not mutate the original schema. If the schema needs to be -// mutated, a copy will be returned, otherwise it returns the original schema. 
-func PopulateRefs(schemaOf func(ref string) (*spec.Schema, bool), rootRef string) (*spec.Schema, error) { - visitedRefs := sets.New[string]() - rootSchema, ok := schemaOf(rootRef) - visitedRefs.Insert(rootRef) - if !ok { - return nil, fmt.Errorf("internal error: cannot resolve Ref for root schema %q: %w", rootRef, ErrSchemaNotFound) - } - return populateRefs(schemaOf, visitedRefs, rootSchema) -} - -func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), visited sets.Set[string], schema *spec.Schema) (*spec.Schema, error) { - result := *schema - changed := false - - ref, isRef := refOf(schema) - if isRef { - if visited.Has(ref) { - return &spec.Schema{ - // for circular ref, return an empty object as placeholder - SchemaProps: spec.SchemaProps{Type: []string{"object"}}, - }, nil - } - visited.Insert(ref) - // restore visited state at the end of the recursion. - defer func() { - visited.Delete(ref) - }() - // replace the whole schema with the referred one. - resolved, ok := schemaOf(ref) - if !ok { - return nil, fmt.Errorf("internal error: cannot resolve Ref %q: %w", ref, ErrSchemaNotFound) - } - result = *resolved - changed = true - } - // schema is an object, populate its properties and additionalProperties - props := make(map[string]spec.Schema, len(schema.Properties)) - propsChanged := false - for name, prop := range result.Properties { - populated, err := populateRefs(schemaOf, visited, &prop) - if err != nil { - return nil, err - } - if populated != &prop { - propsChanged = true - } - props[name] = *populated - } - if propsChanged { - changed = true - result.Properties = props - } - if result.AdditionalProperties != nil && result.AdditionalProperties.Schema != nil { - populated, err := populateRefs(schemaOf, visited, result.AdditionalProperties.Schema) - if err != nil { - return nil, err - } - if populated != result.AdditionalProperties.Schema { - changed = true - result.AdditionalProperties.Schema = populated - } - } - // schema is a list, populate its items - if result.Items != nil && result.Items.Schema != nil { - populated, err := populateRefs(schemaOf, visited, result.Items.Schema) - if err != nil { - return nil, err - } - if populated != result.Items.Schema { - changed = true - result.Items.Schema = populated - } - } - if changed { - return &result, nil - } - return schema, nil -} - -func refOf(schema *spec.Schema) (string, bool) { - if schema.Ref.GetURL() != nil { - return schema.Ref.String(), true - } - // A Ref may be wrapped in allOf to preserve its description - // see https://github.com/kubernetes/kubernetes/issues/106387 - // For kube-openapi, allOf is only used for wrapping a Ref. - for _, allOf := range schema.AllOf { - if ref, isRef := refOf(&allOf); isRef { - return ref, isRef - } - } - return "", false -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/resolver.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/resolver.go deleted file mode 100644 index 4060c86ce..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/resolver.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resolver - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// SchemaResolver finds the OpenAPI schema for the given GroupVersionKind. -// This interface uses the type defined by k8s.io/kube-openapi -type SchemaResolver interface { - // ResolveSchema takes a GroupVersionKind (GVK) and returns the OpenAPI schema - // identified by the GVK. - // The function returns a non-nil error if the schema cannot be found or fail - // to resolve. The returned error wraps ErrSchemaNotFound if the resolution is - // attempted but the corresponding schema cannot be found. - ResolveSchema(gvk schema.GroupVersionKind) (*spec.Schema, error) -} - -// ErrSchemaNotFound is wrapped and returned if the schema cannot be located -// by the resolver. -var ErrSchemaNotFound = fmt.Errorf("schema not found") diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/quantity.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/quantity.go index 1057e33fe..ce8239643 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/quantity.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/quantity.go @@ -50,7 +50,7 @@ func (d Quantity) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { func (d Quantity) ConvertToType(typeVal ref.Type) ref.Val { switch typeVal { - case typeValue: + case quantityTypeValue: return d case types.TypeType: return quantityTypeValue diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/semver.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/semver.go new file mode 100644 index 000000000..c53b9c306 --- /dev/null +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/semver.go @@ -0,0 +1,73 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "fmt" + "reflect" + + "github.com/blang/semver/v4" + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +var ( + SemverType = cel.ObjectType("kubernetes.Semver") +) + +// Semver provdes a CEL representation of a [semver.Version]. 
+type Semver struct { + semver.Version +} + +func (v Semver) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { + if reflect.TypeOf(v.Version).AssignableTo(typeDesc) { + return v.Version, nil + } + if reflect.TypeOf("").AssignableTo(typeDesc) { + return v.Version.String(), nil + } + return nil, fmt.Errorf("type conversion error from 'Semver' to '%v'", typeDesc) +} + +func (v Semver) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case SemverType: + return v + case types.TypeType: + return SemverType + default: + return types.NewErr("type conversion error from '%s' to '%s'", SemverType, typeVal) + } +} + +func (v Semver) Equal(other ref.Val) ref.Val { + otherDur, ok := other.(Semver) + if !ok { + return types.MaybeNoSuchOverloadErr(other) + } + return types.Bool(v.Version.EQ(otherDur.Version)) +} + +func (v Semver) Type() ref.Type { + return SemverType +} + +func (v Semver) Value() interface{} { + return v.Version +} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/cel/types.go b/constraint/vendor/k8s.io/apiserver/pkg/cel/types.go index bd14e1697..84bfd7e65 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/cel/types.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/cel/types.go @@ -27,7 +27,7 @@ import ( "github.com/google/cel-go/common/types/traits" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - "google.golang.org/protobuf/proto" + "k8s.io/apimachinery/pkg/api/resource" ) const ( @@ -348,9 +348,14 @@ func NewDeclTypeProvider(rootTypes ...*DeclType) *DeclTypeProvider { // DeclTypeProvider extends the CEL ref.TypeProvider interface and provides an Open API Schema-based // type-system. type DeclTypeProvider struct { - registeredTypes map[string]*DeclType - typeProvider ref.TypeProvider - typeAdapter ref.TypeAdapter + registeredTypes map[string]*DeclType + typeProvider types.Provider + typeAdapter types.Adapter + recognizeKeywordAsFieldName bool +} + +func (rt *DeclTypeProvider) SetRecognizeKeywordAsFieldName(recognize bool) { + rt.recognizeKeywordAsFieldName = recognize } func (rt *DeclTypeProvider) EnumValue(enumName string) ref.Val { @@ -365,7 +370,7 @@ func (rt *DeclTypeProvider) FindIdent(identName string) (ref.Val, bool) { // as well as a custom ref.TypeProvider. // // If the DeclTypeProvider value is nil, an empty []cel.EnvOption set is returned. -func (rt *DeclTypeProvider) EnvOptions(tp ref.TypeProvider) ([]cel.EnvOption, error) { +func (rt *DeclTypeProvider) EnvOptions(tp types.Provider) ([]cel.EnvOption, error) { if rt == nil { return []cel.EnvOption{}, nil } @@ -381,54 +386,52 @@ func (rt *DeclTypeProvider) EnvOptions(tp ref.TypeProvider) ([]cel.EnvOption, er // WithTypeProvider returns a new DeclTypeProvider that sets the given TypeProvider // If the original DeclTypeProvider is nil, the returned DeclTypeProvider is still nil. 
-func (rt *DeclTypeProvider) WithTypeProvider(tp ref.TypeProvider) (*DeclTypeProvider, error) { +func (rt *DeclTypeProvider) WithTypeProvider(tp types.Provider) (*DeclTypeProvider, error) { if rt == nil { return nil, nil } - var ta ref.TypeAdapter = types.DefaultTypeAdapter - tpa, ok := tp.(ref.TypeAdapter) + var ta types.Adapter = types.DefaultTypeAdapter + tpa, ok := tp.(types.Adapter) if ok { ta = tpa } rtWithTypes := &DeclTypeProvider{ - typeProvider: tp, - typeAdapter: ta, - registeredTypes: rt.registeredTypes, + typeProvider: tp, + typeAdapter: ta, + registeredTypes: rt.registeredTypes, + recognizeKeywordAsFieldName: rt.recognizeKeywordAsFieldName, } for name, declType := range rt.registeredTypes { - tpType, found := tp.FindType(name) - expT, err := declType.ExprType() - if err != nil { - return nil, fmt.Errorf("fail to get cel type: %s", err) - } - if found && !proto.Equal(tpType, expT) { + tpType, found := tp.FindStructType(name) + // cast celType to types.type + + expT := declType.CelType() + if found && !expT.IsExactType(tpType) { return nil, fmt.Errorf( "type %s definition differs between CEL environment and type provider", name) } + } return rtWithTypes, nil } -// FindType attempts to resolve the typeName provided from the rule's rule-schema, or if not +// FindStructType attempts to resolve the typeName provided from the rule's rule-schema, or if not // from the embedded ref.TypeProvider. // -// FindType overrides the default type-finding behavior of the embedded TypeProvider. +// FindStructType overrides the default type-finding behavior of the embedded TypeProvider. // // Note, when the type name is based on the Open API Schema, the name will reflect the object path // where the type definition appears. -func (rt *DeclTypeProvider) FindType(typeName string) (*exprpb.Type, bool) { +func (rt *DeclTypeProvider) FindStructType(typeName string) (*types.Type, bool) { if rt == nil { return nil, false } declType, found := rt.findDeclType(typeName) if found { - expT, err := declType.ExprType() - if err != nil { - return expT, false - } - return expT, found + expT := declType.CelType() + return types.NewTypeTypeWithParam(expT), found } - return rt.typeProvider.FindType(typeName) + return rt.typeProvider.FindStructType(typeName) } // FindDeclType returns the CPT type description which can be mapped to a CEL type. @@ -439,37 +442,41 @@ func (rt *DeclTypeProvider) FindDeclType(typeName string) (*DeclType, bool) { return rt.findDeclType(typeName) } -// FindFieldType returns a field type given a type name and field name, if found. +// FindStructFieldNames returns the field names associated with the type, if the type +// is found. +func (rt *DeclTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) { + return []string{}, false +} + +// FindStructFieldType returns a field type given a type name and field name, if found. // // Note, the type name for an Open API Schema type is likely to be its qualified object path. // If, in the future an object instance rather than a type name were provided, the field // resolution might more accurately reflect the expected type model. However, in this case // concessions were made to align with the existing CEL interfaces. 
-func (rt *DeclTypeProvider) FindFieldType(typeName, fieldName string) (*ref.FieldType, bool) { +func (rt *DeclTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) { st, found := rt.findDeclType(typeName) if !found { - return rt.typeProvider.FindFieldType(typeName, fieldName) + return rt.typeProvider.FindStructFieldType(typeName, fieldName) } f, found := st.Fields[fieldName] + if rt.recognizeKeywordAsFieldName && !found && celReservedSymbols.Has(fieldName) { + f, found = st.Fields["__"+fieldName+"__"] + } + if found { ft := f.Type - expT, err := ft.ExprType() - if err != nil { - return nil, false - } - return &ref.FieldType{ + expT := ft.CelType() + return &types.FieldType{ Type: expT, }, true } // This could be a dynamic map. if st.IsMap() { et := st.ElemType - expT, err := et.ExprType() - if err != nil { - return nil, false - } - return &ref.FieldType{ + expT := et.CelType() + return &types.FieldType{ Type: expT, }, true } @@ -576,6 +583,10 @@ var ( // labeled as Timestamp will necessarily have the same MinSerializedSize. TimestampType = NewSimpleTypeWithMinSize("timestamp", cel.TimestampType, types.Timestamp{Time: time.Time{}}, JSONDateSize) + // QuantityDeclType wraps a [QuantityType] and makes it usable with functions that expect + // a [DeclType]. + QuantityDeclType = NewSimpleTypeWithMinSize("quantity", QuantityType, Quantity{Quantity: resource.NewQuantity(0, resource.DecimalSI)}, 8) + // UintType is equivalent to the CEL 'uint' type. UintType = NewSimpleTypeWithMinSize("uint", cel.UintType, types.Uint(0), 1) diff --git a/constraint/vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go b/constraint/vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go deleted file mode 100644 index e61f44439..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package openapi - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strings" - "unicode" - - restful "github.com/emicklei/go-restful/v3" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kube-openapi/pkg/util" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -var verbs = util.NewTrie([]string{"get", "log", "read", "replace", "patch", "delete", "deletecollection", "watch", "connect", "proxy", "list", "create", "patch"}) - -const ( - extensionGVK = "x-kubernetes-group-version-kind" -) - -// ToValidOperationID makes an string a valid op ID (e.g. 
removing punctuations and whitespaces and make it camel case) -func ToValidOperationID(s string, capitalizeFirstLetter bool) string { - var buffer bytes.Buffer - capitalize := capitalizeFirstLetter - for i, r := range s { - if unicode.IsLetter(r) || r == '_' || (i != 0 && unicode.IsDigit(r)) { - if capitalize { - buffer.WriteRune(unicode.ToUpper(r)) - capitalize = false - } else { - buffer.WriteRune(r) - } - } else { - capitalize = true - } - } - return buffer.String() -} - -// GetOperationIDAndTags returns a customize operation ID and a list of tags for kubernetes API server's OpenAPI spec to prevent duplicate IDs. -func GetOperationIDAndTags(r *restful.Route) (string, []string, error) { - op := r.Operation - path := r.Path - var tags []string - prefix, exists := verbs.GetPrefix(op) - if !exists { - return op, tags, fmt.Errorf("operation names should start with a verb. Cannot determine operation verb from %v", op) - } - op = op[len(prefix):] - parts := strings.Split(strings.Trim(path, "/"), "/") - // Assume /api is /apis/core, remove this when we actually server /api/... on /apis/core/... - if len(parts) >= 1 && parts[0] == "api" { - parts = append([]string{"apis", "core"}, parts[1:]...) - } - if len(parts) >= 2 && parts[0] == "apis" { - trimmed := strings.TrimSuffix(parts[1], ".k8s.io") - prefix = prefix + ToValidOperationID(trimmed, prefix != "") - tag := ToValidOperationID(trimmed, false) - if len(parts) > 2 { - prefix = prefix + ToValidOperationID(parts[2], prefix != "") - tag = tag + "_" + ToValidOperationID(parts[2], false) - } - tags = append(tags, tag) - } else if len(parts) >= 1 { - tags = append(tags, ToValidOperationID(parts[0], false)) - } - return prefix + ToValidOperationID(op, prefix != ""), tags, nil -} - -type groupVersionKinds []v1.GroupVersionKind - -func (s groupVersionKinds) Len() int { - return len(s) -} - -func (s groupVersionKinds) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s groupVersionKinds) Less(i, j int) bool { - if s[i].Group == s[j].Group { - if s[i].Version == s[j].Version { - return s[i].Kind < s[j].Kind - } - return s[i].Version < s[j].Version - } - return s[i].Group < s[j].Group -} - -func (s groupVersionKinds) JSON() []interface{} { - j := []interface{}{} - for _, gvk := range s { - j = append(j, map[string]interface{}{ - "group": gvk.Group, - "version": gvk.Version, - "kind": gvk.Kind, - }) - } - return j -} - -// DefinitionNamer is the type to customize OpenAPI definition name. -type DefinitionNamer struct { - typeGroupVersionKinds map[string]groupVersionKinds -} - -func gvkConvert(gvk schema.GroupVersionKind) v1.GroupVersionKind { - return v1.GroupVersionKind{ - Group: gvk.Group, - Version: gvk.Version, - Kind: gvk.Kind, - } -} - -func friendlyName(name string) string { - nameParts := strings.Split(name, "/") - // Reverse first part. e.g., io.k8s... instead of k8s.io... - if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { - parts := strings.Split(nameParts[0], ".") - for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { - parts[i], parts[j] = parts[j], parts[i] - } - nameParts[0] = strings.Join(parts, ".") - } - return strings.Join(nameParts, ".") -} - -func typeName(t reflect.Type) string { - path := t.PkgPath() - if strings.Contains(path, "/vendor/") { - path = path[strings.Index(path, "/vendor/")+len("/vendor/"):] - } - return fmt.Sprintf("%s.%s", path, t.Name()) -} - -// NewDefinitionNamer constructs a new DefinitionNamer to be used to customize OpenAPI spec. 
-func NewDefinitionNamer(schemes ...*runtime.Scheme) *DefinitionNamer { - ret := &DefinitionNamer{ - typeGroupVersionKinds: map[string]groupVersionKinds{}, - } - for _, s := range schemes { - for gvk, rtype := range s.AllKnownTypes() { - newGVK := gvkConvert(gvk) - exists := false - for _, existingGVK := range ret.typeGroupVersionKinds[typeName(rtype)] { - if newGVK == existingGVK { - exists = true - break - } - } - if !exists { - ret.typeGroupVersionKinds[typeName(rtype)] = append(ret.typeGroupVersionKinds[typeName(rtype)], newGVK) - } - } - } - for _, gvk := range ret.typeGroupVersionKinds { - sort.Sort(gvk) - } - return ret -} - -// GetDefinitionName returns the name and tags for a given definition -func (d *DefinitionNamer) GetDefinitionName(name string) (string, spec.Extensions) { - if groupVersionKinds, ok := d.typeGroupVersionKinds[name]; ok { - return friendlyName(name), spec.Extensions{ - extensionGVK: groupVersionKinds.JSON(), - } - } - return friendlyName(name), nil -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/constraint/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go index 2558494bd..808943d16 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -27,6 +27,8 @@ import ( metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" ) @@ -62,6 +64,13 @@ type RequestInfo struct { Name string // Parts are the path parts for the request, always starting with /{resource}/{name} Parts []string + + // FieldSelector contains the unparsed field selector from a request. It is only present if the apiserver + // honors field selectors for the verb this request is associated with. + FieldSelector string + // LabelSelector contains the unparsed field selector from a request. It is only present if the apiserver + // honors field selectors for the verb this request is associated with. + LabelSelector string } // specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal @@ -77,6 +86,9 @@ var specialVerbsNoSubresources = sets.NewString("proxy") // this list allows the parser to distinguish between a namespace subresource, and a namespaced resource var namespaceSubresources = sets.NewString("status", "finalize") +// verbsWithSelectors is the list of verbs which support fieldSelector and labelSelector parameters +var verbsWithSelectors = sets.NewString("list", "watch", "deletecollection") + // NamespaceSubResourcesForTest exports namespaceSubresources for testing in pkg/controlplane/master_test.go, so we never drift var NamespaceSubResourcesForTest = sets.NewString(namespaceSubresources.List()...) 
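The requestinfo.go hunks above add unparsed FieldSelector and LabelSelector fields to RequestInfo, populated only for list, watch, and deletecollection requests when the AuthorizeWithSelectors gate is enabled (beta, default on, in this bump). As a rough, hedged sketch of how a consumer of the vendored package might read those fields — not part of this diff; the request path and selector values are made up:

package main

import (
	"fmt"
	"net/http"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apiserver/pkg/endpoints/request"
)

func main() {
	// A RequestInfoFactory configured with the standard API prefixes.
	factory := &request.RequestInfoFactory{
		APIPrefixes:          sets.NewString("api", "apis"),
		GrouplessAPIPrefixes: sets.NewString("api"),
	}

	// A hypothetical list request carrying both selector query parameters.
	req, err := http.NewRequest(http.MethodGet,
		"/api/v1/namespaces/default/pods?labelSelector=app%3Dnginx&fieldSelector=status.phase%3DRunning", nil)
	if err != nil {
		panic(err)
	}

	info, err := factory.NewRequestInfo(req)
	if err != nil {
		panic(err)
	}

	// With the AuthorizeWithSelectors feature gate on, the raw selector strings
	// are surfaced on RequestInfo for list/watch/deletecollection verbs.
	fmt.Println(info.Verb, info.Resource) // list pods
	fmt.Println(info.LabelSelector)       // app=nginx
	fmt.Println(info.FieldSelector)       // status.phase=Running
}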
@@ -151,6 +163,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er currentParts = currentParts[1:] // handle input of form /{specialVerb}/* + verbViaPathPrefix := false if specialVerbs.Has(currentParts[0]) { if len(currentParts) < 2 { return &requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL) @@ -158,6 +171,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er requestInfo.Verb = currentParts[0] currentParts = currentParts[1:] + verbViaPathPrefix = true } else { switch req.Method { @@ -238,11 +252,28 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er } } } + // if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" { requestInfo.Verb = "deletecollection" } + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) { + // Don't support selector authorization on requests that used the deprecated verb-via-path mechanism, since they don't support selectors consistently. + // There are multi-object and single-object watch endpoints, and only the multi-object one supports selectors. + if !verbViaPathPrefix && verbsWithSelectors.Has(requestInfo.Verb) { + // interestingly these are parsed above, but the current structure there means that if one (or anything) in the + // listOptions fails to decode, the field and label selectors are lost. + // therefore, do the straight query param read here. + if vals := req.URL.Query()["fieldSelector"]; len(vals) > 0 { + requestInfo.FieldSelector = vals[0] + } + if vals := req.URL.Query()["labelSelector"]; len(vals) > 0 { + requestInfo.LabelSelector = vals[0] + } + } + } + return &requestInfo, nil } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go b/constraint/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go index b7b7f897c..435af8e7c 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go @@ -274,6 +274,7 @@ func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string mutatingWebhookLatencyKey = "apiserver.latency.k8s.io/mutating-webhook" validatingWebhookLatencyKey = "apiserver.latency.k8s.io/validating-webhook" decodeLatencyKey = "apiserver.latency.k8s.io/decode-response-object" + apfQueueWaitLatencyKey = "apiserver.latency.k8s.io/apf-queue-wait" ) tracker, ok := LatencyTrackersFrom(ctx) @@ -303,6 +304,8 @@ func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string if latency := tracker.DecodeTracker.GetLatency(); latency != 0 { annotations[decodeLatencyKey] = latency.String() } - + if latency := tracker.APFQueueWaitTracker.GetLatency(); latency != 0 { + annotations[apfQueueWaitLatencyKey] = latency.String() + } return annotations } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/constraint/vendor/k8s.io/apiserver/pkg/features/kube_features.go index bae04d954..c23343346 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -18,7 +18,7 @@ package features import ( "k8s.io/apimachinery/pkg/util/runtime" - + "k8s.io/apimachinery/pkg/util/version" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/featuregate" ) @@ -27,7 +27,6 @@ 
const ( // Every feature gate should add method here following this template: // // // owner: @username - // // alpha: v1.4 // MyFeature featuregate.Feature = "MyFeature" // // Feature gates should be listed in alphabetical, case-sensitive @@ -36,8 +35,6 @@ const ( // across the file. // owner: @ivelichkovich, @tallclair - // alpha: v1.27 - // beta: v1.28 // stable: v1.30 // kep: https://kep.k8s.io/3716 // @@ -45,81 +42,82 @@ const ( AdmissionWebhookMatchConditions featuregate.Feature = "AdmissionWebhookMatchConditions" // owner: @jefftree @alexzielenski - // alpha: v1.26 - // beta: v1.27 // stable: v1.30 // // Enables an single HTTP endpoint /discovery/ which supports native HTTP // caching with ETags containing all APIResources known to the apiserver. AggregatedDiscoveryEndpoint featuregate.Feature = "AggregatedDiscoveryEndpoint" + // owner: @vinayakankugoyal + // kep: https://kep.k8s.io/4633 + // + // Allows us to enable anonymous auth for only certain apiserver endpoints. + AnonymousAuthConfigurableEndpoints featuregate.Feature = "AnonymousAuthConfigurableEndpoints" + + // owner: @stlaz @tkashem @dgrisonnet + // kep: https://kep.k8s.io/3926 + // + // Enables the cluster admin to identify resources that fail to + // decrypt or fail to be decoded into an object, and introduces + // a new delete option to allow deletion of such corrupt + // resources using the Kubernetes API only. + AllowUnsafeMalformedObjectDeletion featuregate.Feature = "AllowUnsafeMalformedObjectDeletion" + // owner: @smarterclayton - // alpha: v1.8 - // beta: v1.9 // stable: 1.29 // // Allow API clients to retrieve resource lists in chunks rather than // all at once. APIListChunking featuregate.Feature = "APIListChunking" - // owner: @MikeSpreitzer @yue9944882 - // alpha: v1.18 - // beta: v1.20 - // stable: 1.29 - // - // Enables managing request concurrency with prioritization and fairness at each server. - // The FeatureGate was introduced in release 1.15 but the feature - // was not really implemented before 1.18. - APIPriorityAndFairness featuregate.Feature = "APIPriorityAndFairness" - // owner: @ilackams - // alpha: v1.7 - // beta: v1.16 // // Enables compression of REST responses (GET and LIST only) APIResponseCompression featuregate.Feature = "APIResponseCompression" // owner: @roycaihw - // alpha: v1.20 // // Assigns each kube-apiserver an ID in a cluster. APIServerIdentity featuregate.Feature = "APIServerIdentity" // owner: @dashpole - // alpha: v1.22 - // beta: v1.27 // // Add support for distributed tracing in the API Server APIServerTracing featuregate.Feature = "APIServerTracing" // owner: @linxiulei - // beta: v1.30 // // Enables serving watch requests in separate goroutines. APIServingWithRoutine featuregate.Feature = "APIServingWithRoutine" - // owner: @cici37 @jpbetz - // kep: http://kep.k8s.io/3488 - // alpha: v1.26 - // beta: v1.28 - // stable: v1.30 + // owner: @deads2k + // kep: https://kep.k8s.io/4601 // - // Note: the feature gate can be removed in 1.32 - // Enables expression validation in Admission Control - ValidatingAdmissionPolicy featuregate.Feature = "ValidatingAdmissionPolicy" + // Allows authorization to use field and label selectors. 
+ AuthorizeWithSelectors featuregate.Feature = "AuthorizeWithSelectors" - // owner: @cici37 - // kep: https://kep.k8s.io/2876 - // alpha: v1.23 - // beta: v1.25 - // stable: v1.29 + // owner: @benluddy + // kep: https://kep.k8s.io/4222 // - // Enables expression validation for Custom Resource - CustomResourceValidationExpressions featuregate.Feature = "CustomResourceValidationExpressions" + // Enables CBOR as a supported encoding for requests and responses, and as the + // preferred storage encoding for custom resources. + CBORServingAndStorage featuregate.Feature = "CBORServingAndStorage" + + // owner: @serathius + // + // Replaces watch cache hashmap implementation with a btree based one, bringing performance improvements. + BtreeWatchCache featuregate.Feature = "BtreeWatchCache" + + // owner: @serathius + // Enables concurrent watch object decoding to avoid starving watch cache when conversion webhook is installed. + ConcurrentWatchObjectDecode featuregate.Feature = "ConcurrentWatchObjectDecode" + + // owner: @jefftree + // kep: https://kep.k8s.io/4355 + // + // Enables coordinated leader election in the API server + CoordinatedLeaderElection featuregate.Feature = "CoordinatedLeaderElection" - // alpha: v1.20 - // beta: v1.21 - // GA: v1.24 // // Allows for updating watchcache resource version with progress notify events. EfficientWatchResumption featuregate.Feature = "EfficientWatchResumption" @@ -131,73 +129,46 @@ const ( // Enables KMS v1 API for encryption at rest. KMSv1 featuregate.Feature = "KMSv1" - // owner: @aramase - // kep: https://kep.k8s.io/3299 - // alpha: v1.25 - // beta: v1.27 - // stable: v1.29 - // - // Enables KMS v2 API for encryption at rest. - KMSv2 featuregate.Feature = "KMSv2" - - // owner: @enj - // kep: https://kep.k8s.io/3299 - // beta: v1.28 - // stable: v1.29 - // - // Enables the use of derived encryption keys with KMS v2. - KMSv2KDF featuregate.Feature = "KMSv2KDF" - - // owner: @alexzielenski, @cici37, @jiahuif + // owner: @alexzielenski, @cici37, @jiahuif, @jpbetz // kep: https://kep.k8s.io/3962 - // alpha: v1.30 // // Enables the MutatingAdmissionPolicy in Admission Chain MutatingAdmissionPolicy featuregate.Feature = "MutatingAdmissionPolicy" // owner: @jiahuif // kep: https://kep.k8s.io/2887 - // alpha: v1.23 - // beta: v1.24 // // Enables populating "enum" field of OpenAPI schemas // in the spec returned from kube-apiserver. OpenAPIEnums featuregate.Feature = "OpenAPIEnums" // owner: @caesarxuchao - // alpha: v1.15 - // beta: v1.16 // stable: 1.29 // // Allow apiservers to show a count of remaining items in the response // to a chunking list request. RemainingItemCount featuregate.Feature = "RemainingItemCount" - // owner: @serathius - // beta: v1.30 + // owner: @stlaz // - // Allow watch cache to create a watch on a dedicated RPC. - // This prevents watch cache from being starved by other watches. - SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC" + // Enable kube-apiserver to accept UIDs via request header authentication. + // This will also make the kube-apiserver's API aggregator add UIDs via standard + // headers when forwarding requests to the servers serving the aggregated API. + RemoteRequestHeaderUID featuregate.Feature = "RemoteRequestHeaderUID" - // owner: @apelisse, @lavalamp - // alpha: v1.14 - // beta: v1.16 - // stable: v1.22 + // owner: @wojtek-t // - // Server-side apply. Merging happens on the server. 
- ServerSideApply featuregate.Feature = "ServerSideApply" + // Enables resilient watchcache initialization to avoid controlplane + // overload. + ResilientWatchCacheInitialization featuregate.Feature = "ResilientWatchCacheInitialization" - // owner: @kevindelgado - // kep: https://kep.k8s.io/2885 - // alpha: v1.23 - // beta: v1.24 + // owner: @serathius // - // Enables server-side field validation. - ServerSideFieldValidation featuregate.Feature = "ServerSideFieldValidation" + // Allow watch cache to create a watch on a dedicated RPC. + // This prevents watch cache from being starved by other watches. + SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC" // owner: @enj - // beta: v1.29 // // Enables http2 DOS mitigations for unauthenticated clients. // @@ -215,13 +186,11 @@ const ( UnauthenticatedHTTP2DOSMitigation featuregate.Feature = "UnauthenticatedHTTP2DOSMitigation" // owner: @jpbetz - // alpha: v1.30 // Resource create requests using generateName are retried automatically by the apiserver // if the generated name conflicts with an existing resource name, up to a maximum number of 7 retries. RetryGenerateName featuregate.Feature = "RetryGenerateName" // owner: @cici37 - // alpha: v1.30 // // StrictCostEnforcementForVAP is used to apply strict CEL cost validation for ValidatingAdmissionPolicy. // It will be set to off by default for certain time of period to prevent the impact on the existing users. @@ -230,7 +199,6 @@ const ( StrictCostEnforcementForVAP featuregate.Feature = "StrictCostEnforcementForVAP" // owner: @cici37 - // alpha: v1.30 // // StrictCostEnforcementForWebhooks is used to apply strict CEL cost validation for matchConditions in Webhooks. // It will be set to off by default for certain time of period to prevent the impact on the existing users. @@ -239,14 +207,11 @@ const ( StrictCostEnforcementForWebhooks featuregate.Feature = "StrictCostEnforcementForWebhooks" // owner: @caesarxuchao @roycaihw - // alpha: v1.20 // // Enable the storage version API. StorageVersionAPI featuregate.Feature = "StorageVersionAPI" // owner: @caesarxuchao - // alpha: v1.14 - // beta: v1.15 // // Allow apiservers to expose the storage version hash in the discovery // document. @@ -254,136 +219,226 @@ const ( // owner: @aramase, @enj, @nabokihms // kep: https://kep.k8s.io/3331 - // alpha: v1.29 - // beta: v1.30 // // Enables Structured Authentication Configuration StructuredAuthenticationConfiguration featuregate.Feature = "StructuredAuthenticationConfiguration" // owner: @palnabarun // kep: https://kep.k8s.io/3221 - // alpha: v1.29 - // beta: v1.30 // // Enables Structured Authorization Configuration StructuredAuthorizationConfiguration featuregate.Feature = "StructuredAuthorizationConfiguration" // owner: @wojtek-t - // alpha: v1.15 - // beta: v1.16 - // GA: v1.17 // // Enables support for watch bookmark events. WatchBookmark featuregate.Feature = "WatchBookmark" + // owner: @wojtek-t + // + // Enables post-start-hook for storage readiness + WatchCacheInitializationPostStartHook featuregate.Feature = "WatchCacheInitializationPostStartHook" + // owner: @serathius - // beta: 1.30 // Enables watches without resourceVersion to be served from storage. // Used to prevent https://github.com/kubernetes/kubernetes/issues/123072 until etcd fixes the issue. 
WatchFromStorageWithoutResourceVersion featuregate.Feature = "WatchFromStorageWithoutResourceVersion" - // owner: @vinaykul - // kep: http://kep.k8s.io/1287 - // alpha: v1.27 - // - // Enables In-Place Pod Vertical Scaling - InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling" - // owner: @p0lyn0mial - // alpha: v1.27 // // Allow the API server to stream individual items instead of chunking WatchList featuregate.Feature = "WatchList" // owner: @serathius // kep: http://kep.k8s.io/2340 - // alpha: v1.28 // // Allow the API server to serve consistent lists from cache ConsistentListFromCache featuregate.Feature = "ConsistentListFromCache" - - // owner: @tkashem - // beta: v1.29 - // GA: v1.30 - // - // Allow Priority & Fairness in the API server to use a zero value for - // the 'nominalConcurrencyShares' field of the 'limited' section of a - // priority level. - ZeroLimitedNominalConcurrencyShares featuregate.Feature = "ZeroLimitedNominalConcurrencyShares" ) func init() { runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)) + runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates)) } -// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs. // To add a new feature, define a key for it above and add it here. The features will be // available throughout Kubernetes binaries. -var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ - - AggregatedDiscoveryEndpoint: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - AdmissionWebhookMatchConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 - - APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - APIPriorityAndFairness: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - - APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, - - APIServerIdentity: {Default: true, PreRelease: featuregate.Beta}, - - APIServerTracing: {Default: true, PreRelease: featuregate.Beta}, - - APIServingWithRoutine: {Default: true, PreRelease: featuregate.Beta}, - - ValidatingAdmissionPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - - EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - KMSv1: {Default: false, PreRelease: featuregate.Deprecated}, - - KMSv2: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - - KMSv2KDF: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 - - OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, - - RemainingItemCount: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 - - RetryGenerateName: {Default: false, PreRelease: featuregate.Alpha}, - - SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, - - ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - - ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - - StorageVersionAPI: {Default: false, 
PreRelease: featuregate.Alpha}, - - StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, - - StrictCostEnforcementForVAP: {Default: false, PreRelease: featuregate.Beta}, - - StrictCostEnforcementForWebhooks: {Default: false, PreRelease: featuregate.Beta}, - - StructuredAuthenticationConfiguration: {Default: true, PreRelease: featuregate.Beta}, - - StructuredAuthorizationConfiguration: {Default: true, PreRelease: featuregate.Beta}, - - UnauthenticatedHTTP2DOSMitigation: {Default: true, PreRelease: featuregate.Beta}, - - WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, - - InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, - - WatchList: {Default: false, PreRelease: featuregate.Alpha}, - - ConsistentListFromCache: {Default: false, PreRelease: featuregate.Alpha}, - - ZeroLimitedNominalConcurrencyShares: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 +// +// Entries are alphabetized and separated from each other with blank lines to avoid sweeping gofmt changes +// when adding or removing one entry. +var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{ + AdmissionWebhookMatchConditions: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + AggregatedDiscoveryEndpoint: { + {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + AllowUnsafeMalformedObjectDeletion: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + AnonymousAuthConfigurableEndpoints: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + APIListChunking: { + {Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.9"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + APIResponseCompression: { + {Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta}, + }, + + APIServerIdentity: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta}, + }, + + APIServerTracing: { + {Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta}, + }, + + APIServingWithRoutine: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + }, + + BtreeWatchCache: { + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + AuthorizeWithSelectors: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: 
featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, + + CBORServingAndStorage: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + ConcurrentWatchObjectDecode: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + }, + + ConsistentListFromCache: { + {Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + CoordinatedLeaderElection: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, + }, + + EfficientWatchResumption: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.21"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + KMSv1: { + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Deprecated}, + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated}, + }, + + MutatingAdmissionPolicy: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + OpenAPIEnums: { + {Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta}, + }, + + RemainingItemCount: { + {Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + RemoteRequestHeaderUID: { + {Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha}, + }, + + ResilientWatchCacheInitialization: { + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + }, + + RetryGenerateName: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA}, + }, + + SeparateCacheWatchRPC: { + {Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta}, + }, + + StorageVersionAPI: { + {Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha}, + }, + + StorageVersionHash: { + {Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.15"), Default: true, PreRelease: featuregate.Beta}, + }, + + StrictCostEnforcementForVAP: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + StrictCostEnforcementForWebhooks: { + {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + StructuredAuthenticationConfiguration: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + }, + + 
StructuredAuthorizationConfiguration: { + {Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + UnauthenticatedHTTP2DOSMitigation: { + {Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta}, + }, + + WatchBookmark: { + {Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta}, + {Version: version.MustParse("1.17"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + }, + + WatchCacheInitializationPostStartHook: { + {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, + }, + + WatchFromStorageWithoutResourceVersion: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta}, + }, + + WatchList: { + {Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha}, + {Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta}, + }, } + +// defaultKubernetesFeatureGates consists of legacy unversioned Kubernetes-specific feature keys. +// Please do not add to this struct and use defaultVersionedKubernetesFeatureGates instead. +var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS b/constraint/vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS deleted file mode 100644 index 104836035..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/quota/v1/OWNERS +++ /dev/null @@ -1,13 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - deads2k - - derekwaynecarr -reviewers: - - deads2k - - derekwaynecarr - - smarterclayton -labels: - - sig/api-machinery -emeritus_approvers: - - vishh diff --git a/constraint/vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go b/constraint/vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go deleted file mode 100644 index 511e8818c..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/quota/v1/interfaces.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/admission" - "k8s.io/client-go/tools/cache" -) - -// UsageStatsOptions is an options structs that describes how stats should be calculated -type UsageStatsOptions struct { - // Namespace where stats should be calculate - Namespace string - // Scopes that must match counted objects - Scopes []corev1.ResourceQuotaScope - // Resources are the set of resources to include in the measurement - Resources []corev1.ResourceName - ScopeSelector *corev1.ScopeSelector -} - -// UsageStats is result of measuring observed resource use in the system -type UsageStats struct { - // Used maps resource to quantity used - Used corev1.ResourceList -} - -// Evaluator knows how to evaluate quota usage for a particular group resource -type Evaluator interface { - // Constraints ensures that each required resource is present on item - Constraints(required []corev1.ResourceName, item runtime.Object) error - // GroupResource returns the groupResource that this object knows how to evaluate - GroupResource() schema.GroupResource - // Handles determines if quota could be impacted by the specified attribute. - // If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota. - Handles(operation admission.Attributes) bool - // Matches returns true if the specified quota matches the input item - Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) - // MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches input object. - MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) - // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope - UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) - // MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches. - MatchingResources(input []corev1.ResourceName) []corev1.ResourceName - // Usage returns the resource usage for the specified object - Usage(item runtime.Object) (corev1.ResourceList, error) - // UsageStats calculates latest observed usage stats for all objects - UsageStats(options UsageStatsOptions) (UsageStats, error) -} - -// Configuration defines how the quota system is configured. -type Configuration interface { - // IgnoredResources are ignored by quota. - IgnoredResources() map[schema.GroupResource]struct{} - // Evaluators for quota evaluation. 
- Evaluators() []Evaluator -} - -// Registry maintains a list of evaluators -type Registry interface { - // Add to registry - Add(e Evaluator) - // Remove from registry - Remove(e Evaluator) - // Get by group resource - Get(gr schema.GroupResource) Evaluator - // List from registry - List() []Evaluator -} - -// ListerForResourceFunc knows how to get a lister for a specific resource -type ListerForResourceFunc func(schema.GroupVersionResource) (cache.GenericLister, error) diff --git a/constraint/vendor/k8s.io/apiserver/pkg/quota/v1/resources.go b/constraint/vendor/k8s.io/apiserver/pkg/quota/v1/resources.go deleted file mode 100644 index b66471920..000000000 --- a/constraint/vendor/k8s.io/apiserver/pkg/quota/v1/resources.go +++ /dev/null @@ -1,304 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "sort" - "strings" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" -) - -// Equals returns true if the two lists are equivalent -func Equals(a corev1.ResourceList, b corev1.ResourceList) bool { - if len(a) != len(b) { - return false - } - - for key, value1 := range a { - value2, found := b[key] - if !found { - return false - } - if value1.Cmp(value2) != 0 { - return false - } - } - - return true -} - -// LessThanOrEqual returns true if a < b for each key in b -// If false, it returns the keys in a that exceeded b -func LessThanOrEqual(a corev1.ResourceList, b corev1.ResourceList) (bool, []corev1.ResourceName) { - result := true - resourceNames := []corev1.ResourceName{} - for key, value := range b { - if other, found := a[key]; found { - if other.Cmp(value) > 0 { - result = false - resourceNames = append(resourceNames, key) - } - } - } - return result, resourceNames -} - -// Max returns the result of Max(a, b) for each named resource -func Max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { - result := corev1.ResourceList{} - for key, value := range a { - if other, found := b[key]; found { - if value.Cmp(other) <= 0 { - result[key] = other.DeepCopy() - continue - } - } - result[key] = value.DeepCopy() - } - for key, value := range b { - if _, found := result[key]; !found { - result[key] = value.DeepCopy() - } - } - return result -} - -// Add returns the result of a + b for each named resource -func Add(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { - result := corev1.ResourceList{} - for key, value := range a { - quantity := value.DeepCopy() - if other, found := b[key]; found { - quantity.Add(other) - } - result[key] = quantity - } - for key, value := range b { - if _, found := result[key]; !found { - result[key] = value.DeepCopy() - } - } - return result -} - -// SubtractWithNonNegativeResult - subtracts and returns result of a - b but -// makes sure we don't return negative values to prevent negative resource usage. 
-func SubtractWithNonNegativeResult(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { - zero := resource.MustParse("0") - - result := corev1.ResourceList{} - for key, value := range a { - quantity := value.DeepCopy() - if other, found := b[key]; found { - quantity.Sub(other) - } - if quantity.Cmp(zero) > 0 { - result[key] = quantity - } else { - result[key] = zero - } - } - - for key := range b { - if _, found := result[key]; !found { - result[key] = zero - } - } - return result -} - -// Subtract returns the result of a - b for each named resource -func Subtract(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { - result := corev1.ResourceList{} - for key, value := range a { - quantity := value.DeepCopy() - if other, found := b[key]; found { - quantity.Sub(other) - } - result[key] = quantity - } - for key, value := range b { - if _, found := result[key]; !found { - quantity := value.DeepCopy() - quantity.Neg() - result[key] = quantity - } - } - return result -} - -// Mask returns a new resource list that only has the values with the specified names -func Mask(resources corev1.ResourceList, names []corev1.ResourceName) corev1.ResourceList { - nameSet := ToSet(names) - result := corev1.ResourceList{} - for key, value := range resources { - if nameSet.Has(string(key)) { - result[key] = value.DeepCopy() - } - } - return result -} - -// ResourceNames returns a list of all resource names in the ResourceList -func ResourceNames(resources corev1.ResourceList) []corev1.ResourceName { - result := []corev1.ResourceName{} - for resourceName := range resources { - result = append(result, resourceName) - } - return result -} - -// Contains returns true if the specified item is in the list of items -func Contains(items []corev1.ResourceName, item corev1.ResourceName) bool { - for _, i := range items { - if i == item { - return true - } - } - return false -} - -// ContainsPrefix returns true if the specified item has a prefix that contained in given prefix Set -func ContainsPrefix(prefixSet []string, item corev1.ResourceName) bool { - for _, prefix := range prefixSet { - if strings.HasPrefix(string(item), prefix) { - return true - } - } - return false -} - -// Intersection returns the intersection of both list of resources, deduped and sorted -func Intersection(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName { - result := make([]corev1.ResourceName, 0, len(a)) - for _, item := range a { - if Contains(result, item) { - continue - } - if !Contains(b, item) { - continue - } - result = append(result, item) - } - sort.Slice(result, func(i, j int) bool { return result[i] < result[j] }) - return result -} - -// Difference returns the list of resources resulting from a-b, deduped and sorted -func Difference(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName { - result := make([]corev1.ResourceName, 0, len(a)) - for _, item := range a { - if Contains(b, item) || Contains(result, item) { - continue - } - result = append(result, item) - } - sort.Slice(result, func(i, j int) bool { return result[i] < result[j] }) - return result -} - -// IsZero returns true if each key maps to the quantity value 0 -func IsZero(a corev1.ResourceList) bool { - zero := resource.MustParse("0") - for _, v := range a { - if v.Cmp(zero) != 0 { - return false - } - } - return true -} - -// RemoveZeros returns a new resource list that only has no zero values -func RemoveZeros(a corev1.ResourceList) corev1.ResourceList { - result := corev1.ResourceList{} - for key, 
value := range a { - if !value.IsZero() { - result[key] = value - } - } - return result -} - -// IsNegative returns the set of resource names that have a negative value. -func IsNegative(a corev1.ResourceList) []corev1.ResourceName { - results := []corev1.ResourceName{} - zero := resource.MustParse("0") - for k, v := range a { - if v.Cmp(zero) < 0 { - results = append(results, k) - } - } - return results -} - -// ToSet takes a list of resource names and converts to a string set -func ToSet(resourceNames []corev1.ResourceName) sets.String { - result := sets.NewString() - for _, resourceName := range resourceNames { - result.Insert(string(resourceName)) - } - return result -} - -// CalculateUsage calculates and returns the requested ResourceList usage. -// If an error is returned, usage only contains the resources which encountered no calculation errors. -func CalculateUsage(namespaceName string, scopes []corev1.ResourceQuotaScope, hardLimits corev1.ResourceList, registry Registry, scopeSelector *corev1.ScopeSelector) (corev1.ResourceList, error) { - // find the intersection between the hard resources on the quota - // and the resources this controller can track to know what we can - // look to measure updated usage stats for - hardResources := ResourceNames(hardLimits) - potentialResources := []corev1.ResourceName{} - evaluators := registry.List() - for _, evaluator := range evaluators { - potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...) - } - // NOTE: the intersection just removes duplicates since the evaluator match intersects with hard - matchedResources := Intersection(hardResources, potentialResources) - - errors := []error{} - - // sum the observed usage from each evaluator - newUsage := corev1.ResourceList{} - for _, evaluator := range evaluators { - // only trigger the evaluator if it matches a resource in the quota, otherwise, skip calculating anything - intersection := evaluator.MatchingResources(matchedResources) - if len(intersection) == 0 { - continue - } - - usageStatsOptions := UsageStatsOptions{Namespace: namespaceName, Scopes: scopes, Resources: intersection, ScopeSelector: scopeSelector} - stats, err := evaluator.UsageStats(usageStatsOptions) - if err != nil { - // remember the error - errors = append(errors, err) - // exclude resources which encountered calculation errors - matchedResources = Difference(matchedResources, intersection) - continue - } - newUsage = Add(newUsage, stats.Used) - } - - // mask the observed usage to only the set of resources tracked by this quota - // merge our observed usage with the quota usage status - // if the new usage is different than the last usage, we will need to do an update - newUsage = Mask(newUsage, matchedResources) - return newUsage, utilerrors.NewAggregate(errors) -} diff --git a/constraint/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go b/constraint/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go index ce9a3691a..0513b2822 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go @@ -22,13 +22,12 @@ import ( "strings" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/apis/apiserver" "k8s.io/apiserver/pkg/apis/apiserver/install" - "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" "k8s.io/utils/path" - "sigs.k8s.io/yaml" ) var 
cfgScheme = runtime.NewScheme() @@ -55,19 +54,13 @@ func ReadEgressSelectorConfiguration(configFilePath string) (*apiserver.EgressSe if err != nil { return nil, fmt.Errorf("unable to read egress selector configuration from %q [%v]", configFilePath, err) } - var decodedConfig v1beta1.EgressSelectorConfiguration - err = yaml.Unmarshal(data, &decodedConfig) + config, gvk, err := serializer.NewCodecFactory(cfgScheme, serializer.EnableStrict).UniversalDecoder().Decode(data, nil, nil) if err != nil { - // we got an error where the decode wasn't related to a missing type return nil, err } - if decodedConfig.Kind != "EgressSelectorConfiguration" { - return nil, fmt.Errorf("invalid service configuration object %q", decodedConfig.Kind) - } - internalConfig := &apiserver.EgressSelectorConfiguration{} - if err := cfgScheme.Convert(&decodedConfig, internalConfig, nil); err != nil { - // we got an error where the decode wasn't related to a missing type - return nil, err + internalConfig, ok := config.(*apiserver.EgressSelectorConfiguration) + if !ok { + return nil, fmt.Errorf("unexpected config type: %v", gvk) } return internalConfig, nil } diff --git a/constraint/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/constraint/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go index 5911b7568..00a9e099b 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -24,8 +24,8 @@ var ( // DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate. // Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this. // Tests that need to modify feature gates for the duration of their test should use: - // defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., )() - DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate() + // featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., ) + DefaultMutableFeatureGate featuregate.MutableVersionedFeatureGate = featuregate.NewFeatureGate() // DefaultFeatureGate is a shared global FeatureGate. // Top-level commands/options setup that needs to modify this feature gate should use DefaultMutableFeatureGate. diff --git a/constraint/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go b/constraint/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go index 695c00d17..9cf258b72 100644 --- a/constraint/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go +++ b/constraint/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go @@ -23,8 +23,18 @@ import ( "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/transport" ) +func ValidateCABundle(fldPath *field.Path, caBundle []byte) field.ErrorList { + var allErrors field.ErrorList + _, err := transport.TLSConfigFor(&transport.Config{TLS: transport.TLSConfig{CAData: caBundle}}) + if err != nil { + allErrors = append(allErrors, field.Invalid(fldPath, caBundle, err.Error())) + } + return allErrors +} + // ValidateWebhookURL validates webhook's URL. 
func ValidateWebhookURL(fldPath *field.Path, URL string, forceHttps bool) field.ErrorList { var allErrors field.ErrorList diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go index 64422c1df..0d50d44ac 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use // with apply. type AuditAnnotationApplyConfiguration struct { Key *string `json:"key,omitempty"` ValueExpression *string `json:"valueExpression,omitempty"` } -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with // apply. func AuditAnnotation() *AuditAnnotationApplyConfiguration { return &AuditAnnotationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go index 38b7475cc..1f890bcfc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use // with apply. type ExpressionWarningApplyConfiguration struct { FieldRef *string `json:"fieldRef,omitempty"` Warning *string `json:"warning,omitempty"` } -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with // apply. func ExpressionWarning() *ExpressionWarningApplyConfiguration { return &ExpressionWarningApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go index ea1dc377b..d8a816f1e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use // with apply. 
type MatchConditionApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with // apply. func MatchCondition() *MatchConditionApplyConfiguration { return &MatchConditionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go index d8e982894..e840fe9eb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use // with apply. type MatchResourcesApplyConfiguration struct { - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"` ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"` ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"` - MatchPolicy *apiadmissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` + MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` } -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with // apply. func MatchResources() *MatchResourcesApplyConfiguration { return &MatchResourcesApplyConfiguration{} @@ -42,7 +42,7 @@ func MatchResources() *MatchResourcesApplyConfiguration { // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. 
-func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { +func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { b.NamespaceSelector = value return b } @@ -50,7 +50,7 @@ func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.Label // WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ObjectSelector field is set to the value of the last call. -func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { +func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *metav1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration { b.ObjectSelector = value return b } @@ -84,7 +84,7 @@ func (b *MatchResourcesApplyConfiguration) WithExcludeResourceRules(values ...*N // WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MatchPolicy field is set to the value of the last call. -func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value apiadmissionregistrationv1.MatchPolicyType) *MatchResourcesApplyConfiguration { +func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value admissionregistrationv1.MatchPolicyType) *MatchResourcesApplyConfiguration { b.MatchPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go index faff51a04..cd8096f90 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use +// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use // with apply. type MutatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -40,7 +40,7 @@ type MutatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with +// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with // apply. 
func MutatingWebhook() *MutatingWebhookApplyConfiguration { return &MutatingWebhookApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go index 61c8f667d..4267f5fbf 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the MutatingWebhookConfiguration type for use +// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use // with apply. type MutatingWebhookConfigurationApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// MutatingWebhookConfiguration constructs an declarative configuration of the MutatingWebhookConfiguration type for use with +// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with // apply. func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationApplyConfiguration { b := &MutatingWebhookConfigurationApplyConfiguration{} @@ -56,18 +56,18 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { +func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "") } // ExtractMutatingWebhookConfigurationStatus is the same as ExtractMutatingWebhookConfiguration except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { +func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) { return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "status") } -func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) { +func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) { b := &MutatingWebhookConfigurationApplyConfiguration{} err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmiss // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value st // If called multiple times, the Name field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string) // If called multiple times, the GenerateName field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value str // If called multiple times, the UID field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(val // If called multiple times, the Generation field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration { +func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration { +func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(v // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriod // overwriting an existing map entries in Labels field with the same key. func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[ // overwriting an existing map entries in Annotations field with the same key. func (b *MutatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MutatingWebhookConfigurationApplyConfiguration { +func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(val func (b *MutatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *MutatingWebhookConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -250,3 +250,9 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ... } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go index be8d5206c..dd31981ad 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go @@ -22,14 +22,14 @@ import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use // with apply. type NamedRuleWithOperationsApplyConfiguration struct { ResourceNames []string `json:"resourceNames,omitempty"` RuleWithOperationsApplyConfiguration `json:",inline"` } -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with // apply. func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { return &NamedRuleWithOperationsApplyConfiguration{} @@ -50,7 +50,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ... // If called multiple times, values provided by each call will be appended to the Operations field. 
func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Operations = append(b.Operations, values[i]) + b.RuleWithOperationsApplyConfiguration.Operations = append(b.RuleWithOperationsApplyConfiguration.Operations, values[i]) } return b } @@ -60,7 +60,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...adm // If called multiple times, values provided by each call will be appended to the APIGroups field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) + b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i]) } return b } @@ -70,7 +70,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...stri // If called multiple times, values provided by each call will be appended to the APIVersions field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) + b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i]) } return b } @@ -80,7 +80,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...st // If called multiple times, values provided by each call will be appended to the Resources field. func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Resources = append(b.Resources, values[i]) + b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i]) } return b } @@ -89,6 +89,6 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { - b.Scope = &value + b.RuleApplyConfiguration.Scope = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go index b77a30cf9..07577929a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use // with apply. type ParamKindApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` Kind *string `json:"kind,omitempty"` } -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with // apply. 
func ParamKind() *ParamKindApplyConfiguration { return &ParamKindApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go index b52becda5..140233f6b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go @@ -20,19 +20,19 @@ package v1 import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use // with apply. type ParamRefApplyConfiguration struct { Name *string `json:"name,omitempty"` Namespace *string `json:"namespace,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` ParameterNotFoundAction *admissionregistrationv1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with // apply. func ParamRef() *ParamRefApplyConfiguration { return &ParamRefApplyConfiguration{} @@ -57,7 +57,7 @@ func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyC // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration { +func (b *ParamRefApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration { b.Selector = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go index 41d4179df..a8c68136b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) -// RuleApplyConfiguration represents an declarative configuration of the Rule type for use +// RuleApplyConfiguration represents a declarative configuration of the Rule type for use // with apply. 
type RuleApplyConfiguration struct { - APIGroups []string `json:"apiGroups,omitempty"` - APIVersions []string `json:"apiVersions,omitempty"` - Resources []string `json:"resources,omitempty"` - Scope *v1.ScopeType `json:"scope,omitempty"` + APIGroups []string `json:"apiGroups,omitempty"` + APIVersions []string `json:"apiVersions,omitempty"` + Resources []string `json:"resources,omitempty"` + Scope *admissionregistrationv1.ScopeType `json:"scope,omitempty"` } -// RuleApplyConfiguration constructs an declarative configuration of the Rule type for use with +// RuleApplyConfiguration constructs a declarative configuration of the Rule type for use with // apply. func Rule() *RuleApplyConfiguration { return &RuleApplyConfiguration{} @@ -70,7 +70,7 @@ func (b *RuleApplyConfiguration) WithResources(values ...string) *RuleApplyConfi // WithScope sets the Scope field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleApplyConfiguration) WithScope(value v1.ScopeType) *RuleApplyConfiguration { +func (b *RuleApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *RuleApplyConfiguration { b.Scope = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go index 59bbb8fe3..55a985f99 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) -// RuleWithOperationsApplyConfiguration represents an declarative configuration of the RuleWithOperations type for use +// RuleWithOperationsApplyConfiguration represents a declarative configuration of the RuleWithOperations type for use // with apply. type RuleWithOperationsApplyConfiguration struct { - Operations []v1.OperationType `json:"operations,omitempty"` + Operations []admissionregistrationv1.OperationType `json:"operations,omitempty"` RuleApplyConfiguration `json:",inline"` } -// RuleWithOperationsApplyConfiguration constructs an declarative configuration of the RuleWithOperations type for use with +// RuleWithOperationsApplyConfiguration constructs a declarative configuration of the RuleWithOperations type for use with // apply. func RuleWithOperations() *RuleWithOperationsApplyConfiguration { return &RuleWithOperationsApplyConfiguration{} @@ -38,7 +38,7 @@ func RuleWithOperations() *RuleWithOperationsApplyConfiguration { // WithOperations adds the given value to the Operations field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Operations field. 
-func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.OperationType) *RuleWithOperationsApplyConfiguration { +func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *RuleWithOperationsApplyConfiguration { for i := range values { b.Operations = append(b.Operations, values[i]) } @@ -50,7 +50,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithOperations(values ...v1.Opera // If called multiple times, values provided by each call will be appended to the APIGroups field. func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *RuleWithOperationsApplyConfiguration { for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) + b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i]) } return b } @@ -60,7 +60,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) * // If called multiple times, values provided by each call will be appended to the APIVersions field. func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *RuleWithOperationsApplyConfiguration { for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) + b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i]) } return b } @@ -70,7 +70,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) // If called multiple times, values provided by each call will be appended to the Resources field. func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) *RuleWithOperationsApplyConfiguration { for i := range values { - b.Resources = append(b.Resources, values[i]) + b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i]) } return b } @@ -78,7 +78,7 @@ func (b *RuleWithOperationsApplyConfiguration) WithResources(values ...string) * // WithScope sets the Scope field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. -func (b *RuleWithOperationsApplyConfiguration) WithScope(value v1.ScopeType) *RuleWithOperationsApplyConfiguration { - b.Scope = &value +func (b *RuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *RuleWithOperationsApplyConfiguration { + b.RuleApplyConfiguration.Scope = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go index 2cd55d9ea..239780664 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. 
type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go index 8621ce71e..723d10ecf 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use // with apply. type TypeCheckingApplyConfiguration struct { ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` } -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with // apply. func TypeChecking() *TypeCheckingApplyConfiguration { return &TypeCheckingApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go index fc96a8bdc..730de0369 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use // with apply. 
type ValidatingAdmissionPolicyApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` - Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` + Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with // apply. func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { b := &ValidatingAdmissionPolicyApplyConfiguration{} @@ -57,18 +57,18 @@ func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfi // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { +func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "") } // ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { +func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status") } -func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { +func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) { b := &ValidatingAdmissionPolicyApplyConfiguration{} err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *apiadmissionreg // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *Va // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value strin // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *Va // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries ma // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { +func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ValidatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go index 5bc41a0f5..2921a711f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -19,23 +19,23 @@ limitations under the License. 
package v1 import ( - apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use // with apply. type ValidatingAdmissionPolicyBindingApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` } -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with // apply. func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} @@ -56,18 +56,18 @@ func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBin // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { +func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "") } // ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { +func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status") } -func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { +func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) { b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(valu // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value stri // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(va // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion // If called multiple times, the Generation field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimesta // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePe // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries // overwriting an existing map entries in Annotations field with the same key. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(ent // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go index da6ecbe37..eb426af42 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { PolicyName *string `json:"policyName,omitempty"` @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { ValidationActions []admissionregistrationv1.ValidationAction `json:"validationActions,omitempty"` } -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with // apply. 
func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go index eb930b9b1..1635b30a6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1 "k8s.io/api/admissionregistration/v1" ) -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use // with apply. type ValidatingAdmissionPolicySpecApplyConfiguration struct { ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { Variables []VariableApplyConfiguration `json:"variables,omitempty"` } -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with // apply. func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { return &ValidatingAdmissionPolicySpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go index 25cd67f08..e6f4e8459 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use // with apply. type ValidatingAdmissionPolicyStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct { Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with // apply. 
func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { return &ValidatingAdmissionPolicyStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go index 613856bac..a2c705eb5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use +// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use // with apply. type ValidatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -39,7 +39,7 @@ type ValidatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with +// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with // apply. func ValidatingWebhook() *ValidatingWebhookApplyConfiguration { return &ValidatingWebhookApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go index 811bfdf0b..a7bebb59f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the ValidatingWebhookConfiguration type for use +// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use // with apply. 
type ValidatingWebhookConfigurationApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// ValidatingWebhookConfiguration constructs an declarative configuration of the ValidatingWebhookConfiguration type for use with +// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with // apply. func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfigurationApplyConfiguration { b := &ValidatingWebhookConfigurationApplyConfiguration{} @@ -56,18 +56,18 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { +func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "") } // ExtractValidatingWebhookConfigurationStatus is the same as ExtractValidatingWebhookConfiguration except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { +func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "status") } -func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { +func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) { b := &ValidatingWebhookConfigurationApplyConfiguration{} err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiad // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(valu // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value s // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.U // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(v // If called multiple times, the Generation field is set to the value of the last call. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { +func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { +func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeri // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries ma // overwriting an existing map entries in Annotations field with the same key. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAnnotations(entri // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingWebhookConfigurationApplyConfiguration { +func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(v func (b *ValidatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ValidatingWebhookConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -250,3 +250,9 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values . } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go index ac29d1436..9966a7a28 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go @@ -19,19 +19,19 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use // with apply. type ValidationApplyConfiguration struct { - Expression *string `json:"expression,omitempty"` - Message *string `json:"message,omitempty"` - Reason *v1.StatusReason `json:"reason,omitempty"` - MessageExpression *string `json:"messageExpression,omitempty"` + Expression *string `json:"expression,omitempty"` + Message *string `json:"message,omitempty"` + Reason *metav1.StatusReason `json:"reason,omitempty"` + MessageExpression *string `json:"messageExpression,omitempty"` } -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with // apply. func Validation() *ValidationApplyConfiguration { return &ValidationApplyConfiguration{} @@ -56,7 +56,7 @@ func (b *ValidationApplyConfiguration) WithMessage(value string) *ValidationAppl // WithReason sets the Reason field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Reason field is set to the value of the last call. -func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *ValidationApplyConfiguration { +func (b *ValidationApplyConfiguration) WithReason(value metav1.StatusReason) *ValidationApplyConfiguration { b.Reason = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go index d55f29a38..9dd20afa7 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use // with apply. type VariableApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with // apply. func Variable() *VariableApplyConfiguration { return &VariableApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go index aa358ae20..77f2227b9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go new file mode 100644 index 000000000..b08ac7224 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// ApplyConfigurationApplyConfiguration represents a declarative configuration of the ApplyConfiguration type for use +// with apply. +type ApplyConfigurationApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` +} + +// ApplyConfigurationApplyConfiguration constructs a declarative configuration of the ApplyConfiguration type for use with +// apply. +func ApplyConfiguration() *ApplyConfigurationApplyConfiguration { + return &ApplyConfigurationApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *ApplyConfigurationApplyConfiguration) WithExpression(value string) *ApplyConfigurationApplyConfiguration { + b.Expression = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go index 023695139..958a53740 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1alpha1 -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use // with apply. type AuditAnnotationApplyConfiguration struct { Key *string `json:"key,omitempty"` ValueExpression *string `json:"valueExpression,omitempty"` } -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with // apply. func AuditAnnotation() *AuditAnnotationApplyConfiguration { return &AuditAnnotationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go index f8b511f51..f36c2f0f5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use // with apply. type ExpressionWarningApplyConfiguration struct { FieldRef *string `json:"fieldRef,omitempty"` Warning *string `json:"warning,omitempty"` } -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with // apply. func ExpressionWarning() *ExpressionWarningApplyConfiguration { return &ExpressionWarningApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go new file mode 100644 index 000000000..418d86a2b --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// JSONPatchApplyConfiguration represents a declarative configuration of the JSONPatch type for use +// with apply. +type JSONPatchApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` +} + +// JSONPatchApplyConfiguration constructs a declarative configuration of the JSONPatch type for use with +// apply. 
+func JSONPatch() *JSONPatchApplyConfiguration { + return &JSONPatchApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *JSONPatchApplyConfiguration) WithExpression(value string) *JSONPatchApplyConfiguration { + b.Expression = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go index 186c750f9..7f983dcb2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use // with apply. type MatchConditionApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with // apply. func MatchCondition() *MatchConditionApplyConfiguration { return &MatchConditionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go index a6710ac7e..e443535b6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use // with apply. type MatchResourcesApplyConfiguration struct { NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` @@ -33,7 +33,7 @@ type MatchResourcesApplyConfiguration struct { MatchPolicy *admissionregistrationv1alpha1.MatchPolicyType `json:"matchPolicy,omitempty"` } -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with // apply. 
func MatchResources() *MatchResourcesApplyConfiguration { return &MatchResourcesApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go new file mode 100644 index 000000000..d66071c18 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MutatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicy type for use +// with apply. +type MutatingAdmissionPolicyApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MutatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"` +} + +// MutatingAdmissionPolicy constructs a declarative configuration of the MutatingAdmissionPolicy type for use with +// apply. +func MutatingAdmissionPolicy(name string) *MutatingAdmissionPolicyApplyConfiguration { + b := &MutatingAdmissionPolicyApplyConfiguration{} + b.WithName(name) + b.WithKind("MutatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b +} + +// ExtractMutatingAdmissionPolicy extracts the applied configuration owned by fieldManager from +// mutatingAdmissionPolicy. If no managedFields are found in mutatingAdmissionPolicy for fieldManager, a +// MutatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// mutatingAdmissionPolicy must be a unmodified MutatingAdmissionPolicy API object that was retrieved from the Kubernetes API. +// ExtractMutatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) { + return extractMutatingAdmissionPolicy(mutatingAdmissionPolicy, fieldManager, "") +} + +// ExtractMutatingAdmissionPolicyStatus is the same as ExtractMutatingAdmissionPolicy except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractMutatingAdmissionPolicyStatus(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) { + return extractMutatingAdmissionPolicy(mutatingAdmissionPolicy, fieldManager, "status") +} + +func extractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string, subresource string) (*MutatingAdmissionPolicyApplyConfiguration, error) { + b := &MutatingAdmissionPolicyApplyConfiguration{} + err := managedfields.ExtractInto(mutatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(mutatingAdmissionPolicy.Name) + + b.WithKind("MutatingAdmissionPolicy") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithKind(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithName(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *MutatingAdmissionPolicyApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *MutatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *MutatingAdmissionPolicyApplyConfiguration) WithSpec(value *MutatingAdmissionPolicySpecApplyConfiguration) *MutatingAdmissionPolicyApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MutatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go new file mode 100644 index 000000000..7cccd291b --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// MutatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicyBinding type for use +// with apply. +type MutatingAdmissionPolicyBindingApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *MutatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` +} + +// MutatingAdmissionPolicyBinding constructs a declarative configuration of the MutatingAdmissionPolicyBinding type for use with +// apply. 
+func MutatingAdmissionPolicyBinding(name string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b := &MutatingAdmissionPolicyBindingApplyConfiguration{} + b.WithName(name) + b.WithKind("MutatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b +} + +// ExtractMutatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from +// mutatingAdmissionPolicyBinding. If no managedFields are found in mutatingAdmissionPolicyBinding for fieldManager, a +// MutatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// mutatingAdmissionPolicyBinding must be a unmodified MutatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API. +// ExtractMutatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding, fieldManager, "") +} + +// ExtractMutatingAdmissionPolicyBindingStatus is the same as ExtractMutatingAdmissionPolicyBinding except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractMutatingAdmissionPolicyBindingStatus(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) { + return extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding, fieldManager, "status") +} + +func extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string, subresource string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) { + b := &MutatingAdmissionPolicyBindingApplyConfiguration{} + err := managedfields.ExtractInto(mutatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(mutatingAdmissionPolicyBinding.Name) + + b.WithKind("MutatingAdmissionPolicyBinding") + b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *MutatingAdmissionPolicyBindingSpecApplyConfiguration) *MutatingAdmissionPolicyBindingApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *MutatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go new file mode 100644 index 000000000..04729f42b --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// MutatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicyBindingSpec type for use +// with apply. +type MutatingAdmissionPolicyBindingSpecApplyConfiguration struct { + PolicyName *string `json:"policyName,omitempty"` + ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"` + MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"` +} + +// MutatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the MutatingAdmissionPolicyBindingSpec type for use with +// apply. +func MutatingAdmissionPolicyBindingSpec() *MutatingAdmissionPolicyBindingSpecApplyConfiguration { + return &MutatingAdmissionPolicyBindingSpecApplyConfiguration{} +} + +// WithPolicyName sets the PolicyName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PolicyName field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingSpecApplyConfiguration) WithPolicyName(value string) *MutatingAdmissionPolicyBindingSpecApplyConfiguration { + b.PolicyName = &value + return b +} + +// WithParamRef sets the ParamRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamRef field is set to the value of the last call. +func (b *MutatingAdmissionPolicyBindingSpecApplyConfiguration) WithParamRef(value *ParamRefApplyConfiguration) *MutatingAdmissionPolicyBindingSpecApplyConfiguration { + b.ParamRef = value + return b +} + +// WithMatchResources sets the MatchResources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchResources field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *MutatingAdmissionPolicyBindingSpecApplyConfiguration { + b.MatchResources = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go new file mode 100644 index 000000000..334056a37 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go @@ -0,0 +1,113 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + +// MutatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicySpec type for use +// with apply. +type MutatingAdmissionPolicySpecApplyConfiguration struct { + ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` + MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"` + Variables []VariableApplyConfiguration `json:"variables,omitempty"` + Mutations []MutationApplyConfiguration `json:"mutations,omitempty"` + FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"` + MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` + ReinvocationPolicy *v1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` +} + +// MutatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the MutatingAdmissionPolicySpec type for use with +// apply. +func MutatingAdmissionPolicySpec() *MutatingAdmissionPolicySpecApplyConfiguration { + return &MutatingAdmissionPolicySpecApplyConfiguration{} +} + +// WithParamKind sets the ParamKind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParamKind field is set to the value of the last call. +func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithParamKind(value *ParamKindApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration { + b.ParamKind = value + return b +} + +// WithMatchConstraints sets the MatchConstraints field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchConstraints field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithMatchConstraints(value *MatchResourcesApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration { + b.MatchConstraints = value + return b +} + +// WithVariables adds the given value to the Variables field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Variables field. +func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVariables") + } + b.Variables = append(b.Variables, *values[i]) + } + return b +} + +// WithMutations adds the given value to the Mutations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Mutations field. +func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithMutations(values ...*MutationApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMutations") + } + b.Mutations = append(b.Mutations, *values[i]) + } + return b +} + +// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailurePolicy field is set to the value of the last call. +func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1alpha1.FailurePolicyType) *MutatingAdmissionPolicySpecApplyConfiguration { + b.FailurePolicy = &value + return b +} + +// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the MatchConditions field. +func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *MutatingAdmissionPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithMatchConditions") + } + b.MatchConditions = append(b.MatchConditions, *values[i]) + } + return b +} + +// WithReinvocationPolicy sets the ReinvocationPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReinvocationPolicy field is set to the value of the last call. 
+func (b *MutatingAdmissionPolicySpecApplyConfiguration) WithReinvocationPolicy(value v1.ReinvocationPolicyType) *MutatingAdmissionPolicySpecApplyConfiguration { + b.ReinvocationPolicy = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go new file mode 100644 index 000000000..4ed9d93fd --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" +) + +// MutationApplyConfiguration represents a declarative configuration of the Mutation type for use +// with apply. +type MutationApplyConfiguration struct { + PatchType *admissionregistrationv1alpha1.PatchType `json:"patchType,omitempty"` + ApplyConfiguration *ApplyConfigurationApplyConfiguration `json:"applyConfiguration,omitempty"` + JSONPatch *JSONPatchApplyConfiguration `json:"jsonPatch,omitempty"` +} + +// MutationApplyConfiguration constructs a declarative configuration of the Mutation type for use with +// apply. +func Mutation() *MutationApplyConfiguration { + return &MutationApplyConfiguration{} +} + +// WithPatchType sets the PatchType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PatchType field is set to the value of the last call. +func (b *MutationApplyConfiguration) WithPatchType(value admissionregistrationv1alpha1.PatchType) *MutationApplyConfiguration { + b.PatchType = &value + return b +} + +// WithApplyConfiguration sets the ApplyConfiguration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ApplyConfiguration field is set to the value of the last call. +func (b *MutationApplyConfiguration) WithApplyConfiguration(value *ApplyConfigurationApplyConfiguration) *MutationApplyConfiguration { + b.ApplyConfiguration = value + return b +} + +// WithJSONPatch sets the JSONPatch field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JSONPatch field is set to the value of the last call. 
+func (b *MutationApplyConfiguration) WithJSONPatch(value *JSONPatchApplyConfiguration) *MutationApplyConfiguration { + b.JSONPatch = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go index bb2a7ba89..f630224ac 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go @@ -23,14 +23,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" ) -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use // with apply. type NamedRuleWithOperationsApplyConfiguration struct { ResourceNames []string `json:"resourceNames,omitempty"` v1.RuleWithOperationsApplyConfiguration `json:",inline"` } -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with // apply. func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { return &NamedRuleWithOperationsApplyConfiguration{} @@ -51,7 +51,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ... // If called multiple times, values provided by each call will be appended to the Operations field. func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Operations = append(b.Operations, values[i]) + b.RuleWithOperationsApplyConfiguration.Operations = append(b.RuleWithOperationsApplyConfiguration.Operations, values[i]) } return b } @@ -61,7 +61,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...adm // If called multiple times, values provided by each call will be appended to the APIGroups field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) + b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i]) } return b } @@ -71,7 +71,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...stri // If called multiple times, values provided by each call will be appended to the APIVersions field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) + b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i]) } return b } @@ -81,7 +81,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...st // If called multiple times, values provided by each call will be appended to the Resources field. 
func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Resources = append(b.Resources, values[i]) + b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i]) } return b } @@ -90,6 +90,6 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { - b.Scope = &value + b.RuleApplyConfiguration.Scope = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go index 350993cea..daf17fb24 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use // with apply. type ParamKindApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` Kind *string `json:"kind,omitempty"` } -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with // apply. func ParamKind() *ParamKindApplyConfiguration { return &ParamKindApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go index 0951cae8a..669fadbd4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go @@ -19,20 +19,20 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use // with apply. 
type ParamRefApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Namespace *string `json:"namespace,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - ParameterNotFoundAction *v1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + ParameterNotFoundAction *admissionregistrationv1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with // apply. func ParamRef() *ParamRefApplyConfiguration { return &ParamRefApplyConfiguration{} @@ -65,7 +65,7 @@ func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyCo // WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ParameterNotFoundAction field is set to the value of the last call. -func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1alpha1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { +func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value admissionregistrationv1alpha1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { b.ParameterNotFoundAction = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go index 42a917071..d1a7fff50 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go @@ -18,13 +18,13 @@ limitations under the License. package v1alpha1 -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use // with apply. type TypeCheckingApplyConfiguration struct { ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` } -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with // apply. 
func TypeChecking() *TypeCheckingApplyConfiguration { return &TypeCheckingApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go index c860b85cf..7fd1c0651 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use // with apply. type ValidatingAdmissionPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ValidatingAdmissionPolicyApplyConfiguration struct { Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with // apply. func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { b := &ValidatingAdmissionPolicyApplyConfiguration{} @@ -85,7 +85,7 @@ func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregist // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *Va // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value strin // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *Va // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(valu // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index dc0822640..ca8ac7dd0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use // with apply. type ValidatingAdmissionPolicyBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingAdmissionPolicyBindingApplyConfiguration struct { Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` } -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with // apply. func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} @@ -84,7 +84,7 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(valu // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value stri // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(va // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion // If called multiple times, the Generation field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(valu // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimesta // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimesta // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePe // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries // overwriting an existing map entries in Annotations field with the same key. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go index c9a4ff7ab..0f8e4e435 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" ) -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { PolicyName *string `json:"policyName,omitempty"` @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { ValidationActions []admissionregistrationv1alpha1.ValidationAction `json:"validationActions,omitempty"` } -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with // apply. func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go index 7ee320e42..d5d352994 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" ) -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use // with apply. type ValidatingAdmissionPolicySpecApplyConfiguration struct { ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { Variables []VariableApplyConfiguration `json:"variables,omitempty"` } -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with // apply. 
func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { return &ValidatingAdmissionPolicySpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go index 821184c8a..2fec5ba47 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use // with apply. type ValidatingAdmissionPolicyStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct { Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with // apply. func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { return &ValidatingAdmissionPolicyStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go index 9a5fc8475..5f7304373 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use // with apply. type ValidationApplyConfiguration struct { Expression *string `json:"expression,omitempty"` @@ -31,7 +31,7 @@ type ValidationApplyConfiguration struct { MessageExpression *string `json:"messageExpression,omitempty"` } -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with // apply. 
func Validation() *ValidationApplyConfiguration { return &ValidationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go index 2c70a8cfb..0459dae65 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use // with apply. type VariableApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with // apply. func Variable() *VariableApplyConfiguration { return &VariableApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go index e92fba0dd..8718db944 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use +// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use // with apply. type AuditAnnotationApplyConfiguration struct { Key *string `json:"key,omitempty"` ValueExpression *string `json:"valueExpression,omitempty"` } -// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with +// AuditAnnotationApplyConfiguration constructs a declarative configuration of the AuditAnnotation type for use with // apply. func AuditAnnotation() *AuditAnnotationApplyConfiguration { return &AuditAnnotationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go index 059c1b94b..66cfc8cdc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use +// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use // with apply. 
type ExpressionWarningApplyConfiguration struct { FieldRef *string `json:"fieldRef,omitempty"` Warning *string `json:"warning,omitempty"` } -// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with +// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with // apply. func ExpressionWarning() *ExpressionWarningApplyConfiguration { return &ExpressionWarningApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go index d099b6b6e..63db7fc80 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// MatchConditionApplyConfiguration represents an declarative configuration of the MatchCondition type for use +// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use // with apply. type MatchConditionApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// MatchConditionApplyConfiguration constructs an declarative configuration of the MatchCondition type for use with +// MatchConditionApplyConfiguration constructs a declarative configuration of the MatchCondition type for use with // apply. func MatchCondition() *MatchConditionApplyConfiguration { return &MatchConditionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go index 25d4139db..4005e55a3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use +// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use // with apply. type MatchResourcesApplyConfiguration struct { NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` @@ -33,7 +33,7 @@ type MatchResourcesApplyConfiguration struct { MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"` } -// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with +// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with // apply. 
func MatchResources() *MatchResourcesApplyConfiguration { return &MatchResourcesApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go index 54845341f..b2ab76aef 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookApplyConfiguration represents an declarative configuration of the MutatingWebhook type for use +// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use // with apply. type MutatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -41,7 +41,7 @@ type MutatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// MutatingWebhookApplyConfiguration constructs an declarative configuration of the MutatingWebhook type for use with +// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with // apply. func MutatingWebhook() *MutatingWebhookApplyConfiguration { return &MutatingWebhookApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 10dd034e2..15c54c125 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MutatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the MutatingWebhookConfiguration type for use +// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use // with apply. type MutatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type MutatingWebhookConfigurationApplyConfiguration struct { Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// MutatingWebhookConfiguration constructs an declarative configuration of the MutatingWebhookConfiguration type for use with +// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with // apply. func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationApplyConfiguration { b := &MutatingWebhookConfigurationApplyConfiguration{} @@ -84,7 +84,7 @@ func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admission // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithKind(value string) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *MutatingWebhookConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value st // If called multiple times, the Name field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithName(value string) // If called multiple times, the GenerateName field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithGenerateName(value // If called multiple times, the Namespace field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithNamespace(value str // If called multiple times, the UID field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithResourceVersion(val // If called multiple times, the Generation field is set to the value of the last call. 
func (b *MutatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithGeneration(value in // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(v // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(v // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriod // overwriting an existing map entries in Labels field with the same key. func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[ // overwriting an existing map entries in Annotations field with the same key. 
func (b *MutatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(val if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(val func (b *MutatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *MutatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -250,3 +250,9 @@ func (b *MutatingWebhookConfigurationApplyConfiguration) WithWebhooks(values ... } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *MutatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go index fa346c4a5..62c617d2f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go @@ -23,14 +23,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" ) -// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use +// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use // with apply. type NamedRuleWithOperationsApplyConfiguration struct { ResourceNames []string `json:"resourceNames,omitempty"` v1.RuleWithOperationsApplyConfiguration `json:",inline"` } -// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with +// NamedRuleWithOperationsApplyConfiguration constructs a declarative configuration of the NamedRuleWithOperations type for use with // apply. func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration { return &NamedRuleWithOperationsApplyConfiguration{} @@ -51,7 +51,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ... // If called multiple times, values provided by each call will be appended to the Operations field. 
func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Operations = append(b.Operations, values[i]) + b.RuleWithOperationsApplyConfiguration.Operations = append(b.RuleWithOperationsApplyConfiguration.Operations, values[i]) } return b } @@ -61,7 +61,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...adm // If called multiple times, values provided by each call will be appended to the APIGroups field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIGroups = append(b.APIGroups, values[i]) + b.RuleApplyConfiguration.APIGroups = append(b.RuleApplyConfiguration.APIGroups, values[i]) } return b } @@ -71,7 +71,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...stri // If called multiple times, values provided by each call will be appended to the APIVersions field. func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.APIVersions = append(b.APIVersions, values[i]) + b.RuleApplyConfiguration.APIVersions = append(b.RuleApplyConfiguration.APIVersions, values[i]) } return b } @@ -81,7 +81,7 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...st // If called multiple times, values provided by each call will be appended to the Resources field. func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration { for i := range values { - b.Resources = append(b.Resources, values[i]) + b.RuleApplyConfiguration.Resources = append(b.RuleApplyConfiguration.Resources, values[i]) } return b } @@ -90,6 +90,6 @@ func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scope field is set to the value of the last call. func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration { - b.Scope = &value + b.RuleApplyConfiguration.Scope = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go index 6050e6025..398312528 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use +// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use // with apply. type ParamKindApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` Kind *string `json:"kind,omitempty"` } -// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with +// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with // apply. 
func ParamKind() *ParamKindApplyConfiguration { return &ParamKindApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go index 2be98dbc5..5143b0cb9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go @@ -19,20 +19,20 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use +// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use // with apply. type ParamRefApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Namespace *string `json:"namespace,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - ParameterNotFoundAction *v1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` + Name *string `json:"name,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + ParameterNotFoundAction *admissionregistrationv1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"` } -// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with +// ParamRefApplyConfiguration constructs a declarative configuration of the ParamRef type for use with // apply. func ParamRef() *ParamRefApplyConfiguration { return &ParamRefApplyConfiguration{} @@ -65,7 +65,7 @@ func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyCo // WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ParameterNotFoundAction field is set to the value of the last call. -func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1beta1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { +func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value admissionregistrationv1beta1.ParameterNotFoundActionType) *ParamRefApplyConfiguration { b.ParameterNotFoundAction = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go index c21b57490..70cc6b5b2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ServiceReferenceApplyConfiguration represents an declarative configuration of the ServiceReference type for use +// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use // with apply. 
type ServiceReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -27,7 +27,7 @@ type ServiceReferenceApplyConfiguration struct { Port *int32 `json:"port,omitempty"` } -// ServiceReferenceApplyConfiguration constructs an declarative configuration of the ServiceReference type for use with +// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with // apply. func ServiceReference() *ServiceReferenceApplyConfiguration { return &ServiceReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go index 07baf334c..cea6e11de 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use +// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use // with apply. type TypeCheckingApplyConfiguration struct { ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"` } -// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with +// TypeCheckingApplyConfiguration constructs a declarative configuration of the TypeChecking type for use with // apply. func TypeChecking() *TypeCheckingApplyConfiguration { return &TypeCheckingApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go index e144bc9f7..35a8adbf7 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use +// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use // with apply. type ValidatingAdmissionPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ValidatingAdmissionPolicyApplyConfiguration struct { Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"` } -// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with +// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with // apply. func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration { b := &ValidatingAdmissionPolicyApplyConfiguration{} @@ -85,7 +85,7 @@ func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregist // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Kind field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *Va // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value strin // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *Va // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(valu // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *Validati b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 0dc06aede..191d045ef 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use +// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use // with apply. type ValidatingAdmissionPolicyBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingAdmissionPolicyBindingApplyConfiguration struct { Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"` } -// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with +// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with // apply. 
func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b := &ValidatingAdmissionPolicyBindingApplyConfiguration{} @@ -84,7 +84,7 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(valu // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value stri // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(va // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion // If called multiple times, the Generation field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(valu // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimesta // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimesta // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePe // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries // overwriting an existing map entries in Annotations field with the same key. 
func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -245,3 +245,9 @@ func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *Val b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go index d20a78eff..bddc3a40c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" ) -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use // with apply. type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { PolicyName *string `json:"policyName,omitempty"` @@ -31,7 +31,7 @@ type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct { ValidationActions []admissionregistrationv1beta1.ValidationAction `json:"validationActions,omitempty"` } -// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with +// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with // apply. 
func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration { return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go index c6e938910..8b235337d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go @@ -22,7 +22,7 @@ import ( admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" ) -// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use +// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use // with apply. type ValidatingAdmissionPolicySpecApplyConfiguration struct { ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"` @@ -34,7 +34,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct { Variables []VariableApplyConfiguration `json:"variables,omitempty"` } -// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with +// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with // apply. func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration { return &ValidatingAdmissionPolicySpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go index e3e6d417e..4612af0cf 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use +// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use // with apply. type ValidatingAdmissionPolicyStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -30,7 +30,7 @@ type ValidatingAdmissionPolicyStatusApplyConfiguration struct { Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with +// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with // apply. 
func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration { return &ValidatingAdmissionPolicyStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go index 8c5c341ba..1e107d68f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookApplyConfiguration represents an declarative configuration of the ValidatingWebhook type for use +// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use // with apply. type ValidatingWebhookApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -40,7 +40,7 @@ type ValidatingWebhookApplyConfiguration struct { MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"` } -// ValidatingWebhookApplyConfiguration constructs an declarative configuration of the ValidatingWebhook type for use with +// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with // apply. func ValidatingWebhook() *ValidatingWebhookApplyConfiguration { return &ValidatingWebhookApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 75f1b9d71..e775e55a3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ValidatingWebhookConfigurationApplyConfiguration represents an declarative configuration of the ValidatingWebhookConfiguration type for use +// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use // with apply. type ValidatingWebhookConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ValidatingWebhookConfigurationApplyConfiguration struct { Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"` } -// ValidatingWebhookConfiguration constructs an declarative configuration of the ValidatingWebhookConfiguration type for use with +// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with // apply. func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfigurationApplyConfiguration { b := &ValidatingWebhookConfigurationApplyConfiguration{} @@ -84,7 +84,7 @@ func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admis // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithKind(value string // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAPIVersion(value // If called multiple times, the Name field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithName(value string // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGenerateName(valu // If called multiple times, the Namespace field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithNamespace(value s // If called multiple times, the UID field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.UID) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithUID(value types.U // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(value string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithResourceVersion(v // If called multiple times, the Generation field is set to the value of the last call. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGeneration(value int64) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithGeneration(value // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithCreationTimestamp // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionTimestamp // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithDeletionGracePeri // overwriting an existing map entries in Labels field with the same key. func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithLabels(entries ma // overwriting an existing map entries in Annotations field with the same key. 
func (b *ValidatingWebhookConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(v if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithOwnerReferences(v func (b *ValidatingWebhookConfigurationApplyConfiguration) WithFinalizers(values ...string) *ValidatingWebhookConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -250,3 +250,9 @@ func (b *ValidatingWebhookConfigurationApplyConfiguration) WithWebhooks(values . } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ValidatingWebhookConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go index ed9ff1ac0..019e8e7aa 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use +// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use // with apply. type ValidationApplyConfiguration struct { Expression *string `json:"expression,omitempty"` @@ -31,7 +31,7 @@ type ValidationApplyConfiguration struct { MessageExpression *string `json:"messageExpression,omitempty"` } -// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with +// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with // apply. 
func Validation() *ValidationApplyConfiguration { return &ValidationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go index 0fc294c65..0ece197db 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// VariableApplyConfiguration represents an declarative configuration of the Variable type for use +// VariableApplyConfiguration represents a declarative configuration of the Variable type for use // with apply. type VariableApplyConfiguration struct { Name *string `json:"name,omitempty"` Expression *string `json:"expression,omitempty"` } -// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with +// VariableApplyConfiguration constructs a declarative configuration of the Variable type for use with // apply. func Variable() *VariableApplyConfiguration { return &VariableApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go index 490f9d5f3..76ff71b4a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// WebhookClientConfigApplyConfiguration represents an declarative configuration of the WebhookClientConfig type for use +// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use // with apply. type WebhookClientConfigApplyConfiguration struct { URL *string `json:"url,omitempty"` @@ -26,7 +26,7 @@ type WebhookClientConfigApplyConfiguration struct { CABundle []byte `json:"caBundle,omitempty"` } -// WebhookClientConfigApplyConfiguration constructs an declarative configuration of the WebhookClientConfig type for use with +// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with // apply. func WebhookClientConfig() *WebhookClientConfigApplyConfiguration { return &WebhookClientConfigApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go index 81c56330b..8394298b9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// ServerStorageVersionApplyConfiguration represents an declarative configuration of the ServerStorageVersion type for use +// ServerStorageVersionApplyConfiguration represents a declarative configuration of the ServerStorageVersion type for use // with apply. 
type ServerStorageVersionApplyConfiguration struct { APIServerID *string `json:"apiServerID,omitempty"` @@ -27,7 +27,7 @@ type ServerStorageVersionApplyConfiguration struct { ServedVersions []string `json:"servedVersions,omitempty"` } -// ServerStorageVersionApplyConfiguration constructs an declarative configuration of the ServerStorageVersion type for use with +// ServerStorageVersionApplyConfiguration constructs a declarative configuration of the ServerStorageVersion type for use with // apply. func ServerStorageVersion() *ServerStorageVersionApplyConfiguration { return &ServerStorageVersionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go index 6b9f17839..0061d8afb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,16 +27,16 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageVersionApplyConfiguration represents an declarative configuration of the StorageVersion type for use +// StorageVersionApplyConfiguration represents a declarative configuration of the StorageVersion type for use // with apply. type StorageVersionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *v1alpha1.StorageVersionSpec `json:"spec,omitempty"` - Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"` + Spec *apiserverinternalv1alpha1.StorageVersionSpec `json:"spec,omitempty"` + Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"` } -// StorageVersion constructs an declarative configuration of the StorageVersion type for use with +// StorageVersion constructs a declarative configuration of the StorageVersion type for use with // apply. func StorageVersion(name string) *StorageVersionApplyConfiguration { b := &StorageVersionApplyConfiguration{} @@ -57,18 +57,18 @@ func StorageVersion(name string) *StorageVersionApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) { +func ExtractStorageVersion(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) { return extractStorageVersion(storageVersion, fieldManager, "") } // ExtractStorageVersionStatus is the same as ExtractStorageVersion except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractStorageVersionStatus(storageVersion *v1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) { +func ExtractStorageVersionStatus(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) { return extractStorageVersion(storageVersion, fieldManager, "status") } -func extractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager string, subresource string) (*StorageVersionApplyConfiguration, error) { +func extractStorageVersion(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string, subresource string) (*StorageVersionApplyConfiguration, error) { b := &StorageVersionApplyConfiguration{} err := managedfields.ExtractInto(storageVersion, internal.Parser().Type("io.k8s.api.apiserverinternal.v1alpha1.StorageVersion"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithKind(value string) *StorageVersionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *StorageVersionApplyConfiguration) WithKind(value string) *StorageVersio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithAPIVersion(value string) *StorageVersionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *StorageVersionApplyConfiguration) WithAPIVersion(value string) *Storage // If called multiple times, the Name field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithName(value string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *StorageVersionApplyConfiguration) WithName(value string) *StorageVersio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithGenerateName(value string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *StorageVersionApplyConfiguration) WithGenerateName(value string) *Stora // If called multiple times, the Namespace field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithNamespace(value string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *StorageVersionApplyConfiguration) WithNamespace(value string) *StorageV // If called multiple times, the UID field is set to the value of the last call. 
func (b *StorageVersionApplyConfiguration) WithUID(value types.UID) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *StorageVersionApplyConfiguration) WithUID(value types.UID) *StorageVers // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithResourceVersion(value string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *StorageVersionApplyConfiguration) WithResourceVersion(value string) *St // If called multiple times, the Generation field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithGeneration(value int64) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *StorageVersionApplyConfiguration) WithGeneration(value int64) *StorageV // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *StorageVersionApplyConfiguration) WithCreationTimestamp(value metav1.Ti // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *StorageVersionApplyConfiguration) WithDeletionTimestamp(value metav1.Ti // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StorageVersionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *StorageVersionApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *StorageVersionApplyConfiguration) WithLabels(entries map[string]string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *StorageVersionApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. 
func (b *StorageVersionApplyConfiguration) WithAnnotations(entries map[string]string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *StorageVersionApplyConfiguration) WithOwnerReferences(values ...*v1.Own if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *StorageVersionApplyConfiguration) WithOwnerReferences(values ...*v1.Own func (b *StorageVersionApplyConfiguration) WithFinalizers(values ...string) *StorageVersionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -242,7 +242,7 @@ func (b *StorageVersionApplyConfiguration) ensureObjectMetaApplyConfigurationExi // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *StorageVersionApplyConfiguration) WithSpec(value v1alpha1.StorageVersionSpec) *StorageVersionApplyConfiguration { +func (b *StorageVersionApplyConfiguration) WithSpec(value apiserverinternalv1alpha1.StorageVersionSpec) *StorageVersionApplyConfiguration { b.Spec = &value return b } @@ -254,3 +254,9 @@ func (b *StorageVersionApplyConfiguration) WithStatus(value *StorageVersionStatu b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageVersionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go index 75b625647..1ed71cf8e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StorageVersionConditionApplyConfiguration represents an declarative configuration of the StorageVersionCondition type for use +// StorageVersionConditionApplyConfiguration represents a declarative configuration of the StorageVersionCondition type for use // with apply. 
type StorageVersionConditionApplyConfiguration struct { - Type *v1alpha1.StorageVersionConditionType `json:"type,omitempty"` - Status *v1alpha1.ConditionStatus `json:"status,omitempty"` - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *apiserverinternalv1alpha1.StorageVersionConditionType `json:"type,omitempty"` + Status *apiserverinternalv1alpha1.ConditionStatus `json:"status,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// StorageVersionConditionApplyConfiguration constructs an declarative configuration of the StorageVersionCondition type for use with +// StorageVersionConditionApplyConfiguration constructs a declarative configuration of the StorageVersionCondition type for use with // apply. func StorageVersionCondition() *StorageVersionConditionApplyConfiguration { return &StorageVersionConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func StorageVersionCondition() *StorageVersionConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *StorageVersionConditionApplyConfiguration) WithType(value v1alpha1.StorageVersionConditionType) *StorageVersionConditionApplyConfiguration { +func (b *StorageVersionConditionApplyConfiguration) WithType(value apiserverinternalv1alpha1.StorageVersionConditionType) *StorageVersionConditionApplyConfiguration { b.Type = &value return b } @@ -51,7 +51,7 @@ func (b *StorageVersionConditionApplyConfiguration) WithType(value v1alpha1.Stor // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *StorageVersionConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *StorageVersionConditionApplyConfiguration { +func (b *StorageVersionConditionApplyConfiguration) WithStatus(value apiserverinternalv1alpha1.ConditionStatus) *StorageVersionConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go index 43b0bf71b..2e25d6752 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// StorageVersionStatusApplyConfiguration represents an declarative configuration of the StorageVersionStatus type for use +// StorageVersionStatusApplyConfiguration represents a declarative configuration of the StorageVersionStatus type for use // with apply. 
type StorageVersionStatusApplyConfiguration struct { StorageVersions []ServerStorageVersionApplyConfiguration `json:"storageVersions,omitempty"` @@ -26,7 +26,7 @@ type StorageVersionStatusApplyConfiguration struct { Conditions []StorageVersionConditionApplyConfiguration `json:"conditions,omitempty"` } -// StorageVersionStatusApplyConfiguration constructs an declarative configuration of the StorageVersionStatus type for use with +// StorageVersionStatusApplyConfiguration constructs a declarative configuration of the StorageVersionStatus type for use with // apply. func StorageVersionStatus() *StorageVersionStatusApplyConfiguration { return &StorageVersionStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go index c4e208507..bfdad4a73 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go @@ -20,24 +20,24 @@ package v1 import ( appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Data *runtime.RawExtension `json:"data,omitempty"` - Revision *int64 `json:"revision,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Data *runtime.RawExtension `json:"data,omitempty"` + Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -88,7 +88,7 @@ func extractControllerRevision(controllerRevision *appsv1.ControllerRevision, fi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *ControllerRevisionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *Controlle // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *ControllerRevisionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *Con // If called multiple times, the Name field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *ControllerRevisionApplyConfiguration) WithName(value string) *Controlle // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont // If called multiple times, the UID field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *Control // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,25 +150,25 @@ func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { +func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { +func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -214,13 +214,13 @@ func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ControllerRevisionApplyConfiguration { +func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,14 +231,14 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ControllerRevisionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go index cc9fdcd5d..47883d043 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apiappsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsv1 "k8s.io/api/apps/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. 
type DaemonSetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"` - Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"` + Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -58,18 +58,18 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { +func ExtractDaemonSet(daemonSet *appsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { return extractDaemonSet(daemonSet, fieldManager, "") } // ExtractDaemonSetStatus is the same as ExtractDaemonSet except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractDaemonSetStatus(daemonSet *apiappsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { +func ExtractDaemonSetStatus(daemonSet *appsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) { return extractDaemonSet(daemonSet, fieldManager, "status") } -func extractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) { +func extractDaemonSet(daemonSet *appsv1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) { b := &DaemonSetApplyConfiguration{} err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1.DaemonSet"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string, subre // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApp // If called multiple times, the Name field is set to the value of the last call. 
func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetA // If called multiple times, the Namespace field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl // If called multiple times, the UID field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonS // If called multiple times, the Generation field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { +func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { +func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *Dae // overwriting an existing map entries in Annotations field with the same key. func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DaemonSetApplyConfiguration { +func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *DaemonSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go index 283ae10a2..8c56e4994 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { - Type *v1.DaemonSetConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1.DaemonSetConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. 
func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DaemonSetConditionApplyConfiguration) WithType(value v1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { +func (b *DaemonSetConditionApplyConfiguration) WithType(value appsv1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go index 5e808874b..d2382b80e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go @@ -20,20 +20,20 @@ package v1 import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` UpdateStrategy *DaemonSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} @@ -42,7 +42,7 @@ func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *DaemonSetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *DaemonSetSpecApplyConfiguration { +func (b *DaemonSetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *DaemonSetSpecApplyConfiguration { b.Selector = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go index d1c4462aa..a40dc1651 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go index f1ba18226..993e1bd57 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { - Type *v1.DaemonSetUpdateStrategyType `json:"type,omitempty"` + Type *appsv1.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} @@ -38,7 +38,7 @@ func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value v1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { +func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value appsv1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go index 13edda772..485357c00 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go @@ -19,24 +19,24 @@ limitations under the License. 
package v1 import ( - apiappsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsv1 "k8s.io/api/apps/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"` - Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"` + Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -58,18 +58,18 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractDeployment(deployment *apiappsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { +func ExtractDeployment(deployment *appsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { return extractDeployment(deployment, fieldManager, "") } // ExtractDeploymentStatus is the same as ExtractDeployment except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractDeploymentStatus(deployment *apiappsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { +func ExtractDeploymentStatus(deployment *appsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) { return extractDeployment(deployment, fieldManager, "status") } -func extractDeployment(deployment *apiappsv1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) { +func extractDeployment(deployment *appsv1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) { b := &DeploymentApplyConfiguration{} err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1.Deployment"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractDeployment(deployment *apiappsv1.Deployment, fieldManager string, su // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA // If called multiple times, the Name field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen // If called multiple times, the Namespace field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp // If called multiple times, the UID field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy // If called multiple times, the Generation field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration { +func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration { +func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De // overwriting an existing map entries in Annotations field with the same key. func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeploymentApplyConfiguration { +func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *DeploymentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go index 774704413..3a6693637 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. 
type DeploymentConditionApplyConfiguration struct { - Type *v1.DeploymentConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1.DeploymentConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} @@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentConditionApplyConfiguration) WithType(value v1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { +func (b *DeploymentConditionApplyConfiguration) WithType(value appsv1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go index 812253dae..5f34b0582 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go @@ -20,14 +20,14 @@ package v1 import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` @@ -36,7 +36,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. 
func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} @@ -53,7 +53,7 @@ func (b *DeploymentSpecApplyConfiguration) WithReplicas(value int32) *Deployment // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *DeploymentSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *DeploymentSpecApplyConfiguration { +func (b *DeploymentSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *DeploymentSpecApplyConfiguration { b.Selector = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go index 7b48b4255..747813ade 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go index e9571edab..7bf8a1595 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { - Type *v1.DeploymentStrategyType `json:"type,omitempty"` + Type *appsv1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. 
func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} @@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentStrategyApplyConfiguration) WithType(value v1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { +func (b *DeploymentStrategyApplyConfiguration) WithType(value appsv1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go index 4e7818e53..6e9c0e14f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apiappsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsv1 "k8s.io/api/apps/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"` - Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"` + Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -58,18 +58,18 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { +func ExtractReplicaSet(replicaSet *appsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { return extractReplicaSet(replicaSet, fieldManager, "") } // ExtractReplicaSetStatus is the same as ExtractReplicaSet except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractReplicaSetStatus(replicaSet *apiappsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { +func ExtractReplicaSetStatus(replicaSet *appsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) { return extractReplicaSet(replicaSet, fieldManager, "status") } -func extractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) { +func extractReplicaSet(replicaSet *appsv1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) { b := &ReplicaSetApplyConfiguration{} err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1.ReplicaSet"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string, su // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetA // If called multiple times, the Name field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSe // If called multiple times, the Namespace field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp // If called multiple times, the UID field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApply // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *Replic // If called multiple times, the Generation field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { +func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { +func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *Re // overwriting an existing map entries in Annotations field with the same key. 
func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ReplicaSetApplyConfiguration { +func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ReplicaSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go index 19b0355d1..0325ce058 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. 
type ReplicaSetConditionApplyConfiguration struct { - Type *v1.ReplicaSetConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1.ReplicaSetConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *ReplicaSetConditionApplyConfiguration) WithType(value v1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { +func (b *ReplicaSetConditionApplyConfiguration) WithType(value appsv1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go index ca3286583..714ddcfe3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go @@ -20,19 +20,19 @@ package v1 import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} @@ -57,7 +57,7 @@ func (b *ReplicaSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *Rep // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Selector field is set to the value of the last call. -func (b *ReplicaSetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ReplicaSetSpecApplyConfiguration { +func (b *ReplicaSetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *ReplicaSetSpecApplyConfiguration { b.Selector = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go index 12f41490f..a1408ae25 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go index ebe8e86d1..e898f5081 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. 
func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go index ca9daaf24..2bc293724 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go index c1b5dea85..dd0de81a6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { Partition *int32 `json:"partition,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go index 24041d99f..cb5306935 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go @@ -19,24 +19,24 @@ limitations under the License. 
package v1 import ( - apiappsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsv1 "k8s.io/api/apps/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"` - Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"` + Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -58,18 +58,18 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { +func ExtractStatefulSet(statefulSet *appsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { return extractStatefulSet(statefulSet, fieldManager, "") } // ExtractStatefulSetStatus is the same as ExtractStatefulSet except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractStatefulSetStatus(statefulSet *apiappsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { +func ExtractStatefulSetStatus(statefulSet *appsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) { return extractStatefulSet(statefulSet, fieldManager, "status") } -func extractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) { +func extractStatefulSet(statefulSet *appsv1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) { b := &StatefulSetApplyConfiguration{} err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1.StatefulSet"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSe // If called multiple times, the Name field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *Stateful // If called multiple times, the Namespace field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet // If called multiple times, the UID field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *State // If called multiple times, the Generation field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { +func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { +func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *S // overwriting an existing map entries in Annotations field with the same key. func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *StatefulSetApplyConfiguration { +func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *StatefulSetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go index f9d47850d..45b2ad81f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { - Type *v1.StatefulSetConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1.StatefulSetConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. 
func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *StatefulSetConditionApplyConfiguration) WithType(value v1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { +func (b *StatefulSetConditionApplyConfiguration) WithType(value appsv1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go index 9778f1c4a..86f39e16c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use // with apply. type StatefulSetOrdinalsApplyConfiguration struct { Start *int32 `json:"start,omitempty"` } -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with // apply. func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { return &StatefulSetOrdinalsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go index ba01d5d3c..dff3e2a76 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. 
type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { - WhenDeleted *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` - WhenScaled *v1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` + WhenDeleted *appsv1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *appsv1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} @@ -38,7 +38,7 @@ func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVol // WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenDeleted field is set to the value of the last call. -func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value appsv1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenDeleted = &value return b } @@ -46,7 +46,7 @@ func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) With // WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenScaled field is set to the value of the last call. 
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value appsv1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenScaled = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go index 81afdca59..c48b64fe3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go @@ -21,14 +21,14 @@ package v1 import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/client-go/applyconfigurations/core/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` ServiceName *string `json:"serviceName,omitempty"` @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct { Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} @@ -57,7 +57,7 @@ func (b *StatefulSetSpecApplyConfiguration) WithReplicas(value int32) *StatefulS // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *StatefulSetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *StatefulSetSpecApplyConfiguration { +func (b *StatefulSetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *StatefulSetSpecApplyConfiguration { b.Selector = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go index d88881b65..637a1c649 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go index 5268a1e06..ae135d34d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { - Type *v1.StatefulSetUpdateStrategyType `json:"type,omitempty"` + Type *appsv1.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} @@ -38,7 +38,7 @@ func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value v1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { +func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value appsv1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go index 827c06359..910dd7bec 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -59,18 +59,18 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractControllerRevision(controllerRevision *v1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { +func ExtractControllerRevision(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { return extractControllerRevision(controllerRevision, fieldManager, "") } // ExtractControllerRevisionStatus is the same as ExtractControllerRevision except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractControllerRevisionStatus(controllerRevision *v1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { +func ExtractControllerRevisionStatus(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { return extractControllerRevision(controllerRevision, fieldManager, "status") } -func extractControllerRevision(controllerRevision *v1beta1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { +func extractControllerRevision(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { b := &ControllerRevisionApplyConfiguration{} err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta1.ControllerRevision"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractControllerRevision(controllerRevision *v1beta1.ControllerRevision, f // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *ControllerRevisionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *Controlle // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *ControllerRevisionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *Con // If called multiple times, the Name field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *ControllerRevisionApplyConfiguration) WithName(value string) *Controlle // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont // If called multiple times, the UID field is set to the value of the last call. 
func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *Control // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,7 +150,7 @@ func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -159,7 +159,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *Cont // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -168,7 +168,7 @@ func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -220,7 +220,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,7 +231,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go index e22f76b66..057ea5b6f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -87,7 +87,7 @@ func extractDeployment(deployment *appsv1beta1.Deployment, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA // If called multiple times, the Name field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen // If called multiple times, the Namespace field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp // If called multiple times, the UID field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy // If called multiple times, the Generation field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De // overwriting an existing map entries in Annotations field with the same key. 
func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go index 9da8ce089..b0a45b1a6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go @@ -19,23 +19,23 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. 
type DeploymentConditionApplyConfiguration struct { - Type *v1beta1.DeploymentConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta1.DeploymentConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} @@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentConditionApplyConfiguration) WithType(value v1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { +func (b *DeploymentConditionApplyConfiguration) WithType(value appsv1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go index 5e18476bd..5531c756f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,7 +37,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go index f8d1cf5d2..adc023a34 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go index 7279318a8..03e66555a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { - Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"` + Type *appsv1beta1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} @@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentStrategyApplyConfiguration) WithType(value v1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { +func (b *DeploymentStrategyApplyConfiguration) WithType(value appsv1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go index 131e57a39..775f82eef 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1beta1 -// RollbackConfigApplyConfiguration represents an declarative configuration of the RollbackConfig type for use +// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use // with apply. type RollbackConfigApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// RollbackConfigApplyConfiguration constructs an declarative configuration of the RollbackConfig type for use with +// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with // apply. func RollbackConfig() *RollbackConfigApplyConfiguration { return &RollbackConfigApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go index dde5f064b..244701a5e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go index 8989a08d2..94c297134 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { Partition *int32 `json:"partition,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. 
func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go index ed5cfab41..ba8aa3a4c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -87,7 +87,7 @@ func extractStatefulSet(statefulSet *appsv1beta1.StatefulSet, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSe // If called multiple times, the Name field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *Stateful // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet // If called multiple times, the UID field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *State // If called multiple times, the Generation field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSet // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. 
func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *S // overwriting an existing map entries in Annotations field with the same key. func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go index 97e994ab7..5a13584bc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. 
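The regenerated StatefulSet builder above stops relying on Go field promotion when writing through the embedded TypeMetaApplyConfiguration and ObjectMetaApplyConfiguration, and it gains a GetName getter. Callers see no change in the chaining behaviour. The sketch below is illustrative only (the package alias and example values are assumptions, not part of this diff) and uses only functions that appear in the hunks above.

package main

import (
	"fmt"

	appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
)

func main() {
	// StatefulSet(name, namespace) pre-populates the object meta before chaining.
	sts := appsv1beta1.StatefulSet("web", "default").
		WithLabels(map[string]string{"app": "web"})

	// GetName is one of the getters added by this regeneration; it returns the
	// pointer held in the embedded ObjectMetaApplyConfiguration.
	fmt.Println(*sts.GetName()) // web
}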
type StatefulSetConditionApplyConfiguration struct { - Type *v1beta1.StatefulSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta1.StatefulSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *StatefulSetConditionApplyConfiguration) WithType(value v1beta1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { +func (b *StatefulSetConditionApplyConfiguration) WithType(value appsv1beta1.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go index 8f349a2d2..2e3049e5e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use // with apply. type StatefulSetOrdinalsApplyConfiguration struct { Start *int32 `json:"start,omitempty"` } -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with // apply. func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { return &StatefulSetOrdinalsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go index 0048724c0..f9b6fbd88 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -19,17 +19,17 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { - WhenDeleted *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` - WhenScaled *v1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` + WhenDeleted *appsv1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *appsv1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} @@ -38,7 +38,7 @@ func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVol // WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenDeleted field is set to the value of the last call. -func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value appsv1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenDeleted = &value return b } @@ -46,7 +46,7 @@ func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) With // WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenScaled field is set to the value of the last call. 
-func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value appsv1beta1.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenScaled = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go index 1eb1ba7b0..137c7243b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go @@ -19,12 +19,12 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" corev1 "k8s.io/client-go/applyconfigurations/core/v1" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type StatefulSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` ServiceName *string `json:"serviceName,omitempty"` - PodManagementPolicy *v1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + PodManagementPolicy *appsv1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct { Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} @@ -94,7 +94,7 @@ func (b *StatefulSetSpecApplyConfiguration) WithServiceName(value string) *State // WithPodManagementPolicy sets the PodManagementPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodManagementPolicy field is set to the value of the last call. 
-func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value v1beta1.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration { +func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value appsv1beta1.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration { b.PodManagementPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go index f31066b6f..27ae7540f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go index 895c1e7f8..24154f7af 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { - Type *v1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"` + Type *appsv1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. 
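WithPodManagementPolicy and the other typed setters keep their signatures against k8s.io/api/apps/v1beta1; only the generated file's internal import alias moves from v1beta1 to appsv1beta1. A minimal sketch of the spec builder follows; the aliases and values are illustrative assumptions, not taken from this diff.

package main

import (
	"fmt"

	apiappsv1beta1 "k8s.io/api/apps/v1beta1"
	appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1"
)

func main() {
	// Build a partial spec with the setters shown in the hunks above.
	spec := appsv1beta1.StatefulSetSpec().
		WithServiceName("web-headless").
		WithPodManagementPolicy(apiappsv1beta1.ParallelPodManagement)

	fmt.Println(*spec.ServiceName, *spec.PodManagementPolicy)
}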
func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} @@ -38,7 +38,7 @@ func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value v1beta1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { +func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value appsv1beta1.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go index 4abab6851..6facd5384 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ControllerRevisionApplyConfiguration represents an declarative configuration of the ControllerRevision type for use +// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use // with apply. type ControllerRevisionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type ControllerRevisionApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// ControllerRevision constructs an declarative configuration of the ControllerRevision type for use with +// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with // apply. func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfiguration { b := &ControllerRevisionApplyConfiguration{} @@ -59,18 +59,18 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractControllerRevision(controllerRevision *v1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { +func ExtractControllerRevision(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { return extractControllerRevision(controllerRevision, fieldManager, "") } // ExtractControllerRevisionStatus is the same as ExtractControllerRevision except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractControllerRevisionStatus(controllerRevision *v1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { +func ExtractControllerRevisionStatus(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) { return extractControllerRevision(controllerRevision, fieldManager, "status") } -func extractControllerRevision(controllerRevision *v1beta2.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { +func extractControllerRevision(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) { b := &ControllerRevisionApplyConfiguration{} err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta2.ControllerRevision"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractControllerRevision(controllerRevision *v1beta2.ControllerRevision, f // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *ControllerRevisionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *ControllerRevisionApplyConfiguration) WithKind(value string) *Controlle // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *ControllerRevisionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *ControllerRevisionApplyConfiguration) WithAPIVersion(value string) *Con // If called multiple times, the Name field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *ControllerRevisionApplyConfiguration) WithName(value string) *Controlle // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *ControllerRevisionApplyConfiguration) WithNamespace(value string) *Cont // If called multiple times, the UID field is set to the value of the last call. 
func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *ControllerRevisionApplyConfiguration) WithUID(value types.UID) *Control // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,7 +150,7 @@ func (b *ControllerRevisionApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -159,7 +159,7 @@ func (b *ControllerRevisionApplyConfiguration) WithGeneration(value int64) *Cont // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -168,7 +168,7 @@ func (b *ControllerRevisionApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *ControllerRevisionApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *ControllerRevisionApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ControllerRevisionApplyConfiguration) WithAnnotations(entries map[string]string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -220,7 +220,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,7 +231,7 @@ func (b *ControllerRevisionApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ControllerRevisionApplyConfiguration) WithFinalizers(values ...string) *ControllerRevisionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -257,3 +257,9 @@ func (b *ControllerRevisionApplyConfiguration) WithRevision(value int64) *Contro b.Revision = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ControllerRevisionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go index 906a8ca46..89a2ebd4b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -87,7 +87,7 @@ func extractDaemonSet(daemonSet *appsv1beta2.DaemonSet, fieldManager string, sub // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApp // If called multiple times, the Name field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetA // If called multiple times, the Namespace field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl // If called multiple times, the UID field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonS // If called multiple times, the Generation field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetAppl // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) * // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *Dae // overwriting an existing map entries in Annotations field with the same key. 
func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go index 55dc1f487..0aa47cf0a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. type DaemonSetConditionApplyConfiguration struct { - Type *v1beta2.DaemonSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta2.DaemonSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. 
func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DaemonSetConditionApplyConfiguration) WithType(value v1beta2.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { +func (b *DaemonSetConditionApplyConfiguration) WithType(value appsv1beta2.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go index 48137819a..74d8bf51c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go index 29cda7a90..6b0fda895 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. 
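The same alias rename (v1beta2 to appsv1beta2) runs through the v1beta2 condition builders, and call sites are untouched because the setter still takes the k8s.io/api/apps/v1beta2 type. A short sketch, with the condition value assumed purely for illustration:

package main

import (
	"fmt"

	apiappsv1beta2 "k8s.io/api/apps/v1beta2"
	appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2"
)

func main() {
	// DaemonSetCondition() and WithType are the constructor and setter shown above.
	cond := appsv1beta2.DaemonSetCondition().
		WithType(apiappsv1beta2.DaemonSetConditionType("Available"))

	fmt.Println(*cond.Type) // Available
}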
func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go index 07fc07fc6..2cee58cf3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { - Type *v1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"` + Type *appsv1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} @@ -38,7 +38,7 @@ func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value v1beta2.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { +func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value appsv1beta2.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go index 7e39e6751..8948cc606 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. 
func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -87,7 +87,7 @@ func extractDeployment(deployment *appsv1beta2.Deployment, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA // If called multiple times, the Name field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen // If called multiple times, the Namespace field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp // If called multiple times, the UID field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy // If called multiple times, the Generation field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De // overwriting an existing map entries in Annotations field with the same key. 
func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go index 852a2c683..f404dd9df 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go @@ -19,23 +19,23 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. 
type DeploymentConditionApplyConfiguration struct { - Type *v1beta2.DeploymentConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta2.DeploymentConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} @@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentConditionApplyConfiguration) WithType(value v1beta2.DeploymentConditionType) *DeploymentConditionApplyConfiguration { +func (b *DeploymentConditionApplyConfiguration) WithType(value appsv1beta2.DeploymentConditionType) *DeploymentConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go index 6898941ac..1b55130c6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -36,7 +36,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go index fe99ca991..5fa912233 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta2 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go index 8714e153e..6347a3a39 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { - Type *v1beta2.DeploymentStrategyType `json:"type,omitempty"` + Type *appsv1beta2.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} @@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DeploymentStrategyApplyConfiguration) WithType(value v1beta2.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { +func (b *DeploymentStrategyApplyConfiguration) WithType(value appsv1beta2.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go index d9303e1b2..679416b21 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -87,7 +87,7 @@ func extractReplicaSet(replicaSet *appsv1beta2.ReplicaSet, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetA // If called multiple times, the Name field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSe // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp // If called multiple times, the UID field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *Replic // If called multiple times, the Generation field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *Re // overwriting an existing map entries in Annotations field with the same key. func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go index 47776bfa2..3d8cd3632 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. 
type ReplicaSetConditionApplyConfiguration struct { - Type *v1beta2.ReplicaSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta2.ReplicaSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *ReplicaSetConditionApplyConfiguration) WithType(value v1beta2.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { +func (b *ReplicaSetConditionApplyConfiguration) WithType(value appsv1beta2.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go index 14d548169..1d77b9e0f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go index 7c1b8fb29..d3c92e274 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta2 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go index b586b678d..ad6021d37 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go index 78ef21008..b0cc3a4ee 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. 
func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go index 4a12e51c0..0046c264b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateStatefulSetStrategyApplyConfiguration represents an declarative configuration of the RollingUpdateStatefulSetStrategy type for use +// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use // with apply. type RollingUpdateStatefulSetStrategyApplyConfiguration struct { Partition *int32 `json:"partition,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` } -// RollingUpdateStatefulSetStrategyApplyConfiguration constructs an declarative configuration of the RollingUpdateStatefulSetStrategy type for use with +// RollingUpdateStatefulSetStrategyApplyConfiguration constructs a declarative configuration of the RollingUpdateStatefulSetStrategy type for use with // apply. func RollingUpdateStatefulSetStrategy() *RollingUpdateStatefulSetStrategyApplyConfiguration { return &RollingUpdateStatefulSetStrategyApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go index 0e89668cb..27067b6aa 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go @@ -19,22 +19,22 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *v1beta2.ScaleSpec `json:"spec,omitempty"` - Status *v1beta2.ScaleStatus `json:"status,omitempty"` + Spec *appsv1beta2.ScaleSpec `json:"spec,omitempty"` + Status *appsv1beta2.ScaleStatus `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { b := &ScaleApplyConfiguration{} @@ -47,7 +47,7 @@ func Scale() *ScaleApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -55,7 +55,7 @@ func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -64,7 +64,7 @@ func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -73,7 +73,7 @@ func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -82,7 +82,7 @@ func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -91,7 +91,7 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -100,7 +100,7 @@ func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -109,7 +109,7 @@ func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -118,7 +118,7 @@ func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfigu // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -127,7 +127,7 @@ func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Scal // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -136,7 +136,7 @@ func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Scal // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -146,11 +146,11 @@ func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *S // overwriting an existing map entries in Labels field with the same key. func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -161,11 +161,11 @@ func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleAp // overwriting an existing map entries in Annotations field with the same key. 
func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -179,7 +179,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -190,7 +190,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -204,7 +204,7 @@ func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithSpec(value v1beta2.ScaleSpec) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithSpec(value appsv1beta2.ScaleSpec) *ScaleApplyConfiguration { b.Spec = &value return b } @@ -212,7 +212,13 @@ func (b *ScaleApplyConfiguration) WithSpec(value v1beta2.ScaleSpec) *ScaleApplyC // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithStatus(value appsv1beta2.ScaleStatus) *ScaleApplyConfiguration { b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go index 03d5428b4..933072421 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetApplyConfiguration represents an declarative configuration of the StatefulSet type for use +// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use // with apply. 
type StatefulSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StatefulSetApplyConfiguration struct { Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"` } -// StatefulSet constructs an declarative configuration of the StatefulSet type for use with +// StatefulSet constructs a declarative configuration of the StatefulSet type for use with // apply. func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration { b := &StatefulSetApplyConfiguration{} @@ -87,7 +87,7 @@ func extractStatefulSet(statefulSet *appsv1beta2.StatefulSet, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *StatefulSetApplyConfiguration) WithKind(value string) *StatefulSetApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *StatefulSetApplyConfiguration) WithAPIVersion(value string) *StatefulSe // If called multiple times, the Name field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *StatefulSetApplyConfiguration) WithName(value string) *StatefulSetApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *StatefulSetApplyConfiguration) WithGenerateName(value string) *Stateful // If called multiple times, the Namespace field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *StatefulSetApplyConfiguration) WithNamespace(value string) *StatefulSet // If called multiple times, the UID field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *StatefulSetApplyConfiguration) WithUID(value types.UID) *StatefulSetApp // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *StatefulSetApplyConfiguration) WithResourceVersion(value string) *State // If called multiple times, the Generation field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *StatefulSetApplyConfiguration) WithGeneration(value int64) *StatefulSet // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *StatefulSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *StatefulSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *StatefulSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *StatefulSetApplyConfiguration) WithLabels(entries map[string]string) *S // overwriting an existing map entries in Annotations field with the same key. 
func (b *StatefulSetApplyConfiguration) WithAnnotations(entries map[string]string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *StatefulSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *StatefulSetApplyConfiguration) WithFinalizers(values ...string) *StatefulSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *StatefulSetApplyConfiguration) WithStatus(value *StatefulSetStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StatefulSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go index c33e68b5e..50bef2003 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// StatefulSetConditionApplyConfiguration represents an declarative configuration of the StatefulSetCondition type for use +// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use // with apply. type StatefulSetConditionApplyConfiguration struct { - Type *v1beta2.StatefulSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *appsv1beta2.StatefulSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// StatefulSetConditionApplyConfiguration constructs an declarative configuration of the StatefulSetCondition type for use with +// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with // apply. 
func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { return &StatefulSetConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func StatefulSetCondition() *StatefulSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *StatefulSetConditionApplyConfiguration) WithType(value v1beta2.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { +func (b *StatefulSetConditionApplyConfiguration) WithType(value appsv1beta2.StatefulSetConditionType) *StatefulSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go index c586da775..a899243a5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// StatefulSetOrdinalsApplyConfiguration represents an declarative configuration of the StatefulSetOrdinals type for use +// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use // with apply. type StatefulSetOrdinalsApplyConfiguration struct { Start *int32 `json:"start,omitempty"` } -// StatefulSetOrdinalsApplyConfiguration constructs an declarative configuration of the StatefulSetOrdinals type for use with +// StatefulSetOrdinalsApplyConfiguration constructs a declarative configuration of the StatefulSetOrdinals type for use with // apply. func StatefulSetOrdinals() *StatefulSetOrdinalsApplyConfiguration { return &StatefulSetOrdinalsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go index aee27803d..d4d139ae3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" ) -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use // with apply. 
type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct { - WhenDeleted *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` - WhenScaled *v1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` + WhenDeleted *appsv1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"` + WhenScaled *appsv1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"` } -// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs an declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with +// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with // apply. func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { return &StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration{} @@ -38,7 +38,7 @@ func StatefulSetPersistentVolumeClaimRetentionPolicy() *StatefulSetPersistentVol // WithWhenDeleted sets the WhenDeleted field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenDeleted field is set to the value of the last call. -func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value v1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenDeleted(value appsv1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenDeleted = &value return b } @@ -46,7 +46,7 @@ func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) With // WithWhenScaled sets the WhenScaled field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenScaled field is set to the value of the last call. -func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value v1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { +func (b *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration) WithWhenScaled(value appsv1beta2.PersistentVolumeClaimRetentionPolicyType) *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration { b.WhenScaled = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go index b6165fbd9..952ca0a81 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go @@ -19,12 +19,12 @@ limitations under the License. 
package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" corev1 "k8s.io/client-go/applyconfigurations/core/v1" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StatefulSetSpecApplyConfiguration represents an declarative configuration of the StatefulSetSpec type for use +// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use // with apply. type StatefulSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type StatefulSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"` ServiceName *string `json:"serviceName,omitempty"` - PodManagementPolicy *v1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` + PodManagementPolicy *appsv1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"` RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` @@ -40,7 +40,7 @@ type StatefulSetSpecApplyConfiguration struct { Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"` } -// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with +// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with // apply. func StatefulSetSpec() *StatefulSetSpecApplyConfiguration { return &StatefulSetSpecApplyConfiguration{} @@ -94,7 +94,7 @@ func (b *StatefulSetSpecApplyConfiguration) WithServiceName(value string) *State // WithPodManagementPolicy sets the PodManagementPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodManagementPolicy field is set to the value of the last call. -func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value v1beta2.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration { +func (b *StatefulSetSpecApplyConfiguration) WithPodManagementPolicy(value appsv1beta2.PodManagementPolicyType) *StatefulSetSpecApplyConfiguration { b.PodManagementPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go index 63835904c..a647cd7d2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// StatefulSetStatusApplyConfiguration represents an declarative configuration of the StatefulSetStatus type for use +// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use // with apply. 
type StatefulSetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type StatefulSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` } -// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with +// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with // apply. func StatefulSetStatus() *StatefulSetStatusApplyConfiguration { return &StatefulSetStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go index 03c291491..f93db4f79 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" ) -// StatefulSetUpdateStrategyApplyConfiguration represents an declarative configuration of the StatefulSetUpdateStrategy type for use +// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use // with apply. type StatefulSetUpdateStrategyApplyConfiguration struct { - Type *v1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"` + Type *appsv1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"` } -// StatefulSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the StatefulSetUpdateStrategy type for use with +// StatefulSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the StatefulSetUpdateStrategy type for use with // apply. func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { return &StatefulSetUpdateStrategyApplyConfiguration{} @@ -38,7 +38,7 @@ func StatefulSetUpdateStrategy() *StatefulSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value v1beta2.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { +func (b *StatefulSetUpdateStrategyApplyConfiguration) WithType(value appsv1beta2.StatefulSetUpdateStrategyType) *StatefulSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go index 0eac22692..51ec66501 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go index 38fa20584..8c9f08a73 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apiautoscalingv1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"` - Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"` + Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -58,18 +58,18 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { +func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "") } // ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { +func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) { return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status") } -func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) { +func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) { b := &HorizontalPodAutoscalerApplyConfiguration{} err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.Ho // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { +func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { +func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[ // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration { +func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *HorizontalPodAutoscalerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go index 561ac60d3..0ca2f84ea 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -27,7 +27,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. 
func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go index abc2e05aa..8575214e1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"` - CurrentReplicas *int32 `json:"currentReplicas,omitempty"` - DesiredReplicas *int32 `json:"desiredReplicas,omitempty"` - CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty"` + CurrentReplicas *int32 `json:"currentReplicas,omitempty"` + DesiredReplicas *int32 `json:"desiredReplicas,omitempty"` + CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} @@ -49,7 +49,7 @@ func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithObservedGeneration // WithLastScaleTime sets the LastScaleTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastScaleTime field is set to the value of the last call. -func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithLastScaleTime(value v1.Time) *HorizontalPodAutoscalerStatusApplyConfiguration { +func (b *HorizontalPodAutoscalerStatusApplyConfiguration) WithLastScaleTime(value metav1.Time) *HorizontalPodAutoscalerStatusApplyConfiguration { b.LastScaleTime = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go index f77092280..13ae8e142 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go @@ -19,21 +19,21 @@ limitations under the License. 
package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ScaleSpecApplyConfiguration `json:"spec,omitempty"` - Status *ScaleStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ScaleSpecApplyConfiguration `json:"spec,omitempty"` + Status *ScaleStatusApplyConfiguration `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { b := &ScaleApplyConfiguration{} @@ -46,7 +46,7 @@ func Scale() *ScaleApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -54,7 +54,7 @@ func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -63,7 +63,7 @@ func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -72,7 +72,7 @@ func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -81,7 +81,7 @@ func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -90,7 +90,7 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu // If called multiple times, the UID field is set to the value of the last call. 
func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -99,7 +99,7 @@ func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -108,25 +108,25 @@ func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -135,7 +135,7 @@ func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Scal // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -145,11 +145,11 @@ func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *S // overwriting an existing map entries in Labels field with the same key. 
func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -160,11 +160,11 @@ func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleAp // overwriting an existing map entries in Annotations field with the same key. func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -172,13 +172,13 @@ func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *Sc // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -189,14 +189,14 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -215,3 +215,9 @@ func (b *ScaleApplyConfiguration) WithStatus(value *ScaleStatusApplyConfiguratio b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go index 2339a8fef..025004ba5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ScaleSpecApplyConfiguration represents an declarative configuration of the ScaleSpec type for use +// ScaleSpecApplyConfiguration represents a declarative configuration of the ScaleSpec type for use // with apply. type ScaleSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` } -// ScaleSpecApplyConfiguration constructs an declarative configuration of the ScaleSpec type for use with +// ScaleSpecApplyConfiguration constructs a declarative configuration of the ScaleSpec type for use with // apply. func ScaleSpec() *ScaleSpecApplyConfiguration { return &ScaleSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go index 81c8d1b30..51f96d235 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ScaleStatusApplyConfiguration represents an declarative configuration of the ScaleStatus type for use +// ScaleStatusApplyConfiguration represents a declarative configuration of the ScaleStatus type for use // with apply. type ScaleStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` Selector *string `json:"selector,omitempty"` } -// ScaleStatusApplyConfiguration constructs an declarative configuration of the ScaleStatus type for use with +// ScaleStatusApplyConfiguration constructs a declarative configuration of the ScaleStatus type for use with // apply. func ScaleStatus() *ScaleStatusApplyConfiguration { return &ScaleStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go index 15ef216d1..b6e071e84 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. 
type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go index 34213bca3..46bd2bac2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go index 19045706d..645f09857 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. 
func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go index 11a8eff26..a9c45b31a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go index 3b1a0329b..4280086f5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. 
func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go index 31061de85..99a5cd4bd 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2.Horiz // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go index e6fdabd7c..05750cc21 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go @@ -18,14 +18,14 @@ limitations under the License. 
package v2 -// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use // with apply. type HorizontalPodAutoscalerBehaviorApplyConfiguration struct { ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"` ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"` } -// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with // apply. func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration { return &HorizontalPodAutoscalerBehaviorApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go index c020eccd3..25ea39039 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go @@ -19,22 +19,22 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { - Type *v2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *autoscalingv2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyCo // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value autoscalingv2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go index c36bc3f22..e34ababc5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -28,7 +28,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go index d4d551df8..f1a2c3f4e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. 
func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go index 139f0fb5c..f89185c57 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go @@ -19,18 +19,18 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" ) -// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use +// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use // with apply. type HPAScalingPolicyApplyConfiguration struct { - Type *v2.HPAScalingPolicyType `json:"type,omitempty"` - Value *int32 `json:"value,omitempty"` - PeriodSeconds *int32 `json:"periodSeconds,omitempty"` + Type *autoscalingv2.HPAScalingPolicyType `json:"type,omitempty"` + Value *int32 `json:"value,omitempty"` + PeriodSeconds *int32 `json:"periodSeconds,omitempty"` } -// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with +// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with // apply. func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { return &HPAScalingPolicyApplyConfiguration{} @@ -39,7 +39,7 @@ func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HPAScalingPolicyApplyConfiguration) WithType(value v2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration { +func (b *HPAScalingPolicyApplyConfiguration) WithType(value autoscalingv2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go index e768076aa..6a6a2655f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go @@ -19,18 +19,18 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" ) -// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use +// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // with apply. 
type HPAScalingRulesApplyConfiguration struct { StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` - SelectPolicy *v2.ScalingPolicySelect `json:"selectPolicy,omitempty"` + SelectPolicy *autoscalingv2.ScalingPolicySelect `json:"selectPolicy,omitempty"` Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` } -// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with +// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with // apply. func HPAScalingRules() *HPAScalingRulesApplyConfiguration { return &HPAScalingRulesApplyConfiguration{} @@ -47,7 +47,7 @@ func (b *HPAScalingRulesApplyConfiguration) WithStabilizationWindowSeconds(value // WithSelectPolicy sets the SelectPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SelectPolicy field is set to the value of the last call. -func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value v2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration { +func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value autoscalingv2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration { b.SelectPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go index 312ad3ddd..2f99f7d0b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use +// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use // with apply. type MetricIdentifierApplyConfiguration struct { Name *string `json:"name,omitempty"` Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with +// MetricIdentifierApplyConfiguration constructs a declarative configuration of the MetricIdentifier type for use with // apply. func MetricIdentifier() *MetricIdentifierApplyConfiguration { return &MetricIdentifierApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go index 094ead6c1..282b84a44 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go @@ -19,13 +19,13 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. 
type MetricSpecApplyConfiguration struct { - Type *v2.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} @@ -42,7 +42,7 @@ func MetricSpec() *MetricSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricSpecApplyConfiguration) WithType(value v2.MetricSourceType) *MetricSpecApplyConfiguration { +func (b *MetricSpecApplyConfiguration) WithType(value autoscalingv2.MetricSourceType) *MetricSpecApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go index c65ad446f..f1204824e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go @@ -19,13 +19,13 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { - Type *v2.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} @@ -42,7 +42,7 @@ func MetricStatus() *MetricStatusApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *MetricStatusApplyConfiguration) WithType(value v2.MetricSourceType) *MetricStatusApplyConfiguration { +func (b *MetricStatusApplyConfiguration) WithType(value autoscalingv2.MetricSourceType) *MetricStatusApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go index f301e4d2b..13d2e9365 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go @@ -19,20 +19,20 @@ limitations under the License. package v2 import ( - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use +// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use // with apply. type MetricTargetApplyConfiguration struct { - Type *v2.MetricTargetType `json:"type,omitempty"` - Value *resource.Quantity `json:"value,omitempty"` - AverageValue *resource.Quantity `json:"averageValue,omitempty"` - AverageUtilization *int32 `json:"averageUtilization,omitempty"` + Type *autoscalingv2.MetricTargetType `json:"type,omitempty"` + Value *resource.Quantity `json:"value,omitempty"` + AverageValue *resource.Quantity `json:"averageValue,omitempty"` + AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with +// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with // apply. func MetricTarget() *MetricTargetApplyConfiguration { return &MetricTargetApplyConfiguration{} @@ -41,7 +41,7 @@ func MetricTarget() *MetricTargetApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricTargetApplyConfiguration) WithType(value v2.MetricTargetType) *MetricTargetApplyConfiguration { +func (b *MetricTargetApplyConfiguration) WithType(value autoscalingv2.MetricTargetType) *MetricTargetApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go index e8474b189..59732548b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use +// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use // with apply. 
type MetricValueStatusApplyConfiguration struct { Value *resource.Quantity `json:"value,omitempty"` @@ -30,7 +30,7 @@ type MetricValueStatusApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with +// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with // apply. func MetricValueStatus() *MetricValueStatusApplyConfiguration { return &MetricValueStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go index a9482565e..2391fa5c2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. type ObjectMetricSourceApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go index 70ba43bed..9ffd0c180 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v2 -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. type ObjectMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricStatusApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. 
func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go index 0a7a5c259..28a35a2ae 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go index 865fcc33e..4614282ce 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2 -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go index 25a065fef..ffc9042b9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. 
type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go index fb5625afa..0fdbfcb55 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go index 2594e8e07..f41c5af10 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. 
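Editor's note: the autoscaling/v2 hunks above only rename the import alias (`v2` → `autoscalingv2`) and fix the doc-comment article; the generated builder API itself is unchanged. A minimal sketch of how these typed setters chain together, assuming a hypothetical caller (the variable names and the 80% utilization value are illustrative, not part of the vendored code):

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
	applyautoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2"
)

func main() {
	// Target 80% average CPU utilization; WithType now takes the renamed
	// autoscalingv2.MetricTargetType rather than the old "v2" alias.
	cpuTarget := applyautoscalingv2.MetricTarget().
		WithType(autoscalingv2.UtilizationMetricType).
		WithAverageUtilization(80)

	// A resource metric spec built from the same chained setters shown in
	// the hunks above; if a setter is called twice, the last call wins.
	metric := applyautoscalingv2.MetricSpec().
		WithType(autoscalingv2.ResourceMetricSourceType).
		WithResource(applyautoscalingv2.ResourceMetricSource().
			WithName(corev1.ResourceCPU).
			WithTarget(cpuTarget))

	fmt.Println(*metric.Type) // "Resource"
}
```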
func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go index ae897237c..4cd56eea3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -32,7 +32,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go index fe3d15e86..f03261612 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta1 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. 
func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go index c118e6ca1..8dce4529d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -32,7 +32,7 @@ type ExternalMetricSourceApplyConfiguration struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go index ab771214e..4034d7e55 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -32,7 +32,7 @@ type ExternalMetricStatusApplyConfiguration struct { CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. 
func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go index 66b8d5f73..51ae84901 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta1. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go index de3e6ea5c..445cd55ae 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go @@ -19,22 +19,22 @@ limitations under the License. 
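Editor's note: besides the alias rename, the regenerated v2beta1 HorizontalPodAutoscaler builder above now qualifies the embedded metadata explicitly (`b.ObjectMetaApplyConfiguration.Name` instead of the promoted `b.Name`) and gains a `GetName` accessor. A minimal usage sketch built only from the constructor and setters shown in that hunk (the name, namespace, and label values are illustrative):

```go
package main

import (
	"fmt"

	applyautoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1"
)

func main() {
	// The constructor pre-populates name and namespace (plus kind/apiVersion);
	// WithLabels merges entries into the embedded ObjectMeta configuration.
	hpa := applyautoscalingv2beta1.HorizontalPodAutoscaler("web", "default").
		WithLabels(map[string]string{"app": "web"})

	// New in this regeneration: GetName reads back through the explicitly
	// qualified ObjectMetaApplyConfiguration field.
	fmt.Println(*hpa.GetName()) // "web"
}
```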
package v2beta1 import ( - v2beta1 "k8s.io/api/autoscaling/v2beta1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. type HorizontalPodAutoscalerConditionApplyConfiguration struct { - Type *v2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *autoscalingv2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyCo // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2beta1.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value autoscalingv2beta1.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go index 761d94a85..6f111ceaf 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta1 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. 
type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -27,7 +27,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go index 95ec5be43..391b57725 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go index 70beec84e..3a5faa3b2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go @@ -19,13 +19,13 @@ limitations under the License. package v2beta1 import ( - v2beta1 "k8s.io/api/autoscaling/v2beta1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. 
type MetricSpecApplyConfiguration struct { - Type *v2beta1.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2beta1.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} @@ -42,7 +42,7 @@ func MetricSpec() *MetricSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricSpecApplyConfiguration) WithType(value v2beta1.MetricSourceType) *MetricSpecApplyConfiguration { +func (b *MetricSpecApplyConfiguration) WithType(value autoscalingv2beta1.MetricSourceType) *MetricSpecApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go index b03ea2f9e..f281e182d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go @@ -19,13 +19,13 @@ limitations under the License. package v2beta1 import ( - v2beta1 "k8s.io/api/autoscaling/v2beta1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { - Type *v2beta1.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2beta1.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} @@ -42,7 +42,7 @@ func MetricStatus() *MetricStatusApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *MetricStatusApplyConfiguration) WithType(value v2beta1.MetricSourceType) *MetricStatusApplyConfiguration { +func (b *MetricStatusApplyConfiguration) WithType(value autoscalingv2beta1.MetricSourceType) *MetricStatusApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go index 07d467972..a9e2eead4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. type ObjectMetricSourceApplyConfiguration struct { Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"` @@ -33,7 +33,7 @@ type ObjectMetricSourceApplyConfiguration struct { AverageValue *resource.Quantity `json:"averageValue,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go index b5e0d3e3d..4d3be8df6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. type ObjectMetricStatusApplyConfiguration struct { Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"` @@ -33,7 +33,7 @@ type ObjectMetricStatusApplyConfiguration struct { AverageValue *resource.Quantity `json:"averageValue,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. 
func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go index a4122b898..cfcd752e2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -31,7 +31,7 @@ type PodsMetricSourceApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go index d6172011b..f7a7777fd 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { MetricName *string `json:"metricName,omitempty"` @@ -31,7 +31,7 @@ type PodsMetricStatusApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. 
func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go index 804f3f492..ad97d83c3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -31,7 +31,7 @@ type ResourceMetricSourceApplyConfiguration struct { TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go index 5fdc29c13..78fbeaad0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go @@ -23,7 +23,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -31,7 +31,7 @@ type ResourceMetricStatusApplyConfiguration struct { CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. 
func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go index aa334744e..1050165ea 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricSourceApplyConfiguration represents an declarative configuration of the ContainerResourceMetricSource type for use +// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use // with apply. type ContainerResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricSourceApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricSource type for use with +// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with // apply. func ContainerResourceMetricSource() *ContainerResourceMetricSourceApplyConfiguration { return &ContainerResourceMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go index bf0822a06..708f68bc6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// ContainerResourceMetricStatusApplyConfiguration represents an declarative configuration of the ContainerResourceMetricStatus type for use +// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use // with apply. type ContainerResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` @@ -30,7 +30,7 @@ type ContainerResourceMetricStatusApplyConfiguration struct { Container *string `json:"container,omitempty"` } -// ContainerResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ContainerResourceMetricStatus type for use with +// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with // apply. 
func ContainerResourceMetricStatus() *ContainerResourceMetricStatusApplyConfiguration { return &ContainerResourceMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go index 2903629bc..c281084b1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// CrossVersionObjectReferenceApplyConfiguration represents an declarative configuration of the CrossVersionObjectReference type for use +// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use // with apply. type CrossVersionObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -26,7 +26,7 @@ type CrossVersionObjectReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` } -// CrossVersionObjectReferenceApplyConfiguration constructs an declarative configuration of the CrossVersionObjectReference type for use with +// CrossVersionObjectReferenceApplyConfiguration constructs a declarative configuration of the CrossVersionObjectReference type for use with // apply. func CrossVersionObjectReference() *CrossVersionObjectReferenceApplyConfiguration { return &CrossVersionObjectReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go index 80053a6b3..d34ca1149 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// ExternalMetricSourceApplyConfiguration represents an declarative configuration of the ExternalMetricSource type for use +// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use // with apply. type ExternalMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ExternalMetricSourceApplyConfiguration constructs an declarative configuration of the ExternalMetricSource type for use with +// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with // apply. func ExternalMetricSource() *ExternalMetricSourceApplyConfiguration { return &ExternalMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go index 71ac35adb..be29e607f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. 
package v2beta2 -// ExternalMetricStatusApplyConfiguration represents an declarative configuration of the ExternalMetricStatus type for use +// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use // with apply. type ExternalMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ExternalMetricStatusApplyConfiguration constructs an declarative configuration of the ExternalMetricStatus type for use with +// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with // apply. func ExternalMetricStatus() *ExternalMetricStatusApplyConfiguration { return &ExternalMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go index 1c750cb16..19794ff42 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// HorizontalPodAutoscalerApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscaler type for use +// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use // with apply. type HorizontalPodAutoscalerApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type HorizontalPodAutoscalerApplyConfiguration struct { Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"` } -// HorizontalPodAutoscaler constructs an declarative configuration of the HorizontalPodAutoscaler type for use with +// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with // apply. func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApplyConfiguration { b := &HorizontalPodAutoscalerApplyConfiguration{} @@ -87,7 +87,7 @@ func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta2. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithKind(value string) *Hori // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. 
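As a usage sketch (not part of the vendored diff): the generated v2beta2 builders above are typically consumed with server-side apply roughly as follows. The clientset wiring, namespace, deployment name, and field-manager string are assumptions for illustration only.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv2beta2ac "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
	"k8s.io/client-go/kubernetes"
)

// applyHPA builds a declarative HorizontalPodAutoscaler configuration and
// server-side-applies it; only the fields set here become owned by "example-manager".
func applyHPA(ctx context.Context, cs kubernetes.Interface) error {
	hpa := autoscalingv2beta2ac.HorizontalPodAutoscaler("web", "default").
		WithLabels(map[string]string{"team": "platform"}).
		WithSpec(autoscalingv2beta2ac.HorizontalPodAutoscalerSpec().
			WithScaleTargetRef(autoscalingv2beta2ac.CrossVersionObjectReference().
				WithAPIVersion("apps/v1").
				WithKind("Deployment").
				WithName("web")).
			WithMinReplicas(2).
			WithMaxReplicas(10))

	_, err := cs.AutoscalingV2beta2().
		HorizontalPodAutoscalers("default").
		Apply(ctx, hpa, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}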
func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithName(value string) *Hori // If called multiple times, the GenerateName field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithUID(value types.UID) *Ho // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithGeneration(value int64) // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithCreationTimestamp(value // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
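An aside on the mechanical b.Name -> b.ObjectMetaApplyConfiguration.Name rewrites in this hunk: behaviour is unchanged, the generator simply stops relying on Go field promotion and spells out the embedded struct. A standalone sketch (the type and field names are illustrative, not taken from client-go) of why the explicit selector is the more robust form:

package example

// Two embedded structs that both happen to carry a Name field.
type objectMeta struct{ Name *string }
type typeMeta struct{ Name *string } // hypothetical clash, purely for illustration

type applyConfig struct {
	typeMeta
	*objectMeta
}

func setName(b *applyConfig, v string) {
	if b.objectMeta == nil {
		b.objectMeta = &objectMeta{}
	}
	// b.Name = &v would not compile here: the promoted selector is ambiguous
	// between typeMeta.Name and objectMeta.Name. The explicit path below stays
	// valid no matter what fields the embedded types gain later.
	b.objectMeta.Name = &v
}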
func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. func (b *HorizontalPodAutoscalerApplyConfiguration) WithAnnotations(entries map[string]string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithOwnerReferences(values . func (b *HorizontalPodAutoscalerApplyConfiguration) WithFinalizers(values ...string) *HorizontalPodAutoscalerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *HorizontalPodAutoscalerApplyConfiguration) WithStatus(value *Horizontal b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *HorizontalPodAutoscalerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go index ec41bfade..e9b1a9fb9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// HorizontalPodAutoscalerBehaviorApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerBehavior type for use +// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use // with apply. type HorizontalPodAutoscalerBehaviorApplyConfiguration struct { ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"` ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"` } -// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerBehavior type for use with +// HorizontalPodAutoscalerBehaviorApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerBehavior type for use with // apply. func HorizontalPodAutoscalerBehavior() *HorizontalPodAutoscalerBehaviorApplyConfiguration { return &HorizontalPodAutoscalerBehaviorApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go index 0f0cae75d..f88869124 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go @@ -19,22 +19,22 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerConditionApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerCondition type for use +// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use // with apply. 
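The newly added GetName accessor gives callers a read path that mirrors the With* write path. A small sketch of how such a getter might be used; the slice-walking helper is illustrative, not part of client-go:

package example

import (
	autoscalingv2beta2ac "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
)

// namesOf collects the names of a batch of apply configurations, tolerating
// entries whose object metadata has not been populated yet (GetName returns nil).
func namesOf(cfgs []*autoscalingv2beta2ac.HorizontalPodAutoscalerApplyConfiguration) []string {
	var names []string
	for _, c := range cfgs {
		if n := c.GetName(); n != nil {
			names = append(names, *n)
		}
	}
	return names
}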
type HorizontalPodAutoscalerConditionApplyConfiguration struct { - Type *v2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *autoscalingv2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// HorizontalPodAutoscalerConditionApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerCondition type for use with +// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with // apply. func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyConfiguration { return &HorizontalPodAutoscalerConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func HorizontalPodAutoscalerCondition() *HorizontalPodAutoscalerConditionApplyCo // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value v2beta2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { +func (b *HorizontalPodAutoscalerConditionApplyConfiguration) WithType(value autoscalingv2beta2.HorizontalPodAutoscalerConditionType) *HorizontalPodAutoscalerConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go index c60adee58..9629e4bd5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// HorizontalPodAutoscalerSpecApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerSpec type for use +// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use // with apply. type HorizontalPodAutoscalerSpecApplyConfiguration struct { ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"` @@ -28,7 +28,7 @@ type HorizontalPodAutoscalerSpecApplyConfiguration struct { Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"` } -// HorizontalPodAutoscalerSpecApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerSpec type for use with +// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with // apply. 
func HorizontalPodAutoscalerSpec() *HorizontalPodAutoscalerSpecApplyConfiguration { return &HorizontalPodAutoscalerSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go index 881a874e5..1eee64505 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// HorizontalPodAutoscalerStatusApplyConfiguration represents an declarative configuration of the HorizontalPodAutoscalerStatus type for use +// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use // with apply. type HorizontalPodAutoscalerStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -33,7 +33,7 @@ type HorizontalPodAutoscalerStatusApplyConfiguration struct { Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"` } -// HorizontalPodAutoscalerStatusApplyConfiguration constructs an declarative configuration of the HorizontalPodAutoscalerStatus type for use with +// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with // apply. func HorizontalPodAutoscalerStatus() *HorizontalPodAutoscalerStatusApplyConfiguration { return &HorizontalPodAutoscalerStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go index 2a535891a..2bbbbddec 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go @@ -19,18 +19,18 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// HPAScalingPolicyApplyConfiguration represents an declarative configuration of the HPAScalingPolicy type for use +// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use // with apply. type HPAScalingPolicyApplyConfiguration struct { - Type *v2beta2.HPAScalingPolicyType `json:"type,omitempty"` - Value *int32 `json:"value,omitempty"` - PeriodSeconds *int32 `json:"periodSeconds,omitempty"` + Type *autoscalingv2beta2.HPAScalingPolicyType `json:"type,omitempty"` + Value *int32 `json:"value,omitempty"` + PeriodSeconds *int32 `json:"periodSeconds,omitempty"` } -// HPAScalingPolicyApplyConfiguration constructs an declarative configuration of the HPAScalingPolicy type for use with +// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with // apply. 
func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { return &HPAScalingPolicyApplyConfiguration{} @@ -39,7 +39,7 @@ func HPAScalingPolicy() *HPAScalingPolicyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HPAScalingPolicyApplyConfiguration) WithType(value v2beta2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration { +func (b *HPAScalingPolicyApplyConfiguration) WithType(value autoscalingv2beta2.HPAScalingPolicyType) *HPAScalingPolicyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go index 57c917b89..92aa449aa 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go @@ -19,18 +19,18 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// HPAScalingRulesApplyConfiguration represents an declarative configuration of the HPAScalingRules type for use +// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use // with apply. type HPAScalingRulesApplyConfiguration struct { - StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` - SelectPolicy *v2beta2.ScalingPolicySelect `json:"selectPolicy,omitempty"` - Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` + StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"` + SelectPolicy *autoscalingv2beta2.ScalingPolicySelect `json:"selectPolicy,omitempty"` + Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"` } -// HPAScalingRulesApplyConfiguration constructs an declarative configuration of the HPAScalingRules type for use with +// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with // apply. func HPAScalingRules() *HPAScalingRulesApplyConfiguration { return &HPAScalingRulesApplyConfiguration{} @@ -47,7 +47,7 @@ func (b *HPAScalingRulesApplyConfiguration) WithStabilizationWindowSeconds(value // WithSelectPolicy sets the SelectPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SelectPolicy field is set to the value of the last call. 
-func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value v2beta2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration { +func (b *HPAScalingRulesApplyConfiguration) WithSelectPolicy(value autoscalingv2beta2.ScalingPolicySelect) *HPAScalingRulesApplyConfiguration { b.SelectPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go index 70cbd4e81..e8b2abb0e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// MetricIdentifierApplyConfiguration represents an declarative configuration of the MetricIdentifier type for use +// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use // with apply. type MetricIdentifierApplyConfiguration struct { Name *string `json:"name,omitempty"` Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` } -// MetricIdentifierApplyConfiguration constructs an declarative configuration of the MetricIdentifier type for use with +// MetricIdentifierApplyConfiguration constructs a declarative configuration of the MetricIdentifier type for use with // apply. func MetricIdentifier() *MetricIdentifierApplyConfiguration { return &MetricIdentifierApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go index 1e7ee1419..3da1617cf 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go @@ -19,13 +19,13 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// MetricSpecApplyConfiguration represents an declarative configuration of the MetricSpec type for use +// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use // with apply. type MetricSpecApplyConfiguration struct { - Type *v2beta2.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2beta2.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"` @@ -33,7 +33,7 @@ type MetricSpecApplyConfiguration struct { External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"` } -// MetricSpecApplyConfiguration constructs an declarative configuration of the MetricSpec type for use with +// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with // apply. func MetricSpec() *MetricSpecApplyConfiguration { return &MetricSpecApplyConfiguration{} @@ -42,7 +42,7 @@ func MetricSpec() *MetricSpecApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
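With the alias rename, callers reference the typed constants through autoscalingv2beta2 rather than a bare v2beta2. A hedged sketch of composing scale-down behaviour from the builders above; the WithScaleDown and WithPolicies setters are the generated helpers for the corresponding fields and sit outside this hunk, so treat their exact shape as an assumption:

package example

import (
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	autoscalingv2beta2ac "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
)

// conservativeScaleDown waits five minutes before scaling down and then removes
// at most four pods per minute, selecting the policy that allows the smallest change.
func conservativeScaleDown() *autoscalingv2beta2ac.HorizontalPodAutoscalerBehaviorApplyConfiguration {
	return autoscalingv2beta2ac.HorizontalPodAutoscalerBehavior().
		WithScaleDown(autoscalingv2beta2ac.HPAScalingRules().
			WithStabilizationWindowSeconds(300).
			WithSelectPolicy(autoscalingv2beta2.MinPolicySelect).
			WithPolicies(
				autoscalingv2beta2ac.HPAScalingPolicy().
					WithType(autoscalingv2beta2.PodsScalingPolicy).
					WithValue(4).
					WithPeriodSeconds(60),
			))
}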
// If called multiple times, the Type field is set to the value of the last call. -func (b *MetricSpecApplyConfiguration) WithType(value v2beta2.MetricSourceType) *MetricSpecApplyConfiguration { +func (b *MetricSpecApplyConfiguration) WithType(value autoscalingv2beta2.MetricSourceType) *MetricSpecApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go index 353ec6d94..b528bd760 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go @@ -19,13 +19,13 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" ) -// MetricStatusApplyConfiguration represents an declarative configuration of the MetricStatus type for use +// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use // with apply. type MetricStatusApplyConfiguration struct { - Type *v2beta2.MetricSourceType `json:"type,omitempty"` + Type *autoscalingv2beta2.MetricSourceType `json:"type,omitempty"` Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"` Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"` Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"` @@ -33,7 +33,7 @@ type MetricStatusApplyConfiguration struct { External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"` } -// MetricStatusApplyConfiguration constructs an declarative configuration of the MetricStatus type for use with +// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with // apply. func MetricStatus() *MetricStatusApplyConfiguration { return &MetricStatusApplyConfiguration{} @@ -42,7 +42,7 @@ func MetricStatus() *MetricStatusApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricStatusApplyConfiguration) WithType(value v2beta2.MetricSourceType) *MetricStatusApplyConfiguration { +func (b *MetricStatusApplyConfiguration) WithType(value autoscalingv2beta2.MetricSourceType) *MetricStatusApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go index fbf006a5a..286856d82 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go @@ -19,20 +19,20 @@ limitations under the License. package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricTargetApplyConfiguration represents an declarative configuration of the MetricTarget type for use +// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use // with apply. 
type MetricTargetApplyConfiguration struct { - Type *v2beta2.MetricTargetType `json:"type,omitempty"` - Value *resource.Quantity `json:"value,omitempty"` - AverageValue *resource.Quantity `json:"averageValue,omitempty"` - AverageUtilization *int32 `json:"averageUtilization,omitempty"` + Type *autoscalingv2beta2.MetricTargetType `json:"type,omitempty"` + Value *resource.Quantity `json:"value,omitempty"` + AverageValue *resource.Quantity `json:"averageValue,omitempty"` + AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricTargetApplyConfiguration constructs an declarative configuration of the MetricTarget type for use with +// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with // apply. func MetricTarget() *MetricTargetApplyConfiguration { return &MetricTargetApplyConfiguration{} @@ -41,7 +41,7 @@ func MetricTarget() *MetricTargetApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *MetricTargetApplyConfiguration) WithType(value v2beta2.MetricTargetType) *MetricTargetApplyConfiguration { +func (b *MetricTargetApplyConfiguration) WithType(value autoscalingv2beta2.MetricTargetType) *MetricTargetApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go index 5796a0b4c..cc409fc28 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// MetricValueStatusApplyConfiguration represents an declarative configuration of the MetricValueStatus type for use +// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use // with apply. type MetricValueStatusApplyConfiguration struct { Value *resource.Quantity `json:"value,omitempty"` @@ -30,7 +30,7 @@ type MetricValueStatusApplyConfiguration struct { AverageUtilization *int32 `json:"averageUtilization,omitempty"` } -// MetricValueStatusApplyConfiguration constructs an declarative configuration of the MetricValueStatus type for use with +// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with // apply. func MetricValueStatus() *MetricValueStatusApplyConfiguration { return &MetricValueStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go index eed31dab6..17b492fa0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go @@ -18,7 +18,7 @@ limitations under the License. 
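A sketch of how the MetricSpec, ResourceMetricSource, and MetricTarget builders in this file set compose into a CPU-utilization metric. WithResource, WithName, WithTarget, and WithAverageUtilization are the generated setters for the fields shown above but are not themselves part of this hunk, so their presence is an assumption here:

package example

import (
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	corev1 "k8s.io/api/core/v1"
	autoscalingv2beta2ac "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
)

// cpuUtilizationMetric targets an average CPU utilization of 80% across pods.
func cpuUtilizationMetric() *autoscalingv2beta2ac.MetricSpecApplyConfiguration {
	return autoscalingv2beta2ac.MetricSpec().
		WithType(autoscalingv2beta2.ResourceMetricSourceType).
		WithResource(autoscalingv2beta2ac.ResourceMetricSource().
			WithName(corev1.ResourceCPU).
			WithTarget(autoscalingv2beta2ac.MetricTarget().
				WithType(autoscalingv2beta2.UtilizationMetricType).
				WithAverageUtilization(80)))
}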
package v2beta2 -// ObjectMetricSourceApplyConfiguration represents an declarative configuration of the ObjectMetricSource type for use +// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use // with apply. type ObjectMetricSourceApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` } -// ObjectMetricSourceApplyConfiguration constructs an declarative configuration of the ObjectMetricSource type for use with +// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with // apply. func ObjectMetricSource() *ObjectMetricSourceApplyConfiguration { return &ObjectMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go index 175e2120d..e87417f2e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v2beta2 -// ObjectMetricStatusApplyConfiguration represents an declarative configuration of the ObjectMetricStatus type for use +// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use // with apply. type ObjectMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` @@ -26,7 +26,7 @@ type ObjectMetricStatusApplyConfiguration struct { DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"` } -// ObjectMetricStatusApplyConfiguration constructs an declarative configuration of the ObjectMetricStatus type for use with +// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with // apply. func ObjectMetricStatus() *ObjectMetricStatusApplyConfiguration { return &ObjectMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go index 036588095..6ecbb1807 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// PodsMetricSourceApplyConfiguration represents an declarative configuration of the PodsMetricSource type for use +// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use // with apply. type PodsMetricSourceApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// PodsMetricSourceApplyConfiguration constructs an declarative configuration of the PodsMetricSource type for use with +// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with // apply. 
func PodsMetricSource() *PodsMetricSourceApplyConfiguration { return &PodsMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go index e6f98be8c..cd1029726 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v2beta2 -// PodsMetricStatusApplyConfiguration represents an declarative configuration of the PodsMetricStatus type for use +// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use // with apply. type PodsMetricStatusApplyConfiguration struct { Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// PodsMetricStatusApplyConfiguration constructs an declarative configuration of the PodsMetricStatus type for use with +// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with // apply. func PodsMetricStatus() *PodsMetricStatusApplyConfiguration { return &PodsMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go index cc8118d5e..c482d75f4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricSourceApplyConfiguration represents an declarative configuration of the ResourceMetricSource type for use +// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use // with apply. type ResourceMetricSourceApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Target *MetricTargetApplyConfiguration `json:"target,omitempty"` } -// ResourceMetricSourceApplyConfiguration constructs an declarative configuration of the ResourceMetricSource type for use with +// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with // apply. func ResourceMetricSource() *ResourceMetricSourceApplyConfiguration { return &ResourceMetricSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go index 0ab56be0f..eb13e90b7 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/api/core/v1" ) -// ResourceMetricStatusApplyConfiguration represents an declarative configuration of the ResourceMetricStatus type for use +// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use // with apply. 
type ResourceMetricStatusApplyConfiguration struct { Name *v1.ResourceName `json:"name,omitempty"` Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"` } -// ResourceMetricStatusApplyConfiguration constructs an declarative configuration of the ResourceMetricStatus type for use with +// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with // apply. func ResourceMetricStatus() *ResourceMetricStatusApplyConfiguration { return &ResourceMetricStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go index 5225a5a07..f96cba1c5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apibatchv1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + batchv1 "k8s.io/api/batch/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CronJobApplyConfiguration represents an declarative configuration of the CronJob type for use +// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use // with apply. type CronJobApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CronJobSpecApplyConfiguration `json:"spec,omitempty"` - Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CronJobSpecApplyConfiguration `json:"spec,omitempty"` + Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` } -// CronJob constructs an declarative configuration of the CronJob type for use with +// CronJob constructs a declarative configuration of the CronJob type for use with // apply. func CronJob(name, namespace string) *CronJobApplyConfiguration { b := &CronJobApplyConfiguration{} @@ -58,18 +58,18 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCronJob(cronJob *apibatchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { +func ExtractCronJob(cronJob *batchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { return extractCronJob(cronJob, fieldManager, "") } // ExtractCronJobStatus is the same as ExtractCronJob except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractCronJobStatus(cronJob *apibatchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { +func ExtractCronJobStatus(cronJob *batchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) { return extractCronJob(cronJob, fieldManager, "status") } -func extractCronJob(cronJob *apibatchv1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) { +func extractCronJob(cronJob *batchv1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) { b := &CronJobApplyConfiguration{} err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1.CronJob"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractCronJob(cronJob *apibatchv1.CronJob, fieldManager string, subresourc // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. 
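For reference, a hedged sketch of the Extract path shown above: starting from a live object, ExtractCronJob recovers only the fields a given field manager owns, which is what makes read-modify-apply loops safe. The clientset plumbing and the WithSuspend setter (a generated helper for the Suspend field, outside this hunk) are assumptions:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
	"k8s.io/client-go/kubernetes"
)

// resuspend re-applies only the fields "cron-manager" already owns, with
// Suspend flipped to true, leaving other managers' fields untouched.
func resuspend(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	live, err := cs.BatchV1().CronJobs(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	cfg, err := batchv1ac.ExtractCronJob(live, "cron-manager")
	if err != nil {
		return err
	}
	if cfg.Spec == nil {
		cfg.Spec = batchv1ac.CronJobSpec()
	}
	cfg.Spec.WithSuspend(true)
	_, err = cs.BatchV1().CronJobs(ns).Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "cron-manager", Force: true})
	return err
}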
func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobAp // If called multiple times, the Generation field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithGeneration(value int64) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *CronJobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CronJobApplyConfiguration { +func (b *CronJobApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CronJobApplyConfiguration { +func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Cr // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJ // overwriting an existing map entries in Annotations field with the same key. 
func (b *CronJobApplyConfiguration) WithAnnotations(entries map[string]string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *CronJobApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CronJobApplyConfiguration { +func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *CronJobApplyConfiguration) WithFinalizers(values ...string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CronJobApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CronJobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go index 22a34dcb6..f53d140d3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" ) -// CronJobSpecApplyConfiguration represents an declarative configuration of the CronJobSpec type for use +// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use // with apply. 
type CronJobSpecApplyConfiguration struct { Schedule *string `json:"schedule,omitempty"` TimeZone *string `json:"timeZone,omitempty"` StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"` - ConcurrencyPolicy *v1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + ConcurrencyPolicy *batchv1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` Suspend *bool `json:"suspend,omitempty"` JobTemplate *JobTemplateSpecApplyConfiguration `json:"jobTemplate,omitempty"` SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"` FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"` } -// CronJobSpecApplyConfiguration constructs an declarative configuration of the CronJobSpec type for use with +// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with // apply. func CronJobSpec() *CronJobSpecApplyConfiguration { return &CronJobSpecApplyConfiguration{} @@ -68,7 +68,7 @@ func (b *CronJobSpecApplyConfiguration) WithStartingDeadlineSeconds(value int64) // WithConcurrencyPolicy sets the ConcurrencyPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ConcurrencyPolicy field is set to the value of the last call. -func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value v1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration { +func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value batchv1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration { b.ConcurrencyPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go index b7cc2bdfb..d29d9e892 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go @@ -20,18 +20,18 @@ package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// CronJobStatusApplyConfiguration represents an declarative configuration of the CronJobStatus type for use +// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use // with apply. type CronJobStatusApplyConfiguration struct { - Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` - LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"` - LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` + Active []corev1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` + LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"` + LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` } -// CronJobStatusApplyConfiguration constructs an declarative configuration of the CronJobStatus type for use with +// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with // apply. 
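Following the alias rename, the typed ConcurrencyPolicy constant is reached through batchv1. A minimal sketch of building the spec above; the history-limit setters are the generated helpers for the fields shown and the job template is omitted for brevity:

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

// nightlyCronJobSpec runs once a day, never overlaps runs, and keeps a short history.
func nightlyCronJobSpec() *batchv1ac.CronJobSpecApplyConfiguration {
	return batchv1ac.CronJobSpec().
		WithSchedule("0 3 * * *").
		WithConcurrencyPolicy(batchv1.ForbidConcurrent).
		WithSuccessfulJobsHistoryLimit(3).
		WithFailedJobsHistoryLimit(1)
}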
func CronJobStatus() *CronJobStatusApplyConfiguration { return &CronJobStatusApplyConfiguration{} @@ -40,7 +40,7 @@ func CronJobStatus() *CronJobStatusApplyConfiguration { // WithActive adds the given value to the Active field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Active field. -func (b *CronJobStatusApplyConfiguration) WithActive(values ...*v1.ObjectReferenceApplyConfiguration) *CronJobStatusApplyConfiguration { +func (b *CronJobStatusApplyConfiguration) WithActive(values ...*corev1.ObjectReferenceApplyConfiguration) *CronJobStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithActive") diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go index fb10ba396..e508f1441 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apibatchv1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + batchv1 "k8s.io/api/batch/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobApplyConfiguration represents an declarative configuration of the Job type for use +// JobApplyConfiguration represents a declarative configuration of the Job type for use // with apply. type JobApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` - Status *JobStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` + Status *JobStatusApplyConfiguration `json:"status,omitempty"` } -// Job constructs an declarative configuration of the Job type for use with +// Job constructs a declarative configuration of the Job type for use with // apply. func Job(name, namespace string) *JobApplyConfiguration { b := &JobApplyConfiguration{} @@ -58,18 +58,18 @@ func Job(name, namespace string) *JobApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractJob(job *apibatchv1.Job, fieldManager string) (*JobApplyConfiguration, error) { +func ExtractJob(job *batchv1.Job, fieldManager string) (*JobApplyConfiguration, error) { return extractJob(job, fieldManager, "") } // ExtractJobStatus is the same as ExtractJob except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractJobStatus(job *apibatchv1.Job, fieldManager string) (*JobApplyConfiguration, error) { +func ExtractJobStatus(job *batchv1.Job, fieldManager string) (*JobApplyConfiguration, error) { return extractJob(job, fieldManager, "status") } -func extractJob(job *apibatchv1.Job, fieldManager string, subresource string) (*JobApplyConfiguration, error) { +func extractJob(job *batchv1.Job, fieldManager string, subresource string) (*JobApplyConfiguration, error) { b := &JobApplyConfiguration{} err := managedfields.ExtractInto(job, internal.Parser().Type("io.k8s.api.batch.v1.Job"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractJob(job *apibatchv1.Job, fieldManager string, subresource string) (* // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *JobApplyConfiguration) WithKind(value string) *JobApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *JobApplyConfiguration) WithKind(value string) *JobApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *JobApplyConfiguration) WithAPIVersion(value string) *JobApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *JobApplyConfiguration) WithAPIVersion(value string) *JobApplyConfigurat // If called multiple times, the Name field is set to the value of the last call. func (b *JobApplyConfiguration) WithName(value string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *JobApplyConfiguration) WithName(value string) *JobApplyConfiguration { // If called multiple times, the GenerateName field is set to the value of the last call. func (b *JobApplyConfiguration) WithGenerateName(value string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *JobApplyConfiguration) WithGenerateName(value string) *JobApplyConfigur // If called multiple times, the Namespace field is set to the value of the last call. func (b *JobApplyConfiguration) WithNamespace(value string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *JobApplyConfiguration) WithNamespace(value string) *JobApplyConfigurati // If called multiple times, the UID field is set to the value of the last call. func (b *JobApplyConfiguration) WithUID(value types.UID) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *JobApplyConfiguration) WithUID(value types.UID) *JobApplyConfiguration // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *JobApplyConfiguration) WithResourceVersion(value string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *JobApplyConfiguration) WithResourceVersion(value string) *JobApplyConfi // If called multiple times, the Generation field is set to the value of the last call. func (b *JobApplyConfiguration) WithGeneration(value int64) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *JobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *JobApplyConfiguration { +func (b *JobApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *JobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobApplyConfiguration { +func (b *JobApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *JobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobApp // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *JobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *JobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Job // overwriting an existing map entries in Labels field with the same key. func (b *JobApplyConfiguration) WithLabels(entries map[string]string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *JobApplyConfiguration) WithLabels(entries map[string]string) *JobApplyC // overwriting an existing map entries in Annotations field with the same key. 
func (b *JobApplyConfiguration) WithAnnotations(entries map[string]string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *JobApplyConfiguration) WithAnnotations(entries map[string]string) *JobA // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *JobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *JobApplyConfiguration { +func (b *JobApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *JobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReference func (b *JobApplyConfiguration) WithFinalizers(values ...string) *JobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *JobApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *JobApplyConfiguration) WithStatus(value *JobStatusApplyConfiguration) * b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *JobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go index 388ca7a1c..fb3c65aba 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// JobConditionApplyConfiguration represents an declarative configuration of the JobCondition type for use +// JobConditionApplyConfiguration represents a declarative configuration of the JobCondition type for use // with apply. 
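Beyond the alias renames, the job.go hunks qualify every field access through the embedded TypeMetaApplyConfiguration/ObjectMetaApplyConfiguration structs and add a GetName accessor. A minimal server-side-apply sketch using that builder; the clientset parameter and the "example-manager" field manager name are assumptions for illustration:

// Sketch only, not part of the vendored diff: server-side apply with the
// Job apply configuration and the newly added GetName accessor.
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
	"k8s.io/client-go/kubernetes"
)

func applyJob(ctx context.Context, clientset kubernetes.Interface) error {
	// A real Job would also need WithTemplate(...); omitted to keep the sketch short.
	job := batchv1ac.Job("demo", "default").
		WithLabels(map[string]string{"app": "demo"}).
		WithSpec(batchv1ac.JobSpec().WithBackoffLimit(2))

	// GetName ensures the ObjectMeta apply configuration exists and returns *string.
	fmt.Println("applying job:", *job.GetName())

	_, err := clientset.BatchV1().Jobs("default").
		Apply(ctx, job, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}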
type JobConditionApplyConfiguration struct { - Type *v1.JobConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *batchv1.JobConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// JobConditionApplyConfiguration constructs an declarative configuration of the JobCondition type for use with +// JobConditionApplyConfiguration constructs a declarative configuration of the JobCondition type for use with // apply. func JobCondition() *JobConditionApplyConfiguration { return &JobConditionApplyConfiguration{} @@ -44,7 +44,7 @@ func JobCondition() *JobConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *JobConditionApplyConfiguration) WithType(value v1.JobConditionType) *JobConditionApplyConfiguration { +func (b *JobConditionApplyConfiguration) WithType(value batchv1.JobConditionType) *JobConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go index bbcff71c8..2104fe113 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobSpecApplyConfiguration represents an declarative configuration of the JobSpec type for use +// JobSpecApplyConfiguration represents a declarative configuration of the JobSpec type for use // with apply. type JobSpecApplyConfiguration struct { Parallelism *int32 `json:"parallelism,omitempty"` @@ -45,7 +45,7 @@ type JobSpecApplyConfiguration struct { ManagedBy *string `json:"managedBy,omitempty"` } -// JobSpecApplyConfiguration constructs an declarative configuration of the JobSpec type for use with +// JobSpecApplyConfiguration constructs a declarative configuration of the JobSpec type for use with // apply. func JobSpec() *JobSpecApplyConfiguration { return &JobSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go index e8e472f8f..071a0153f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// JobStatusApplyConfiguration represents an declarative configuration of the JobStatus type for use +// JobStatusApplyConfiguration represents a declarative configuration of the JobStatus type for use // with apply. 
type JobStatusApplyConfiguration struct { Conditions []JobConditionApplyConfiguration `json:"conditions,omitempty"` @@ -38,7 +38,7 @@ type JobStatusApplyConfiguration struct { Ready *int32 `json:"ready,omitempty"` } -// JobStatusApplyConfiguration constructs an declarative configuration of the JobStatus type for use with +// JobStatusApplyConfiguration constructs a declarative configuration of the JobStatus type for use with // apply. func JobStatus() *JobStatusApplyConfiguration { return &JobStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go index b37a81568..b9666b03d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobTemplateSpecApplyConfiguration represents an declarative configuration of the JobTemplateSpec type for use +// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use // with apply. type JobTemplateSpecApplyConfiguration struct { - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *JobSpecApplyConfiguration `json:"spec,omitempty"` } -// JobTemplateSpecApplyConfiguration constructs an declarative configuration of the JobTemplateSpec type for use with +// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with // apply. func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { return &JobTemplateSpecApplyConfiguration{} @@ -42,7 +42,7 @@ func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { // If called multiple times, the Name field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -51,7 +51,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateS // If called multiple times, the GenerateName field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -60,7 +60,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobT // If called multiple times, the Namespace field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -69,7 +69,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemp // If called multiple times, the UID field is set to the value of the last call. 
func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -78,7 +78,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -87,25 +87,25 @@ func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *J // If called multiple times, the Generation field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithGeneration(value int64) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration { +func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration { +func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -114,7 +114,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.T // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -124,11 +124,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. 
func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -139,11 +139,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string // overwriting an existing map entries in Annotations field with the same key. func (b *JobTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -151,13 +151,13 @@ func (b *JobTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]s // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *JobTemplateSpecApplyConfiguration { +func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -168,14 +168,14 @@ func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow func (b *JobTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *JobTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -186,3 +186,9 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *JobSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *JobTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go index 6da98386c..05a68b3c9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PodFailurePolicyApplyConfiguration represents an declarative configuration of the PodFailurePolicy type for use +// PodFailurePolicyApplyConfiguration represents a declarative configuration of the PodFailurePolicy type for use // with apply. type PodFailurePolicyApplyConfiguration struct { Rules []PodFailurePolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// PodFailurePolicyApplyConfiguration constructs an declarative configuration of the PodFailurePolicy type for use with +// PodFailurePolicyApplyConfiguration constructs a declarative configuration of the PodFailurePolicy type for use with // apply. func PodFailurePolicy() *PodFailurePolicyApplyConfiguration { return &PodFailurePolicyApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go index 65f625181..aa4dfc4c1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" ) -// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use // with apply. type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct { - ContainerName *string `json:"containerName,omitempty"` - Operator *v1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"` - Values []int32 `json:"values,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + Operator *batchv1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"` + Values []int32 `json:"values,omitempty"` } -// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with +// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with // apply. func PodFailurePolicyOnExitCodesRequirement() *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { return &PodFailurePolicyOnExitCodesRequirementApplyConfiguration{} @@ -47,7 +47,7 @@ func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithContainer // WithOperator sets the Operator field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Operator field is set to the value of the last call. -func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithOperator(value v1.PodFailurePolicyOnExitCodesOperator) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { +func (b *PodFailurePolicyOnExitCodesRequirementApplyConfiguration) WithOperator(value batchv1.PodFailurePolicyOnExitCodesOperator) *PodFailurePolicyOnExitCodesRequirementApplyConfiguration { b.Operator = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go index da1556ff8..6459a6e59 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use // with apply. type PodFailurePolicyOnPodConditionsPatternApplyConfiguration struct { - Type *v1.PodConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` + Type *corev1.PodConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` } -// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs an declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with +// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with // apply. func PodFailurePolicyOnPodConditionsPattern() *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { return &PodFailurePolicyOnPodConditionsPatternApplyConfiguration{} @@ -38,7 +38,7 @@ func PodFailurePolicyOnPodConditionsPattern() *PodFailurePolicyOnPodConditionsPa // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(value v1.PodConditionType) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { +func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(value corev1.PodConditionType) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { b.Type = &value return b } @@ -46,7 +46,7 @@ func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithType(valu // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithStatus(value v1.ConditionStatus) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { +func (b *PodFailurePolicyOnPodConditionsPatternApplyConfiguration) WithStatus(value corev1.ConditionStatus) *PodFailurePolicyOnPodConditionsPatternApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go index d43524353..847ec7c95 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" ) -// PodFailurePolicyRuleApplyConfiguration represents an declarative configuration of the PodFailurePolicyRule type for use +// PodFailurePolicyRuleApplyConfiguration represents a declarative configuration of the PodFailurePolicyRule type for use // with apply. type PodFailurePolicyRuleApplyConfiguration struct { - Action *v1.PodFailurePolicyAction `json:"action,omitempty"` + Action *batchv1.PodFailurePolicyAction `json:"action,omitempty"` OnExitCodes *PodFailurePolicyOnExitCodesRequirementApplyConfiguration `json:"onExitCodes,omitempty"` OnPodConditions []PodFailurePolicyOnPodConditionsPatternApplyConfiguration `json:"onPodConditions,omitempty"` } -// PodFailurePolicyRuleApplyConfiguration constructs an declarative configuration of the PodFailurePolicyRule type for use with +// PodFailurePolicyRuleApplyConfiguration constructs a declarative configuration of the PodFailurePolicyRule type for use with // apply. func PodFailurePolicyRule() *PodFailurePolicyRuleApplyConfiguration { return &PodFailurePolicyRuleApplyConfiguration{} @@ -39,7 +39,7 @@ func PodFailurePolicyRule() *PodFailurePolicyRuleApplyConfiguration { // WithAction sets the Action field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Action field is set to the value of the last call. -func (b *PodFailurePolicyRuleApplyConfiguration) WithAction(value v1.PodFailurePolicyAction) *PodFailurePolicyRuleApplyConfiguration { +func (b *PodFailurePolicyRuleApplyConfiguration) WithAction(value batchv1.PodFailurePolicyAction) *PodFailurePolicyRuleApplyConfiguration { b.Action = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go index 327aa1f5a..a3f4f39e2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// SuccessPolicyApplyConfiguration represents an declarative configuration of the SuccessPolicy type for use +// SuccessPolicyApplyConfiguration represents a declarative configuration of the SuccessPolicy type for use // with apply. 
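The podfailurepolicy* hunks above are again pure alias renames (v1 -> batchv1/corev1) and doc-typo fixes, leaving the builders themselves untouched. A minimal sketch of composing a pod failure policy through these builders, with the batchv1ac alias assumed as before:

// Sketch only, not part of the vendored diff: building a pod failure policy
// from the generated rule, exit-code, and pod-condition builders.
package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

func examplePodFailurePolicy() *batchv1ac.PodFailurePolicyApplyConfiguration {
	return batchv1ac.PodFailurePolicy().WithRules(
		// Fail the whole Job when the "main" container exits with code 42.
		batchv1ac.PodFailurePolicyRule().
			WithAction(batchv1.PodFailurePolicyActionFailJob).
			WithOnExitCodes(batchv1ac.PodFailurePolicyOnExitCodesRequirement().
				WithContainerName("main").
				WithOperator(batchv1.PodFailurePolicyOnExitCodesOpIn).
				WithValues(42)),
		// Ignore failures caused by node disruption.
		batchv1ac.PodFailurePolicyRule().
			WithAction(batchv1.PodFailurePolicyActionIgnore).
			WithOnPodConditions(batchv1ac.PodFailurePolicyOnPodConditionsPattern().
				WithType(corev1.DisruptionTarget).
				WithStatus(corev1.ConditionTrue)),
	)
}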
type SuccessPolicyApplyConfiguration struct { Rules []SuccessPolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// SuccessPolicyApplyConfiguration constructs an declarative configuration of the SuccessPolicy type for use with +// SuccessPolicyApplyConfiguration constructs a declarative configuration of the SuccessPolicy type for use with // apply. func SuccessPolicy() *SuccessPolicyApplyConfiguration { return &SuccessPolicyApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go index 4c862e682..2b5e3d91f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SuccessPolicyRuleApplyConfiguration represents an declarative configuration of the SuccessPolicyRule type for use +// SuccessPolicyRuleApplyConfiguration represents a declarative configuration of the SuccessPolicyRule type for use // with apply. type SuccessPolicyRuleApplyConfiguration struct { SucceededIndexes *string `json:"succeededIndexes,omitempty"` SucceededCount *int32 `json:"succeededCount,omitempty"` } -// SuccessPolicyRuleApplyConfiguration constructs an declarative configuration of the SuccessPolicyRule type for use with +// SuccessPolicyRuleApplyConfiguration constructs a declarative configuration of the SuccessPolicyRule type for use with // apply. func SuccessPolicyRule() *SuccessPolicyRuleApplyConfiguration { return &SuccessPolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go index 1409303ff..ff6b57b86 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go @@ -22,14 +22,14 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// UncountedTerminatedPodsApplyConfiguration represents an declarative configuration of the UncountedTerminatedPods type for use +// UncountedTerminatedPodsApplyConfiguration represents a declarative configuration of the UncountedTerminatedPods type for use // with apply. type UncountedTerminatedPodsApplyConfiguration struct { Succeeded []types.UID `json:"succeeded,omitempty"` Failed []types.UID `json:"failed,omitempty"` } -// UncountedTerminatedPodsApplyConfiguration constructs an declarative configuration of the UncountedTerminatedPods type for use with +// UncountedTerminatedPodsApplyConfiguration constructs a declarative configuration of the UncountedTerminatedPods type for use with // apply. 
func UncountedTerminatedPods() *UncountedTerminatedPodsApplyConfiguration { return &UncountedTerminatedPodsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go index 1d735a840..133ed36fa 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CronJobApplyConfiguration represents an declarative configuration of the CronJob type for use +// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use // with apply. type CronJobApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CronJobApplyConfiguration struct { Status *CronJobStatusApplyConfiguration `json:"status,omitempty"` } -// CronJob constructs an declarative configuration of the CronJob type for use with +// CronJob constructs a declarative configuration of the CronJob type for use with // apply. func CronJob(name, namespace string) *CronJobApplyConfiguration { b := &CronJobApplyConfiguration{} @@ -87,7 +87,7 @@ func extractCronJob(cronJob *batchv1beta1.CronJob, fieldManager string, subresou // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *CronJobApplyConfiguration) WithKind(value string) *CronJobApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *CronJobApplyConfiguration) WithAPIVersion(value string) *CronJobApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *CronJobApplyConfiguration) WithName(value string) *CronJobApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *CronJobApplyConfiguration) WithGenerateName(value string) *CronJobApply // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *CronJobApplyConfiguration) WithNamespace(value string) *CronJobApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *CronJobApplyConfiguration) WithUID(value types.UID) *CronJobApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *CronJobApplyConfiguration) WithResourceVersion(value string) *CronJobAp // If called multiple times, the Generation field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithGeneration(value int64) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *CronJobApplyConfiguration) WithGeneration(value int64) *CronJobApplyCon // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *CronJobApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Cr // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *CronJobApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Cr // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *CronJobApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. 
func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *CronJobApplyConfiguration) WithLabels(entries map[string]string) *CronJ // overwriting an existing map entries in Annotations field with the same key. func (b *CronJobApplyConfiguration) WithAnnotations(entries map[string]string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *CronJobApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *CronJobApplyConfiguration) WithFinalizers(values ...string) *CronJobApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *CronJobApplyConfiguration) WithStatus(value *CronJobStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CronJobApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go index 68c0777de..30604ac7e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go @@ -19,23 +19,23 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/batch/v1beta1" + batchv1beta1 "k8s.io/api/batch/v1beta1" ) -// CronJobSpecApplyConfiguration represents an declarative configuration of the CronJobSpec type for use +// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use // with apply. 
type CronJobSpecApplyConfiguration struct { Schedule *string `json:"schedule,omitempty"` TimeZone *string `json:"timeZone,omitempty"` StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"` - ConcurrencyPolicy *v1beta1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + ConcurrencyPolicy *batchv1beta1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` Suspend *bool `json:"suspend,omitempty"` JobTemplate *JobTemplateSpecApplyConfiguration `json:"jobTemplate,omitempty"` SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"` FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"` } -// CronJobSpecApplyConfiguration constructs an declarative configuration of the CronJobSpec type for use with +// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with // apply. func CronJobSpec() *CronJobSpecApplyConfiguration { return &CronJobSpecApplyConfiguration{} @@ -68,7 +68,7 @@ func (b *CronJobSpecApplyConfiguration) WithStartingDeadlineSeconds(value int64) // WithConcurrencyPolicy sets the ConcurrencyPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ConcurrencyPolicy field is set to the value of the last call. -func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value v1beta1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration { +func (b *CronJobSpecApplyConfiguration) WithConcurrencyPolicy(value batchv1beta1.ConcurrencyPolicy) *CronJobSpecApplyConfiguration { b.ConcurrencyPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go index 8dca14f66..335f9e0dc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// CronJobStatusApplyConfiguration represents an declarative configuration of the CronJobStatus type for use +// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use // with apply. type CronJobStatusApplyConfiguration struct { Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"` @@ -31,7 +31,7 @@ type CronJobStatusApplyConfiguration struct { LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"` } -// CronJobStatusApplyConfiguration constructs an declarative configuration of the CronJobStatus type for use with +// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with // apply. 
func CronJobStatus() *CronJobStatusApplyConfiguration { return &CronJobStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go index f925d65a7..4106b8e55 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go @@ -25,14 +25,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// JobTemplateSpecApplyConfiguration represents an declarative configuration of the JobTemplateSpec type for use +// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use // with apply. type JobTemplateSpecApplyConfiguration struct { *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` Spec *batchv1.JobSpecApplyConfiguration `json:"spec,omitempty"` } -// JobTemplateSpecApplyConfiguration constructs an declarative configuration of the JobTemplateSpec type for use with +// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with // apply. func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { return &JobTemplateSpecApplyConfiguration{} @@ -43,7 +43,7 @@ func JobTemplateSpec() *JobTemplateSpecApplyConfiguration { // If called multiple times, the Name field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -52,7 +52,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithName(value string) *JobTemplateS // If called multiple times, the GenerateName field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -61,7 +61,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithGenerateName(value string) *JobT // If called multiple times, the Namespace field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -70,7 +70,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithNamespace(value string) *JobTemp // If called multiple times, the UID field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -79,7 +79,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithUID(value types.UID) *JobTemplat // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -88,7 +88,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithResourceVersion(value string) *J // If called multiple times, the Generation field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithGeneration(value int64) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -97,7 +97,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithGeneration(value int64) *JobTemp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -106,7 +106,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.T // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -115,7 +115,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.T // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -125,11 +125,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -140,11 +140,11 @@ func (b *JobTemplateSpecApplyConfiguration) WithLabels(entries map[string]string // overwriting an existing map entries in Annotations field with the same key. 
func (b *JobTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -158,7 +158,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -169,7 +169,7 @@ func (b *JobTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow func (b *JobTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *JobTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -187,3 +187,9 @@ func (b *JobTemplateSpecApplyConfiguration) WithSpec(value *batchv1.JobSpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *JobTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go index 3d02c0be8..998e5723c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apicertificatesv1 "k8s.io/api/certificates/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + certificatesv1 "k8s.io/api/certificates/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CertificateSigningRequestApplyConfiguration represents an declarative configuration of the CertificateSigningRequest type for use +// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use // with apply. 
type CertificateSigningRequestApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CertificateSigningRequestSpecApplyConfiguration `json:"spec,omitempty"` - Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CertificateSigningRequestSpecApplyConfiguration `json:"spec,omitempty"` + Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` } -// CertificateSigningRequest constructs an declarative configuration of the CertificateSigningRequest type for use with +// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with // apply. func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfiguration { b := &CertificateSigningRequestApplyConfiguration{} @@ -57,18 +57,18 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCertificateSigningRequest(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) { +func ExtractCertificateSigningRequest(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) { return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "") } // ExtractCertificateSigningRequestStatus is the same as ExtractCertificateSigningRequest except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCertificateSigningRequestStatus(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) { +func ExtractCertificateSigningRequestStatus(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) { return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "status") } -func extractCertificateSigningRequest(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) { +func extractCertificateSigningRequest(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) { b := &CertificateSigningRequestApplyConfiguration{} err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1.CertificateSigningRequest"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractCertificateSigningRequest(certificateSigningRequest *apicertificates // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *CertificateSigningRequestApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *Ce // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value string) *CertificateSigningRequestApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value strin // If called multiple times, the Name field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *Ce // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. 
func (b *CertificateSigningRequestApplyConfiguration) WithGeneration(value int64) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration { +func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration { +func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[string]string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. 
func (b *CertificateSigningRequestApplyConfiguration) WithAnnotations(entries map[string]string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *CertificateSigningRequestApplyConfiguration) WithAnnotations(entries ma // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CertificateSigningRequestApplyConfiguration { +func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values func (b *CertificateSigningRequestApplyConfiguration) WithFinalizers(values ...string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CertificateSigningRequestApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,3 +254,9 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CertificateSigningRequestApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go index 13d69cfce..a6dedcb59 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go @@ -19,23 +19,23 @@ limitations under the License. 
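[reviewer note] The other recurring, purely mechanical change in these files comes from the regenerated applyconfiguration-gen output: imports are now aliased by API group (certificatesv1 for the API types, apismetav1 for apimachinery meta/v1, metav1 reserved for the client-go meta apply configurations), and the long-standing "represents an declarative configuration" typo becomes "a declarative configuration". A compile-only sketch of the aliasing convention — the file is illustrative, only the referenced types are real:

```go
// Compile-only sketch of the aliasing convention; the blank vars exist
// solely so each import alias is referenced.
package example

import (
	certificatesv1 "k8s.io/api/certificates/v1"
	apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

var (
	_ certificatesv1.KeyUsage              // API group keeps a group-based alias
	_ apismetav1.Time                      // apimachinery meta/v1 becomes apismetav1
	_ *metav1.ObjectMetaApplyConfiguration // client-go meta apply configs keep metav1
)
```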
package v1 import ( - v1 "k8s.io/api/certificates/v1" + certificatesv1 "k8s.io/api/certificates/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CertificateSigningRequestConditionApplyConfiguration represents an declarative configuration of the CertificateSigningRequestCondition type for use +// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use // with apply. type CertificateSigningRequestConditionApplyConfiguration struct { - Type *v1.RequestConditionType `json:"type,omitempty"` - Status *corev1.ConditionStatus `json:"status,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Type *certificatesv1.RequestConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` } -// CertificateSigningRequestConditionApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestCondition type for use with +// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with // apply. func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApplyConfiguration { return &CertificateSigningRequestConditionApplyConfiguration{} @@ -44,7 +44,7 @@ func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApp // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value v1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration { +func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value certificatesv1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go index 81ca214a9..82da53c9e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/certificates/v1" + certificatesv1 "k8s.io/api/certificates/v1" ) -// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use +// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use // with apply. 
type CertificateSigningRequestSpecApplyConfiguration struct { - Request []byte `json:"request,omitempty"` - SignerName *string `json:"signerName,omitempty"` - ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"` - Usages []v1.KeyUsage `json:"usages,omitempty"` - Username *string `json:"username,omitempty"` - UID *string `json:"uid,omitempty"` - Groups []string `json:"groups,omitempty"` - Extra map[string]v1.ExtraValue `json:"extra,omitempty"` + Request []byte `json:"request,omitempty"` + SignerName *string `json:"signerName,omitempty"` + ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"` + Usages []certificatesv1.KeyUsage `json:"usages,omitempty"` + Username *string `json:"username,omitempty"` + UID *string `json:"uid,omitempty"` + Groups []string `json:"groups,omitempty"` + Extra map[string]certificatesv1.ExtraValue `json:"extra,omitempty"` } -// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with +// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with // apply. func CertificateSigningRequestSpec() *CertificateSigningRequestSpecApplyConfiguration { return &CertificateSigningRequestSpecApplyConfiguration{} @@ -70,7 +70,7 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithExpirationSeconds( // WithUsages adds the given value to the Usages field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Usages field. -func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...v1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration { +func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...certificatesv1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration { for i := range values { b.Usages = append(b.Usages, values[i]) } @@ -107,9 +107,9 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithGroups(values ...s // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Extra field, // overwriting an existing map entries in Extra field with the same key. -func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]v1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration { +func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]certificatesv1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration { if b.Extra == nil && len(entries) > 0 { - b.Extra = make(map[string]v1.ExtraValue, len(entries)) + b.Extra = make(map[string]certificatesv1.ExtraValue, len(entries)) } for k, v := range entries { b.Extra[k] = v diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go index 59d593033..897f6d1e9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// CertificateSigningRequestStatusApplyConfiguration represents an declarative configuration of the CertificateSigningRequestStatus type for use +// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use // with apply. type CertificateSigningRequestStatusApplyConfiguration struct { Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"` Certificate []byte `json:"certificate,omitempty"` } -// CertificateSigningRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestStatus type for use with +// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with // apply. func CertificateSigningRequestStatus() *CertificateSigningRequestStatusApplyConfiguration { return &CertificateSigningRequestStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go index 788d2a07d..6ae6b269d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterTrustBundleApplyConfiguration represents an declarative configuration of the ClusterTrustBundle type for use +// ClusterTrustBundleApplyConfiguration represents a declarative configuration of the ClusterTrustBundle type for use // with apply. type ClusterTrustBundleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type ClusterTrustBundleApplyConfiguration struct { Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"` } -// ClusterTrustBundle constructs an declarative configuration of the ClusterTrustBundle type for use with +// ClusterTrustBundle constructs a declarative configuration of the ClusterTrustBundle type for use with // apply. func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration { b := &ClusterTrustBundleApplyConfiguration{} @@ -84,7 +84,7 @@ func extractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterT // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTrustBundleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithKind(value string) *ClusterTr // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *ClusterTrustBundleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithAPIVersion(value string) *Clu // If called multiple times, the Name field is set to the value of the last call. 
func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithName(value string) *ClusterTr // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithNamespace(value string) *Clus // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithUID(value types.UID) *Cluster // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithGeneration(value int64) *Clus // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ClusterTrustBundleApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ClusterTrustBundleApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterTrustBundleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *ClusterTrustBundleApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ClusterTrustBundleApplyConfiguration) WithFinalizers(values ...string) *ClusterTrustBundleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -245,3 +245,9 @@ func (b *ClusterTrustBundleApplyConfiguration) WithSpec(value *ClusterTrustBundl b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ClusterTrustBundleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go index d1aea1d6d..7bb36f708 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// ClusterTrustBundleSpecApplyConfiguration represents an declarative configuration of the ClusterTrustBundleSpec type for use +// ClusterTrustBundleSpecApplyConfiguration represents a declarative configuration of the ClusterTrustBundleSpec type for use // with apply. type ClusterTrustBundleSpecApplyConfiguration struct { SignerName *string `json:"signerName,omitempty"` TrustBundle *string `json:"trustBundle,omitempty"` } -// ClusterTrustBundleSpecApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleSpec type for use with +// ClusterTrustBundleSpecApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleSpec type for use with // apply. func ClusterTrustBundleSpec() *ClusterTrustBundleSpecApplyConfiguration { return &ClusterTrustBundleSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go index 83a0edc18..a1f57f268 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CertificateSigningRequestApplyConfiguration represents an declarative configuration of the CertificateSigningRequest type for use +// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use // with apply. type CertificateSigningRequestApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type CertificateSigningRequestApplyConfiguration struct { Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"` } -// CertificateSigningRequest constructs an declarative configuration of the CertificateSigningRequest type for use with +// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with // apply. func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfiguration { b := &CertificateSigningRequestApplyConfiguration{} @@ -85,7 +85,7 @@ func extractCertificateSigningRequest(certificateSigningRequest *certificatesv1b // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *CertificateSigningRequestApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithKind(value string) *Ce // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value string) *CertificateSigningRequestApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithAPIVersion(value strin // If called multiple times, the Name field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithName(value string) *Ce // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithGenerateName(value str // If called multiple times, the Namespace field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithNamespace(value string // If called multiple times, the UID field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithUID(value types.UID) * // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. 
func (b *CertificateSigningRequestApplyConfiguration) WithGeneration(value int64) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithGeneration(value int64 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithCreationTimestamp(valu // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionTimestamp(valu // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithDeletionGracePeriodSec // overwriting an existing map entries in Labels field with the same key. func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[string]string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *CertificateSigningRequestApplyConfiguration) WithLabels(entries map[str // overwriting an existing map entries in Annotations field with the same key. 
func (b *CertificateSigningRequestApplyConfiguration) WithAnnotations(entries map[string]string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *CertificateSigningRequestApplyConfiguration) WithOwnerReferences(values func (b *CertificateSigningRequestApplyConfiguration) WithFinalizers(values ...string) *CertificateSigningRequestApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *CertificateSigningRequestApplyConfiguration) WithStatus(value *Certific b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CertificateSigningRequestApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go index 2c32a3272..a845ec404 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go @@ -19,23 +19,23 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/certificates/v1beta1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// CertificateSigningRequestConditionApplyConfiguration represents an declarative configuration of the CertificateSigningRequestCondition type for use +// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use // with apply. 
type CertificateSigningRequestConditionApplyConfiguration struct { - Type *v1beta1.RequestConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Type *certificatesv1beta1.RequestConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` } -// CertificateSigningRequestConditionApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestCondition type for use with +// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with // apply. func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApplyConfiguration { return &CertificateSigningRequestConditionApplyConfiguration{} @@ -44,7 +44,7 @@ func CertificateSigningRequestCondition() *CertificateSigningRequestConditionApp // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value v1beta1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration { +func (b *CertificateSigningRequestConditionApplyConfiguration) WithType(value certificatesv1beta1.RequestConditionType) *CertificateSigningRequestConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go index 9554b1f40..ee4016c76 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go @@ -19,23 +19,23 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/certificates/v1beta1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" ) -// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use +// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use // with apply. 
type CertificateSigningRequestSpecApplyConfiguration struct { - Request []byte `json:"request,omitempty"` - SignerName *string `json:"signerName,omitempty"` - ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"` - Usages []v1beta1.KeyUsage `json:"usages,omitempty"` - Username *string `json:"username,omitempty"` - UID *string `json:"uid,omitempty"` - Groups []string `json:"groups,omitempty"` - Extra map[string]v1beta1.ExtraValue `json:"extra,omitempty"` + Request []byte `json:"request,omitempty"` + SignerName *string `json:"signerName,omitempty"` + ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"` + Usages []certificatesv1beta1.KeyUsage `json:"usages,omitempty"` + Username *string `json:"username,omitempty"` + UID *string `json:"uid,omitempty"` + Groups []string `json:"groups,omitempty"` + Extra map[string]certificatesv1beta1.ExtraValue `json:"extra,omitempty"` } -// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with +// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with // apply. func CertificateSigningRequestSpec() *CertificateSigningRequestSpecApplyConfiguration { return &CertificateSigningRequestSpecApplyConfiguration{} @@ -70,7 +70,7 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithExpirationSeconds( // WithUsages adds the given value to the Usages field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Usages field. -func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...v1beta1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration { +func (b *CertificateSigningRequestSpecApplyConfiguration) WithUsages(values ...certificatesv1beta1.KeyUsage) *CertificateSigningRequestSpecApplyConfiguration { for i := range values { b.Usages = append(b.Usages, values[i]) } @@ -107,9 +107,9 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithGroups(values ...s // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Extra field, // overwriting an existing map entries in Extra field with the same key. 
-func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]v1beta1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration { +func (b *CertificateSigningRequestSpecApplyConfiguration) WithExtra(entries map[string]certificatesv1beta1.ExtraValue) *CertificateSigningRequestSpecApplyConfiguration { if b.Extra == nil && len(entries) > 0 { - b.Extra = make(map[string]v1beta1.ExtraValue, len(entries)) + b.Extra = make(map[string]certificatesv1beta1.ExtraValue, len(entries)) } for k, v := range entries { b.Extra[k] = v diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go index 9d8c5d458..f82e8aed3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// CertificateSigningRequestStatusApplyConfiguration represents an declarative configuration of the CertificateSigningRequestStatus type for use +// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use // with apply. type CertificateSigningRequestStatusApplyConfiguration struct { Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"` Certificate []byte `json:"certificate,omitempty"` } -// CertificateSigningRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestStatus type for use with +// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with // apply. func CertificateSigningRequestStatus() *CertificateSigningRequestStatusApplyConfiguration { return &CertificateSigningRequestStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go index 618f12fb2..2a69e773c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - apicoordinationv1 "k8s.io/api/coordination/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coordinationv1 "k8s.io/api/coordination/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LeaseApplyConfiguration represents an declarative configuration of the Lease type for use +// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use // with apply. 
type LeaseApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` } -// Lease constructs an declarative configuration of the Lease type for use with +// Lease constructs a declarative configuration of the Lease type for use with // apply. func Lease(name, namespace string) *LeaseApplyConfiguration { b := &LeaseApplyConfiguration{} @@ -57,18 +57,18 @@ func Lease(name, namespace string) *LeaseApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractLease(lease *apicoordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { +func ExtractLease(lease *coordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { return extractLease(lease, fieldManager, "") } // ExtractLeaseStatus is the same as ExtractLease except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractLeaseStatus(lease *apicoordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { +func ExtractLeaseStatus(lease *coordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) { return extractLease(lease, fieldManager, "status") } -func extractLease(lease *apicoordinationv1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) { +func extractLease(lease *coordinationv1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) { b := &LeaseApplyConfiguration{} err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1.Lease"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractLease(lease *apicoordinationv1.Lease, fieldManager string, subresour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfig // If called multiple times, the Name field is set to the value of the last call. 
func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithGeneration(value int64) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *LeaseApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseApplyConfiguration { +func (b *LeaseApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseApplyConfiguration { +func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Leas // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *L // overwriting an existing map entries in Labels field with the same key. func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseAp // overwriting an existing map entries in Annotations field with the same key. func (b *LeaseApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *LeaseApplyConfiguration) WithAnnotations(entries map[string]string) *Le // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LeaseApplyConfiguration { +func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *LeaseApplyConfiguration) WithFinalizers(values ...string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *LeaseApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -247,3 +247,9 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) * b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LeaseApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go index a5f6a6ebb..d0099872c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go @@ -19,20 +19,23 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coordinationv1 "k8s.io/api/coordination/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LeaseSpecApplyConfiguration represents an declarative configuration of the LeaseSpec type for use +// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use // with apply. type LeaseSpecApplyConfiguration struct { - HolderIdentity *string `json:"holderIdentity,omitempty"` - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` - AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` - RenewTime *v1.MicroTime `json:"renewTime,omitempty"` - LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + HolderIdentity *string `json:"holderIdentity,omitempty"` + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` + AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty"` + RenewTime *metav1.MicroTime `json:"renewTime,omitempty"` + LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` + PreferredHolder *string `json:"preferredHolder,omitempty"` } -// LeaseSpecApplyConfiguration constructs an declarative configuration of the LeaseSpec type for use with +// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with // apply. 
func LeaseSpec() *LeaseSpecApplyConfiguration { return &LeaseSpecApplyConfiguration{} @@ -57,7 +60,7 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseDurationSeconds(value int32) *Lea // WithAcquireTime sets the AcquireTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AcquireTime field is set to the value of the last call. -func (b *LeaseSpecApplyConfiguration) WithAcquireTime(value v1.MicroTime) *LeaseSpecApplyConfiguration { +func (b *LeaseSpecApplyConfiguration) WithAcquireTime(value metav1.MicroTime) *LeaseSpecApplyConfiguration { b.AcquireTime = &value return b } @@ -65,7 +68,7 @@ func (b *LeaseSpecApplyConfiguration) WithAcquireTime(value v1.MicroTime) *Lease // WithRenewTime sets the RenewTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RenewTime field is set to the value of the last call. -func (b *LeaseSpecApplyConfiguration) WithRenewTime(value v1.MicroTime) *LeaseSpecApplyConfiguration { +func (b *LeaseSpecApplyConfiguration) WithRenewTime(value metav1.MicroTime) *LeaseSpecApplyConfiguration { b.RenewTime = &value return b } @@ -77,3 +80,19 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseTransitions(value int32) *LeaseSp b.LeaseTransitions = &value return b } + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseSpecApplyConfiguration { + b.Strategy = &value + return b +} + +// WithPreferredHolder sets the PreferredHolder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreferredHolder field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithPreferredHolder(value string) *LeaseSpecApplyConfiguration { + b.PreferredHolder = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go new file mode 100644 index 000000000..b2cc2338e --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go @@ -0,0 +1,255 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// LeaseCandidateApplyConfiguration represents a declarative configuration of the LeaseCandidate type for use +// with apply. +type LeaseCandidateApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *LeaseCandidateSpecApplyConfiguration `json:"spec,omitempty"` +} + +// LeaseCandidate constructs a declarative configuration of the LeaseCandidate type for use with +// apply. +func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration { + b := &LeaseCandidateApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("LeaseCandidate") + b.WithAPIVersion("coordination.k8s.io/v1alpha2") + return b +} + +// ExtractLeaseCandidate extracts the applied configuration owned by fieldManager from +// leaseCandidate. If no managedFields are found in leaseCandidate for fieldManager, a +// LeaseCandidateApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// leaseCandidate must be a unmodified LeaseCandidate API object that was retrieved from the Kubernetes API. +// ExtractLeaseCandidate provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { + return extractLeaseCandidate(leaseCandidate, fieldManager, "") +} + +// ExtractLeaseCandidateStatus is the same as ExtractLeaseCandidate except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) { + return extractLeaseCandidate(leaseCandidate, fieldManager, "status") +} + +func extractLeaseCandidate(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) { + b := &LeaseCandidateApplyConfiguration{} + err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1alpha2.LeaseCandidate"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(leaseCandidate.Name) + b.WithNamespace(leaseCandidate.Namespace) + + b.WithKind("LeaseCandidate") + b.WithAPIVersion("coordination.k8s.io/v1alpha2") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *LeaseCandidateApplyConfiguration) WithKind(value string) *LeaseCandidateApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithAPIVersion(value string) *LeaseCandidateApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithName(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithGenerateName(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithNamespace(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithUID(value types.UID) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithResourceVersion(value string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *LeaseCandidateApplyConfiguration) WithGeneration(value int64) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *LeaseCandidateApplyConfiguration) WithLabels(entries map[string]string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *LeaseCandidateApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *LeaseCandidateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *LeaseCandidateApplyConfiguration) WithFinalizers(values ...string) *LeaseCandidateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *LeaseCandidateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *LeaseCandidateApplyConfiguration) WithSpec(value *LeaseCandidateSpecApplyConfiguration) *LeaseCandidateApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LeaseCandidateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go new file mode 100644 index 000000000..f52aaab24 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + coordinationv1 "k8s.io/api/coordination/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LeaseCandidateSpecApplyConfiguration represents a declarative configuration of the LeaseCandidateSpec type for use +// with apply. +type LeaseCandidateSpecApplyConfiguration struct { + LeaseName *string `json:"leaseName,omitempty"` + PingTime *v1.MicroTime `json:"pingTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + BinaryVersion *string `json:"binaryVersion,omitempty"` + EmulationVersion *string `json:"emulationVersion,omitempty"` + Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` +} + +// LeaseCandidateSpecApplyConfiguration constructs a declarative configuration of the LeaseCandidateSpec type for use with +// apply. +func LeaseCandidateSpec() *LeaseCandidateSpecApplyConfiguration { + return &LeaseCandidateSpecApplyConfiguration{} +} + +// WithLeaseName sets the LeaseName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LeaseName field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithLeaseName(value string) *LeaseCandidateSpecApplyConfiguration { + b.LeaseName = &value + return b +} + +// WithPingTime sets the PingTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PingTime field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithPingTime(value v1.MicroTime) *LeaseCandidateSpecApplyConfiguration { + b.PingTime = &value + return b +} + +// WithRenewTime sets the RenewTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RenewTime field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithRenewTime(value v1.MicroTime) *LeaseCandidateSpecApplyConfiguration { + b.RenewTime = &value + return b +} + +// WithBinaryVersion sets the BinaryVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BinaryVersion field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithBinaryVersion(value string) *LeaseCandidateSpecApplyConfiguration { + b.BinaryVersion = &value + return b +} + +// WithEmulationVersion sets the EmulationVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the EmulationVersion field is set to the value of the last call. 
+func (b *LeaseCandidateSpecApplyConfiguration) WithEmulationVersion(value string) *LeaseCandidateSpecApplyConfiguration { + b.EmulationVersion = &value + return b +} + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *LeaseCandidateSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseCandidateSpecApplyConfiguration { + b.Strategy = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go index 867e0f58b..b321fe6b4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LeaseApplyConfiguration represents an declarative configuration of the Lease type for use +// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use // with apply. type LeaseApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type LeaseApplyConfiguration struct { Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"` } -// Lease constructs an declarative configuration of the Lease type for use with +// Lease constructs a declarative configuration of the Lease type for use with // apply. func Lease(name, namespace string) *LeaseApplyConfiguration { b := &LeaseApplyConfiguration{} @@ -86,7 +86,7 @@ func extractLease(lease *coordinationv1beta1.Lease, fieldManager string, subreso // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *LeaseApplyConfiguration) WithKind(value string) *LeaseApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *LeaseApplyConfiguration) WithAPIVersion(value string) *LeaseApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *LeaseApplyConfiguration) WithName(value string) *LeaseApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *LeaseApplyConfiguration) WithGenerateName(value string) *LeaseApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *LeaseApplyConfiguration) WithNamespace(value string) *LeaseApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *LeaseApplyConfiguration) WithUID(value types.UID) *LeaseApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *LeaseApplyConfiguration) WithResourceVersion(value string) *LeaseApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithGeneration(value int64) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *LeaseApplyConfiguration) WithGeneration(value int64) *LeaseApplyConfigu // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *LeaseApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Leas // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *LeaseApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Leas // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *LeaseApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *L // overwriting an existing map entries in Labels field with the same key. 
func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *LeaseApplyConfiguration) WithLabels(entries map[string]string) *LeaseAp // overwriting an existing map entries in Annotations field with the same key. func (b *LeaseApplyConfiguration) WithAnnotations(entries map[string]string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *LeaseApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *LeaseApplyConfiguration) WithFinalizers(values ...string) *LeaseApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -247,3 +247,9 @@ func (b *LeaseApplyConfiguration) WithSpec(value *LeaseSpecApplyConfiguration) * b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *LeaseApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go index 865eb7645..8c7fddfc6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go @@ -19,20 +19,23 @@ limitations under the License. package v1beta1 import ( + coordinationv1 "k8s.io/api/coordination/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LeaseSpecApplyConfiguration represents an declarative configuration of the LeaseSpec type for use +// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use // with apply. 
type LeaseSpecApplyConfiguration struct { - HolderIdentity *string `json:"holderIdentity,omitempty"` - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` - AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` - RenewTime *v1.MicroTime `json:"renewTime,omitempty"` - LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + HolderIdentity *string `json:"holderIdentity,omitempty"` + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"` + AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"` + RenewTime *v1.MicroTime `json:"renewTime,omitempty"` + LeaseTransitions *int32 `json:"leaseTransitions,omitempty"` + Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"` + PreferredHolder *string `json:"preferredHolder,omitempty"` } -// LeaseSpecApplyConfiguration constructs an declarative configuration of the LeaseSpec type for use with +// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with // apply. func LeaseSpec() *LeaseSpecApplyConfiguration { return &LeaseSpecApplyConfiguration{} @@ -77,3 +80,19 @@ func (b *LeaseSpecApplyConfiguration) WithLeaseTransitions(value int32) *LeaseSp b.LeaseTransitions = &value return b } + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithStrategy(value coordinationv1.CoordinatedLeaseStrategy) *LeaseSpecApplyConfiguration { + b.Strategy = &value + return b +} + +// WithPreferredHolder sets the PreferredHolder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreferredHolder field is set to the value of the last call. +func (b *LeaseSpecApplyConfiguration) WithPreferredHolder(value string) *LeaseSpecApplyConfiguration { + b.PreferredHolder = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go index df6d1c64e..45484f140 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AffinityApplyConfiguration represents an declarative configuration of the Affinity type for use +// AffinityApplyConfiguration represents a declarative configuration of the Affinity type for use // with apply. type AffinityApplyConfiguration struct { NodeAffinity *NodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"` @@ -26,7 +26,7 @@ type AffinityApplyConfiguration struct { PodAntiAffinity *PodAntiAffinityApplyConfiguration `json:"podAntiAffinity,omitempty"` } -// AffinityApplyConfiguration constructs an declarative configuration of the Affinity type for use with +// AffinityApplyConfiguration constructs a declarative configuration of the Affinity type for use with // apply. 
func Affinity() *AffinityApplyConfiguration { return &AffinityApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go index 7f3c22afa..3f7de21b3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// AppArmorProfileApplyConfiguration represents an declarative configuration of the AppArmorProfile type for use +// AppArmorProfileApplyConfiguration represents a declarative configuration of the AppArmorProfile type for use // with apply. type AppArmorProfileApplyConfiguration struct { - Type *v1.AppArmorProfileType `json:"type,omitempty"` - LocalhostProfile *string `json:"localhostProfile,omitempty"` + Type *corev1.AppArmorProfileType `json:"type,omitempty"` + LocalhostProfile *string `json:"localhostProfile,omitempty"` } -// AppArmorProfileApplyConfiguration constructs an declarative configuration of the AppArmorProfile type for use with +// AppArmorProfileApplyConfiguration constructs a declarative configuration of the AppArmorProfile type for use with // apply. func AppArmorProfile() *AppArmorProfileApplyConfiguration { return &AppArmorProfileApplyConfiguration{} @@ -38,7 +38,7 @@ func AppArmorProfile() *AppArmorProfileApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *AppArmorProfileApplyConfiguration) WithType(value v1.AppArmorProfileType) *AppArmorProfileApplyConfiguration { +func (b *AppArmorProfileApplyConfiguration) WithType(value corev1.AppArmorProfileType) *AppArmorProfileApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go index 970bf24c4..2c76161a1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// AttachedVolumeApplyConfiguration represents an declarative configuration of the AttachedVolume type for use +// AttachedVolumeApplyConfiguration represents a declarative configuration of the AttachedVolume type for use // with apply. type AttachedVolumeApplyConfiguration struct { - Name *v1.UniqueVolumeName `json:"name,omitempty"` - DevicePath *string `json:"devicePath,omitempty"` + Name *corev1.UniqueVolumeName `json:"name,omitempty"` + DevicePath *string `json:"devicePath,omitempty"` } -// AttachedVolumeApplyConfiguration constructs an declarative configuration of the AttachedVolume type for use with +// AttachedVolumeApplyConfiguration constructs a declarative configuration of the AttachedVolume type for use with // apply. 
func AttachedVolume() *AttachedVolumeApplyConfiguration { return &AttachedVolumeApplyConfiguration{} @@ -38,7 +38,7 @@ func AttachedVolume() *AttachedVolumeApplyConfiguration { // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *AttachedVolumeApplyConfiguration) WithName(value v1.UniqueVolumeName) *AttachedVolumeApplyConfiguration { +func (b *AttachedVolumeApplyConfiguration) WithName(value corev1.UniqueVolumeName) *AttachedVolumeApplyConfiguration { b.Name = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go index 6ff335e9d..d08786965 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents an declarative configuration of the AWSElasticBlockStoreVolumeSource type for use +// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use // with apply. type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs an declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with +// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with // apply. func AWSElasticBlockStoreVolumeSource() *AWSElasticBlockStoreVolumeSourceApplyConfiguration { return &AWSElasticBlockStoreVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go index b2774735a..d4d20dfa9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// AzureDiskVolumeSourceApplyConfiguration represents an declarative configuration of the AzureDiskVolumeSource type for use +// AzureDiskVolumeSourceApplyConfiguration represents a declarative configuration of the AzureDiskVolumeSource type for use // with apply. 
type AzureDiskVolumeSourceApplyConfiguration struct { - DiskName *string `json:"diskName,omitempty"` - DataDiskURI *string `json:"diskURI,omitempty"` - CachingMode *v1.AzureDataDiskCachingMode `json:"cachingMode,omitempty"` - FSType *string `json:"fsType,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` - Kind *v1.AzureDataDiskKind `json:"kind,omitempty"` + DiskName *string `json:"diskName,omitempty"` + DataDiskURI *string `json:"diskURI,omitempty"` + CachingMode *corev1.AzureDataDiskCachingMode `json:"cachingMode,omitempty"` + FSType *string `json:"fsType,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + Kind *corev1.AzureDataDiskKind `json:"kind,omitempty"` } -// AzureDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureDiskVolumeSource type for use with +// AzureDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureDiskVolumeSource type for use with // apply. func AzureDiskVolumeSource() *AzureDiskVolumeSourceApplyConfiguration { return &AzureDiskVolumeSourceApplyConfiguration{} @@ -58,7 +58,7 @@ func (b *AzureDiskVolumeSourceApplyConfiguration) WithDataDiskURI(value string) // WithCachingMode sets the CachingMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CachingMode field is set to the value of the last call. -func (b *AzureDiskVolumeSourceApplyConfiguration) WithCachingMode(value v1.AzureDataDiskCachingMode) *AzureDiskVolumeSourceApplyConfiguration { +func (b *AzureDiskVolumeSourceApplyConfiguration) WithCachingMode(value corev1.AzureDataDiskCachingMode) *AzureDiskVolumeSourceApplyConfiguration { b.CachingMode = &value return b } @@ -82,7 +82,7 @@ func (b *AzureDiskVolumeSourceApplyConfiguration) WithReadOnly(value bool) *Azur // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *AzureDiskVolumeSourceApplyConfiguration) WithKind(value v1.AzureDataDiskKind) *AzureDiskVolumeSourceApplyConfiguration { +func (b *AzureDiskVolumeSourceApplyConfiguration) WithKind(value corev1.AzureDataDiskKind) *AzureDiskVolumeSourceApplyConfiguration { b.Kind = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go index f17393833..70a6b17be 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AzureFilePersistentVolumeSourceApplyConfiguration represents an declarative configuration of the AzureFilePersistentVolumeSource type for use +// AzureFilePersistentVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFilePersistentVolumeSource type for use // with apply. 
type AzureFilePersistentVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -27,7 +27,7 @@ type AzureFilePersistentVolumeSourceApplyConfiguration struct { SecretNamespace *string `json:"secretNamespace,omitempty"` } -// AzureFilePersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureFilePersistentVolumeSource type for use with +// AzureFilePersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFilePersistentVolumeSource type for use with // apply. func AzureFilePersistentVolumeSource() *AzureFilePersistentVolumeSourceApplyConfiguration { return &AzureFilePersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go index a7f7f33d8..ff0c86791 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// AzureFileVolumeSourceApplyConfiguration represents an declarative configuration of the AzureFileVolumeSource type for use +// AzureFileVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFileVolumeSource type for use // with apply. type AzureFileVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -26,7 +26,7 @@ type AzureFileVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// AzureFileVolumeSourceApplyConfiguration constructs an declarative configuration of the AzureFileVolumeSource type for use with +// AzureFileVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFileVolumeSource type for use with // apply. func AzureFileVolumeSource() *AzureFileVolumeSourceApplyConfiguration { return &AzureFileVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go index c3d176c4d..e5c52b3c1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// CapabilitiesApplyConfiguration represents an declarative configuration of the Capabilities type for use +// CapabilitiesApplyConfiguration represents a declarative configuration of the Capabilities type for use // with apply. type CapabilitiesApplyConfiguration struct { - Add []v1.Capability `json:"add,omitempty"` - Drop []v1.Capability `json:"drop,omitempty"` + Add []corev1.Capability `json:"add,omitempty"` + Drop []corev1.Capability `json:"drop,omitempty"` } -// CapabilitiesApplyConfiguration constructs an declarative configuration of the Capabilities type for use with +// CapabilitiesApplyConfiguration constructs a declarative configuration of the Capabilities type for use with // apply. 
func Capabilities() *CapabilitiesApplyConfiguration { return &CapabilitiesApplyConfiguration{} @@ -38,7 +38,7 @@ func Capabilities() *CapabilitiesApplyConfiguration { // WithAdd adds the given value to the Add field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Add field. -func (b *CapabilitiesApplyConfiguration) WithAdd(values ...v1.Capability) *CapabilitiesApplyConfiguration { +func (b *CapabilitiesApplyConfiguration) WithAdd(values ...corev1.Capability) *CapabilitiesApplyConfiguration { for i := range values { b.Add = append(b.Add, values[i]) } @@ -48,7 +48,7 @@ func (b *CapabilitiesApplyConfiguration) WithAdd(values ...v1.Capability) *Capab // WithDrop adds the given value to the Drop field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Drop field. -func (b *CapabilitiesApplyConfiguration) WithDrop(values ...v1.Capability) *CapabilitiesApplyConfiguration { +func (b *CapabilitiesApplyConfiguration) WithDrop(values ...corev1.Capability) *CapabilitiesApplyConfiguration { for i := range values { b.Drop = append(b.Drop, values[i]) } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go index a41936fe3..f3ee2d03e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CephFSPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CephFSPersistentVolumeSource type for use +// CephFSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSPersistentVolumeSource type for use // with apply. type CephFSPersistentVolumeSourceApplyConfiguration struct { Monitors []string `json:"monitors,omitempty"` @@ -29,7 +29,7 @@ type CephFSPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// CephFSPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CephFSPersistentVolumeSource type for use with +// CephFSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSPersistentVolumeSource type for use with // apply. func CephFSPersistentVolumeSource() *CephFSPersistentVolumeSourceApplyConfiguration { return &CephFSPersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go index 0ea070ba5..77d53d6eb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// CephFSVolumeSourceApplyConfiguration represents an declarative configuration of the CephFSVolumeSource type for use +// CephFSVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSVolumeSource type for use // with apply. type CephFSVolumeSourceApplyConfiguration struct { Monitors []string `json:"monitors,omitempty"` @@ -29,7 +29,7 @@ type CephFSVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// CephFSVolumeSourceApplyConfiguration constructs an declarative configuration of the CephFSVolumeSource type for use with +// CephFSVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSVolumeSource type for use with // apply. func CephFSVolumeSource() *CephFSVolumeSourceApplyConfiguration { return &CephFSVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go index 7754cf92f..b26573488 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CinderPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CinderPersistentVolumeSource type for use +// CinderPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CinderPersistentVolumeSource type for use // with apply. type CinderPersistentVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type CinderPersistentVolumeSourceApplyConfiguration struct { SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// CinderPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CinderPersistentVolumeSource type for use with +// CinderPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CinderPersistentVolumeSource type for use with // apply. func CinderPersistentVolumeSource() *CinderPersistentVolumeSourceApplyConfiguration { return &CinderPersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go index 51271e279..131cbf219 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CinderVolumeSourceApplyConfiguration represents an declarative configuration of the CinderVolumeSource type for use +// CinderVolumeSourceApplyConfiguration represents a declarative configuration of the CinderVolumeSource type for use // with apply. 
type CinderVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -27,7 +27,7 @@ type CinderVolumeSourceApplyConfiguration struct { SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// CinderVolumeSourceApplyConfiguration constructs an declarative configuration of the CinderVolumeSource type for use with +// CinderVolumeSourceApplyConfiguration constructs a declarative configuration of the CinderVolumeSource type for use with // apply. func CinderVolumeSource() *CinderVolumeSourceApplyConfiguration { return &CinderVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go deleted file mode 100644 index 2153570fc..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -// ClaimSourceApplyConfiguration represents an declarative configuration of the ClaimSource type for use -// with apply. -type ClaimSourceApplyConfiguration struct { - ResourceClaimName *string `json:"resourceClaimName,omitempty"` - ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"` -} - -// ClaimSourceApplyConfiguration constructs an declarative configuration of the ClaimSource type for use with -// apply. -func ClaimSource() *ClaimSourceApplyConfiguration { - return &ClaimSourceApplyConfiguration{} -} - -// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceClaimName field is set to the value of the last call. -func (b *ClaimSourceApplyConfiguration) WithResourceClaimName(value string) *ClaimSourceApplyConfiguration { - b.ResourceClaimName = &value - return b -} - -// WithResourceClaimTemplateName sets the ResourceClaimTemplateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceClaimTemplateName field is set to the value of the last call. -func (b *ClaimSourceApplyConfiguration) WithResourceClaimTemplateName(value string) *ClaimSourceApplyConfiguration { - b.ResourceClaimTemplateName = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go index a666e8faa..02c4e55e1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// ClientIPConfigApplyConfiguration represents an declarative configuration of the ClientIPConfig type for use +// ClientIPConfigApplyConfiguration represents a declarative configuration of the ClientIPConfig type for use // with apply. type ClientIPConfigApplyConfiguration struct { TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"` } -// ClientIPConfigApplyConfiguration constructs an declarative configuration of the ClientIPConfig type for use with +// ClientIPConfigApplyConfiguration constructs a declarative configuration of the ClientIPConfig type for use with // apply. func ClientIPConfig() *ClientIPConfigApplyConfiguration { return &ClientIPConfigApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go index 5aa686782..ab1c578c8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterTrustBundleProjectionApplyConfiguration represents an declarative configuration of the ClusterTrustBundleProjection type for use +// ClusterTrustBundleProjectionApplyConfiguration represents a declarative configuration of the ClusterTrustBundleProjection type for use // with apply. type ClusterTrustBundleProjectionApplyConfiguration struct { - Name *string `json:"name,omitempty"` - SignerName *string `json:"signerName,omitempty"` - LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` - Optional *bool `json:"optional,omitempty"` - Path *string `json:"path,omitempty"` + Name *string `json:"name,omitempty"` + SignerName *string `json:"signerName,omitempty"` + LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + Optional *bool `json:"optional,omitempty"` + Path *string `json:"path,omitempty"` } -// ClusterTrustBundleProjectionApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleProjection type for use with +// ClusterTrustBundleProjectionApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleProjection type for use with // apply. func ClusterTrustBundleProjection() *ClusterTrustBundleProjectionApplyConfiguration { return &ClusterTrustBundleProjectionApplyConfiguration{} @@ -57,7 +57,7 @@ func (b *ClusterTrustBundleProjectionApplyConfiguration) WithSignerName(value st // WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LabelSelector field is set to the value of the last call. 
-func (b *ClusterTrustBundleProjectionApplyConfiguration) WithLabelSelector(value *v1.LabelSelectorApplyConfiguration) *ClusterTrustBundleProjectionApplyConfiguration { +func (b *ClusterTrustBundleProjectionApplyConfiguration) WithLabelSelector(value *metav1.LabelSelectorApplyConfiguration) *ClusterTrustBundleProjectionApplyConfiguration { b.LabelSelector = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go index 1ef65f5a0..60be6fe80 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// ComponentConditionApplyConfiguration represents an declarative configuration of the ComponentCondition type for use +// ComponentConditionApplyConfiguration represents a declarative configuration of the ComponentCondition type for use // with apply. type ComponentConditionApplyConfiguration struct { - Type *v1.ComponentConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - Message *string `json:"message,omitempty"` - Error *string `json:"error,omitempty"` + Type *corev1.ComponentConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + Message *string `json:"message,omitempty"` + Error *string `json:"error,omitempty"` } -// ComponentConditionApplyConfiguration constructs an declarative configuration of the ComponentCondition type for use with +// ComponentConditionApplyConfiguration constructs a declarative configuration of the ComponentCondition type for use with // apply. func ComponentCondition() *ComponentConditionApplyConfiguration { return &ComponentConditionApplyConfiguration{} @@ -40,7 +40,7 @@ func ComponentCondition() *ComponentConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *ComponentConditionApplyConfiguration) WithType(value v1.ComponentConditionType) *ComponentConditionApplyConfiguration { +func (b *ComponentConditionApplyConfiguration) WithType(value corev1.ComponentConditionType) *ComponentConditionApplyConfiguration { b.Type = &value return b } @@ -48,7 +48,7 @@ func (b *ComponentConditionApplyConfiguration) WithType(value v1.ComponentCondit // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *ComponentConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ComponentConditionApplyConfiguration { +func (b *ComponentConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *ComponentConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go index 300e52694..340a55e2d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ComponentStatusApplyConfiguration represents an declarative configuration of the ComponentStatus type for use +// ComponentStatusApplyConfiguration represents a declarative configuration of the ComponentStatus type for use // with apply. type ComponentStatusApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"` } -// ComponentStatus constructs an declarative configuration of the ComponentStatus type for use with +// ComponentStatus constructs a declarative configuration of the ComponentStatus type for use with // apply. func ComponentStatus(name string) *ComponentStatusApplyConfiguration { b := &ComponentStatusApplyConfiguration{} @@ -56,18 +56,18 @@ func ComponentStatus(name string) *ComponentStatusApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) { +func ExtractComponentStatus(componentStatus *corev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) { return extractComponentStatus(componentStatus, fieldManager, "") } // ExtractComponentStatusStatus is the same as ExtractComponentStatus except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractComponentStatusStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) { +func ExtractComponentStatusStatus(componentStatus *corev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) { return extractComponentStatus(componentStatus, fieldManager, "status") } -func extractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string, subresource string) (*ComponentStatusApplyConfiguration, error) { +func extractComponentStatus(componentStatus *corev1.ComponentStatus, fieldManager string, subresource string) (*ComponentStatusApplyConfiguration, error) { b := &ComponentStatusApplyConfiguration{} err := managedfields.ExtractInto(componentStatus, internal.Parser().Type("io.k8s.api.core.v1.ComponentStatus"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldMan // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithKind(value string) *ComponentStatusApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *ComponentStatusApplyConfiguration) WithKind(value string) *ComponentSta // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithAPIVersion(value string) *ComponentStatusApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *ComponentStatusApplyConfiguration) WithAPIVersion(value string) *Compon // If called multiple times, the Name field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithName(value string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *ComponentStatusApplyConfiguration) WithName(value string) *ComponentSta // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithGenerateName(value string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *ComponentStatusApplyConfiguration) WithGenerateName(value string) *Comp // If called multiple times, the Namespace field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithNamespace(value string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *ComponentStatusApplyConfiguration) WithNamespace(value string) *Compone // If called multiple times, the UID field is set to the value of the last call. 
func (b *ComponentStatusApplyConfiguration) WithUID(value types.UID) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *ComponentStatusApplyConfiguration) WithUID(value types.UID) *ComponentS // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithResourceVersion(value string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *ComponentStatusApplyConfiguration) WithResourceVersion(value string) *C // If called multiple times, the Generation field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithGeneration(value int64) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ComponentStatusApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ComponentStatusApplyConfiguration { +func (b *ComponentStatusApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ComponentStatusApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ComponentStatusApplyConfiguration { +func (b *ComponentStatusApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *ComponentStatusApplyConfiguration) WithDeletionTimestamp(value metav1.T // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ComponentStatusApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *ComponentStatusApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. 
func (b *ComponentStatusApplyConfiguration) WithLabels(entries map[string]string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *ComponentStatusApplyConfiguration) WithLabels(entries map[string]string // overwriting an existing map entries in Annotations field with the same key. func (b *ComponentStatusApplyConfiguration) WithAnnotations(entries map[string]string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *ComponentStatusApplyConfiguration) WithAnnotations(entries map[string]s // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ComponentStatusApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ComponentStatusApplyConfiguration { +func (b *ComponentStatusApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *ComponentStatusApplyConfiguration) WithOwnerReferences(values ...*v1.Ow func (b *ComponentStatusApplyConfiguration) WithFinalizers(values ...string) *ComponentStatusApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ComponentStatusApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -250,3 +250,9 @@ func (b *ComponentStatusApplyConfiguration) WithConditions(values ...*ComponentC } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ComponentStatusApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go index f4cc7024d..2ff2c4d20 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go @@ -20,24 +20,24 @@ package v1 import ( corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ConfigMapApplyConfiguration represents an declarative configuration of the ConfigMap type for use +// ConfigMapApplyConfiguration represents a declarative configuration of the ConfigMap type for use // with apply. type ConfigMapApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Immutable *bool `json:"immutable,omitempty"` - Data map[string]string `json:"data,omitempty"` - BinaryData map[string][]byte `json:"binaryData,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Immutable *bool `json:"immutable,omitempty"` + Data map[string]string `json:"data,omitempty"` + BinaryData map[string][]byte `json:"binaryData,omitempty"` } -// ConfigMap constructs an declarative configuration of the ConfigMap type for use with +// ConfigMap constructs a declarative configuration of the ConfigMap type for use with // apply. func ConfigMap(name, namespace string) *ConfigMapApplyConfiguration { b := &ConfigMapApplyConfiguration{} @@ -88,7 +88,7 @@ func extractConfigMap(configMap *corev1.ConfigMap, fieldManager string, subresou // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithKind(value string) *ConfigMapApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *ConfigMapApplyConfiguration) WithKind(value string) *ConfigMapApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithAPIVersion(value string) *ConfigMapApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *ConfigMapApplyConfiguration) WithAPIVersion(value string) *ConfigMapApp // If called multiple times, the Name field is set to the value of the last call. 
func (b *ConfigMapApplyConfiguration) WithName(value string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *ConfigMapApplyConfiguration) WithName(value string) *ConfigMapApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithGenerateName(value string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *ConfigMapApplyConfiguration) WithGenerateName(value string) *ConfigMapA // If called multiple times, the Namespace field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithNamespace(value string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *ConfigMapApplyConfiguration) WithNamespace(value string) *ConfigMapAppl // If called multiple times, the UID field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithUID(value types.UID) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *ConfigMapApplyConfiguration) WithUID(value types.UID) *ConfigMapApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithResourceVersion(value string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,25 +150,25 @@ func (b *ConfigMapApplyConfiguration) WithResourceVersion(value string) *ConfigM // If called multiple times, the Generation field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithGeneration(value int64) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ConfigMapApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ConfigMapApplyConfiguration { +func (b *ConfigMapApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ConfigMapApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ConfigMapApplyConfiguration { +func (b *ConfigMapApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *ConfigMapApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ConfigMapApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *ConfigMapApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *ConfigMapApplyConfiguration) WithLabels(entries map[string]string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *ConfigMapApplyConfiguration) WithLabels(entries map[string]string) *Con // overwriting an existing map entries in Annotations field with the same key. func (b *ConfigMapApplyConfiguration) WithAnnotations(entries map[string]string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -214,13 +214,13 @@ func (b *ConfigMapApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ConfigMapApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ConfigMapApplyConfiguration { +func (b *ConfigMapApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,14 +231,14 @@ func (b *ConfigMapApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *ConfigMapApplyConfiguration) WithFinalizers(values ...string) *ConfigMapApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ConfigMapApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -277,3 +277,9 @@ func (b *ConfigMapApplyConfiguration) WithBinaryData(entries map[string][]byte) } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ConfigMapApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go index 8802fff48..4c0d2cbdd 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ConfigMapEnvSourceApplyConfiguration represents an declarative configuration of the ConfigMapEnvSource type for use +// ConfigMapEnvSourceApplyConfiguration represents a declarative configuration of the ConfigMapEnvSource type for use // with apply. type ConfigMapEnvSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` Optional *bool `json:"optional,omitempty"` } -// ConfigMapEnvSourceApplyConfiguration constructs an declarative configuration of the ConfigMapEnvSource type for use with +// ConfigMapEnvSourceApplyConfiguration constructs a declarative configuration of the ConfigMapEnvSource type for use with // apply. func ConfigMapEnvSource() *ConfigMapEnvSourceApplyConfiguration { return &ConfigMapEnvSourceApplyConfiguration{} @@ -35,7 +35,7 @@ func ConfigMapEnvSource() *ConfigMapEnvSourceApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
func (b *ConfigMapEnvSourceApplyConfiguration) WithName(value string) *ConfigMapEnvSourceApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go index 2a8c800af..97c0e7210 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapKeySelectorApplyConfiguration represents an declarative configuration of the ConfigMapKeySelector type for use +// ConfigMapKeySelectorApplyConfiguration represents a declarative configuration of the ConfigMapKeySelector type for use // with apply. type ConfigMapKeySelectorApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type ConfigMapKeySelectorApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapKeySelectorApplyConfiguration constructs an declarative configuration of the ConfigMapKeySelector type for use with +// ConfigMapKeySelectorApplyConfiguration constructs a declarative configuration of the ConfigMapKeySelector type for use with // apply. func ConfigMapKeySelector() *ConfigMapKeySelectorApplyConfiguration { return &ConfigMapKeySelectorApplyConfiguration{} @@ -36,7 +36,7 @@ func ConfigMapKeySelector() *ConfigMapKeySelectorApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *ConfigMapKeySelectorApplyConfiguration) WithName(value string) *ConfigMapKeySelectorApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go index da9655a54..135bb7d42 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ConfigMapNodeConfigSourceApplyConfiguration represents an declarative configuration of the ConfigMapNodeConfigSource type for use +// ConfigMapNodeConfigSourceApplyConfiguration represents a declarative configuration of the ConfigMapNodeConfigSource type for use // with apply. type ConfigMapNodeConfigSourceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` @@ -32,7 +32,7 @@ type ConfigMapNodeConfigSourceApplyConfiguration struct { KubeletConfigKey *string `json:"kubeletConfigKey,omitempty"` } -// ConfigMapNodeConfigSourceApplyConfiguration constructs an declarative configuration of the ConfigMapNodeConfigSource type for use with +// ConfigMapNodeConfigSourceApplyConfiguration constructs a declarative configuration of the ConfigMapNodeConfigSource type for use with // apply. 
func ConfigMapNodeConfigSource() *ConfigMapNodeConfigSourceApplyConfiguration { return &ConfigMapNodeConfigSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go index 7297d3a43..d8c5e21d3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapProjectionApplyConfiguration represents an declarative configuration of the ConfigMapProjection type for use +// ConfigMapProjectionApplyConfiguration represents a declarative configuration of the ConfigMapProjection type for use // with apply. type ConfigMapProjectionApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type ConfigMapProjectionApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapProjectionApplyConfiguration constructs an declarative configuration of the ConfigMapProjection type for use with +// ConfigMapProjectionApplyConfiguration constructs a declarative configuration of the ConfigMapProjection type for use with // apply. func ConfigMapProjection() *ConfigMapProjectionApplyConfiguration { return &ConfigMapProjectionApplyConfiguration{} @@ -36,7 +36,7 @@ func ConfigMapProjection() *ConfigMapProjectionApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *ConfigMapProjectionApplyConfiguration) WithName(value string) *ConfigMapProjectionApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go index deaebde31..b5f410397 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ConfigMapVolumeSourceApplyConfiguration represents an declarative configuration of the ConfigMapVolumeSource type for use +// ConfigMapVolumeSourceApplyConfiguration represents a declarative configuration of the ConfigMapVolumeSource type for use // with apply. type ConfigMapVolumeSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -27,7 +27,7 @@ type ConfigMapVolumeSourceApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// ConfigMapVolumeSourceApplyConfiguration constructs an declarative configuration of the ConfigMapVolumeSource type for use with +// ConfigMapVolumeSourceApplyConfiguration constructs a declarative configuration of the ConfigMapVolumeSource type for use with // apply. func ConfigMapVolumeSource() *ConfigMapVolumeSourceApplyConfiguration { return &ConfigMapVolumeSourceApplyConfiguration{} @@ -37,7 +37,7 @@ func ConfigMapVolumeSource() *ConfigMapVolumeSourceApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
func (b *ConfigMapVolumeSourceApplyConfiguration) WithName(value string) *ConfigMapVolumeSourceApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go index 32d715606..eed5f7d02 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ContainerApplyConfiguration represents an declarative configuration of the Container type for use +// ContainerApplyConfiguration represents a declarative configuration of the Container type for use // with apply. type ContainerApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -51,7 +51,7 @@ type ContainerApplyConfiguration struct { TTY *bool `json:"tty,omitempty"` } -// ContainerApplyConfiguration constructs an declarative configuration of the Container type for use with +// ContainerApplyConfiguration constructs a declarative configuration of the Container type for use with // apply. func Container() *ContainerApplyConfiguration { return &ContainerApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go index d5c874a7c..bc9428fd1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ContainerImageApplyConfiguration represents an declarative configuration of the ContainerImage type for use +// ContainerImageApplyConfiguration represents a declarative configuration of the ContainerImage type for use // with apply. type ContainerImageApplyConfiguration struct { Names []string `json:"names,omitempty"` SizeBytes *int64 `json:"sizeBytes,omitempty"` } -// ContainerImageApplyConfiguration constructs an declarative configuration of the ContainerImage type for use with +// ContainerImageApplyConfiguration constructs a declarative configuration of the ContainerImage type for use with // apply. func ContainerImage() *ContainerImageApplyConfiguration { return &ContainerImageApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go index a23ad9268..2ad47b3a9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// ContainerPortApplyConfiguration represents an declarative configuration of the ContainerPort type for use +// ContainerPortApplyConfiguration represents a declarative configuration of the ContainerPort type for use // with apply. 
type ContainerPortApplyConfiguration struct { - Name *string `json:"name,omitempty"` - HostPort *int32 `json:"hostPort,omitempty"` - ContainerPort *int32 `json:"containerPort,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` - HostIP *string `json:"hostIP,omitempty"` + Name *string `json:"name,omitempty"` + HostPort *int32 `json:"hostPort,omitempty"` + ContainerPort *int32 `json:"containerPort,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` + HostIP *string `json:"hostIP,omitempty"` } -// ContainerPortApplyConfiguration constructs an declarative configuration of the ContainerPort type for use with +// ContainerPortApplyConfiguration constructs a declarative configuration of the ContainerPort type for use with // apply. func ContainerPort() *ContainerPortApplyConfiguration { return &ContainerPortApplyConfiguration{} @@ -65,7 +65,7 @@ func (b *ContainerPortApplyConfiguration) WithContainerPort(value int32) *Contai // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. -func (b *ContainerPortApplyConfiguration) WithProtocol(value v1.Protocol) *ContainerPortApplyConfiguration { +func (b *ContainerPortApplyConfiguration) WithProtocol(value corev1.Protocol) *ContainerPortApplyConfiguration { b.Protocol = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go index bbbcbc9f1..d45dbceaf 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// ContainerResizePolicyApplyConfiguration represents an declarative configuration of the ContainerResizePolicy type for use +// ContainerResizePolicyApplyConfiguration represents a declarative configuration of the ContainerResizePolicy type for use // with apply. type ContainerResizePolicyApplyConfiguration struct { - ResourceName *v1.ResourceName `json:"resourceName,omitempty"` - RestartPolicy *v1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` + ResourceName *corev1.ResourceName `json:"resourceName,omitempty"` + RestartPolicy *corev1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"` } -// ContainerResizePolicyApplyConfiguration constructs an declarative configuration of the ContainerResizePolicy type for use with +// ContainerResizePolicyApplyConfiguration constructs a declarative configuration of the ContainerResizePolicy type for use with // apply. func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration { return &ContainerResizePolicyApplyConfiguration{} @@ -38,7 +38,7 @@ func ContainerResizePolicy() *ContainerResizePolicyApplyConfiguration { // WithResourceName sets the ResourceName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceName field is set to the value of the last call. 
-func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.ResourceName) *ContainerResizePolicyApplyConfiguration { +func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value corev1.ResourceName) *ContainerResizePolicyApplyConfiguration { b.ResourceName = &value return b } @@ -46,7 +46,7 @@ func (b *ContainerResizePolicyApplyConfiguration) WithResourceName(value v1.Reso // WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RestartPolicy field is set to the value of the last call. -func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value v1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration { +func (b *ContainerResizePolicyApplyConfiguration) WithRestartPolicy(value corev1.ResourceResizeRestartPolicy) *ContainerResizePolicyApplyConfiguration { b.RestartPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go index 6cbfc7fd9..b958e0177 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ContainerStateApplyConfiguration represents an declarative configuration of the ContainerState type for use +// ContainerStateApplyConfiguration represents a declarative configuration of the ContainerState type for use // with apply. type ContainerStateApplyConfiguration struct { Waiting *ContainerStateWaitingApplyConfiguration `json:"waiting,omitempty"` @@ -26,7 +26,7 @@ type ContainerStateApplyConfiguration struct { Terminated *ContainerStateTerminatedApplyConfiguration `json:"terminated,omitempty"` } -// ContainerStateApplyConfiguration constructs an declarative configuration of the ContainerState type for use with +// ContainerStateApplyConfiguration constructs a declarative configuration of the ContainerState type for use with // apply. func ContainerState() *ContainerStateApplyConfiguration { return &ContainerStateApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go index 6c1d7311e..0ed59c177 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ContainerStateRunningApplyConfiguration represents an declarative configuration of the ContainerStateRunning type for use +// ContainerStateRunningApplyConfiguration represents a declarative configuration of the ContainerStateRunning type for use // with apply. 
type ContainerStateRunningApplyConfiguration struct { - StartedAt *v1.Time `json:"startedAt,omitempty"` + StartedAt *metav1.Time `json:"startedAt,omitempty"` } -// ContainerStateRunningApplyConfiguration constructs an declarative configuration of the ContainerStateRunning type for use with +// ContainerStateRunningApplyConfiguration constructs a declarative configuration of the ContainerStateRunning type for use with // apply. func ContainerStateRunning() *ContainerStateRunningApplyConfiguration { return &ContainerStateRunningApplyConfiguration{} @@ -37,7 +37,7 @@ func ContainerStateRunning() *ContainerStateRunningApplyConfiguration { // WithStartedAt sets the StartedAt field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StartedAt field is set to the value of the last call. -func (b *ContainerStateRunningApplyConfiguration) WithStartedAt(value v1.Time) *ContainerStateRunningApplyConfiguration { +func (b *ContainerStateRunningApplyConfiguration) WithStartedAt(value metav1.Time) *ContainerStateRunningApplyConfiguration { b.StartedAt = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go index 0383c9dd9..cfadd93c9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ContainerStateTerminatedApplyConfiguration represents an declarative configuration of the ContainerStateTerminated type for use +// ContainerStateTerminatedApplyConfiguration represents a declarative configuration of the ContainerStateTerminated type for use // with apply. type ContainerStateTerminatedApplyConfiguration struct { - ExitCode *int32 `json:"exitCode,omitempty"` - Signal *int32 `json:"signal,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - StartedAt *v1.Time `json:"startedAt,omitempty"` - FinishedAt *v1.Time `json:"finishedAt,omitempty"` - ContainerID *string `json:"containerID,omitempty"` + ExitCode *int32 `json:"exitCode,omitempty"` + Signal *int32 `json:"signal,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + StartedAt *metav1.Time `json:"startedAt,omitempty"` + FinishedAt *metav1.Time `json:"finishedAt,omitempty"` + ContainerID *string `json:"containerID,omitempty"` } -// ContainerStateTerminatedApplyConfiguration constructs an declarative configuration of the ContainerStateTerminated type for use with +// ContainerStateTerminatedApplyConfiguration constructs a declarative configuration of the ContainerStateTerminated type for use with // apply. func ContainerStateTerminated() *ContainerStateTerminatedApplyConfiguration { return &ContainerStateTerminatedApplyConfiguration{} @@ -75,7 +75,7 @@ func (b *ContainerStateTerminatedApplyConfiguration) WithMessage(value string) * // WithStartedAt sets the StartedAt field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the StartedAt field is set to the value of the last call. -func (b *ContainerStateTerminatedApplyConfiguration) WithStartedAt(value v1.Time) *ContainerStateTerminatedApplyConfiguration { +func (b *ContainerStateTerminatedApplyConfiguration) WithStartedAt(value metav1.Time) *ContainerStateTerminatedApplyConfiguration { b.StartedAt = &value return b } @@ -83,7 +83,7 @@ func (b *ContainerStateTerminatedApplyConfiguration) WithStartedAt(value v1.Time // WithFinishedAt sets the FinishedAt field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FinishedAt field is set to the value of the last call. -func (b *ContainerStateTerminatedApplyConfiguration) WithFinishedAt(value v1.Time) *ContainerStateTerminatedApplyConfiguration { +func (b *ContainerStateTerminatedApplyConfiguration) WithFinishedAt(value metav1.Time) *ContainerStateTerminatedApplyConfiguration { b.FinishedAt = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go index e51b778c0..7756c7da0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ContainerStateWaitingApplyConfiguration represents an declarative configuration of the ContainerStateWaiting type for use +// ContainerStateWaitingApplyConfiguration represents a declarative configuration of the ContainerStateWaiting type for use // with apply. type ContainerStateWaitingApplyConfiguration struct { Reason *string `json:"reason,omitempty"` Message *string `json:"message,omitempty"` } -// ContainerStateWaitingApplyConfiguration constructs an declarative configuration of the ContainerStateWaiting type for use with +// ContainerStateWaitingApplyConfiguration constructs a declarative configuration of the ContainerStateWaiting type for use with // apply. func ContainerStateWaiting() *ContainerStateWaitingApplyConfiguration { return &ContainerStateWaitingApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go index e3f774bbb..6a28939c2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go @@ -22,24 +22,26 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ContainerStatusApplyConfiguration represents an declarative configuration of the ContainerStatus type for use +// ContainerStatusApplyConfiguration represents a declarative configuration of the ContainerStatus type for use // with apply. 
type ContainerStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - State *ContainerStateApplyConfiguration `json:"state,omitempty"` - LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` - Ready *bool `json:"ready,omitempty"` - RestartCount *int32 `json:"restartCount,omitempty"` - Image *string `json:"image,omitempty"` - ImageID *string `json:"imageID,omitempty"` - ContainerID *string `json:"containerID,omitempty"` - Started *bool `json:"started,omitempty"` - AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` - Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeMounts []VolumeMountStatusApplyConfiguration `json:"volumeMounts,omitempty"` -} - -// ContainerStatusApplyConfiguration constructs an declarative configuration of the ContainerStatus type for use with + Name *string `json:"name,omitempty"` + State *ContainerStateApplyConfiguration `json:"state,omitempty"` + LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"` + Ready *bool `json:"ready,omitempty"` + RestartCount *int32 `json:"restartCount,omitempty"` + Image *string `json:"image,omitempty"` + ImageID *string `json:"imageID,omitempty"` + ContainerID *string `json:"containerID,omitempty"` + Started *bool `json:"started,omitempty"` + AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + VolumeMounts []VolumeMountStatusApplyConfiguration `json:"volumeMounts,omitempty"` + User *ContainerUserApplyConfiguration `json:"user,omitempty"` + AllocatedResourcesStatus []ResourceStatusApplyConfiguration `json:"allocatedResourcesStatus,omitempty"` +} + +// ContainerStatusApplyConfiguration constructs a declarative configuration of the ContainerStatus type for use with // apply. func ContainerStatus() *ContainerStatusApplyConfiguration { return &ContainerStatusApplyConfiguration{} @@ -145,3 +147,24 @@ func (b *ContainerStatusApplyConfiguration) WithVolumeMounts(values ...*VolumeMo } return b } + +// WithUser sets the User field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the User field is set to the value of the last call. +func (b *ContainerStatusApplyConfiguration) WithUser(value *ContainerUserApplyConfiguration) *ContainerStatusApplyConfiguration { + b.User = value + return b +} + +// WithAllocatedResourcesStatus adds the given value to the AllocatedResourcesStatus field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllocatedResourcesStatus field. 
+func (b *ContainerStatusApplyConfiguration) WithAllocatedResourcesStatus(values ...*ResourceStatusApplyConfiguration) *ContainerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAllocatedResourcesStatus") + } + b.AllocatedResourcesStatus = append(b.AllocatedResourcesStatus, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go new file mode 100644 index 000000000..34ec8e414 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ContainerUserApplyConfiguration represents a declarative configuration of the ContainerUser type for use +// with apply. +type ContainerUserApplyConfiguration struct { + Linux *LinuxContainerUserApplyConfiguration `json:"linux,omitempty"` +} + +// ContainerUserApplyConfiguration constructs a declarative configuration of the ContainerUser type for use with +// apply. +func ContainerUser() *ContainerUserApplyConfiguration { + return &ContainerUserApplyConfiguration{} +} + +// WithLinux sets the Linux field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Linux field is set to the value of the last call. +func (b *ContainerUserApplyConfiguration) WithLinux(value *LinuxContainerUserApplyConfiguration) *ContainerUserApplyConfiguration { + b.Linux = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go index 2fc681604..a614d1080 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSIPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the CSIPersistentVolumeSource type for use +// CSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CSIPersistentVolumeSource type for use // with apply. 
type CSIPersistentVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -33,7 +33,7 @@ type CSIPersistentVolumeSourceApplyConfiguration struct { NodeExpandSecretRef *SecretReferenceApplyConfiguration `json:"nodeExpandSecretRef,omitempty"` } -// CSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIPersistentVolumeSource type for use with +// CSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIPersistentVolumeSource type for use with // apply. func CSIPersistentVolumeSource() *CSIPersistentVolumeSourceApplyConfiguration { return &CSIPersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go index c2a32df8d..b58d9bbb4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSIVolumeSourceApplyConfiguration represents an declarative configuration of the CSIVolumeSource type for use +// CSIVolumeSourceApplyConfiguration represents a declarative configuration of the CSIVolumeSource type for use // with apply. type CSIVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type CSIVolumeSourceApplyConfiguration struct { NodePublishSecretRef *LocalObjectReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"` } -// CSIVolumeSourceApplyConfiguration constructs an declarative configuration of the CSIVolumeSource type for use with +// CSIVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIVolumeSource type for use with // apply. func CSIVolumeSource() *CSIVolumeSourceApplyConfiguration { return &CSIVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go index 13a2e948f..5be27ec0c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// DaemonEndpointApplyConfiguration represents an declarative configuration of the DaemonEndpoint type for use +// DaemonEndpointApplyConfiguration represents a declarative configuration of the DaemonEndpoint type for use // with apply. type DaemonEndpointApplyConfiguration struct { Port *int32 `json:"Port,omitempty"` } -// DaemonEndpointApplyConfiguration constructs an declarative configuration of the DaemonEndpoint type for use with +// DaemonEndpointApplyConfiguration constructs a declarative configuration of the DaemonEndpoint type for use with // apply. 
func DaemonEndpoint() *DaemonEndpointApplyConfiguration { return &DaemonEndpointApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go index f88a87c0b..ed6b8b1bb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// DownwardAPIProjectionApplyConfiguration represents an declarative configuration of the DownwardAPIProjection type for use +// DownwardAPIProjectionApplyConfiguration represents a declarative configuration of the DownwardAPIProjection type for use // with apply. type DownwardAPIProjectionApplyConfiguration struct { Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"` } -// DownwardAPIProjectionApplyConfiguration constructs an declarative configuration of the DownwardAPIProjection type for use with +// DownwardAPIProjectionApplyConfiguration constructs a declarative configuration of the DownwardAPIProjection type for use with // apply. func DownwardAPIProjection() *DownwardAPIProjectionApplyConfiguration { return &DownwardAPIProjectionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go index b25ff25fa..ec9d013dd 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// DownwardAPIVolumeFileApplyConfiguration represents an declarative configuration of the DownwardAPIVolumeFile type for use +// DownwardAPIVolumeFileApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeFile type for use // with apply. type DownwardAPIVolumeFileApplyConfiguration struct { Path *string `json:"path,omitempty"` @@ -27,7 +27,7 @@ type DownwardAPIVolumeFileApplyConfiguration struct { Mode *int32 `json:"mode,omitempty"` } -// DownwardAPIVolumeFileApplyConfiguration constructs an declarative configuration of the DownwardAPIVolumeFile type for use with +// DownwardAPIVolumeFileApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeFile type for use with // apply. func DownwardAPIVolumeFile() *DownwardAPIVolumeFileApplyConfiguration { return &DownwardAPIVolumeFileApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go index 6913bb521..eef9d7ef8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// DownwardAPIVolumeSourceApplyConfiguration represents an declarative configuration of the DownwardAPIVolumeSource type for use +// DownwardAPIVolumeSourceApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeSource type for use // with apply. 
type DownwardAPIVolumeSourceApplyConfiguration struct { Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"` DefaultMode *int32 `json:"defaultMode,omitempty"` } -// DownwardAPIVolumeSourceApplyConfiguration constructs an declarative configuration of the DownwardAPIVolumeSource type for use with +// DownwardAPIVolumeSourceApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeSource type for use with // apply. func DownwardAPIVolumeSource() *DownwardAPIVolumeSourceApplyConfiguration { return &DownwardAPIVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go index 021280daf..63e9f56ab 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" resource "k8s.io/apimachinery/pkg/api/resource" ) -// EmptyDirVolumeSourceApplyConfiguration represents an declarative configuration of the EmptyDirVolumeSource type for use +// EmptyDirVolumeSourceApplyConfiguration represents a declarative configuration of the EmptyDirVolumeSource type for use // with apply. type EmptyDirVolumeSourceApplyConfiguration struct { - Medium *v1.StorageMedium `json:"medium,omitempty"` - SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"` + Medium *corev1.StorageMedium `json:"medium,omitempty"` + SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"` } -// EmptyDirVolumeSourceApplyConfiguration constructs an declarative configuration of the EmptyDirVolumeSource type for use with +// EmptyDirVolumeSourceApplyConfiguration constructs a declarative configuration of the EmptyDirVolumeSource type for use with // apply. func EmptyDirVolumeSource() *EmptyDirVolumeSourceApplyConfiguration { return &EmptyDirVolumeSourceApplyConfiguration{} @@ -39,7 +39,7 @@ func EmptyDirVolumeSource() *EmptyDirVolumeSourceApplyConfiguration { // WithMedium sets the Medium field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Medium field is set to the value of the last call. -func (b *EmptyDirVolumeSourceApplyConfiguration) WithMedium(value v1.StorageMedium) *EmptyDirVolumeSourceApplyConfiguration { +func (b *EmptyDirVolumeSourceApplyConfiguration) WithMedium(value corev1.StorageMedium) *EmptyDirVolumeSourceApplyConfiguration { b.Medium = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go index 52a54b600..536e697a9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointAddressApplyConfiguration represents an declarative configuration of the EndpointAddress type for use +// EndpointAddressApplyConfiguration represents a declarative configuration of the EndpointAddress type for use // with apply. 
type EndpointAddressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -27,7 +27,7 @@ type EndpointAddressApplyConfiguration struct { TargetRef *ObjectReferenceApplyConfiguration `json:"targetRef,omitempty"` } -// EndpointAddressApplyConfiguration constructs an declarative configuration of the EndpointAddress type for use with +// EndpointAddressApplyConfiguration constructs a declarative configuration of the EndpointAddress type for use with // apply. func EndpointAddress() *EndpointAddressApplyConfiguration { return &EndpointAddressApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go index cc00d0e49..05ee64ddc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Port *int32 `json:"port,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` - AppProtocol *string `json:"appProtocol,omitempty"` + Name *string `json:"name,omitempty"` + Port *int32 `json:"port,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` + AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} @@ -56,7 +56,7 @@ func (b *EndpointPortApplyConfiguration) WithPort(value int32) *EndpointPortAppl // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. -func (b *EndpointPortApplyConfiguration) WithProtocol(value v1.Protocol) *EndpointPortApplyConfiguration { +func (b *EndpointPortApplyConfiguration) WithProtocol(value corev1.Protocol) *EndpointPortApplyConfiguration { b.Protocol = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go index b98fed085..d2f910196 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go @@ -19,23 +19,23 @@ limitations under the License. 
package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointsApplyConfiguration represents an declarative configuration of the Endpoints type for use +// EndpointsApplyConfiguration represents a declarative configuration of the Endpoints type for use // with apply. type EndpointsApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"` } -// Endpoints constructs an declarative configuration of the Endpoints type for use with +// Endpoints constructs a declarative configuration of the Endpoints type for use with // apply. func Endpoints(name, namespace string) *EndpointsApplyConfiguration { b := &EndpointsApplyConfiguration{} @@ -57,18 +57,18 @@ func Endpoints(name, namespace string) *EndpointsApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) { +func ExtractEndpoints(endpoints *corev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) { return extractEndpoints(endpoints, fieldManager, "") } // ExtractEndpointsStatus is the same as ExtractEndpoints except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractEndpointsStatus(endpoints *apicorev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) { +func ExtractEndpointsStatus(endpoints *corev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) { return extractEndpoints(endpoints, fieldManager, "status") } -func extractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string, subresource string) (*EndpointsApplyConfiguration, error) { +func extractEndpoints(endpoints *corev1.Endpoints, fieldManager string, subresource string) (*EndpointsApplyConfiguration, error) { b := &EndpointsApplyConfiguration{} err := managedfields.ExtractInto(endpoints, internal.Parser().Type("io.k8s.api.core.v1.Endpoints"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string, subre // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithKind(value string) *EndpointsApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *EndpointsApplyConfiguration) WithKind(value string) *EndpointsApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the APIVersion field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithAPIVersion(value string) *EndpointsApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *EndpointsApplyConfiguration) WithAPIVersion(value string) *EndpointsApp // If called multiple times, the Name field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithName(value string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *EndpointsApplyConfiguration) WithName(value string) *EndpointsApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithGenerateName(value string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *EndpointsApplyConfiguration) WithGenerateName(value string) *EndpointsA // If called multiple times, the Namespace field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithNamespace(value string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *EndpointsApplyConfiguration) WithNamespace(value string) *EndpointsAppl // If called multiple times, the UID field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithUID(value types.UID) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *EndpointsApplyConfiguration) WithUID(value types.UID) *EndpointsApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithResourceVersion(value string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *EndpointsApplyConfiguration) WithResourceVersion(value string) *Endpoin // If called multiple times, the Generation field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithGeneration(value int64) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *EndpointsApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EndpointsApplyConfiguration { +func (b *EndpointsApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *EndpointsApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EndpointsApplyConfiguration { +func (b *EndpointsApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *EndpointsApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EndpointsApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *EndpointsApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *EndpointsApplyConfiguration) WithLabels(entries map[string]string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *EndpointsApplyConfiguration) WithLabels(entries map[string]string) *End // overwriting an existing map entries in Annotations field with the same key. func (b *EndpointsApplyConfiguration) WithAnnotations(entries map[string]string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *EndpointsApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *EndpointsApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EndpointsApplyConfiguration { +func (b *EndpointsApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *EndpointsApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *EndpointsApplyConfiguration) WithFinalizers(values ...string) *EndpointsApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EndpointsApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -252,3 +252,9 @@ func (b *EndpointsApplyConfiguration) WithSubsets(values ...*EndpointSubsetApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EndpointsApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go index cd0657a80..33cd8496a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointSubsetApplyConfiguration represents an declarative configuration of the EndpointSubset type for use +// EndpointSubsetApplyConfiguration represents a declarative configuration of the EndpointSubset type for use // with apply. type EndpointSubsetApplyConfiguration struct { Addresses []EndpointAddressApplyConfiguration `json:"addresses,omitempty"` @@ -26,7 +26,7 @@ type EndpointSubsetApplyConfiguration struct { Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSubsetApplyConfiguration constructs an declarative configuration of the EndpointSubset type for use with +// EndpointSubsetApplyConfiguration constructs a declarative configuration of the EndpointSubset type for use with // apply. func EndpointSubset() *EndpointSubsetApplyConfiguration { return &EndpointSubsetApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go index 9e46d25de..7aa181cf1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// EnvFromSourceApplyConfiguration represents an declarative configuration of the EnvFromSource type for use +// EnvFromSourceApplyConfiguration represents a declarative configuration of the EnvFromSource type for use // with apply. type EnvFromSourceApplyConfiguration struct { Prefix *string `json:"prefix,omitempty"` @@ -26,7 +26,7 @@ type EnvFromSourceApplyConfiguration struct { SecretRef *SecretEnvSourceApplyConfiguration `json:"secretRef,omitempty"` } -// EnvFromSourceApplyConfiguration constructs an declarative configuration of the EnvFromSource type for use with +// EnvFromSourceApplyConfiguration constructs a declarative configuration of the EnvFromSource type for use with // apply. func EnvFromSource() *EnvFromSourceApplyConfiguration { return &EnvFromSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go index a83528a28..5894166ca 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvVarApplyConfiguration represents an declarative configuration of the EnvVar type for use +// EnvVarApplyConfiguration represents a declarative configuration of the EnvVar type for use // with apply. type EnvVarApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -26,7 +26,7 @@ type EnvVarApplyConfiguration struct { ValueFrom *EnvVarSourceApplyConfiguration `json:"valueFrom,omitempty"` } -// EnvVarApplyConfiguration constructs an declarative configuration of the EnvVar type for use with +// EnvVarApplyConfiguration constructs a declarative configuration of the EnvVar type for use with // apply. func EnvVar() *EnvVarApplyConfiguration { return &EnvVarApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go index 70c695bd5..a3a55ea7a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EnvVarSourceApplyConfiguration represents an declarative configuration of the EnvVarSource type for use +// EnvVarSourceApplyConfiguration represents a declarative configuration of the EnvVarSource type for use // with apply. type EnvVarSourceApplyConfiguration struct { FieldRef *ObjectFieldSelectorApplyConfiguration `json:"fieldRef,omitempty"` @@ -27,7 +27,7 @@ type EnvVarSourceApplyConfiguration struct { SecretKeyRef *SecretKeySelectorApplyConfiguration `json:"secretKeyRef,omitempty"` } -// EnvVarSourceApplyConfiguration constructs an declarative configuration of the EnvVarSource type for use with +// EnvVarSourceApplyConfiguration constructs a declarative configuration of the EnvVarSource type for use with // apply. 
func EnvVarSource() *EnvVarSourceApplyConfiguration { return &EnvVarSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go index 5fa79a246..4b74439fc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go @@ -22,14 +22,14 @@ import ( corev1 "k8s.io/api/core/v1" ) -// EphemeralContainerApplyConfiguration represents an declarative configuration of the EphemeralContainer type for use +// EphemeralContainerApplyConfiguration represents a declarative configuration of the EphemeralContainer type for use // with apply. type EphemeralContainerApplyConfiguration struct { EphemeralContainerCommonApplyConfiguration `json:",inline"` TargetContainerName *string `json:"targetContainerName,omitempty"` } -// EphemeralContainerApplyConfiguration constructs an declarative configuration of the EphemeralContainer type for use with +// EphemeralContainerApplyConfiguration constructs a declarative configuration of the EphemeralContainer type for use with // apply. func EphemeralContainer() *EphemeralContainerApplyConfiguration { return &EphemeralContainerApplyConfiguration{} @@ -39,7 +39,7 @@ func EphemeralContainer() *EphemeralContainerApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithName(value string) *EphemeralContainerApplyConfiguration { - b.Name = &value + b.EphemeralContainerCommonApplyConfiguration.Name = &value return b } @@ -47,7 +47,7 @@ func (b *EphemeralContainerApplyConfiguration) WithName(value string) *Ephemeral // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Image field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithImage(value string) *EphemeralContainerApplyConfiguration { - b.Image = &value + b.EphemeralContainerCommonApplyConfiguration.Image = &value return b } @@ -56,7 +56,7 @@ func (b *EphemeralContainerApplyConfiguration) WithImage(value string) *Ephemera // If called multiple times, values provided by each call will be appended to the Command field. func (b *EphemeralContainerApplyConfiguration) WithCommand(values ...string) *EphemeralContainerApplyConfiguration { for i := range values { - b.Command = append(b.Command, values[i]) + b.EphemeralContainerCommonApplyConfiguration.Command = append(b.EphemeralContainerCommonApplyConfiguration.Command, values[i]) } return b } @@ -66,7 +66,7 @@ func (b *EphemeralContainerApplyConfiguration) WithCommand(values ...string) *Ep // If called multiple times, values provided by each call will be appended to the Args field. func (b *EphemeralContainerApplyConfiguration) WithArgs(values ...string) *EphemeralContainerApplyConfiguration { for i := range values { - b.Args = append(b.Args, values[i]) + b.EphemeralContainerCommonApplyConfiguration.Args = append(b.EphemeralContainerCommonApplyConfiguration.Args, values[i]) } return b } @@ -75,7 +75,7 @@ func (b *EphemeralContainerApplyConfiguration) WithArgs(values ...string) *Ephem // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the WorkingDir field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithWorkingDir(value string) *EphemeralContainerApplyConfiguration { - b.WorkingDir = &value + b.EphemeralContainerCommonApplyConfiguration.WorkingDir = &value return b } @@ -87,7 +87,7 @@ func (b *EphemeralContainerApplyConfiguration) WithPorts(values ...*ContainerPor if values[i] == nil { panic("nil value passed to WithPorts") } - b.Ports = append(b.Ports, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.Ports = append(b.EphemeralContainerCommonApplyConfiguration.Ports, *values[i]) } return b } @@ -100,7 +100,7 @@ func (b *EphemeralContainerApplyConfiguration) WithEnvFrom(values ...*EnvFromSou if values[i] == nil { panic("nil value passed to WithEnvFrom") } - b.EnvFrom = append(b.EnvFrom, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.EnvFrom = append(b.EphemeralContainerCommonApplyConfiguration.EnvFrom, *values[i]) } return b } @@ -113,7 +113,7 @@ func (b *EphemeralContainerApplyConfiguration) WithEnv(values ...*EnvVarApplyCon if values[i] == nil { panic("nil value passed to WithEnv") } - b.Env = append(b.Env, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.Env = append(b.EphemeralContainerCommonApplyConfiguration.Env, *values[i]) } return b } @@ -122,7 +122,7 @@ func (b *EphemeralContainerApplyConfiguration) WithEnv(values ...*EnvVarApplyCon // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Resources field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.Resources = value + b.EphemeralContainerCommonApplyConfiguration.Resources = value return b } @@ -134,7 +134,7 @@ func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*Conta if values[i] == nil { panic("nil value passed to WithResizePolicy") } - b.ResizePolicy = append(b.ResizePolicy, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.ResizePolicy = append(b.EphemeralContainerCommonApplyConfiguration.ResizePolicy, *values[i]) } return b } @@ -143,7 +143,7 @@ func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*Conta // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RestartPolicy field is set to the value of the last call. 
func (b *EphemeralContainerApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *EphemeralContainerApplyConfiguration { - b.RestartPolicy = &value + b.EphemeralContainerCommonApplyConfiguration.RestartPolicy = &value return b } @@ -155,7 +155,7 @@ func (b *EphemeralContainerApplyConfiguration) WithVolumeMounts(values ...*Volum if values[i] == nil { panic("nil value passed to WithVolumeMounts") } - b.VolumeMounts = append(b.VolumeMounts, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.VolumeMounts = append(b.EphemeralContainerCommonApplyConfiguration.VolumeMounts, *values[i]) } return b } @@ -168,7 +168,7 @@ func (b *EphemeralContainerApplyConfiguration) WithVolumeDevices(values ...*Volu if values[i] == nil { panic("nil value passed to WithVolumeDevices") } - b.VolumeDevices = append(b.VolumeDevices, *values[i]) + b.EphemeralContainerCommonApplyConfiguration.VolumeDevices = append(b.EphemeralContainerCommonApplyConfiguration.VolumeDevices, *values[i]) } return b } @@ -177,7 +177,7 @@ func (b *EphemeralContainerApplyConfiguration) WithVolumeDevices(values ...*Volu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LivenessProbe field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithLivenessProbe(value *ProbeApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.LivenessProbe = value + b.EphemeralContainerCommonApplyConfiguration.LivenessProbe = value return b } @@ -185,7 +185,7 @@ func (b *EphemeralContainerApplyConfiguration) WithLivenessProbe(value *ProbeApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ReadinessProbe field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithReadinessProbe(value *ProbeApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.ReadinessProbe = value + b.EphemeralContainerCommonApplyConfiguration.ReadinessProbe = value return b } @@ -193,7 +193,7 @@ func (b *EphemeralContainerApplyConfiguration) WithReadinessProbe(value *ProbeAp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StartupProbe field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithStartupProbe(value *ProbeApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.StartupProbe = value + b.EphemeralContainerCommonApplyConfiguration.StartupProbe = value return b } @@ -201,7 +201,7 @@ func (b *EphemeralContainerApplyConfiguration) WithStartupProbe(value *ProbeAppl // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Lifecycle field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithLifecycle(value *LifecycleApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.Lifecycle = value + b.EphemeralContainerCommonApplyConfiguration.Lifecycle = value return b } @@ -209,7 +209,7 @@ func (b *EphemeralContainerApplyConfiguration) WithLifecycle(value *LifecycleApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TerminationMessagePath field is set to the value of the last call. 
func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePath(value string) *EphemeralContainerApplyConfiguration { - b.TerminationMessagePath = &value + b.EphemeralContainerCommonApplyConfiguration.TerminationMessagePath = &value return b } @@ -217,7 +217,7 @@ func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePath(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TerminationMessagePolicy field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePolicy(value corev1.TerminationMessagePolicy) *EphemeralContainerApplyConfiguration { - b.TerminationMessagePolicy = &value + b.EphemeralContainerCommonApplyConfiguration.TerminationMessagePolicy = &value return b } @@ -225,7 +225,7 @@ func (b *EphemeralContainerApplyConfiguration) WithTerminationMessagePolicy(valu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ImagePullPolicy field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithImagePullPolicy(value corev1.PullPolicy) *EphemeralContainerApplyConfiguration { - b.ImagePullPolicy = &value + b.EphemeralContainerCommonApplyConfiguration.ImagePullPolicy = &value return b } @@ -233,7 +233,7 @@ func (b *EphemeralContainerApplyConfiguration) WithImagePullPolicy(value corev1. // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the SecurityContext field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithSecurityContext(value *SecurityContextApplyConfiguration) *EphemeralContainerApplyConfiguration { - b.SecurityContext = value + b.EphemeralContainerCommonApplyConfiguration.SecurityContext = value return b } @@ -241,7 +241,7 @@ func (b *EphemeralContainerApplyConfiguration) WithSecurityContext(value *Securi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Stdin field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithStdin(value bool) *EphemeralContainerApplyConfiguration { - b.Stdin = &value + b.EphemeralContainerCommonApplyConfiguration.Stdin = &value return b } @@ -249,7 +249,7 @@ func (b *EphemeralContainerApplyConfiguration) WithStdin(value bool) *EphemeralC // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StdinOnce field is set to the value of the last call. func (b *EphemeralContainerApplyConfiguration) WithStdinOnce(value bool) *EphemeralContainerApplyConfiguration { - b.StdinOnce = &value + b.EphemeralContainerCommonApplyConfiguration.StdinOnce = &value return b } @@ -257,7 +257,7 @@ func (b *EphemeralContainerApplyConfiguration) WithStdinOnce(value bool) *Epheme // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TTY field is set to the value of the last call. 
func (b *EphemeralContainerApplyConfiguration) WithTTY(value bool) *EphemeralContainerApplyConfiguration { - b.TTY = &value + b.EphemeralContainerCommonApplyConfiguration.TTY = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go index 8cded29a9..d5d13d27a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// EphemeralContainerCommonApplyConfiguration represents an declarative configuration of the EphemeralContainerCommon type for use +// EphemeralContainerCommonApplyConfiguration represents a declarative configuration of the EphemeralContainerCommon type for use // with apply. type EphemeralContainerCommonApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -51,7 +51,7 @@ type EphemeralContainerCommonApplyConfiguration struct { TTY *bool `json:"tty,omitempty"` } -// EphemeralContainerCommonApplyConfiguration constructs an declarative configuration of the EphemeralContainerCommon type for use with +// EphemeralContainerCommonApplyConfiguration constructs a declarative configuration of the EphemeralContainerCommon type for use with // apply. func EphemeralContainerCommon() *EphemeralContainerCommonApplyConfiguration { return &EphemeralContainerCommonApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go index 31859404c..d2c8c6722 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// EphemeralVolumeSourceApplyConfiguration represents an declarative configuration of the EphemeralVolumeSource type for use +// EphemeralVolumeSourceApplyConfiguration represents a declarative configuration of the EphemeralVolumeSource type for use // with apply. type EphemeralVolumeSourceApplyConfiguration struct { VolumeClaimTemplate *PersistentVolumeClaimTemplateApplyConfiguration `json:"volumeClaimTemplate,omitempty"` } -// EphemeralVolumeSourceApplyConfiguration constructs an declarative configuration of the EphemeralVolumeSource type for use with +// EphemeralVolumeSourceApplyConfiguration constructs a declarative configuration of the EphemeralVolumeSource type for use with // apply. func EphemeralVolumeSource() *EphemeralVolumeSourceApplyConfiguration { return &EphemeralVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go index 60aff6b5b..9496ea773 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go @@ -19,36 +19,36 @@ limitations under the License. 
package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - InvolvedObject *ObjectReferenceApplyConfiguration `json:"involvedObject,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` - Source *EventSourceApplyConfiguration `json:"source,omitempty"` - FirstTimestamp *metav1.Time `json:"firstTimestamp,omitempty"` - LastTimestamp *metav1.Time `json:"lastTimestamp,omitempty"` - Count *int32 `json:"count,omitempty"` - Type *string `json:"type,omitempty"` - EventTime *metav1.MicroTime `json:"eventTime,omitempty"` - Series *EventSeriesApplyConfiguration `json:"series,omitempty"` - Action *string `json:"action,omitempty"` - Related *ObjectReferenceApplyConfiguration `json:"related,omitempty"` - ReportingController *string `json:"reportingComponent,omitempty"` - ReportingInstance *string `json:"reportingInstance,omitempty"` -} - -// Event constructs an declarative configuration of the Event type for use with + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + InvolvedObject *ObjectReferenceApplyConfiguration `json:"involvedObject,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + Source *EventSourceApplyConfiguration `json:"source,omitempty"` + FirstTimestamp *apismetav1.Time `json:"firstTimestamp,omitempty"` + LastTimestamp *apismetav1.Time `json:"lastTimestamp,omitempty"` + Count *int32 `json:"count,omitempty"` + Type *string `json:"type,omitempty"` + EventTime *apismetav1.MicroTime `json:"eventTime,omitempty"` + Series *EventSeriesApplyConfiguration `json:"series,omitempty"` + Action *string `json:"action,omitempty"` + Related *ObjectReferenceApplyConfiguration `json:"related,omitempty"` + ReportingController *string `json:"reportingComponent,omitempty"` + ReportingInstance *string `json:"reportingInstance,omitempty"` +} + +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -70,18 +70,18 @@ func Event(name, namespace string) *EventApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractEvent(event *apicorev1.Event, fieldManager string) (*EventApplyConfiguration, error) { +func ExtractEvent(event *corev1.Event, fieldManager string) (*EventApplyConfiguration, error) { return extractEvent(event, fieldManager, "") } // ExtractEventStatus is the same as ExtractEvent except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractEventStatus(event *apicorev1.Event, fieldManager string) (*EventApplyConfiguration, error) { +func ExtractEventStatus(event *corev1.Event, fieldManager string) (*EventApplyConfiguration, error) { return extractEvent(event, fieldManager, "status") } -func extractEvent(event *apicorev1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { +func extractEvent(event *corev1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { b := &EventApplyConfiguration{} err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.core.v1.Event"), fieldManager, b, subresource) if err != nil { @@ -99,7 +99,7 @@ func extractEvent(event *apicorev1.Event, fieldManager string, subresource strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -107,7 +107,7 @@ func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -116,7 +116,7 @@ func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -125,7 +125,7 @@ func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -134,7 +134,7 @@ func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -143,7 +143,7 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -152,7 +152,7 @@ func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -161,25 +161,25 @@ func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -188,7 +188,7 @@ func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Even // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -198,11 +198,11 @@ func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *E // overwriting an existing map entries in Labels field with the same key. func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -213,11 +213,11 @@ func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventAp // overwriting an existing map entries in Annotations field with the same key. 
func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -225,13 +225,13 @@ func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *Ev // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -242,14 +242,14 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EventApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -288,7 +288,7 @@ func (b *EventApplyConfiguration) WithSource(value *EventSourceApplyConfiguratio // WithFirstTimestamp sets the FirstTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FirstTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithFirstTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithFirstTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.FirstTimestamp = &value return b } @@ -296,7 +296,7 @@ func (b *EventApplyConfiguration) WithFirstTimestamp(value metav1.Time) *EventAp // WithLastTimestamp sets the LastTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastTimestamp field is set to the value of the last call. 
-func (b *EventApplyConfiguration) WithLastTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithLastTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.LastTimestamp = &value return b } @@ -320,7 +320,7 @@ func (b *EventApplyConfiguration) WithType(value string) *EventApplyConfiguratio // WithEventTime sets the EventTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the EventTime field is set to the value of the last call. -func (b *EventApplyConfiguration) WithEventTime(value metav1.MicroTime) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithEventTime(value apismetav1.MicroTime) *EventApplyConfiguration { b.EventTime = &value return b } @@ -364,3 +364,9 @@ func (b *EventApplyConfiguration) WithReportingInstance(value string) *EventAppl b.ReportingInstance = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go index e66fb4127..c90954bcc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { - Count *int32 `json:"count,omitempty"` - LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` + Count *int32 `json:"count,omitempty"` + LastObservedTime *metav1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} @@ -46,7 +46,7 @@ func (b *EventSeriesApplyConfiguration) WithCount(value int32) *EventSeriesApply // WithLastObservedTime sets the LastObservedTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastObservedTime field is set to the value of the last call. 
-func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value v1.MicroTime) *EventSeriesApplyConfiguration { +func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value metav1.MicroTime) *EventSeriesApplyConfiguration { b.LastObservedTime = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go index 2eb4aa8e4..97edb0493 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// EventSourceApplyConfiguration represents an declarative configuration of the EventSource type for use +// EventSourceApplyConfiguration represents a declarative configuration of the EventSource type for use // with apply. type EventSourceApplyConfiguration struct { Component *string `json:"component,omitempty"` Host *string `json:"host,omitempty"` } -// EventSourceApplyConfiguration constructs an declarative configuration of the EventSource type for use with +// EventSourceApplyConfiguration constructs a declarative configuration of the EventSource type for use with // apply. func EventSource() *EventSourceApplyConfiguration { return &EventSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go index 1df52144d..b7208a91c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ExecActionApplyConfiguration represents an declarative configuration of the ExecAction type for use +// ExecActionApplyConfiguration represents a declarative configuration of the ExecAction type for use // with apply. type ExecActionApplyConfiguration struct { Command []string `json:"command,omitempty"` } -// ExecActionApplyConfiguration constructs an declarative configuration of the ExecAction type for use with +// ExecActionApplyConfiguration constructs a declarative configuration of the ExecAction type for use with // apply. func ExecAction() *ExecActionApplyConfiguration { return &ExecActionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go index 43069de9a..000ff2cc6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FCVolumeSourceApplyConfiguration represents an declarative configuration of the FCVolumeSource type for use +// FCVolumeSourceApplyConfiguration represents a declarative configuration of the FCVolumeSource type for use // with apply. 
type FCVolumeSourceApplyConfiguration struct { TargetWWNs []string `json:"targetWWNs,omitempty"` @@ -28,7 +28,7 @@ type FCVolumeSourceApplyConfiguration struct { WWIDs []string `json:"wwids,omitempty"` } -// FCVolumeSourceApplyConfiguration constructs an declarative configuration of the FCVolumeSource type for use with +// FCVolumeSourceApplyConfiguration constructs a declarative configuration of the FCVolumeSource type for use with // apply. func FCVolumeSource() *FCVolumeSourceApplyConfiguration { return &FCVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go index 47e7c746e..355c2c82d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FlexPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the FlexPersistentVolumeSource type for use +// FlexPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the FlexPersistentVolumeSource type for use // with apply. type FlexPersistentVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type FlexPersistentVolumeSourceApplyConfiguration struct { Options map[string]string `json:"options,omitempty"` } -// FlexPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the FlexPersistentVolumeSource type for use with +// FlexPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexPersistentVolumeSource type for use with // apply. func FlexPersistentVolumeSource() *FlexPersistentVolumeSourceApplyConfiguration { return &FlexPersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go index 7c09516a9..08ae9e1be 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FlexVolumeSourceApplyConfiguration represents an declarative configuration of the FlexVolumeSource type for use +// FlexVolumeSourceApplyConfiguration represents a declarative configuration of the FlexVolumeSource type for use // with apply. type FlexVolumeSourceApplyConfiguration struct { Driver *string `json:"driver,omitempty"` @@ -28,7 +28,7 @@ type FlexVolumeSourceApplyConfiguration struct { Options map[string]string `json:"options,omitempty"` } -// FlexVolumeSourceApplyConfiguration constructs an declarative configuration of the FlexVolumeSource type for use with +// FlexVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexVolumeSource type for use with // apply. 
func FlexVolumeSource() *FlexVolumeSourceApplyConfiguration { return &FlexVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go index 74896d55a..e4ecbba0e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// FlockerVolumeSourceApplyConfiguration represents an declarative configuration of the FlockerVolumeSource type for use +// FlockerVolumeSourceApplyConfiguration represents a declarative configuration of the FlockerVolumeSource type for use // with apply. type FlockerVolumeSourceApplyConfiguration struct { DatasetName *string `json:"datasetName,omitempty"` DatasetUUID *string `json:"datasetUUID,omitempty"` } -// FlockerVolumeSourceApplyConfiguration constructs an declarative configuration of the FlockerVolumeSource type for use with +// FlockerVolumeSourceApplyConfiguration constructs a declarative configuration of the FlockerVolumeSource type for use with // apply. func FlockerVolumeSource() *FlockerVolumeSourceApplyConfiguration { return &FlockerVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go index 0869d3eaa..56c4d03fa 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GCEPersistentDiskVolumeSourceApplyConfiguration represents an declarative configuration of the GCEPersistentDiskVolumeSource type for use +// GCEPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the GCEPersistentDiskVolumeSource type for use // with apply. type GCEPersistentDiskVolumeSourceApplyConfiguration struct { PDName *string `json:"pdName,omitempty"` @@ -27,7 +27,7 @@ type GCEPersistentDiskVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// GCEPersistentDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the GCEPersistentDiskVolumeSource type for use with +// GCEPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the GCEPersistentDiskVolumeSource type for use with // apply. func GCEPersistentDiskVolumeSource() *GCEPersistentDiskVolumeSourceApplyConfiguration { return &GCEPersistentDiskVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go index 825e02e4e..4ed92317c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// GitRepoVolumeSourceApplyConfiguration represents an declarative configuration of the GitRepoVolumeSource type for use +// GitRepoVolumeSourceApplyConfiguration represents a declarative configuration of the GitRepoVolumeSource type for use // with apply. type GitRepoVolumeSourceApplyConfiguration struct { Repository *string `json:"repository,omitempty"` @@ -26,7 +26,7 @@ type GitRepoVolumeSourceApplyConfiguration struct { Directory *string `json:"directory,omitempty"` } -// GitRepoVolumeSourceApplyConfiguration constructs an declarative configuration of the GitRepoVolumeSource type for use with +// GitRepoVolumeSourceApplyConfiguration constructs a declarative configuration of the GitRepoVolumeSource type for use with // apply. func GitRepoVolumeSource() *GitRepoVolumeSourceApplyConfiguration { return &GitRepoVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go index 21a3925e5..c9a23ca5d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GlusterfsPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the GlusterfsPersistentVolumeSource type for use +// GlusterfsPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsPersistentVolumeSource type for use // with apply. type GlusterfsPersistentVolumeSourceApplyConfiguration struct { EndpointsName *string `json:"endpoints,omitempty"` @@ -27,7 +27,7 @@ type GlusterfsPersistentVolumeSourceApplyConfiguration struct { EndpointsNamespace *string `json:"endpointsNamespace,omitempty"` } -// GlusterfsPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the GlusterfsPersistentVolumeSource type for use with +// GlusterfsPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsPersistentVolumeSource type for use with // apply. func GlusterfsPersistentVolumeSource() *GlusterfsPersistentVolumeSourceApplyConfiguration { return &GlusterfsPersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go index 7ce6f0b39..8c27f8c70 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// GlusterfsVolumeSourceApplyConfiguration represents an declarative configuration of the GlusterfsVolumeSource type for use +// GlusterfsVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsVolumeSource type for use // with apply. 
type GlusterfsVolumeSourceApplyConfiguration struct { EndpointsName *string `json:"endpoints,omitempty"` @@ -26,7 +26,7 @@ type GlusterfsVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// GlusterfsVolumeSourceApplyConfiguration constructs an declarative configuration of the GlusterfsVolumeSource type for use with +// GlusterfsVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsVolumeSource type for use with // apply. func GlusterfsVolumeSource() *GlusterfsVolumeSourceApplyConfiguration { return &GlusterfsVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go index f94e55937..0f3a88671 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// GRPCActionApplyConfiguration represents an declarative configuration of the GRPCAction type for use +// GRPCActionApplyConfiguration represents a declarative configuration of the GRPCAction type for use // with apply. type GRPCActionApplyConfiguration struct { Port *int32 `json:"port,omitempty"` Service *string `json:"service,omitempty"` } -// GRPCActionApplyConfiguration constructs an declarative configuration of the GRPCAction type for use with +// GRPCActionApplyConfiguration constructs a declarative configuration of the GRPCAction type for use with // apply. func GRPCAction() *GRPCActionApplyConfiguration { return &GRPCActionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go index 861508ef5..ec9ea1741 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// HostAliasApplyConfiguration represents an declarative configuration of the HostAlias type for use +// HostAliasApplyConfiguration represents a declarative configuration of the HostAlias type for use // with apply. type HostAliasApplyConfiguration struct { IP *string `json:"ip,omitempty"` Hostnames []string `json:"hostnames,omitempty"` } -// HostAliasApplyConfiguration constructs an declarative configuration of the HostAlias type for use with +// HostAliasApplyConfiguration constructs a declarative configuration of the HostAlias type for use with // apply. func HostAlias() *HostAliasApplyConfiguration { return &HostAliasApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go index c2a42cf74..439b5ce2d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// HostIPApplyConfiguration represents an declarative configuration of the HostIP type for use +// HostIPApplyConfiguration represents a declarative configuration of the HostIP type for use // with apply. 
type HostIPApplyConfiguration struct { IP *string `json:"ip,omitempty"` } -// HostIPApplyConfiguration constructs an declarative configuration of the HostIP type for use with +// HostIPApplyConfiguration constructs a declarative configuration of the HostIP type for use with // apply. func HostIP() *HostIPApplyConfiguration { return &HostIPApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go index 8b15689ee..6a41d67cd 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// HostPathVolumeSourceApplyConfiguration represents an declarative configuration of the HostPathVolumeSource type for use +// HostPathVolumeSourceApplyConfiguration represents a declarative configuration of the HostPathVolumeSource type for use // with apply. type HostPathVolumeSourceApplyConfiguration struct { - Path *string `json:"path,omitempty"` - Type *v1.HostPathType `json:"type,omitempty"` + Path *string `json:"path,omitempty"` + Type *corev1.HostPathType `json:"type,omitempty"` } -// HostPathVolumeSourceApplyConfiguration constructs an declarative configuration of the HostPathVolumeSource type for use with +// HostPathVolumeSourceApplyConfiguration constructs a declarative configuration of the HostPathVolumeSource type for use with // apply. func HostPathVolumeSource() *HostPathVolumeSourceApplyConfiguration { return &HostPathVolumeSourceApplyConfiguration{} @@ -46,7 +46,7 @@ func (b *HostPathVolumeSourceApplyConfiguration) WithPath(value string) *HostPat // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *HostPathVolumeSourceApplyConfiguration) WithType(value v1.HostPathType) *HostPathVolumeSourceApplyConfiguration { +func (b *HostPathVolumeSourceApplyConfiguration) WithType(value corev1.HostPathType) *HostPathVolumeSourceApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go index e4ecdd430..ca61c5ae2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// HTTPGetActionApplyConfiguration represents an declarative configuration of the HTTPGetAction type for use +// HTTPGetActionApplyConfiguration represents a declarative configuration of the HTTPGetAction type for use // with apply. 
type HTTPGetActionApplyConfiguration struct { Path *string `json:"path,omitempty"` Port *intstr.IntOrString `json:"port,omitempty"` Host *string `json:"host,omitempty"` - Scheme *v1.URIScheme `json:"scheme,omitempty"` + Scheme *corev1.URIScheme `json:"scheme,omitempty"` HTTPHeaders []HTTPHeaderApplyConfiguration `json:"httpHeaders,omitempty"` } -// HTTPGetActionApplyConfiguration constructs an declarative configuration of the HTTPGetAction type for use with +// HTTPGetActionApplyConfiguration constructs a declarative configuration of the HTTPGetAction type for use with // apply. func HTTPGetAction() *HTTPGetActionApplyConfiguration { return &HTTPGetActionApplyConfiguration{} @@ -66,7 +66,7 @@ func (b *HTTPGetActionApplyConfiguration) WithHost(value string) *HTTPGetActionA // WithScheme sets the Scheme field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scheme field is set to the value of the last call. -func (b *HTTPGetActionApplyConfiguration) WithScheme(value v1.URIScheme) *HTTPGetActionApplyConfiguration { +func (b *HTTPGetActionApplyConfiguration) WithScheme(value corev1.URIScheme) *HTTPGetActionApplyConfiguration { b.Scheme = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go index d55f36bfd..252637166 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// HTTPHeaderApplyConfiguration represents an declarative configuration of the HTTPHeader type for use +// HTTPHeaderApplyConfiguration represents a declarative configuration of the HTTPHeader type for use // with apply. type HTTPHeaderApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// HTTPHeaderApplyConfiguration constructs an declarative configuration of the HTTPHeader type for use with +// HTTPHeaderApplyConfiguration constructs a declarative configuration of the HTTPHeader type for use with // apply. func HTTPHeader() *HTTPHeaderApplyConfiguration { return &HTTPHeaderApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go new file mode 100644 index 000000000..9a146e685 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ImageVolumeSourceApplyConfiguration represents a declarative configuration of the ImageVolumeSource type for use +// with apply. +type ImageVolumeSourceApplyConfiguration struct { + Reference *string `json:"reference,omitempty"` + PullPolicy *corev1.PullPolicy `json:"pullPolicy,omitempty"` +} + +// ImageVolumeSourceApplyConfiguration constructs a declarative configuration of the ImageVolumeSource type for use with +// apply. +func ImageVolumeSource() *ImageVolumeSourceApplyConfiguration { + return &ImageVolumeSourceApplyConfiguration{} +} + +// WithReference sets the Reference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reference field is set to the value of the last call. +func (b *ImageVolumeSourceApplyConfiguration) WithReference(value string) *ImageVolumeSourceApplyConfiguration { + b.Reference = &value + return b +} + +// WithPullPolicy sets the PullPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PullPolicy field is set to the value of the last call. +func (b *ImageVolumeSourceApplyConfiguration) WithPullPolicy(value corev1.PullPolicy) *ImageVolumeSourceApplyConfiguration { + b.PullPolicy = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go index c7b248181..42f420c56 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ISCSIPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the ISCSIPersistentVolumeSource type for use +// ISCSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIPersistentVolumeSource type for use // with apply. type ISCSIPersistentVolumeSourceApplyConfiguration struct { TargetPortal *string `json:"targetPortal,omitempty"` @@ -34,7 +34,7 @@ type ISCSIPersistentVolumeSourceApplyConfiguration struct { InitiatorName *string `json:"initiatorName,omitempty"` } -// ISCSIPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the ISCSIPersistentVolumeSource type for use with +// ISCSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIPersistentVolumeSource type for use with // apply. func ISCSIPersistentVolumeSource() *ISCSIPersistentVolumeSourceApplyConfiguration { return &ISCSIPersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go index c95941a9c..61055434b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// ISCSIVolumeSourceApplyConfiguration represents an declarative configuration of the ISCSIVolumeSource type for use +// ISCSIVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIVolumeSource type for use // with apply. type ISCSIVolumeSourceApplyConfiguration struct { TargetPortal *string `json:"targetPortal,omitempty"` @@ -34,7 +34,7 @@ type ISCSIVolumeSourceApplyConfiguration struct { InitiatorName *string `json:"initiatorName,omitempty"` } -// ISCSIVolumeSourceApplyConfiguration constructs an declarative configuration of the ISCSIVolumeSource type for use with +// ISCSIVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIVolumeSource type for use with // apply. func ISCSIVolumeSource() *ISCSIVolumeSourceApplyConfiguration { return &ISCSIVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go index d58676d34..c961b0795 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// KeyToPathApplyConfiguration represents an declarative configuration of the KeyToPath type for use +// KeyToPathApplyConfiguration represents a declarative configuration of the KeyToPath type for use // with apply. type KeyToPathApplyConfiguration struct { Key *string `json:"key,omitempty"` @@ -26,7 +26,7 @@ type KeyToPathApplyConfiguration struct { Mode *int32 `json:"mode,omitempty"` } -// KeyToPathApplyConfiguration constructs an declarative configuration of the KeyToPath type for use with +// KeyToPathApplyConfiguration constructs a declarative configuration of the KeyToPath type for use with // apply. func KeyToPath() *KeyToPathApplyConfiguration { return &KeyToPathApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go index db9abf8af..e37a30f59 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// LifecycleApplyConfiguration represents an declarative configuration of the Lifecycle type for use +// LifecycleApplyConfiguration represents a declarative configuration of the Lifecycle type for use // with apply. type LifecycleApplyConfiguration struct { PostStart *LifecycleHandlerApplyConfiguration `json:"postStart,omitempty"` PreStop *LifecycleHandlerApplyConfiguration `json:"preStop,omitempty"` } -// LifecycleApplyConfiguration constructs an declarative configuration of the Lifecycle type for use with +// LifecycleApplyConfiguration constructs a declarative configuration of the Lifecycle type for use with // apply. 
func Lifecycle() *LifecycleApplyConfiguration { return &LifecycleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go index e4ae9c49f..b7c706d58 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// LifecycleHandlerApplyConfiguration represents an declarative configuration of the LifecycleHandler type for use +// LifecycleHandlerApplyConfiguration represents a declarative configuration of the LifecycleHandler type for use // with apply. type LifecycleHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` @@ -27,7 +27,7 @@ type LifecycleHandlerApplyConfiguration struct { Sleep *SleepActionApplyConfiguration `json:"sleep,omitempty"` } -// LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with +// LifecycleHandlerApplyConfiguration constructs a declarative configuration of the LifecycleHandler type for use with // apply. func LifecycleHandler() *LifecycleHandlerApplyConfiguration { return &LifecycleHandlerApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go index eaf635c76..517cc4cd3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// LimitRangeApplyConfiguration represents an declarative configuration of the LimitRange type for use +// LimitRangeApplyConfiguration represents a declarative configuration of the LimitRange type for use // with apply. type LimitRangeApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"` } -// LimitRange constructs an declarative configuration of the LimitRange type for use with +// LimitRange constructs a declarative configuration of the LimitRange type for use with // apply. func LimitRange(name, namespace string) *LimitRangeApplyConfiguration { b := &LimitRangeApplyConfiguration{} @@ -57,18 +57,18 @@ func LimitRange(name, namespace string) *LimitRangeApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) { +func ExtractLimitRange(limitRange *corev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) { return extractLimitRange(limitRange, fieldManager, "") } // ExtractLimitRangeStatus is the same as ExtractLimitRange except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractLimitRangeStatus(limitRange *apicorev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) { +func ExtractLimitRangeStatus(limitRange *corev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) { return extractLimitRange(limitRange, fieldManager, "status") } -func extractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string, subresource string) (*LimitRangeApplyConfiguration, error) { +func extractLimitRange(limitRange *corev1.LimitRange, fieldManager string, subresource string) (*LimitRangeApplyConfiguration, error) { b := &LimitRangeApplyConfiguration{} err := managedfields.ExtractInto(limitRange, internal.Parser().Type("io.k8s.api.core.v1.LimitRange"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string, su // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithKind(value string) *LimitRangeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *LimitRangeApplyConfiguration) WithKind(value string) *LimitRangeApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithAPIVersion(value string) *LimitRangeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *LimitRangeApplyConfiguration) WithAPIVersion(value string) *LimitRangeA // If called multiple times, the Name field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithName(value string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *LimitRangeApplyConfiguration) WithName(value string) *LimitRangeApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithGenerateName(value string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *LimitRangeApplyConfiguration) WithGenerateName(value string) *LimitRang // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *LimitRangeApplyConfiguration) WithNamespace(value string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *LimitRangeApplyConfiguration) WithNamespace(value string) *LimitRangeAp // If called multiple times, the UID field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithUID(value types.UID) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *LimitRangeApplyConfiguration) WithUID(value types.UID) *LimitRangeApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithResourceVersion(value string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *LimitRangeApplyConfiguration) WithResourceVersion(value string) *LimitR // If called multiple times, the Generation field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithGeneration(value int64) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *LimitRangeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LimitRangeApplyConfiguration { +func (b *LimitRangeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *LimitRangeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LimitRangeApplyConfiguration { +func (b *LimitRangeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *LimitRangeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *LimitRangeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *LimitRangeApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *LimitRangeApplyConfiguration) WithLabels(entries map[string]string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *LimitRangeApplyConfiguration) WithLabels(entries map[string]string) *Li // overwriting an existing map entries in Annotations field with the same key. func (b *LimitRangeApplyConfiguration) WithAnnotations(entries map[string]string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *LimitRangeApplyConfiguration) WithAnnotations(entries map[string]string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *LimitRangeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LimitRangeApplyConfiguration { +func (b *LimitRangeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *LimitRangeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *LimitRangeApplyConfiguration) WithFinalizers(values ...string) *LimitRangeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *LimitRangeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -247,3 +247,9 @@ func (b *LimitRangeApplyConfiguration) WithSpec(value *LimitRangeSpecApplyConfig b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *LimitRangeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go index 084650fda..5ad8ac0e6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// LimitRangeItemApplyConfiguration represents an declarative configuration of the LimitRangeItem type for use +// LimitRangeItemApplyConfiguration represents a declarative configuration of the LimitRangeItem type for use // with apply. type LimitRangeItemApplyConfiguration struct { - Type *v1.LimitType `json:"type,omitempty"` - Max *v1.ResourceList `json:"max,omitempty"` - Min *v1.ResourceList `json:"min,omitempty"` - Default *v1.ResourceList `json:"default,omitempty"` - DefaultRequest *v1.ResourceList `json:"defaultRequest,omitempty"` - MaxLimitRequestRatio *v1.ResourceList `json:"maxLimitRequestRatio,omitempty"` + Type *corev1.LimitType `json:"type,omitempty"` + Max *corev1.ResourceList `json:"max,omitempty"` + Min *corev1.ResourceList `json:"min,omitempty"` + Default *corev1.ResourceList `json:"default,omitempty"` + DefaultRequest *corev1.ResourceList `json:"defaultRequest,omitempty"` + MaxLimitRequestRatio *corev1.ResourceList `json:"maxLimitRequestRatio,omitempty"` } -// LimitRangeItemApplyConfiguration constructs an declarative configuration of the LimitRangeItem type for use with +// LimitRangeItemApplyConfiguration constructs a declarative configuration of the LimitRangeItem type for use with // apply. func LimitRangeItem() *LimitRangeItemApplyConfiguration { return &LimitRangeItemApplyConfiguration{} @@ -42,7 +42,7 @@ func LimitRangeItem() *LimitRangeItemApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *LimitRangeItemApplyConfiguration) WithType(value v1.LimitType) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithType(value corev1.LimitType) *LimitRangeItemApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *LimitRangeItemApplyConfiguration) WithType(value v1.LimitType) *LimitRa // WithMax sets the Max field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Max field is set to the value of the last call. -func (b *LimitRangeItemApplyConfiguration) WithMax(value v1.ResourceList) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithMax(value corev1.ResourceList) *LimitRangeItemApplyConfiguration { b.Max = &value return b } @@ -58,7 +58,7 @@ func (b *LimitRangeItemApplyConfiguration) WithMax(value v1.ResourceList) *Limit // WithMin sets the Min field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Min field is set to the value of the last call. -func (b *LimitRangeItemApplyConfiguration) WithMin(value v1.ResourceList) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithMin(value corev1.ResourceList) *LimitRangeItemApplyConfiguration { b.Min = &value return b } @@ -66,7 +66,7 @@ func (b *LimitRangeItemApplyConfiguration) WithMin(value v1.ResourceList) *Limit // WithDefault sets the Default field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Default field is set to the value of the last call. -func (b *LimitRangeItemApplyConfiguration) WithDefault(value v1.ResourceList) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithDefault(value corev1.ResourceList) *LimitRangeItemApplyConfiguration { b.Default = &value return b } @@ -74,7 +74,7 @@ func (b *LimitRangeItemApplyConfiguration) WithDefault(value v1.ResourceList) *L // WithDefaultRequest sets the DefaultRequest field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DefaultRequest field is set to the value of the last call. -func (b *LimitRangeItemApplyConfiguration) WithDefaultRequest(value v1.ResourceList) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithDefaultRequest(value corev1.ResourceList) *LimitRangeItemApplyConfiguration { b.DefaultRequest = &value return b } @@ -82,7 +82,7 @@ func (b *LimitRangeItemApplyConfiguration) WithDefaultRequest(value v1.ResourceL // WithMaxLimitRequestRatio sets the MaxLimitRequestRatio field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MaxLimitRequestRatio field is set to the value of the last call. -func (b *LimitRangeItemApplyConfiguration) WithMaxLimitRequestRatio(value v1.ResourceList) *LimitRangeItemApplyConfiguration { +func (b *LimitRangeItemApplyConfiguration) WithMaxLimitRequestRatio(value corev1.ResourceList) *LimitRangeItemApplyConfiguration { b.MaxLimitRequestRatio = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go index 5eee5c498..8d69c1c0c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LimitRangeSpecApplyConfiguration represents an declarative configuration of the LimitRangeSpec type for use +// LimitRangeSpecApplyConfiguration represents a declarative configuration of the LimitRangeSpec type for use // with apply. type LimitRangeSpecApplyConfiguration struct { Limits []LimitRangeItemApplyConfiguration `json:"limits,omitempty"` } -// LimitRangeSpecApplyConfiguration constructs an declarative configuration of the LimitRangeSpec type for use with +// LimitRangeSpecApplyConfiguration constructs a declarative configuration of the LimitRangeSpec type for use with // apply. 
func LimitRangeSpec() *LimitRangeSpecApplyConfiguration { return &LimitRangeSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go new file mode 100644 index 000000000..fbab4815a --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// LinuxContainerUserApplyConfiguration represents a declarative configuration of the LinuxContainerUser type for use +// with apply. +type LinuxContainerUserApplyConfiguration struct { + UID *int64 `json:"uid,omitempty"` + GID *int64 `json:"gid,omitempty"` + SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` +} + +// LinuxContainerUserApplyConfiguration constructs a declarative configuration of the LinuxContainerUser type for use with +// apply. +func LinuxContainerUser() *LinuxContainerUserApplyConfiguration { + return &LinuxContainerUserApplyConfiguration{} +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *LinuxContainerUserApplyConfiguration) WithUID(value int64) *LinuxContainerUserApplyConfiguration { + b.UID = &value + return b +} + +// WithGID sets the GID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GID field is set to the value of the last call. +func (b *LinuxContainerUserApplyConfiguration) WithGID(value int64) *LinuxContainerUserApplyConfiguration { + b.GID = &value + return b +} + +// WithSupplementalGroups adds the given value to the SupplementalGroups field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the SupplementalGroups field. +func (b *LinuxContainerUserApplyConfiguration) WithSupplementalGroups(values ...int64) *LinuxContainerUserApplyConfiguration { + for i := range values { + b.SupplementalGroups = append(b.SupplementalGroups, values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go index a48dac681..ae5c410a2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go @@ -19,19 +19,19 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// LoadBalancerIngressApplyConfiguration represents an declarative configuration of the LoadBalancerIngress type for use +// LoadBalancerIngressApplyConfiguration represents a declarative configuration of the LoadBalancerIngress type for use // with apply. type LoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` Hostname *string `json:"hostname,omitempty"` - IPMode *v1.LoadBalancerIPMode `json:"ipMode,omitempty"` + IPMode *corev1.LoadBalancerIPMode `json:"ipMode,omitempty"` Ports []PortStatusApplyConfiguration `json:"ports,omitempty"` } -// LoadBalancerIngressApplyConfiguration constructs an declarative configuration of the LoadBalancerIngress type for use with +// LoadBalancerIngressApplyConfiguration constructs a declarative configuration of the LoadBalancerIngress type for use with // apply. func LoadBalancerIngress() *LoadBalancerIngressApplyConfiguration { return &LoadBalancerIngressApplyConfiguration{} @@ -56,7 +56,7 @@ func (b *LoadBalancerIngressApplyConfiguration) WithHostname(value string) *Load // WithIPMode sets the IPMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the IPMode field is set to the value of the last call. -func (b *LoadBalancerIngressApplyConfiguration) WithIPMode(value v1.LoadBalancerIPMode) *LoadBalancerIngressApplyConfiguration { +func (b *LoadBalancerIngressApplyConfiguration) WithIPMode(value corev1.LoadBalancerIPMode) *LoadBalancerIngressApplyConfiguration { b.IPMode = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go index 2fcc0cad1..bb3d616c1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// LoadBalancerStatusApplyConfiguration represents an declarative configuration of the LoadBalancerStatus type for use +// LoadBalancerStatusApplyConfiguration represents a declarative configuration of the LoadBalancerStatus type for use // with apply. type LoadBalancerStatusApplyConfiguration struct { Ingress []LoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// LoadBalancerStatusApplyConfiguration constructs an declarative configuration of the LoadBalancerStatus type for use with +// LoadBalancerStatusApplyConfiguration constructs a declarative configuration of the LoadBalancerStatus type for use with // apply. func LoadBalancerStatus() *LoadBalancerStatusApplyConfiguration { return &LoadBalancerStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go index 7662e32b3..c55d6803d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// LocalObjectReferenceApplyConfiguration represents an declarative configuration of the LocalObjectReference type for use +// LocalObjectReferenceApplyConfiguration represents a declarative configuration of the LocalObjectReference type for use // with apply. type LocalObjectReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// LocalObjectReferenceApplyConfiguration constructs an declarative configuration of the LocalObjectReference type for use with +// LocalObjectReferenceApplyConfiguration constructs a declarative configuration of the LocalObjectReference type for use with // apply. func LocalObjectReference() *LocalObjectReferenceApplyConfiguration { return &LocalObjectReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go index 5d289bd12..db711d993 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// LocalVolumeSourceApplyConfiguration represents an declarative configuration of the LocalVolumeSource type for use +// LocalVolumeSourceApplyConfiguration represents a declarative configuration of the LocalVolumeSource type for use // with apply. type LocalVolumeSourceApplyConfiguration struct { Path *string `json:"path,omitempty"` FSType *string `json:"fsType,omitempty"` } -// LocalVolumeSourceApplyConfiguration constructs an declarative configuration of the LocalVolumeSource type for use with +// LocalVolumeSourceApplyConfiguration constructs a declarative configuration of the LocalVolumeSource type for use with // apply. func LocalVolumeSource() *LocalVolumeSourceApplyConfiguration { return &LocalVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go index 4ff1d040c..9a1a6af2a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// ModifyVolumeStatusApplyConfiguration represents an declarative configuration of the ModifyVolumeStatus type for use +// ModifyVolumeStatusApplyConfiguration represents a declarative configuration of the ModifyVolumeStatus type for use // with apply. type ModifyVolumeStatusApplyConfiguration struct { - TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"` - Status *v1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"` + TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"` + Status *corev1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"` } -// ModifyVolumeStatusApplyConfiguration constructs an declarative configuration of the ModifyVolumeStatus type for use with +// ModifyVolumeStatusApplyConfiguration constructs a declarative configuration of the ModifyVolumeStatus type for use with // apply. 
func ModifyVolumeStatus() *ModifyVolumeStatusApplyConfiguration { return &ModifyVolumeStatusApplyConfiguration{} @@ -46,7 +46,7 @@ func (b *ModifyVolumeStatusApplyConfiguration) WithTargetVolumeAttributesClassNa // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ModifyVolumeStatusApplyConfiguration) WithStatus(value v1.PersistentVolumeClaimModifyVolumeStatus) *ModifyVolumeStatusApplyConfiguration { +func (b *ModifyVolumeStatusApplyConfiguration) WithStatus(value corev1.PersistentVolumeClaimModifyVolumeStatus) *ModifyVolumeStatusApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go index bdc9ef167..0aba283ce 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NamespaceApplyConfiguration represents an declarative configuration of the Namespace type for use +// NamespaceApplyConfiguration represents a declarative configuration of the Namespace type for use // with apply. type NamespaceApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NamespaceSpecApplyConfiguration `json:"spec,omitempty"` - Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *NamespaceSpecApplyConfiguration `json:"spec,omitempty"` + Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"` } -// Namespace constructs an declarative configuration of the Namespace type for use with +// Namespace constructs a declarative configuration of the Namespace type for use with // apply. func Namespace(name string) *NamespaceApplyConfiguration { b := &NamespaceApplyConfiguration{} @@ -57,18 +57,18 @@ func Namespace(name string) *NamespaceApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractNamespace(namespace *apicorev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) { +func ExtractNamespace(namespace *corev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) { return extractNamespace(namespace, fieldManager, "") } // ExtractNamespaceStatus is the same as ExtractNamespace except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractNamespaceStatus(namespace *apicorev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) { +func ExtractNamespaceStatus(namespace *corev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) { return extractNamespace(namespace, fieldManager, "status") } -func extractNamespace(namespace *apicorev1.Namespace, fieldManager string, subresource string) (*NamespaceApplyConfiguration, error) { +func extractNamespace(namespace *corev1.Namespace, fieldManager string, subresource string) (*NamespaceApplyConfiguration, error) { b := &NamespaceApplyConfiguration{} err := managedfields.ExtractInto(namespace, internal.Parser().Type("io.k8s.api.core.v1.Namespace"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractNamespace(namespace *apicorev1.Namespace, fieldManager string, subre // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithKind(value string) *NamespaceApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *NamespaceApplyConfiguration) WithKind(value string) *NamespaceApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithAPIVersion(value string) *NamespaceApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *NamespaceApplyConfiguration) WithAPIVersion(value string) *NamespaceApp // If called multiple times, the Name field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithName(value string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *NamespaceApplyConfiguration) WithName(value string) *NamespaceApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithGenerateName(value string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *NamespaceApplyConfiguration) WithGenerateName(value string) *NamespaceA // If called multiple times, the Namespace field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithNamespace(value string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *NamespaceApplyConfiguration) WithNamespace(value string) *NamespaceAppl // If called multiple times, the UID field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithUID(value types.UID) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *NamespaceApplyConfiguration) WithUID(value types.UID) *NamespaceApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *NamespaceApplyConfiguration) WithResourceVersion(value string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *NamespaceApplyConfiguration) WithResourceVersion(value string) *Namespa // If called multiple times, the Generation field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithGeneration(value int64) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *NamespaceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NamespaceApplyConfiguration { +func (b *NamespaceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *NamespaceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NamespaceApplyConfiguration { +func (b *NamespaceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *NamespaceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *NamespaceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *NamespaceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *NamespaceApplyConfiguration) WithLabels(entries map[string]string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *NamespaceApplyConfiguration) WithLabels(entries map[string]string) *Nam // overwriting an existing map entries in Annotations field with the same key. 
func (b *NamespaceApplyConfiguration) WithAnnotations(entries map[string]string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *NamespaceApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *NamespaceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NamespaceApplyConfiguration { +func (b *NamespaceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *NamespaceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *NamespaceApplyConfiguration) WithFinalizers(values ...string) *NamespaceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *NamespaceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,3 +254,9 @@ func (b *NamespaceApplyConfiguration) WithStatus(value *NamespaceStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NamespaceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go index 8651978b0..82b4cc1ca 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// NamespaceConditionApplyConfiguration represents an declarative configuration of the NamespaceCondition type for use +// NamespaceConditionApplyConfiguration represents a declarative configuration of the NamespaceCondition type for use // with apply. 
type NamespaceConditionApplyConfiguration struct { - Type *v1.NamespaceConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *corev1.NamespaceConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// NamespaceConditionApplyConfiguration constructs an declarative configuration of the NamespaceCondition type for use with +// NamespaceConditionApplyConfiguration constructs a declarative configuration of the NamespaceCondition type for use with // apply. func NamespaceCondition() *NamespaceConditionApplyConfiguration { return &NamespaceConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func NamespaceCondition() *NamespaceConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *NamespaceConditionApplyConfiguration) WithType(value v1.NamespaceConditionType) *NamespaceConditionApplyConfiguration { +func (b *NamespaceConditionApplyConfiguration) WithType(value corev1.NamespaceConditionType) *NamespaceConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *NamespaceConditionApplyConfiguration) WithType(value v1.NamespaceCondit // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *NamespaceConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *NamespaceConditionApplyConfiguration { +func (b *NamespaceConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *NamespaceConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go index 9bc02d1fa..1f8fcaf9a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// NamespaceSpecApplyConfiguration represents an declarative configuration of the NamespaceSpec type for use +// NamespaceSpecApplyConfiguration represents a declarative configuration of the NamespaceSpec type for use // with apply. type NamespaceSpecApplyConfiguration struct { - Finalizers []v1.FinalizerName `json:"finalizers,omitempty"` + Finalizers []corev1.FinalizerName `json:"finalizers,omitempty"` } -// NamespaceSpecApplyConfiguration constructs an declarative configuration of the NamespaceSpec type for use with +// NamespaceSpecApplyConfiguration constructs a declarative configuration of the NamespaceSpec type for use with // apply. 
func NamespaceSpec() *NamespaceSpecApplyConfiguration { return &NamespaceSpecApplyConfiguration{} @@ -37,7 +37,7 @@ func NamespaceSpec() *NamespaceSpecApplyConfiguration { // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *NamespaceSpecApplyConfiguration) WithFinalizers(values ...v1.FinalizerName) *NamespaceSpecApplyConfiguration { +func (b *NamespaceSpecApplyConfiguration) WithFinalizers(values ...corev1.FinalizerName) *NamespaceSpecApplyConfiguration { for i := range values { b.Finalizers = append(b.Finalizers, values[i]) } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go index d950fd316..1484be684 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// NamespaceStatusApplyConfiguration represents an declarative configuration of the NamespaceStatus type for use +// NamespaceStatusApplyConfiguration represents a declarative configuration of the NamespaceStatus type for use // with apply. type NamespaceStatusApplyConfiguration struct { - Phase *v1.NamespacePhase `json:"phase,omitempty"` + Phase *corev1.NamespacePhase `json:"phase,omitempty"` Conditions []NamespaceConditionApplyConfiguration `json:"conditions,omitempty"` } -// NamespaceStatusApplyConfiguration constructs an declarative configuration of the NamespaceStatus type for use with +// NamespaceStatusApplyConfiguration constructs a declarative configuration of the NamespaceStatus type for use with // apply. func NamespaceStatus() *NamespaceStatusApplyConfiguration { return &NamespaceStatusApplyConfiguration{} @@ -38,7 +38,7 @@ func NamespaceStatus() *NamespaceStatusApplyConfiguration { // WithPhase sets the Phase field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Phase field is set to the value of the last call. -func (b *NamespaceStatusApplyConfiguration) WithPhase(value v1.NamespacePhase) *NamespaceStatusApplyConfiguration { +func (b *NamespaceStatusApplyConfiguration) WithPhase(value corev1.NamespacePhase) *NamespaceStatusApplyConfiguration { b.Phase = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go index cb300ee81..ed49a87a9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NFSVolumeSourceApplyConfiguration represents an declarative configuration of the NFSVolumeSource type for use +// NFSVolumeSourceApplyConfiguration represents a declarative configuration of the NFSVolumeSource type for use // with apply. 
type NFSVolumeSourceApplyConfiguration struct { Server *string `json:"server,omitempty"` @@ -26,7 +26,7 @@ type NFSVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// NFSVolumeSourceApplyConfiguration constructs an declarative configuration of the NFSVolumeSource type for use with +// NFSVolumeSourceApplyConfiguration constructs a declarative configuration of the NFSVolumeSource type for use with // apply. func NFSVolumeSource() *NFSVolumeSourceApplyConfiguration { return &NFSVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go index 047f4ac1c..d365047b7 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NodeApplyConfiguration represents an declarative configuration of the Node type for use +// NodeApplyConfiguration represents a declarative configuration of the Node type for use // with apply. type NodeApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"` - Status *NodeStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"` + Status *NodeStatusApplyConfiguration `json:"status,omitempty"` } -// Node constructs an declarative configuration of the Node type for use with +// Node constructs a declarative configuration of the Node type for use with // apply. func Node(name string) *NodeApplyConfiguration { b := &NodeApplyConfiguration{} @@ -57,18 +57,18 @@ func Node(name string) *NodeApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractNode(node *apicorev1.Node, fieldManager string) (*NodeApplyConfiguration, error) { +func ExtractNode(node *corev1.Node, fieldManager string) (*NodeApplyConfiguration, error) { return extractNode(node, fieldManager, "") } // ExtractNodeStatus is the same as ExtractNode except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractNodeStatus(node *apicorev1.Node, fieldManager string) (*NodeApplyConfiguration, error) { +func ExtractNodeStatus(node *corev1.Node, fieldManager string) (*NodeApplyConfiguration, error) { return extractNode(node, fieldManager, "status") } -func extractNode(node *apicorev1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) { +func extractNode(node *corev1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) { b := &NodeApplyConfiguration{} err := managedfields.ExtractInto(node, internal.Parser().Type("io.k8s.api.core.v1.Node"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractNode(node *apicorev1.Node, fieldManager string, subresource string) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NodeApplyConfiguration) WithKind(value string) *NodeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *NodeApplyConfiguration) WithKind(value string) *NodeApplyConfiguration // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *NodeApplyConfiguration) WithAPIVersion(value string) *NodeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *NodeApplyConfiguration) WithAPIVersion(value string) *NodeApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *NodeApplyConfiguration) WithName(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *NodeApplyConfiguration) WithName(value string) *NodeApplyConfiguration // If called multiple times, the GenerateName field is set to the value of the last call. func (b *NodeApplyConfiguration) WithGenerateName(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *NodeApplyConfiguration) WithGenerateName(value string) *NodeApplyConfig // If called multiple times, the Namespace field is set to the value of the last call. func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *NodeApplyConfiguration) WithNamespace(value string) *NodeApplyConfigura // If called multiple times, the UID field is set to the value of the last call. func (b *NodeApplyConfiguration) WithUID(value types.UID) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *NodeApplyConfiguration) WithUID(value types.UID) *NodeApplyConfiguratio // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *NodeApplyConfiguration) WithResourceVersion(value string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *NodeApplyConfiguration) WithResourceVersion(value string) *NodeApplyCon // If called multiple times, the Generation field is set to the value of the last call. func (b *NodeApplyConfiguration) WithGeneration(value int64) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *NodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NodeApplyConfiguration { +func (b *NodeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *NodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodeApplyConfiguration { +func (b *NodeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *NodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NodeA // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *NodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *NodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *No // overwriting an existing map entries in Labels field with the same key. func (b *NodeApplyConfiguration) WithLabels(entries map[string]string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *NodeApplyConfiguration) WithLabels(entries map[string]string) *NodeAppl // overwriting an existing map entries in Annotations field with the same key. 
func (b *NodeApplyConfiguration) WithAnnotations(entries map[string]string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *NodeApplyConfiguration) WithAnnotations(entries map[string]string) *Nod // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NodeApplyConfiguration { +func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *NodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc func (b *NodeApplyConfiguration) WithFinalizers(values ...string) *NodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *NodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,3 +254,9 @@ func (b *NodeApplyConfiguration) WithStatus(value *NodeStatusApplyConfiguration) b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go index a1d4fbe04..779fe0e2f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// NodeAddressApplyConfiguration represents an declarative configuration of the NodeAddress type for use +// NodeAddressApplyConfiguration represents a declarative configuration of the NodeAddress type for use // with apply. 
type NodeAddressApplyConfiguration struct { - Type *v1.NodeAddressType `json:"type,omitempty"` - Address *string `json:"address,omitempty"` + Type *corev1.NodeAddressType `json:"type,omitempty"` + Address *string `json:"address,omitempty"` } -// NodeAddressApplyConfiguration constructs an declarative configuration of the NodeAddress type for use with +// NodeAddressApplyConfiguration constructs a declarative configuration of the NodeAddress type for use with // apply. func NodeAddress() *NodeAddressApplyConfiguration { return &NodeAddressApplyConfiguration{} @@ -38,7 +38,7 @@ func NodeAddress() *NodeAddressApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *NodeAddressApplyConfiguration) WithType(value v1.NodeAddressType) *NodeAddressApplyConfiguration { +func (b *NodeAddressApplyConfiguration) WithType(value corev1.NodeAddressType) *NodeAddressApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go index e28ced6e4..5d11d746d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NodeAffinityApplyConfiguration represents an declarative configuration of the NodeAffinity type for use +// NodeAffinityApplyConfiguration represents a declarative configuration of the NodeAffinity type for use // with apply. type NodeAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution *NodeSelectorApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// NodeAffinityApplyConfiguration constructs an declarative configuration of the NodeAffinity type for use with +// NodeAffinityApplyConfiguration constructs a declarative configuration of the NodeAffinity type for use with // apply. func NodeAffinity() *NodeAffinityApplyConfiguration { return &NodeAffinityApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go index eb81ca543..e3a2d3bb0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// NodeConditionApplyConfiguration represents an declarative configuration of the NodeCondition type for use +// NodeConditionApplyConfiguration represents a declarative configuration of the NodeCondition type for use // with apply. 
type NodeConditionApplyConfiguration struct { - Type *v1.NodeConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *corev1.NodeConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// NodeConditionApplyConfiguration constructs an declarative configuration of the NodeCondition type for use with +// NodeConditionApplyConfiguration constructs a declarative configuration of the NodeCondition type for use with // apply. func NodeCondition() *NodeConditionApplyConfiguration { return &NodeConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func NodeCondition() *NodeConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *NodeConditionApplyConfiguration) WithType(value v1.NodeConditionType) *NodeConditionApplyConfiguration { +func (b *NodeConditionApplyConfiguration) WithType(value corev1.NodeConditionType) *NodeConditionApplyConfiguration { b.Type = &value return b } @@ -51,7 +51,7 @@ func (b *NodeConditionApplyConfiguration) WithType(value v1.NodeConditionType) * // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *NodeConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *NodeConditionApplyConfiguration { +func (b *NodeConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *NodeConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go index 60567aa43..00a671fc0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeConfigSourceApplyConfiguration represents an declarative configuration of the NodeConfigSource type for use +// NodeConfigSourceApplyConfiguration represents a declarative configuration of the NodeConfigSource type for use // with apply. type NodeConfigSourceApplyConfiguration struct { ConfigMap *ConfigMapNodeConfigSourceApplyConfiguration `json:"configMap,omitempty"` } -// NodeConfigSourceApplyConfiguration constructs an declarative configuration of the NodeConfigSource type for use with +// NodeConfigSourceApplyConfiguration constructs a declarative configuration of the NodeConfigSource type for use with // apply. 
func NodeConfigSource() *NodeConfigSourceApplyConfiguration { return &NodeConfigSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go index 71447fe9c..d5ccc45c6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NodeConfigStatusApplyConfiguration represents an declarative configuration of the NodeConfigStatus type for use +// NodeConfigStatusApplyConfiguration represents a declarative configuration of the NodeConfigStatus type for use // with apply. type NodeConfigStatusApplyConfiguration struct { Assigned *NodeConfigSourceApplyConfiguration `json:"assigned,omitempty"` @@ -27,7 +27,7 @@ type NodeConfigStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// NodeConfigStatusApplyConfiguration constructs an declarative configuration of the NodeConfigStatus type for use with +// NodeConfigStatusApplyConfiguration constructs a declarative configuration of the NodeConfigStatus type for use with // apply. func NodeConfigStatus() *NodeConfigStatusApplyConfiguration { return &NodeConfigStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go index 4cabc7f52..11228b369 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeDaemonEndpointsApplyConfiguration represents an declarative configuration of the NodeDaemonEndpoints type for use +// NodeDaemonEndpointsApplyConfiguration represents a declarative configuration of the NodeDaemonEndpoints type for use // with apply. type NodeDaemonEndpointsApplyConfiguration struct { KubeletEndpoint *DaemonEndpointApplyConfiguration `json:"kubeletEndpoint,omitempty"` } -// NodeDaemonEndpointsApplyConfiguration constructs an declarative configuration of the NodeDaemonEndpoints type for use with +// NodeDaemonEndpointsApplyConfiguration constructs a declarative configuration of the NodeDaemonEndpoints type for use with // apply. func NodeDaemonEndpoints() *NodeDaemonEndpointsApplyConfiguration { return &NodeDaemonEndpointsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go new file mode 100644 index 000000000..678b0e36d --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NodeFeaturesApplyConfiguration represents a declarative configuration of the NodeFeatures type for use +// with apply. +type NodeFeaturesApplyConfiguration struct { + SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty"` +} + +// NodeFeaturesApplyConfiguration constructs a declarative configuration of the NodeFeatures type for use with +// apply. +func NodeFeatures() *NodeFeaturesApplyConfiguration { + return &NodeFeaturesApplyConfiguration{} +} + +// WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call. +func (b *NodeFeaturesApplyConfiguration) WithSupplementalGroupsPolicy(value bool) *NodeFeaturesApplyConfiguration { + b.SupplementalGroupsPolicy = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go index 9ada0a18e..c7c664974 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NodeRuntimeHandlerApplyConfiguration represents an declarative configuration of the NodeRuntimeHandler type for use +// NodeRuntimeHandlerApplyConfiguration represents a declarative configuration of the NodeRuntimeHandler type for use // with apply. type NodeRuntimeHandlerApplyConfiguration struct { Name *string `json:"name,omitempty"` Features *NodeRuntimeHandlerFeaturesApplyConfiguration `json:"features,omitempty"` } -// NodeRuntimeHandlerApplyConfiguration constructs an declarative configuration of the NodeRuntimeHandler type for use with +// NodeRuntimeHandlerApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandler type for use with // apply. func NodeRuntimeHandler() *NodeRuntimeHandlerApplyConfiguration { return &NodeRuntimeHandlerApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go index a3e3a52e8..a295b6096 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go @@ -18,13 +18,14 @@ limitations under the License. package v1 -// NodeRuntimeHandlerFeaturesApplyConfiguration represents an declarative configuration of the NodeRuntimeHandlerFeatures type for use +// NodeRuntimeHandlerFeaturesApplyConfiguration represents a declarative configuration of the NodeRuntimeHandlerFeatures type for use // with apply. 
type NodeRuntimeHandlerFeaturesApplyConfiguration struct { RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty"` + UserNamespaces *bool `json:"userNamespaces,omitempty"` } -// NodeRuntimeHandlerFeaturesApplyConfiguration constructs an declarative configuration of the NodeRuntimeHandlerFeatures type for use with +// NodeRuntimeHandlerFeaturesApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandlerFeatures type for use with // apply. func NodeRuntimeHandlerFeatures() *NodeRuntimeHandlerFeaturesApplyConfiguration { return &NodeRuntimeHandlerFeaturesApplyConfiguration{} @@ -37,3 +38,11 @@ func (b *NodeRuntimeHandlerFeaturesApplyConfiguration) WithRecursiveReadOnlyMoun b.RecursiveReadOnlyMounts = &value return b } + +// WithUserNamespaces sets the UserNamespaces field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UserNamespaces field is set to the value of the last call. +func (b *NodeRuntimeHandlerFeaturesApplyConfiguration) WithUserNamespaces(value bool) *NodeRuntimeHandlerFeaturesApplyConfiguration { + b.UserNamespaces = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go index 5489097f5..6eab10979 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// NodeSelectorApplyConfiguration represents an declarative configuration of the NodeSelector type for use +// NodeSelectorApplyConfiguration represents a declarative configuration of the NodeSelector type for use // with apply. type NodeSelectorApplyConfiguration struct { NodeSelectorTerms []NodeSelectorTermApplyConfiguration `json:"nodeSelectorTerms,omitempty"` } -// NodeSelectorApplyConfiguration constructs an declarative configuration of the NodeSelector type for use with +// NodeSelectorApplyConfiguration constructs a declarative configuration of the NodeSelector type for use with // apply. func NodeSelector() *NodeSelectorApplyConfiguration { return &NodeSelectorApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go index a6e43e607..4dcbc9a2e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// NodeSelectorRequirementApplyConfiguration represents an declarative configuration of the NodeSelectorRequirement type for use +// NodeSelectorRequirementApplyConfiguration represents a declarative configuration of the NodeSelectorRequirement type for use // with apply. 
type NodeSelectorRequirementApplyConfiguration struct { - Key *string `json:"key,omitempty"` - Operator *v1.NodeSelectorOperator `json:"operator,omitempty"` - Values []string `json:"values,omitempty"` + Key *string `json:"key,omitempty"` + Operator *corev1.NodeSelectorOperator `json:"operator,omitempty"` + Values []string `json:"values,omitempty"` } -// NodeSelectorRequirementApplyConfiguration constructs an declarative configuration of the NodeSelectorRequirement type for use with +// NodeSelectorRequirementApplyConfiguration constructs a declarative configuration of the NodeSelectorRequirement type for use with // apply. func NodeSelectorRequirement() *NodeSelectorRequirementApplyConfiguration { return &NodeSelectorRequirementApplyConfiguration{} @@ -47,7 +47,7 @@ func (b *NodeSelectorRequirementApplyConfiguration) WithKey(value string) *NodeS // WithOperator sets the Operator field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operator field is set to the value of the last call. -func (b *NodeSelectorRequirementApplyConfiguration) WithOperator(value v1.NodeSelectorOperator) *NodeSelectorRequirementApplyConfiguration { +func (b *NodeSelectorRequirementApplyConfiguration) WithOperator(value corev1.NodeSelectorOperator) *NodeSelectorRequirementApplyConfiguration { b.Operator = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go index 13b3ddbc1..9d0d780f3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NodeSelectorTermApplyConfiguration represents an declarative configuration of the NodeSelectorTerm type for use +// NodeSelectorTermApplyConfiguration represents a declarative configuration of the NodeSelectorTerm type for use // with apply. type NodeSelectorTermApplyConfiguration struct { MatchExpressions []NodeSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` MatchFields []NodeSelectorRequirementApplyConfiguration `json:"matchFields,omitempty"` } -// NodeSelectorTermApplyConfiguration constructs an declarative configuration of the NodeSelectorTerm type for use with +// NodeSelectorTermApplyConfiguration constructs a declarative configuration of the NodeSelectorTerm type for use with // apply. func NodeSelectorTerm() *NodeSelectorTermApplyConfiguration { return &NodeSelectorTermApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go index 63b61078d..8ac349712 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NodeSpecApplyConfiguration represents an declarative configuration of the NodeSpec type for use +// NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use // with apply. 
type NodeSpecApplyConfiguration struct { PodCIDR *string `json:"podCIDR,omitempty"` @@ -30,7 +30,7 @@ type NodeSpecApplyConfiguration struct { DoNotUseExternalID *string `json:"externalID,omitempty"` } -// NodeSpecApplyConfiguration constructs an declarative configuration of the NodeSpec type for use with +// NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with // apply. func NodeSpec() *NodeSpecApplyConfiguration { return &NodeSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go index a4a30a268..3859ccd50 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go @@ -19,27 +19,28 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// NodeStatusApplyConfiguration represents an declarative configuration of the NodeStatus type for use +// NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use // with apply. type NodeStatusApplyConfiguration struct { - Capacity *v1.ResourceList `json:"capacity,omitempty"` - Allocatable *v1.ResourceList `json:"allocatable,omitempty"` - Phase *v1.NodePhase `json:"phase,omitempty"` + Capacity *corev1.ResourceList `json:"capacity,omitempty"` + Allocatable *corev1.ResourceList `json:"allocatable,omitempty"` + Phase *corev1.NodePhase `json:"phase,omitempty"` Conditions []NodeConditionApplyConfiguration `json:"conditions,omitempty"` Addresses []NodeAddressApplyConfiguration `json:"addresses,omitempty"` DaemonEndpoints *NodeDaemonEndpointsApplyConfiguration `json:"daemonEndpoints,omitempty"` NodeInfo *NodeSystemInfoApplyConfiguration `json:"nodeInfo,omitempty"` Images []ContainerImageApplyConfiguration `json:"images,omitempty"` - VolumesInUse []v1.UniqueVolumeName `json:"volumesInUse,omitempty"` + VolumesInUse []corev1.UniqueVolumeName `json:"volumesInUse,omitempty"` VolumesAttached []AttachedVolumeApplyConfiguration `json:"volumesAttached,omitempty"` Config *NodeConfigStatusApplyConfiguration `json:"config,omitempty"` RuntimeHandlers []NodeRuntimeHandlerApplyConfiguration `json:"runtimeHandlers,omitempty"` + Features *NodeFeaturesApplyConfiguration `json:"features,omitempty"` } -// NodeStatusApplyConfiguration constructs an declarative configuration of the NodeStatus type for use with +// NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with // apply. func NodeStatus() *NodeStatusApplyConfiguration { return &NodeStatusApplyConfiguration{} @@ -48,7 +49,7 @@ func NodeStatus() *NodeStatusApplyConfiguration { // WithCapacity sets the Capacity field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Capacity field is set to the value of the last call. 
-func (b *NodeStatusApplyConfiguration) WithCapacity(value v1.ResourceList) *NodeStatusApplyConfiguration { +func (b *NodeStatusApplyConfiguration) WithCapacity(value corev1.ResourceList) *NodeStatusApplyConfiguration { b.Capacity = &value return b } @@ -56,7 +57,7 @@ func (b *NodeStatusApplyConfiguration) WithCapacity(value v1.ResourceList) *Node // WithAllocatable sets the Allocatable field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Allocatable field is set to the value of the last call. -func (b *NodeStatusApplyConfiguration) WithAllocatable(value v1.ResourceList) *NodeStatusApplyConfiguration { +func (b *NodeStatusApplyConfiguration) WithAllocatable(value corev1.ResourceList) *NodeStatusApplyConfiguration { b.Allocatable = &value return b } @@ -64,7 +65,7 @@ func (b *NodeStatusApplyConfiguration) WithAllocatable(value v1.ResourceList) *N // WithPhase sets the Phase field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Phase field is set to the value of the last call. -func (b *NodeStatusApplyConfiguration) WithPhase(value v1.NodePhase) *NodeStatusApplyConfiguration { +func (b *NodeStatusApplyConfiguration) WithPhase(value corev1.NodePhase) *NodeStatusApplyConfiguration { b.Phase = &value return b } @@ -127,7 +128,7 @@ func (b *NodeStatusApplyConfiguration) WithImages(values ...*ContainerImageApply // WithVolumesInUse adds the given value to the VolumesInUse field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumesInUse field. -func (b *NodeStatusApplyConfiguration) WithVolumesInUse(values ...v1.UniqueVolumeName) *NodeStatusApplyConfiguration { +func (b *NodeStatusApplyConfiguration) WithVolumesInUse(values ...corev1.UniqueVolumeName) *NodeStatusApplyConfiguration { for i := range values { b.VolumesInUse = append(b.VolumesInUse, values[i]) } @@ -167,3 +168,11 @@ func (b *NodeStatusApplyConfiguration) WithRuntimeHandlers(values ...*NodeRuntim } return b } + +// WithFeatures sets the Features field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Features field is set to the value of the last call. +func (b *NodeStatusApplyConfiguration) WithFeatures(value *NodeFeaturesApplyConfiguration) *NodeStatusApplyConfiguration { + b.Features = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go index 2634ea984..11ac50713 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// NodeSystemInfoApplyConfiguration represents an declarative configuration of the NodeSystemInfo type for use +// NodeSystemInfoApplyConfiguration represents a declarative configuration of the NodeSystemInfo type for use // with apply. 
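The NodeStatus apply configuration above picks up a Features field and a matching WithFeatures setter alongside the existing WithRuntimeHandlers. A short sketch of how the new pieces compose, again assuming the applycorev1 alias used in the previous example:

package main

import (
	"fmt"

	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Declare the new status fields added in this revision: the node-level
	// Features block and a per-handler runtime entry.
	status := applycorev1.NodeStatus().
		WithFeatures(applycorev1.NodeFeatures().WithSupplementalGroupsPolicy(true)).
		WithRuntimeHandlers(applycorev1.NodeRuntimeHandler().WithName("runc"))

	fmt.Println(*status.Features.SupplementalGroupsPolicy) // true
}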
type NodeSystemInfoApplyConfiguration struct { MachineID *string `json:"machineID,omitempty"` @@ -33,7 +33,7 @@ type NodeSystemInfoApplyConfiguration struct { Architecture *string `json:"architecture,omitempty"` } -// NodeSystemInfoApplyConfiguration constructs an declarative configuration of the NodeSystemInfo type for use with +// NodeSystemInfoApplyConfiguration constructs a declarative configuration of the NodeSystemInfo type for use with // apply. func NodeSystemInfo() *NodeSystemInfoApplyConfiguration { return &NodeSystemInfoApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go index 0c2402b3c..c129c998b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ObjectFieldSelectorApplyConfiguration represents an declarative configuration of the ObjectFieldSelector type for use +// ObjectFieldSelectorApplyConfiguration represents a declarative configuration of the ObjectFieldSelector type for use // with apply. type ObjectFieldSelectorApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` FieldPath *string `json:"fieldPath,omitempty"` } -// ObjectFieldSelectorApplyConfiguration constructs an declarative configuration of the ObjectFieldSelector type for use with +// ObjectFieldSelectorApplyConfiguration constructs a declarative configuration of the ObjectFieldSelector type for use with // apply. func ObjectFieldSelector() *ObjectFieldSelectorApplyConfiguration { return &ObjectFieldSelectorApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go index 667fa84a8..4cd3f226e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// ObjectReferenceApplyConfiguration represents an declarative configuration of the ObjectReference type for use +// ObjectReferenceApplyConfiguration represents a declarative configuration of the ObjectReference type for use // with apply. type ObjectReferenceApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -34,7 +34,7 @@ type ObjectReferenceApplyConfiguration struct { FieldPath *string `json:"fieldPath,omitempty"` } -// ObjectReferenceApplyConfiguration constructs an declarative configuration of the ObjectReference type for use with +// ObjectReferenceApplyConfiguration constructs a declarative configuration of the ObjectReference type for use with // apply. func ObjectReference() *ObjectReferenceApplyConfiguration { return &ObjectReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go index 2599c197e..6840c1c88 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go @@ -19,24 +19,24 @@ limitations under the License. 
package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeApplyConfiguration represents an declarative configuration of the PersistentVolume type for use +// PersistentVolumeApplyConfiguration represents a declarative configuration of the PersistentVolume type for use // with apply. type PersistentVolumeApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PersistentVolumeSpecApplyConfiguration `json:"spec,omitempty"` - Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PersistentVolumeSpecApplyConfiguration `json:"spec,omitempty"` + Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"` } -// PersistentVolume constructs an declarative configuration of the PersistentVolume type for use with +// PersistentVolume constructs a declarative configuration of the PersistentVolume type for use with // apply. func PersistentVolume(name string) *PersistentVolumeApplyConfiguration { b := &PersistentVolumeApplyConfiguration{} @@ -57,18 +57,18 @@ func PersistentVolume(name string) *PersistentVolumeApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) { +func ExtractPersistentVolume(persistentVolume *corev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) { return extractPersistentVolume(persistentVolume, fieldManager, "") } // ExtractPersistentVolumeStatus is the same as ExtractPersistentVolume except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPersistentVolumeStatus(persistentVolume *apicorev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) { +func ExtractPersistentVolumeStatus(persistentVolume *corev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) { return extractPersistentVolume(persistentVolume, fieldManager, "status") } -func extractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, fieldManager string, subresource string) (*PersistentVolumeApplyConfiguration, error) { +func extractPersistentVolume(persistentVolume *corev1.PersistentVolume, fieldManager string, subresource string) (*PersistentVolumeApplyConfiguration, error) { b := &PersistentVolumeApplyConfiguration{} err := managedfields.ExtractInto(persistentVolume, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolume"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, field // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Kind field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithKind(value string) *PersistentVolumeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *PersistentVolumeApplyConfiguration) WithKind(value string) *PersistentV // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithAPIVersion(value string) *PersistentVolumeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *PersistentVolumeApplyConfiguration) WithAPIVersion(value string) *Persi // If called multiple times, the Name field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithName(value string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *PersistentVolumeApplyConfiguration) WithName(value string) *PersistentV // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithGenerateName(value string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *PersistentVolumeApplyConfiguration) WithGenerateName(value string) *Per // If called multiple times, the Namespace field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithNamespace(value string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *PersistentVolumeApplyConfiguration) WithNamespace(value string) *Persis // If called multiple times, the UID field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithUID(value types.UID) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *PersistentVolumeApplyConfiguration) WithUID(value types.UID) *Persisten // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithResourceVersion(value string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *PersistentVolumeApplyConfiguration) WithResourceVersion(value string) * // If called multiple times, the Generation field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithGeneration(value int64) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PersistentVolumeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PersistentVolumeApplyConfiguration { +func (b *PersistentVolumeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PersistentVolumeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PersistentVolumeApplyConfiguration { +func (b *PersistentVolumeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *PersistentVolumeApplyConfiguration) WithDeletionTimestamp(value metav1. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PersistentVolumeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *PersistentVolumeApplyConfiguration) WithDeletionGracePeriodSeconds(valu // overwriting an existing map entries in Labels field with the same key. func (b *PersistentVolumeApplyConfiguration) WithLabels(entries map[string]string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *PersistentVolumeApplyConfiguration) WithLabels(entries map[string]strin // overwriting an existing map entries in Annotations field with the same key. func (b *PersistentVolumeApplyConfiguration) WithAnnotations(entries map[string]string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *PersistentVolumeApplyConfiguration) WithAnnotations(entries map[string] // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *PersistentVolumeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PersistentVolumeApplyConfiguration { +func (b *PersistentVolumeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *PersistentVolumeApplyConfiguration) WithOwnerReferences(values ...*v1.O func (b *PersistentVolumeApplyConfiguration) WithFinalizers(values ...string) *PersistentVolumeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PersistentVolumeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,3 +254,9 @@ func (b *PersistentVolumeApplyConfiguration) WithStatus(value *PersistentVolumeS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PersistentVolumeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go index a0a001701..93b8b69d4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimApplyConfiguration represents an declarative configuration of the PersistentVolumeClaim type for use +// PersistentVolumeClaimApplyConfiguration represents a declarative configuration of the PersistentVolumeClaim type for use // with apply. 
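Besides switching the embedded-metadata writes to the explicit ObjectMetaApplyConfiguration selector, the PersistentVolume builder above now exposes a GetName accessor. A minimal sketch, with an illustrative volume name and labels; the resulting configuration would typically be handed to a server-side apply call on the clientset:

package main

import (
	"fmt"

	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	pv := applycorev1.PersistentVolume("shared-data"). // illustrative name
		WithLabels(map[string]string{"tier": "storage"})

	// GetName, added in this revision, reads the name back out of the
	// embedded ObjectMeta apply configuration.
	fmt.Println(*pv.GetName()) // "shared-data"
}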
type PersistentVolumeClaimApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` - Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` + Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"` } -// PersistentVolumeClaim constructs an declarative configuration of the PersistentVolumeClaim type for use with +// PersistentVolumeClaim constructs a declarative configuration of the PersistentVolumeClaim type for use with // apply. func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyConfiguration { b := &PersistentVolumeClaimApplyConfiguration{} @@ -58,18 +58,18 @@ func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyCo // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) { +func ExtractPersistentVolumeClaim(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) { return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "") } // ExtractPersistentVolumeClaimStatus is the same as ExtractPersistentVolumeClaim except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPersistentVolumeClaimStatus(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) { +func ExtractPersistentVolumeClaimStatus(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) { return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "status") } -func extractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string, subresource string) (*PersistentVolumeClaimApplyConfiguration, error) { +func extractPersistentVolumeClaim(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string, subresource string) (*PersistentVolumeClaimApplyConfiguration, error) { b := &PersistentVolumeClaimApplyConfiguration{} err := managedfields.ExtractInto(persistentVolumeClaim, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolumeClaim"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVol // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *PersistentVolumeClaimApplyConfiguration) WithKind(value string) *PersistentVolumeClaimApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithKind(value string) *Persis // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithAPIVersion(value string) *PersistentVolumeClaimApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithAPIVersion(value string) * // If called multiple times, the Name field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithName(value string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithName(value string) *Persis // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithGenerateName(value string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithNamespace(value string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithNamespace(value string) *P // If called multiple times, the UID field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithUID(value types.UID) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithUID(value types.UID) *Pers // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithResourceVersion(value string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithResourceVersion(value stri // If called multiple times, the Generation field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithGeneration(value int64) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PersistentVolumeClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PersistentVolumeClaimApplyConfiguration { +func (b *PersistentVolumeClaimApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PersistentVolumeClaimApplyConfiguration { +func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionTimestamp(value me // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithDeletionGracePeriodSeconds // overwriting an existing map entries in Labels field with the same key. func (b *PersistentVolumeClaimApplyConfiguration) WithLabels(entries map[string]string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithLabels(entries map[string] // overwriting an existing map entries in Annotations field with the same key. func (b *PersistentVolumeClaimApplyConfiguration) WithAnnotations(entries map[string]string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithAnnotations(entries map[st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PersistentVolumeClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimApplyConfiguration { +func (b *PersistentVolumeClaimApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithOwnerReferences(values ... func (b *PersistentVolumeClaimApplyConfiguration) WithFinalizers(values ...string) *PersistentVolumeClaimApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PersistentVolumeClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *PersistentVolumeClaimApplyConfiguration) WithStatus(value *PersistentVo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PersistentVolumeClaimApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go index 65449e92e..40025d533 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PersistentVolumeClaimConditionApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimCondition type for use +// PersistentVolumeClaimConditionApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimCondition type for use // with apply. 
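The PersistentVolumeClaim configuration gains the same GetName accessor, which pairs naturally with the Extract helpers shown in its diff. A sketch under stated assumptions: the claim object is a stand-in for one normally read from the cluster, and "example-manager" is a hypothetical field-manager name:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Stand-in for an object normally fetched from the API server.
	live := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
	}

	// Recover the fields owned by a given field manager as an apply
	// configuration that can be modified and re-applied.
	cfg, err := applycorev1.ExtractPersistentVolumeClaim(live, "example-manager")
	if err != nil {
		panic(err)
	}
	if name := cfg.GetName(); name != nil { // GetName is new in this revision
		fmt.Println(*name) // "data"
	}
}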
type PersistentVolumeClaimConditionApplyConfiguration struct { - Type *v1.PersistentVolumeClaimConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *corev1.PersistentVolumeClaimConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// PersistentVolumeClaimConditionApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimCondition type for use with +// PersistentVolumeClaimConditionApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimCondition type for use with // apply. func PersistentVolumeClaimCondition() *PersistentVolumeClaimConditionApplyConfiguration { return &PersistentVolumeClaimConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func PersistentVolumeClaimCondition() *PersistentVolumeClaimConditionApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PersistentVolumeClaimConditionApplyConfiguration) WithType(value v1.PersistentVolumeClaimConditionType) *PersistentVolumeClaimConditionApplyConfiguration { +func (b *PersistentVolumeClaimConditionApplyConfiguration) WithType(value corev1.PersistentVolumeClaimConditionType) *PersistentVolumeClaimConditionApplyConfiguration { b.Type = &value return b } @@ -51,7 +51,7 @@ func (b *PersistentVolumeClaimConditionApplyConfiguration) WithType(value v1.Per // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PersistentVolumeClaimConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PersistentVolumeClaimConditionApplyConfiguration { +func (b *PersistentVolumeClaimConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *PersistentVolumeClaimConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go index 4db12fadb..2c2be16b3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go @@ -19,25 +19,25 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimSpec type for use +// PersistentVolumeClaimSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimSpec type for use // with apply. 
type PersistentVolumeClaimSpecApplyConfiguration struct { - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` Resources *VolumeResourceRequirementsApplyConfiguration `json:"resources,omitempty"` VolumeName *string `json:"volumeName,omitempty"` StorageClassName *string `json:"storageClassName,omitempty"` - VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` + VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"` DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } -// PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with +// PersistentVolumeClaimSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimSpec type for use with // apply. func PersistentVolumeClaimSpec() *PersistentVolumeClaimSpecApplyConfiguration { return &PersistentVolumeClaimSpecApplyConfiguration{} @@ -46,7 +46,7 @@ func PersistentVolumeClaimSpec() *PersistentVolumeClaimSpecApplyConfiguration { // WithAccessModes adds the given value to the AccessModes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the AccessModes field. -func (b *PersistentVolumeClaimSpecApplyConfiguration) WithAccessModes(values ...v1.PersistentVolumeAccessMode) *PersistentVolumeClaimSpecApplyConfiguration { +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithAccessModes(values ...corev1.PersistentVolumeAccessMode) *PersistentVolumeClaimSpecApplyConfiguration { for i := range values { b.AccessModes = append(b.AccessModes, values[i]) } @@ -88,7 +88,7 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithStorageClassName(value // WithVolumeMode sets the VolumeMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VolumeMode field is set to the value of the last call. -func (b *PersistentVolumeClaimSpecApplyConfiguration) WithVolumeMode(value v1.PersistentVolumeMode) *PersistentVolumeClaimSpecApplyConfiguration { +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithVolumeMode(value corev1.PersistentVolumeMode) *PersistentVolumeClaimSpecApplyConfiguration { b.VolumeMode = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index 1f6d5ae32..6cea23a2c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -19,23 +19,23 @@ limitations under the License. 
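The spec builder above now references the enum types through the corev1 alias of k8s.io/api/core/v1 rather than a bare v1 import. A brief sketch of the typed setters, with an illustrative storage-class name:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Access modes and volume mode take the typed constants from
	// k8s.io/api/core/v1; scalar fields take plain Go values.
	spec := applycorev1.PersistentVolumeClaimSpec().
		WithAccessModes(corev1.ReadWriteOnce).
		WithStorageClassName("standard"). // illustrative class name
		WithVolumeMode(corev1.PersistentVolumeFilesystem)

	fmt.Println(*spec.StorageClassName, spec.AccessModes)
}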
package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use +// PersistentVolumeClaimStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. type PersistentVolumeClaimStatusApplyConfiguration struct { - Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Capacity *v1.ResourceList `json:"capacity,omitempty"` + Phase *corev1.PersistentVolumeClaimPhase `json:"phase,omitempty"` + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Capacity *corev1.ResourceList `json:"capacity,omitempty"` Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` - AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` - AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` + AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"` + AllocatedResourceStatuses map[corev1.ResourceName]corev1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty"` ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"` } -// PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with +// PersistentVolumeClaimStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimStatus type for use with // apply. func PersistentVolumeClaimStatus() *PersistentVolumeClaimStatusApplyConfiguration { return &PersistentVolumeClaimStatusApplyConfiguration{} @@ -44,7 +44,7 @@ func PersistentVolumeClaimStatus() *PersistentVolumeClaimStatusApplyConfiguratio // WithPhase sets the Phase field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Phase field is set to the value of the last call. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithPhase(value v1.PersistentVolumeClaimPhase) *PersistentVolumeClaimStatusApplyConfiguration { +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithPhase(value corev1.PersistentVolumeClaimPhase) *PersistentVolumeClaimStatusApplyConfiguration { b.Phase = &value return b } @@ -52,7 +52,7 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithPhase(value v1.Persi // WithAccessModes adds the given value to the AccessModes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the AccessModes field. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAccessModes(values ...v1.PersistentVolumeAccessMode) *PersistentVolumeClaimStatusApplyConfiguration { +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAccessModes(values ...corev1.PersistentVolumeAccessMode) *PersistentVolumeClaimStatusApplyConfiguration { for i := range values { b.AccessModes = append(b.AccessModes, values[i]) } @@ -62,7 +62,7 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAccessModes(values . 
// WithCapacity sets the Capacity field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Capacity field is set to the value of the last call. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithCapacity(value v1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration { +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithCapacity(value corev1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration { b.Capacity = &value return b } @@ -83,7 +83,7 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithConditions(values .. // WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AllocatedResources field is set to the value of the last call. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(value v1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration { +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(value corev1.ResourceList) *PersistentVolumeClaimStatusApplyConfiguration { b.AllocatedResources = &value return b } @@ -92,9 +92,9 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(v // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the AllocatedResourceStatuses field, // overwriting an existing map entries in AllocatedResourceStatuses field with the same key. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceStatuses(entries map[v1.ResourceName]v1.ClaimResourceStatus) *PersistentVolumeClaimStatusApplyConfiguration { +func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceStatuses(entries map[corev1.ResourceName]corev1.ClaimResourceStatus) *PersistentVolumeClaimStatusApplyConfiguration { if b.AllocatedResourceStatuses == nil && len(entries) > 0 { - b.AllocatedResourceStatuses = make(map[v1.ResourceName]v1.ClaimResourceStatus, len(entries)) + b.AllocatedResourceStatuses = make(map[corev1.ResourceName]corev1.ClaimResourceStatus, len(entries)) } for k, v := range entries { b.AllocatedResourceStatuses[k] = v diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go index 894d04f0b..4db3cbf12 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PersistentVolumeClaimTemplateApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimTemplate type for use +// PersistentVolumeClaimTemplateApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimTemplate type for use // with apply. 
type PersistentVolumeClaimTemplateApplyConfiguration struct { - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"` } -// PersistentVolumeClaimTemplateApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimTemplate type for use with +// PersistentVolumeClaimTemplateApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimTemplate type for use with // apply. func PersistentVolumeClaimTemplate() *PersistentVolumeClaimTemplateApplyConfiguration { return &PersistentVolumeClaimTemplateApplyConfiguration{} @@ -42,7 +42,7 @@ func PersistentVolumeClaimTemplate() *PersistentVolumeClaimTemplateApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithName(value string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -51,7 +51,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithName(value string) // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithGenerateName(value string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -60,7 +60,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithGenerateName(value // If called multiple times, the Namespace field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithNamespace(value string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -69,7 +69,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithNamespace(value st // If called multiple times, the UID field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithUID(value types.UID) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -78,7 +78,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithUID(value types.UI // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithResourceVersion(value string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -87,25 +87,25 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithResourceVersion(va // If called multiple times, the Generation field is set to the value of the last call. 
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithGeneration(value int64) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration { +func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration { +func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -114,7 +114,7 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionTimestamp( // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -124,11 +124,11 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithDeletionGracePerio // overwriting an existing map entries in Labels field with the same key. func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -139,11 +139,11 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithLabels(entries map // overwriting an existing map entries in Annotations field with the same key. 
func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -151,13 +151,13 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithAnnotations(entrie // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimTemplateApplyConfiguration { +func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -168,14 +168,14 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithOwnerReferences(va func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *PersistentVolumeClaimTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PersistentVolumeClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -186,3 +186,9 @@ func (b *PersistentVolumeClaimTemplateApplyConfiguration) WithSpec(value *Persis b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PersistentVolumeClaimTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go index a498fa6a5..ccccdfb49 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. 
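Editor's note on the persistentvolumeclaimtemplate.go hunks above: every metadata setter now goes through the embedded ObjectMetaApplyConfiguration explicitly, and a GetName accessor is added. A hedged sketch of typical builder chaining follows; the names, labels, and storage class are made up for illustration.

package pvctemplateexample

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func printTemplateName() {
	tmpl := corev1ac.PersistentVolumeClaimTemplate().
		WithName("data").
		WithLabels(map[string]string{"app": "db"}).
		WithSpec(corev1ac.PersistentVolumeClaimSpec().
			WithAccessModes(corev1.ReadWriteOnce).
			WithStorageClassName("standard"))

	// GetName is the accessor added in this update; it lazily initializes the
	// embedded ObjectMetaApplyConfiguration, so it is safe on a fresh builder too.
	if name := tmpl.GetName(); name != nil {
		fmt.Println("template name:", *name)
	}
}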
package v1 -// PersistentVolumeClaimVolumeSourceApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimVolumeSource type for use +// PersistentVolumeClaimVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimVolumeSource type for use // with apply. type PersistentVolumeClaimVolumeSourceApplyConfiguration struct { ClaimName *string `json:"claimName,omitempty"` ReadOnly *bool `json:"readOnly,omitempty"` } -// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimVolumeSource type for use with +// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimVolumeSource type for use with // apply. func PersistentVolumeClaimVolumeSource() *PersistentVolumeClaimVolumeSourceApplyConfiguration { return &PersistentVolumeClaimVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go index 0576e7dd3..aba012462 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PersistentVolumeSourceApplyConfiguration represents an declarative configuration of the PersistentVolumeSource type for use +// PersistentVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeSource type for use // with apply. type PersistentVolumeSourceApplyConfiguration struct { GCEPersistentDisk *GCEPersistentDiskVolumeSourceApplyConfiguration `json:"gcePersistentDisk,omitempty"` @@ -45,7 +45,7 @@ type PersistentVolumeSourceApplyConfiguration struct { CSI *CSIPersistentVolumeSourceApplyConfiguration `json:"csi,omitempty"` } -// PersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the PersistentVolumeSource type for use with +// PersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeSource type for use with // apply. func PersistentVolumeSource() *PersistentVolumeSourceApplyConfiguration { return &PersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go index 8a30dab64..792e3b944 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go @@ -19,25 +19,25 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// PersistentVolumeSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeSpec type for use +// PersistentVolumeSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeSpec type for use // with apply. 
type PersistentVolumeSpecApplyConfiguration struct { - Capacity *v1.ResourceList `json:"capacity,omitempty"` + Capacity *corev1.ResourceList `json:"capacity,omitempty"` PersistentVolumeSourceApplyConfiguration `json:",inline"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` ClaimRef *ObjectReferenceApplyConfiguration `json:"claimRef,omitempty"` - PersistentVolumeReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"` + PersistentVolumeReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"` StorageClassName *string `json:"storageClassName,omitempty"` MountOptions []string `json:"mountOptions,omitempty"` - VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` + VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"` NodeAffinity *VolumeNodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"` VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } -// PersistentVolumeSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeSpec type for use with +// PersistentVolumeSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeSpec type for use with // apply. func PersistentVolumeSpec() *PersistentVolumeSpecApplyConfiguration { return &PersistentVolumeSpecApplyConfiguration{} @@ -46,7 +46,7 @@ func PersistentVolumeSpec() *PersistentVolumeSpecApplyConfiguration { // WithCapacity sets the Capacity field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Capacity field is set to the value of the last call. -func (b *PersistentVolumeSpecApplyConfiguration) WithCapacity(value v1.ResourceList) *PersistentVolumeSpecApplyConfiguration { +func (b *PersistentVolumeSpecApplyConfiguration) WithCapacity(value corev1.ResourceList) *PersistentVolumeSpecApplyConfiguration { b.Capacity = &value return b } @@ -55,7 +55,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithCapacity(value v1.ResourceL // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GCEPersistentDisk field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithGCEPersistentDisk(value *GCEPersistentDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.GCEPersistentDisk = value + b.PersistentVolumeSourceApplyConfiguration.GCEPersistentDisk = value return b } @@ -63,7 +63,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithGCEPersistentDisk(value *GC // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AWSElasticBlockStore field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithAWSElasticBlockStore(value *AWSElasticBlockStoreVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.AWSElasticBlockStore = value + b.PersistentVolumeSourceApplyConfiguration.AWSElasticBlockStore = value return b } @@ -71,7 +71,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithAWSElasticBlockStore(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the HostPath field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithHostPath(value *HostPathVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.HostPath = value + b.PersistentVolumeSourceApplyConfiguration.HostPath = value return b } @@ -79,7 +79,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithHostPath(value *HostPathVol // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Glusterfs field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithGlusterfs(value *GlusterfsPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.Glusterfs = value + b.PersistentVolumeSourceApplyConfiguration.Glusterfs = value return b } @@ -87,7 +87,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithGlusterfs(value *GlusterfsP // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NFS field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithNFS(value *NFSVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.NFS = value + b.PersistentVolumeSourceApplyConfiguration.NFS = value return b } @@ -95,7 +95,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithNFS(value *NFSVolumeSourceA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RBD field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithRBD(value *RBDPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.RBD = value + b.PersistentVolumeSourceApplyConfiguration.RBD = value return b } @@ -103,7 +103,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithRBD(value *RBDPersistentVol // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ISCSI field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithISCSI(value *ISCSIPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.ISCSI = value + b.PersistentVolumeSourceApplyConfiguration.ISCSI = value return b } @@ -111,7 +111,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithISCSI(value *ISCSIPersisten // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Cinder field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithCinder(value *CinderPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.Cinder = value + b.PersistentVolumeSourceApplyConfiguration.Cinder = value return b } @@ -119,7 +119,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithCinder(value *CinderPersist // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CephFS field is set to the value of the last call. 
func (b *PersistentVolumeSpecApplyConfiguration) WithCephFS(value *CephFSPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.CephFS = value + b.PersistentVolumeSourceApplyConfiguration.CephFS = value return b } @@ -127,7 +127,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithCephFS(value *CephFSPersist // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FC field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithFC(value *FCVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.FC = value + b.PersistentVolumeSourceApplyConfiguration.FC = value return b } @@ -135,7 +135,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithFC(value *FCVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Flocker field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithFlocker(value *FlockerVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.Flocker = value + b.PersistentVolumeSourceApplyConfiguration.Flocker = value return b } @@ -143,7 +143,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithFlocker(value *FlockerVolum // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FlexVolume field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithFlexVolume(value *FlexPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.FlexVolume = value + b.PersistentVolumeSourceApplyConfiguration.FlexVolume = value return b } @@ -151,7 +151,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithFlexVolume(value *FlexPersi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AzureFile field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithAzureFile(value *AzureFilePersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.AzureFile = value + b.PersistentVolumeSourceApplyConfiguration.AzureFile = value return b } @@ -159,7 +159,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithAzureFile(value *AzureFileP // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VsphereVolume field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithVsphereVolume(value *VsphereVirtualDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.VsphereVolume = value + b.PersistentVolumeSourceApplyConfiguration.VsphereVolume = value return b } @@ -167,7 +167,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithVsphereVolume(value *Vspher // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Quobyte field is set to the value of the last call. 
func (b *PersistentVolumeSpecApplyConfiguration) WithQuobyte(value *QuobyteVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.Quobyte = value + b.PersistentVolumeSourceApplyConfiguration.Quobyte = value return b } @@ -175,7 +175,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithQuobyte(value *QuobyteVolum // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AzureDisk field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithAzureDisk(value *AzureDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.AzureDisk = value + b.PersistentVolumeSourceApplyConfiguration.AzureDisk = value return b } @@ -183,7 +183,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithAzureDisk(value *AzureDiskV // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PhotonPersistentDisk field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithPhotonPersistentDisk(value *PhotonPersistentDiskVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.PhotonPersistentDisk = value + b.PersistentVolumeSourceApplyConfiguration.PhotonPersistentDisk = value return b } @@ -191,7 +191,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithPhotonPersistentDisk(value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PortworxVolume field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithPortworxVolume(value *PortworxVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.PortworxVolume = value + b.PersistentVolumeSourceApplyConfiguration.PortworxVolume = value return b } @@ -199,7 +199,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithPortworxVolume(value *Portw // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ScaleIO field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithScaleIO(value *ScaleIOPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.ScaleIO = value + b.PersistentVolumeSourceApplyConfiguration.ScaleIO = value return b } @@ -207,7 +207,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithScaleIO(value *ScaleIOPersi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Local field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithLocal(value *LocalVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.Local = value + b.PersistentVolumeSourceApplyConfiguration.Local = value return b } @@ -215,7 +215,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithLocal(value *LocalVolumeSou // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StorageOS field is set to the value of the last call. 
func (b *PersistentVolumeSpecApplyConfiguration) WithStorageOS(value *StorageOSPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.StorageOS = value + b.PersistentVolumeSourceApplyConfiguration.StorageOS = value return b } @@ -223,14 +223,14 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithStorageOS(value *StorageOSP // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CSI field is set to the value of the last call. func (b *PersistentVolumeSpecApplyConfiguration) WithCSI(value *CSIPersistentVolumeSourceApplyConfiguration) *PersistentVolumeSpecApplyConfiguration { - b.CSI = value + b.PersistentVolumeSourceApplyConfiguration.CSI = value return b } // WithAccessModes adds the given value to the AccessModes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the AccessModes field. -func (b *PersistentVolumeSpecApplyConfiguration) WithAccessModes(values ...v1.PersistentVolumeAccessMode) *PersistentVolumeSpecApplyConfiguration { +func (b *PersistentVolumeSpecApplyConfiguration) WithAccessModes(values ...corev1.PersistentVolumeAccessMode) *PersistentVolumeSpecApplyConfiguration { for i := range values { b.AccessModes = append(b.AccessModes, values[i]) } @@ -248,7 +248,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithClaimRef(value *ObjectRefer // WithPersistentVolumeReclaimPolicy sets the PersistentVolumeReclaimPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PersistentVolumeReclaimPolicy field is set to the value of the last call. -func (b *PersistentVolumeSpecApplyConfiguration) WithPersistentVolumeReclaimPolicy(value v1.PersistentVolumeReclaimPolicy) *PersistentVolumeSpecApplyConfiguration { +func (b *PersistentVolumeSpecApplyConfiguration) WithPersistentVolumeReclaimPolicy(value corev1.PersistentVolumeReclaimPolicy) *PersistentVolumeSpecApplyConfiguration { b.PersistentVolumeReclaimPolicy = &value return b } @@ -274,7 +274,7 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithMountOptions(values ...stri // WithVolumeMode sets the VolumeMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VolumeMode field is set to the value of the last call. -func (b *PersistentVolumeSpecApplyConfiguration) WithVolumeMode(value v1.PersistentVolumeMode) *PersistentVolumeSpecApplyConfiguration { +func (b *PersistentVolumeSpecApplyConfiguration) WithVolumeMode(value corev1.PersistentVolumeMode) *PersistentVolumeSpecApplyConfiguration { b.VolumeMode = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go index a473c0e92..0bb077ae0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go @@ -19,20 +19,20 @@ limitations under the License. 
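Editor's note on the persistentvolumespec.go changes above: they only qualify access to the inlined PersistentVolumeSourceApplyConfiguration; callers keep chaining the same With methods. A small sketch of building a PV spec with one of those inlined sources, where the path, size, and storage class name are illustrative assumptions:

package pvexample

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func hostPathPVSpec() *corev1ac.PersistentVolumeSpecApplyConfiguration {
	// WithHostPath ultimately sets the field on the inlined
	// PersistentVolumeSourceApplyConfiguration, exactly as the regenerated code shows.
	return corev1ac.PersistentVolumeSpec().
		WithCapacity(corev1.ResourceList{
			corev1.ResourceStorage: resource.MustParse("5Gi"),
		}).
		WithAccessModes(corev1.ReadWriteOnce).
		WithPersistentVolumeReclaimPolicy(corev1.PersistentVolumeReclaimRetain).
		WithStorageClassName("manual").
		WithHostPath(corev1ac.HostPathVolumeSource().
			WithPath("/mnt/data").
			WithType(corev1.HostPathDirectoryOrCreate))
}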
package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PersistentVolumeStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeStatus type for use +// PersistentVolumeStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeStatus type for use // with apply. type PersistentVolumeStatusApplyConfiguration struct { - Phase *v1.PersistentVolumePhase `json:"phase,omitempty"` - Message *string `json:"message,omitempty"` - Reason *string `json:"reason,omitempty"` - LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"` + Phase *corev1.PersistentVolumePhase `json:"phase,omitempty"` + Message *string `json:"message,omitempty"` + Reason *string `json:"reason,omitempty"` + LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"` } -// PersistentVolumeStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeStatus type for use with +// PersistentVolumeStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeStatus type for use with // apply. func PersistentVolumeStatus() *PersistentVolumeStatusApplyConfiguration { return &PersistentVolumeStatusApplyConfiguration{} @@ -41,7 +41,7 @@ func PersistentVolumeStatus() *PersistentVolumeStatusApplyConfiguration { // WithPhase sets the Phase field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Phase field is set to the value of the last call. -func (b *PersistentVolumeStatusApplyConfiguration) WithPhase(value v1.PersistentVolumePhase) *PersistentVolumeStatusApplyConfiguration { +func (b *PersistentVolumeStatusApplyConfiguration) WithPhase(value corev1.PersistentVolumePhase) *PersistentVolumeStatusApplyConfiguration { b.Phase = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go index 43587d676..d8dc103e2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PhotonPersistentDiskVolumeSourceApplyConfiguration represents an declarative configuration of the PhotonPersistentDiskVolumeSource type for use +// PhotonPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the PhotonPersistentDiskVolumeSource type for use // with apply. type PhotonPersistentDiskVolumeSourceApplyConfiguration struct { PdID *string `json:"pdID,omitempty"` FSType *string `json:"fsType,omitempty"` } -// PhotonPersistentDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the PhotonPersistentDiskVolumeSource type for use with +// PhotonPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the PhotonPersistentDiskVolumeSource type for use with // apply. 
func PhotonPersistentDiskVolumeSource() *PhotonPersistentDiskVolumeSourceApplyConfiguration { return &PhotonPersistentDiskVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go index 7210bd983..29526709f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodApplyConfiguration represents an declarative configuration of the Pod type for use +// PodApplyConfiguration represents a declarative configuration of the Pod type for use // with apply. type PodApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` - Status *PodStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` + Status *PodStatusApplyConfiguration `json:"status,omitempty"` } -// Pod constructs an declarative configuration of the Pod type for use with +// Pod constructs a declarative configuration of the Pod type for use with // apply. func Pod(name, namespace string) *PodApplyConfiguration { b := &PodApplyConfiguration{} @@ -58,18 +58,18 @@ func Pod(name, namespace string) *PodApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPod(pod *apicorev1.Pod, fieldManager string) (*PodApplyConfiguration, error) { +func ExtractPod(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) { return extractPod(pod, fieldManager, "") } // ExtractPodStatus is the same as ExtractPod except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodStatus(pod *apicorev1.Pod, fieldManager string) (*PodApplyConfiguration, error) { +func ExtractPodStatus(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) { return extractPod(pod, fieldManager, "status") } -func extractPod(pod *apicorev1.Pod, fieldManager string, subresource string) (*PodApplyConfiguration, error) { +func extractPod(pod *corev1.Pod, fieldManager string, subresource string) (*PodApplyConfiguration, error) { b := &PodApplyConfiguration{} err := managedfields.ExtractInto(pod, internal.Parser().Type("io.k8s.api.core.v1.Pod"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractPod(pod *apicorev1.Pod, fieldManager string, subresource string) (*P // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *PodApplyConfiguration) WithKind(value string) *PodApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *PodApplyConfiguration) WithKind(value string) *PodApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PodApplyConfiguration) WithAPIVersion(value string) *PodApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *PodApplyConfiguration) WithAPIVersion(value string) *PodApplyConfigurat // If called multiple times, the Name field is set to the value of the last call. func (b *PodApplyConfiguration) WithName(value string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *PodApplyConfiguration) WithName(value string) *PodApplyConfiguration { // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PodApplyConfiguration) WithGenerateName(value string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *PodApplyConfiguration) WithGenerateName(value string) *PodApplyConfigur // If called multiple times, the Namespace field is set to the value of the last call. func (b *PodApplyConfiguration) WithNamespace(value string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *PodApplyConfiguration) WithNamespace(value string) *PodApplyConfigurati // If called multiple times, the UID field is set to the value of the last call. func (b *PodApplyConfiguration) WithUID(value types.UID) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *PodApplyConfiguration) WithUID(value types.UID) *PodApplyConfiguration // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PodApplyConfiguration) WithResourceVersion(value string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *PodApplyConfiguration) WithResourceVersion(value string) *PodApplyConfi // If called multiple times, the Generation field is set to the value of the last call. func (b *PodApplyConfiguration) WithGeneration(value int64) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *PodApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodApplyConfiguration { +func (b *PodApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodApplyConfiguration { +func (b *PodApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *PodApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodApp // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PodApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *PodApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Pod // overwriting an existing map entries in Labels field with the same key. func (b *PodApplyConfiguration) WithLabels(entries map[string]string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *PodApplyConfiguration) WithLabels(entries map[string]string) *PodApplyC // overwriting an existing map entries in Annotations field with the same key. func (b *PodApplyConfiguration) WithAnnotations(entries map[string]string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *PodApplyConfiguration) WithAnnotations(entries map[string]string) *PodA // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *PodApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodApplyConfiguration { +func (b *PodApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *PodApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReference func (b *PodApplyConfiguration) WithFinalizers(values ...string) *PodApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PodApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *PodApplyConfiguration) WithStatus(value *PodStatusApplyConfiguration) * b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go index 7049c6212..23fed9546 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodAffinityApplyConfiguration represents an declarative configuration of the PodAffinity type for use +// PodAffinityApplyConfiguration represents a declarative configuration of the PodAffinity type for use // with apply. type PodAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// PodAffinityApplyConfiguration constructs an declarative configuration of the PodAffinity type for use with +// PodAffinityApplyConfiguration constructs a declarative configuration of the PodAffinity type for use with // apply. func PodAffinity() *PodAffinityApplyConfiguration { return &PodAffinityApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go index ac1eab3d8..1cc1ca0d0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go @@ -19,21 +19,21 @@ limitations under the License. 
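Editor's note on the pod.go hunks above: the same mechanical alias rename plus a GetName accessor; the server-side apply flow that consumes a PodApplyConfiguration is unchanged. A hedged end-to-end sketch follows; the kubeconfig wiring, field manager name, namespace, and image are assumptions, not part of this diff.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	pod := corev1ac.Pod("nginx", "default").
		WithLabels(map[string]string{"app": "nginx"}).
		WithSpec(corev1ac.PodSpec().
			WithContainers(corev1ac.Container().
				WithName("nginx").
				WithImage("nginx:1.27")))

	// Apply sends the declarative configuration; only the fields set on the
	// builder are owned by this field manager.
	applied, err := clientset.CoreV1().Pods("default").Apply(
		context.Background(), pod, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	if err != nil {
		panic(err)
	}
	fmt.Println("applied pod:", applied.Name)
}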
package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodAffinityTermApplyConfiguration represents an declarative configuration of the PodAffinityTerm type for use +// PodAffinityTermApplyConfiguration represents a declarative configuration of the PodAffinityTerm type for use // with apply. type PodAffinityTermApplyConfiguration struct { - LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` - Namespaces []string `json:"namespaces,omitempty"` - TopologyKey *string `json:"topologyKey,omitempty"` - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` - MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` + LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` + TopologyKey *string `json:"topologyKey,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` + MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` } -// PodAffinityTermApplyConfiguration constructs an declarative configuration of the PodAffinityTerm type for use with +// PodAffinityTermApplyConfiguration constructs a declarative configuration of the PodAffinityTerm type for use with // apply. func PodAffinityTerm() *PodAffinityTermApplyConfiguration { return &PodAffinityTermApplyConfiguration{} @@ -42,7 +42,7 @@ func PodAffinityTerm() *PodAffinityTermApplyConfiguration { // WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LabelSelector field is set to the value of the last call. -func (b *PodAffinityTermApplyConfiguration) WithLabelSelector(value *v1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration { +func (b *PodAffinityTermApplyConfiguration) WithLabelSelector(value *metav1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration { b.LabelSelector = value return b } @@ -68,7 +68,7 @@ func (b *PodAffinityTermApplyConfiguration) WithTopologyKey(value string) *PodAf // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration { +func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *PodAffinityTermApplyConfiguration { b.NamespaceSelector = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go index 42681c54c..ae9848963 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go @@ -18,14 +18,14 @@ limitations under the License. 
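Editor's note on the podaffinityterm.go hunk above: the LabelSelector and NamespaceSelector setters now take the meta/v1 apply configuration under the metav1 alias. A small sketch of composing an anti-affinity rule from it, with illustrative label keys and topology key:

package affinityexample

import (
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

func spreadAcrossNodes() *corev1ac.PodAntiAffinityApplyConfiguration {
	term := corev1ac.PodAffinityTerm().
		WithLabelSelector(metav1ac.LabelSelector().
			WithMatchLabels(map[string]string{"app": "web"})).
		WithTopologyKey("kubernetes.io/hostname")

	// Require that pods matching the selector do not co-locate on the same node.
	return corev1ac.PodAntiAffinity().
		WithRequiredDuringSchedulingIgnoredDuringExecution(term)
}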
package v1 -// PodAntiAffinityApplyConfiguration represents an declarative configuration of the PodAntiAffinity type for use +// PodAntiAffinityApplyConfiguration represents a declarative configuration of the PodAntiAffinity type for use // with apply. type PodAntiAffinityApplyConfiguration struct { RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// PodAntiAffinityApplyConfiguration constructs an declarative configuration of the PodAntiAffinity type for use with +// PodAntiAffinityApplyConfiguration constructs a declarative configuration of the PodAntiAffinity type for use with // apply. func PodAntiAffinity() *PodAntiAffinityApplyConfiguration { return &PodAntiAffinityApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go index 610209f3c..67cd1bd09 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PodConditionApplyConfiguration represents an declarative configuration of the PodCondition type for use +// PodConditionApplyConfiguration represents a declarative configuration of the PodCondition type for use // with apply. type PodConditionApplyConfiguration struct { - Type *v1.PodConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *corev1.PodConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// PodConditionApplyConfiguration constructs an declarative configuration of the PodCondition type for use with +// PodConditionApplyConfiguration constructs a declarative configuration of the PodCondition type for use with // apply. func PodCondition() *PodConditionApplyConfiguration { return &PodConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func PodCondition() *PodConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *PodConditionApplyConfiguration) WithType(value v1.PodConditionType) *PodConditionApplyConfiguration { +func (b *PodConditionApplyConfiguration) WithType(value corev1.PodConditionType) *PodConditionApplyConfiguration { b.Type = &value return b } @@ -51,7 +51,7 @@ func (b *PodConditionApplyConfiguration) WithType(value v1.PodConditionType) *Po // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PodConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PodConditionApplyConfiguration { +func (b *PodConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *PodConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go index 0fe6a0834..2e0ce9a91 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PodDNSConfigApplyConfiguration represents an declarative configuration of the PodDNSConfig type for use +// PodDNSConfigApplyConfiguration represents a declarative configuration of the PodDNSConfig type for use // with apply. type PodDNSConfigApplyConfiguration struct { Nameservers []string `json:"nameservers,omitempty"` @@ -26,7 +26,7 @@ type PodDNSConfigApplyConfiguration struct { Options []PodDNSConfigOptionApplyConfiguration `json:"options,omitempty"` } -// PodDNSConfigApplyConfiguration constructs an declarative configuration of the PodDNSConfig type for use with +// PodDNSConfigApplyConfiguration constructs a declarative configuration of the PodDNSConfig type for use with // apply. func PodDNSConfig() *PodDNSConfigApplyConfiguration { return &PodDNSConfigApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go index 327bf803b..458b333bf 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodDNSConfigOptionApplyConfiguration represents an declarative configuration of the PodDNSConfigOption type for use +// PodDNSConfigOptionApplyConfiguration represents a declarative configuration of the PodDNSConfigOption type for use // with apply. type PodDNSConfigOptionApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// PodDNSConfigOptionApplyConfiguration constructs an declarative configuration of the PodDNSConfigOption type for use with +// PodDNSConfigOptionApplyConfiguration constructs a declarative configuration of the PodDNSConfigOption type for use with // apply. 
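Editor's note on the podcondition.go hunk above: the typed arguments simply move to the corev1 alias. A sketch of stamping a condition onto a pod status apply configuration; the phase, reason, and condition values are illustrative.

package conditionexample

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func readyStatus() *corev1ac.PodStatusApplyConfiguration {
	cond := corev1ac.PodCondition().
		WithType(corev1.PodReady).
		WithStatus(corev1.ConditionTrue).
		WithLastTransitionTime(metav1.Now()).
		WithReason("PodCompletedStartup")

	// WithConditions appends; calling it again would add further conditions.
	return corev1ac.PodStatus().
		WithPhase(corev1.PodRunning).
		WithConditions(cond)
}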
func PodDNSConfigOption() *PodDNSConfigOptionApplyConfiguration { return &PodDNSConfigOptionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go index 3c6e6b87a..73f089856 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PodIPApplyConfiguration represents an declarative configuration of the PodIP type for use +// PodIPApplyConfiguration represents a declarative configuration of the PodIP type for use // with apply. type PodIPApplyConfiguration struct { IP *string `json:"ip,omitempty"` } -// PodIPApplyConfiguration constructs an declarative configuration of the PodIP type for use with +// PodIPApplyConfiguration constructs a declarative configuration of the PodIP type for use with // apply. func PodIP() *PodIPApplyConfiguration { return &PodIPApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go index a5315d636..22a745601 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// PodOSApplyConfiguration represents an declarative configuration of the PodOS type for use +// PodOSApplyConfiguration represents a declarative configuration of the PodOS type for use // with apply. type PodOSApplyConfiguration struct { - Name *v1.OSName `json:"name,omitempty"` + Name *corev1.OSName `json:"name,omitempty"` } -// PodOSApplyConfiguration constructs an declarative configuration of the PodOS type for use with +// PodOSApplyConfiguration constructs a declarative configuration of the PodOS type for use with // apply. func PodOS() *PodOSApplyConfiguration { return &PodOSApplyConfiguration{} @@ -37,7 +37,7 @@ func PodOS() *PodOSApplyConfiguration { // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *PodOSApplyConfiguration) WithName(value v1.OSName) *PodOSApplyConfiguration { +func (b *PodOSApplyConfiguration) WithName(value corev1.OSName) *PodOSApplyConfiguration { b.Name = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go index 9d3ad458a..4298b1ca6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// PodReadinessGateApplyConfiguration represents an declarative configuration of the PodReadinessGate type for use +// PodReadinessGateApplyConfiguration represents a declarative configuration of the PodReadinessGate type for use // with apply. 
type PodReadinessGateApplyConfiguration struct { - ConditionType *v1.PodConditionType `json:"conditionType,omitempty"` + ConditionType *corev1.PodConditionType `json:"conditionType,omitempty"` } -// PodReadinessGateApplyConfiguration constructs an declarative configuration of the PodReadinessGate type for use with +// PodReadinessGateApplyConfiguration constructs a declarative configuration of the PodReadinessGate type for use with // apply. func PodReadinessGate() *PodReadinessGateApplyConfiguration { return &PodReadinessGateApplyConfiguration{} @@ -37,7 +37,7 @@ func PodReadinessGate() *PodReadinessGateApplyConfiguration { // WithConditionType sets the ConditionType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ConditionType field is set to the value of the last call. -func (b *PodReadinessGateApplyConfiguration) WithConditionType(value v1.PodConditionType) *PodReadinessGateApplyConfiguration { +func (b *PodReadinessGateApplyConfiguration) WithConditionType(value corev1.PodConditionType) *PodReadinessGateApplyConfiguration { b.ConditionType = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go index 69b250d47..b0bd67fa1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go @@ -18,14 +18,15 @@ limitations under the License. package v1 -// PodResourceClaimApplyConfiguration represents an declarative configuration of the PodResourceClaim type for use +// PodResourceClaimApplyConfiguration represents a declarative configuration of the PodResourceClaim type for use // with apply. type PodResourceClaimApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Source *ClaimSourceApplyConfiguration `json:"source,omitempty"` + Name *string `json:"name,omitempty"` + ResourceClaimName *string `json:"resourceClaimName,omitempty"` + ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"` } -// PodResourceClaimApplyConfiguration constructs an declarative configuration of the PodResourceClaim type for use with +// PodResourceClaimApplyConfiguration constructs a declarative configuration of the PodResourceClaim type for use with // apply. func PodResourceClaim() *PodResourceClaimApplyConfiguration { return &PodResourceClaimApplyConfiguration{} @@ -39,10 +40,18 @@ func (b *PodResourceClaimApplyConfiguration) WithName(value string) *PodResource return b } -// WithSource sets the Source field in the declarative configuration to the given value +// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Source field is set to the value of the last call. -func (b *PodResourceClaimApplyConfiguration) WithSource(value *ClaimSourceApplyConfiguration) *PodResourceClaimApplyConfiguration { - b.Source = value +// If called multiple times, the ResourceClaimName field is set to the value of the last call. 
+func (b *PodResourceClaimApplyConfiguration) WithResourceClaimName(value string) *PodResourceClaimApplyConfiguration { + b.ResourceClaimName = &value + return b +} + +// WithResourceClaimTemplateName sets the ResourceClaimTemplateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceClaimTemplateName field is set to the value of the last call. +func (b *PodResourceClaimApplyConfiguration) WithResourceClaimTemplateName(value string) *PodResourceClaimApplyConfiguration { + b.ResourceClaimTemplateName = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go index ae79ca01b..f60ad4b05 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PodResourceClaimStatusApplyConfiguration represents an declarative configuration of the PodResourceClaimStatus type for use +// PodResourceClaimStatusApplyConfiguration represents a declarative configuration of the PodResourceClaimStatus type for use // with apply. type PodResourceClaimStatusApplyConfiguration struct { Name *string `json:"name,omitempty"` ResourceClaimName *string `json:"resourceClaimName,omitempty"` } -// PodResourceClaimStatusApplyConfiguration constructs an declarative configuration of the PodResourceClaimStatus type for use with +// PodResourceClaimStatusApplyConfiguration constructs a declarative configuration of the PodResourceClaimStatus type for use with // apply. func PodResourceClaimStatus() *PodResourceClaimStatusApplyConfiguration { return &PodResourceClaimStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go index f7649c2e9..3d9109277 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PodSchedulingGateApplyConfiguration represents an declarative configuration of the PodSchedulingGate type for use +// PodSchedulingGateApplyConfiguration represents a declarative configuration of the PodSchedulingGate type for use // with apply. type PodSchedulingGateApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PodSchedulingGateApplyConfiguration constructs an declarative configuration of the PodSchedulingGate type for use with +// PodSchedulingGateApplyConfiguration constructs a declarative configuration of the PodSchedulingGate type for use with // apply. 
func PodSchedulingGate() *PodSchedulingGateApplyConfiguration { return &PodSchedulingGateApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go index 6b340294e..f0a3e662c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go @@ -22,23 +22,25 @@ import ( corev1 "k8s.io/api/core/v1" ) -// PodSecurityContextApplyConfiguration represents an declarative configuration of the PodSecurityContext type for use +// PodSecurityContextApplyConfiguration represents a declarative configuration of the PodSecurityContext type for use // with apply. type PodSecurityContextApplyConfiguration struct { - SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` - WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"` - RunAsUser *int64 `json:"runAsUser,omitempty"` - RunAsGroup *int64 `json:"runAsGroup,omitempty"` - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` - SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` - FSGroup *int64 `json:"fsGroup,omitempty"` - Sysctls []SysctlApplyConfiguration `json:"sysctls,omitempty"` - FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"` - SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` - AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` + SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` + WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"` + RunAsUser *int64 `json:"runAsUser,omitempty"` + RunAsGroup *int64 `json:"runAsGroup,omitempty"` + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` + SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` + SupplementalGroupsPolicy *corev1.SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty"` + FSGroup *int64 `json:"fsGroup,omitempty"` + Sysctls []SysctlApplyConfiguration `json:"sysctls,omitempty"` + FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"` + SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"` + AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` + SELinuxChangePolicy *corev1.PodSELinuxChangePolicy `json:"seLinuxChangePolicy,omitempty"` } -// PodSecurityContextApplyConfiguration constructs an declarative configuration of the PodSecurityContext type for use with +// PodSecurityContextApplyConfiguration constructs a declarative configuration of the PodSecurityContext type for use with // apply. func PodSecurityContext() *PodSecurityContextApplyConfiguration { return &PodSecurityContextApplyConfiguration{} @@ -94,6 +96,14 @@ func (b *PodSecurityContextApplyConfiguration) WithSupplementalGroups(values ... return b } +// WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call. 
+func (b *PodSecurityContextApplyConfiguration) WithSupplementalGroupsPolicy(value corev1.SupplementalGroupsPolicy) *PodSecurityContextApplyConfiguration { + b.SupplementalGroupsPolicy = &value + return b +} + // WithFSGroup sets the FSGroup field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FSGroup field is set to the value of the last call. @@ -138,3 +148,11 @@ func (b *PodSecurityContextApplyConfiguration) WithAppArmorProfile(value *AppArm b.AppArmorProfile = value return b } + +// WithSELinuxChangePolicy sets the SELinuxChangePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SELinuxChangePolicy field is set to the value of the last call. +func (b *PodSecurityContextApplyConfiguration) WithSELinuxChangePolicy(value corev1.PodSELinuxChangePolicy) *PodSecurityContextApplyConfiguration { + b.SELinuxChangePolicy = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go index a9acd36fc..96f6eb94b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// PodSpecApplyConfiguration represents an declarative configuration of the PodSpec type for use +// PodSpecApplyConfiguration represents a declarative configuration of the PodSpec type for use // with apply. type PodSpecApplyConfiguration struct { Volumes []VolumeApplyConfiguration `json:"volumes,omitempty"` @@ -64,9 +64,10 @@ type PodSpecApplyConfiguration struct { HostUsers *bool `json:"hostUsers,omitempty"` SchedulingGates []PodSchedulingGateApplyConfiguration `json:"schedulingGates,omitempty"` ResourceClaims []PodResourceClaimApplyConfiguration `json:"resourceClaims,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` } -// PodSpecApplyConfiguration constructs an declarative configuration of the PodSpec type for use with +// PodSpecApplyConfiguration constructs a declarative configuration of the PodSpec type for use with // apply. func PodSpec() *PodSpecApplyConfiguration { return &PodSpecApplyConfiguration{} @@ -444,3 +445,11 @@ func (b *PodSpecApplyConfiguration) WithResourceClaims(values ...*PodResourceCla } return b } + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *PodSpecApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *PodSpecApplyConfiguration { + b.Resources = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go index 1a58ab6be..b79e1210a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go @@ -19,14 +19,14 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PodStatusApplyConfiguration represents an declarative configuration of the PodStatus type for use +// PodStatusApplyConfiguration represents a declarative configuration of the PodStatus type for use // with apply. type PodStatusApplyConfiguration struct { - Phase *v1.PodPhase `json:"phase,omitempty"` + Phase *corev1.PodPhase `json:"phase,omitempty"` Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"` Message *string `json:"message,omitempty"` Reason *string `json:"reason,omitempty"` @@ -38,13 +38,13 @@ type PodStatusApplyConfiguration struct { StartTime *metav1.Time `json:"startTime,omitempty"` InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"` ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"` - QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"` + QOSClass *corev1.PodQOSClass `json:"qosClass,omitempty"` EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"` - Resize *v1.PodResizeStatus `json:"resize,omitempty"` + Resize *corev1.PodResizeStatus `json:"resize,omitempty"` ResourceClaimStatuses []PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"` } -// PodStatusApplyConfiguration constructs an declarative configuration of the PodStatus type for use with +// PodStatusApplyConfiguration constructs a declarative configuration of the PodStatus type for use with // apply. func PodStatus() *PodStatusApplyConfiguration { return &PodStatusApplyConfiguration{} @@ -53,7 +53,7 @@ func PodStatus() *PodStatusApplyConfiguration { // WithPhase sets the Phase field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Phase field is set to the value of the last call. -func (b *PodStatusApplyConfiguration) WithPhase(value v1.PodPhase) *PodStatusApplyConfiguration { +func (b *PodStatusApplyConfiguration) WithPhase(value corev1.PodPhase) *PodStatusApplyConfiguration { b.Phase = &value return b } @@ -174,7 +174,7 @@ func (b *PodStatusApplyConfiguration) WithContainerStatuses(values ...*Container // WithQOSClass sets the QOSClass field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the QOSClass field is set to the value of the last call. -func (b *PodStatusApplyConfiguration) WithQOSClass(value v1.PodQOSClass) *PodStatusApplyConfiguration { +func (b *PodStatusApplyConfiguration) WithQOSClass(value corev1.PodQOSClass) *PodStatusApplyConfiguration { b.QOSClass = &value return b } @@ -195,7 +195,7 @@ func (b *PodStatusApplyConfiguration) WithEphemeralContainerStatuses(values ...* // WithResize sets the Resize field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Resize field is set to the value of the last call. 
-func (b *PodStatusApplyConfiguration) WithResize(value v1.PodResizeStatus) *PodStatusApplyConfiguration { +func (b *PodStatusApplyConfiguration) WithResize(value corev1.PodResizeStatus) *PodStatusApplyConfiguration { b.Resize = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go index 7fe51d9e1..7886ea2d9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodTemplateApplyConfiguration represents an declarative configuration of the PodTemplate type for use +// PodTemplateApplyConfiguration represents a declarative configuration of the PodTemplate type for use // with apply. type PodTemplateApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// PodTemplate constructs an declarative configuration of the PodTemplate type for use with +// PodTemplate constructs a declarative configuration of the PodTemplate type for use with // apply. func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration { b := &PodTemplateApplyConfiguration{} @@ -57,18 +57,18 @@ func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) { +func ExtractPodTemplate(podTemplate *corev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) { return extractPodTemplate(podTemplate, fieldManager, "") } // ExtractPodTemplateStatus is the same as ExtractPodTemplate except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractPodTemplateStatus(podTemplate *apicorev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) { +func ExtractPodTemplateStatus(podTemplate *corev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) { return extractPodTemplate(podTemplate, fieldManager, "status") } -func extractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string, subresource string) (*PodTemplateApplyConfiguration, error) { +func extractPodTemplate(podTemplate *corev1.PodTemplate, fieldManager string, subresource string) (*PodTemplateApplyConfiguration, error) { b := &PodTemplateApplyConfiguration{} err := managedfields.ExtractInto(podTemplate, internal.Parser().Type("io.k8s.api.core.v1.PodTemplate"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithKind(value string) *PodTemplateApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *PodTemplateApplyConfiguration) WithKind(value string) *PodTemplateApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithAPIVersion(value string) *PodTemplateApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *PodTemplateApplyConfiguration) WithAPIVersion(value string) *PodTemplat // If called multiple times, the Name field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithName(value string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *PodTemplateApplyConfiguration) WithName(value string) *PodTemplateApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithGenerateName(value string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *PodTemplateApplyConfiguration) WithGenerateName(value string) *PodTempl // If called multiple times, the Namespace field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithNamespace(value string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *PodTemplateApplyConfiguration) WithNamespace(value string) *PodTemplate // If called multiple times, the UID field is set to the value of the last call. 
func (b *PodTemplateApplyConfiguration) WithUID(value types.UID) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *PodTemplateApplyConfiguration) WithUID(value types.UID) *PodTemplateApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithResourceVersion(value string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *PodTemplateApplyConfiguration) WithResourceVersion(value string) *PodTe // If called multiple times, the Generation field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithGeneration(value int64) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodTemplateApplyConfiguration { +func (b *PodTemplateApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodTemplateApplyConfiguration { +func (b *PodTemplateApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *PodTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PodTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *PodTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. 
func (b *PodTemplateApplyConfiguration) WithLabels(entries map[string]string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *PodTemplateApplyConfiguration) WithLabels(entries map[string]string) *P // overwriting an existing map entries in Annotations field with the same key. func (b *PodTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *PodTemplateApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodTemplateApplyConfiguration { +func (b *PodTemplateApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *PodTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *PodTemplateApplyConfiguration) WithFinalizers(values ...string) *PodTemplateApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PodTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -247,3 +247,9 @@ func (b *PodTemplateApplyConfiguration) WithTemplate(value *PodTemplateSpecApply b.Template = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PodTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go index 82878a9ac..2e0904a24 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodTemplateSpecApplyConfiguration represents an declarative configuration of the PodTemplateSpec type for use +// PodTemplateSpecApplyConfiguration represents a declarative configuration of the PodTemplateSpec type for use // with apply. type PodTemplateSpecApplyConfiguration struct { - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodSpecApplyConfiguration `json:"spec,omitempty"` } -// PodTemplateSpecApplyConfiguration constructs an declarative configuration of the PodTemplateSpec type for use with +// PodTemplateSpecApplyConfiguration constructs a declarative configuration of the PodTemplateSpec type for use with // apply. func PodTemplateSpec() *PodTemplateSpecApplyConfiguration { return &PodTemplateSpecApplyConfiguration{} @@ -42,7 +42,7 @@ func PodTemplateSpec() *PodTemplateSpecApplyConfiguration { // If called multiple times, the Name field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithName(value string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -51,7 +51,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithName(value string) *PodTemplateS // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithGenerateName(value string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -60,7 +60,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithGenerateName(value string) *PodT // If called multiple times, the Namespace field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithNamespace(value string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -69,7 +69,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithNamespace(value string) *PodTemp // If called multiple times, the UID field is set to the value of the last call. 
func (b *PodTemplateSpecApplyConfiguration) WithUID(value types.UID) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -78,7 +78,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithUID(value types.UID) *PodTemplat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithResourceVersion(value string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -87,25 +87,25 @@ func (b *PodTemplateSpecApplyConfiguration) WithResourceVersion(value string) *P // If called multiple times, the Generation field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithGeneration(value int64) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodTemplateSpecApplyConfiguration { +func (b *PodTemplateSpecApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodTemplateSpecApplyConfiguration { +func (b *PodTemplateSpecApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -114,7 +114,7 @@ func (b *PodTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.T // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PodTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -124,11 +124,11 @@ func (b *PodTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. 
func (b *PodTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -139,11 +139,11 @@ func (b *PodTemplateSpecApplyConfiguration) WithLabels(entries map[string]string // overwriting an existing map entries in Annotations field with the same key. func (b *PodTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -151,13 +151,13 @@ func (b *PodTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]s // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodTemplateSpecApplyConfiguration { +func (b *PodTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -168,14 +168,14 @@ func (b *PodTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.Ow func (b *PodTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *PodTemplateSpecApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PodTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -186,3 +186,9 @@ func (b *PodTemplateSpecApplyConfiguration) WithSpec(value *PodSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PodTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go index 8c70c8f6c..eff8fc2ac 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// PortStatusApplyConfiguration represents an declarative configuration of the PortStatus type for use +// PortStatusApplyConfiguration represents a declarative configuration of the PortStatus type for use // with apply. type PortStatusApplyConfiguration struct { - Port *int32 `json:"port,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` - Error *string `json:"error,omitempty"` + Port *int32 `json:"port,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` + Error *string `json:"error,omitempty"` } -// PortStatusApplyConfiguration constructs an declarative configuration of the PortStatus type for use with +// PortStatusApplyConfiguration constructs a declarative configuration of the PortStatus type for use with // apply. func PortStatus() *PortStatusApplyConfiguration { return &PortStatusApplyConfiguration{} @@ -47,7 +47,7 @@ func (b *PortStatusApplyConfiguration) WithPort(value int32) *PortStatusApplyCon // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. -func (b *PortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *PortStatusApplyConfiguration { +func (b *PortStatusApplyConfiguration) WithProtocol(value corev1.Protocol) *PortStatusApplyConfiguration { b.Protocol = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go index 19cbb82ed..29715e021 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PortworxVolumeSourceApplyConfiguration represents an declarative configuration of the PortworxVolumeSource type for use +// PortworxVolumeSourceApplyConfiguration represents a declarative configuration of the PortworxVolumeSource type for use // with apply. type PortworxVolumeSourceApplyConfiguration struct { VolumeID *string `json:"volumeID,omitempty"` @@ -26,7 +26,7 @@ type PortworxVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// PortworxVolumeSourceApplyConfiguration constructs an declarative configuration of the PortworxVolumeSource type for use with +// PortworxVolumeSourceApplyConfiguration constructs a declarative configuration of the PortworxVolumeSource type for use with // apply. 
func PortworxVolumeSource() *PortworxVolumeSourceApplyConfiguration { return &PortworxVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go index a373e4afe..b88a3646f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// PreferredSchedulingTermApplyConfiguration represents an declarative configuration of the PreferredSchedulingTerm type for use +// PreferredSchedulingTermApplyConfiguration represents a declarative configuration of the PreferredSchedulingTerm type for use // with apply. type PreferredSchedulingTermApplyConfiguration struct { Weight *int32 `json:"weight,omitempty"` Preference *NodeSelectorTermApplyConfiguration `json:"preference,omitempty"` } -// PreferredSchedulingTermApplyConfiguration constructs an declarative configuration of the PreferredSchedulingTerm type for use with +// PreferredSchedulingTermApplyConfiguration constructs a declarative configuration of the PreferredSchedulingTerm type for use with // apply. func PreferredSchedulingTerm() *PreferredSchedulingTermApplyConfiguration { return &PreferredSchedulingTermApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go index 10730557a..d6c654689 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ProbeApplyConfiguration represents an declarative configuration of the Probe type for use +// ProbeApplyConfiguration represents a declarative configuration of the Probe type for use // with apply. type ProbeApplyConfiguration struct { ProbeHandlerApplyConfiguration `json:",inline"` @@ -30,7 +30,7 @@ type ProbeApplyConfiguration struct { TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` } -// ProbeApplyConfiguration constructs an declarative configuration of the Probe type for use with +// ProbeApplyConfiguration constructs a declarative configuration of the Probe type for use with // apply. func Probe() *ProbeApplyConfiguration { return &ProbeApplyConfiguration{} @@ -40,7 +40,7 @@ func Probe() *ProbeApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Exec field is set to the value of the last call. func (b *ProbeApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) *ProbeApplyConfiguration { - b.Exec = value + b.ProbeHandlerApplyConfiguration.Exec = value return b } @@ -48,7 +48,7 @@ func (b *ProbeApplyConfiguration) WithExec(value *ExecActionApplyConfiguration) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTPGet field is set to the value of the last call. 
func (b *ProbeApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfiguration) *ProbeApplyConfiguration { - b.HTTPGet = value + b.ProbeHandlerApplyConfiguration.HTTPGet = value return b } @@ -56,7 +56,7 @@ func (b *ProbeApplyConfiguration) WithHTTPGet(value *HTTPGetActionApplyConfigura // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the TCPSocket field is set to the value of the last call. func (b *ProbeApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfiguration) *ProbeApplyConfiguration { - b.TCPSocket = value + b.ProbeHandlerApplyConfiguration.TCPSocket = value return b } @@ -64,7 +64,7 @@ func (b *ProbeApplyConfiguration) WithTCPSocket(value *TCPSocketActionApplyConfi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GRPC field is set to the value of the last call. func (b *ProbeApplyConfiguration) WithGRPC(value *GRPCActionApplyConfiguration) *ProbeApplyConfiguration { - b.GRPC = value + b.ProbeHandlerApplyConfiguration.GRPC = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go index 54f3344ac..1f88745ea 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ProbeHandlerApplyConfiguration represents an declarative configuration of the ProbeHandler type for use +// ProbeHandlerApplyConfiguration represents a declarative configuration of the ProbeHandler type for use // with apply. type ProbeHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` @@ -27,7 +27,7 @@ type ProbeHandlerApplyConfiguration struct { GRPC *GRPCActionApplyConfiguration `json:"grpc,omitempty"` } -// ProbeHandlerApplyConfiguration constructs an declarative configuration of the ProbeHandler type for use with +// ProbeHandlerApplyConfiguration constructs a declarative configuration of the ProbeHandler type for use with // apply. func ProbeHandler() *ProbeHandlerApplyConfiguration { return &ProbeHandlerApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go index 0a9d1d88e..c922ec8cc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ProjectedVolumeSourceApplyConfiguration represents an declarative configuration of the ProjectedVolumeSource type for use +// ProjectedVolumeSourceApplyConfiguration represents a declarative configuration of the ProjectedVolumeSource type for use // with apply. 
type ProjectedVolumeSourceApplyConfiguration struct { Sources []VolumeProjectionApplyConfiguration `json:"sources,omitempty"` DefaultMode *int32 `json:"defaultMode,omitempty"` } -// ProjectedVolumeSourceApplyConfiguration constructs an declarative configuration of the ProjectedVolumeSource type for use with +// ProjectedVolumeSourceApplyConfiguration constructs a declarative configuration of the ProjectedVolumeSource type for use with // apply. func ProjectedVolumeSource() *ProjectedVolumeSourceApplyConfiguration { return &ProjectedVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go index 646052ea4..9a042a0a1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// QuobyteVolumeSourceApplyConfiguration represents an declarative configuration of the QuobyteVolumeSource type for use +// QuobyteVolumeSourceApplyConfiguration represents a declarative configuration of the QuobyteVolumeSource type for use // with apply. type QuobyteVolumeSourceApplyConfiguration struct { Registry *string `json:"registry,omitempty"` @@ -29,7 +29,7 @@ type QuobyteVolumeSourceApplyConfiguration struct { Tenant *string `json:"tenant,omitempty"` } -// QuobyteVolumeSourceApplyConfiguration constructs an declarative configuration of the QuobyteVolumeSource type for use with +// QuobyteVolumeSourceApplyConfiguration constructs a declarative configuration of the QuobyteVolumeSource type for use with // apply. func QuobyteVolumeSource() *QuobyteVolumeSourceApplyConfiguration { return &QuobyteVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go index ffcb836eb..64f25724a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RBDPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the RBDPersistentVolumeSource type for use +// RBDPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the RBDPersistentVolumeSource type for use // with apply. type RBDPersistentVolumeSourceApplyConfiguration struct { CephMonitors []string `json:"monitors,omitempty"` @@ -31,7 +31,7 @@ type RBDPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// RBDPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the RBDPersistentVolumeSource type for use with +// RBDPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDPersistentVolumeSource type for use with // apply. 
func RBDPersistentVolumeSource() *RBDPersistentVolumeSourceApplyConfiguration { return &RBDPersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go index 8e7c81732..8dae198c0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RBDVolumeSourceApplyConfiguration represents an declarative configuration of the RBDVolumeSource type for use +// RBDVolumeSourceApplyConfiguration represents a declarative configuration of the RBDVolumeSource type for use // with apply. type RBDVolumeSourceApplyConfiguration struct { CephMonitors []string `json:"monitors,omitempty"` @@ -31,7 +31,7 @@ type RBDVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// RBDVolumeSourceApplyConfiguration constructs an declarative configuration of the RBDVolumeSource type for use with +// RBDVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDVolumeSource type for use with // apply. func RBDVolumeSource() *RBDVolumeSourceApplyConfiguration { return &RBDVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go index 7cd71460a..4ef551914 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicationControllerApplyConfiguration represents an declarative configuration of the ReplicationController type for use +// ReplicationControllerApplyConfiguration represents a declarative configuration of the ReplicationController type for use // with apply. type ReplicationControllerApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ReplicationControllerSpecApplyConfiguration `json:"spec,omitempty"` - Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ReplicationControllerSpecApplyConfiguration `json:"spec,omitempty"` + Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicationController constructs an declarative configuration of the ReplicationController type for use with +// ReplicationController constructs a declarative configuration of the ReplicationController type for use with // apply. 
func ReplicationController(name, namespace string) *ReplicationControllerApplyConfiguration { b := &ReplicationControllerApplyConfiguration{} @@ -58,18 +58,18 @@ func ReplicationController(name, namespace string) *ReplicationControllerApplyCo // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractReplicationController(replicationController *apicorev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) { +func ExtractReplicationController(replicationController *corev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) { return extractReplicationController(replicationController, fieldManager, "") } // ExtractReplicationControllerStatus is the same as ExtractReplicationController except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractReplicationControllerStatus(replicationController *apicorev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) { +func ExtractReplicationControllerStatus(replicationController *corev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) { return extractReplicationController(replicationController, fieldManager, "status") } -func extractReplicationController(replicationController *apicorev1.ReplicationController, fieldManager string, subresource string) (*ReplicationControllerApplyConfiguration, error) { +func extractReplicationController(replicationController *corev1.ReplicationController, fieldManager string, subresource string) (*ReplicationControllerApplyConfiguration, error) { b := &ReplicationControllerApplyConfiguration{} err := managedfields.ExtractInto(replicationController, internal.Parser().Type("io.k8s.api.core.v1.ReplicationController"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractReplicationController(replicationController *apicorev1.ReplicationCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithKind(value string) *ReplicationControllerApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ReplicationControllerApplyConfiguration) WithKind(value string) *Replic // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithAPIVersion(value string) *ReplicationControllerApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ReplicationControllerApplyConfiguration) WithAPIVersion(value string) * // If called multiple times, the Name field is set to the value of the last call. 
func (b *ReplicationControllerApplyConfiguration) WithName(value string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ReplicationControllerApplyConfiguration) WithName(value string) *Replic // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithGenerateName(value string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ReplicationControllerApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithNamespace(value string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ReplicationControllerApplyConfiguration) WithNamespace(value string) *R // If called multiple times, the UID field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithUID(value types.UID) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ReplicationControllerApplyConfiguration) WithUID(value types.UID) *Repl // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithResourceVersion(value string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *ReplicationControllerApplyConfiguration) WithResourceVersion(value stri // If called multiple times, the Generation field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithGeneration(value int64) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ReplicationControllerApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicationControllerApplyConfiguration { +func (b *ReplicationControllerApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ReplicationControllerApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicationControllerApplyConfiguration { +func (b *ReplicationControllerApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ReplicationControllerApplyConfiguration) WithDeletionTimestamp(value me // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ReplicationControllerApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ReplicationControllerApplyConfiguration) WithDeletionGracePeriodSeconds // overwriting an existing map entries in Labels field with the same key. func (b *ReplicationControllerApplyConfiguration) WithLabels(entries map[string]string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ReplicationControllerApplyConfiguration) WithLabels(entries map[string] // overwriting an existing map entries in Annotations field with the same key. func (b *ReplicationControllerApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *ReplicationControllerApplyConfiguration) WithAnnotations(entries map[st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ReplicationControllerApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ReplicationControllerApplyConfiguration { +func (b *ReplicationControllerApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *ReplicationControllerApplyConfiguration) WithOwnerReferences(values ... func (b *ReplicationControllerApplyConfiguration) WithFinalizers(values ...string) *ReplicationControllerApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ReplicationControllerApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *ReplicationControllerApplyConfiguration) WithStatus(value *ReplicationC b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicationControllerApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go index c3d56cc69..dfcecc053 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicationControllerConditionApplyConfiguration represents an declarative configuration of the ReplicationControllerCondition type for use +// ReplicationControllerConditionApplyConfiguration represents a declarative configuration of the ReplicationControllerCondition type for use // with apply. 
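Reviewer note: for context on how these builders are consumed, the chained `With*` setters produce a declarative object that is normally sent via server-side apply, and the new `GetName` getter reads the name back without reaching into the embedded `ObjectMetaApplyConfiguration`. A hedged sketch; the field manager, namespace, and labels are illustrative, `clientset` is assumed to be a configured kubernetes.Interface, and a real spec would also need `WithTemplate(...)`:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

func applyRC(ctx context.Context, clientset kubernetes.Interface) error {
	rc := corev1ac.ReplicationController("web", "default").
		WithLabels(map[string]string{"app": "web"}).
		WithSpec(corev1ac.ReplicationControllerSpec().
			WithReplicas(3).
			WithSelector(map[string]string{"app": "web"}))
	// (A real spec also needs WithTemplate(...); omitted to keep the sketch short.)

	// New getter added in this diff: returns *string pointing at "web".
	_ = rc.GetName()

	_, err := clientset.CoreV1().ReplicationControllers("default").
		Apply(ctx, rc, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}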
type ReplicationControllerConditionApplyConfiguration struct { - Type *v1.ReplicationControllerConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *corev1.ReplicationControllerConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// ReplicationControllerConditionApplyConfiguration constructs an declarative configuration of the ReplicationControllerCondition type for use with +// ReplicationControllerConditionApplyConfiguration constructs a declarative configuration of the ReplicationControllerCondition type for use with // apply. func ReplicationControllerCondition() *ReplicationControllerConditionApplyConfiguration { return &ReplicationControllerConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func ReplicationControllerCondition() *ReplicationControllerConditionApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *ReplicationControllerConditionApplyConfiguration) WithType(value v1.ReplicationControllerConditionType) *ReplicationControllerConditionApplyConfiguration { +func (b *ReplicationControllerConditionApplyConfiguration) WithType(value corev1.ReplicationControllerConditionType) *ReplicationControllerConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *ReplicationControllerConditionApplyConfiguration) WithType(value v1.Rep // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ReplicationControllerConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ReplicationControllerConditionApplyConfiguration { +func (b *ReplicationControllerConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *ReplicationControllerConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go index dd4e081d9..07bac9f4c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicationControllerSpecApplyConfiguration represents an declarative configuration of the ReplicationControllerSpec type for use +// ReplicationControllerSpecApplyConfiguration represents a declarative configuration of the ReplicationControllerSpec type for use // with apply. 
type ReplicationControllerSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -27,7 +27,7 @@ type ReplicationControllerSpecApplyConfiguration struct { Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicationControllerSpecApplyConfiguration constructs an declarative configuration of the ReplicationControllerSpec type for use with +// ReplicationControllerSpecApplyConfiguration constructs a declarative configuration of the ReplicationControllerSpec type for use with // apply. func ReplicationControllerSpec() *ReplicationControllerSpecApplyConfiguration { return &ReplicationControllerSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go index 1b994cfb8..c8046aa5a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ReplicationControllerStatusApplyConfiguration represents an declarative configuration of the ReplicationControllerStatus type for use +// ReplicationControllerStatusApplyConfiguration represents a declarative configuration of the ReplicationControllerStatus type for use // with apply. type ReplicationControllerStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicationControllerStatusApplyConfiguration struct { Conditions []ReplicationControllerConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicationControllerStatusApplyConfiguration constructs an declarative configuration of the ReplicationControllerStatus type for use with +// ReplicationControllerStatusApplyConfiguration constructs a declarative configuration of the ReplicationControllerStatus type for use with // apply. func ReplicationControllerStatus() *ReplicationControllerStatusApplyConfiguration { return &ReplicationControllerStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go index 064dd4e2e..b00c69248 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go @@ -18,13 +18,14 @@ limitations under the License. package v1 -// ResourceClaimApplyConfiguration represents an declarative configuration of the ResourceClaim type for use +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use // with apply. type ResourceClaimApplyConfiguration struct { - Name *string `json:"name,omitempty"` + Name *string `json:"name,omitempty"` + Request *string `json:"request,omitempty"` } -// ResourceClaimApplyConfiguration constructs an declarative configuration of the ResourceClaim type for use with +// ResourceClaimApplyConfiguration constructs a declarative configuration of the ResourceClaim type for use with // apply. 
func ResourceClaim() *ResourceClaimApplyConfiguration { return &ResourceClaimApplyConfiguration{} @@ -37,3 +38,11 @@ func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimA b.Name = &value return b } + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Request field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithRequest(value string) *ResourceClaimApplyConfiguration { + b.Request = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go index 2741227dd..1b4918a63 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go @@ -22,7 +22,7 @@ import ( resource "k8s.io/apimachinery/pkg/api/resource" ) -// ResourceFieldSelectorApplyConfiguration represents an declarative configuration of the ResourceFieldSelector type for use +// ResourceFieldSelectorApplyConfiguration represents a declarative configuration of the ResourceFieldSelector type for use // with apply. type ResourceFieldSelectorApplyConfiguration struct { ContainerName *string `json:"containerName,omitempty"` @@ -30,7 +30,7 @@ type ResourceFieldSelectorApplyConfiguration struct { Divisor *resource.Quantity `json:"divisor,omitempty"` } -// ResourceFieldSelectorApplyConfiguration constructs an declarative configuration of the ResourceFieldSelector type for use with +// ResourceFieldSelectorApplyConfiguration constructs a declarative configuration of the ResourceFieldSelector type for use with // apply. func ResourceFieldSelector() *ResourceFieldSelectorApplyConfiguration { return &ResourceFieldSelectorApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go new file mode 100644 index 000000000..0338780b3 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ResourceHealthApplyConfiguration represents a declarative configuration of the ResourceHealth type for use +// with apply. +type ResourceHealthApplyConfiguration struct { + ResourceID *corev1.ResourceID `json:"resourceID,omitempty"` + Health *corev1.ResourceHealthStatus `json:"health,omitempty"` +} + +// ResourceHealthApplyConfiguration constructs a declarative configuration of the ResourceHealth type for use with +// apply. 
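Reviewer note: the new `Request` field on ResourceClaimApplyConfiguration lets a container reference one named request inside a pod-level ResourceClaim (dynamic resource allocation). A hedged usage sketch; the claim and request names are made up, and ResourceRequirementsApplyConfiguration (used for `WithClaims`) appears later in this same diff:

package example

import (
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// containerWithClaim builds a container whose resources point at a single
// request of a pod-level ResourceClaim; all names are illustrative.
func containerWithClaim() *corev1ac.ContainerApplyConfiguration {
	return corev1ac.Container().
		WithName("trainer").
		WithImage("example.com/trainer:latest").
		WithResources(corev1ac.ResourceRequirements().
			WithClaims(corev1ac.ResourceClaim().
				WithName("gpu-claim").
				WithRequest("single-gpu")))
}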
+func ResourceHealth() *ResourceHealthApplyConfiguration { + return &ResourceHealthApplyConfiguration{} +} + +// WithResourceID sets the ResourceID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceID field is set to the value of the last call. +func (b *ResourceHealthApplyConfiguration) WithResourceID(value corev1.ResourceID) *ResourceHealthApplyConfiguration { + b.ResourceID = &value + return b +} + +// WithHealth sets the Health field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Health field is set to the value of the last call. +func (b *ResourceHealthApplyConfiguration) WithHealth(value corev1.ResourceHealthStatus) *ResourceHealthApplyConfiguration { + b.Health = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go index 6b22ebdc5..cd67f104c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ResourceQuotaApplyConfiguration represents an declarative configuration of the ResourceQuota type for use +// ResourceQuotaApplyConfiguration represents a declarative configuration of the ResourceQuota type for use // with apply. type ResourceQuotaApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ResourceQuotaSpecApplyConfiguration `json:"spec,omitempty"` - Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceQuotaSpecApplyConfiguration `json:"spec,omitempty"` + Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"` } -// ResourceQuota constructs an declarative configuration of the ResourceQuota type for use with +// ResourceQuota constructs a declarative configuration of the ResourceQuota type for use with // apply. func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration { b := &ResourceQuotaApplyConfiguration{} @@ -58,18 +58,18 @@ func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) { +func ExtractResourceQuota(resourceQuota *corev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) { return extractResourceQuota(resourceQuota, fieldManager, "") } // ExtractResourceQuotaStatus is the same as ExtractResourceQuota except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractResourceQuotaStatus(resourceQuota *apicorev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) { +func ExtractResourceQuotaStatus(resourceQuota *corev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) { return extractResourceQuota(resourceQuota, fieldManager, "status") } -func extractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager string, subresource string) (*ResourceQuotaApplyConfiguration, error) { +func extractResourceQuota(resourceQuota *corev1.ResourceQuota, fieldManager string, subresource string) (*ResourceQuotaApplyConfiguration, error) { b := &ResourceQuotaApplyConfiguration{} err := managedfields.ExtractInto(resourceQuota, internal.Parser().Type("io.k8s.api.core.v1.ResourceQuota"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithKind(value string) *ResourceQuotaApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ResourceQuotaApplyConfiguration) WithKind(value string) *ResourceQuotaA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithAPIVersion(value string) *ResourceQuotaApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ResourceQuotaApplyConfiguration) WithAPIVersion(value string) *Resource // If called multiple times, the Name field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithName(value string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ResourceQuotaApplyConfiguration) WithName(value string) *ResourceQuotaA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithGenerateName(value string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ResourceQuotaApplyConfiguration) WithGenerateName(value string) *Resour // If called multiple times, the Namespace field is set to the value of the last call. 
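Reviewer note: the Extract* helpers are the read side of server-side apply. They recover, from a live object's managedFields, only the fields that a given field manager owns, so a subsequent Apply does not accidentally take ownership of someone else's fields. A hedged sketch using ExtractResourceQuota; the client, namespace, and manager name are assumptions:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

func reapplyQuota(ctx context.Context, clientset kubernetes.Interface) error {
	live, err := clientset.CoreV1().ResourceQuotas("default").
		Get(ctx, "team-quota", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Only the fields previously applied by "quota-controller" end up in rq.
	rq, err := corev1ac.ExtractResourceQuota(live, "quota-controller")
	if err != nil {
		return err
	}

	// Mutate the extracted configuration and apply it back under the same manager.
	rq.WithLabels(map[string]string{"team": "platform"})
	_, err = clientset.CoreV1().ResourceQuotas("default").
		Apply(ctx, rq, metav1.ApplyOptions{FieldManager: "quota-controller", Force: true})
	return err
}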
func (b *ResourceQuotaApplyConfiguration) WithNamespace(value string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ResourceQuotaApplyConfiguration) WithNamespace(value string) *ResourceQ // If called multiple times, the UID field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithUID(value types.UID) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ResourceQuotaApplyConfiguration) WithUID(value types.UID) *ResourceQuot // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithResourceVersion(value string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *ResourceQuotaApplyConfiguration) WithResourceVersion(value string) *Res // If called multiple times, the Generation field is set to the value of the last call. func (b *ResourceQuotaApplyConfiguration) WithGeneration(value int64) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceQuotaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceQuotaApplyConfiguration { +func (b *ResourceQuotaApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceQuotaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceQuotaApplyConfiguration { +func (b *ResourceQuotaApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ResourceQuotaApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *ResourceQuotaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ResourceQuotaApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *ResourceQuotaApplyConfiguration) WithLabels(entries map[string]string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ResourceQuotaApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *ResourceQuotaApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *ResourceQuotaApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
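Reviewer note: the rewritten WithLabels/WithAnnotations bodies keep their documented merge semantics: entries accumulate across calls and only entries with the same key are overwritten. A small sketch built only from the code shown above:

package example

import (
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func mergedLabels() map[string]string {
	rq := corev1ac.ResourceQuota("team-quota", "default").
		WithLabels(map[string]string{"team": "platform", "env": "dev"}).
		WithLabels(map[string]string{"env": "prod"}) // overwrites "env", keeps "team"

	// Resulting labels: {"team": "platform", "env": "prod"}
	return rq.ObjectMetaApplyConfiguration.Labels
}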
-func (b *ResourceQuotaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceQuotaApplyConfiguration { +func (b *ResourceQuotaApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *ResourceQuotaApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *ResourceQuotaApplyConfiguration) WithFinalizers(values ...string) *ResourceQuotaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ResourceQuotaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *ResourceQuotaApplyConfiguration) WithStatus(value *ResourceQuotaStatusA b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceQuotaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go index feb454bc4..36d342fcd 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// ResourceQuotaSpecApplyConfiguration represents an declarative configuration of the ResourceQuotaSpec type for use +// ResourceQuotaSpecApplyConfiguration represents a declarative configuration of the ResourceQuotaSpec type for use // with apply. type ResourceQuotaSpecApplyConfiguration struct { - Hard *v1.ResourceList `json:"hard,omitempty"` - Scopes []v1.ResourceQuotaScope `json:"scopes,omitempty"` + Hard *corev1.ResourceList `json:"hard,omitempty"` + Scopes []corev1.ResourceQuotaScope `json:"scopes,omitempty"` ScopeSelector *ScopeSelectorApplyConfiguration `json:"scopeSelector,omitempty"` } -// ResourceQuotaSpecApplyConfiguration constructs an declarative configuration of the ResourceQuotaSpec type for use with +// ResourceQuotaSpecApplyConfiguration constructs a declarative configuration of the ResourceQuotaSpec type for use with // apply. func ResourceQuotaSpec() *ResourceQuotaSpecApplyConfiguration { return &ResourceQuotaSpecApplyConfiguration{} @@ -39,7 +39,7 @@ func ResourceQuotaSpec() *ResourceQuotaSpecApplyConfiguration { // WithHard sets the Hard field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Hard field is set to the value of the last call. -func (b *ResourceQuotaSpecApplyConfiguration) WithHard(value v1.ResourceList) *ResourceQuotaSpecApplyConfiguration { +func (b *ResourceQuotaSpecApplyConfiguration) WithHard(value corev1.ResourceList) *ResourceQuotaSpecApplyConfiguration { b.Hard = &value return b } @@ -47,7 +47,7 @@ func (b *ResourceQuotaSpecApplyConfiguration) WithHard(value v1.ResourceList) *R // WithScopes adds the given value to the Scopes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Scopes field. -func (b *ResourceQuotaSpecApplyConfiguration) WithScopes(values ...v1.ResourceQuotaScope) *ResourceQuotaSpecApplyConfiguration { +func (b *ResourceQuotaSpecApplyConfiguration) WithScopes(values ...corev1.ResourceQuotaScope) *ResourceQuotaSpecApplyConfiguration { for i := range values { b.Scopes = append(b.Scopes, values[i]) } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go index 4dced90f7..6338a1308 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// ResourceQuotaStatusApplyConfiguration represents an declarative configuration of the ResourceQuotaStatus type for use +// ResourceQuotaStatusApplyConfiguration represents a declarative configuration of the ResourceQuotaStatus type for use // with apply. type ResourceQuotaStatusApplyConfiguration struct { - Hard *v1.ResourceList `json:"hard,omitempty"` - Used *v1.ResourceList `json:"used,omitempty"` + Hard *corev1.ResourceList `json:"hard,omitempty"` + Used *corev1.ResourceList `json:"used,omitempty"` } -// ResourceQuotaStatusApplyConfiguration constructs an declarative configuration of the ResourceQuotaStatus type for use with +// ResourceQuotaStatusApplyConfiguration constructs a declarative configuration of the ResourceQuotaStatus type for use with // apply. func ResourceQuotaStatus() *ResourceQuotaStatusApplyConfiguration { return &ResourceQuotaStatusApplyConfiguration{} @@ -38,7 +38,7 @@ func ResourceQuotaStatus() *ResourceQuotaStatusApplyConfiguration { // WithHard sets the Hard field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Hard field is set to the value of the last call. -func (b *ResourceQuotaStatusApplyConfiguration) WithHard(value v1.ResourceList) *ResourceQuotaStatusApplyConfiguration { +func (b *ResourceQuotaStatusApplyConfiguration) WithHard(value corev1.ResourceList) *ResourceQuotaStatusApplyConfiguration { b.Hard = &value return b } @@ -46,7 +46,7 @@ func (b *ResourceQuotaStatusApplyConfiguration) WithHard(value v1.ResourceList) // WithUsed sets the Used field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Used field is set to the value of the last call. 
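Reviewer note: putting the ResourceQuotaSpec builders together, `WithHard` takes a corev1.ResourceList and `WithScopes` appends quota scopes. A hedged sketch; the quantities and scope choice are illustrative:

package example

import (
	corev1 "k8s.io/api/core/v1"
	resource "k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func quotaSpec() *corev1ac.ResourceQuotaSpecApplyConfiguration {
	return corev1ac.ResourceQuotaSpec().
		WithHard(corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("4"),
			corev1.ResourceMemory: resource.MustParse("8Gi"),
		}).
		WithScopes(corev1.ResourceQuotaScopeNotTerminating)
}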
-func (b *ResourceQuotaStatusApplyConfiguration) WithUsed(value v1.ResourceList) *ResourceQuotaStatusApplyConfiguration { +func (b *ResourceQuotaStatusApplyConfiguration) WithUsed(value corev1.ResourceList) *ResourceQuotaStatusApplyConfiguration { b.Used = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go index 9482b8d71..ea77647a9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// ResourceRequirementsApplyConfiguration represents an declarative configuration of the ResourceRequirements type for use +// ResourceRequirementsApplyConfiguration represents a declarative configuration of the ResourceRequirements type for use // with apply. type ResourceRequirementsApplyConfiguration struct { - Limits *v1.ResourceList `json:"limits,omitempty"` - Requests *v1.ResourceList `json:"requests,omitempty"` + Limits *corev1.ResourceList `json:"limits,omitempty"` + Requests *corev1.ResourceList `json:"requests,omitempty"` Claims []ResourceClaimApplyConfiguration `json:"claims,omitempty"` } -// ResourceRequirementsApplyConfiguration constructs an declarative configuration of the ResourceRequirements type for use with +// ResourceRequirementsApplyConfiguration constructs a declarative configuration of the ResourceRequirements type for use with // apply. func ResourceRequirements() *ResourceRequirementsApplyConfiguration { return &ResourceRequirementsApplyConfiguration{} @@ -39,7 +39,7 @@ func ResourceRequirements() *ResourceRequirementsApplyConfiguration { // WithLimits sets the Limits field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Limits field is set to the value of the last call. -func (b *ResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceList) *ResourceRequirementsApplyConfiguration { +func (b *ResourceRequirementsApplyConfiguration) WithLimits(value corev1.ResourceList) *ResourceRequirementsApplyConfiguration { b.Limits = &value return b } @@ -47,7 +47,7 @@ func (b *ResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceLis // WithRequests sets the Requests field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Requests field is set to the value of the last call. -func (b *ResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceList) *ResourceRequirementsApplyConfiguration { +func (b *ResourceRequirementsApplyConfiguration) WithRequests(value corev1.ResourceList) *ResourceRequirementsApplyConfiguration { b.Requests = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go new file mode 100644 index 000000000..e99586659 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ResourceStatusApplyConfiguration represents a declarative configuration of the ResourceStatus type for use +// with apply. +type ResourceStatusApplyConfiguration struct { + Name *corev1.ResourceName `json:"name,omitempty"` + Resources []ResourceHealthApplyConfiguration `json:"resources,omitempty"` +} + +// ResourceStatusApplyConfiguration constructs a declarative configuration of the ResourceStatus type for use with +// apply. +func ResourceStatus() *ResourceStatusApplyConfiguration { + return &ResourceStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceStatusApplyConfiguration) WithName(value corev1.ResourceName) *ResourceStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *ResourceStatusApplyConfiguration) WithResources(values ...*ResourceHealthApplyConfiguration) *ResourceStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResources") + } + b.Resources = append(b.Resources, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go index fffb5b186..b07f46de9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ScaleIOPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the ScaleIOPersistentVolumeSource type for use +// ScaleIOPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOPersistentVolumeSource type for use // with apply. 
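Reviewer note: ResourceHealth and ResourceStatus are new apply configurations that appear to correspond to the device/resource health reporting added to container status in newer Kubernetes releases. A hedged sketch of building one; the resource name format and IDs are illustrative, and the health value is written as a cast rather than relying on a named constant:

package example

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func deviceStatus() *corev1ac.ResourceStatusApplyConfiguration {
	return corev1ac.ResourceStatus().
		WithName(corev1.ResourceName("claim:gpu-claim/single-gpu")).
		WithResources(
			corev1ac.ResourceHealth().
				WithResourceID(corev1.ResourceID("gpu-0")).
				WithHealth(corev1.ResourceHealthStatus("Healthy")),
		)
}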
type ScaleIOPersistentVolumeSourceApplyConfiguration struct { Gateway *string `json:"gateway,omitempty"` @@ -33,7 +33,7 @@ type ScaleIOPersistentVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// ScaleIOPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the ScaleIOPersistentVolumeSource type for use with +// ScaleIOPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOPersistentVolumeSource type for use with // apply. func ScaleIOPersistentVolumeSource() *ScaleIOPersistentVolumeSourceApplyConfiguration { return &ScaleIOPersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go index b54e1161e..740c05ebb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ScaleIOVolumeSourceApplyConfiguration represents an declarative configuration of the ScaleIOVolumeSource type for use +// ScaleIOVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOVolumeSource type for use // with apply. type ScaleIOVolumeSourceApplyConfiguration struct { Gateway *string `json:"gateway,omitempty"` @@ -33,7 +33,7 @@ type ScaleIOVolumeSourceApplyConfiguration struct { ReadOnly *bool `json:"readOnly,omitempty"` } -// ScaleIOVolumeSourceApplyConfiguration constructs an declarative configuration of the ScaleIOVolumeSource type for use with +// ScaleIOVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOVolumeSource type for use with // apply. func ScaleIOVolumeSource() *ScaleIOVolumeSourceApplyConfiguration { return &ScaleIOVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go index c901a2ae6..c2481f490 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// ScopedResourceSelectorRequirementApplyConfiguration represents an declarative configuration of the ScopedResourceSelectorRequirement type for use +// ScopedResourceSelectorRequirementApplyConfiguration represents a declarative configuration of the ScopedResourceSelectorRequirement type for use // with apply. 
type ScopedResourceSelectorRequirementApplyConfiguration struct { - ScopeName *v1.ResourceQuotaScope `json:"scopeName,omitempty"` - Operator *v1.ScopeSelectorOperator `json:"operator,omitempty"` - Values []string `json:"values,omitempty"` + ScopeName *corev1.ResourceQuotaScope `json:"scopeName,omitempty"` + Operator *corev1.ScopeSelectorOperator `json:"operator,omitempty"` + Values []string `json:"values,omitempty"` } -// ScopedResourceSelectorRequirementApplyConfiguration constructs an declarative configuration of the ScopedResourceSelectorRequirement type for use with +// ScopedResourceSelectorRequirementApplyConfiguration constructs a declarative configuration of the ScopedResourceSelectorRequirement type for use with // apply. func ScopedResourceSelectorRequirement() *ScopedResourceSelectorRequirementApplyConfiguration { return &ScopedResourceSelectorRequirementApplyConfiguration{} @@ -39,7 +39,7 @@ func ScopedResourceSelectorRequirement() *ScopedResourceSelectorRequirementApply // WithScopeName sets the ScopeName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ScopeName field is set to the value of the last call. -func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithScopeName(value v1.ResourceQuotaScope) *ScopedResourceSelectorRequirementApplyConfiguration { +func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithScopeName(value corev1.ResourceQuotaScope) *ScopedResourceSelectorRequirementApplyConfiguration { b.ScopeName = &value return b } @@ -47,7 +47,7 @@ func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithScopeName(valu // WithOperator sets the Operator field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operator field is set to the value of the last call. -func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithOperator(value v1.ScopeSelectorOperator) *ScopedResourceSelectorRequirementApplyConfiguration { +func (b *ScopedResourceSelectorRequirementApplyConfiguration) WithOperator(value corev1.ScopeSelectorOperator) *ScopedResourceSelectorRequirementApplyConfiguration { b.Operator = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go index 3251e9dc1..a9fb9a1b1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ScopeSelectorApplyConfiguration represents an declarative configuration of the ScopeSelector type for use +// ScopeSelectorApplyConfiguration represents a declarative configuration of the ScopeSelector type for use // with apply. type ScopeSelectorApplyConfiguration struct { MatchExpressions []ScopedResourceSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` } -// ScopeSelectorApplyConfiguration constructs an declarative configuration of the ScopeSelector type for use with +// ScopeSelectorApplyConfiguration constructs a declarative configuration of the ScopeSelector type for use with // apply. 
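Reviewer note: the scoped selector builders plug into ResourceQuotaSpec via `WithScopeSelector`. A hedged sketch restricting a quota to a priority class; the class name is made up:

package example

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func highPriorityScope() *corev1ac.ScopeSelectorApplyConfiguration {
	// Attach the result with corev1ac.ResourceQuotaSpec().WithScopeSelector(...).
	return corev1ac.ScopeSelector().
		WithMatchExpressions(
			corev1ac.ScopedResourceSelectorRequirement().
				WithScopeName(corev1.ResourceQuotaScopePriorityClass).
				WithOperator(corev1.ScopeSelectorOpIn).
				WithValues("high-priority"),
		)
}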
func ScopeSelector() *ScopeSelectorApplyConfiguration { return &ScopeSelectorApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go index 9818a00e7..754bfd1b3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// SeccompProfileApplyConfiguration represents an declarative configuration of the SeccompProfile type for use +// SeccompProfileApplyConfiguration represents a declarative configuration of the SeccompProfile type for use // with apply. type SeccompProfileApplyConfiguration struct { - Type *v1.SeccompProfileType `json:"type,omitempty"` - LocalhostProfile *string `json:"localhostProfile,omitempty"` + Type *corev1.SeccompProfileType `json:"type,omitempty"` + LocalhostProfile *string `json:"localhostProfile,omitempty"` } -// SeccompProfileApplyConfiguration constructs an declarative configuration of the SeccompProfile type for use with +// SeccompProfileApplyConfiguration constructs a declarative configuration of the SeccompProfile type for use with // apply. func SeccompProfile() *SeccompProfileApplyConfiguration { return &SeccompProfileApplyConfiguration{} @@ -38,7 +38,7 @@ func SeccompProfile() *SeccompProfileApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *SeccompProfileApplyConfiguration) WithType(value v1.SeccompProfileType) *SeccompProfileApplyConfiguration { +func (b *SeccompProfileApplyConfiguration) WithType(value corev1.SeccompProfileType) *SeccompProfileApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go index 3f7e1eb03..9c8532d20 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go @@ -20,25 +20,25 @@ package v1 import ( corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// SecretApplyConfiguration represents an declarative configuration of the Secret type for use +// SecretApplyConfiguration represents a declarative configuration of the Secret type for use // with apply. 
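Reviewer note: the SeccompProfile builder is normally set through a pod or container security-context builder from the same generated package. A hedged sketch, assuming the PodSecurityContext apply configuration exposes WithSeccompProfile as the other generated types do:

package example

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func runtimeDefaultSecurityContext() *corev1ac.PodSecurityContextApplyConfiguration {
	return corev1ac.PodSecurityContext().
		WithSeccompProfile(corev1ac.SeccompProfile().
			WithType(corev1.SeccompProfileTypeRuntimeDefault))
}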
type SecretApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Immutable *bool `json:"immutable,omitempty"` - Data map[string][]byte `json:"data,omitempty"` - StringData map[string]string `json:"stringData,omitempty"` - Type *corev1.SecretType `json:"type,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Immutable *bool `json:"immutable,omitempty"` + Data map[string][]byte `json:"data,omitempty"` + StringData map[string]string `json:"stringData,omitempty"` + Type *corev1.SecretType `json:"type,omitempty"` } -// Secret constructs an declarative configuration of the Secret type for use with +// Secret constructs a declarative configuration of the Secret type for use with // apply. func Secret(name, namespace string) *SecretApplyConfiguration { b := &SecretApplyConfiguration{} @@ -89,7 +89,7 @@ func extractSecret(secret *corev1.Secret, fieldManager string, subresource strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *SecretApplyConfiguration) WithKind(value string) *SecretApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -97,7 +97,7 @@ func (b *SecretApplyConfiguration) WithKind(value string) *SecretApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *SecretApplyConfiguration) WithAPIVersion(value string) *SecretApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -106,7 +106,7 @@ func (b *SecretApplyConfiguration) WithAPIVersion(value string) *SecretApplyConf // If called multiple times, the Name field is set to the value of the last call. func (b *SecretApplyConfiguration) WithName(value string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -115,7 +115,7 @@ func (b *SecretApplyConfiguration) WithName(value string) *SecretApplyConfigurat // If called multiple times, the GenerateName field is set to the value of the last call. func (b *SecretApplyConfiguration) WithGenerateName(value string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -124,7 +124,7 @@ func (b *SecretApplyConfiguration) WithGenerateName(value string) *SecretApplyCo // If called multiple times, the Namespace field is set to the value of the last call. func (b *SecretApplyConfiguration) WithNamespace(value string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -133,7 +133,7 @@ func (b *SecretApplyConfiguration) WithNamespace(value string) *SecretApplyConfi // If called multiple times, the UID field is set to the value of the last call. 
func (b *SecretApplyConfiguration) WithUID(value types.UID) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -142,7 +142,7 @@ func (b *SecretApplyConfiguration) WithUID(value types.UID) *SecretApplyConfigur // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *SecretApplyConfiguration) WithResourceVersion(value string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -151,25 +151,25 @@ func (b *SecretApplyConfiguration) WithResourceVersion(value string) *SecretAppl // If called multiple times, the Generation field is set to the value of the last call. func (b *SecretApplyConfiguration) WithGeneration(value int64) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *SecretApplyConfiguration) WithCreationTimestamp(value metav1.Time) *SecretApplyConfiguration { +func (b *SecretApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *SecretApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *SecretApplyConfiguration { +func (b *SecretApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -178,7 +178,7 @@ func (b *SecretApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Sec // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *SecretApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -188,11 +188,11 @@ func (b *SecretApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) * // overwriting an existing map entries in Labels field with the same key. 
func (b *SecretApplyConfiguration) WithLabels(entries map[string]string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -203,11 +203,11 @@ func (b *SecretApplyConfiguration) WithLabels(entries map[string]string) *Secret // overwriting an existing map entries in Annotations field with the same key. func (b *SecretApplyConfiguration) WithAnnotations(entries map[string]string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -215,13 +215,13 @@ func (b *SecretApplyConfiguration) WithAnnotations(entries map[string]string) *S // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *SecretApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *SecretApplyConfiguration { +func (b *SecretApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -232,14 +232,14 @@ func (b *SecretApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefere func (b *SecretApplyConfiguration) WithFinalizers(values ...string) *SecretApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *SecretApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -286,3 +286,9 @@ func (b *SecretApplyConfiguration) WithType(value corev1.SecretType) *SecretAppl b.Type = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *SecretApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go index 7b22a8d0b..d3cc9f6a6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SecretEnvSourceApplyConfiguration represents an declarative configuration of the SecretEnvSource type for use +// SecretEnvSourceApplyConfiguration represents a declarative configuration of the SecretEnvSource type for use // with apply. type SecretEnvSourceApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` Optional *bool `json:"optional,omitempty"` } -// SecretEnvSourceApplyConfiguration constructs an declarative configuration of the SecretEnvSource type for use with +// SecretEnvSourceApplyConfiguration constructs a declarative configuration of the SecretEnvSource type for use with // apply. func SecretEnvSource() *SecretEnvSourceApplyConfiguration { return &SecretEnvSourceApplyConfiguration{} @@ -35,7 +35,7 @@ func SecretEnvSource() *SecretEnvSourceApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *SecretEnvSourceApplyConfiguration) WithName(value string) *SecretEnvSourceApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go index b8464a348..f1cd8b2d3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SecretKeySelectorApplyConfiguration represents an declarative configuration of the SecretKeySelector type for use +// SecretKeySelectorApplyConfiguration represents a declarative configuration of the SecretKeySelector type for use // with apply. type SecretKeySelectorApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type SecretKeySelectorApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretKeySelectorApplyConfiguration constructs an declarative configuration of the SecretKeySelector type for use with +// SecretKeySelectorApplyConfiguration constructs a declarative configuration of the SecretKeySelector type for use with // apply. func SecretKeySelector() *SecretKeySelectorApplyConfiguration { return &SecretKeySelectorApplyConfiguration{} @@ -36,7 +36,7 @@ func SecretKeySelector() *SecretKeySelectorApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
func (b *SecretKeySelectorApplyConfiguration) WithName(value string) *SecretKeySelectorApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go index e8edc6127..99fa36ecc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SecretProjectionApplyConfiguration represents an declarative configuration of the SecretProjection type for use +// SecretProjectionApplyConfiguration represents a declarative configuration of the SecretProjection type for use // with apply. type SecretProjectionApplyConfiguration struct { LocalObjectReferenceApplyConfiguration `json:",inline"` @@ -26,7 +26,7 @@ type SecretProjectionApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretProjectionApplyConfiguration constructs an declarative configuration of the SecretProjection type for use with +// SecretProjectionApplyConfiguration constructs a declarative configuration of the SecretProjection type for use with // apply. func SecretProjection() *SecretProjectionApplyConfiguration { return &SecretProjectionApplyConfiguration{} @@ -36,7 +36,7 @@ func SecretProjection() *SecretProjectionApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. func (b *SecretProjectionApplyConfiguration) WithName(value string) *SecretProjectionApplyConfiguration { - b.Name = &value + b.LocalObjectReferenceApplyConfiguration.Name = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go index 95579d003..f5e0de23a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SecretReferenceApplyConfiguration represents an declarative configuration of the SecretReference type for use +// SecretReferenceApplyConfiguration represents a declarative configuration of the SecretReference type for use // with apply. type SecretReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` Namespace *string `json:"namespace,omitempty"` } -// SecretReferenceApplyConfiguration constructs an declarative configuration of the SecretReference type for use with +// SecretReferenceApplyConfiguration constructs a declarative configuration of the SecretReference type for use with // apply. func SecretReference() *SecretReferenceApplyConfiguration { return &SecretReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go index bcb441e9f..9f765d354 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1 -// SecretVolumeSourceApplyConfiguration represents an declarative configuration of the SecretVolumeSource type for use +// SecretVolumeSourceApplyConfiguration represents a declarative configuration of the SecretVolumeSource type for use // with apply. type SecretVolumeSourceApplyConfiguration struct { SecretName *string `json:"secretName,omitempty"` @@ -27,7 +27,7 @@ type SecretVolumeSourceApplyConfiguration struct { Optional *bool `json:"optional,omitempty"` } -// SecretVolumeSourceApplyConfiguration constructs an declarative configuration of the SecretVolumeSource type for use with +// SecretVolumeSourceApplyConfiguration constructs a declarative configuration of the SecretVolumeSource type for use with // apply. func SecretVolumeSource() *SecretVolumeSourceApplyConfiguration { return &SecretVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go index 4146b765d..99faab72d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// SecurityContextApplyConfiguration represents an declarative configuration of the SecurityContext type for use +// SecurityContextApplyConfiguration represents a declarative configuration of the SecurityContext type for use // with apply. type SecurityContextApplyConfiguration struct { Capabilities *CapabilitiesApplyConfiguration `json:"capabilities,omitempty"` @@ -39,7 +39,7 @@ type SecurityContextApplyConfiguration struct { AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"` } -// SecurityContextApplyConfiguration constructs an declarative configuration of the SecurityContext type for use with +// SecurityContextApplyConfiguration constructs a declarative configuration of the SecurityContext type for use with // apply. func SecurityContext() *SecurityContextApplyConfiguration { return &SecurityContextApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go index 2938faa18..bad01300f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SELinuxOptionsApplyConfiguration represents an declarative configuration of the SELinuxOptions type for use +// SELinuxOptionsApplyConfiguration represents a declarative configuration of the SELinuxOptions type for use // with apply. type SELinuxOptionsApplyConfiguration struct { User *string `json:"user,omitempty"` @@ -27,7 +27,7 @@ type SELinuxOptionsApplyConfiguration struct { Level *string `json:"level,omitempty"` } -// SELinuxOptionsApplyConfiguration constructs an declarative configuration of the SELinuxOptions type for use with +// SELinuxOptionsApplyConfiguration constructs a declarative configuration of the SELinuxOptions type for use with // apply. 
func SELinuxOptions() *SELinuxOptionsApplyConfiguration { return &SELinuxOptionsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go index 3fa119523..85f6b25a9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceApplyConfiguration represents an declarative configuration of the Service type for use +// ServiceApplyConfiguration represents a declarative configuration of the Service type for use // with apply. type ServiceApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ServiceSpecApplyConfiguration `json:"spec,omitempty"` - Status *ServiceStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceStatusApplyConfiguration `json:"status,omitempty"` } -// Service constructs an declarative configuration of the Service type for use with +// Service constructs a declarative configuration of the Service type for use with // apply. func Service(name, namespace string) *ServiceApplyConfiguration { b := &ServiceApplyConfiguration{} @@ -58,18 +58,18 @@ func Service(name, namespace string) *ServiceApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractService(service *apicorev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) { +func ExtractService(service *corev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) { return extractService(service, fieldManager, "") } // ExtractServiceStatus is the same as ExtractService except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractServiceStatus(service *apicorev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) { +func ExtractServiceStatus(service *corev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) { return extractService(service, fieldManager, "status") } -func extractService(service *apicorev1.Service, fieldManager string, subresource string) (*ServiceApplyConfiguration, error) { +func extractService(service *corev1.Service, fieldManager string, subresource string) (*ServiceApplyConfiguration, error) { b := &ServiceApplyConfiguration{} err := managedfields.ExtractInto(service, internal.Parser().Type("io.k8s.api.core.v1.Service"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractService(service *apicorev1.Service, fieldManager string, subresource // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithKind(value string) *ServiceApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ServiceApplyConfiguration) WithKind(value string) *ServiceApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithAPIVersion(value string) *ServiceApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ServiceApplyConfiguration) WithAPIVersion(value string) *ServiceApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithName(value string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ServiceApplyConfiguration) WithName(value string) *ServiceApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithGenerateName(value string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ServiceApplyConfiguration) WithGenerateName(value string) *ServiceApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithNamespace(value string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ServiceApplyConfiguration) WithNamespace(value string) *ServiceApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithUID(value types.UID) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ServiceApplyConfiguration) WithUID(value types.UID) *ServiceApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ServiceApplyConfiguration) WithResourceVersion(value string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *ServiceApplyConfiguration) WithResourceVersion(value string) *ServiceAp // If called multiple times, the Generation field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithGeneration(value int64) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ServiceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceApplyConfiguration { +func (b *ServiceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ServiceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceApplyConfiguration { +func (b *ServiceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ServiceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Se // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ServiceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ServiceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *ServiceApplyConfiguration) WithLabels(entries map[string]string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ServiceApplyConfiguration) WithLabels(entries map[string]string) *Servi // overwriting an existing map entries in Annotations field with the same key. 
func (b *ServiceApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *ServiceApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ServiceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceApplyConfiguration { +func (b *ServiceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *ServiceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *ServiceApplyConfiguration) WithFinalizers(values ...string) *ServiceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ServiceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *ServiceApplyConfiguration) WithStatus(value *ServiceStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go index 53a819375..0d80ded9e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go @@ -19,25 +19,25 @@ limitations under the License. 
package v1 import ( - apicorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceAccountApplyConfiguration represents an declarative configuration of the ServiceAccount type for use +// ServiceAccountApplyConfiguration represents a declarative configuration of the ServiceAccount type for use // with apply. type ServiceAccountApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Secrets []ObjectReferenceApplyConfiguration `json:"secrets,omitempty"` - ImagePullSecrets []LocalObjectReferenceApplyConfiguration `json:"imagePullSecrets,omitempty"` - AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Secrets []ObjectReferenceApplyConfiguration `json:"secrets,omitempty"` + ImagePullSecrets []LocalObjectReferenceApplyConfiguration `json:"imagePullSecrets,omitempty"` + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"` } -// ServiceAccount constructs an declarative configuration of the ServiceAccount type for use with +// ServiceAccount constructs a declarative configuration of the ServiceAccount type for use with // apply. func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration { b := &ServiceAccountApplyConfiguration{} @@ -59,18 +59,18 @@ func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) { +func ExtractServiceAccount(serviceAccount *corev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) { return extractServiceAccount(serviceAccount, fieldManager, "") } // ExtractServiceAccountStatus is the same as ExtractServiceAccount except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractServiceAccountStatus(serviceAccount *apicorev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) { +func ExtractServiceAccountStatus(serviceAccount *corev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) { return extractServiceAccount(serviceAccount, fieldManager, "status") } -func extractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManager string, subresource string) (*ServiceAccountApplyConfiguration, error) { +func extractServiceAccount(serviceAccount *corev1.ServiceAccount, fieldManager string, subresource string) (*ServiceAccountApplyConfiguration, error) { b := &ServiceAccountApplyConfiguration{} err := managedfields.ExtractInto(serviceAccount, internal.Parser().Type("io.k8s.api.core.v1.ServiceAccount"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManage // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithKind(value string) *ServiceAccountApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *ServiceAccountApplyConfiguration) WithKind(value string) *ServiceAccoun // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithAPIVersion(value string) *ServiceAccountApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *ServiceAccountApplyConfiguration) WithAPIVersion(value string) *Service // If called multiple times, the Name field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithName(value string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *ServiceAccountApplyConfiguration) WithName(value string) *ServiceAccoun // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithGenerateName(value string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *ServiceAccountApplyConfiguration) WithGenerateName(value string) *Servi // If called multiple times, the Namespace field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithNamespace(value string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *ServiceAccountApplyConfiguration) WithNamespace(value string) *ServiceA // If called multiple times, the UID field is set to the value of the last call. 
func (b *ServiceAccountApplyConfiguration) WithUID(value types.UID) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *ServiceAccountApplyConfiguration) WithUID(value types.UID) *ServiceAcco // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithResourceVersion(value string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,25 +150,25 @@ func (b *ServiceAccountApplyConfiguration) WithResourceVersion(value string) *Se // If called multiple times, the Generation field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithGeneration(value int64) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ServiceAccountApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceAccountApplyConfiguration { +func (b *ServiceAccountApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ServiceAccountApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceAccountApplyConfiguration { +func (b *ServiceAccountApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *ServiceAccountApplyConfiguration) WithDeletionTimestamp(value metav1.Ti // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ServiceAccountApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *ServiceAccountApplyConfiguration) WithDeletionGracePeriodSeconds(value // overwriting an existing map entries in Labels field with the same key. 
func (b *ServiceAccountApplyConfiguration) WithLabels(entries map[string]string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *ServiceAccountApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *ServiceAccountApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -214,13 +214,13 @@ func (b *ServiceAccountApplyConfiguration) WithAnnotations(entries map[string]st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ServiceAccountApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceAccountApplyConfiguration { +func (b *ServiceAccountApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,14 +231,14 @@ func (b *ServiceAccountApplyConfiguration) WithOwnerReferences(values ...*v1.Own func (b *ServiceAccountApplyConfiguration) WithFinalizers(values ...string) *ServiceAccountApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ServiceAccountApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -275,3 +275,9 @@ func (b *ServiceAccountApplyConfiguration) WithAutomountServiceAccountToken(valu b.AutomountServiceAccountToken = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *ServiceAccountApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go index a52fad7d8..fab81bf8a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ServiceAccountTokenProjectionApplyConfiguration represents an declarative configuration of the ServiceAccountTokenProjection type for use +// ServiceAccountTokenProjectionApplyConfiguration represents a declarative configuration of the ServiceAccountTokenProjection type for use // with apply. type ServiceAccountTokenProjectionApplyConfiguration struct { Audience *string `json:"audience,omitempty"` @@ -26,7 +26,7 @@ type ServiceAccountTokenProjectionApplyConfiguration struct { Path *string `json:"path,omitempty"` } -// ServiceAccountTokenProjectionApplyConfiguration constructs an declarative configuration of the ServiceAccountTokenProjection type for use with +// ServiceAccountTokenProjectionApplyConfiguration constructs a declarative configuration of the ServiceAccountTokenProjection type for use with // apply. func ServiceAccountTokenProjection() *ServiceAccountTokenProjectionApplyConfiguration { return &ServiceAccountTokenProjectionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go index 8bc63bd95..4d5774d8d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// ServicePortApplyConfiguration represents an declarative configuration of the ServicePort type for use +// ServicePortApplyConfiguration represents a declarative configuration of the ServicePort type for use // with apply. type ServicePortApplyConfiguration struct { Name *string `json:"name,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` AppProtocol *string `json:"appProtocol,omitempty"` Port *int32 `json:"port,omitempty"` TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` NodePort *int32 `json:"nodePort,omitempty"` } -// ServicePortApplyConfiguration constructs an declarative configuration of the ServicePort type for use with +// ServicePortApplyConfiguration constructs a declarative configuration of the ServicePort type for use with // apply. func ServicePort() *ServicePortApplyConfiguration { return &ServicePortApplyConfiguration{} @@ -51,7 +51,7 @@ func (b *ServicePortApplyConfiguration) WithName(value string) *ServicePortApply // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. 
-func (b *ServicePortApplyConfiguration) WithProtocol(value v1.Protocol) *ServicePortApplyConfiguration { +func (b *ServicePortApplyConfiguration) WithProtocol(value corev1.Protocol) *ServicePortApplyConfiguration { b.Protocol = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go index 5cfbcb700..41367dce4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ServiceSpecApplyConfiguration represents an declarative configuration of the ServiceSpec type for use +// ServiceSpecApplyConfiguration represents a declarative configuration of the ServiceSpec type for use // with apply. type ServiceSpecApplyConfiguration struct { Ports []ServicePortApplyConfiguration `json:"ports,omitempty"` @@ -47,7 +47,7 @@ type ServiceSpecApplyConfiguration struct { TrafficDistribution *string `json:"trafficDistribution,omitempty"` } -// ServiceSpecApplyConfiguration constructs an declarative configuration of the ServiceSpec type for use with +// ServiceSpecApplyConfiguration constructs a declarative configuration of the ServiceSpec type for use with // apply. func ServiceSpec() *ServiceSpecApplyConfiguration { return &ServiceSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go index 2347cec67..11c3f8a80 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go @@ -22,14 +22,14 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceStatusApplyConfiguration represents an declarative configuration of the ServiceStatus type for use +// ServiceStatusApplyConfiguration represents a declarative configuration of the ServiceStatus type for use // with apply. type ServiceStatusApplyConfiguration struct { LoadBalancer *LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ServiceStatusApplyConfiguration constructs an declarative configuration of the ServiceStatus type for use with +// ServiceStatusApplyConfiguration constructs a declarative configuration of the ServiceStatus type for use with // apply. func ServiceStatus() *ServiceStatusApplyConfiguration { return &ServiceStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go index 7016f836a..13b045fff 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// SessionAffinityConfigApplyConfiguration represents an declarative configuration of the SessionAffinityConfig type for use +// SessionAffinityConfigApplyConfiguration represents a declarative configuration of the SessionAffinityConfig type for use // with apply. 
type SessionAffinityConfigApplyConfiguration struct {
	ClientIP *ClientIPConfigApplyConfiguration `json:"clientIP,omitempty"`
}
-// SessionAffinityConfigApplyConfiguration constructs an declarative configuration of the SessionAffinityConfig type for use with
+// SessionAffinityConfigApplyConfiguration constructs a declarative configuration of the SessionAffinityConfig type for use with
// apply.
func SessionAffinityConfig() *SessionAffinityConfigApplyConfiguration {
	return &SessionAffinityConfigApplyConfiguration{}
diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go
index 8b3284536..b4115609b 100644
--- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go
+++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go
@@ -18,13 +18,13 @@ limitations under the License.
package v1
-// SleepActionApplyConfiguration represents an declarative configuration of the SleepAction type for use
+// SleepActionApplyConfiguration represents a declarative configuration of the SleepAction type for use
// with apply.
type SleepActionApplyConfiguration struct {
	Seconds *int64 `json:"seconds,omitempty"`
}
-// SleepActionApplyConfiguration constructs an declarative configuration of the SleepAction type for use with
+// SleepActionApplyConfiguration constructs a declarative configuration of the SleepAction type for use with
// apply.
func SleepAction() *SleepActionApplyConfiguration {
	return &SleepActionApplyConfiguration{}
diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go
index 00ed39ccb..7381a498e 100644
--- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go
+++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go
@@ -18,7 +18,7 @@ limitations under the License.
package v1
-// StorageOSPersistentVolumeSourceApplyConfiguration represents an declarative configuration of the StorageOSPersistentVolumeSource type for use
+// StorageOSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSPersistentVolumeSource type for use
// with apply.
type StorageOSPersistentVolumeSourceApplyConfiguration struct {
	VolumeName *string `json:"volumeName,omitempty"`
@@ -28,7 +28,7 @@ type StorageOSPersistentVolumeSourceApplyConfiguration struct {
	SecretRef *ObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
}
-// StorageOSPersistentVolumeSourceApplyConfiguration constructs an declarative configuration of the StorageOSPersistentVolumeSource type for use with
+// StorageOSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSPersistentVolumeSource type for use with
// apply.
func StorageOSPersistentVolumeSource() *StorageOSPersistentVolumeSourceApplyConfiguration { return &StorageOSPersistentVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go index 7f3b810cf..81d9373c1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// StorageOSVolumeSourceApplyConfiguration represents an declarative configuration of the StorageOSVolumeSource type for use +// StorageOSVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSVolumeSource type for use // with apply. type StorageOSVolumeSourceApplyConfiguration struct { VolumeName *string `json:"volumeName,omitempty"` @@ -28,7 +28,7 @@ type StorageOSVolumeSourceApplyConfiguration struct { SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"` } -// StorageOSVolumeSourceApplyConfiguration constructs an declarative configuration of the StorageOSVolumeSource type for use with +// StorageOSVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSVolumeSource type for use with // apply. func StorageOSVolumeSource() *StorageOSVolumeSourceApplyConfiguration { return &StorageOSVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go index deab9e0b3..7719eb7d6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// SysctlApplyConfiguration represents an declarative configuration of the Sysctl type for use +// SysctlApplyConfiguration represents a declarative configuration of the Sysctl type for use // with apply. type SysctlApplyConfiguration struct { Name *string `json:"name,omitempty"` Value *string `json:"value,omitempty"` } -// SysctlApplyConfiguration constructs an declarative configuration of the Sysctl type for use with +// SysctlApplyConfiguration constructs a declarative configuration of the Sysctl type for use with // apply. func Sysctl() *SysctlApplyConfiguration { return &SysctlApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go index 4672b8742..4b9e43051 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// TaintApplyConfiguration represents an declarative configuration of the Taint type for use +// TaintApplyConfiguration represents a declarative configuration of the Taint type for use // with apply. 
type TaintApplyConfiguration struct { - Key *string `json:"key,omitempty"` - Value *string `json:"value,omitempty"` - Effect *v1.TaintEffect `json:"effect,omitempty"` - TimeAdded *metav1.Time `json:"timeAdded,omitempty"` + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + Effect *corev1.TaintEffect `json:"effect,omitempty"` + TimeAdded *metav1.Time `json:"timeAdded,omitempty"` } -// TaintApplyConfiguration constructs an declarative configuration of the Taint type for use with +// TaintApplyConfiguration constructs a declarative configuration of the Taint type for use with // apply. func Taint() *TaintApplyConfiguration { return &TaintApplyConfiguration{} @@ -57,7 +57,7 @@ func (b *TaintApplyConfiguration) WithValue(value string) *TaintApplyConfigurati // WithEffect sets the Effect field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Effect field is set to the value of the last call. -func (b *TaintApplyConfiguration) WithEffect(value v1.TaintEffect) *TaintApplyConfiguration { +func (b *TaintApplyConfiguration) WithEffect(value corev1.TaintEffect) *TaintApplyConfiguration { b.Effect = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go index bd038fc3a..cba1a7d08 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// TCPSocketActionApplyConfiguration represents an declarative configuration of the TCPSocketAction type for use +// TCPSocketActionApplyConfiguration represents a declarative configuration of the TCPSocketAction type for use // with apply. type TCPSocketActionApplyConfiguration struct { Port *intstr.IntOrString `json:"port,omitempty"` Host *string `json:"host,omitempty"` } -// TCPSocketActionApplyConfiguration constructs an declarative configuration of the TCPSocketAction type for use with +// TCPSocketActionApplyConfiguration constructs a declarative configuration of the TCPSocketAction type for use with // apply. func TCPSocketAction() *TCPSocketActionApplyConfiguration { return &TCPSocketActionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go index 1a92a8c66..a0a0aac00 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// TolerationApplyConfiguration represents an declarative configuration of the Toleration type for use +// TolerationApplyConfiguration represents a declarative configuration of the Toleration type for use // with apply. 
type TolerationApplyConfiguration struct { - Key *string `json:"key,omitempty"` - Operator *v1.TolerationOperator `json:"operator,omitempty"` - Value *string `json:"value,omitempty"` - Effect *v1.TaintEffect `json:"effect,omitempty"` - TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"` + Key *string `json:"key,omitempty"` + Operator *corev1.TolerationOperator `json:"operator,omitempty"` + Value *string `json:"value,omitempty"` + Effect *corev1.TaintEffect `json:"effect,omitempty"` + TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"` } -// TolerationApplyConfiguration constructs an declarative configuration of the Toleration type for use with +// TolerationApplyConfiguration constructs a declarative configuration of the Toleration type for use with // apply. func Toleration() *TolerationApplyConfiguration { return &TolerationApplyConfiguration{} @@ -49,7 +49,7 @@ func (b *TolerationApplyConfiguration) WithKey(value string) *TolerationApplyCon // WithOperator sets the Operator field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operator field is set to the value of the last call. -func (b *TolerationApplyConfiguration) WithOperator(value v1.TolerationOperator) *TolerationApplyConfiguration { +func (b *TolerationApplyConfiguration) WithOperator(value corev1.TolerationOperator) *TolerationApplyConfiguration { b.Operator = &value return b } @@ -65,7 +65,7 @@ func (b *TolerationApplyConfiguration) WithValue(value string) *TolerationApplyC // WithEffect sets the Effect field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Effect field is set to the value of the last call. -func (b *TolerationApplyConfiguration) WithEffect(value v1.TaintEffect) *TolerationApplyConfiguration { +func (b *TolerationApplyConfiguration) WithEffect(value corev1.TaintEffect) *TolerationApplyConfiguration { b.Effect = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go index 9581490de..674ddec93 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// TopologySelectorLabelRequirementApplyConfiguration represents an declarative configuration of the TopologySelectorLabelRequirement type for use +// TopologySelectorLabelRequirementApplyConfiguration represents a declarative configuration of the TopologySelectorLabelRequirement type for use // with apply. type TopologySelectorLabelRequirementApplyConfiguration struct { Key *string `json:"key,omitempty"` Values []string `json:"values,omitempty"` } -// TopologySelectorLabelRequirementApplyConfiguration constructs an declarative configuration of the TopologySelectorLabelRequirement type for use with +// TopologySelectorLabelRequirementApplyConfiguration constructs a declarative configuration of the TopologySelectorLabelRequirement type for use with // apply. 
func TopologySelectorLabelRequirement() *TopologySelectorLabelRequirementApplyConfiguration { return &TopologySelectorLabelRequirementApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go index a025b8a2a..7812ae520 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// TopologySelectorTermApplyConfiguration represents an declarative configuration of the TopologySelectorTerm type for use +// TopologySelectorTermApplyConfiguration represents a declarative configuration of the TopologySelectorTerm type for use // with apply. type TopologySelectorTermApplyConfiguration struct { MatchLabelExpressions []TopologySelectorLabelRequirementApplyConfiguration `json:"matchLabelExpressions,omitempty"` } -// TopologySelectorTermApplyConfiguration constructs an declarative configuration of the TopologySelectorTerm type for use with +// TopologySelectorTermApplyConfiguration constructs a declarative configuration of the TopologySelectorTerm type for use with // apply. func TopologySelectorTerm() *TopologySelectorTermApplyConfiguration { return &TopologySelectorTermApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go index fbfa8fa88..ab814e8e0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the TopologySpreadConstraint type for use +// TopologySpreadConstraintApplyConfiguration represents a declarative configuration of the TopologySpreadConstraint type for use // with apply. type TopologySpreadConstraintApplyConfiguration struct { MaxSkew *int32 `json:"maxSkew,omitempty"` TopologyKey *string `json:"topologyKey,omitempty"` - WhenUnsatisfiable *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` + WhenUnsatisfiable *corev1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` MinDomains *int32 `json:"minDomains,omitempty"` - NodeAffinityPolicy *v1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"` - NodeTaintsPolicy *v1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"` + NodeAffinityPolicy *corev1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"` + NodeTaintsPolicy *corev1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"` MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` } -// TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the TopologySpreadConstraint type for use with +// TopologySpreadConstraintApplyConfiguration constructs a declarative configuration of the TopologySpreadConstraint type for use with // apply. 
func TopologySpreadConstraint() *TopologySpreadConstraintApplyConfiguration { return &TopologySpreadConstraintApplyConfiguration{} @@ -61,7 +61,7 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithTopologyKey(value strin // WithWhenUnsatisfiable sets the WhenUnsatisfiable field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the WhenUnsatisfiable field is set to the value of the last call. -func (b *TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value v1.UnsatisfiableConstraintAction) *TopologySpreadConstraintApplyConfiguration { +func (b *TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value corev1.UnsatisfiableConstraintAction) *TopologySpreadConstraintApplyConfiguration { b.WhenUnsatisfiable = &value return b } @@ -85,7 +85,7 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32) // WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NodeAffinityPolicy field is set to the value of the last call. -func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { +func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { b.NodeAffinityPolicy = &value return b } @@ -93,7 +93,7 @@ func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(valu // WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NodeTaintsPolicy field is set to the value of the last call. -func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value v1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { +func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration { b.NodeTaintsPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go index cdc2eb7d3..1e63b7988 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// TypedLocalObjectReferenceApplyConfiguration represents an declarative configuration of the TypedLocalObjectReference type for use +// TypedLocalObjectReferenceApplyConfiguration represents a declarative configuration of the TypedLocalObjectReference type for use // with apply. 
type TypedLocalObjectReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type TypedLocalObjectReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// TypedLocalObjectReferenceApplyConfiguration constructs an declarative configuration of the TypedLocalObjectReference type for use with +// TypedLocalObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedLocalObjectReference type for use with // apply. func TypedLocalObjectReference() *TypedLocalObjectReferenceApplyConfiguration { return &TypedLocalObjectReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go index d9a01c9c3..f07de8902 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// TypedObjectReferenceApplyConfiguration represents an declarative configuration of the TypedObjectReference type for use +// TypedObjectReferenceApplyConfiguration represents a declarative configuration of the TypedObjectReference type for use // with apply. type TypedObjectReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -27,7 +27,7 @@ type TypedObjectReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// TypedObjectReferenceApplyConfiguration constructs an declarative configuration of the TypedObjectReference type for use with +// TypedObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedObjectReference type for use with // apply. func TypedObjectReference() *TypedObjectReferenceApplyConfiguration { return &TypedObjectReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go index db0686bce..e47cd031d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// VolumeApplyConfiguration represents an declarative configuration of the Volume type for use +// VolumeApplyConfiguration represents a declarative configuration of the Volume type for use // with apply. type VolumeApplyConfiguration struct { Name *string `json:"name,omitempty"` VolumeSourceApplyConfiguration `json:",inline"` } -// VolumeApplyConfiguration constructs an declarative configuration of the Volume type for use with +// VolumeApplyConfiguration constructs a declarative configuration of the Volume type for use with // apply. func Volume() *VolumeApplyConfiguration { return &VolumeApplyConfiguration{} @@ -43,7 +43,7 @@ func (b *VolumeApplyConfiguration) WithName(value string) *VolumeApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HostPath field is set to the value of the last call. 
func (b *VolumeApplyConfiguration) WithHostPath(value *HostPathVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.HostPath = value + b.VolumeSourceApplyConfiguration.HostPath = value return b } @@ -51,7 +51,7 @@ func (b *VolumeApplyConfiguration) WithHostPath(value *HostPathVolumeSourceApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the EmptyDir field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithEmptyDir(value *EmptyDirVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.EmptyDir = value + b.VolumeSourceApplyConfiguration.EmptyDir = value return b } @@ -59,7 +59,7 @@ func (b *VolumeApplyConfiguration) WithEmptyDir(value *EmptyDirVolumeSourceApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GCEPersistentDisk field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithGCEPersistentDisk(value *GCEPersistentDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.GCEPersistentDisk = value + b.VolumeSourceApplyConfiguration.GCEPersistentDisk = value return b } @@ -67,7 +67,7 @@ func (b *VolumeApplyConfiguration) WithGCEPersistentDisk(value *GCEPersistentDis // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AWSElasticBlockStore field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithAWSElasticBlockStore(value *AWSElasticBlockStoreVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.AWSElasticBlockStore = value + b.VolumeSourceApplyConfiguration.AWSElasticBlockStore = value return b } @@ -75,7 +75,7 @@ func (b *VolumeApplyConfiguration) WithAWSElasticBlockStore(value *AWSElasticBlo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GitRepo field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithGitRepo(value *GitRepoVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.GitRepo = value + b.VolumeSourceApplyConfiguration.GitRepo = value return b } @@ -83,7 +83,7 @@ func (b *VolumeApplyConfiguration) WithGitRepo(value *GitRepoVolumeSourceApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Secret field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithSecret(value *SecretVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Secret = value + b.VolumeSourceApplyConfiguration.Secret = value return b } @@ -91,7 +91,7 @@ func (b *VolumeApplyConfiguration) WithSecret(value *SecretVolumeSourceApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NFS field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithNFS(value *NFSVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.NFS = value + b.VolumeSourceApplyConfiguration.NFS = value return b } @@ -99,7 +99,7 @@ func (b *VolumeApplyConfiguration) WithNFS(value *NFSVolumeSourceApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ISCSI field is set to the value of the last call. 
func (b *VolumeApplyConfiguration) WithISCSI(value *ISCSIVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.ISCSI = value + b.VolumeSourceApplyConfiguration.ISCSI = value return b } @@ -107,7 +107,7 @@ func (b *VolumeApplyConfiguration) WithISCSI(value *ISCSIVolumeSourceApplyConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Glusterfs field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithGlusterfs(value *GlusterfsVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Glusterfs = value + b.VolumeSourceApplyConfiguration.Glusterfs = value return b } @@ -115,7 +115,7 @@ func (b *VolumeApplyConfiguration) WithGlusterfs(value *GlusterfsVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PersistentVolumeClaim field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithPersistentVolumeClaim(value *PersistentVolumeClaimVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.PersistentVolumeClaim = value + b.VolumeSourceApplyConfiguration.PersistentVolumeClaim = value return b } @@ -123,7 +123,7 @@ func (b *VolumeApplyConfiguration) WithPersistentVolumeClaim(value *PersistentVo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RBD field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithRBD(value *RBDVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.RBD = value + b.VolumeSourceApplyConfiguration.RBD = value return b } @@ -131,7 +131,7 @@ func (b *VolumeApplyConfiguration) WithRBD(value *RBDVolumeSourceApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FlexVolume field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithFlexVolume(value *FlexVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.FlexVolume = value + b.VolumeSourceApplyConfiguration.FlexVolume = value return b } @@ -139,7 +139,7 @@ func (b *VolumeApplyConfiguration) WithFlexVolume(value *FlexVolumeSourceApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Cinder field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithCinder(value *CinderVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Cinder = value + b.VolumeSourceApplyConfiguration.Cinder = value return b } @@ -147,7 +147,7 @@ func (b *VolumeApplyConfiguration) WithCinder(value *CinderVolumeSourceApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CephFS field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithCephFS(value *CephFSVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.CephFS = value + b.VolumeSourceApplyConfiguration.CephFS = value return b } @@ -155,7 +155,7 @@ func (b *VolumeApplyConfiguration) WithCephFS(value *CephFSVolumeSourceApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Flocker field is set to the value of the last call. 
func (b *VolumeApplyConfiguration) WithFlocker(value *FlockerVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Flocker = value + b.VolumeSourceApplyConfiguration.Flocker = value return b } @@ -163,7 +163,7 @@ func (b *VolumeApplyConfiguration) WithFlocker(value *FlockerVolumeSourceApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DownwardAPI field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithDownwardAPI(value *DownwardAPIVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.DownwardAPI = value + b.VolumeSourceApplyConfiguration.DownwardAPI = value return b } @@ -171,7 +171,7 @@ func (b *VolumeApplyConfiguration) WithDownwardAPI(value *DownwardAPIVolumeSourc // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FC field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithFC(value *FCVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.FC = value + b.VolumeSourceApplyConfiguration.FC = value return b } @@ -179,7 +179,7 @@ func (b *VolumeApplyConfiguration) WithFC(value *FCVolumeSourceApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AzureFile field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithAzureFile(value *AzureFileVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.AzureFile = value + b.VolumeSourceApplyConfiguration.AzureFile = value return b } @@ -187,7 +187,7 @@ func (b *VolumeApplyConfiguration) WithAzureFile(value *AzureFileVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ConfigMap field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithConfigMap(value *ConfigMapVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.ConfigMap = value + b.VolumeSourceApplyConfiguration.ConfigMap = value return b } @@ -195,7 +195,7 @@ func (b *VolumeApplyConfiguration) WithConfigMap(value *ConfigMapVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VsphereVolume field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithVsphereVolume(value *VsphereVirtualDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.VsphereVolume = value + b.VolumeSourceApplyConfiguration.VsphereVolume = value return b } @@ -203,7 +203,7 @@ func (b *VolumeApplyConfiguration) WithVsphereVolume(value *VsphereVirtualDiskVo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Quobyte field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithQuobyte(value *QuobyteVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Quobyte = value + b.VolumeSourceApplyConfiguration.Quobyte = value return b } @@ -211,7 +211,7 @@ func (b *VolumeApplyConfiguration) WithQuobyte(value *QuobyteVolumeSourceApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AzureDisk field is set to the value of the last call. 
func (b *VolumeApplyConfiguration) WithAzureDisk(value *AzureDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.AzureDisk = value + b.VolumeSourceApplyConfiguration.AzureDisk = value return b } @@ -219,7 +219,7 @@ func (b *VolumeApplyConfiguration) WithAzureDisk(value *AzureDiskVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PhotonPersistentDisk field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithPhotonPersistentDisk(value *PhotonPersistentDiskVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.PhotonPersistentDisk = value + b.VolumeSourceApplyConfiguration.PhotonPersistentDisk = value return b } @@ -227,7 +227,7 @@ func (b *VolumeApplyConfiguration) WithPhotonPersistentDisk(value *PhotonPersist // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Projected field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithProjected(value *ProjectedVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Projected = value + b.VolumeSourceApplyConfiguration.Projected = value return b } @@ -235,7 +235,7 @@ func (b *VolumeApplyConfiguration) WithProjected(value *ProjectedVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PortworxVolume field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithPortworxVolume(value *PortworxVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.PortworxVolume = value + b.VolumeSourceApplyConfiguration.PortworxVolume = value return b } @@ -243,7 +243,7 @@ func (b *VolumeApplyConfiguration) WithPortworxVolume(value *PortworxVolumeSourc // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ScaleIO field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithScaleIO(value *ScaleIOVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.ScaleIO = value + b.VolumeSourceApplyConfiguration.ScaleIO = value return b } @@ -251,7 +251,7 @@ func (b *VolumeApplyConfiguration) WithScaleIO(value *ScaleIOVolumeSourceApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the StorageOS field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithStorageOS(value *StorageOSVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.StorageOS = value + b.VolumeSourceApplyConfiguration.StorageOS = value return b } @@ -259,7 +259,7 @@ func (b *VolumeApplyConfiguration) WithStorageOS(value *StorageOSVolumeSourceApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CSI field is set to the value of the last call. func (b *VolumeApplyConfiguration) WithCSI(value *CSIVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.CSI = value + b.VolumeSourceApplyConfiguration.CSI = value return b } @@ -267,6 +267,14 @@ func (b *VolumeApplyConfiguration) WithCSI(value *CSIVolumeSourceApplyConfigurat // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Ephemeral field is set to the value of the last call. 
func (b *VolumeApplyConfiguration) WithEphemeral(value *EphemeralVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { - b.Ephemeral = value + b.VolumeSourceApplyConfiguration.Ephemeral = value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *VolumeApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeApplyConfiguration { + b.VolumeSourceApplyConfiguration.Image = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go index ea18ca8d9..0bc52aad2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// VolumeDeviceApplyConfiguration represents an declarative configuration of the VolumeDevice type for use +// VolumeDeviceApplyConfiguration represents a declarative configuration of the VolumeDevice type for use // with apply. type VolumeDeviceApplyConfiguration struct { Name *string `json:"name,omitempty"` DevicePath *string `json:"devicePath,omitempty"` } -// VolumeDeviceApplyConfiguration constructs an declarative configuration of the VolumeDevice type for use with +// VolumeDeviceApplyConfiguration constructs a declarative configuration of the VolumeDevice type for use with // apply. func VolumeDevice() *VolumeDeviceApplyConfiguration { return &VolumeDeviceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go index 358658350..ccd426a0c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// VolumeMountApplyConfiguration represents an declarative configuration of the VolumeMount type for use +// VolumeMountApplyConfiguration represents a declarative configuration of the VolumeMount type for use // with apply. 
type VolumeMountApplyConfiguration struct { - Name *string `json:"name,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` - RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` - MountPath *string `json:"mountPath,omitempty"` - SubPath *string `json:"subPath,omitempty"` - MountPropagation *v1.MountPropagationMode `json:"mountPropagation,omitempty"` - SubPathExpr *string `json:"subPathExpr,omitempty"` + Name *string `json:"name,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + RecursiveReadOnly *corev1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` + MountPath *string `json:"mountPath,omitempty"` + SubPath *string `json:"subPath,omitempty"` + MountPropagation *corev1.MountPropagationMode `json:"mountPropagation,omitempty"` + SubPathExpr *string `json:"subPathExpr,omitempty"` } -// VolumeMountApplyConfiguration constructs an declarative configuration of the VolumeMount type for use with +// VolumeMountApplyConfiguration constructs a declarative configuration of the VolumeMount type for use with // apply. func VolumeMount() *VolumeMountApplyConfiguration { return &VolumeMountApplyConfiguration{} @@ -59,7 +59,7 @@ func (b *VolumeMountApplyConfiguration) WithReadOnly(value bool) *VolumeMountApp // WithRecursiveReadOnly sets the RecursiveReadOnly field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RecursiveReadOnly field is set to the value of the last call. -func (b *VolumeMountApplyConfiguration) WithRecursiveReadOnly(value v1.RecursiveReadOnlyMode) *VolumeMountApplyConfiguration { +func (b *VolumeMountApplyConfiguration) WithRecursiveReadOnly(value corev1.RecursiveReadOnlyMode) *VolumeMountApplyConfiguration { b.RecursiveReadOnly = &value return b } @@ -83,7 +83,7 @@ func (b *VolumeMountApplyConfiguration) WithSubPath(value string) *VolumeMountAp // WithMountPropagation sets the MountPropagation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the MountPropagation field is set to the value of the last call. -func (b *VolumeMountApplyConfiguration) WithMountPropagation(value v1.MountPropagationMode) *VolumeMountApplyConfiguration { +func (b *VolumeMountApplyConfiguration) WithMountPropagation(value corev1.MountPropagationMode) *VolumeMountApplyConfiguration { b.MountPropagation = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go index c3d187fdf..f55c40723 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// VolumeMountStatusApplyConfiguration represents an declarative configuration of the VolumeMountStatus type for use +// VolumeMountStatusApplyConfiguration represents a declarative configuration of the VolumeMountStatus type for use // with apply. 
type VolumeMountStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - MountPath *string `json:"mountPath,omitempty"` - ReadOnly *bool `json:"readOnly,omitempty"` - RecursiveReadOnly *v1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` + Name *string `json:"name,omitempty"` + MountPath *string `json:"mountPath,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` + RecursiveReadOnly *corev1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"` } -// VolumeMountStatusApplyConfiguration constructs an declarative configuration of the VolumeMountStatus type for use with +// VolumeMountStatusApplyConfiguration constructs a declarative configuration of the VolumeMountStatus type for use with // apply. func VolumeMountStatus() *VolumeMountStatusApplyConfiguration { return &VolumeMountStatusApplyConfiguration{} @@ -64,7 +64,7 @@ func (b *VolumeMountStatusApplyConfiguration) WithReadOnly(value bool) *VolumeMo // WithRecursiveReadOnly sets the RecursiveReadOnly field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the RecursiveReadOnly field is set to the value of the last call. -func (b *VolumeMountStatusApplyConfiguration) WithRecursiveReadOnly(value v1.RecursiveReadOnlyMode) *VolumeMountStatusApplyConfiguration { +func (b *VolumeMountStatusApplyConfiguration) WithRecursiveReadOnly(value corev1.RecursiveReadOnlyMode) *VolumeMountStatusApplyConfiguration { b.RecursiveReadOnly = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go index 32bfd8292..9198c25dc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// VolumeNodeAffinityApplyConfiguration represents an declarative configuration of the VolumeNodeAffinity type for use +// VolumeNodeAffinityApplyConfiguration represents a declarative configuration of the VolumeNodeAffinity type for use // with apply. type VolumeNodeAffinityApplyConfiguration struct { Required *NodeSelectorApplyConfiguration `json:"required,omitempty"` } -// VolumeNodeAffinityApplyConfiguration constructs an declarative configuration of the VolumeNodeAffinity type for use with +// VolumeNodeAffinityApplyConfiguration constructs a declarative configuration of the VolumeNodeAffinity type for use with // apply. func VolumeNodeAffinity() *VolumeNodeAffinityApplyConfiguration { return &VolumeNodeAffinityApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go index a2ef0a994..c14e9fe69 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeProjectionApplyConfiguration represents an declarative configuration of the VolumeProjection type for use +// VolumeProjectionApplyConfiguration represents a declarative configuration of the VolumeProjection type for use // with apply. 
type VolumeProjectionApplyConfiguration struct { Secret *SecretProjectionApplyConfiguration `json:"secret,omitempty"` @@ -28,7 +28,7 @@ type VolumeProjectionApplyConfiguration struct { ClusterTrustBundle *ClusterTrustBundleProjectionApplyConfiguration `json:"clusterTrustBundle,omitempty"` } -// VolumeProjectionApplyConfiguration constructs an declarative configuration of the VolumeProjection type for use with +// VolumeProjectionApplyConfiguration constructs a declarative configuration of the VolumeProjection type for use with // apply. func VolumeProjection() *VolumeProjectionApplyConfiguration { return &VolumeProjectionApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go index 89ad1da8b..5c83ae6d4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// VolumeResourceRequirementsApplyConfiguration represents an declarative configuration of the VolumeResourceRequirements type for use +// VolumeResourceRequirementsApplyConfiguration represents a declarative configuration of the VolumeResourceRequirements type for use // with apply. type VolumeResourceRequirementsApplyConfiguration struct { - Limits *v1.ResourceList `json:"limits,omitempty"` - Requests *v1.ResourceList `json:"requests,omitempty"` + Limits *corev1.ResourceList `json:"limits,omitempty"` + Requests *corev1.ResourceList `json:"requests,omitempty"` } -// VolumeResourceRequirementsApplyConfiguration constructs an declarative configuration of the VolumeResourceRequirements type for use with +// VolumeResourceRequirementsApplyConfiguration constructs a declarative configuration of the VolumeResourceRequirements type for use with // apply. func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration { return &VolumeResourceRequirementsApplyConfiguration{} @@ -38,7 +38,7 @@ func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration // WithLimits sets the Limits field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Limits field is set to the value of the last call. -func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { +func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value corev1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { b.Limits = &value return b } @@ -46,7 +46,7 @@ func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value v1.Resou // WithRequests sets the Requests field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Requests field is set to the value of the last call. 
-func (b *VolumeResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { +func (b *VolumeResourceRequirementsApplyConfiguration) WithRequests(value corev1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { b.Requests = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go index 4a8d316dd..aeead953c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeSourceApplyConfiguration represents an declarative configuration of the VolumeSource type for use +// VolumeSourceApplyConfiguration represents a declarative configuration of the VolumeSource type for use // with apply. type VolumeSourceApplyConfiguration struct { HostPath *HostPathVolumeSourceApplyConfiguration `json:"hostPath,omitempty"` @@ -50,9 +50,10 @@ type VolumeSourceApplyConfiguration struct { StorageOS *StorageOSVolumeSourceApplyConfiguration `json:"storageos,omitempty"` CSI *CSIVolumeSourceApplyConfiguration `json:"csi,omitempty"` Ephemeral *EphemeralVolumeSourceApplyConfiguration `json:"ephemeral,omitempty"` + Image *ImageVolumeSourceApplyConfiguration `json:"image,omitempty"` } -// VolumeSourceApplyConfiguration constructs an declarative configuration of the VolumeSource type for use with +// VolumeSourceApplyConfiguration constructs a declarative configuration of the VolumeSource type for use with // apply. func VolumeSource() *VolumeSourceApplyConfiguration { return &VolumeSourceApplyConfiguration{} @@ -289,3 +290,11 @@ func (b *VolumeSourceApplyConfiguration) WithEphemeral(value *EphemeralVolumeSou b.Ephemeral = value return b } + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *VolumeSourceApplyConfiguration) WithImage(value *ImageVolumeSourceApplyConfiguration) *VolumeSourceApplyConfiguration { + b.Image = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go index ff3e3e27d..ea8fd8d62 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VsphereVirtualDiskVolumeSourceApplyConfiguration represents an declarative configuration of the VsphereVirtualDiskVolumeSource type for use +// VsphereVirtualDiskVolumeSourceApplyConfiguration represents a declarative configuration of the VsphereVirtualDiskVolumeSource type for use // with apply. 
type VsphereVirtualDiskVolumeSourceApplyConfiguration struct { VolumePath *string `json:"volumePath,omitempty"` @@ -27,7 +27,7 @@ type VsphereVirtualDiskVolumeSourceApplyConfiguration struct { StoragePolicyID *string `json:"storagePolicyID,omitempty"` } -// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs an declarative configuration of the VsphereVirtualDiskVolumeSource type for use with +// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the VsphereVirtualDiskVolumeSource type for use with // apply. func VsphereVirtualDiskVolumeSource() *VsphereVirtualDiskVolumeSourceApplyConfiguration { return &VsphereVirtualDiskVolumeSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go index eb99d06ff..c49ef93eb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// WeightedPodAffinityTermApplyConfiguration represents an declarative configuration of the WeightedPodAffinityTerm type for use +// WeightedPodAffinityTermApplyConfiguration represents a declarative configuration of the WeightedPodAffinityTerm type for use // with apply. type WeightedPodAffinityTermApplyConfiguration struct { Weight *int32 `json:"weight,omitempty"` PodAffinityTerm *PodAffinityTermApplyConfiguration `json:"podAffinityTerm,omitempty"` } -// WeightedPodAffinityTermApplyConfiguration constructs an declarative configuration of the WeightedPodAffinityTerm type for use with +// WeightedPodAffinityTermApplyConfiguration constructs a declarative configuration of the WeightedPodAffinityTerm type for use with // apply. func WeightedPodAffinityTerm() *WeightedPodAffinityTermApplyConfiguration { return &WeightedPodAffinityTermApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go index 20692e014..bb37a500b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// WindowsSecurityContextOptionsApplyConfiguration represents an declarative configuration of the WindowsSecurityContextOptions type for use +// WindowsSecurityContextOptionsApplyConfiguration represents a declarative configuration of the WindowsSecurityContextOptions type for use // with apply. type WindowsSecurityContextOptionsApplyConfiguration struct { GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty"` @@ -27,7 +27,7 @@ type WindowsSecurityContextOptionsApplyConfiguration struct { HostProcess *bool `json:"hostProcess,omitempty"` } -// WindowsSecurityContextOptionsApplyConfiguration constructs an declarative configuration of the WindowsSecurityContextOptions type for use with +// WindowsSecurityContextOptionsApplyConfiguration constructs a declarative configuration of the WindowsSecurityContextOptions type for use with // apply. 
func WindowsSecurityContextOptions() *WindowsSecurityContextOptionsApplyConfiguration { return &WindowsSecurityContextOptionsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go index d8c2359a3..df45a6fb8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// EndpointApplyConfiguration represents an declarative configuration of the Endpoint type for use +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use // with apply. type EndpointApplyConfiguration struct { Addresses []string `json:"addresses,omitempty"` @@ -35,7 +35,7 @@ type EndpointApplyConfiguration struct { Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"` } -// EndpointApplyConfiguration constructs an declarative configuration of the Endpoint type for use with +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with // apply. func Endpoint() *EndpointApplyConfiguration { return &EndpointApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go index 68c25dd57..20f0b9712 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// EndpointConditionsApplyConfiguration represents an declarative configuration of the EndpointConditions type for use +// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use // with apply. type EndpointConditionsApplyConfiguration struct { Ready *bool `json:"ready,omitempty"` @@ -26,7 +26,7 @@ type EndpointConditionsApplyConfiguration struct { Terminating *bool `json:"terminating,omitempty"` } -// EndpointConditionsApplyConfiguration constructs an declarative configuration of the EndpointConditions type for use with +// EndpointConditionsApplyConfiguration constructs a declarative configuration of the EndpointConditions type for use with // apply. func EndpointConditions() *EndpointConditionsApplyConfiguration { return &EndpointConditionsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go index 6eb9f21a5..d2d0f6776 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// EndpointHintsApplyConfiguration represents an declarative configuration of the EndpointHints type for use +// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use // with apply. 
type EndpointHintsApplyConfiguration struct { ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"` } -// EndpointHintsApplyConfiguration constructs an declarative configuration of the EndpointHints type for use with +// EndpointHintsApplyConfiguration constructs a declarative configuration of the EndpointHints type for use with // apply. func EndpointHints() *EndpointHintsApplyConfiguration { return &EndpointHintsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go index c71295600..b55c868cb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` - Port *int32 `json:"port,omitempty"` - AppProtocol *string `json:"appProtocol,omitempty"` + Name *string `json:"name,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` + Port *int32 `json:"port,omitempty"` + AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} @@ -48,7 +48,7 @@ func (b *EndpointPortApplyConfiguration) WithName(value string) *EndpointPortApp // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. 
-func (b *EndpointPortApplyConfiguration) WithProtocol(value v1.Protocol) *EndpointPortApplyConfiguration { +func (b *EndpointPortApplyConfiguration) WithProtocol(value corev1.Protocol) *EndpointPortApplyConfiguration { b.Protocol = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go index 640613753..a27c0ab1a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go @@ -20,24 +20,24 @@ package v1 import ( discoveryv1 "k8s.io/api/discovery/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointSliceApplyConfiguration represents an declarative configuration of the EndpointSlice type for use +// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use // with apply. type EndpointSliceApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - AddressType *discoveryv1.AddressType `json:"addressType,omitempty"` - Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"` - Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + AddressType *discoveryv1.AddressType `json:"addressType,omitempty"` + Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"` + Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSlice constructs an declarative configuration of the EndpointSlice type for use with +// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with // apply. func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { b := &EndpointSliceApplyConfiguration{} @@ -88,7 +88,7 @@ func extractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *EndpointSliceApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *Endpoint // If called multiple times, the Name field is set to the value of the last call. 
func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *Endpoi // If called multiple times, the Namespace field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointS // If called multiple times, the UID field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSlic // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,25 +150,25 @@ func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *End // If called multiple times, the Generation field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithGeneration(value int64) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration { +func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration { +func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *EndpointSliceApplyConfiguration) WithAnnotations(entries map[string]string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -214,13 +214,13 @@ func (b *EndpointSliceApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EndpointSliceApplyConfiguration { +func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,14 +231,14 @@ func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *EndpointSliceApplyConfiguration) WithFinalizers(values ...string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EndpointSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -275,3 +275,9 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EndpointSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go index 192a5ad2e..505d11ae2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// ForZoneApplyConfiguration represents an declarative configuration of the ForZone type for use +// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use // with apply. type ForZoneApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ForZoneApplyConfiguration constructs an declarative configuration of the ForZone type for use with +// ForZoneApplyConfiguration constructs a declarative configuration of the ForZone type for use with // apply. func ForZone() *ForZoneApplyConfiguration { return &ForZoneApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go index 724c2d007..5d87dae72 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// EndpointApplyConfiguration represents an declarative configuration of the Endpoint type for use +// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use // with apply. 
type EndpointApplyConfiguration struct { Addresses []string `json:"addresses,omitempty"` @@ -34,7 +34,7 @@ type EndpointApplyConfiguration struct { Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"` } -// EndpointApplyConfiguration constructs an declarative configuration of the Endpoint type for use with +// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with // apply. func Endpoint() *EndpointApplyConfiguration { return &EndpointApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go index bc0438f90..13f5fa557 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// EndpointConditionsApplyConfiguration represents an declarative configuration of the EndpointConditions type for use +// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use // with apply. type EndpointConditionsApplyConfiguration struct { Ready *bool `json:"ready,omitempty"` @@ -26,7 +26,7 @@ type EndpointConditionsApplyConfiguration struct { Terminating *bool `json:"terminating,omitempty"` } -// EndpointConditionsApplyConfiguration constructs an declarative configuration of the EndpointConditions type for use with +// EndpointConditionsApplyConfiguration constructs a declarative configuration of the EndpointConditions type for use with // apply. func EndpointConditions() *EndpointConditionsApplyConfiguration { return &EndpointConditionsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go index 41d80206b..99f69027a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// EndpointHintsApplyConfiguration represents an declarative configuration of the EndpointHints type for use +// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use // with apply. type EndpointHintsApplyConfiguration struct { ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"` } -// EndpointHintsApplyConfiguration constructs an declarative configuration of the EndpointHints type for use with +// EndpointHintsApplyConfiguration constructs a declarative configuration of the EndpointHints type for use with // apply. 
func EndpointHints() *EndpointHintsApplyConfiguration { return &EndpointHintsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go index 9a3a31b96..07cfc684b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// EndpointPortApplyConfiguration represents an declarative configuration of the EndpointPort type for use +// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use // with apply. type EndpointPortApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -31,7 +31,7 @@ type EndpointPortApplyConfiguration struct { AppProtocol *string `json:"appProtocol,omitempty"` } -// EndpointPortApplyConfiguration constructs an declarative configuration of the EndpointPort type for use with +// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with // apply. func EndpointPort() *EndpointPortApplyConfiguration { return &EndpointPortApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go index 74a24773c..46133ea32 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/discovery/v1beta1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,17 +27,17 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EndpointSliceApplyConfiguration represents an declarative configuration of the EndpointSlice type for use +// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use // with apply. type EndpointSliceApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - AddressType *v1beta1.AddressType `json:"addressType,omitempty"` + AddressType *discoveryv1beta1.AddressType `json:"addressType,omitempty"` Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"` Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"` } -// EndpointSlice constructs an declarative configuration of the EndpointSlice type for use with +// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with // apply. func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { b := &EndpointSliceApplyConfiguration{} @@ -59,18 +59,18 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { +func ExtractEndpointSlice(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { return extractEndpointSlice(endpointSlice, fieldManager, "") } // ExtractEndpointSliceStatus is the same as ExtractEndpointSlice except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractEndpointSliceStatus(endpointSlice *v1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { +func ExtractEndpointSliceStatus(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) { return extractEndpointSlice(endpointSlice, fieldManager, "status") } -func extractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) { +func extractEndpointSlice(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) { b := &EndpointSliceApplyConfiguration{} err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1beta1.EndpointSlice"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *EndpointSliceApplyConfiguration) WithKind(value string) *EndpointSliceA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *EndpointSliceApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *EndpointSliceApplyConfiguration) WithAPIVersion(value string) *Endpoint // If called multiple times, the Name field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *EndpointSliceApplyConfiguration) WithName(value string) *EndpointSliceA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *EndpointSliceApplyConfiguration) WithGenerateName(value string) *Endpoi // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *EndpointSliceApplyConfiguration) WithNamespace(value string) *EndpointS // If called multiple times, the UID field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *EndpointSliceApplyConfiguration) WithUID(value types.UID) *EndpointSlic // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,7 +150,7 @@ func (b *EndpointSliceApplyConfiguration) WithResourceVersion(value string) *End // If called multiple times, the Generation field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithGeneration(value int64) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -159,7 +159,7 @@ func (b *EndpointSliceApplyConfiguration) WithGeneration(value int64) *EndpointS // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -168,7 +168,7 @@ func (b *EndpointSliceApplyConfiguration) WithCreationTimestamp(value metav1.Tim // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *EndpointSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. 
func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *EndpointSliceApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *EndpointSliceApplyConfiguration) WithAnnotations(entries map[string]string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -220,7 +220,7 @@ func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,7 +231,7 @@ func (b *EndpointSliceApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *EndpointSliceApplyConfiguration) WithFinalizers(values ...string) *EndpointSliceApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -245,7 +245,7 @@ func (b *EndpointSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExis // WithAddressType sets the AddressType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the AddressType field is set to the value of the last call. -func (b *EndpointSliceApplyConfiguration) WithAddressType(value v1beta1.AddressType) *EndpointSliceApplyConfiguration { +func (b *EndpointSliceApplyConfiguration) WithAddressType(value discoveryv1beta1.AddressType) *EndpointSliceApplyConfiguration { b.AddressType = &value return b } @@ -275,3 +275,9 @@ func (b *EndpointSliceApplyConfiguration) WithPorts(values ...*EndpointPortApply } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EndpointSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go index 4d1455ed3..4af09cc49 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1beta1 -// ForZoneApplyConfiguration represents an declarative configuration of the ForZone type for use +// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use // with apply. type ForZoneApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ForZoneApplyConfiguration constructs an declarative configuration of the ForZone type for use with +// ForZoneApplyConfiguration constructs a declarative configuration of the ForZone type for use with // apply. func ForZone() *ForZoneApplyConfiguration { return &ForZoneApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go index 767e3dfc7..64896c3d8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go @@ -19,37 +19,37 @@ limitations under the License. package v1 import ( - apieventsv1 "k8s.io/api/events/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + eventsv1 "k8s.io/api/events/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" corev1 "k8s.io/client-go/applyconfigurations/core/v1" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - EventTime *metav1.MicroTime `json:"eventTime,omitempty"` - Series *EventSeriesApplyConfiguration `json:"series,omitempty"` - ReportingController *string `json:"reportingController,omitempty"` - ReportingInstance *string `json:"reportingInstance,omitempty"` - Action *string `json:"action,omitempty"` - Reason *string `json:"reason,omitempty"` - Regarding *corev1.ObjectReferenceApplyConfiguration `json:"regarding,omitempty"` - Related *corev1.ObjectReferenceApplyConfiguration `json:"related,omitempty"` - Note *string `json:"note,omitempty"` - Type *string `json:"type,omitempty"` - DeprecatedSource *corev1.EventSourceApplyConfiguration `json:"deprecatedSource,omitempty"` - DeprecatedFirstTimestamp *metav1.Time `json:"deprecatedFirstTimestamp,omitempty"` - DeprecatedLastTimestamp *metav1.Time `json:"deprecatedLastTimestamp,omitempty"` - DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` -} - -// Event constructs an declarative configuration of the Event type for use with + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + EventTime *apismetav1.MicroTime `json:"eventTime,omitempty"` + Series *EventSeriesApplyConfiguration `json:"series,omitempty"` + ReportingController *string `json:"reportingController,omitempty"` + ReportingInstance *string `json:"reportingInstance,omitempty"` + Action *string `json:"action,omitempty"` + Reason *string `json:"reason,omitempty"` + Regarding *corev1.ObjectReferenceApplyConfiguration `json:"regarding,omitempty"` + Related *corev1.ObjectReferenceApplyConfiguration `json:"related,omitempty"` + Note *string `json:"note,omitempty"` + Type *string 
`json:"type,omitempty"` + DeprecatedSource *corev1.EventSourceApplyConfiguration `json:"deprecatedSource,omitempty"` + DeprecatedFirstTimestamp *apismetav1.Time `json:"deprecatedFirstTimestamp,omitempty"` + DeprecatedLastTimestamp *apismetav1.Time `json:"deprecatedLastTimestamp,omitempty"` + DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` +} + +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -71,18 +71,18 @@ func Event(name, namespace string) *EventApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractEvent(event *apieventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) { +func ExtractEvent(event *eventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) { return extractEvent(event, fieldManager, "") } // ExtractEventStatus is the same as ExtractEvent except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractEventStatus(event *apieventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) { +func ExtractEventStatus(event *eventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) { return extractEvent(event, fieldManager, "status") } -func extractEvent(event *apieventsv1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { +func extractEvent(event *eventsv1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) { b := &EventApplyConfiguration{} err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1.Event"), fieldManager, b, subresource) if err != nil { @@ -100,7 +100,7 @@ func extractEvent(event *apieventsv1.Event, fieldManager string, subresource str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -108,7 +108,7 @@ func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -117,7 +117,7 @@ func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -126,7 +126,7 @@ func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -135,7 +135,7 @@ func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -144,7 +144,7 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -153,7 +153,7 @@ func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -162,25 +162,25 @@ func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -189,7 +189,7 @@ func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Even // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -199,11 +199,11 @@ func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *E // overwriting an existing map entries in Labels field with the same key. func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -214,11 +214,11 @@ func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventAp // overwriting an existing map entries in Annotations field with the same key. func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -226,13 +226,13 @@ func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *Ev // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -243,21 +243,21 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EventApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } // WithEventTime sets the EventTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the EventTime field is set to the value of the last call. -func (b *EventApplyConfiguration) WithEventTime(value metav1.MicroTime) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithEventTime(value apismetav1.MicroTime) *EventApplyConfiguration { b.EventTime = &value return b } @@ -345,7 +345,7 @@ func (b *EventApplyConfiguration) WithDeprecatedSource(value *corev1.EventSource // WithDeprecatedFirstTimestamp sets the DeprecatedFirstTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeprecatedFirstTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithDeprecatedFirstTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithDeprecatedFirstTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.DeprecatedFirstTimestamp = &value return b } @@ -353,7 +353,7 @@ func (b *EventApplyConfiguration) WithDeprecatedFirstTimestamp(value metav1.Time // WithDeprecatedLastTimestamp sets the DeprecatedLastTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeprecatedLastTimestamp field is set to the value of the last call. -func (b *EventApplyConfiguration) WithDeprecatedLastTimestamp(value metav1.Time) *EventApplyConfiguration { +func (b *EventApplyConfiguration) WithDeprecatedLastTimestamp(value apismetav1.Time) *EventApplyConfiguration { b.DeprecatedLastTimestamp = &value return b } @@ -365,3 +365,9 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo b.DeprecatedCount = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go index e66fb4127..c90954bcc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { - Count *int32 `json:"count,omitempty"` - LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` + Count *int32 `json:"count,omitempty"` + LastObservedTime *metav1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} @@ -46,7 +46,7 @@ func (b *EventSeriesApplyConfiguration) WithCount(value int32) *EventSeriesApply // WithLastObservedTime sets the LastObservedTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastObservedTime field is set to the value of the last call. -func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value v1.MicroTime) *EventSeriesApplyConfiguration { +func (b *EventSeriesApplyConfiguration) WithLastObservedTime(value metav1.MicroTime) *EventSeriesApplyConfiguration { b.LastObservedTime = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go index cfc4a851f..dc302e395 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EventApplyConfiguration represents an declarative configuration of the Event type for use +// EventApplyConfiguration represents a declarative configuration of the Event type for use // with apply. type EventApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -49,7 +49,7 @@ type EventApplyConfiguration struct { DeprecatedCount *int32 `json:"deprecatedCount,omitempty"` } -// Event constructs an declarative configuration of the Event type for use with +// Event constructs a declarative configuration of the Event type for use with // apply. func Event(name, namespace string) *EventApplyConfiguration { b := &EventApplyConfiguration{} @@ -100,7 +100,7 @@ func extractEvent(event *eventsv1beta1.Event, fieldManager string, subresource s // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Kind field is set to the value of the last call. func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -108,7 +108,7 @@ func (b *EventApplyConfiguration) WithKind(value string) *EventApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -117,7 +117,7 @@ func (b *EventApplyConfiguration) WithAPIVersion(value string) *EventApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -126,7 +126,7 @@ func (b *EventApplyConfiguration) WithName(value string) *EventApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -135,7 +135,7 @@ func (b *EventApplyConfiguration) WithGenerateName(value string) *EventApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -144,7 +144,7 @@ func (b *EventApplyConfiguration) WithNamespace(value string) *EventApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -153,7 +153,7 @@ func (b *EventApplyConfiguration) WithUID(value types.UID) *EventApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -162,7 +162,7 @@ func (b *EventApplyConfiguration) WithResourceVersion(value string) *EventApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -171,7 +171,7 @@ func (b *EventApplyConfiguration) WithGeneration(value int64) *EventApplyConfigu // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -180,7 +180,7 @@ func (b *EventApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Even // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -189,7 +189,7 @@ func (b *EventApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Even // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -199,11 +199,11 @@ func (b *EventApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *E // overwriting an existing map entries in Labels field with the same key. func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -214,11 +214,11 @@ func (b *EventApplyConfiguration) WithLabels(entries map[string]string) *EventAp // overwriting an existing map entries in Annotations field with the same key. 
func (b *EventApplyConfiguration) WithAnnotations(entries map[string]string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -232,7 +232,7 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -243,7 +243,7 @@ func (b *EventApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *EventApplyConfiguration) WithFinalizers(values ...string) *EventApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -365,3 +365,9 @@ func (b *EventApplyConfiguration) WithDeprecatedCount(value int32) *EventApplyCo b.DeprecatedCount = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EventApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go index 640a26517..75d936e8b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventSeriesApplyConfiguration represents an declarative configuration of the EventSeries type for use +// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use // with apply. type EventSeriesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"` } -// EventSeriesApplyConfiguration constructs an declarative configuration of the EventSeries type for use with +// EventSeriesApplyConfiguration constructs a declarative configuration of the EventSeries type for use with // apply. 
func EventSeries() *EventSeriesApplyConfiguration { return &EventSeriesApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go index eae399d32..a75e38bfb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetApplyConfiguration represents an declarative configuration of the DaemonSet type for use +// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use // with apply. type DaemonSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DaemonSetApplyConfiguration struct { Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"` } -// DaemonSet constructs an declarative configuration of the DaemonSet type for use with +// DaemonSet constructs a declarative configuration of the DaemonSet type for use with // apply. func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration { b := &DaemonSetApplyConfiguration{} @@ -87,7 +87,7 @@ func extractDaemonSet(daemonSet *extensionsv1beta1.DaemonSet, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DaemonSetApplyConfiguration) WithKind(value string) *DaemonSetApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DaemonSetApplyConfiguration) WithAPIVersion(value string) *DaemonSetApp // If called multiple times, the Name field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DaemonSetApplyConfiguration) WithName(value string) *DaemonSetApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DaemonSetApplyConfiguration) WithGenerateName(value string) *DaemonSetA // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DaemonSetApplyConfiguration) WithNamespace(value string) *DaemonSetAppl // If called multiple times, the UID field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DaemonSetApplyConfiguration) WithUID(value types.UID) *DaemonSetApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *DaemonSetApplyConfiguration) WithResourceVersion(value string) *DaemonS // If called multiple times, the Generation field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *DaemonSetApplyConfiguration) WithGeneration(value int64) *DaemonSetAppl // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *DaemonSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) * // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DaemonSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DaemonSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. 
func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DaemonSetApplyConfiguration) WithLabels(entries map[string]string) *Dae // overwriting an existing map entries in Annotations field with the same key. func (b *DaemonSetApplyConfiguration) WithAnnotations(entries map[string]string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *DaemonSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *DaemonSetApplyConfiguration) WithFinalizers(values ...string) *DaemonSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *DaemonSetApplyConfiguration) WithStatus(value *DaemonSetStatusApplyConf b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DaemonSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go index bbf718f0f..0312a3099 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go @@ -20,21 +20,21 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DaemonSetConditionApplyConfiguration represents an declarative configuration of the DaemonSetCondition type for use +// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use // with apply. 
type DaemonSetConditionApplyConfiguration struct { - Type *v1beta1.DaemonSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *extensionsv1beta1.DaemonSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// DaemonSetConditionApplyConfiguration constructs an declarative configuration of the DaemonSetCondition type for use with +// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with // apply. func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { return &DaemonSetConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func DaemonSetCondition() *DaemonSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DaemonSetConditionApplyConfiguration) WithType(value v1beta1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { +func (b *DaemonSetConditionApplyConfiguration) WithType(value extensionsv1beta1.DaemonSetConditionType) *DaemonSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go index b5d7a0c16..d62896918 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DaemonSetSpecApplyConfiguration represents an declarative configuration of the DaemonSetSpec type for use +// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use // with apply. type DaemonSetSpecApplyConfiguration struct { Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` @@ -34,7 +34,7 @@ type DaemonSetSpecApplyConfiguration struct { RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` } -// DaemonSetSpecApplyConfiguration constructs an declarative configuration of the DaemonSetSpec type for use with +// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with // apply. func DaemonSetSpec() *DaemonSetSpecApplyConfiguration { return &DaemonSetSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go index be6b3b285..373f9ef97 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// DaemonSetStatusApplyConfiguration represents an declarative configuration of the DaemonSetStatus type for use +// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use // with apply. type DaemonSetStatusApplyConfiguration struct { CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"` @@ -33,7 +33,7 @@ type DaemonSetStatusApplyConfiguration struct { Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// DaemonSetStatusApplyConfiguration constructs an declarative configuration of the DaemonSetStatus type for use with +// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with // apply. func DaemonSetStatus() *DaemonSetStatusApplyConfiguration { return &DaemonSetStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go index 2c827e62d..d3403605f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" ) -// DaemonSetUpdateStrategyApplyConfiguration represents an declarative configuration of the DaemonSetUpdateStrategy type for use +// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use // with apply. type DaemonSetUpdateStrategyApplyConfiguration struct { - Type *v1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"` - RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` + Type *extensionsv1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"` + RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DaemonSetUpdateStrategyApplyConfiguration constructs an declarative configuration of the DaemonSetUpdateStrategy type for use with +// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with // apply. func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { return &DaemonSetUpdateStrategyApplyConfiguration{} @@ -38,7 +38,7 @@ func DaemonSetUpdateStrategy() *DaemonSetUpdateStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value v1beta1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { +func (b *DaemonSetUpdateStrategyApplyConfiguration) WithType(value extensionsv1beta1.DaemonSetUpdateStrategyType) *DaemonSetUpdateStrategyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go index 878083f82..94fac18c6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentApplyConfiguration represents an declarative configuration of the Deployment type for use +// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use // with apply. type DeploymentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type DeploymentApplyConfiguration struct { Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"` } -// Deployment constructs an declarative configuration of the Deployment type for use with +// Deployment constructs a declarative configuration of the Deployment type for use with // apply. func Deployment(name, namespace string) *DeploymentApplyConfiguration { b := &DeploymentApplyConfiguration{} @@ -87,7 +87,7 @@ func extractDeployment(deployment *extensionsv1beta1.Deployment, fieldManager st // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *DeploymentApplyConfiguration) WithKind(value string) *DeploymentApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *DeploymentApplyConfiguration) WithAPIVersion(value string) *DeploymentA // If called multiple times, the Name field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *DeploymentApplyConfiguration) WithName(value string) *DeploymentApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *DeploymentApplyConfiguration) WithGenerateName(value string) *Deploymen // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *DeploymentApplyConfiguration) WithNamespace(value string) *DeploymentAp // If called multiple times, the UID field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *DeploymentApplyConfiguration) WithUID(value types.UID) *DeploymentApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *DeploymentApplyConfiguration) WithResourceVersion(value string) *Deploy // If called multiple times, the Generation field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *DeploymentApplyConfiguration) WithGeneration(value int64) *DeploymentAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *DeploymentApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *DeploymentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *DeploymentApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *DeploymentApplyConfiguration) WithLabels(entries map[string]string) *De // overwriting an existing map entries in Annotations field with the same key. func (b *DeploymentApplyConfiguration) WithAnnotations(entries map[string]string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *DeploymentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *DeploymentApplyConfiguration) WithFinalizers(values ...string) *DeploymentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *DeploymentApplyConfiguration) WithStatus(value *DeploymentStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeploymentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go index d8a214b7f..2b64508d9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go @@ -20,22 +20,22 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeploymentConditionApplyConfiguration represents an declarative configuration of the DeploymentCondition type for use +// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use // with apply. 
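The Deployment hunks above keep the chaining builder pattern described in the generated comments (every With* setter returns its receiver) and add a GetName getter. A hedged usage sketch built only from functions visible in this diff, assuming the vendored k8s.io/client-go/applyconfigurations/extensions/v1beta1 package resolves as in this module; the JSON marshaling is only there to inspect the result, not a claim about how the package is normally consumed:

package main

import (
    "encoding/json"
    "fmt"

    extensionsv1beta1apply "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

func main() {
    // Deployment(name, namespace) seeds the apply configuration; the With*
    // setters chain because each one returns the receiver.
    ac := extensionsv1beta1apply.Deployment("demo", "default").
        WithLabels(map[string]string{"app": "demo"}).
        WithAnnotations(map[string]string{"team": "platform"})

    // The new GetName getter reads the name back without reaching into
    // ObjectMetaApplyConfiguration by hand.
    fmt.Println(*ac.GetName()) // demo

    // The generated struct carries json tags, so it can be inspected as JSON.
    out, _ := json.MarshalIndent(ac, "", "  ")
    fmt.Println(string(out))
}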
type DeploymentConditionApplyConfiguration struct { - Type *v1beta1.DeploymentConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *extensionsv1beta1.DeploymentConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// DeploymentConditionApplyConfiguration constructs an declarative configuration of the DeploymentCondition type for use with +// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with // apply. func DeploymentCondition() *DeploymentConditionApplyConfiguration { return &DeploymentConditionApplyConfiguration{} @@ -44,7 +44,7 @@ func DeploymentCondition() *DeploymentConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *DeploymentConditionApplyConfiguration) WithType(value v1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { +func (b *DeploymentConditionApplyConfiguration) WithType(value extensionsv1beta1.DeploymentConditionType) *DeploymentConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go index 5e18476bd..5531c756f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// DeploymentSpecApplyConfiguration represents an declarative configuration of the DeploymentSpec type for use +// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use // with apply. type DeploymentSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -37,7 +37,7 @@ type DeploymentSpecApplyConfiguration struct { ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` } -// DeploymentSpecApplyConfiguration constructs an declarative configuration of the DeploymentSpec type for use with +// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with // apply. 
func DeploymentSpec() *DeploymentSpecApplyConfiguration { return &DeploymentSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go index f8d1cf5d2..adc023a34 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// DeploymentStatusApplyConfiguration represents an declarative configuration of the DeploymentStatus type for use +// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use // with apply. type DeploymentStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -31,7 +31,7 @@ type DeploymentStatusApplyConfiguration struct { CollisionCount *int32 `json:"collisionCount,omitempty"` } -// DeploymentStatusApplyConfiguration constructs an declarative configuration of the DeploymentStatus type for use with +// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with // apply. func DeploymentStatus() *DeploymentStatusApplyConfiguration { return &DeploymentStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go index 7c17b4072..b142b0deb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" ) -// DeploymentStrategyApplyConfiguration represents an declarative configuration of the DeploymentStrategy type for use +// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use // with apply. type DeploymentStrategyApplyConfiguration struct { - Type *v1beta1.DeploymentStrategyType `json:"type,omitempty"` + Type *extensionsv1beta1.DeploymentStrategyType `json:"type,omitempty"` RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"` } -// DeploymentStrategyApplyConfiguration constructs an declarative configuration of the DeploymentStrategy type for use with +// DeploymentStrategyApplyConfiguration constructs a declarative configuration of the DeploymentStrategy type for use with // apply. func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { return &DeploymentStrategyApplyConfiguration{} @@ -38,7 +38,7 @@ func DeploymentStrategy() *DeploymentStrategyApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *DeploymentStrategyApplyConfiguration) WithType(value v1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { +func (b *DeploymentStrategyApplyConfiguration) WithType(value extensionsv1beta1.DeploymentStrategyType) *DeploymentStrategyApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go index 361605d8c..32e0c8b1d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` - PathType *v1beta1.PathType `json:"pathType,omitempty"` + PathType *extensionsv1beta1.PathType `json:"pathType,omitempty"` Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} @@ -47,7 +47,7 @@ func (b *HTTPIngressPathApplyConfiguration) WithPath(value string) *HTTPIngressP // WithPathType sets the PathType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PathType field is set to the value of the last call. -func (b *HTTPIngressPathApplyConfiguration) WithPathType(value v1beta1.PathType) *HTTPIngressPathApplyConfiguration { +func (b *HTTPIngressPathApplyConfiguration) WithPathType(value extensionsv1beta1.PathType) *HTTPIngressPathApplyConfiguration { b.PathType = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go index 3137bc5eb..124545223 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. 
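The deploymentstrategy.go and httpingresspath.go hunks above rename the import alias for k8s.io/api/extensions/v1beta1 to extensionsv1beta1; the typed With* setters continue to take the same defined string types from that API package, so callers are unaffected. A small sketch, writing "RollingUpdate" as an explicit conversion rather than relying on a named constant:

package main

import (
    "fmt"

    extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    extensionsv1beta1apply "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

func main() {
    // WithType takes the defined string type from k8s.io/api/extensions/v1beta1;
    // the alias rename inside the generated files does not change this signature.
    strategy := extensionsv1beta1apply.DeploymentStrategy().
        WithType(extensionsv1beta1.DeploymentStrategyType("RollingUpdate"))

    fmt.Println(*strategy.Type) // RollingUpdate
}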
type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go index 46c541048..8cc05cc62 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -87,7 +87,7 @@ func extractIngress(ingress *extensionsv1beta1.Ingress, fieldManager string, sub // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressAp // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyCon // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *In // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *In // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. 
func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *Ingre // overwriting an existing map entries in Annotations field with the same key. func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go index f19c2f2ee..9d386f160 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. 
type IngressBackendApplyConfiguration struct { ServiceName *string `json:"serviceName,omitempty"` @@ -31,7 +31,7 @@ type IngressBackendApplyConfiguration struct { Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go index 20bf63780..12dbc3596 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use // with apply. type IngressLoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct { Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` } -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with // apply. func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { return &IngressLoadBalancerIngressApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go index e16dd2363..e896ab341 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use // with apply. type IngressLoadBalancerStatusApplyConfiguration struct { Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with // apply. 
func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { return &IngressLoadBalancerStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go index 083653797..4ee3f0161 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use // with apply. type IngressPortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with // apply. func IngressPortStatus() *IngressPortStatusApplyConfiguration { return &IngressPortStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go index 015541eeb..809fada92 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} @@ -43,6 +43,6 @@ func (b *IngressRuleApplyConfiguration) WithHost(value string) *IngressRuleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTP field is set to the value of the last call. 
func (b *IngressRuleApplyConfiguration) WithHTTP(value *HTTPIngressRuleValueApplyConfiguration) *IngressRuleApplyConfiguration { - b.HTTP = value + b.IngressRuleValueApplyConfiguration.HTTP = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go index 2d03c7b13..4a6412475 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go index 1ab4d8bb7..58fbde8b3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go index faa7e2446..3aed61688 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. 
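The ingressrule.go hunk above trims the tag on the embedded IngressRuleValueApplyConfiguration from `json:",omitempty,inline"` to `json:",inline"` and routes WithHTTP through the embedded struct. As far as encoding/json is concerned, an embedded field whose tag has an empty name already has its fields promoted, and omitempty has no effect on a struct value, so dropping it is a cosmetic cleanup. A standalone sketch with stand-in Rule and Value types (not the client-go types):

package main

import (
    "encoding/json"
    "fmt"
)

// Value plays the role of IngressRuleValueApplyConfiguration in this sketch.
type Value struct {
    HTTP *string `json:"http,omitempty"`
}

// Rule plays the role of IngressRuleApplyConfiguration: it embeds Value with an
// empty name in the json tag, so Value's fields are promoted ("inlined") into
// Rule's JSON object.
type Rule struct {
    Host  *string `json:"host,omitempty"`
    Value `json:",inline"`
}

func main() {
    host, http := "example.com", "backend"
    r := Rule{Host: &host, Value: Value{HTTP: &http}}

    out, _ := json.Marshal(r)
    fmt.Println(string(out)) // {"host":"example.com","http":"backend"}
}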
type IngressStatusApplyConfiguration struct { LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go index 8ca93a0bc..63648cd46 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go index a90d3b220..4a671130b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IPBlockApplyConfiguration represents an declarative configuration of the IPBlock type for use +// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use // with apply. type IPBlockApplyConfiguration struct { CIDR *string `json:"cidr,omitempty"` Except []string `json:"except,omitempty"` } -// IPBlockApplyConfiguration constructs an declarative configuration of the IPBlock type for use with +// IPBlockApplyConfiguration constructs a declarative configuration of the IPBlock type for use with // apply. func IPBlock() *IPBlockApplyConfiguration { return &IPBlockApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go index 27ea5d9dd..5ce0eb31f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyApplyConfiguration represents an declarative configuration of the NetworkPolicy type for use +// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use // with apply. 
type NetworkPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type NetworkPolicyApplyConfiguration struct { Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } -// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with +// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with // apply. func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { b := &NetworkPolicyApplyConfiguration{} @@ -86,7 +86,7 @@ func extractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldM // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkP // If called multiple times, the Name field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *Networ // If called multiple times, the Namespace field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPo // If called multiple times, the UID field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolic // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *Net // If called multiple times, the Generation field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithGeneration(value int64) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *NetworkPolicyApplyConfiguration) WithGeneration(value int64) *NetworkPo // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Tim // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. 
func (b *NetworkPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.Owne if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *NetworkPolicyApplyConfiguration) WithFinalizers(values ...string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -247,3 +247,9 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go index 6335ec375..ca3e174f9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NetworkPolicyEgressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyEgressRule type for use +// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use // with apply. type NetworkPolicyEgressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"` } -// NetworkPolicyEgressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyEgressRule type for use with +// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with // apply. 
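For NetworkPolicy the same pattern holds: WithOwnerReferences and WithFinalizers append on every call (WithOwnerReferences additionally panics on a nil element), and a GetName getter is added. A hedged sketch using only functions visible in the hunks above, assuming the vendored package path resolves:

package main

import (
    "fmt"

    extensionsv1beta1apply "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

func main() {
    np := extensionsv1beta1apply.NetworkPolicy("demo", "default")

    // Unlike the scalar With* setters, WithFinalizers appends on every call,
    // so repeated invocations accumulate values instead of overwriting them.
    np.WithFinalizers("example.com/first")
    np.WithFinalizers("example.com/second", "example.com/third")

    // Reading the result back through the embedded ObjectMetaApplyConfiguration
    // mirrors the fully qualified style used by the regenerated setters.
    fmt.Println(np.ObjectMetaApplyConfiguration.Finalizers)

    // The new getter avoids reaching into ObjectMeta by hand.
    fmt.Println(*np.GetName()) // demo
}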
func NetworkPolicyEgressRule() *NetworkPolicyEgressRuleApplyConfiguration { return &NetworkPolicyEgressRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go index 2ecc4c8c6..160713720 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NetworkPolicyIngressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyIngressRule type for use +// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use // with apply. type NetworkPolicyIngressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"` } -// NetworkPolicyIngressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyIngressRule type for use with +// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with // apply. func NetworkPolicyIngressRule() *NetworkPolicyIngressRuleApplyConfiguration { return &NetworkPolicyIngressRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go index c69b28122..8a0fa5741 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyPeerApplyConfiguration represents an declarative configuration of the NetworkPolicyPeer type for use +// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use // with apply. type NetworkPolicyPeerApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -30,7 +30,7 @@ type NetworkPolicyPeerApplyConfiguration struct { IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` } -// NetworkPolicyPeerApplyConfiguration constructs an declarative configuration of the NetworkPolicyPeer type for use with +// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with // apply. 
func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration { return &NetworkPolicyPeerApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go index 0140d771b..6bc1c1977 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go @@ -23,7 +23,7 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// NetworkPolicyPortApplyConfiguration represents an declarative configuration of the NetworkPolicyPort type for use +// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use // with apply. type NetworkPolicyPortApplyConfiguration struct { Protocol *v1.Protocol `json:"protocol,omitempty"` @@ -31,7 +31,7 @@ type NetworkPolicyPortApplyConfiguration struct { EndPort *int32 `json:"endPort,omitempty"` } -// NetworkPolicyPortApplyConfiguration constructs an declarative configuration of the NetworkPolicyPort type for use with +// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with // apply. func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration { return &NetworkPolicyPortApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go index 179e4bd02..4454329c5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicySpecApplyConfiguration represents an declarative configuration of the NetworkPolicySpec type for use +// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use // with apply. type NetworkPolicySpecApplyConfiguration struct { PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` @@ -32,7 +32,7 @@ type NetworkPolicySpecApplyConfiguration struct { PolicyTypes []extensionsv1beta1.PolicyType `json:"policyTypes,omitempty"` } -// NetworkPolicySpecApplyConfiguration constructs an declarative configuration of the NetworkPolicySpec type for use with +// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with // apply. 
func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration { return &NetworkPolicySpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go index b2afc835d..97a972f53 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetApplyConfiguration represents an declarative configuration of the ReplicaSet type for use +// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use // with apply. type ReplicaSetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ReplicaSetApplyConfiguration struct { Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"` } -// ReplicaSet constructs an declarative configuration of the ReplicaSet type for use with +// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with // apply. func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration { b := &ReplicaSetApplyConfiguration{} @@ -87,7 +87,7 @@ func extractReplicaSet(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager st // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *ReplicaSetApplyConfiguration) WithKind(value string) *ReplicaSetApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *ReplicaSetApplyConfiguration) WithAPIVersion(value string) *ReplicaSetA // If called multiple times, the Name field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *ReplicaSetApplyConfiguration) WithName(value string) *ReplicaSetApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *ReplicaSetApplyConfiguration) WithGenerateName(value string) *ReplicaSe // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *ReplicaSetApplyConfiguration) WithNamespace(value string) *ReplicaSetAp // If called multiple times, the UID field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *ReplicaSetApplyConfiguration) WithUID(value types.UID) *ReplicaSetApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *ReplicaSetApplyConfiguration) WithResourceVersion(value string) *Replic // If called multiple times, the Generation field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *ReplicaSetApplyConfiguration) WithGeneration(value int64) *ReplicaSetAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *ReplicaSetApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *ReplicaSetApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *ReplicaSetApplyConfiguration) WithLabels(entries map[string]string) *Re // overwriting an existing map entries in Annotations field with the same key. func (b *ReplicaSetApplyConfiguration) WithAnnotations(entries map[string]string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *ReplicaSetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *ReplicaSetApplyConfiguration) WithFinalizers(values ...string) *ReplicaSetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *ReplicaSetApplyConfiguration) WithStatus(value *ReplicaSetStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ReplicaSetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go index b71736517..540079fe5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go @@ -20,21 +20,21 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ReplicaSetConditionApplyConfiguration represents an declarative configuration of the ReplicaSetCondition type for use +// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use // with apply. 
type ReplicaSetConditionApplyConfiguration struct { - Type *v1beta1.ReplicaSetConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *extensionsv1beta1.ReplicaSetConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// ReplicaSetConditionApplyConfiguration constructs an declarative configuration of the ReplicaSetCondition type for use with +// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with // apply. func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { return &ReplicaSetConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func ReplicaSetCondition() *ReplicaSetConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *ReplicaSetConditionApplyConfiguration) WithType(value v1beta1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { +func (b *ReplicaSetConditionApplyConfiguration) WithType(value extensionsv1beta1.ReplicaSetConditionType) *ReplicaSetConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go index 5d0c57014..27653dd1a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ReplicaSetSpecApplyConfiguration represents an declarative configuration of the ReplicaSetSpec type for use +// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use // with apply. type ReplicaSetSpecApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -32,7 +32,7 @@ type ReplicaSetSpecApplyConfiguration struct { Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"` } -// ReplicaSetSpecApplyConfiguration constructs an declarative configuration of the ReplicaSetSpec type for use with +// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with // apply. func ReplicaSetSpec() *ReplicaSetSpecApplyConfiguration { return &ReplicaSetSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go index 45dc4bf31..9a5b468a3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// ReplicaSetStatusApplyConfiguration represents an declarative configuration of the ReplicaSetStatus type for use +// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use // with apply. type ReplicaSetStatusApplyConfiguration struct { Replicas *int32 `json:"replicas,omitempty"` @@ -29,7 +29,7 @@ type ReplicaSetStatusApplyConfiguration struct { Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"` } -// ReplicaSetStatusApplyConfiguration constructs an declarative configuration of the ReplicaSetStatus type for use with +// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with // apply. func ReplicaSetStatus() *ReplicaSetStatusApplyConfiguration { return &ReplicaSetStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go index 131e57a39..775f82eef 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// RollbackConfigApplyConfiguration represents an declarative configuration of the RollbackConfig type for use +// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use // with apply. type RollbackConfigApplyConfiguration struct { Revision *int64 `json:"revision,omitempty"` } -// RollbackConfigApplyConfiguration constructs an declarative configuration of the RollbackConfig type for use with +// RollbackConfigApplyConfiguration constructs a declarative configuration of the RollbackConfig type for use with // apply. func RollbackConfig() *RollbackConfigApplyConfiguration { return &RollbackConfigApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go index 3aa5e2f89..4352f7fac 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDaemonSetApplyConfiguration represents an declarative configuration of the RollingUpdateDaemonSet type for use +// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use // with apply. type RollingUpdateDaemonSetApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDaemonSetApplyConfiguration constructs an declarative configuration of the RollingUpdateDaemonSet type for use with +// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with // apply. 
func RollingUpdateDaemonSet() *RollingUpdateDaemonSetApplyConfiguration { return &RollingUpdateDaemonSetApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go index dde5f064b..244701a5e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go @@ -22,14 +22,14 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// RollingUpdateDeploymentApplyConfiguration represents an declarative configuration of the RollingUpdateDeployment type for use +// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use // with apply. type RollingUpdateDeploymentApplyConfiguration struct { MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"` } -// RollingUpdateDeploymentApplyConfiguration constructs an declarative configuration of the RollingUpdateDeployment type for use with +// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with // apply. func RollingUpdateDeployment() *RollingUpdateDeploymentApplyConfiguration { return &RollingUpdateDeploymentApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go index 60a1a8430..53e73439e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go @@ -19,22 +19,22 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use +// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use // with apply. type ScaleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *v1beta1.ScaleSpec `json:"spec,omitempty"` - Status *v1beta1.ScaleStatus `json:"status,omitempty"` + Spec *extensionsv1beta1.ScaleSpec `json:"spec,omitempty"` + Status *extensionsv1beta1.ScaleStatus `json:"status,omitempty"` } -// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with +// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with // apply. func Scale() *ScaleApplyConfiguration { b := &ScaleApplyConfiguration{} @@ -47,7 +47,7 @@ func Scale() *ScaleApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -55,7 +55,7 @@ func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguratio // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -64,7 +64,7 @@ func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfig // If called multiple times, the Name field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -73,7 +73,7 @@ func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguratio // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -82,7 +82,7 @@ func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConf // If called multiple times, the Namespace field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -91,7 +91,7 @@ func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfigu // If called multiple times, the UID field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -100,7 +100,7 @@ func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfigurat // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -109,7 +109,7 @@ func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyC // If called multiple times, the Generation field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -118,7 +118,7 @@ func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfigu // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -127,7 +127,7 @@ func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *Scal // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -136,7 +136,7 @@ func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *Scal // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -146,11 +146,11 @@ func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *S // overwriting an existing map entries in Labels field with the same key. func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -161,11 +161,11 @@ func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleAp // overwriting an existing map entries in Annotations field with the same key. 
func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -179,7 +179,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -190,7 +190,7 @@ func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferen func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -204,7 +204,7 @@ func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithSpec(value v1beta1.ScaleSpec) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithSpec(value extensionsv1beta1.ScaleSpec) *ScaleApplyConfiguration { b.Spec = &value return b } @@ -212,7 +212,13 @@ func (b *ScaleApplyConfiguration) WithSpec(value v1beta1.ScaleSpec) *ScaleApplyC // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleApplyConfiguration { +func (b *ScaleApplyConfiguration) WithStatus(value extensionsv1beta1.ScaleStatus) *ScaleApplyConfiguration { b.Status = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ScaleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go index cd21214f5..4e5805f39 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. 
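The hunks above route every metadata access through the embedded ObjectMetaApplyConfiguration and add a GetName accessor to each builder. As a point of reference only (not part of the vendored diff; the "frontend" and "default" names are hypothetical), a minimal sketch of the chained With* pattern and the new accessor, assuming the generated k8s.io/client-go/applyconfigurations/extensions/v1beta1 package vendored here:

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

func main() {
	// ReplicaSet(name, namespace) seeds the object metadata; every With* call
	// returns the receiver, so configuration reads as one chained expression.
	rs := extensionsv1beta1.ReplicaSet("frontend", "default").
		WithLabels(map[string]string{"app": "frontend"}).
		WithAnnotations(map[string]string{"team": "platform"})

	// GetName is the accessor added in this update; it returns *string, so a
	// nil check distinguishes an unset name from an empty one.
	if name := rs.GetName(); name != nil {
		fmt.Println(*name)
	}
}

Because each With* method returns the receiver, callers can keep building the declarative configuration fluently, and GetName gives them a way to read the name back without reaching into the embedded ObjectMetaApplyConfiguration directly.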
package v1 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go index d9c8a79cc..f8923ae7b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *flowcontrolv1.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go index 8809fafba..3219319ae 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"` - Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"` + Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -57,18 +57,18 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchema(flowSchema *flowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "") } // ExtractFlowSchemaStatus is the same as ExtractFlowSchema except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractFlowSchemaStatus(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "status") } -func extractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { +func extractFlowSchema(flowSchema *flowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { b := &FlowSchemaApplyConfiguration{} err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1.FlowSchema"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA // If called multiple times, the Name field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem // If called multiple times, the Namespace field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp // If called multiple times, the UID field is set to the value of the last call. 
func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc // If called multiple times, the Generation field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl // overwriting an existing map entries in Annotations field with the same key. func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *FlowSchemaApplyConfiguration { +func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *FlowSchemaApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go index 808ab09a5..d1c3dfbc6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { - Type *v1.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1.FlowSchemaConditionType `json:"type,omitempty"` + Status *flowcontrolv1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaCond // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go index 2785f5baf..4efd5d287 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go index 7c61360a5..6f951967e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go index 92a03d862..0be9eddfd 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go index c19f09703..8e2764298 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go index 03ff6d910..dc2e919d7 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. 
type LimitResponseApplyConfiguration struct { - Type *v1.LimitResponseType `json:"type,omitempty"` + Type *flowcontrolv1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *LimitResponseApplyConfiguration) WithType(value v1.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go index d9f8c2ecc..29c26b340 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go index b193efa8b..088afdc58 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. 
type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go index e8a1b97c9..50d5e5132 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"` - Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"` + Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -57,18 +57,18 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") } // ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") } -func extractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { b := &PriorityLevelConfigurationApplyConfiguration{} err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontro // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin // If called multiple times, the UID field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { +func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { +func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe // overwriting an existing map entries in Labels field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries m // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { +func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PriorityLevelConfigurationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go index 6ce588c8d..a7810adfb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *flowcontrolv1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go index 0638aee8b..f445713f0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. 
type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go index 5d8874959..45e4cdcd8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1.PriorityLevelEnablement `json:"type,omitempty"` + Type *flowcontrolv1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go index 322871edc..ff650bc3d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go index 69fd2c23c..7488f9bbe 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. 
func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go index 0991ce944..7428582a8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go index 55787ca76..58ad10764 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go index f02b03bdc..e2f6f3849 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { - Kind *v1.SubjectKind `json:"kind,omitempty"` + Kind *flowcontrolv1.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go index 2d17c111c..fd90067d4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go index 071048090..45ccc5cb7 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. 
type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go index 6dc1bb4d6..11aa62bba 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1beta1.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *flowcontrolv1beta1.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1beta1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go index f44313f54..f5d69b8a5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA // If called multiple times, the Name field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp // If called multiple times, the UID field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc // If called multiple times, the Generation field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl // overwriting an existing map entries in Annotations field with the same key. func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go index b62e9a22f..e7dcb4366 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. 
type FlowSchemaConditionApplyConfiguration struct { - Type *v1beta1.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1beta1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta1.FlowSchemaConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1beta1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta1.FlowSchem // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1beta1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go index 8d72c2d0d..1d6e8fc58 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. 
type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go index 6bc6d0543..5ad8a432b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go index 95b416e42..cc274fe2f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. 
func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go index 6f57169e1..0fe5feca1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go index 86e1bef6b..20e1b17bd 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1beta1.LimitResponseType `json:"type,omitempty"` + Type *flowcontrolv1beta1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *LimitResponseApplyConfiguration) WithType(value v1beta1.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1beta1.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go index 594ebc991..3c571ccb0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go index ea5b266b4..32a082dc7 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. 
func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go index 84324dbfd..54030159e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin // If called multiple times, the UID field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int6 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(val // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe // overwriting an existing map entries in Labels field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go index 59bc61051..74eda9170 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1beta1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1beta1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1beta1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go index c44bcc08b..b5e773e82 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go index 19146d9f6..775f476dd 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1beta1.PriorityLevelEnablement `json:"type,omitempty"` + Type *flowcontrolv1beta1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. 
func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1beta1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go index 3c27e6aa6..875b01efe 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go index 5e6e6e7b0..85a8b8863 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. 
type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go index 2e12ee1cc..5c67dad75 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go index f5a146a9b..439e5ff75 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. 
func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go index af571029f..000508065 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go @@ -19,19 +19,19 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1beta1.SubjectKind `json:"kind,omitempty"` + Kind *flowcontrolv1beta1.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1beta1.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1beta1.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go index 35bf27a59..bc2deae4c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. 
func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go index d6bc330fe..0c02d9b38 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go index 924f966d4..3922c4729 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1beta2.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *flowcontrolv1beta2.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta2.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1beta2.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go index 63a5f0aa3..fcab6df87 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA // If called multiple times, the Name field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp // If called multiple times, the UID field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc // If called multiple times, the Generation field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl // overwriting an existing map entries in Annotations field with the same key. func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go index 04dfcbf11..f47130eeb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. 
type FlowSchemaConditionApplyConfiguration struct { - Type *v1beta2.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1beta2.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta2.FlowSchemaConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta2.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta2.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1beta2.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta2.FlowSchem // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta2.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1beta2.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go index a5477e276..6eab63bfa 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. 
type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go index 67c5be2cb..70ac997e4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go index b670f2cfd..25207d7c1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. 
func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go index 563b185c7..298dd4637 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go index a9b7661fb..58cd78006 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1beta2.LimitResponseType `json:"type,omitempty"` + Type *flowcontrolv1beta2.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *LimitResponseApplyConfiguration) WithType(value v1beta2.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1beta2.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go index cb8ba0afd..5032ee489 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go index 179c3979d..2bb8c8718 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. 
func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go index 3256b3630..116bcfd31 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin // If called multiple times, the UID field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int6 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(val // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe // overwriting an existing map entries in Labels field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go index f742adeff..caf517be3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1beta2.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta2.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta2.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1beta2.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta2.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1beta2.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go index 581b451ff..bbf718b60 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go index 994a8a16a..c680ea1ef 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1beta2.PriorityLevelEnablement `json:"type,omitempty"` + Type *flowcontrolv1beta2.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. 
func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta2.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1beta2.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go index b55e32be0..7a1f8790b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go index 06246fb27..19c34c5f8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. 
type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go index b67ea1c7f..070d2ed46 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta2 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go index b6cfdcad3..c0d44721c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta2 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. 
func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go index 7030785b8..2b569a628 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go @@ -19,19 +19,19 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1beta2.SubjectKind `json:"kind,omitempty"` + Kind *flowcontrolv1beta2.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1beta2.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1beta2.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go index 8c77b3e8a..c249f042d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta2 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. 
func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go index b03c11d0d..b9bf6993a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta3 -// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use +// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. type ExemptPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` } -// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with +// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with // apply. func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration { return &ExemptPriorityLevelConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go index cd4572593..cc32fa100 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use +// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1beta3.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *flowcontrolv1beta3.FlowDistinguisherMethodType `json:"type,omitempty"` } -// FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with +// FlowDistinguisherMethodApplyConfiguration constructs a declarative configuration of the FlowDistinguisherMethod type for use with // apply. func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { return &FlowDistinguisherMethodApplyConfiguration{} @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1beta3.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value flowcontrolv1beta3.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go index c95635360..5f6416c7c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// FlowSchemaApplyConfiguration represents an declarative configuration of the FlowSchema type for use +// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use // with apply. type FlowSchemaApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type FlowSchemaApplyConfiguration struct { Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"` } -// FlowSchema constructs an declarative configuration of the FlowSchema type for use with +// FlowSchema constructs a declarative configuration of the FlowSchema type for use with // apply. func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} @@ -85,7 +85,7 @@ func extractFlowSchema(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *FlowSchemaApplyConfiguration) WithKind(value string) *FlowSchemaApplyCo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *FlowSchemaApplyConfiguration) WithAPIVersion(value string) *FlowSchemaA // If called multiple times, the Name field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *FlowSchemaApplyConfiguration) WithName(value string) *FlowSchemaApplyCo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *FlowSchemaApplyConfiguration) WithGenerateName(value string) *FlowSchem // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *FlowSchemaApplyConfiguration) WithNamespace(value string) *FlowSchemaAp // If called multiple times, the UID field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *FlowSchemaApplyConfiguration) WithUID(value types.UID) *FlowSchemaApply // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *FlowSchemaApplyConfiguration) WithResourceVersion(value string) *FlowSc // If called multiple times, the Generation field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *FlowSchemaApplyConfiguration) WithGeneration(value int64) *FlowSchemaAp // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *FlowSchemaApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *FlowSchemaApplyConfiguration) WithDeletionGracePeriodSeconds(value int6 // overwriting an existing map entries in Labels field with the same key. 
func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *FlowSchemaApplyConfiguration) WithLabels(entries map[string]string) *Fl // overwriting an existing map entries in Annotations field with the same key. func (b *FlowSchemaApplyConfiguration) WithAnnotations(entries map[string]string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *FlowSchemaApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRe func (b *FlowSchemaApplyConfiguration) WithFinalizers(values ...string) *FlowSchemaApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *FlowSchemaApplyConfiguration) WithStatus(value *FlowSchemaStatusApplyCo b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *FlowSchemaApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go index 0ef3a2c92..d5ba21f71 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use +// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use // with apply. 
type FlowSchemaConditionApplyConfiguration struct { - Type *v1beta3.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1beta3.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta3.FlowSchemaConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta3.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with +// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with // apply. func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { return &FlowSchemaConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta3.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value flowcontrolv1beta3.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1beta3.FlowSchem // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1beta3.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value flowcontrolv1beta3.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go index e077ed3fd..7141f6a6a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use +// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use // with apply. 
type FlowSchemaSpecApplyConfiguration struct { PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"` @@ -27,7 +27,7 @@ type FlowSchemaSpecApplyConfiguration struct { Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"` } -// FlowSchemaSpecApplyConfiguration constructs an declarative configuration of the FlowSchemaSpec type for use with +// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with // apply. func FlowSchemaSpec() *FlowSchemaSpecApplyConfiguration { return &FlowSchemaSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go index 18db1c932..294ddc909 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use +// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use // with apply. type FlowSchemaStatusApplyConfiguration struct { Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"` } -// FlowSchemaStatusApplyConfiguration constructs an declarative configuration of the FlowSchemaStatus type for use with +// FlowSchemaStatusApplyConfiguration constructs a declarative configuration of the FlowSchemaStatus type for use with // apply. func FlowSchemaStatus() *FlowSchemaStatusApplyConfiguration { return &FlowSchemaStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go index b919b711b..6576e716e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use +// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use // with apply. type GroupSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// GroupSubjectApplyConfiguration constructs an declarative configuration of the GroupSubject type for use with +// GroupSubjectApplyConfiguration constructs a declarative configuration of the GroupSubject type for use with // apply. 
func GroupSubject() *GroupSubjectApplyConfiguration { return &GroupSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go index 269a48721..bd98dd683 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use +// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` @@ -27,7 +27,7 @@ type LimitedPriorityLevelConfigurationApplyConfiguration struct { BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` } -// LimitedPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the LimitedPriorityLevelConfiguration type for use with +// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with // apply. func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApplyConfiguration { return &LimitedPriorityLevelConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go index b7a64ebfe..2c289c777 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go @@ -19,17 +19,17 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use +// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1beta3.LimitResponseType `json:"type,omitempty"` + Type *flowcontrolv1beta3.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } -// LimitResponseApplyConfiguration constructs an declarative configuration of the LimitResponse type for use with +// LimitResponseApplyConfiguration constructs a declarative configuration of the LimitResponse type for use with // apply. func LimitResponse() *LimitResponseApplyConfiguration { return &LimitResponseApplyConfiguration{} @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *LimitResponseApplyConfiguration) WithType(value v1beta3.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value flowcontrolv1beta3.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go index ecb47f52c..2dd0d2b06 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta3 -// NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use +// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use // with apply. type NonResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// NonResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the NonResourcePolicyRule type for use with +// NonResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the NonResourcePolicyRule type for use with // apply. func NonResourcePolicyRule() *NonResourcePolicyRuleApplyConfiguration { return &NonResourcePolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go index e30aace19..cc64dc585 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use +// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use // with apply. type PolicyRulesWithSubjectsApplyConfiguration struct { Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` @@ -26,7 +26,7 @@ type PolicyRulesWithSubjectsApplyConfiguration struct { NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"` } -// PolicyRulesWithSubjectsApplyConfiguration constructs an declarative configuration of the PolicyRulesWithSubjects type for use with +// PolicyRulesWithSubjectsApplyConfiguration constructs a declarative configuration of the PolicyRulesWithSubjects type for use with // apply. 
func PolicyRulesWithSubjects() *PolicyRulesWithSubjectsApplyConfiguration { return &PolicyRulesWithSubjectsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go index 6fbbbea8f..bb036c466 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the PriorityLevelConfiguration type for use +// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use // with apply. type PriorityLevelConfigurationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PriorityLevelConfigurationApplyConfiguration struct { Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"` } -// PriorityLevelConfiguration constructs an declarative configuration of the PriorityLevelConfiguration type for use with +// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with // apply. func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyConfiguration { b := &PriorityLevelConfigurationApplyConfiguration{} @@ -85,7 +85,7 @@ func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1 // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *PriorityLevelConfigurationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithKind(value string) *P // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value string) *PriorityLevelConfigurationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithAPIVersion(value stri // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithName(value string) *P // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGenerateName(value st // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithNamespace(value strin // If called multiple times, the UID field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithUID(value types.UID) // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithResourceVersion(value // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithGeneration(value int6 // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithCreationTimestamp(val // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionTimestamp(val // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithDeletionGracePeriodSe // overwriting an existing map entries in Labels field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithLabels(entries map[st // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityLevelConfigurationApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithOwnerReferences(value func (b *PriorityLevelConfigurationApplyConfiguration) WithFinalizers(values ...string) *PriorityLevelConfigurationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *PriorityLevelConfigurationApplyConfiguration) WithStatus(value *Priorit b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *PriorityLevelConfigurationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go index 6e36b6a07..01695f144 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go @@ -19,21 +19,21 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use +// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1beta3.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *flowcontrolv1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *flowcontrolv1beta3.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with +// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with // apply. func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionApplyConfiguration { return &PriorityLevelConfigurationConditionApplyConfiguration{} @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1beta3.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value flowcontrolv1beta3.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1beta3.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value flowcontrolv1beta3.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go index cb827b1e6..566aaa916 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use +// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. type PriorityLevelConfigurationReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// PriorityLevelConfigurationReferenceApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationReference type for use with +// PriorityLevelConfigurationReferenceApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationReference type for use with // apply. func PriorityLevelConfigurationReference() *PriorityLevelConfigurationReferenceApplyConfiguration { return &PriorityLevelConfigurationReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go index 5b0680d91..c95085478 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use +// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1beta3.PriorityLevelEnablement `json:"type,omitempty"` + Type *flowcontrolv1beta3.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } -// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with +// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with // apply. 
func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfiguration { return &PriorityLevelConfigurationSpecApplyConfiguration{} @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1beta3.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value flowcontrolv1beta3.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go index 0ee9e306e..be2436457 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use +// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. type PriorityLevelConfigurationStatusApplyConfiguration struct { Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"` } -// PriorityLevelConfigurationStatusApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationStatus type for use with +// PriorityLevelConfigurationStatusApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationStatus type for use with // apply. func PriorityLevelConfigurationStatus() *PriorityLevelConfigurationStatusApplyConfiguration { return &PriorityLevelConfigurationStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go index fc86c4443..f9a3c6d1a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use +// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use // with apply. 
type QueuingConfigurationApplyConfiguration struct { Queues *int32 `json:"queues,omitempty"` @@ -26,7 +26,7 @@ type QueuingConfigurationApplyConfiguration struct { QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"` } -// QueuingConfigurationApplyConfiguration constructs an declarative configuration of the QueuingConfiguration type for use with +// QueuingConfigurationApplyConfiguration constructs a declarative configuration of the QueuingConfiguration type for use with // apply. func QueuingConfiguration() *QueuingConfigurationApplyConfiguration { return &QueuingConfigurationApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go index 72623ffe4..e38f711db 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta3 -// ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use +// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use // with apply. type ResourcePolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type ResourcePolicyRuleApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` } -// ResourcePolicyRuleApplyConfiguration constructs an declarative configuration of the ResourcePolicyRule type for use with +// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with // apply. func ResourcePolicyRule() *ResourcePolicyRuleApplyConfiguration { return &ResourcePolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go index e2d6b1b21..a5ed40c2a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta3 -// ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use +// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use // with apply. type ServiceAccountSubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` Name *string `json:"name,omitempty"` } -// ServiceAccountSubjectApplyConfiguration constructs an declarative configuration of the ServiceAccountSubject type for use with +// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with // apply. 
func ServiceAccountSubject() *ServiceAccountSubjectApplyConfiguration { return &ServiceAccountSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go index f13b8f3ec..46499f541 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go @@ -19,19 +19,19 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" ) -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1beta3.SubjectKind `json:"kind,omitempty"` + Kind *flowcontrolv1beta3.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1beta3.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value flowcontrolv1beta3.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go index 3db3abbc1..7b3ec2ba8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta3 -// UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use +// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use // with apply. type UserSubjectApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// UserSubjectApplyConfiguration constructs an declarative configuration of the UserSubject type for use with +// UserSubjectApplyConfiguration constructs a declarative configuration of the UserSubject type for use with // apply. 
func UserSubject() *UserSubjectApplyConfiguration { return &UserSubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 47bfb44e0..cd9fcd98b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -19,8 +19,8 @@ limitations under the License. package internal import ( - "fmt" - "sync" + fmt "fmt" + sync "sync" typed "sigs.k8s.io/structured-merge-diff/v4/typed" ) @@ -512,6 +512,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: url type: scalar: string +- name: io.k8s.api.admissionregistration.v1alpha1.ApplyConfiguration + map: + fields: + - name: expression + type: + scalar: string - name: io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation map: fields: @@ -534,6 +540,12 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.admissionregistration.v1alpha1.JSONPatch + map: + fields: + - name: expression + type: + scalar: string - name: io.k8s.api.admissionregistration.v1alpha1.MatchCondition map: fields: @@ -570,6 +582,100 @@ var schemaYAML = typed.YAMLObject(`types: namedType: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations elementRelationship: atomic elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec + default: {} +- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec + default: {} +- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec + map: + fields: + - name: matchResources + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources + - name: paramRef + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ParamRef + - name: policyName + type: + scalar: string +- name: io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec + map: + fields: + - name: failurePolicy + type: + scalar: string + - name: matchConditions + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchCondition + elementRelationship: associative + keys: + - name + - name: matchConstraints + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.MatchResources + - name: mutations + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.Mutation + elementRelationship: atomic + - name: paramKind + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ParamKind + - name: reinvocationPolicy + type: + scalar: string + - name: variables + type: + list: + elementType: + namedType: io.k8s.api.admissionregistration.v1alpha1.Variable + elementRelationship: atomic +- name: io.k8s.api.admissionregistration.v1alpha1.Mutation + map: + fields: + - name: 
applyConfiguration + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.ApplyConfiguration + - name: jsonPatch + type: + namedType: io.k8s.api.admissionregistration.v1alpha1.JSONPatch + - name: patchType + type: + scalar: string + default: "" - name: io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations map: fields: @@ -4356,9 +4462,55 @@ var schemaYAML = typed.YAMLObject(`types: - name: leaseTransitions type: scalar: numeric + - name: preferredHolder + type: + scalar: string + - name: renewTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: strategy + type: + scalar: string +- name: io.k8s.api.coordination.v1alpha2.LeaseCandidate + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.coordination.v1alpha2.LeaseCandidateSpec + default: {} +- name: io.k8s.api.coordination.v1alpha2.LeaseCandidateSpec + map: + fields: + - name: binaryVersion + type: + scalar: string + default: "" + - name: emulationVersion + type: + scalar: string + - name: leaseName + type: + scalar: string + default: "" + - name: pingTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime - name: renewTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: strategy + type: + scalar: string - name: io.k8s.api.coordination.v1beta1.Lease map: fields: @@ -4391,9 +4543,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: leaseTransitions type: scalar: numeric + - name: preferredHolder + type: + scalar: string - name: renewTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + - name: strategy + type: + scalar: string - name: io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource map: fields: @@ -4454,6 +4612,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: cachingMode type: scalar: string + default: ReadWrite - name: diskName type: scalar: string @@ -4465,12 +4624,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: ext4 - name: kind type: scalar: string + default: Shared - name: readOnly type: scalar: boolean + default: false - name: io.k8s.api.core.v1.AzureFilePersistentVolumeSource map: fields: @@ -4655,15 +4817,6 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.core.v1.ClaimSource - map: - fields: - - name: resourceClaimName - type: - scalar: string - - name: resourceClaimTemplateName - type: - scalar: string - name: io.k8s.api.core.v1.ClientIPConfig map: fields: @@ -5047,6 +5200,14 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: allocatedResourcesStatus + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceStatus + elementRelationship: associative + keys: + - name - name: containerID type: scalar: string @@ -5084,6 +5245,9 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.ContainerState default: {} + - name: user + type: + namedType: io.k8s.api.core.v1.ContainerUser - name: volumeMounts type: list: @@ -5092,6 +5256,12 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - mountPath +- name: io.k8s.api.core.v1.ContainerUser + map: + fields: + - name: linux + type: + namedType: io.k8s.api.core.v1.LinuxContainerUser - name: io.k8s.api.core.v1.DaemonEndpoint map: fields: @@ 
-5661,6 +5831,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + default: "" - name: io.k8s.api.core.v1.HostPathVolumeSource map: fields: @@ -5693,6 +5864,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: iscsiInterface type: scalar: string + default: default - name: lun type: scalar: numeric @@ -5735,6 +5907,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: iscsiInterface type: scalar: string + default: default - name: lun type: scalar: numeric @@ -5755,6 +5928,15 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.core.v1.ImageVolumeSource + map: + fields: + - name: pullPolicy + type: + scalar: string + - name: reference + type: + scalar: string - name: io.k8s.api.core.v1.KeyToPath map: fields: @@ -5851,6 +6033,23 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.core.v1.LimitRangeItem elementRelationship: atomic +- name: io.k8s.api.core.v1.LinuxContainerUser + map: + fields: + - name: gid + type: + scalar: numeric + default: 0 + - name: supplementalGroups + type: + list: + elementType: + scalar: numeric + elementRelationship: atomic + - name: uid + type: + scalar: numeric + default: 0 - name: io.k8s.api.core.v1.LoadBalancerIngress map: fields: @@ -6079,6 +6278,12 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.DaemonEndpoint default: {} +- name: io.k8s.api.core.v1.NodeFeatures + map: + fields: + - name: supplementalGroupsPolicy + type: + scalar: boolean - name: io.k8s.api.core.v1.NodeRuntimeHandler map: fields: @@ -6095,6 +6300,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: recursiveReadOnlyMounts type: scalar: boolean + - name: userNamespaces + type: + scalar: boolean - name: io.k8s.api.core.v1.NodeSelector map: fields: @@ -6204,6 +6412,9 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.api.core.v1.NodeDaemonEndpoints default: {} + - name: features + type: + namedType: io.k8s.api.core.v1.NodeFeatures - name: images type: list: @@ -6747,6 +6958,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string + default: "" - name: io.k8s.api.core.v1.PodOS map: fields: @@ -6768,10 +6980,12 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" - - name: source + - name: resourceClaimName type: - namedType: io.k8s.api.core.v1.ClaimSource - default: {} + scalar: string + - name: resourceClaimTemplateName + type: + scalar: string - name: io.k8s.api.core.v1.PodResourceClaimStatus map: fields: @@ -6810,6 +7024,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: runAsUser type: scalar: numeric + - name: seLinuxChangePolicy + type: + scalar: string - name: seLinuxOptions type: namedType: io.k8s.api.core.v1.SELinuxOptions @@ -6822,6 +7039,9 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: numeric elementRelationship: atomic + - name: supplementalGroupsPolicy + type: + scalar: string - name: sysctls type: list: @@ -6947,6 +7167,9 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - name + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements - name: restartPolicy type: scalar: string @@ -7233,6 +7456,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: keyring type: scalar: string + default: /etc/ceph/keyring - name: monitors type: list: @@ -7242,6 +7466,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: pool type: scalar: string + default: rbd - name: readOnly type: scalar: boolean @@ -7251,6 +7476,7 @@ 
var schemaYAML = typed.YAMLObject(`types: - name: user type: scalar: string + default: admin - name: io.k8s.api.core.v1.RBDVolumeSource map: fields: @@ -7264,6 +7490,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: keyring type: scalar: string + default: /etc/ceph/keyring - name: monitors type: list: @@ -7273,6 +7500,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: pool type: scalar: string + default: rbd - name: readOnly type: scalar: boolean @@ -7282,6 +7510,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: user type: scalar: string + default: admin - name: io.k8s.api.core.v1.ReplicationController map: fields: @@ -7375,6 +7604,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + - name: request + type: + scalar: string - name: io.k8s.api.core.v1.ResourceFieldSelector map: fields: @@ -7389,6 +7621,16 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string default: "" elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceHealth + map: + fields: + - name: health + type: + scalar: string + - name: resourceID + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ResourceQuota map: fields: @@ -7461,6 +7703,21 @@ var schemaYAML = typed.YAMLObject(`types: map: elementType: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.ResourceStatus + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: resources + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceHealth + elementRelationship: associative + keys: + - resourceID - name: io.k8s.api.core.v1.SELinuxOptions map: fields: @@ -7482,6 +7739,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: xfs - name: gateway type: scalar: string @@ -7501,6 +7759,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageMode type: scalar: string + default: ThinProvisioned - name: storagePool type: scalar: string @@ -7517,6 +7776,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: fsType type: scalar: string + default: xfs - name: gateway type: scalar: string @@ -7536,6 +7796,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageMode type: scalar: string + default: ThinProvisioned - name: storagePool type: scalar: string @@ -8157,6 +8418,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: hostPath type: namedType: io.k8s.api.core.v1.HostPathVolumeSource + - name: image + type: + namedType: io.k8s.api.core.v1.ImageVolumeSource - name: iscsi type: namedType: io.k8s.api.core.v1.ISCSIVolumeSource @@ -10939,6 +11203,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: number type: scalar: numeric + elementRelationship: atomic - name: io.k8s.api.networking.v1alpha1.IPAddress map: fields: @@ -11040,6 +11305,29 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.networking.v1beta1.HTTPIngressPath elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.IPAddress + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1beta1.IPAddressSpec + default: {} +- name: io.k8s.api.networking.v1beta1.IPAddressSpec + map: + fields: + - name: parentRef + type: + namedType: io.k8s.api.networking.v1beta1.ParentReference - name: io.k8s.api.networking.v1beta1.Ingress map: fields: @@ -11206,21 +11494,77 @@ var schemaYAML = 
typed.YAMLObject(`types: - name: secretName type: scalar: string -- name: io.k8s.api.node.v1.Overhead +- name: io.k8s.api.networking.v1beta1.ParentReference map: fields: - - name: podFixed + - name: group type: - map: - elementType: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity -- name: io.k8s.api.node.v1.RuntimeClass - map: - fields: - - name: apiVersion + scalar: string + - name: name type: scalar: string - - name: handler + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string +- name: io.k8s.api.networking.v1beta1.ServiceCIDR + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1beta1.ServiceCIDRSpec + default: {} + - name: status + type: + namedType: io.k8s.api.networking.v1beta1.ServiceCIDRStatus + default: {} +- name: io.k8s.api.networking.v1beta1.ServiceCIDRSpec + map: + fields: + - name: cidrs + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.networking.v1beta1.ServiceCIDRStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type +- name: io.k8s.api.node.v1.Overhead + map: + fields: + - name: podFixed + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.node.v1.RuntimeClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: handler type: scalar: string default: "" @@ -12010,135 +12354,594 @@ var schemaYAML = typed.YAMLObject(`types: - name: namespace type: scalar: string -- name: io.k8s.api.resource.v1alpha2.AllocationResult +- name: io.k8s.api.resource.v1alpha3.AllocatedDeviceStatus + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: data + type: + namedType: __untyped_atomic_ + - name: device + type: + scalar: string + default: "" + - name: driver + type: + scalar: string + default: "" + - name: networkData + type: + namedType: io.k8s.api.resource.v1alpha3.NetworkDeviceData + - name: pool + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.AllocationResult map: fields: - - name: availableOnNodes + - name: devices + type: + namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationResult + default: {} + - name: nodeSelector type: namedType: io.k8s.api.core.v1.NodeSelector - - name: resourceHandles +- name: io.k8s.api.resource.v1alpha3.BasicDevice + map: + fields: + - name: attributes + type: + map: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceAttribute + - name: capacity + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.resource.v1alpha3.CELDeviceSelector + map: + fields: + - name: expression + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.Device + map: + fields: + - name: basic + type: + namedType: io.k8s.api.resource.v1alpha3.BasicDevice + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration + - name: requests type: list: elementType: - 
namedType: io.k8s.api.resource.v1alpha2.ResourceHandle + scalar: string elementRelationship: atomic - - name: shareable + - name: source + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceAllocationResult + map: + fields: + - name: config + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationConfiguration + elementRelationship: atomic + - name: results + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceAttribute + map: + fields: + - name: bool type: scalar: boolean -- name: io.k8s.api.resource.v1alpha2.DriverAllocationResult + - name: int + type: + scalar: numeric + - name: string + type: + scalar: string + - name: version + type: + scalar: string +- name: io.k8s.api.resource.v1alpha3.DeviceClaim + map: + fields: + - name: config + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration + elementRelationship: atomic + - name: constraints + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceConstraint + elementRelationship: atomic + - name: requests + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceRequest + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceClaimConfiguration map: fields: - - name: namedResources + - name: opaque type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesAllocationResult - - name: vendorRequestParameters + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration + - name: requests type: - namedType: __untyped_atomic_ -- name: io.k8s.api.resource.v1alpha2.DriverRequests + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceClass map: fields: - - name: driverName + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha3.DeviceClassSpec + default: {} +- name: io.k8s.api.resource.v1alpha3.DeviceClassConfiguration + map: + fields: + - name: opaque + type: + namedType: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration +- name: io.k8s.api.resource.v1alpha3.DeviceClassSpec + map: + fields: + - name: config + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceClassConfiguration + elementRelationship: atomic + - name: selectors + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceSelector + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceConstraint + map: + fields: + - name: matchAttribute type: scalar: string - name: requests type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceRequest + scalar: string elementRelationship: atomic - - name: vendorParameters +- name: io.k8s.api.resource.v1alpha3.DeviceRequest + map: + fields: + - name: adminAccess + type: + scalar: boolean + - name: allocationMode + type: + scalar: string + - name: count + type: + scalar: numeric + - name: deviceClassName + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: selectors + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.DeviceSelector + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.DeviceRequestAllocationResult + map: + fields: + 
- name: adminAccess + type: + scalar: boolean + - name: device + type: + scalar: string + default: "" + - name: driver + type: + scalar: string + default: "" + - name: pool + type: + scalar: string + default: "" + - name: request + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.DeviceSelector + map: + fields: + - name: cel + type: + namedType: io.k8s.api.resource.v1alpha3.CELDeviceSelector +- name: io.k8s.api.resource.v1alpha3.NetworkDeviceData + map: + fields: + - name: hardwareAddress + type: + scalar: string + - name: interfaceName + type: + scalar: string + - name: ips + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.resource.v1alpha3.OpaqueDeviceConfiguration + map: + fields: + - name: driver + type: + scalar: string + default: "" + - name: parameters type: namedType: __untyped_atomic_ -- name: io.k8s.api.resource.v1alpha2.NamedResourcesAllocationResult +- name: io.k8s.api.resource.v1alpha3.ResourceClaim + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSpec + default: {} + - name: status + type: + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimStatus + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference map: fields: + - name: apiGroup + type: + scalar: string - name: name type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha2.NamedResourcesAttribute + - name: resource + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1alpha3.ResourceClaimSpec map: fields: - - name: bool + - name: devices type: - scalar: boolean - - name: int + namedType: io.k8s.api.resource.v1alpha3.DeviceClaim + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceClaimStatus + map: + fields: + - name: allocation + type: + namedType: io.k8s.api.resource.v1alpha3.AllocationResult + - name: devices + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.AllocatedDeviceStatus + elementRelationship: associative + keys: + - driver + - device + - pool + - name: reservedFor + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimConsumerReference + elementRelationship: associative + keys: + - uid +- name: io.k8s.api.resource.v1alpha3.ResourceClaimTemplate + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceClaimTemplateSpec + map: + fields: + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSpec + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourcePool + map: + fields: + - name: generation + type: + scalar: numeric + default: 0 + - name: name + type: + scalar: string + default: "" + - name: resourceSliceCount type: scalar: numeric - - name: intSlice + default: 0 +- name: io.k8s.api.resource.v1alpha3.ResourceSlice + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + 
- name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.resource.v1alpha3.ResourceSliceSpec + default: {} +- name: io.k8s.api.resource.v1alpha3.ResourceSliceSpec + map: + fields: + - name: allNodes + type: + scalar: boolean + - name: devices + type: + list: + elementType: + namedType: io.k8s.api.resource.v1alpha3.Device + elementRelationship: atomic + - name: driver + type: + scalar: string + default: "" + - name: nodeName + type: + scalar: string + - name: nodeSelector + type: + namedType: io.k8s.api.core.v1.NodeSelector + - name: pool + type: + namedType: io.k8s.api.resource.v1alpha3.ResourcePool + default: {} +- name: io.k8s.api.resource.v1beta1.AllocatedDeviceStatus + map: + fields: + - name: conditions type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesIntSlice + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: data + type: + namedType: __untyped_atomic_ + - name: device + type: + scalar: string + default: "" + - name: driver + type: + scalar: string + default: "" + - name: networkData + type: + namedType: io.k8s.api.resource.v1beta1.NetworkDeviceData + - name: pool + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1beta1.AllocationResult + map: + fields: + - name: devices + type: + namedType: io.k8s.api.resource.v1beta1.DeviceAllocationResult + default: {} + - name: nodeSelector + type: + namedType: io.k8s.api.core.v1.NodeSelector +- name: io.k8s.api.resource.v1beta1.BasicDevice + map: + fields: + - name: attributes + type: + map: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceAttribute + - name: capacity + type: + map: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceCapacity +- name: io.k8s.api.resource.v1beta1.CELDeviceSelector + map: + fields: + - name: expression + type: + scalar: string + default: "" +- name: io.k8s.api.resource.v1beta1.Device + map: + fields: + - name: basic + type: + namedType: io.k8s.api.resource.v1beta1.BasicDevice - name: name type: scalar: string default: "" - - name: quantity +- name: io.k8s.api.resource.v1beta1.DeviceAllocationConfiguration + map: + fields: + - name: opaque type: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: string + namedType: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration + - name: requests + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: source type: scalar: string - - name: stringSlice + default: "" +- name: io.k8s.api.resource.v1beta1.DeviceAllocationResult + map: + fields: + - name: config + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceAllocationConfiguration + elementRelationship: atomic + - name: results + type: + list: + elementType: + namedType: io.k8s.api.resource.v1beta1.DeviceRequestAllocationResult + elementRelationship: atomic +- name: io.k8s.api.resource.v1beta1.DeviceAttribute + map: + fields: + - name: bool + type: + scalar: boolean + - name: int + type: + scalar: numeric + - name: string type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesStringSlice + scalar: string - name: version type: scalar: string -- name: io.k8s.api.resource.v1alpha2.NamedResourcesFilter +- name: io.k8s.api.resource.v1beta1.DeviceCapacity map: fields: - - name: selector + - name: value type: - scalar: string - default: "" -- name: io.k8s.api.resource.v1alpha2.NamedResourcesInstance + namedType: 
io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.resource.v1beta1.DeviceClaim map: fields: - - name: attributes + - name: config type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesAttribute + namedType: io.k8s.api.resource.v1beta1.DeviceClaimConfiguration elementRelationship: atomic - - name: name - type: - scalar: string - default: "" -- name: io.k8s.api.resource.v1alpha2.NamedResourcesIntSlice - map: - fields: - - name: ints + - name: constraints type: list: elementType: - scalar: numeric + namedType: io.k8s.api.resource.v1beta1.DeviceConstraint elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.NamedResourcesRequest - map: - fields: - - name: selector - type: - scalar: string - default: "" -- name: io.k8s.api.resource.v1alpha2.NamedResourcesResources - map: - fields: - - name: instances + - name: requests type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesInstance + namedType: io.k8s.api.resource.v1beta1.DeviceRequest elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.NamedResourcesStringSlice +- name: io.k8s.api.resource.v1beta1.DeviceClaimConfiguration map: fields: - - name: strings + - name: opaque + type: + namedType: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration + - name: requests type: list: elementType: scalar: string elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContext +- name: io.k8s.api.resource.v1beta1.DeviceClass map: fields: - name: apiVersion @@ -12153,159 +12956,121 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec + namedType: io.k8s.api.resource.v1beta1.DeviceClassSpec default: {} - - name: status +- name: io.k8s.api.resource.v1beta1.DeviceClassConfiguration + map: + fields: + - name: opaque type: - namedType: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus - default: {} -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec + namedType: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration +- name: io.k8s.api.resource.v1beta1.DeviceClassSpec map: fields: - - name: potentialNodes + - name: config type: list: elementType: - scalar: string + namedType: io.k8s.api.resource.v1beta1.DeviceClassConfiguration elementRelationship: atomic - - name: selectedNode - type: - scalar: string -- name: io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus - map: - fields: - - name: resourceClaims + - name: selectors type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus - elementRelationship: associative - keys: - - name -- name: io.k8s.api.resource.v1alpha2.ResourceClaim + namedType: io.k8s.api.resource.v1beta1.DeviceSelector + elementRelationship: atomic +- name: io.k8s.api.resource.v1beta1.DeviceConstraint map: fields: - - name: apiVersion - type: - scalar: string - - name: kind + - name: matchAttribute type: scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec - default: {} - - name: status + - name: requests type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimStatus - default: {} -- name: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.resource.v1beta1.DeviceRequest map: fields: - - name: apiGroup + - name: adminAccess type: - 
scalar: string - - name: name + scalar: boolean + - name: allocationMode type: scalar: string - default: "" - - name: resource + - name: count type: - scalar: string - default: "" - - name: uid + scalar: numeric + - name: deviceClassName type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha2.ResourceClaimParameters - map: - fields: - - name: apiVersion + - name: name type: scalar: string - - name: driverRequests + default: "" + - name: selectors type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.DriverRequests + namedType: io.k8s.api.resource.v1beta1.DeviceSelector elementRelationship: atomic - - name: generatedFrom +- name: io.k8s.api.resource.v1beta1.DeviceRequestAllocationResult + map: + fields: + - name: adminAccess type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - - name: kind + scalar: boolean + - name: device type: scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: shareable - type: - scalar: boolean -- name: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - map: - fields: - - name: apiGroup + default: "" + - name: driver type: scalar: string - - name: kind + default: "" + - name: pool type: scalar: string default: "" - - name: name + - name: request type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus +- name: io.k8s.api.resource.v1beta1.DeviceSelector map: fields: - - name: name + - name: cel + type: + namedType: io.k8s.api.resource.v1beta1.CELDeviceSelector +- name: io.k8s.api.resource.v1beta1.NetworkDeviceData + map: + fields: + - name: hardwareAddress + type: + scalar: string + - name: interfaceName type: scalar: string - - name: unsuitableNodes + - name: ips type: list: elementType: scalar: string elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec +- name: io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration map: fields: - - name: allocationMode - type: - scalar: string - - name: parametersRef - type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference - - name: resourceClassName + - name: driver type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha2.ResourceClaimStatus - map: - fields: - - name: allocation - type: - namedType: io.k8s.api.resource.v1alpha2.AllocationResult - - name: deallocationRequested - type: - scalar: boolean - - name: driverName - type: - scalar: string - - name: reservedFor + - name: parameters type: - list: - elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference - elementRelationship: associative - keys: - - uid -- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplate + namedType: __untyped_atomic_ +- name: io.k8s.api.resource.v1beta1.ResourceClaim map: fields: - name: apiVersion @@ -12320,130 +13085,110 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec - default: {} -- name: io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec - map: - fields: - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + namedType: io.k8s.api.resource.v1beta1.ResourceClaimSpec default: {} - - name: spec + - name: status type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClaimSpec + namedType: io.k8s.api.resource.v1beta1.ResourceClaimStatus default: {} -- name: io.k8s.api.resource.v1alpha2.ResourceClass +- name: 
io.k8s.api.resource.v1beta1.ResourceClaimConsumerReference map: fields: - - name: apiVersion + - name: apiGroup type: scalar: string - - name: driverName + - name: name type: scalar: string default: "" - - name: kind + - name: resource type: scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: parametersRef - type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference - - name: structuredParameters + default: "" + - name: uid type: - scalar: boolean - - name: suitableNodes + scalar: string + default: "" +- name: io.k8s.api.resource.v1beta1.ResourceClaimSpec + map: + fields: + - name: devices type: - namedType: io.k8s.api.core.v1.NodeSelector -- name: io.k8s.api.resource.v1alpha2.ResourceClassParameters + namedType: io.k8s.api.resource.v1beta1.DeviceClaim + default: {} +- name: io.k8s.api.resource.v1beta1.ResourceClaimStatus map: fields: - - name: apiVersion + - name: allocation type: - scalar: string - - name: filters + namedType: io.k8s.api.resource.v1beta1.AllocationResult + - name: devices type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.ResourceFilter - elementRelationship: atomic - - name: generatedFrom - type: - namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: vendorParameters + namedType: io.k8s.api.resource.v1beta1.AllocatedDeviceStatus + elementRelationship: associative + keys: + - driver + - device + - pool + - name: reservedFor type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.VendorParameters - elementRelationship: atomic -- name: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference + namedType: io.k8s.api.resource.v1beta1.ResourceClaimConsumerReference + elementRelationship: associative + keys: + - uid +- name: io.k8s.api.resource.v1beta1.ResourceClaimTemplate map: fields: - - name: apiGroup + - name: apiVersion type: scalar: string - name: kind type: scalar: string - default: "" - - name: name + - name: metadata type: - scalar: string - default: "" - - name: namespace + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec type: - scalar: string -- name: io.k8s.api.resource.v1alpha2.ResourceFilter + namedType: io.k8s.api.resource.v1beta1.ResourceClaimTemplateSpec + default: {} +- name: io.k8s.api.resource.v1beta1.ResourceClaimTemplateSpec map: fields: - - name: driverName + - name: metadata type: - scalar: string - - name: namedResources + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesFilter -- name: io.k8s.api.resource.v1alpha2.ResourceHandle + namedType: io.k8s.api.resource.v1beta1.ResourceClaimSpec + default: {} +- name: io.k8s.api.resource.v1beta1.ResourcePool map: fields: - - name: data + - name: generation type: - scalar: string - - name: driverName + scalar: numeric + default: 0 + - name: name type: scalar: string - - name: structuredData - type: - namedType: io.k8s.api.resource.v1alpha2.StructuredResourceHandle -- name: io.k8s.api.resource.v1alpha2.ResourceRequest - map: - fields: - - name: namedResources - type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesRequest - - name: vendorParameters + default: "" + - name: resourceSliceCount type: - namedType: __untyped_atomic_ -- name: 
io.k8s.api.resource.v1alpha2.ResourceSlice + scalar: numeric + default: 0 +- name: io.k8s.api.resource.v1beta1.ResourceSlice map: fields: - name: apiVersion type: scalar: string - - name: driverName - type: - scalar: string - default: "" - name: kind type: scalar: string @@ -12451,39 +13196,36 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} - - name: namedResources - type: - namedType: io.k8s.api.resource.v1alpha2.NamedResourcesResources - - name: nodeName + - name: spec type: - scalar: string -- name: io.k8s.api.resource.v1alpha2.StructuredResourceHandle + namedType: io.k8s.api.resource.v1beta1.ResourceSliceSpec + default: {} +- name: io.k8s.api.resource.v1beta1.ResourceSliceSpec map: fields: - - name: nodeName + - name: allNodes type: - scalar: string - - name: results + scalar: boolean + - name: devices type: list: elementType: - namedType: io.k8s.api.resource.v1alpha2.DriverAllocationResult + namedType: io.k8s.api.resource.v1beta1.Device elementRelationship: atomic - - name: vendorClaimParameters - type: - namedType: __untyped_atomic_ - - name: vendorClassParameters + - name: driver type: - namedType: __untyped_atomic_ -- name: io.k8s.api.resource.v1alpha2.VendorParameters - map: - fields: - - name: driverName + scalar: string + default: "" + - name: nodeName type: scalar: string - - name: parameters + - name: nodeSelector type: - namedType: __untyped_atomic_ + namedType: io.k8s.api.core.v1.NodeSelector + - name: pool + type: + namedType: io.k8s.api.resource.v1beta1.ResourcePool + default: {} - name: io.k8s.api.scheduling.v1.PriorityClass map: fields: @@ -13177,6 +13919,28 @@ var schemaYAML = typed.YAMLObject(`types: - name: detachError type: namedType: io.k8s.api.storage.v1beta1.VolumeError +- name: io.k8s.api.storage.v1beta1.VolumeAttributesClass + map: + fields: + - name: apiVersion + type: + scalar: string + - name: driverName + type: + scalar: string + default: "" + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: parameters + type: + map: + elementType: + scalar: string - name: io.k8s.api.storage.v1beta1.VolumeError map: fields: @@ -13311,6 +14075,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: gracePeriodSeconds type: scalar: numeric + - name: ignoreStoreReadErrorWithClusterBreakingPotential + type: + scalar: boolean - name: kind type: scalar: string diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go index c84102cdd..69063df65 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go @@ -19,21 +19,21 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ConditionApplyConfiguration represents an declarative configuration of the Condition type for use +// ConditionApplyConfiguration represents a declarative configuration of the Condition type for use // with apply. 
type ConditionApplyConfiguration struct { - Type *string `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *string `json:"type,omitempty"` + Status *metav1.ConditionStatus `json:"status,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// ConditionApplyConfiguration constructs an declarative configuration of the Condition type for use with +// ConditionApplyConfiguration constructs a declarative configuration of the Condition type for use with // apply. func Condition() *ConditionApplyConfiguration { return &ConditionApplyConfiguration{} @@ -50,7 +50,7 @@ func (b *ConditionApplyConfiguration) WithType(value string) *ConditionApplyConf // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *ConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *ConditionApplyConfiguration { +func (b *ConditionApplyConfiguration) WithStatus(value metav1.ConditionStatus) *ConditionApplyConfiguration { b.Status = &value return b } @@ -66,7 +66,7 @@ func (b *ConditionApplyConfiguration) WithObservedGeneration(value int64) *Condi // WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastTransitionTime field is set to the value of the last call. -func (b *ConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *ConditionApplyConfiguration { +func (b *ConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *ConditionApplyConfiguration { b.LastTransitionTime = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go index 7a1d23114..ab398ef56 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go @@ -22,18 +22,19 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// DeleteOptionsApplyConfiguration represents an declarative configuration of the DeleteOptions type for use +// DeleteOptionsApplyConfiguration represents a declarative configuration of the DeleteOptions type for use // with apply. 
type DeleteOptionsApplyConfiguration struct { - TypeMetaApplyConfiguration `json:",inline"` - GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"` - Preconditions *PreconditionsApplyConfiguration `json:"preconditions,omitempty"` - OrphanDependents *bool `json:"orphanDependents,omitempty"` - PropagationPolicy *metav1.DeletionPropagation `json:"propagationPolicy,omitempty"` - DryRun []string `json:"dryRun,omitempty"` + TypeMetaApplyConfiguration `json:",inline"` + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"` + Preconditions *PreconditionsApplyConfiguration `json:"preconditions,omitempty"` + OrphanDependents *bool `json:"orphanDependents,omitempty"` + PropagationPolicy *metav1.DeletionPropagation `json:"propagationPolicy,omitempty"` + DryRun []string `json:"dryRun,omitempty"` + IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty"` } -// DeleteOptionsApplyConfiguration constructs an declarative configuration of the DeleteOptions type for use with +// DeleteOptionsApplyConfiguration constructs a declarative configuration of the DeleteOptions type for use with // apply. func DeleteOptions() *DeleteOptionsApplyConfiguration { b := &DeleteOptionsApplyConfiguration{} @@ -46,7 +47,7 @@ func DeleteOptions() *DeleteOptionsApplyConfiguration { // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *DeleteOptionsApplyConfiguration) WithKind(value string) *DeleteOptionsApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -54,7 +55,7 @@ func (b *DeleteOptionsApplyConfiguration) WithKind(value string) *DeleteOptionsA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *DeleteOptionsApplyConfiguration) WithAPIVersion(value string) *DeleteOptionsApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -99,3 +100,11 @@ func (b *DeleteOptionsApplyConfiguration) WithDryRun(values ...string) *DeleteOp } return b } + +// WithIgnoreStoreReadErrorWithClusterBreakingPotential sets the IgnoreStoreReadErrorWithClusterBreakingPotential field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IgnoreStoreReadErrorWithClusterBreakingPotential field is set to the value of the last call. +func (b *DeleteOptionsApplyConfiguration) WithIgnoreStoreReadErrorWithClusterBreakingPotential(value bool) *DeleteOptionsApplyConfiguration { + b.IgnoreStoreReadErrorWithClusterBreakingPotential = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go index 6d24bc363..1f33c94e0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// LabelSelectorApplyConfiguration represents an declarative configuration of the LabelSelector type for use +// LabelSelectorApplyConfiguration represents a declarative configuration of the LabelSelector type for use // with apply. type LabelSelectorApplyConfiguration struct { MatchLabels map[string]string `json:"matchLabels,omitempty"` MatchExpressions []LabelSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"` } -// LabelSelectorApplyConfiguration constructs an declarative configuration of the LabelSelector type for use with +// LabelSelectorApplyConfiguration constructs a declarative configuration of the LabelSelector type for use with // apply. func LabelSelector() *LabelSelectorApplyConfiguration { return &LabelSelectorApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go index ff70f365e..c8b015c98 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// LabelSelectorRequirementApplyConfiguration represents an declarative configuration of the LabelSelectorRequirement type for use +// LabelSelectorRequirementApplyConfiguration represents a declarative configuration of the LabelSelectorRequirement type for use // with apply. type LabelSelectorRequirementApplyConfiguration struct { - Key *string `json:"key,omitempty"` - Operator *v1.LabelSelectorOperator `json:"operator,omitempty"` - Values []string `json:"values,omitempty"` + Key *string `json:"key,omitempty"` + Operator *metav1.LabelSelectorOperator `json:"operator,omitempty"` + Values []string `json:"values,omitempty"` } -// LabelSelectorRequirementApplyConfiguration constructs an declarative configuration of the LabelSelectorRequirement type for use with +// LabelSelectorRequirementApplyConfiguration constructs a declarative configuration of the LabelSelectorRequirement type for use with // apply. func LabelSelectorRequirement() *LabelSelectorRequirementApplyConfiguration { return &LabelSelectorRequirementApplyConfiguration{} @@ -47,7 +47,7 @@ func (b *LabelSelectorRequirementApplyConfiguration) WithKey(value string) *Labe // WithOperator sets the Operator field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operator field is set to the value of the last call. 
-func (b *LabelSelectorRequirementApplyConfiguration) WithOperator(value v1.LabelSelectorOperator) *LabelSelectorRequirementApplyConfiguration { +func (b *LabelSelectorRequirementApplyConfiguration) WithOperator(value metav1.LabelSelectorOperator) *LabelSelectorRequirementApplyConfiguration { b.Operator = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go index f4d7e2681..7175537c3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go @@ -19,22 +19,22 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ManagedFieldsEntryApplyConfiguration represents an declarative configuration of the ManagedFieldsEntry type for use +// ManagedFieldsEntryApplyConfiguration represents a declarative configuration of the ManagedFieldsEntry type for use // with apply. type ManagedFieldsEntryApplyConfiguration struct { - Manager *string `json:"manager,omitempty"` - Operation *v1.ManagedFieldsOperationType `json:"operation,omitempty"` - APIVersion *string `json:"apiVersion,omitempty"` - Time *v1.Time `json:"time,omitempty"` - FieldsType *string `json:"fieldsType,omitempty"` - FieldsV1 *v1.FieldsV1 `json:"fieldsV1,omitempty"` - Subresource *string `json:"subresource,omitempty"` + Manager *string `json:"manager,omitempty"` + Operation *metav1.ManagedFieldsOperationType `json:"operation,omitempty"` + APIVersion *string `json:"apiVersion,omitempty"` + Time *metav1.Time `json:"time,omitempty"` + FieldsType *string `json:"fieldsType,omitempty"` + FieldsV1 *metav1.FieldsV1 `json:"fieldsV1,omitempty"` + Subresource *string `json:"subresource,omitempty"` } -// ManagedFieldsEntryApplyConfiguration constructs an declarative configuration of the ManagedFieldsEntry type for use with +// ManagedFieldsEntryApplyConfiguration constructs a declarative configuration of the ManagedFieldsEntry type for use with // apply. func ManagedFieldsEntry() *ManagedFieldsEntryApplyConfiguration { return &ManagedFieldsEntryApplyConfiguration{} @@ -51,7 +51,7 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithManager(value string) *Manage // WithOperation sets the Operation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Operation field is set to the value of the last call. -func (b *ManagedFieldsEntryApplyConfiguration) WithOperation(value v1.ManagedFieldsOperationType) *ManagedFieldsEntryApplyConfiguration { +func (b *ManagedFieldsEntryApplyConfiguration) WithOperation(value metav1.ManagedFieldsOperationType) *ManagedFieldsEntryApplyConfiguration { b.Operation = &value return b } @@ -67,7 +67,7 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithAPIVersion(value string) *Man // WithTime sets the Time field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Time field is set to the value of the last call. 
-func (b *ManagedFieldsEntryApplyConfiguration) WithTime(value v1.Time) *ManagedFieldsEntryApplyConfiguration { +func (b *ManagedFieldsEntryApplyConfiguration) WithTime(value metav1.Time) *ManagedFieldsEntryApplyConfiguration { b.Time = &value return b } @@ -83,7 +83,7 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsType(value string) *Man // WithFieldsV1 sets the FieldsV1 field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FieldsV1 field is set to the value of the last call. -func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsV1(value v1.FieldsV1) *ManagedFieldsEntryApplyConfiguration { +func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsV1(value metav1.FieldsV1) *ManagedFieldsEntryApplyConfiguration { b.FieldsV1 = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go index 9b290e968..9b98d2209 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go @@ -19,11 +19,11 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" ) -// ObjectMetaApplyConfiguration represents an declarative configuration of the ObjectMeta type for use +// ObjectMetaApplyConfiguration represents a declarative configuration of the ObjectMeta type for use // with apply. type ObjectMetaApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -32,8 +32,8 @@ type ObjectMetaApplyConfiguration struct { UID *types.UID `json:"uid,omitempty"` ResourceVersion *string `json:"resourceVersion,omitempty"` Generation *int64 `json:"generation,omitempty"` - CreationTimestamp *v1.Time `json:"creationTimestamp,omitempty"` - DeletionTimestamp *v1.Time `json:"deletionTimestamp,omitempty"` + CreationTimestamp *metav1.Time `json:"creationTimestamp,omitempty"` + DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty"` DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` Labels map[string]string `json:"labels,omitempty"` Annotations map[string]string `json:"annotations,omitempty"` @@ -41,7 +41,7 @@ type ObjectMetaApplyConfiguration struct { Finalizers []string `json:"finalizers,omitempty"` } -// ObjectMetaApplyConfiguration constructs an declarative configuration of the ObjectMeta type for use with +// ObjectMetaApplyConfiguration constructs a declarative configuration of the ObjectMeta type for use with // apply. func ObjectMeta() *ObjectMetaApplyConfiguration { return &ObjectMetaApplyConfiguration{} @@ -98,7 +98,7 @@ func (b *ObjectMetaApplyConfiguration) WithGeneration(value int64) *ObjectMetaAp // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ObjectMetaApplyConfiguration) WithCreationTimestamp(value v1.Time) *ObjectMetaApplyConfiguration { +func (b *ObjectMetaApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ObjectMetaApplyConfiguration { b.CreationTimestamp = &value return b } @@ -106,7 +106,7 @@ func (b *ObjectMetaApplyConfiguration) WithCreationTimestamp(value v1.Time) *Obj // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ObjectMetaApplyConfiguration) WithDeletionTimestamp(value v1.Time) *ObjectMetaApplyConfiguration { +func (b *ObjectMetaApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ObjectMetaApplyConfiguration { b.DeletionTimestamp = &value return b } @@ -169,3 +169,8 @@ func (b *ObjectMetaApplyConfiguration) WithFinalizers(values ...string) *ObjectM } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ObjectMetaApplyConfiguration) GetName() *string { + return b.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go index b3117d6a4..277615232 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go @@ -22,7 +22,7 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// OwnerReferenceApplyConfiguration represents an declarative configuration of the OwnerReference type for use +// OwnerReferenceApplyConfiguration represents a declarative configuration of the OwnerReference type for use // with apply. type OwnerReferenceApplyConfiguration struct { APIVersion *string `json:"apiVersion,omitempty"` @@ -33,7 +33,7 @@ type OwnerReferenceApplyConfiguration struct { BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty"` } -// OwnerReferenceApplyConfiguration constructs an declarative configuration of the OwnerReference type for use with +// OwnerReferenceApplyConfiguration constructs a declarative configuration of the OwnerReference type for use with // apply. func OwnerReference() *OwnerReferenceApplyConfiguration { return &OwnerReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go index f627733f1..8f8b6c6b3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go @@ -22,14 +22,14 @@ import ( types "k8s.io/apimachinery/pkg/types" ) -// PreconditionsApplyConfiguration represents an declarative configuration of the Preconditions type for use +// PreconditionsApplyConfiguration represents a declarative configuration of the Preconditions type for use // with apply. type PreconditionsApplyConfiguration struct { UID *types.UID `json:"uid,omitempty"` ResourceVersion *string `json:"resourceVersion,omitempty"` } -// PreconditionsApplyConfiguration constructs an declarative configuration of the Preconditions type for use with +// PreconditionsApplyConfiguration constructs a declarative configuration of the Preconditions type for use with // apply. 
func Preconditions() *PreconditionsApplyConfiguration { return &PreconditionsApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go index 877b0890e..979044384 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// TypeMetaApplyConfiguration represents an declarative configuration of the TypeMeta type for use +// TypeMetaApplyConfiguration represents a declarative configuration of the TypeMeta type for use // with apply. type TypeMetaApplyConfiguration struct { Kind *string `json:"kind,omitempty"` APIVersion *string `json:"apiVersion,omitempty"` } -// TypeMetaApplyConfiguration constructs an declarative configuration of the TypeMeta type for use with +// TypeMetaApplyConfiguration constructs a declarative configuration of the TypeMeta type for use with // apply. func TypeMeta() *TypeMetaApplyConfiguration { return &TypeMetaApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go index 07b6a67f6..96f9b1f56 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/networking/v1" + networkingv1 "k8s.io/api/networking/v1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` - PathType *v1.PathType `json:"pathType,omitempty"` + PathType *networkingv1.PathType `json:"pathType,omitempty"` Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} @@ -47,7 +47,7 @@ func (b *HTTPIngressPathApplyConfiguration) WithPath(value string) *HTTPIngressP // WithPathType sets the PathType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PathType field is set to the value of the last call. 
-func (b *HTTPIngressPathApplyConfiguration) WithPathType(value v1.PathType) *HTTPIngressPathApplyConfiguration { +func (b *HTTPIngressPathApplyConfiguration) WithPathType(value networkingv1.PathType) *HTTPIngressPathApplyConfiguration { b.PathType = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go index fef529d69..ad9a7a677 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go index b5146902d..9e275f24f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apinetworkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + networkingv1 "k8s.io/api/networking/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"` - Status *IngressStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"` + Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. 
func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -58,18 +58,18 @@ func Ingress(name, namespace string) *IngressApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractIngress(ingress *apinetworkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { +func ExtractIngress(ingress *networkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { return extractIngress(ingress, fieldManager, "") } // ExtractIngressStatus is the same as ExtractIngress except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractIngressStatus(ingress *apinetworkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { +func ExtractIngressStatus(ingress *networkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) { return extractIngress(ingress, fieldManager, "status") } -func extractIngress(ingress *apinetworkingv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) { +func extractIngress(ingress *networkingv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) { b := &IngressApplyConfiguration{} err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1.Ingress"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractIngress(ingress *apinetworkingv1.Ingress, fieldManager string, subre // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApply // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressAp // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration { +func (b *IngressApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration { +func (b *IngressApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *In // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. 
func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *Ingre // overwriting an existing map entries in Annotations field with the same key. func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IngressApplyConfiguration { +func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *IngressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go index 575713599..b014b7bee 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go @@ -22,14 +22,14 @@ import ( corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. type IngressBackendApplyConfiguration struct { Service *IngressServiceBackendApplyConfiguration `json:"service,omitempty"` Resource *corev1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go index e33d0b2d9..f723b5d70 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - apinetworkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + networkingv1 "k8s.io/api/networking/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressClassApplyConfiguration represents an declarative configuration of the IngressClass type for use +// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use // with apply. type IngressClassApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` } -// IngressClass constructs an declarative configuration of the IngressClass type for use with +// IngressClass constructs a declarative configuration of the IngressClass type for use with // apply. 
func IngressClass(name string) *IngressClassApplyConfiguration { b := &IngressClassApplyConfiguration{} @@ -56,18 +56,18 @@ func IngressClass(name string) *IngressClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { +func ExtractIngressClass(ingressClass *networkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { return extractIngressClass(ingressClass, fieldManager, "") } // ExtractIngressClassStatus is the same as ExtractIngressClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractIngressClassStatus(ingressClass *apinetworkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { +func ExtractIngressClassStatus(ingressClass *networkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) { return extractIngressClass(ingressClass, fieldManager, "status") } -func extractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) { +func extractIngressClass(ingressClass *networkingv1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) { b := &IngressClassApplyConfiguration{} err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1.IngressClass"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManage // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressCl // If called multiple times, the Name field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApp // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *Ingress // If called multiple times, the Namespace field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressCla // If called multiple times, the UID field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *Ingr // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithGeneration(value int64) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressClassApplyConfiguration { +func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressClassApplyConfiguration { +func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *IngressClassApplyConfiguration) WithAnnotations(entries map[string]string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *IngressClassApplyConfiguration) WithAnnotations(entries map[string]stri // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IngressClassApplyConfiguration { +func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *IngressClassApplyConfiguration) WithFinalizers(values ...string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *IngressClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -245,3 +245,9 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *IngressClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go index a020d3a8d..0dba1ebc5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressClassParametersReferenceApplyConfiguration represents an declarative configuration of the IngressClassParametersReference type for use +// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use // with apply. type IngressClassParametersReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -28,7 +28,7 @@ type IngressClassParametersReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// IngressClassParametersReferenceApplyConfiguration constructs an declarative configuration of the IngressClassParametersReference type for use with +// IngressClassParametersReferenceApplyConfiguration constructs a declarative configuration of the IngressClassParametersReference type for use with // apply. 
func IngressClassParametersReference() *IngressClassParametersReferenceApplyConfiguration { return &IngressClassParametersReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go index ec0423e70..23e848434 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressClassSpecApplyConfiguration represents an declarative configuration of the IngressClassSpec type for use +// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use // with apply. type IngressClassSpecApplyConfiguration struct { Controller *string `json:"controller,omitempty"` Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"` } -// IngressClassSpecApplyConfiguration constructs an declarative configuration of the IngressClassSpec type for use with +// IngressClassSpecApplyConfiguration constructs a declarative configuration of the IngressClassSpec type for use with // apply. func IngressClassSpec() *IngressClassSpecApplyConfiguration { return &IngressClassSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go index 444275a12..d0feb44da 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use // with apply. type IngressLoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct { Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` } -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with // apply. func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { return &IngressLoadBalancerIngressApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go index 8e01a301a..08c841f06 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. 
package v1 -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use // with apply. type IngressLoadBalancerStatusApplyConfiguration struct { Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with // apply. func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { return &IngressLoadBalancerStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go index 82b5babd9..84ba243ab 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use // with apply. type IngressPortStatusApplyConfiguration struct { - Port *int32 `json:"port,omitempty"` - Protocol *v1.Protocol `json:"protocol,omitempty"` - Error *string `json:"error,omitempty"` + Port *int32 `json:"port,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` + Error *string `json:"error,omitempty"` } -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with // apply. func IngressPortStatus() *IngressPortStatusApplyConfiguration { return &IngressPortStatusApplyConfiguration{} @@ -47,7 +47,7 @@ func (b *IngressPortStatusApplyConfiguration) WithPort(value int32) *IngressPort // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. -func (b *IngressPortStatusApplyConfiguration) WithProtocol(value v1.Protocol) *IngressPortStatusApplyConfiguration { +func (b *IngressPortStatusApplyConfiguration) WithProtocol(value corev1.Protocol) *IngressPortStatusApplyConfiguration { b.Protocol = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go index 8153e88fe..20a1816bf 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} @@ -43,6 +43,6 @@ func (b *IngressRuleApplyConfiguration) WithHost(value string) *IngressRuleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTP field is set to the value of the last call. func (b *IngressRuleApplyConfiguration) WithHTTP(value *HTTPIngressRuleValueApplyConfiguration) *IngressRuleApplyConfiguration { - b.HTTP = value + b.IngressRuleValueApplyConfiguration.HTTP = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go index d0e094387..1e13e378b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go index 399739631..07876afd1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IngressServiceBackendApplyConfiguration represents an declarative configuration of the IngressServiceBackend type for use +// IngressServiceBackendApplyConfiguration represents a declarative configuration of the IngressServiceBackend type for use // with apply. 
type IngressServiceBackendApplyConfiguration struct { Name *string `json:"name,omitempty"` Port *ServiceBackendPortApplyConfiguration `json:"port,omitempty"` } -// IngressServiceBackendApplyConfiguration constructs an declarative configuration of the IngressServiceBackend type for use with +// IngressServiceBackendApplyConfiguration constructs a declarative configuration of the IngressServiceBackend type for use with // apply. func IngressServiceBackend() *IngressServiceBackendApplyConfiguration { return &IngressServiceBackendApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go index 635514ecf..0572153aa 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go index 7131bf8d0..bd1327c93 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go index 4d8d369f7..44092503f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. 
package v1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go index 1efd6edfd..f3447a8f1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// IPBlockApplyConfiguration represents an declarative configuration of the IPBlock type for use +// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use // with apply. type IPBlockApplyConfiguration struct { CIDR *string `json:"cidr,omitempty"` Except []string `json:"except,omitempty"` } -// IPBlockApplyConfiguration constructs an declarative configuration of the IPBlock type for use with +// IPBlockApplyConfiguration constructs a declarative configuration of the IPBlock type for use with // apply. func IPBlock() *IPBlockApplyConfiguration { return &IPBlockApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go index 409507310..e8da1be06 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - apinetworkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + networkingv1 "k8s.io/api/networking/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyApplyConfiguration represents an declarative configuration of the NetworkPolicy type for use +// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use // with apply. type NetworkPolicyApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"` } -// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with +// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with // apply. 
func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { b := &NetworkPolicyApplyConfiguration{} @@ -57,18 +57,18 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { +func ExtractNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { return extractNetworkPolicy(networkPolicy, fieldManager, "") } // ExtractNetworkPolicyStatus is the same as ExtractNetworkPolicy except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractNetworkPolicyStatus(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { +func ExtractNetworkPolicyStatus(networkPolicy *networkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) { return extractNetworkPolicy(networkPolicy, fieldManager, "status") } -func extractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) { +func extractNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) { b := &NetworkPolicyApplyConfiguration{} err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.networking.v1.NetworkPolicy"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldMan // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *NetworkPolicyApplyConfiguration) WithKind(value string) *NetworkPolicyA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkPolicyApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *NetworkPolicyApplyConfiguration) WithAPIVersion(value string) *NetworkP // If called multiple times, the Name field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *NetworkPolicyApplyConfiguration) WithName(value string) *NetworkPolicyA // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *NetworkPolicyApplyConfiguration) WithGenerateName(value string) *Networ // If called multiple times, the Namespace field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *NetworkPolicyApplyConfiguration) WithNamespace(value string) *NetworkPo // If called multiple times, the UID field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *NetworkPolicyApplyConfiguration) WithUID(value types.UID) *NetworkPolic // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *NetworkPolicyApplyConfiguration) WithResourceVersion(value string) *Net // If called multiple times, the Generation field is set to the value of the last call. func (b *NetworkPolicyApplyConfiguration) WithGeneration(value int64) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration { +func (b *NetworkPolicyApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *NetworkPolicyApplyConfiguration { +func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *NetworkPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *NetworkPolicyApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *NetworkPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *NetworkPolicyApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *NetworkPolicyApplyConfiguration { +func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *NetworkPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *NetworkPolicyApplyConfiguration) WithFinalizers(values ...string) *NetworkPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *NetworkPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -247,3 +247,9 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *NetworkPolicyApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go index e5751c441..46e2706ec 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NetworkPolicyEgressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyEgressRule type for use +// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use // with apply. type NetworkPolicyEgressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"` } -// NetworkPolicyEgressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyEgressRule type for use with +// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with // apply. 
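// For context: the NetworkPolicy hunks above switch the generated method bodies from
// promoted-field access (b.Kind, b.Labels, ...) to explicit embedded-struct selectors
// (b.TypeMetaApplyConfiguration.Kind, b.ObjectMetaApplyConfiguration.Labels, ...) and add a
// GetName accessor. Caller-visible behavior of the builder chain stays the same; a minimal
// sketch, assuming the hypothetical alias networkingv1ac for the generated package:
//
//	np := networkingv1ac.NetworkPolicy("deny-egress", "default").
//		WithLabels(map[string]string{"app": "demo"})
//	_ = np.GetName() // *string set to "deny-egress" by the constructor; accessor added in this change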
func NetworkPolicyEgressRule() *NetworkPolicyEgressRuleApplyConfiguration { return &NetworkPolicyEgressRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go index 630fe1fab..6e9875978 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// NetworkPolicyIngressRuleApplyConfiguration represents an declarative configuration of the NetworkPolicyIngressRule type for use +// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use // with apply. type NetworkPolicyIngressRuleApplyConfiguration struct { Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"` From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"` } -// NetworkPolicyIngressRuleApplyConfiguration constructs an declarative configuration of the NetworkPolicyIngressRule type for use with +// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with // apply. func NetworkPolicyIngressRule() *NetworkPolicyIngressRuleApplyConfiguration { return &NetworkPolicyIngressRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go index 909b651c0..716ceeeef 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go @@ -19,18 +19,18 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicyPeerApplyConfiguration represents an declarative configuration of the NetworkPolicyPeer type for use +// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use // with apply. type NetworkPolicyPeerApplyConfiguration struct { - PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` + PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` + NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` + IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"` } -// NetworkPolicyPeerApplyConfiguration constructs an declarative configuration of the NetworkPolicyPeer type for use with +// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with // apply. 
func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration { return &NetworkPolicyPeerApplyConfiguration{} @@ -39,7 +39,7 @@ func NetworkPolicyPeer() *NetworkPolicyPeerApplyConfiguration { // WithPodSelector sets the PodSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodSelector field is set to the value of the last call. -func (b *NetworkPolicyPeerApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration { +func (b *NetworkPolicyPeerApplyConfiguration) WithPodSelector(value *metav1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration { b.PodSelector = value return b } @@ -47,7 +47,7 @@ func (b *NetworkPolicyPeerApplyConfiguration) WithPodSelector(value *v1.LabelSel // WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *NetworkPolicyPeerApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration { +func (b *NetworkPolicyPeerApplyConfiguration) WithNamespaceSelector(value *metav1.LabelSelectorApplyConfiguration) *NetworkPolicyPeerApplyConfiguration { b.NamespaceSelector = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go index 73dbed1d8..2ded0aecf 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go @@ -19,19 +19,19 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// NetworkPolicyPortApplyConfiguration represents an declarative configuration of the NetworkPolicyPort type for use +// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use // with apply. type NetworkPolicyPortApplyConfiguration struct { - Protocol *v1.Protocol `json:"protocol,omitempty"` + Protocol *corev1.Protocol `json:"protocol,omitempty"` Port *intstr.IntOrString `json:"port,omitempty"` EndPort *int32 `json:"endPort,omitempty"` } -// NetworkPolicyPortApplyConfiguration constructs an declarative configuration of the NetworkPolicyPort type for use with +// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with // apply. func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration { return &NetworkPolicyPortApplyConfiguration{} @@ -40,7 +40,7 @@ func NetworkPolicyPort() *NetworkPolicyPortApplyConfiguration { // WithProtocol sets the Protocol field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Protocol field is set to the value of the last call. 
-func (b *NetworkPolicyPortApplyConfiguration) WithProtocol(value v1.Protocol) *NetworkPolicyPortApplyConfiguration { +func (b *NetworkPolicyPortApplyConfiguration) WithProtocol(value corev1.Protocol) *NetworkPolicyPortApplyConfiguration { b.Protocol = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go index 882d8233a..48369b921 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go @@ -19,20 +19,20 @@ limitations under the License. package v1 import ( - apinetworkingv1 "k8s.io/api/networking/v1" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// NetworkPolicySpecApplyConfiguration represents an declarative configuration of the NetworkPolicySpec type for use +// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use // with apply. type NetworkPolicySpecApplyConfiguration struct { - PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` + PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"` Ingress []NetworkPolicyIngressRuleApplyConfiguration `json:"ingress,omitempty"` Egress []NetworkPolicyEgressRuleApplyConfiguration `json:"egress,omitempty"` - PolicyTypes []apinetworkingv1.PolicyType `json:"policyTypes,omitempty"` + PolicyTypes []networkingv1.PolicyType `json:"policyTypes,omitempty"` } -// NetworkPolicySpecApplyConfiguration constructs an declarative configuration of the NetworkPolicySpec type for use with +// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with // apply. func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration { return &NetworkPolicySpecApplyConfiguration{} @@ -41,7 +41,7 @@ func NetworkPolicySpec() *NetworkPolicySpecApplyConfiguration { // WithPodSelector sets the PodSelector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodSelector field is set to the value of the last call. -func (b *NetworkPolicySpecApplyConfiguration) WithPodSelector(value *v1.LabelSelectorApplyConfiguration) *NetworkPolicySpecApplyConfiguration { +func (b *NetworkPolicySpecApplyConfiguration) WithPodSelector(value *metav1.LabelSelectorApplyConfiguration) *NetworkPolicySpecApplyConfiguration { b.PodSelector = value return b } @@ -75,7 +75,7 @@ func (b *NetworkPolicySpecApplyConfiguration) WithEgress(values ...*NetworkPolic // WithPolicyTypes adds the given value to the PolicyTypes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the PolicyTypes field. 
-func (b *NetworkPolicySpecApplyConfiguration) WithPolicyTypes(values ...apinetworkingv1.PolicyType) *NetworkPolicySpecApplyConfiguration { +func (b *NetworkPolicySpecApplyConfiguration) WithPolicyTypes(values ...networkingv1.PolicyType) *NetworkPolicySpecApplyConfiguration { for i := range values { b.PolicyTypes = append(b.PolicyTypes, values[i]) } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go index ec278960c..517f97483 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// ServiceBackendPortApplyConfiguration represents an declarative configuration of the ServiceBackendPort type for use +// ServiceBackendPortApplyConfiguration represents a declarative configuration of the ServiceBackendPort type for use // with apply. type ServiceBackendPortApplyConfiguration struct { Name *string `json:"name,omitempty"` Number *int32 `json:"number,omitempty"` } -// ServiceBackendPortApplyConfiguration constructs an declarative configuration of the ServiceBackendPort type for use with +// ServiceBackendPortApplyConfiguration constructs a declarative configuration of the ServiceBackendPort type for use with // apply. func ServiceBackendPort() *ServiceBackendPortApplyConfiguration { return &ServiceBackendPortApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go index da6822111..cc7880992 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IPAddressApplyConfiguration represents an declarative configuration of the IPAddress type for use +// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use // with apply. type IPAddressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type IPAddressApplyConfiguration struct { Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` } -// IPAddress constructs an declarative configuration of the IPAddress type for use with +// IPAddress constructs a declarative configuration of the IPAddress type for use with // apply. func IPAddress(name string) *IPAddressApplyConfiguration { b := &IPAddressApplyConfiguration{} @@ -84,7 +84,7 @@ func extractIPAddress(iPAddress *networkingv1alpha1.IPAddress, fieldManager stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
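// For context: with the import renames in networkpolicyport.go and networkpolicyspec.go, the
// parameter types now name their source packages (corev1.Protocol, networkingv1.PolicyType)
// instead of a bare v1 alias; the signatures are otherwise unchanged. A small sketch combining
// the two builders, assuming hypothetical aliases corev1 (k8s.io/api/core/v1), networkingv1
// (k8s.io/api/networking/v1), networkingv1ac (the generated package) and the apimachinery
// intstr helpers:
//
//	port := networkingv1ac.NetworkPolicyPort().
//		WithProtocol(corev1.ProtocolUDP).
//		WithPort(intstr.FromInt32(53))
//	spec := networkingv1ac.NetworkPolicySpec().
//		WithEgress(networkingv1ac.NetworkPolicyEgressRule().WithPorts(port)).
//		WithPolicyTypes(networkingv1.PolicyTypeEgress)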
func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApp // If called multiple times, the Name field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressA // If called multiple times, the Namespace field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressAppl // If called multiple times, the UID field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddre // If called multiple times, the Generation field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressAppl // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) * // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPA // overwriting an existing map entries in Annotations field with the same key. func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -245,3 +245,9 @@ func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IPAddressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go index 064963d69..bf025a8c1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/ipaddressspec.go @@ -18,13 +18,13 @@ limitations under the License. package v1alpha1 -// IPAddressSpecApplyConfiguration represents an declarative configuration of the IPAddressSpec type for use +// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use // with apply. type IPAddressSpecApplyConfiguration struct { ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` } -// IPAddressSpecApplyConfiguration constructs an declarative configuration of the IPAddressSpec type for use with +// IPAddressSpecApplyConfiguration constructs a declarative configuration of the IPAddressSpec type for use with // apply. func IPAddressSpec() *IPAddressSpecApplyConfiguration { return &IPAddressSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go index ce1049709..d5a52d503 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use +// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use // with apply. type ParentReferenceApplyConfiguration struct { Group *string `json:"group,omitempty"` @@ -27,7 +27,7 @@ type ParentReferenceApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with +// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with // apply. func ParentReference() *ParentReferenceApplyConfiguration { return &ParentReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go index f6d0a91e0..27d3f271b 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceCIDRApplyConfiguration represents an declarative configuration of the ServiceCIDR type for use +// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use // with apply. 
type ServiceCIDRApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ServiceCIDRApplyConfiguration struct { Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` } -// ServiceCIDR constructs an declarative configuration of the ServiceCIDR type for use with +// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with // apply. func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { b := &ServiceCIDRApplyConfiguration{} @@ -85,7 +85,7 @@ func extractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManage // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCID // If called multiple times, the Name field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceC // If called multiple times, the Namespace field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDR // If called multiple times, the UID field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApp // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *Servi // If called multiple times, the Generation field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDR // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *S // overwriting an existing map entries in Annotations field with the same key. 
func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApply b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceCIDRApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go index 302d69194..7875ff403 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go @@ -18,13 +18,13 @@ limitations under the License. package v1alpha1 -// ServiceCIDRSpecApplyConfiguration represents an declarative configuration of the ServiceCIDRSpec type for use +// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use // with apply. type ServiceCIDRSpecApplyConfiguration struct { CIDRs []string `json:"cidrs,omitempty"` } -// ServiceCIDRSpecApplyConfiguration constructs an declarative configuration of the ServiceCIDRSpec type for use with +// ServiceCIDRSpecApplyConfiguration constructs a declarative configuration of the ServiceCIDRSpec type for use with // apply. 
func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { return &ServiceCIDRSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go index 5afc549a6..34715e3a4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceCIDRStatusApplyConfiguration represents an declarative configuration of the ServiceCIDRStatus type for use +// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use // with apply. type ServiceCIDRStatusApplyConfiguration struct { Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// ServiceCIDRStatusApplyConfiguration constructs an declarative configuration of the ServiceCIDRStatus type for use with +// ServiceCIDRStatusApplyConfiguration constructs a declarative configuration of the ServiceCIDRStatus type for use with // apply. func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { return &ServiceCIDRStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go index b12907e81..c7301c6a3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go @@ -19,18 +19,18 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/networking/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" ) -// HTTPIngressPathApplyConfiguration represents an declarative configuration of the HTTPIngressPath type for use +// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use // with apply. type HTTPIngressPathApplyConfiguration struct { Path *string `json:"path,omitempty"` - PathType *v1beta1.PathType `json:"pathType,omitempty"` + PathType *networkingv1beta1.PathType `json:"pathType,omitempty"` Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"` } -// HTTPIngressPathApplyConfiguration constructs an declarative configuration of the HTTPIngressPath type for use with +// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with // apply. func HTTPIngressPath() *HTTPIngressPathApplyConfiguration { return &HTTPIngressPathApplyConfiguration{} @@ -47,7 +47,7 @@ func (b *HTTPIngressPathApplyConfiguration) WithPath(value string) *HTTPIngressP // WithPathType sets the PathType field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PathType field is set to the value of the last call. 
-func (b *HTTPIngressPathApplyConfiguration) WithPathType(value v1beta1.PathType) *HTTPIngressPathApplyConfiguration { +func (b *HTTPIngressPathApplyConfiguration) WithPathType(value networkingv1beta1.PathType) *HTTPIngressPathApplyConfiguration { b.PathType = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go index 3137bc5eb..124545223 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// HTTPIngressRuleValueApplyConfiguration represents an declarative configuration of the HTTPIngressRuleValue type for use +// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use // with apply. type HTTPIngressRuleValueApplyConfiguration struct { Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"` } -// HTTPIngressRuleValueApplyConfiguration constructs an declarative configuration of the HTTPIngressRuleValue type for use with +// HTTPIngressRuleValueApplyConfiguration constructs a declarative configuration of the HTTPIngressRuleValue type for use with // apply. func HTTPIngressRuleValue() *HTTPIngressRuleValueApplyConfiguration { return &HTTPIngressRuleValueApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go index 56f65c30a..5d26cd75c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressApplyConfiguration represents an declarative configuration of the Ingress type for use +// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use // with apply. type IngressApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type IngressApplyConfiguration struct { Status *IngressStatusApplyConfiguration `json:"status,omitempty"` } -// Ingress constructs an declarative configuration of the Ingress type for use with +// Ingress constructs a declarative configuration of the Ingress type for use with // apply. func Ingress(name, namespace string) *IngressApplyConfiguration { b := &IngressApplyConfiguration{} @@ -87,7 +87,7 @@ func extractIngress(ingress *networkingv1beta1.Ingress, fieldManager string, sub // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *IngressApplyConfiguration) WithKind(value string) *IngressApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *IngressApplyConfiguration) WithAPIVersion(value string) *IngressApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *IngressApplyConfiguration) WithName(value string) *IngressApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *IngressApplyConfiguration) WithGenerateName(value string) *IngressApply // If called multiple times, the Namespace field is set to the value of the last call. func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *IngressApplyConfiguration) WithNamespace(value string) *IngressApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *IngressApplyConfiguration) WithUID(value types.UID) *IngressApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *IngressApplyConfiguration) WithResourceVersion(value string) *IngressAp // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *IngressApplyConfiguration) WithGeneration(value int64) *IngressApplyCon // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *IngressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *In // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *IngressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *In // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *IngressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *IngressApplyConfiguration) WithLabels(entries map[string]string) *Ingre // overwriting an existing map entries in Annotations field with the same key. func (b *IngressApplyConfiguration) WithAnnotations(entries map[string]string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *IngressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *IngressApplyConfiguration) WithFinalizers(values ...string) *IngressApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *IngressApplyConfiguration) WithStatus(value *IngressStatusApplyConfigur b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IngressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go index f19c2f2ee..9d386f160 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// IngressBackendApplyConfiguration represents an declarative configuration of the IngressBackend type for use +// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use // with apply. type IngressBackendApplyConfiguration struct { ServiceName *string `json:"serviceName,omitempty"` @@ -31,7 +31,7 @@ type IngressBackendApplyConfiguration struct { Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"` } -// IngressBackendApplyConfiguration constructs an declarative configuration of the IngressBackend type for use with +// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with // apply. func IngressBackend() *IngressBackendApplyConfiguration { return &IngressBackendApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go index b65d4b307..272e0339f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// IngressClassApplyConfiguration represents an declarative configuration of the IngressClass type for use +// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use // with apply. type IngressClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type IngressClassApplyConfiguration struct { Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"` } -// IngressClass constructs an declarative configuration of the IngressClass type for use with +// IngressClass constructs a declarative configuration of the IngressClass type for use with // apply. func IngressClass(name string) *IngressClassApplyConfiguration { b := &IngressClassApplyConfiguration{} @@ -84,7 +84,7 @@ func extractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldMana // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *IngressClassApplyConfiguration) WithKind(value string) *IngressClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *IngressClassApplyConfiguration) WithAPIVersion(value string) *IngressCl // If called multiple times, the Name field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *IngressClassApplyConfiguration) WithName(value string) *IngressClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *IngressClassApplyConfiguration) WithGenerateName(value string) *Ingress // If called multiple times, the Namespace field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *IngressClassApplyConfiguration) WithNamespace(value string) *IngressCla // If called multiple times, the UID field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *IngressClassApplyConfiguration) WithUID(value types.UID) *IngressClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *IngressClassApplyConfiguration) WithResourceVersion(value string) *Ingr // If called multiple times, the Generation field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithGeneration(value int64) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *IngressClassApplyConfiguration) WithGeneration(value int64) *IngressCla // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *IngressClassApplyConfiguration) WithCreationTimestamp(value metav1.Time // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *IngressClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *IngressClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *IngressClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *IngressClassApplyConfiguration) WithAnnotations(entries map[string]string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *IngressClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *IngressClassApplyConfiguration) WithFinalizers(values ...string) *IngressClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -245,3 +245,9 @@ func (b *IngressClassApplyConfiguration) WithSpec(value *IngressClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IngressClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go index e6ca805e4..2a307a676 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// IngressClassParametersReferenceApplyConfiguration represents an declarative configuration of the IngressClassParametersReference type for use +// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use // with apply. type IngressClassParametersReferenceApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -28,7 +28,7 @@ type IngressClassParametersReferenceApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// IngressClassParametersReferenceApplyConfiguration constructs an declarative configuration of the IngressClassParametersReference type for use with +// IngressClassParametersReferenceApplyConfiguration constructs a declarative configuration of the IngressClassParametersReference type for use with // apply. func IngressClassParametersReference() *IngressClassParametersReferenceApplyConfiguration { return &IngressClassParametersReferenceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go index 51040462c..eefbf62b8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressClassSpecApplyConfiguration represents an declarative configuration of the IngressClassSpec type for use +// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use // with apply. type IngressClassSpecApplyConfiguration struct { Controller *string `json:"controller,omitempty"` Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"` } -// IngressClassSpecApplyConfiguration constructs an declarative configuration of the IngressClassSpec type for use with +// IngressClassSpecApplyConfiguration constructs a declarative configuration of the IngressClassSpec type for use with // apply. func IngressClassSpec() *IngressClassSpecApplyConfiguration { return &IngressClassSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go index 20bf63780..12dbc3596 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// IngressLoadBalancerIngressApplyConfiguration represents an declarative configuration of the IngressLoadBalancerIngress type for use +// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use // with apply. type IngressLoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` @@ -26,7 +26,7 @@ type IngressLoadBalancerIngressApplyConfiguration struct { Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"` } -// IngressLoadBalancerIngressApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerIngress type for use with +// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with // apply. func IngressLoadBalancerIngress() *IngressLoadBalancerIngressApplyConfiguration { return &IngressLoadBalancerIngressApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go index e16dd2363..e896ab341 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressLoadBalancerStatusApplyConfiguration represents an declarative configuration of the IngressLoadBalancerStatus type for use +// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use // with apply. type IngressLoadBalancerStatusApplyConfiguration struct { Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"` } -// IngressLoadBalancerStatusApplyConfiguration constructs an declarative configuration of the IngressLoadBalancerStatus type for use with +// IngressLoadBalancerStatusApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerStatus type for use with // apply. func IngressLoadBalancerStatus() *IngressLoadBalancerStatusApplyConfiguration { return &IngressLoadBalancerStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go index 083653797..4ee3f0161 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" ) -// IngressPortStatusApplyConfiguration represents an declarative configuration of the IngressPortStatus type for use +// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use // with apply. type IngressPortStatusApplyConfiguration struct { Port *int32 `json:"port,omitempty"` @@ -30,7 +30,7 @@ type IngressPortStatusApplyConfiguration struct { Error *string `json:"error,omitempty"` } -// IngressPortStatusApplyConfiguration constructs an declarative configuration of the IngressPortStatus type for use with +// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with // apply. 
func IngressPortStatus() *IngressPortStatusApplyConfiguration { return &IngressPortStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go index 015541eeb..809fada92 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressRuleApplyConfiguration represents an declarative configuration of the IngressRule type for use +// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use // with apply. type IngressRuleApplyConfiguration struct { Host *string `json:"host,omitempty"` - IngressRuleValueApplyConfiguration `json:",omitempty,inline"` + IngressRuleValueApplyConfiguration `json:",inline"` } -// IngressRuleApplyConfiguration constructs an declarative configuration of the IngressRule type for use with +// IngressRuleApplyConfiguration constructs a declarative configuration of the IngressRule type for use with // apply. func IngressRule() *IngressRuleApplyConfiguration { return &IngressRuleApplyConfiguration{} @@ -43,6 +43,6 @@ func (b *IngressRuleApplyConfiguration) WithHost(value string) *IngressRuleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the HTTP field is set to the value of the last call. func (b *IngressRuleApplyConfiguration) WithHTTP(value *HTTPIngressRuleValueApplyConfiguration) *IngressRuleApplyConfiguration { - b.HTTP = value + b.IngressRuleValueApplyConfiguration.HTTP = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go index 2d03c7b13..4a6412475 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressRuleValueApplyConfiguration represents an declarative configuration of the IngressRuleValue type for use +// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use // with apply. type IngressRuleValueApplyConfiguration struct { HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"` } -// IngressRuleValueApplyConfiguration constructs an declarative configuration of the IngressRuleValue type for use with +// IngressRuleValueApplyConfiguration constructs a declarative configuration of the IngressRuleValue type for use with // apply. func IngressRuleValue() *IngressRuleValueApplyConfiguration { return &IngressRuleValueApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go index 1ab4d8bb7..58fbde8b3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1beta1 -// IngressSpecApplyConfiguration represents an declarative configuration of the IngressSpec type for use +// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use // with apply. type IngressSpecApplyConfiguration struct { IngressClassName *string `json:"ingressClassName,omitempty"` @@ -27,7 +27,7 @@ type IngressSpecApplyConfiguration struct { Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"` } -// IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with +// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with // apply. func IngressSpec() *IngressSpecApplyConfiguration { return &IngressSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go index faa7e2446..3aed61688 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// IngressStatusApplyConfiguration represents an declarative configuration of the IngressStatus type for use +// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use // with apply. type IngressStatusApplyConfiguration struct { LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"` } -// IngressStatusApplyConfiguration constructs an declarative configuration of the IngressStatus type for use with +// IngressStatusApplyConfiguration constructs a declarative configuration of the IngressStatus type for use with // apply. func IngressStatus() *IngressStatusApplyConfiguration { return &IngressStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go index 8ca93a0bc..63648cd46 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// IngressTLSApplyConfiguration represents an declarative configuration of the IngressTLS type for use +// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use // with apply. type IngressTLSApplyConfiguration struct { Hosts []string `json:"hosts,omitempty"` SecretName *string `json:"secretName,omitempty"` } -// IngressTLSApplyConfiguration constructs an declarative configuration of the IngressTLS type for use with +// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with // apply. func IngressTLS() *IngressTLSApplyConfiguration { return &IngressTLSApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go new file mode 100644 index 000000000..f58b54da5 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + networkingv1beta1 "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use +// with apply. +type IPAddressApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"` +} + +// IPAddress constructs a declarative configuration of the IPAddress type for use with +// apply. +func IPAddress(name string) *IPAddressApplyConfiguration { + b := &IPAddressApplyConfiguration{} + b.WithName(name) + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b +} + +// ExtractIPAddress extracts the applied configuration owned by fieldManager from +// iPAddress. If no managedFields are found in iPAddress for fieldManager, a +// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// iPAddress must be a unmodified IPAddress API object that was retrieved from the Kubernetes API. +// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "") +} + +// ExtractIPAddressStatus is the same as ExtractIPAddress except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractIPAddressStatus(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) { + return extractIPAddress(iPAddress, fieldManager, "status") +} + +func extractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) { + b := &IPAddressApplyConfiguration{} + err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1beta1.IPAddress"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(iPAddress.Name) + + b.WithKind("IPAddress") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithKind(value string) *IPAddressApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithAPIVersion(value string) *IPAddressApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithName(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithGenerateName(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithNamespace(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *IPAddressApplyConfiguration) WithUID(value types.UID) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithResourceVersion(value string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithGeneration(value int64) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithCreationTimestamp(value metav1.Time) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *IPAddressApplyConfiguration) WithLabels(entries map[string]string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *IPAddressApplyConfiguration) WithAnnotations(entries map[string]string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *IPAddressApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *IPAddressApplyConfiguration) WithFinalizers(values ...string) *IPAddressApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *IPAddressApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *IPAddressApplyConfiguration) WithSpec(value *IPAddressSpecApplyConfiguration) *IPAddressApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *IPAddressApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go new file mode 100644 index 000000000..76b02137d --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use +// with apply. +type IPAddressSpecApplyConfiguration struct { + ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"` +} + +// IPAddressSpecApplyConfiguration constructs a declarative configuration of the IPAddressSpec type for use with +// apply. +func IPAddressSpec() *IPAddressSpecApplyConfiguration { + return &IPAddressSpecApplyConfiguration{} +} + +// WithParentRef sets the ParentRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ParentRef field is set to the value of the last call. +func (b *IPAddressSpecApplyConfiguration) WithParentRef(value *ParentReferenceApplyConfiguration) *IPAddressSpecApplyConfiguration { + b.ParentRef = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go new file mode 100644 index 000000000..1863938f1 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use +// with apply. 
+type ParentReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with +// apply. +func ParentReference() *ParentReferenceApplyConfiguration { + return &ParentReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithGroup(value string) *ParentReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithResource(value string) *ParentReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithNamespace(value string) *ParentReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go new file mode 100644 index 000000000..6a53db5c0 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go @@ -0,0 +1,262 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + networkingv1beta1 "k8s.io/api/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use +// with apply. +type ServiceCIDRApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"` + Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` +} + +// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with +// apply. +func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { + b := &ServiceCIDRApplyConfiguration{} + b.WithName(name) + b.WithKind("ServiceCIDR") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b +} + +// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from +// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a +// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// serviceCIDR must be a unmodified ServiceCIDR API object that was retrieved from the Kubernetes API. +// ExtractServiceCIDR provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "") +} + +// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractServiceCIDRStatus(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { + return extractServiceCIDR(serviceCIDR, fieldManager, "status") +} + +func extractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) { + b := &ServiceCIDRApplyConfiguration{} + err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1beta1.ServiceCIDR"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(serviceCIDR.Name) + + b.WithKind("ServiceCIDR") + b.WithAPIVersion("networking.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithSpec(value *ServiceCIDRSpecApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApplyConfiguration) *ServiceCIDRApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ServiceCIDRApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go new file mode 100644 index 000000000..1f283532d --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go @@ -0,0 +1,41 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use +// with apply. +type ServiceCIDRSpecApplyConfiguration struct { + CIDRs []string `json:"cidrs,omitempty"` +} + +// ServiceCIDRSpecApplyConfiguration constructs a declarative configuration of the ServiceCIDRSpec type for use with +// apply. +func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { + return &ServiceCIDRSpecApplyConfiguration{} +} + +// WithCIDRs adds the given value to the CIDRs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the CIDRs field. +func (b *ServiceCIDRSpecApplyConfiguration) WithCIDRs(values ...string) *ServiceCIDRSpecApplyConfiguration { + for i := range values { + b.CIDRs = append(b.CIDRs, values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go new file mode 100644 index 000000000..f2dd92404 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use +// with apply. +type ServiceCIDRStatusApplyConfiguration struct { + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// ServiceCIDRStatusApplyConfiguration constructs a declarative configuration of the ServiceCIDRStatus type for use with +// apply. +func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { + return &ServiceCIDRStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
+func (b *ServiceCIDRStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ServiceCIDRStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go index 9eec00267..30ce9fb42 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { - PodFixed *v1.ResourceList `json:"podFixed,omitempty"` + PodFixed *corev1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} @@ -37,7 +37,7 @@ func Overhead() *OverheadApplyConfiguration { // WithPodFixed sets the PodFixed field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the PodFixed field is set to the value of the last call. -func (b *OverheadApplyConfiguration) WithPodFixed(value v1.ResourceList) *OverheadApplyConfiguration { +func (b *OverheadApplyConfiguration) WithPodFixed(value corev1.ResourceList) *OverheadApplyConfiguration { b.PodFixed = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go index 3c9d1fc46..067dc1703 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go @@ -19,25 +19,25 @@ limitations under the License. package v1 import ( - apinodev1 "k8s.io/api/node/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + nodev1 "k8s.io/api/node/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. 
type RuntimeClassApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Handler *string `json:"handler,omitempty"` - Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"` - Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Handler *string `json:"handler,omitempty"` + Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"` + Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -58,18 +58,18 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { +func ExtractRuntimeClass(runtimeClass *nodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { return extractRuntimeClass(runtimeClass, fieldManager, "") } // ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractRuntimeClassStatus(runtimeClass *apinodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { +func ExtractRuntimeClassStatus(runtimeClass *nodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) { return extractRuntimeClass(runtimeClass, fieldManager, "status") } -func extractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) { +func extractRuntimeClass(runtimeClass *nodev1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) { b := &RuntimeClassApplyConfiguration{} err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1.RuntimeClass"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeCl // If called multiple times, the Name field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *Runtime // If called multiple times, the Namespace field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla // If called multiple times, the UID field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *Runt // If called multiple times, the Generation field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { +func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { +func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]stri // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RuntimeClassApplyConfiguration { +func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *RuntimeClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -263,3 +263,9 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo b.Scheduling = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go index e01db85d7..b45400fbc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + Tolerations []corev1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} @@ -52,7 +52,7 @@ func (b *SchedulingApplyConfiguration) WithNodeSelector(entries map[string]strin // WithTolerations adds the given value to the Tolerations field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Tolerations field. 
-func (b *SchedulingApplyConfiguration) WithTolerations(values ...*v1.TolerationApplyConfiguration) *SchedulingApplyConfiguration { +func (b *SchedulingApplyConfiguration) WithTolerations(values ...*corev1.TolerationApplyConfiguration) *SchedulingApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithTolerations") diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go index 1ddaa64ac..84770a092 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go index e680e12de..5ddca3b6e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RuntimeClassApplyConfiguration struct { Spec *RuntimeClassSpecApplyConfiguration `json:"spec,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -84,7 +84,7 @@ func extractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager s // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeCl // If called multiple times, the Name field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *Runtime // If called multiple times, the Namespace field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla // If called multiple times, the UID field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *Runt // If called multiple times, the Generation field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeCla // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -245,3 +245,9 @@ func (b *RuntimeClassApplyConfiguration) WithSpec(value *RuntimeClassSpecApplyCo b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go index 86e8585ad..1aa43eb13 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// RuntimeClassSpecApplyConfiguration represents an declarative configuration of the RuntimeClassSpec type for use +// RuntimeClassSpecApplyConfiguration represents a declarative configuration of the RuntimeClassSpec type for use // with apply. type RuntimeClassSpecApplyConfiguration struct { RuntimeHandler *string `json:"runtimeHandler,omitempty"` @@ -26,7 +26,7 @@ type RuntimeClassSpecApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClassSpecApplyConfiguration constructs an declarative configuration of the RuntimeClassSpec type for use with +// RuntimeClassSpecApplyConfiguration constructs a declarative configuration of the RuntimeClassSpec type for use with // apply. func RuntimeClassSpec() *RuntimeClassSpecApplyConfiguration { return &RuntimeClassSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go index d4117d6bc..6ce49ad86 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go index e8c489550..cf767e702 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/api/core/v1" ) -// OverheadApplyConfiguration represents an declarative configuration of the Overhead type for use +// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use // with apply. 
type OverheadApplyConfiguration struct { PodFixed *v1.ResourceList `json:"podFixed,omitempty"` } -// OverheadApplyConfiguration constructs an declarative configuration of the Overhead type for use with +// OverheadApplyConfiguration constructs a declarative configuration of the Overhead type for use with // apply. func Overhead() *OverheadApplyConfiguration { return &OverheadApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go index f5487665c..b17de6763 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RuntimeClassApplyConfiguration represents an declarative configuration of the RuntimeClass type for use +// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use // with apply. type RuntimeClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -37,7 +37,7 @@ type RuntimeClassApplyConfiguration struct { Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"` } -// RuntimeClass constructs an declarative configuration of the RuntimeClass type for use with +// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with // apply. func RuntimeClass(name string) *RuntimeClassApplyConfiguration { b := &RuntimeClassApplyConfiguration{} @@ -86,7 +86,7 @@ func extractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager st // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *RuntimeClassApplyConfiguration) WithKind(value string) *RuntimeClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *RuntimeClassApplyConfiguration) WithAPIVersion(value string) *RuntimeCl // If called multiple times, the Name field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *RuntimeClassApplyConfiguration) WithName(value string) *RuntimeClassApp // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *RuntimeClassApplyConfiguration) WithGenerateName(value string) *Runtime // If called multiple times, the Namespace field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *RuntimeClassApplyConfiguration) WithNamespace(value string) *RuntimeCla // If called multiple times, the UID field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *RuntimeClassApplyConfiguration) WithUID(value types.UID) *RuntimeClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *RuntimeClassApplyConfiguration) WithResourceVersion(value string) *Runt // If called multiple times, the Generation field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *RuntimeClassApplyConfiguration) WithGeneration(value int64) *RuntimeCla // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *RuntimeClassApplyConfiguration) WithCreationTimestamp(value metav1.Time // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *RuntimeClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. 
func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *RuntimeClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *RuntimeClassApplyConfiguration) WithAnnotations(entries map[string]string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *RuntimeClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *RuntimeClassApplyConfiguration) WithFinalizers(values ...string) *RuntimeClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -263,3 +263,9 @@ func (b *RuntimeClassApplyConfiguration) WithScheduling(value *SchedulingApplyCo b.Scheduling = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RuntimeClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go index 10831d0ff..23d0b9752 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// SchedulingApplyConfiguration represents an declarative configuration of the Scheduling type for use +// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use // with apply. type SchedulingApplyConfiguration struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"` } -// SchedulingApplyConfiguration constructs an declarative configuration of the Scheduling type for use with +// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with // apply. 
func Scheduling() *SchedulingApplyConfiguration { return &SchedulingApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go index 76a9533a6..079c6f3bc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go @@ -20,22 +20,22 @@ package v1 import ( policyv1 "k8s.io/api/policy/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use +// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use // with apply. type EvictionApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DeleteOptions *metav1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` } -// Eviction constructs an declarative configuration of the Eviction type for use with +// Eviction constructs a declarative configuration of the Eviction type for use with // apply. func Eviction(name, namespace string) *EvictionApplyConfiguration { b := &EvictionApplyConfiguration{} @@ -86,7 +86,7 @@ func extractEviction(eviction *policyv1.Eviction, fieldManager string, subresour // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApply // If called multiple times, the Name field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfig // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApp // If called multiple times, the Namespace field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyC // If called multiple times, the UID field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConf // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *Eviction // If called multiple times, the Generation field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EvictionApplyConfiguration { +func (b *EvictionApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EvictionApplyConfiguration { +func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *E // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *Evic // overwriting an existing map entries in Annotations field with the same key. func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EvictionApplyConfiguration { +func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,21 +229,27 @@ func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe func (b *EvictionApplyConfiguration) WithFinalizers(values ...string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *EvictionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } // WithDeleteOptions sets the DeleteOptions field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeleteOptions field is set to the value of the last call. -func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsApplyConfiguration) *EvictionApplyConfiguration { +func (b *EvictionApplyConfiguration) WithDeleteOptions(value *metav1.DeleteOptionsApplyConfiguration) *EvictionApplyConfiguration { b.DeleteOptions = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EvictionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go index 6b547c269..82ec5a082 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apipolicyv1 "k8s.io/api/policy/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + policyv1 "k8s.io/api/policy/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetApplyConfiguration represents an declarative configuration of the PodDisruptionBudget type for use +// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use // with apply. 
type PodDisruptionBudgetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodDisruptionBudgetSpecApplyConfiguration `json:"spec,omitempty"` - Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *PodDisruptionBudgetSpecApplyConfiguration `json:"spec,omitempty"` + Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` } -// PodDisruptionBudget constructs an declarative configuration of the PodDisruptionBudget type for use with +// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with // apply. func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfiguration { b := &PodDisruptionBudgetApplyConfiguration{} @@ -58,18 +58,18 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { +func ExtractPodDisruptionBudget(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "") } // ExtractPodDisruptionBudgetStatus is the same as ExtractPodDisruptionBudget except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { +func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) { return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "status") } -func extractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) { +func extractPodDisruptionBudget(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) { b := &PodDisruptionBudgetApplyConfiguration{} err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1.PodDisruptionBudget"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBu // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisruptionBudgetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisru // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the APIVersion field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *PodDisruptionBudgetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *Po // If called multiple times, the Name field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisru // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) * // If called multiple times, the Namespace field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *Pod // If called multiple times, the UID field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDis // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string // If called multiple times, the Generation field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithGeneration(value int64) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration { +func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration { +func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value meta // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(v // overwriting an existing map entries in Labels field with the same key. func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]st // overwriting an existing map entries in Annotations field with the same key. func (b *PodDisruptionBudgetApplyConfiguration) WithAnnotations(entries map[string]string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithAnnotations(entries map[stri // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodDisruptionBudgetApplyConfiguration { +func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v func (b *PodDisruptionBudgetApplyConfiguration) WithFinalizers(values ...string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PodDisruptionBudgetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -256,3 +256,9 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go index 67d9ba6bb..3c66739bd 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go @@ -21,19 +21,19 @@ package v1 import ( policyv1 "k8s.io/api/policy/v1" intstr "k8s.io/apimachinery/pkg/util/intstr" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use +// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. type PodDisruptionBudgetSpecApplyConfiguration struct { MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` UnhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } -// PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with +// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with // apply. 
func PodDisruptionBudgetSpec() *PodDisruptionBudgetSpecApplyConfiguration { return &PodDisruptionBudgetSpecApplyConfiguration{} @@ -50,7 +50,7 @@ func (b *PodDisruptionBudgetSpecApplyConfiguration) WithMinAvailable(value intst // WithSelector sets the Selector field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Selector field is set to the value of the last call. -func (b *PodDisruptionBudgetSpecApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *PodDisruptionBudgetSpecApplyConfiguration { +func (b *PodDisruptionBudgetSpecApplyConfiguration) WithSelector(value *metav1.LabelSelectorApplyConfiguration) *PodDisruptionBudgetSpecApplyConfiguration { b.Selector = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go index 2dd427b9e..d3c44d90a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + applyconfigurationsmetav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetStatusApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetStatus type for use +// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use // with apply. type PodDisruptionBudgetStatusApplyConfiguration struct { - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - DisruptedPods map[string]v1.Time `json:"disruptedPods,omitempty"` - DisruptionsAllowed *int32 `json:"disruptionsAllowed,omitempty"` - CurrentHealthy *int32 `json:"currentHealthy,omitempty"` - DesiredHealthy *int32 `json:"desiredHealthy,omitempty"` - ExpectedPods *int32 `json:"expectedPods,omitempty"` - Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty"` + DisruptionsAllowed *int32 `json:"disruptionsAllowed,omitempty"` + CurrentHealthy *int32 `json:"currentHealthy,omitempty"` + DesiredHealthy *int32 `json:"desiredHealthy,omitempty"` + ExpectedPods *int32 `json:"expectedPods,omitempty"` + Conditions []applyconfigurationsmetav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// PodDisruptionBudgetStatusApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetStatus type for use with +// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with // apply. func PodDisruptionBudgetStatus() *PodDisruptionBudgetStatusApplyConfiguration { return &PodDisruptionBudgetStatusApplyConfiguration{} @@ -53,9 +53,9 @@ func (b *PodDisruptionBudgetStatusApplyConfiguration) WithObservedGeneration(val // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, the entries provided by each call will be put on the DisruptedPods field, // overwriting an existing map entries in DisruptedPods field with the same key. -func (b *PodDisruptionBudgetStatusApplyConfiguration) WithDisruptedPods(entries map[string]v1.Time) *PodDisruptionBudgetStatusApplyConfiguration { +func (b *PodDisruptionBudgetStatusApplyConfiguration) WithDisruptedPods(entries map[string]metav1.Time) *PodDisruptionBudgetStatusApplyConfiguration { if b.DisruptedPods == nil && len(entries) > 0 { - b.DisruptedPods = make(map[string]v1.Time, len(entries)) + b.DisruptedPods = make(map[string]metav1.Time, len(entries)) } for k, v := range entries { b.DisruptedPods[k] = v @@ -98,7 +98,7 @@ func (b *PodDisruptionBudgetStatusApplyConfiguration) WithExpectedPods(value int // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *PodDisruptionBudgetStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *PodDisruptionBudgetStatusApplyConfiguration { +func (b *PodDisruptionBudgetStatusApplyConfiguration) WithConditions(values ...*applyconfigurationsmetav1.ConditionApplyConfiguration) *PodDisruptionBudgetStatusApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithConditions") diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go index d2a361d1b..0b5945935 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/policy/v1beta1" + policyv1beta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use +// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use // with apply. type EvictionApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type EvictionApplyConfiguration struct { DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"` } -// Eviction constructs an declarative configuration of the Eviction type for use with +// Eviction constructs a declarative configuration of the Eviction type for use with // apply. func Eviction(name, namespace string) *EvictionApplyConfiguration { b := &EvictionApplyConfiguration{} @@ -57,18 +57,18 @@ func Eviction(name, namespace string) *EvictionApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! 
-func ExtractEviction(eviction *v1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { +func ExtractEviction(eviction *policyv1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { return extractEviction(eviction, fieldManager, "") } // ExtractEvictionStatus is the same as ExtractEviction except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractEvictionStatus(eviction *v1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { +func ExtractEvictionStatus(eviction *policyv1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) { return extractEviction(eviction, fieldManager, "status") } -func extractEviction(eviction *v1beta1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) { +func extractEviction(eviction *policyv1beta1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) { b := &EvictionApplyConfiguration{} err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1beta1.Eviction"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractEviction(eviction *v1beta1.Eviction, fieldManager string, subresourc // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfig // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApply // If called multiple times, the Name field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfig // If called multiple times, the GenerateName field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApp // If called multiple times, the Namespace field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyC // If called multiple times, the UID field is set to the value of the last call. 
func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConf // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *Eviction // If called multiple times, the Generation field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyC // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *E // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *E // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *Evic // overwriting an existing map entries in Annotations field with the same key. 
func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefe func (b *EvictionApplyConfiguration) WithFinalizers(values ...string) *EvictionApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -247,3 +247,9 @@ func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsAp b.DeleteOptions = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *EvictionApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go index cef51a279..7743da76a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetApplyConfiguration represents an declarative configuration of the PodDisruptionBudget type for use +// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use // with apply. type PodDisruptionBudgetApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type PodDisruptionBudgetApplyConfiguration struct { Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"` } -// PodDisruptionBudget constructs an declarative configuration of the PodDisruptionBudget type for use with +// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with // apply. func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfiguration { b := &PodDisruptionBudgetApplyConfiguration{} @@ -87,7 +87,7 @@ func extractPodDisruptionBudget(podDisruptionBudget *policyv1beta1.PodDisruption // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisruptionBudgetApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithKind(value string) *PodDisru // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *PodDisruptionBudgetApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithAPIVersion(value string) *Po // If called multiple times, the Name field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithName(value string) *PodDisru // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithGenerateName(value string) * // If called multiple times, the Namespace field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithNamespace(value string) *Pod // If called multiple times, the UID field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithUID(value types.UID) *PodDis // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithResourceVersion(value string // If called multiple times, the Generation field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithGeneration(value int64) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithGeneration(value int64) *Pod // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithCreationTimestamp(value meta // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionTimestamp(value meta // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithDeletionGracePeriodSeconds(v // overwriting an existing map entries in Labels field with the same key. func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithLabels(entries map[string]st // overwriting an existing map entries in Annotations field with the same key. 
func (b *PodDisruptionBudgetApplyConfiguration) WithAnnotations(entries map[string]string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithOwnerReferences(values ...*v func (b *PodDisruptionBudgetApplyConfiguration) WithFinalizers(values ...string) *PodDisruptionBudgetApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -256,3 +256,9 @@ func (b *PodDisruptionBudgetApplyConfiguration) WithStatus(value *PodDisruptionB b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PodDisruptionBudgetApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go index 0ba3ea1c2..d8fecf7a3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go @@ -19,21 +19,21 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/policy/v1beta1" + policyv1beta1 "k8s.io/api/policy/v1beta1" intstr "k8s.io/apimachinery/pkg/util/intstr" v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetSpecApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetSpec type for use +// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use // with apply. 
type PodDisruptionBudgetSpecApplyConfiguration struct { - MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` - Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` - UnhealthyPodEvictionPolicy *v1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` + MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"` + Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` + UnhealthyPodEvictionPolicy *policyv1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"` } -// PodDisruptionBudgetSpecApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetSpec type for use with +// PodDisruptionBudgetSpecApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetSpec type for use with // apply. func PodDisruptionBudgetSpec() *PodDisruptionBudgetSpecApplyConfiguration { return &PodDisruptionBudgetSpecApplyConfiguration{} @@ -66,7 +66,7 @@ func (b *PodDisruptionBudgetSpecApplyConfiguration) WithMaxUnavailable(value int // WithUnhealthyPodEvictionPolicy sets the UnhealthyPodEvictionPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UnhealthyPodEvictionPolicy field is set to the value of the last call. -func (b *PodDisruptionBudgetSpecApplyConfiguration) WithUnhealthyPodEvictionPolicy(value v1beta1.UnhealthyPodEvictionPolicyType) *PodDisruptionBudgetSpecApplyConfiguration { +func (b *PodDisruptionBudgetSpecApplyConfiguration) WithUnhealthyPodEvictionPolicy(value policyv1beta1.UnhealthyPodEvictionPolicyType) *PodDisruptionBudgetSpecApplyConfiguration { b.UnhealthyPodEvictionPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go index d0813590e..e66a7fb38 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PodDisruptionBudgetStatusApplyConfiguration represents an declarative configuration of the PodDisruptionBudgetStatus type for use +// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use // with apply. type PodDisruptionBudgetStatusApplyConfiguration struct { ObservedGeneration *int64 `json:"observedGeneration,omitempty"` @@ -35,7 +35,7 @@ type PodDisruptionBudgetStatusApplyConfiguration struct { Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` } -// PodDisruptionBudgetStatusApplyConfiguration constructs an declarative configuration of the PodDisruptionBudgetStatus type for use with +// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with // apply. 
func PodDisruptionBudgetStatus() *PodDisruptionBudgetStatusApplyConfiguration { return &PodDisruptionBudgetStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go index fda9205c2..b7049a8ef 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { - ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` + ClusterRoleSelectors []metav1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} @@ -37,7 +37,7 @@ func AggregationRule() *AggregationRuleApplyConfiguration { // WithClusterRoleSelectors adds the given value to the ClusterRoleSelectors field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the ClusterRoleSelectors field. -func (b *AggregationRuleApplyConfiguration) WithClusterRoleSelectors(values ...*v1.LabelSelectorApplyConfiguration) *AggregationRuleApplyConfiguration { +func (b *AggregationRuleApplyConfiguration) WithClusterRoleSelectors(values ...*metav1.LabelSelectorApplyConfiguration) *AggregationRuleApplyConfiguration { for i := range values { if values[i] == nil { panic("nil value passed to WithClusterRoleSelectors") diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go index 3a5660fe1..9b46fdbe9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apirbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacv1 "k8s.io/api/rbac/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. 
type ClusterRoleApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` - AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` + AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -57,18 +57,18 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { +func ExtractClusterRole(clusterRole *rbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { return extractClusterRole(clusterRole, fieldManager, "") } // ExtractClusterRoleStatus is the same as ExtractClusterRole except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractClusterRoleStatus(clusterRole *apirbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { +func ExtractClusterRoleStatus(clusterRole *rbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) { return extractClusterRole(clusterRole, fieldManager, "status") } -func extractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) { +func extractClusterRole(clusterRole *rbacv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) { b := &ClusterRoleApplyConfiguration{} err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRole"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRol // If called multiple times, the Name field is set to the value of the last call. 
func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterR // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *Clust // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { +func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { +func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *C // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterRoleApplyConfiguration { +func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ClusterRoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go index 625ad72c4..7775bff0f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apirbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacv1 "k8s.io/api/rbac/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. 
type ClusterRoleBindingApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` - RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` + RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -57,18 +57,18 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { +func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "") } // ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractClusterRoleBindingStatus(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { +func ExtractClusterRoleBindingStatus(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) { return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status") } -func extractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) { +func extractClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) { b := &ClusterRoleBindingApplyConfiguration{} err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRoleBinding"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *ClusterRoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *Clu // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *Cluster // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { +func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { +func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterRoleBindingApplyConfiguration { +func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *ClusterRoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go index 65ee1d4fe..a2e66d109 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go index 97df25fb6..b592753f6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go @@ -19,23 +19,23 @@ limitations under the License. 
package v1 import ( - apirbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacv1 "k8s.io/api/rbac/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -57,18 +57,18 @@ func Role(name, namespace string) *RoleApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractRole(role *apirbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { +func ExtractRole(role *rbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { return extractRole(role, fieldManager, "") } // ExtractRoleStatus is the same as ExtractRole except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractRoleStatus(role *apirbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { +func ExtractRoleStatus(role *rbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) { return extractRole(role, fieldManager, "status") } -func extractRole(role *apirbacv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) { +func extractRole(role *rbacv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) { b := &RoleApplyConfiguration{} err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1.Role"), fieldManager, b, subresource) if err != nil { @@ -86,7 +86,7 @@ func extractRole(role *apirbacv1.Role, fieldManager string, subresource string) // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfig // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura // If called multiple times, the UID field is set to the value of the last call. func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguratio // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,25 +148,25 @@ func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyCon // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleApplyConfiguration { +func (b *RoleApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleApplyConfiguration { +func (b *RoleApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleA // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Ro // overwriting an existing map entries in Labels field with the same key. func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleAppl // overwriting an existing map entries in Annotations field with the same key. func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -212,13 +212,13 @@ func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *Rol // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RoleApplyConfiguration { +func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,14 +229,14 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *RoleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go index 7270f07e4..32f12e87c 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go @@ -19,24 +19,24 @@ limitations under the License. package v1 import ( - apirbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacv1 "k8s.io/api/rbac/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` - RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"` + RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. 
func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -58,18 +58,18 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { +func ExtractRoleBinding(roleBinding *rbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { return extractRoleBinding(roleBinding, fieldManager, "") } // ExtractRoleBindingStatus is the same as ExtractRoleBinding except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractRoleBindingStatus(roleBinding *apirbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { +func ExtractRoleBindingStatus(roleBinding *rbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) { return extractRoleBinding(roleBinding, fieldManager, "status") } -func extractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) { +func extractRoleBinding(roleBinding *rbacv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) { b := &RoleBindingApplyConfiguration{} err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.RoleBinding"), fieldManager, b, subresource) if err != nil { @@ -87,7 +87,7 @@ func extractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindin // If called multiple times, the Name field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApply // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBind // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding // If called multiple times, the UID field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,25 +149,25 @@ func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleB // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { +func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { +func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *R // overwriting an existing map entries in Annotations field with the same key. func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -213,13 +213,13 @@ func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *RoleBindingApplyConfiguration { +func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,14 +230,14 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *RoleBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go index ef03a4882..646a3bb19 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go index ebc87fdc4..e1d9c5cfb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. 
type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go index 63cdc3fcc..ff4aeb59e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go index 19b1180fa..ecc75d340 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -85,7 +85,7 @@ func extractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRol // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterR // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *Clust // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRole // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *C // overwriting an existing map entries in Annotations field with the same key. 
func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go index a1723efc3..3b8c43a39 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -85,7 +85,7 @@ func extractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBindi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *ClusterRoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *Clu // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *Cluster // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *Clus // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go index 12143af13..89d7a2914 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. 
func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go index cd256397a..3fbd98543 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -86,7 +86,7 @@ func extractRole(role *rbacv1alpha1.Role, fieldManager string, subresource strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfig // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura // If called multiple times, the UID field is set to the value of the last call. 
func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguratio // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyCon // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfigura // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleA // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleA // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Ro // overwriting an existing map entries in Labels field with the same key. func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleAppl // overwriting an existing map entries in Annotations field with the same key. 
func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go index a0ec20d0b..37c0d37cf 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -87,7 +87,7 @@ func extractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the APIVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindin // If called multiple times, the Name field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBind // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding // If called multiple times, the UID field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleB // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBinding // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *R // overwriting an existing map entries in Annotations field with the same key. func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go index 40dbc3307..4b2553117 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go index 46640dbbe..665b42af5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go index d52ac3db9..e9bb68dcb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go @@ -22,13 +22,13 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// AggregationRuleApplyConfiguration represents an declarative configuration of the AggregationRule type for use +// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use // with apply. type AggregationRuleApplyConfiguration struct { ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"` } -// AggregationRuleApplyConfiguration constructs an declarative configuration of the AggregationRule type for use with +// AggregationRuleApplyConfiguration constructs a declarative configuration of the AggregationRule type for use with // apply. 
func AggregationRule() *AggregationRuleApplyConfiguration { return &AggregationRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go index cf714ecc2..124e47ef7 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleApplyConfiguration represents an declarative configuration of the ClusterRole type for use +// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use // with apply. type ClusterRoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleApplyConfiguration struct { AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"` } -// ClusterRole constructs an declarative configuration of the ClusterRole type for use with +// ClusterRole constructs a declarative configuration of the ClusterRole type for use with // apply. func ClusterRole(name string) *ClusterRoleApplyConfiguration { b := &ClusterRoleApplyConfiguration{} @@ -85,7 +85,7 @@ func extractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleApplyConfiguration) WithKind(value string) *ClusterRoleApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleApplyConfiguration) WithAPIVersion(value string) *ClusterRol // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleApplyConfiguration) WithName(value string) *ClusterRoleApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleApplyConfiguration) WithGenerateName(value string) *ClusterR // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleApplyConfiguration) WithNamespace(value string) *ClusterRole // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleApplyConfiguration) WithUID(value types.UID) *ClusterRoleApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ClusterRoleApplyConfiguration) WithResourceVersion(value string) *Clust // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ClusterRoleApplyConfiguration) WithGeneration(value int64) *ClusterRole // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ClusterRoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. 
func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleApplyConfiguration) WithLabels(entries map[string]string) *C // overwriting an existing map entries in Annotations field with the same key. func (b *ClusterRoleApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ClusterRoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *ClusterRoleApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -259,3 +259,9 @@ func (b *ClusterRoleApplyConfiguration) WithAggregationRule(value *AggregationRu b.AggregationRule = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go index b97cbcba2..140e7e176 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ClusterRoleBindingApplyConfiguration represents an declarative configuration of the ClusterRoleBinding type for use +// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use // with apply. 
type ClusterRoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type ClusterRoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// ClusterRoleBinding constructs an declarative configuration of the ClusterRoleBinding type for use with +// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with // apply. func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration { b := &ClusterRoleBindingApplyConfiguration{} @@ -85,7 +85,7 @@ func extractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBindin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithKind(value string) *ClusterRo // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *ClusterRoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithAPIVersion(value string) *Clu // If called multiple times, the Name field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithName(value string) *ClusterRo // If called multiple times, the GenerateName field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithNamespace(value string) *Clus // If called multiple times, the UID field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithUID(value types.UID) *Cluster // If called multiple times, the ResourceVersion field is set to the value of the last call. 
func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithGeneration(value int64) *Clus // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *ClusterRoleBindingApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. 
func (b *ClusterRoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *ClusterRoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *ClusterRoleBindingApplyConfiguration) WithFinalizers(values ...string) *ClusterRoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -259,3 +259,9 @@ func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyCo b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ClusterRoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go index c63dc68c6..dc630df20 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// PolicyRuleApplyConfiguration represents an declarative configuration of the PolicyRule type for use +// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use // with apply. type PolicyRuleApplyConfiguration struct { Verbs []string `json:"verbs,omitempty"` @@ -28,7 +28,7 @@ type PolicyRuleApplyConfiguration struct { NonResourceURLs []string `json:"nonResourceURLs,omitempty"` } -// PolicyRuleApplyConfiguration constructs an declarative configuration of the PolicyRule type for use with +// PolicyRuleApplyConfiguration constructs a declarative configuration of the PolicyRule type for use with // apply. 
func PolicyRule() *PolicyRuleApplyConfiguration { return &PolicyRuleApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go index 53a751eb3..82240514f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleApplyConfiguration represents an declarative configuration of the Role type for use +// RoleApplyConfiguration represents a declarative configuration of the Role type for use // with apply. type RoleApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type RoleApplyConfiguration struct { Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"` } -// Role constructs an declarative configuration of the Role type for use with +// Role constructs a declarative configuration of the Role type for use with // apply. func Role(name, namespace string) *RoleApplyConfiguration { b := &RoleApplyConfiguration{} @@ -86,7 +86,7 @@ func extractRole(role *rbacv1beta1.Role, fieldManager string, subresource string // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -94,7 +94,7 @@ func (b *RoleApplyConfiguration) WithKind(value string) *RoleApplyConfiguration // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -103,7 +103,7 @@ func (b *RoleApplyConfiguration) WithAPIVersion(value string) *RoleApplyConfigur // If called multiple times, the Name field is set to the value of the last call. func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -112,7 +112,7 @@ func (b *RoleApplyConfiguration) WithName(value string) *RoleApplyConfiguration // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -121,7 +121,7 @@ func (b *RoleApplyConfiguration) WithGenerateName(value string) *RoleApplyConfig // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -130,7 +130,7 @@ func (b *RoleApplyConfiguration) WithNamespace(value string) *RoleApplyConfigura // If called multiple times, the UID field is set to the value of the last call. 
func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -139,7 +139,7 @@ func (b *RoleApplyConfiguration) WithUID(value types.UID) *RoleApplyConfiguratio // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -148,7 +148,7 @@ func (b *RoleApplyConfiguration) WithResourceVersion(value string) *RoleApplyCon // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -157,7 +157,7 @@ func (b *RoleApplyConfiguration) WithGeneration(value int64) *RoleApplyConfigura // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -166,7 +166,7 @@ func (b *RoleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleA // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -175,7 +175,7 @@ func (b *RoleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleA // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -185,11 +185,11 @@ func (b *RoleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *Ro // overwriting an existing map entries in Labels field with the same key. func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -200,11 +200,11 @@ func (b *RoleApplyConfiguration) WithLabels(entries map[string]string) *RoleAppl // overwriting an existing map entries in Annotations field with the same key. 
func (b *RoleApplyConfiguration) WithAnnotations(entries map[string]string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,7 +218,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -229,7 +229,7 @@ func (b *RoleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenc func (b *RoleApplyConfiguration) WithFinalizers(values ...string) *RoleApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -252,3 +252,9 @@ func (b *RoleApplyConfiguration) WithRules(values ...*PolicyRuleApplyConfigurati } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *RoleApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go index ecccdf91b..1c66b976e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// RoleBindingApplyConfiguration represents an declarative configuration of the RoleBinding type for use +// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use // with apply. type RoleBindingApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type RoleBindingApplyConfiguration struct { RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"` } -// RoleBinding constructs an declarative configuration of the RoleBinding type for use with +// RoleBinding constructs a declarative configuration of the RoleBinding type for use with // apply. func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration { b := &RoleBindingApplyConfiguration{} @@ -87,7 +87,7 @@ func extractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager strin // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -95,7 +95,7 @@ func (b *RoleBindingApplyConfiguration) WithKind(value string) *RoleBindingApply // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the APIVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindingApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -104,7 +104,7 @@ func (b *RoleBindingApplyConfiguration) WithAPIVersion(value string) *RoleBindin // If called multiple times, the Name field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -113,7 +113,7 @@ func (b *RoleBindingApplyConfiguration) WithName(value string) *RoleBindingApply // If called multiple times, the GenerateName field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -122,7 +122,7 @@ func (b *RoleBindingApplyConfiguration) WithGenerateName(value string) *RoleBind // If called multiple times, the Namespace field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -131,7 +131,7 @@ func (b *RoleBindingApplyConfiguration) WithNamespace(value string) *RoleBinding // If called multiple times, the UID field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -140,7 +140,7 @@ func (b *RoleBindingApplyConfiguration) WithUID(value types.UID) *RoleBindingApp // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -149,7 +149,7 @@ func (b *RoleBindingApplyConfiguration) WithResourceVersion(value string) *RoleB // If called multiple times, the Generation field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -158,7 +158,7 @@ func (b *RoleBindingApplyConfiguration) WithGeneration(value int64) *RoleBinding // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -167,7 +167,7 @@ func (b *RoleBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) // If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -176,7 +176,7 @@ func (b *RoleBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -186,11 +186,11 @@ func (b *RoleBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int // overwriting an existing map entries in Labels field with the same key. func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -201,11 +201,11 @@ func (b *RoleBindingApplyConfiguration) WithLabels(entries map[string]string) *R // overwriting an existing map entries in Annotations field with the same key. func (b *RoleBindingApplyConfiguration) WithAnnotations(entries map[string]string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -219,7 +219,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -230,7 +230,7 @@ func (b *RoleBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR func (b *RoleBindingApplyConfiguration) WithFinalizers(values ...string) *RoleBindingApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -261,3 +261,9 @@ func (b *RoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfigura b.RoleRef = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *RoleBindingApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go index e6a02dc60..19d0420a8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// RoleRefApplyConfiguration represents an declarative configuration of the RoleRef type for use +// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use // with apply. type RoleRefApplyConfiguration struct { APIGroup *string `json:"apiGroup,omitempty"` @@ -26,7 +26,7 @@ type RoleRefApplyConfiguration struct { Name *string `json:"name,omitempty"` } -// RoleRefApplyConfiguration constructs an declarative configuration of the RoleRef type for use with +// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with // apply. func RoleRef() *RoleRefApplyConfiguration { return &RoleRefApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go index b616da8b1..f7c1a21a9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// SubjectApplyConfiguration represents an declarative configuration of the Subject type for use +// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { Kind *string `json:"kind,omitempty"` @@ -27,7 +27,7 @@ type SubjectApplyConfiguration struct { Namespace *string `json:"namespace,omitempty"` } -// SubjectApplyConfiguration constructs an declarative configuration of the Subject type for use with +// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with // apply. func Subject() *SubjectApplyConfiguration { return &SubjectApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go deleted file mode 100644 index bc6078aa9..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha2 - -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// AllocationResultApplyConfiguration represents an declarative configuration of the AllocationResult type for use -// with apply. -type AllocationResultApplyConfiguration struct { - ResourceHandles []ResourceHandleApplyConfiguration `json:"resourceHandles,omitempty"` - AvailableOnNodes *v1.NodeSelectorApplyConfiguration `json:"availableOnNodes,omitempty"` - Shareable *bool `json:"shareable,omitempty"` -} - -// AllocationResultApplyConfiguration constructs an declarative configuration of the AllocationResult type for use with -// apply. -func AllocationResult() *AllocationResultApplyConfiguration { - return &AllocationResultApplyConfiguration{} -} - -// WithResourceHandles adds the given value to the ResourceHandles field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ResourceHandles field. -func (b *AllocationResultApplyConfiguration) WithResourceHandles(values ...*ResourceHandleApplyConfiguration) *AllocationResultApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithResourceHandles") - } - b.ResourceHandles = append(b.ResourceHandles, *values[i]) - } - return b -} - -// WithAvailableOnNodes sets the AvailableOnNodes field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AvailableOnNodes field is set to the value of the last call. -func (b *AllocationResultApplyConfiguration) WithAvailableOnNodes(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration { - b.AvailableOnNodes = value - return b -} - -// WithShareable sets the Shareable field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Shareable field is set to the value of the last call. -func (b *AllocationResultApplyConfiguration) WithShareable(value bool) *AllocationResultApplyConfiguration { - b.Shareable = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresultmodel.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresultmodel.go deleted file mode 100644 index 0c8be0e6a..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresultmodel.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// AllocationResultModelApplyConfiguration represents an declarative configuration of the AllocationResultModel type for use -// with apply. 
-type AllocationResultModelApplyConfiguration struct { - NamedResources *NamedResourcesAllocationResultApplyConfiguration `json:"namedResources,omitempty"` -} - -// AllocationResultModelApplyConfiguration constructs an declarative configuration of the AllocationResultModel type for use with -// apply. -func AllocationResultModel() *AllocationResultModelApplyConfiguration { - return &AllocationResultModelApplyConfiguration{} -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *AllocationResultModelApplyConfiguration) WithNamedResources(value *NamedResourcesAllocationResultApplyConfiguration) *AllocationResultModelApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverallocationresult.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverallocationresult.go deleted file mode 100644 index a1f082fad..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverallocationresult.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DriverAllocationResultApplyConfiguration represents an declarative configuration of the DriverAllocationResult type for use -// with apply. -type DriverAllocationResultApplyConfiguration struct { - VendorRequestParameters *runtime.RawExtension `json:"vendorRequestParameters,omitempty"` - AllocationResultModelApplyConfiguration `json:",inline"` -} - -// DriverAllocationResultApplyConfiguration constructs an declarative configuration of the DriverAllocationResult type for use with -// apply. -func DriverAllocationResult() *DriverAllocationResultApplyConfiguration { - return &DriverAllocationResultApplyConfiguration{} -} - -// WithVendorRequestParameters sets the VendorRequestParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VendorRequestParameters field is set to the value of the last call. -func (b *DriverAllocationResultApplyConfiguration) WithVendorRequestParameters(value runtime.RawExtension) *DriverAllocationResultApplyConfiguration { - b.VendorRequestParameters = &value - return b -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. 
-func (b *DriverAllocationResultApplyConfiguration) WithNamedResources(value *NamedResourcesAllocationResultApplyConfiguration) *DriverAllocationResultApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverrequests.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverrequests.go deleted file mode 100644 index 805291578..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/driverrequests.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DriverRequestsApplyConfiguration represents an declarative configuration of the DriverRequests type for use -// with apply. -type DriverRequestsApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` - VendorParameters *runtime.RawExtension `json:"vendorParameters,omitempty"` - Requests []ResourceRequestApplyConfiguration `json:"requests,omitempty"` -} - -// DriverRequestsApplyConfiguration constructs an declarative configuration of the DriverRequests type for use with -// apply. -func DriverRequests() *DriverRequestsApplyConfiguration { - return &DriverRequestsApplyConfiguration{} -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *DriverRequestsApplyConfiguration) WithDriverName(value string) *DriverRequestsApplyConfiguration { - b.DriverName = &value - return b -} - -// WithVendorParameters sets the VendorParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VendorParameters field is set to the value of the last call. -func (b *DriverRequestsApplyConfiguration) WithVendorParameters(value runtime.RawExtension) *DriverRequestsApplyConfiguration { - b.VendorParameters = &value - return b -} - -// WithRequests adds the given value to the Requests field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Requests field. 
-func (b *DriverRequestsApplyConfiguration) WithRequests(values ...*ResourceRequestApplyConfiguration) *DriverRequestsApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithRequests") - } - b.Requests = append(b.Requests, *values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesallocationresult.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesallocationresult.go deleted file mode 100644 index 311edbac8..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesallocationresult.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesAllocationResultApplyConfiguration represents an declarative configuration of the NamedResourcesAllocationResult type for use -// with apply. -type NamedResourcesAllocationResultApplyConfiguration struct { - Name *string `json:"name,omitempty"` -} - -// NamedResourcesAllocationResultApplyConfiguration constructs an declarative configuration of the NamedResourcesAllocationResult type for use with -// apply. -func NamedResourcesAllocationResult() *NamedResourcesAllocationResultApplyConfiguration { - return &NamedResourcesAllocationResultApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *NamedResourcesAllocationResultApplyConfiguration) WithName(value string) *NamedResourcesAllocationResultApplyConfiguration { - b.Name = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattribute.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattribute.go deleted file mode 100644 index d9545d054..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattribute.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha2 - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" -) - -// NamedResourcesAttributeApplyConfiguration represents an declarative configuration of the NamedResourcesAttribute type for use -// with apply. -type NamedResourcesAttributeApplyConfiguration struct { - Name *string `json:"name,omitempty"` - NamedResourcesAttributeValueApplyConfiguration `json:",inline"` -} - -// NamedResourcesAttributeApplyConfiguration constructs an declarative configuration of the NamedResourcesAttribute type for use with -// apply. -func NamedResourcesAttribute() *NamedResourcesAttributeApplyConfiguration { - return &NamedResourcesAttributeApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithName(value string) *NamedResourcesAttributeApplyConfiguration { - b.Name = &value - return b -} - -// WithQuantityValue sets the QuantityValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the QuantityValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithQuantityValue(value resource.Quantity) *NamedResourcesAttributeApplyConfiguration { - b.QuantityValue = &value - return b -} - -// WithBoolValue sets the BoolValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the BoolValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithBoolValue(value bool) *NamedResourcesAttributeApplyConfiguration { - b.BoolValue = &value - return b -} - -// WithIntValue sets the IntValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IntValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithIntValue(value int64) *NamedResourcesAttributeApplyConfiguration { - b.IntValue = &value - return b -} - -// WithIntSliceValue sets the IntSliceValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IntSliceValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithIntSliceValue(value *NamedResourcesIntSliceApplyConfiguration) *NamedResourcesAttributeApplyConfiguration { - b.IntSliceValue = value - return b -} - -// WithStringValue sets the StringValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StringValue field is set to the value of the last call. 
-func (b *NamedResourcesAttributeApplyConfiguration) WithStringValue(value string) *NamedResourcesAttributeApplyConfiguration { - b.StringValue = &value - return b -} - -// WithStringSliceValue sets the StringSliceValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StringSliceValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithStringSliceValue(value *NamedResourcesStringSliceApplyConfiguration) *NamedResourcesAttributeApplyConfiguration { - b.StringSliceValue = value - return b -} - -// WithVersionValue sets the VersionValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VersionValue field is set to the value of the last call. -func (b *NamedResourcesAttributeApplyConfiguration) WithVersionValue(value string) *NamedResourcesAttributeApplyConfiguration { - b.VersionValue = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattributevalue.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattributevalue.go deleted file mode 100644 index e0b19650a..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesattributevalue.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" -) - -// NamedResourcesAttributeValueApplyConfiguration represents an declarative configuration of the NamedResourcesAttributeValue type for use -// with apply. -type NamedResourcesAttributeValueApplyConfiguration struct { - QuantityValue *resource.Quantity `json:"quantity,omitempty"` - BoolValue *bool `json:"bool,omitempty"` - IntValue *int64 `json:"int,omitempty"` - IntSliceValue *NamedResourcesIntSliceApplyConfiguration `json:"intSlice,omitempty"` - StringValue *string `json:"string,omitempty"` - StringSliceValue *NamedResourcesStringSliceApplyConfiguration `json:"stringSlice,omitempty"` - VersionValue *string `json:"version,omitempty"` -} - -// NamedResourcesAttributeValueApplyConfiguration constructs an declarative configuration of the NamedResourcesAttributeValue type for use with -// apply. -func NamedResourcesAttributeValue() *NamedResourcesAttributeValueApplyConfiguration { - return &NamedResourcesAttributeValueApplyConfiguration{} -} - -// WithQuantityValue sets the QuantityValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the QuantityValue field is set to the value of the last call. 
-func (b *NamedResourcesAttributeValueApplyConfiguration) WithQuantityValue(value resource.Quantity) *NamedResourcesAttributeValueApplyConfiguration { - b.QuantityValue = &value - return b -} - -// WithBoolValue sets the BoolValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the BoolValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithBoolValue(value bool) *NamedResourcesAttributeValueApplyConfiguration { - b.BoolValue = &value - return b -} - -// WithIntValue sets the IntValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IntValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithIntValue(value int64) *NamedResourcesAttributeValueApplyConfiguration { - b.IntValue = &value - return b -} - -// WithIntSliceValue sets the IntSliceValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IntSliceValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithIntSliceValue(value *NamedResourcesIntSliceApplyConfiguration) *NamedResourcesAttributeValueApplyConfiguration { - b.IntSliceValue = value - return b -} - -// WithStringValue sets the StringValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StringValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithStringValue(value string) *NamedResourcesAttributeValueApplyConfiguration { - b.StringValue = &value - return b -} - -// WithStringSliceValue sets the StringSliceValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StringSliceValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithStringSliceValue(value *NamedResourcesStringSliceApplyConfiguration) *NamedResourcesAttributeValueApplyConfiguration { - b.StringSliceValue = value - return b -} - -// WithVersionValue sets the VersionValue field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VersionValue field is set to the value of the last call. -func (b *NamedResourcesAttributeValueApplyConfiguration) WithVersionValue(value string) *NamedResourcesAttributeValueApplyConfiguration { - b.VersionValue = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesfilter.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesfilter.go deleted file mode 100644 index e483d8622..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesfilter.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesFilterApplyConfiguration represents an declarative configuration of the NamedResourcesFilter type for use -// with apply. -type NamedResourcesFilterApplyConfiguration struct { - Selector *string `json:"selector,omitempty"` -} - -// NamedResourcesFilterApplyConfiguration constructs an declarative configuration of the NamedResourcesFilter type for use with -// apply. -func NamedResourcesFilter() *NamedResourcesFilterApplyConfiguration { - return &NamedResourcesFilterApplyConfiguration{} -} - -// WithSelector sets the Selector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Selector field is set to the value of the last call. -func (b *NamedResourcesFilterApplyConfiguration) WithSelector(value string) *NamedResourcesFilterApplyConfiguration { - b.Selector = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesinstance.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesinstance.go deleted file mode 100644 index 4f01372e4..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesinstance.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesInstanceApplyConfiguration represents an declarative configuration of the NamedResourcesInstance type for use -// with apply. -type NamedResourcesInstanceApplyConfiguration struct { - Name *string `json:"name,omitempty"` - Attributes []NamedResourcesAttributeApplyConfiguration `json:"attributes,omitempty"` -} - -// NamedResourcesInstanceApplyConfiguration constructs an declarative configuration of the NamedResourcesInstance type for use with -// apply. -func NamedResourcesInstance() *NamedResourcesInstanceApplyConfiguration { - return &NamedResourcesInstanceApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. 
-func (b *NamedResourcesInstanceApplyConfiguration) WithName(value string) *NamedResourcesInstanceApplyConfiguration { - b.Name = &value - return b -} - -// WithAttributes adds the given value to the Attributes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Attributes field. -func (b *NamedResourcesInstanceApplyConfiguration) WithAttributes(values ...*NamedResourcesAttributeApplyConfiguration) *NamedResourcesInstanceApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAttributes") - } - b.Attributes = append(b.Attributes, *values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesintslice.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesintslice.go deleted file mode 100644 index ea00bffe5..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesintslice.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesIntSliceApplyConfiguration represents an declarative configuration of the NamedResourcesIntSlice type for use -// with apply. -type NamedResourcesIntSliceApplyConfiguration struct { - Ints []int64 `json:"ints,omitempty"` -} - -// NamedResourcesIntSliceApplyConfiguration constructs an declarative configuration of the NamedResourcesIntSlice type for use with -// apply. -func NamedResourcesIntSlice() *NamedResourcesIntSliceApplyConfiguration { - return &NamedResourcesIntSliceApplyConfiguration{} -} - -// WithInts adds the given value to the Ints field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Ints field. -func (b *NamedResourcesIntSliceApplyConfiguration) WithInts(values ...int64) *NamedResourcesIntSliceApplyConfiguration { - for i := range values { - b.Ints = append(b.Ints, values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesrequest.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesrequest.go deleted file mode 100644 index 5adfd84ee..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesrequest.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesRequestApplyConfiguration represents an declarative configuration of the NamedResourcesRequest type for use -// with apply. -type NamedResourcesRequestApplyConfiguration struct { - Selector *string `json:"selector,omitempty"` -} - -// NamedResourcesRequestApplyConfiguration constructs an declarative configuration of the NamedResourcesRequest type for use with -// apply. -func NamedResourcesRequest() *NamedResourcesRequestApplyConfiguration { - return &NamedResourcesRequestApplyConfiguration{} -} - -// WithSelector sets the Selector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Selector field is set to the value of the last call. -func (b *NamedResourcesRequestApplyConfiguration) WithSelector(value string) *NamedResourcesRequestApplyConfiguration { - b.Selector = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesresources.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesresources.go deleted file mode 100644 index f01ff8699..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesresources.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesResourcesApplyConfiguration represents an declarative configuration of the NamedResourcesResources type for use -// with apply. -type NamedResourcesResourcesApplyConfiguration struct { - Instances []NamedResourcesInstanceApplyConfiguration `json:"instances,omitempty"` -} - -// NamedResourcesResourcesApplyConfiguration constructs an declarative configuration of the NamedResourcesResources type for use with -// apply. -func NamedResourcesResources() *NamedResourcesResourcesApplyConfiguration { - return &NamedResourcesResourcesApplyConfiguration{} -} - -// WithInstances adds the given value to the Instances field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Instances field. 
-func (b *NamedResourcesResourcesApplyConfiguration) WithInstances(values ...*NamedResourcesInstanceApplyConfiguration) *NamedResourcesResourcesApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithInstances") - } - b.Instances = append(b.Instances, *values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesstringslice.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesstringslice.go deleted file mode 100644 index 1e9387354..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/namedresourcesstringslice.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// NamedResourcesStringSliceApplyConfiguration represents an declarative configuration of the NamedResourcesStringSlice type for use -// with apply. -type NamedResourcesStringSliceApplyConfiguration struct { - Strings []string `json:"strings,omitempty"` -} - -// NamedResourcesStringSliceApplyConfiguration constructs an declarative configuration of the NamedResourcesStringSlice type for use with -// apply. -func NamedResourcesStringSlice() *NamedResourcesStringSliceApplyConfiguration { - return &NamedResourcesStringSliceApplyConfiguration{} -} - -// WithStrings adds the given value to the Strings field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Strings field. -func (b *NamedResourcesStringSliceApplyConfiguration) WithStrings(values ...string) *NamedResourcesStringSliceApplyConfiguration { - for i := range values { - b.Strings = append(b.Strings, values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go deleted file mode 100644 index 1dfb6ff97..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontext.go +++ /dev/null @@ -1,258 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// PodSchedulingContextApplyConfiguration represents an declarative configuration of the PodSchedulingContext type for use -// with apply. -type PodSchedulingContextApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSchedulingContextSpecApplyConfiguration `json:"spec,omitempty"` - Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"` -} - -// PodSchedulingContext constructs an declarative configuration of the PodSchedulingContext type for use with -// apply. -func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration { - b := &PodSchedulingContextApplyConfiguration{} - b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("PodSchedulingContext") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b -} - -// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from -// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a -// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// podSchedulingContext must be a unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API. -// ExtractPodSchedulingContext provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { - return extractPodSchedulingContext(podSchedulingContext, fieldManager, "") -} - -// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except -// that it extracts the status subresource applied configuration. -// Experimental! 
-func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { - return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status") -} - -func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha2.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { - b := &PodSchedulingContextApplyConfiguration{} - err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha2.PodSchedulingContext"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(podSchedulingContext.Name) - b.WithNamespace(podSchedulingContext.Namespace) - - b.WithKind("PodSchedulingContext") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. 
-func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. 
-func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration { - b.Spec = value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. 
-func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration { - b.Status = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go deleted file mode 100644 index c95d3295e..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// PodSchedulingContextSpecApplyConfiguration represents an declarative configuration of the PodSchedulingContextSpec type for use -// with apply. -type PodSchedulingContextSpecApplyConfiguration struct { - SelectedNode *string `json:"selectedNode,omitempty"` - PotentialNodes []string `json:"potentialNodes,omitempty"` -} - -// PodSchedulingContextSpecApplyConfiguration constructs an declarative configuration of the PodSchedulingContextSpec type for use with -// apply. -func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration { - return &PodSchedulingContextSpecApplyConfiguration{} -} - -// WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelectedNode field is set to the value of the last call. -func (b *PodSchedulingContextSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingContextSpecApplyConfiguration { - b.SelectedNode = &value - return b -} - -// WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the PotentialNodes field. -func (b *PodSchedulingContextSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingContextSpecApplyConfiguration { - for i := range values { - b.PotentialNodes = append(b.PotentialNodes, values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go deleted file mode 100644 index a8b10b9a0..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// PodSchedulingContextStatusApplyConfiguration represents an declarative configuration of the PodSchedulingContextStatus type for use -// with apply. -type PodSchedulingContextStatusApplyConfiguration struct { - ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` -} - -// PodSchedulingContextStatusApplyConfiguration constructs an declarative configuration of the PodSchedulingContextStatus type for use with -// apply. -func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration { - return &PodSchedulingContextStatusApplyConfiguration{} -} - -// WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ResourceClaims field. -func (b *PodSchedulingContextStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingContextStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithResourceClaims") - } - b.ResourceClaims = append(b.ResourceClaims, *values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go deleted file mode 100644 index 6c219f837..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaim.go +++ /dev/null @@ -1,258 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ResourceClaimApplyConfiguration represents an declarative configuration of the ResourceClaim type for use -// with apply. 
-type ResourceClaimApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` - Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"` -} - -// ResourceClaim constructs an declarative configuration of the ResourceClaim type for use with -// apply. -func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { - b := &ResourceClaimApplyConfiguration{} - b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b -} - -// ExtractResourceClaim extracts the applied configuration owned by fieldManager from -// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a -// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// resourceClaim must be a unmodified ResourceClaim API object that was retrieved from the Kubernetes API. -// ExtractResourceClaim provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { - return extractResourceClaim(resourceClaim, fieldManager, "") -} - -// ExtractResourceClaimStatus is the same as ExtractResourceClaim except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { - return extractResourceClaim(resourceClaim, fieldManager, "status") -} - -func extractResourceClaim(resourceClaim *resourcev1alpha2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { - b := &ResourceClaimApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaim"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(resourceClaim.Name) - b.WithNamespace(resourceClaim.Namespace) - - b.WithKind("ResourceClaim") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. 
-func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ResourceClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimApplyConfiguration { - b.Spec = value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusApplyConfiguration) *ResourceClaimApplyConfiguration { - b.Status = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go deleted file mode 100644 index 41bb9e9a1..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimconsumerreference.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - types "k8s.io/apimachinery/pkg/types" -) - -// ResourceClaimConsumerReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimConsumerReference type for use -// with apply. 
-type ResourceClaimConsumerReferenceApplyConfiguration struct { - APIGroup *string `json:"apiGroup,omitempty"` - Resource *string `json:"resource,omitempty"` - Name *string `json:"name,omitempty"` - UID *types.UID `json:"uid,omitempty"` -} - -// ResourceClaimConsumerReferenceApplyConfiguration constructs an declarative configuration of the ResourceClaimConsumerReference type for use with -// apply. -func ResourceClaimConsumerReference() *ResourceClaimConsumerReferenceApplyConfiguration { - return &ResourceClaimConsumerReferenceApplyConfiguration{} -} - -// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIGroup field is set to the value of the last call. -func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimConsumerReferenceApplyConfiguration { - b.APIGroup = &value - return b -} - -// WithResource sets the Resource field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Resource field is set to the value of the last call. -func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithResource(value string) *ResourceClaimConsumerReferenceApplyConfiguration { - b.Resource = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithName(value string) *ResourceClaimConsumerReferenceApplyConfiguration { - b.Name = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithUID(value types.UID) *ResourceClaimConsumerReferenceApplyConfiguration { - b.UID = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparameters.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparameters.go deleted file mode 100644 index ea13570e3..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparameters.go +++ /dev/null @@ -1,272 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
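Illustrative aside, not part of the patch: every file deleted in this hunk is applyconfiguration-gen output, and its `With*` setters are meant to be chained into a single declarative configuration that is then submitted via server-side apply. A minimal sketch of that usage pattern follows, using the core/v1 ConfigMap apply configuration as a stand-in (the resource/v1alpha2 builders are exactly what this change removes, and whether core/v1 remains vendored here is an assumption); the names `demo`, `default`, and `example-manager` are made up for the example.

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applyConfigMap builds a declarative configuration by chaining "With" setters
// and submits it with server-side apply; only the fields set here become owned
// by the given field manager. Names and values are hypothetical.
func applyConfigMap(ctx context.Context, cs kubernetes.Interface) error {
	cm := corev1ac.ConfigMap("demo", "default").
		WithLabels(map[string]string{"app": "demo"}).
		WithData(map[string]string{"featureFlag": "on"})

	_, err := cs.CoreV1().ConfigMaps("default").Apply(ctx, cm,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
```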
- -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ResourceClaimParametersApplyConfiguration represents an declarative configuration of the ResourceClaimParameters type for use -// with apply. -type ResourceClaimParametersApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - GeneratedFrom *ResourceClaimParametersReferenceApplyConfiguration `json:"generatedFrom,omitempty"` - Shareable *bool `json:"shareable,omitempty"` - DriverRequests []DriverRequestsApplyConfiguration `json:"driverRequests,omitempty"` -} - -// ResourceClaimParameters constructs an declarative configuration of the ResourceClaimParameters type for use with -// apply. -func ResourceClaimParameters(name, namespace string) *ResourceClaimParametersApplyConfiguration { - b := &ResourceClaimParametersApplyConfiguration{} - b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("ResourceClaimParameters") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b -} - -// ExtractResourceClaimParameters extracts the applied configuration owned by fieldManager from -// resourceClaimParameters. If no managedFields are found in resourceClaimParameters for fieldManager, a -// ResourceClaimParametersApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// resourceClaimParameters must be a unmodified ResourceClaimParameters API object that was retrieved from the Kubernetes API. -// ExtractResourceClaimParameters provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractResourceClaimParameters(resourceClaimParameters *resourcev1alpha2.ResourceClaimParameters, fieldManager string) (*ResourceClaimParametersApplyConfiguration, error) { - return extractResourceClaimParameters(resourceClaimParameters, fieldManager, "") -} - -// ExtractResourceClaimParametersStatus is the same as ExtractResourceClaimParameters except -// that it extracts the status subresource applied configuration. -// Experimental! 
-func ExtractResourceClaimParametersStatus(resourceClaimParameters *resourcev1alpha2.ResourceClaimParameters, fieldManager string) (*ResourceClaimParametersApplyConfiguration, error) { - return extractResourceClaimParameters(resourceClaimParameters, fieldManager, "status") -} - -func extractResourceClaimParameters(resourceClaimParameters *resourcev1alpha2.ResourceClaimParameters, fieldManager string, subresource string) (*ResourceClaimParametersApplyConfiguration, error) { - b := &ResourceClaimParametersApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaimParameters, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimParameters"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(resourceClaimParameters.Name) - b.WithNamespace(resourceClaimParameters.Namespace) - - b.WithKind("ResourceClaimParameters") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithKind(value string) *ResourceClaimParametersApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithAPIVersion(value string) *ResourceClaimParametersApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithName(value string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithGenerateName(value string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithNamespace(value string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithUID(value types.UID) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithResourceVersion(value string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithGeneration(value int64) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. 
-func (b *ResourceClaimParametersApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceClaimParametersApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceClaimParametersApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ResourceClaimParametersApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ResourceClaimParametersApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithGeneratedFrom sets the GeneratedFrom field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GeneratedFrom field is set to the value of the last call. -func (b *ResourceClaimParametersApplyConfiguration) WithGeneratedFrom(value *ResourceClaimParametersReferenceApplyConfiguration) *ResourceClaimParametersApplyConfiguration { - b.GeneratedFrom = value - return b -} - -// WithShareable sets the Shareable field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Shareable field is set to the value of the last call. 
-func (b *ResourceClaimParametersApplyConfiguration) WithShareable(value bool) *ResourceClaimParametersApplyConfiguration { - b.Shareable = &value - return b -} - -// WithDriverRequests adds the given value to the DriverRequests field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the DriverRequests field. -func (b *ResourceClaimParametersApplyConfiguration) WithDriverRequests(values ...*DriverRequestsApplyConfiguration) *ResourceClaimParametersApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithDriverRequests") - } - b.DriverRequests = append(b.DriverRequests, *values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go deleted file mode 100644 index 27820ede6..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceClaimParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClaimParametersReference type for use -// with apply. -type ResourceClaimParametersReferenceApplyConfiguration struct { - APIGroup *string `json:"apiGroup,omitempty"` - Kind *string `json:"kind,omitempty"` - Name *string `json:"name,omitempty"` -} - -// ResourceClaimParametersReferenceApplyConfiguration constructs an declarative configuration of the ResourceClaimParametersReference type for use with -// apply. -func ResourceClaimParametersReference() *ResourceClaimParametersReferenceApplyConfiguration { - return &ResourceClaimParametersReferenceApplyConfiguration{} -} - -// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIGroup field is set to the value of the last call. -func (b *ResourceClaimParametersReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimParametersReferenceApplyConfiguration { - b.APIGroup = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. 
-func (b *ResourceClaimParametersReferenceApplyConfiguration) WithKind(value string) *ResourceClaimParametersReferenceApplyConfiguration { - b.Kind = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimParametersReferenceApplyConfiguration) WithName(value string) *ResourceClaimParametersReferenceApplyConfiguration { - b.Name = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go deleted file mode 100644 index e74679aed..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimschedulingstatus.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceClaimSchedulingStatusApplyConfiguration represents an declarative configuration of the ResourceClaimSchedulingStatus type for use -// with apply. -type ResourceClaimSchedulingStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - UnsuitableNodes []string `json:"unsuitableNodes,omitempty"` -} - -// ResourceClaimSchedulingStatusApplyConfiguration constructs an declarative configuration of the ResourceClaimSchedulingStatus type for use with -// apply. -func ResourceClaimSchedulingStatus() *ResourceClaimSchedulingStatusApplyConfiguration { - return &ResourceClaimSchedulingStatusApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithName(value string) *ResourceClaimSchedulingStatusApplyConfiguration { - b.Name = &value - return b -} - -// WithUnsuitableNodes adds the given value to the UnsuitableNodes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the UnsuitableNodes field. 
-func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithUnsuitableNodes(values ...string) *ResourceClaimSchedulingStatusApplyConfiguration { - for i := range values { - b.UnsuitableNodes = append(b.UnsuitableNodes, values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go deleted file mode 100644 index 0c73e64e9..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" -) - -// ResourceClaimSpecApplyConfiguration represents an declarative configuration of the ResourceClaimSpec type for use -// with apply. -type ResourceClaimSpecApplyConfiguration struct { - ResourceClassName *string `json:"resourceClassName,omitempty"` - ParametersRef *ResourceClaimParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` - AllocationMode *resourcev1alpha2.AllocationMode `json:"allocationMode,omitempty"` -} - -// ResourceClaimSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimSpec type for use with -// apply. -func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration { - return &ResourceClaimSpecApplyConfiguration{} -} - -// WithResourceClassName sets the ResourceClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceClassName field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithResourceClassName(value string) *ResourceClaimSpecApplyConfiguration { - b.ResourceClassName = &value - return b -} - -// WithParametersRef sets the ParametersRef field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ParametersRef field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithParametersRef(value *ResourceClaimParametersReferenceApplyConfiguration) *ResourceClaimSpecApplyConfiguration { - b.ParametersRef = value - return b -} - -// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AllocationMode field is set to the value of the last call. 
-func (b *ResourceClaimSpecApplyConfiguration) WithAllocationMode(value resourcev1alpha2.AllocationMode) *ResourceClaimSpecApplyConfiguration { - b.AllocationMode = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go deleted file mode 100644 index c6fa61090..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimstatus.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceClaimStatusApplyConfiguration represents an declarative configuration of the ResourceClaimStatus type for use -// with apply. -type ResourceClaimStatusApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` - Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` - ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` - DeallocationRequested *bool `json:"deallocationRequested,omitempty"` -} - -// ResourceClaimStatusApplyConfiguration constructs an declarative configuration of the ResourceClaimStatus type for use with -// apply. -func ResourceClaimStatus() *ResourceClaimStatusApplyConfiguration { - return &ResourceClaimStatusApplyConfiguration{} -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceClaimStatusApplyConfiguration) WithDriverName(value string) *ResourceClaimStatusApplyConfiguration { - b.DriverName = &value - return b -} - -// WithAllocation sets the Allocation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Allocation field is set to the value of the last call. -func (b *ResourceClaimStatusApplyConfiguration) WithAllocation(value *AllocationResultApplyConfiguration) *ResourceClaimStatusApplyConfiguration { - b.Allocation = value - return b -} - -// WithReservedFor adds the given value to the ReservedFor field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ReservedFor field. 
-func (b *ResourceClaimStatusApplyConfiguration) WithReservedFor(values ...*ResourceClaimConsumerReferenceApplyConfiguration) *ResourceClaimStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithReservedFor") - } - b.ReservedFor = append(b.ReservedFor, *values[i]) - } - return b -} - -// WithDeallocationRequested sets the DeallocationRequested field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeallocationRequested field is set to the value of the last call. -func (b *ResourceClaimStatusApplyConfiguration) WithDeallocationRequested(value bool) *ResourceClaimStatusApplyConfiguration { - b.DeallocationRequested = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go deleted file mode 100644 index fc2209b8f..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplate.go +++ /dev/null @@ -1,249 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ResourceClaimTemplateApplyConfiguration represents an declarative configuration of the ResourceClaimTemplate type for use -// with apply. -type ResourceClaimTemplateApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"` -} - -// ResourceClaimTemplate constructs an declarative configuration of the ResourceClaimTemplate type for use with -// apply. -func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration { - b := &ResourceClaimTemplateApplyConfiguration{} - b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b -} - -// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from -// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a -// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. 
It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API. -// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { - return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") -} - -// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { - return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") -} - -func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { - b := &ResourceClaimTemplateApplyConfiguration{} - err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClaimTemplate"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(resourceClaimTemplate.Name) - b.WithNamespace(resourceClaimTemplate.Namespace) - - b.WithKind("ResourceClaimTemplate") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. 
-func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ResourceClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimTemplateSpecApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { - b.Spec = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go deleted file mode 100644 index 2f38ea036..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimtemplatespec.go +++ /dev/null @@ -1,188 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ResourceClaimTemplateSpecApplyConfiguration represents an declarative configuration of the ResourceClaimTemplateSpec type for use -// with apply. -type ResourceClaimTemplateSpecApplyConfiguration struct { - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` -} - -// ResourceClaimTemplateSpecApplyConfiguration constructs an declarative configuration of the ResourceClaimTemplateSpec type for use with -// apply. -func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration { - return &ResourceClaimTemplateSpecApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *ResourceClaimTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateSpecApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ResourceClaimTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { - b.Spec = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go deleted file mode 100644 index 364fda9d0..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclass.go +++ /dev/null @@ -1,275 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ResourceClassApplyConfiguration represents an declarative configuration of the ResourceClass type for use -// with apply. -type ResourceClassApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - DriverName *string `json:"driverName,omitempty"` - ParametersRef *ResourceClassParametersReferenceApplyConfiguration `json:"parametersRef,omitempty"` - SuitableNodes *corev1.NodeSelectorApplyConfiguration `json:"suitableNodes,omitempty"` - StructuredParameters *bool `json:"structuredParameters,omitempty"` -} - -// ResourceClass constructs an declarative configuration of the ResourceClass type for use with -// apply. -func ResourceClass(name string) *ResourceClassApplyConfiguration { - b := &ResourceClassApplyConfiguration{} - b.WithName(name) - b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b -} - -// ExtractResourceClass extracts the applied configuration owned by fieldManager from -// resourceClass. 
If no managedFields are found in resourceClass for fieldManager, a -// ResourceClassApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// resourceClass must be a unmodified ResourceClass API object that was retrieved from the Kubernetes API. -// ExtractResourceClass provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { - return extractResourceClass(resourceClass, fieldManager, "") -} - -// ExtractResourceClassStatus is the same as ExtractResourceClass except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractResourceClassStatus(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string) (*ResourceClassApplyConfiguration, error) { - return extractResourceClass(resourceClass, fieldManager, "status") -} - -func extractResourceClass(resourceClass *resourcev1alpha2.ResourceClass, fieldManager string, subresource string) (*ResourceClassApplyConfiguration, error) { - b := &ResourceClassApplyConfiguration{} - err := managedfields.ExtractInto(resourceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClass"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(resourceClass.Name) - - b.WithKind("ResourceClass") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithKind(value string) *ResourceClassApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithAPIVersion(value string) *ResourceClassApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithName(value string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. 
-func (b *ResourceClassApplyConfiguration) WithGenerateName(value string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithNamespace(value string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithUID(value types.UID) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithResourceVersion(value string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithGeneration(value int64) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
-func (b *ResourceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *ResourceClassApplyConfiguration) WithLabels(entries map[string]string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceClassApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ResourceClassApplyConfiguration) WithFinalizers(values ...string) *ResourceClassApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ResourceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. 
-func (b *ResourceClassApplyConfiguration) WithDriverName(value string) *ResourceClassApplyConfiguration { - b.DriverName = &value - return b -} - -// WithParametersRef sets the ParametersRef field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ParametersRef field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithParametersRef(value *ResourceClassParametersReferenceApplyConfiguration) *ResourceClassApplyConfiguration { - b.ParametersRef = value - return b -} - -// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SuitableNodes field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithSuitableNodes(value *corev1.NodeSelectorApplyConfiguration) *ResourceClassApplyConfiguration { - b.SuitableNodes = value - return b -} - -// WithStructuredParameters sets the StructuredParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StructuredParameters field is set to the value of the last call. -func (b *ResourceClassApplyConfiguration) WithStructuredParameters(value bool) *ResourceClassApplyConfiguration { - b.StructuredParameters = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparameters.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparameters.go deleted file mode 100644 index 028d0d612..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparameters.go +++ /dev/null @@ -1,277 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ResourceClassParametersApplyConfiguration represents an declarative configuration of the ResourceClassParameters type for use -// with apply. 
-type ResourceClassParametersApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - GeneratedFrom *ResourceClassParametersReferenceApplyConfiguration `json:"generatedFrom,omitempty"` - VendorParameters []VendorParametersApplyConfiguration `json:"vendorParameters,omitempty"` - Filters []ResourceFilterApplyConfiguration `json:"filters,omitempty"` -} - -// ResourceClassParameters constructs an declarative configuration of the ResourceClassParameters type for use with -// apply. -func ResourceClassParameters(name, namespace string) *ResourceClassParametersApplyConfiguration { - b := &ResourceClassParametersApplyConfiguration{} - b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("ResourceClassParameters") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b -} - -// ExtractResourceClassParameters extracts the applied configuration owned by fieldManager from -// resourceClassParameters. If no managedFields are found in resourceClassParameters for fieldManager, a -// ResourceClassParametersApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// resourceClassParameters must be a unmodified ResourceClassParameters API object that was retrieved from the Kubernetes API. -// ExtractResourceClassParameters provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractResourceClassParameters(resourceClassParameters *resourcev1alpha2.ResourceClassParameters, fieldManager string) (*ResourceClassParametersApplyConfiguration, error) { - return extractResourceClassParameters(resourceClassParameters, fieldManager, "") -} - -// ExtractResourceClassParametersStatus is the same as ExtractResourceClassParameters except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractResourceClassParametersStatus(resourceClassParameters *resourcev1alpha2.ResourceClassParameters, fieldManager string) (*ResourceClassParametersApplyConfiguration, error) { - return extractResourceClassParameters(resourceClassParameters, fieldManager, "status") -} - -func extractResourceClassParameters(resourceClassParameters *resourcev1alpha2.ResourceClassParameters, fieldManager string, subresource string) (*ResourceClassParametersApplyConfiguration, error) { - b := &ResourceClassParametersApplyConfiguration{} - err := managedfields.ExtractInto(resourceClassParameters, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceClassParameters"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(resourceClassParameters.Name) - b.WithNamespace(resourceClassParameters.Namespace) - - b.WithKind("ResourceClassParameters") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithKind(value string) *ResourceClassParametersApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithAPIVersion(value string) *ResourceClassParametersApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithName(value string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithGenerateName(value string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithNamespace(value string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithUID(value types.UID) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithResourceVersion(value string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. 
-func (b *ResourceClassParametersApplyConfiguration) WithGeneration(value int64) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *ResourceClassParametersApplyConfiguration) WithLabels(entries map[string]string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceClassParametersApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceClassParametersApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ResourceClassParametersApplyConfiguration) WithFinalizers(values ...string) *ResourceClassParametersApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ResourceClassParametersApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithGeneratedFrom sets the GeneratedFrom field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GeneratedFrom field is set to the value of the last call. -func (b *ResourceClassParametersApplyConfiguration) WithGeneratedFrom(value *ResourceClassParametersReferenceApplyConfiguration) *ResourceClassParametersApplyConfiguration { - b.GeneratedFrom = value - return b -} - -// WithVendorParameters adds the given value to the VendorParameters field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the VendorParameters field. -func (b *ResourceClassParametersApplyConfiguration) WithVendorParameters(values ...*VendorParametersApplyConfiguration) *ResourceClassParametersApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithVendorParameters") - } - b.VendorParameters = append(b.VendorParameters, *values[i]) - } - return b -} - -// WithFilters adds the given value to the Filters field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Filters field. -func (b *ResourceClassParametersApplyConfiguration) WithFilters(values ...*ResourceFilterApplyConfiguration) *ResourceClassParametersApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithFilters") - } - b.Filters = append(b.Filters, *values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go deleted file mode 100644 index d67e4d397..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclassparametersreference.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceClassParametersReferenceApplyConfiguration represents an declarative configuration of the ResourceClassParametersReference type for use -// with apply. -type ResourceClassParametersReferenceApplyConfiguration struct { - APIGroup *string `json:"apiGroup,omitempty"` - Kind *string `json:"kind,omitempty"` - Name *string `json:"name,omitempty"` - Namespace *string `json:"namespace,omitempty"` -} - -// ResourceClassParametersReferenceApplyConfiguration constructs an declarative configuration of the ResourceClassParametersReference type for use with -// apply. -func ResourceClassParametersReference() *ResourceClassParametersReferenceApplyConfiguration { - return &ResourceClassParametersReferenceApplyConfiguration{} -} - -// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIGroup field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.APIGroup = &value - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithKind(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.Kind = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClassParametersReferenceApplyConfiguration) WithName(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.Name = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. 
-func (b *ResourceClassParametersReferenceApplyConfiguration) WithNamespace(value string) *ResourceClassParametersReferenceApplyConfiguration { - b.Namespace = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefilter.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefilter.go deleted file mode 100644 index 15371b44a..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefilter.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceFilterApplyConfiguration represents an declarative configuration of the ResourceFilter type for use -// with apply. -type ResourceFilterApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` - ResourceFilterModelApplyConfiguration `json:",inline"` -} - -// ResourceFilterApplyConfiguration constructs an declarative configuration of the ResourceFilter type for use with -// apply. -func ResourceFilter() *ResourceFilterApplyConfiguration { - return &ResourceFilterApplyConfiguration{} -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceFilterApplyConfiguration) WithDriverName(value string) *ResourceFilterApplyConfiguration { - b.DriverName = &value - return b -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceFilterApplyConfiguration) WithNamedResources(value *NamedResourcesFilterApplyConfiguration) *ResourceFilterApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefiltermodel.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefiltermodel.go deleted file mode 100644 index 4f8d138f7..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcefiltermodel.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceFilterModelApplyConfiguration represents an declarative configuration of the ResourceFilterModel type for use -// with apply. -type ResourceFilterModelApplyConfiguration struct { - NamedResources *NamedResourcesFilterApplyConfiguration `json:"namedResources,omitempty"` -} - -// ResourceFilterModelApplyConfiguration constructs an declarative configuration of the ResourceFilterModel type for use with -// apply. -func ResourceFilterModel() *ResourceFilterModelApplyConfiguration { - return &ResourceFilterModelApplyConfiguration{} -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceFilterModelApplyConfiguration) WithNamedResources(value *NamedResourcesFilterApplyConfiguration) *ResourceFilterModelApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go deleted file mode 100644 index b4f3da735..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceHandleApplyConfiguration represents an declarative configuration of the ResourceHandle type for use -// with apply. -type ResourceHandleApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` - Data *string `json:"data,omitempty"` - StructuredData *StructuredResourceHandleApplyConfiguration `json:"structuredData,omitempty"` -} - -// ResourceHandleApplyConfiguration constructs an declarative configuration of the ResourceHandle type for use with -// apply. -func ResourceHandle() *ResourceHandleApplyConfiguration { - return &ResourceHandleApplyConfiguration{} -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceHandleApplyConfiguration) WithDriverName(value string) *ResourceHandleApplyConfiguration { - b.DriverName = &value - return b -} - -// WithData sets the Data field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the Data field is set to the value of the last call. -func (b *ResourceHandleApplyConfiguration) WithData(value string) *ResourceHandleApplyConfiguration { - b.Data = &value - return b -} - -// WithStructuredData sets the StructuredData field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the StructuredData field is set to the value of the last call. -func (b *ResourceHandleApplyConfiguration) WithStructuredData(value *StructuredResourceHandleApplyConfiguration) *ResourceHandleApplyConfiguration { - b.StructuredData = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcemodel.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcemodel.go deleted file mode 100644 index 8ad7bdf23..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcemodel.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceModelApplyConfiguration represents an declarative configuration of the ResourceModel type for use -// with apply. -type ResourceModelApplyConfiguration struct { - NamedResources *NamedResourcesResourcesApplyConfiguration `json:"namedResources,omitempty"` -} - -// ResourceModelApplyConfiguration constructs an declarative configuration of the ResourceModel type for use with -// apply. -func ResourceModel() *ResourceModelApplyConfiguration { - return &ResourceModelApplyConfiguration{} -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceModelApplyConfiguration) WithNamedResources(value *NamedResourcesResourcesApplyConfiguration) *ResourceModelApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequest.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequest.go deleted file mode 100644 index 0243d06f8..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequest.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// ResourceRequestApplyConfiguration represents an declarative configuration of the ResourceRequest type for use -// with apply. -type ResourceRequestApplyConfiguration struct { - VendorParameters *runtime.RawExtension `json:"vendorParameters,omitempty"` - ResourceRequestModelApplyConfiguration `json:",inline"` -} - -// ResourceRequestApplyConfiguration constructs an declarative configuration of the ResourceRequest type for use with -// apply. -func ResourceRequest() *ResourceRequestApplyConfiguration { - return &ResourceRequestApplyConfiguration{} -} - -// WithVendorParameters sets the VendorParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VendorParameters field is set to the value of the last call. -func (b *ResourceRequestApplyConfiguration) WithVendorParameters(value runtime.RawExtension) *ResourceRequestApplyConfiguration { - b.VendorParameters = &value - return b -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceRequestApplyConfiguration) WithNamedResources(value *NamedResourcesRequestApplyConfiguration) *ResourceRequestApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequestmodel.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequestmodel.go deleted file mode 100644 index 35bd1d88f..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcerequestmodel.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -// ResourceRequestModelApplyConfiguration represents an declarative configuration of the ResourceRequestModel type for use -// with apply. 
-type ResourceRequestModelApplyConfiguration struct { - NamedResources *NamedResourcesRequestApplyConfiguration `json:"namedResources,omitempty"` -} - -// ResourceRequestModelApplyConfiguration constructs an declarative configuration of the ResourceRequestModel type for use with -// apply. -func ResourceRequestModel() *ResourceRequestModelApplyConfiguration { - return &ResourceRequestModelApplyConfiguration{} -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceRequestModelApplyConfiguration) WithNamedResources(value *NamedResourcesRequestApplyConfiguration) *ResourceRequestModelApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceslice.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceslice.go deleted file mode 100644 index ff737ce67..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceslice.go +++ /dev/null @@ -1,265 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ResourceSliceApplyConfiguration represents an declarative configuration of the ResourceSlice type for use -// with apply. -type ResourceSliceApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - NodeName *string `json:"nodeName,omitempty"` - DriverName *string `json:"driverName,omitempty"` - ResourceModelApplyConfiguration `json:",inline"` -} - -// ResourceSlice constructs an declarative configuration of the ResourceSlice type for use with -// apply. -func ResourceSlice(name string) *ResourceSliceApplyConfiguration { - b := &ResourceSliceApplyConfiguration{} - b.WithName(name) - b.WithKind("ResourceSlice") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b -} - -// ExtractResourceSlice extracts the applied configuration owned by fieldManager from -// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a -// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. 
It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API. -// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractResourceSlice(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { - return extractResourceSlice(resourceSlice, fieldManager, "") -} - -// ExtractResourceSliceStatus is the same as ExtractResourceSlice except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractResourceSliceStatus(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { - return extractResourceSlice(resourceSlice, fieldManager, "status") -} - -func extractResourceSlice(resourceSlice *resourcev1alpha2.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) { - b := &ResourceSliceApplyConfiguration{} - err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1alpha2.ResourceSlice"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(resourceSlice.Name) - - b.WithKind("ResourceSlice") - b.WithAPIVersion("resource.k8s.io/v1alpha2") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. 
-func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
-func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithNodeName sets the NodeName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodeName field is set to the value of the last call. 
-func (b *ResourceSliceApplyConfiguration) WithNodeName(value string) *ResourceSliceApplyConfiguration { - b.NodeName = &value - return b -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithDriverName(value string) *ResourceSliceApplyConfiguration { - b.DriverName = &value - return b -} - -// WithNamedResources sets the NamedResources field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamedResources field is set to the value of the last call. -func (b *ResourceSliceApplyConfiguration) WithNamedResources(value *NamedResourcesResourcesApplyConfiguration) *ResourceSliceApplyConfiguration { - b.NamedResources = value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/structuredresourcehandle.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/structuredresourcehandle.go deleted file mode 100644 index e6efcbfef..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/structuredresourcehandle.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// StructuredResourceHandleApplyConfiguration represents an declarative configuration of the StructuredResourceHandle type for use -// with apply. -type StructuredResourceHandleApplyConfiguration struct { - VendorClassParameters *runtime.RawExtension `json:"vendorClassParameters,omitempty"` - VendorClaimParameters *runtime.RawExtension `json:"vendorClaimParameters,omitempty"` - NodeName *string `json:"nodeName,omitempty"` - Results []DriverAllocationResultApplyConfiguration `json:"results,omitempty"` -} - -// StructuredResourceHandleApplyConfiguration constructs an declarative configuration of the StructuredResourceHandle type for use with -// apply. -func StructuredResourceHandle() *StructuredResourceHandleApplyConfiguration { - return &StructuredResourceHandleApplyConfiguration{} -} - -// WithVendorClassParameters sets the VendorClassParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VendorClassParameters field is set to the value of the last call. 
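For orientation before the hunk continues: the v1alpha2 files being deleted above exposed the old named-resources model through the same chained-builder pattern. A minimal sketch of how such a ResourceSlice apply configuration was assembled before this change; it only compiles against the older client-go release being removed here, and the node and driver names are invented (the `NamedResourcesResources()` constructor is assumed to follow the usual generated naming for the `NamedResourcesResourcesApplyConfiguration` type referenced above):

```go
package example

import (
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha2"
)

// buildSlice sketches the pre-v1alpha3 builder chain deleted by this diff:
// a ResourceSlice tied to one node and driver, carrying the inlined
// named-resources model.
func buildSlice() *resourceapply.ResourceSliceApplyConfiguration {
	return resourceapply.ResourceSlice("node-1-gpu-slice").
		WithNodeName("node-1").
		WithDriverName("dra.example.com"). // illustrative driver name
		WithNamedResources(resourceapply.NamedResourcesResources())
}
```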
-func (b *StructuredResourceHandleApplyConfiguration) WithVendorClassParameters(value runtime.RawExtension) *StructuredResourceHandleApplyConfiguration { - b.VendorClassParameters = &value - return b -} - -// WithVendorClaimParameters sets the VendorClaimParameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VendorClaimParameters field is set to the value of the last call. -func (b *StructuredResourceHandleApplyConfiguration) WithVendorClaimParameters(value runtime.RawExtension) *StructuredResourceHandleApplyConfiguration { - b.VendorClaimParameters = &value - return b -} - -// WithNodeName sets the NodeName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodeName field is set to the value of the last call. -func (b *StructuredResourceHandleApplyConfiguration) WithNodeName(value string) *StructuredResourceHandleApplyConfiguration { - b.NodeName = &value - return b -} - -// WithResults adds the given value to the Results field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Results field. -func (b *StructuredResourceHandleApplyConfiguration) WithResults(values ...*DriverAllocationResultApplyConfiguration) *StructuredResourceHandleApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithResults") - } - b.Results = append(b.Results, *values[i]) - } - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/vendorparameters.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/vendorparameters.go deleted file mode 100644 index f7a8ff9ec..000000000 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/vendorparameters.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// VendorParametersApplyConfiguration represents an declarative configuration of the VendorParameters type for use -// with apply. -type VendorParametersApplyConfiguration struct { - DriverName *string `json:"driverName,omitempty"` - Parameters *runtime.RawExtension `json:"parameters,omitempty"` -} - -// VendorParametersApplyConfiguration constructs an declarative configuration of the VendorParameters type for use with -// apply. 
-func VendorParameters() *VendorParametersApplyConfiguration { - return &VendorParametersApplyConfiguration{} -} - -// WithDriverName sets the DriverName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. -func (b *VendorParametersApplyConfiguration) WithDriverName(value string) *VendorParametersApplyConfiguration { - b.DriverName = &value - return b -} - -// WithParameters sets the Parameters field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Parameters field is set to the value of the last call. -func (b *VendorParametersApplyConfiguration) WithParameters(value runtime.RawExtension) *VendorParametersApplyConfiguration { - b.Parameters = &value - return b -} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocateddevicestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocateddevicestatus.go new file mode 100644 index 000000000..da58d4348 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocateddevicestatus.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// AllocatedDeviceStatusApplyConfiguration represents a declarative configuration of the AllocatedDeviceStatus type for use +// with apply. +type AllocatedDeviceStatusApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Data *runtime.RawExtension `json:"data,omitempty"` + NetworkData *NetworkDeviceDataApplyConfiguration `json:"networkData,omitempty"` +} + +// AllocatedDeviceStatusApplyConfiguration constructs a declarative configuration of the AllocatedDeviceStatus type for use with +// apply. +func AllocatedDeviceStatus() *AllocatedDeviceStatusApplyConfiguration { + return &AllocatedDeviceStatusApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. 
+func (b *AllocatedDeviceStatusApplyConfiguration) WithDriver(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithPool(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Pool = &value + return b +} + +// WithDevice sets the Device field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Device field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithDevice(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Device = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *AllocatedDeviceStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithData sets the Data field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Data field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithData(value runtime.RawExtension) *AllocatedDeviceStatusApplyConfiguration { + b.Data = &value + return b +} + +// WithNetworkData sets the NetworkData field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetworkData field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithNetworkData(value *NetworkDeviceDataApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration { + b.NetworkData = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go new file mode 100644 index 000000000..7c7427ee9 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
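The new AllocatedDeviceStatus builder added above follows the same chaining pattern as the rest of the package. A hedged sketch of how a DRA driver might report status for one allocated device; the driver, pool, device, and condition values are made up, and the condition builder comes from the meta/v1 apply configurations:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

// deviceReady builds the status entry for a single allocated device.
// All identifiers below are illustrative, not part of the generated API.
func deviceReady() *resourceapply.AllocatedDeviceStatusApplyConfiguration {
	return resourceapply.AllocatedDeviceStatus().
		WithDriver("dra.example.com").
		WithPool("pool-a").
		WithDevice("gpu-0").
		WithConditions(metav1apply.Condition().
			WithType("Ready").
			WithStatus(metav1.ConditionTrue).
			WithReason("DeviceConfigured"))
}
```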
+ +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use +// with apply. +type AllocationResultApplyConfiguration struct { + Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` +} + +// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with +// apply. +func AllocationResult() *AllocationResultApplyConfiguration { + return &AllocationResultApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithDevices(value *DeviceAllocationResultApplyConfiguration) *AllocationResultApplyConfiguration { + b.Devices = value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration { + b.NodeSelector = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go new file mode 100644 index 000000000..b58e43294 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use +// with apply. +type BasicDeviceApplyConfiguration struct { + Attributes map[resourcev1alpha3.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"` + Capacity map[resourcev1alpha3.QualifiedName]resource.Quantity `json:"capacity,omitempty"` +} + +// BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with +// apply. +func BasicDevice() *BasicDeviceApplyConfiguration { + return &BasicDeviceApplyConfiguration{} +} + +// WithAttributes puts the entries into the Attributes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, the entries provided by each call will be put on the Attributes field, +// overwriting an existing map entries in Attributes field with the same key. +func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[resourcev1alpha3.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration { + if b.Attributes == nil && len(entries) > 0 { + b.Attributes = make(map[resourcev1alpha3.QualifiedName]DeviceAttributeApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.Attributes[k] = v + } + return b +} + +// WithCapacity puts the entries into the Capacity field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Capacity field, +// overwriting an existing map entries in Capacity field with the same key. +func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[resourcev1alpha3.QualifiedName]resource.Quantity) *BasicDeviceApplyConfiguration { + if b.Capacity == nil && len(entries) > 0 { + b.Capacity = make(map[resourcev1alpha3.QualifiedName]resource.Quantity, len(entries)) + } + for k, v := range entries { + b.Capacity[k] = v + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go new file mode 100644 index 000000000..c59b6a2e3 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use +// with apply. +type CELDeviceSelectorApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` +} + +// CELDeviceSelectorApplyConfiguration constructs a declarative configuration of the CELDeviceSelector type for use with +// apply. +func CELDeviceSelector() *CELDeviceSelectorApplyConfiguration { + return &CELDeviceSelectorApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. 
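Note that WithAttributes and WithCapacity above merge map entries rather than appending, so repeated calls accumulate keys and later calls overwrite earlier values for the same key. A minimal sketch of publishing one device's attributes and capacity; the qualified names and values are illustrative:

```go
package example

import (
	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	"k8s.io/apimachinery/pkg/api/resource"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

// describeDevice fills in vendor attributes and capacity for a single device.
func describeDevice() *resourceapply.BasicDeviceApplyConfiguration {
	return resourceapply.BasicDevice().
		WithAttributes(map[resourcev1alpha3.QualifiedName]resourceapply.DeviceAttributeApplyConfiguration{
			// Each attribute carries exactly one of the value fields.
			"dra.example.com/model":   *resourceapply.DeviceAttribute().WithStringValue("a100"),
			"dra.example.com/healthy": *resourceapply.DeviceAttribute().WithBoolValue(true),
		}).
		WithCapacity(map[resourcev1alpha3.QualifiedName]resource.Quantity{
			"dra.example.com/memory": resource.MustParse("40Gi"),
		})
}
```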
+func (b *CELDeviceSelectorApplyConfiguration) WithExpression(value string) *CELDeviceSelectorApplyConfiguration { + b.Expression = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go new file mode 100644 index 000000000..efdb5f37a --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceApplyConfiguration represents a declarative configuration of the Device type for use +// with apply. +type DeviceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Basic *BasicDeviceApplyConfiguration `json:"basic,omitempty"` +} + +// DeviceApplyConfiguration constructs a declarative configuration of the Device type for use with +// apply. +func Device() *DeviceApplyConfiguration { + return &DeviceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithName(value string) *DeviceApplyConfiguration { + b.Name = &value + return b +} + +// WithBasic sets the Basic field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Basic field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithBasic(value *BasicDeviceApplyConfiguration) *DeviceApplyConfiguration { + b.Basic = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go new file mode 100644 index 000000000..25907e40d --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
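Device pairs a name with the basic device description, while CELDeviceSelector carries a CEL expression evaluated against published devices. A short sketch combining the two builders shown above; the device name and CEL expression are examples, not taken from this diff:

```go
package example

import (
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

// gpuDevice names a device entry as it would appear in a ResourceSlice spec.
func gpuDevice() *resourceapply.DeviceApplyConfiguration {
	return resourceapply.Device().
		WithName("gpu-0").
		WithBasic(resourceapply.BasicDevice())
}

// gpuSelector matches devices by driver using a CEL expression.
func gpuSelector() *resourceapply.CELDeviceSelectorApplyConfiguration {
	return resourceapply.CELDeviceSelector().
		WithExpression(`device.driver == "dra.example.com"`)
}
```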
+ +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" +) + +// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use +// with apply. +type DeviceAllocationConfigurationApplyConfiguration struct { + Source *resourcev1alpha3.AllocationConfigSource `json:"source,omitempty"` + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceAllocationConfigurationApplyConfiguration constructs a declarative configuration of the DeviceAllocationConfiguration type for use with +// apply. +func DeviceAllocationConfiguration() *DeviceAllocationConfigurationApplyConfiguration { + return &DeviceAllocationConfigurationApplyConfiguration{} +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value resourcev1alpha3.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration { + b.Source = &value + return b +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceAllocationConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceAllocationConfigurationApplyConfiguration { + b.DeviceConfigurationApplyConfiguration.Opaque = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go new file mode 100644 index 000000000..0cfb264b4 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use +// with apply. 
+type DeviceAllocationResultApplyConfiguration struct { + Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"` + Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with +// apply. +func DeviceAllocationResult() *DeviceAllocationResultApplyConfiguration { + return &DeviceAllocationResultApplyConfiguration{} +} + +// WithResults adds the given value to the Results field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Results field. +func (b *DeviceAllocationResultApplyConfiguration) WithResults(values ...*DeviceRequestAllocationResultApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResults") + } + b.Results = append(b.Results, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceAllocationResultApplyConfiguration) WithConfig(values ...*DeviceAllocationConfigurationApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go new file mode 100644 index 000000000..6b0b7a40a --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use +// with apply. +type DeviceAttributeApplyConfiguration struct { + IntValue *int64 `json:"int,omitempty"` + BoolValue *bool `json:"bool,omitempty"` + StringValue *string `json:"string,omitempty"` + VersionValue *string `json:"version,omitempty"` +} + +// DeviceAttributeApplyConfiguration constructs a declarative configuration of the DeviceAttribute type for use with +// apply. 
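DeviceAllocationResult aggregates per-request results and any configuration that applied to them. A hedged sketch of assembling one, using the DeviceAllocationConfiguration builder from the previous file; the `DeviceRequestAllocationResult` builder and the `AllocationConfigSourceClass` constant come from sibling files not shown in this hunk, and all names are illustrative:

```go
package example

import (
	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

// allocation records that request "gpu" was satisfied by one concrete device,
// with a class-sourced configuration attached to that request.
func allocation() *resourceapply.DeviceAllocationResultApplyConfiguration {
	return resourceapply.DeviceAllocationResult().
		WithResults(resourceapply.DeviceRequestAllocationResult().
			WithRequest("gpu").
			WithDriver("dra.example.com").
			WithPool("pool-a").
			WithDevice("gpu-0")).
		WithConfig(resourceapply.DeviceAllocationConfiguration().
			WithSource(resourcev1alpha3.AllocationConfigSourceClass).
			WithRequests("gpu"))
}
```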
+func DeviceAttribute() *DeviceAttributeApplyConfiguration { + return &DeviceAttributeApplyConfiguration{} +} + +// WithIntValue sets the IntValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IntValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithIntValue(value int64) *DeviceAttributeApplyConfiguration { + b.IntValue = &value + return b +} + +// WithBoolValue sets the BoolValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BoolValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithBoolValue(value bool) *DeviceAttributeApplyConfiguration { + b.BoolValue = &value + return b +} + +// WithStringValue sets the StringValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StringValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithStringValue(value string) *DeviceAttributeApplyConfiguration { + b.StringValue = &value + return b +} + +// WithVersionValue sets the VersionValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VersionValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithVersionValue(value string) *DeviceAttributeApplyConfiguration { + b.VersionValue = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go new file mode 100644 index 000000000..ce3ab56d8 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use +// with apply. +type DeviceClaimApplyConfiguration struct { + Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"` + Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"` + Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with +// apply. 
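DeviceAttribute exposes four value setters, but the API treats the fields as a one-of: each attribute is expected to set exactly one of the int, bool, string, or version values. A short illustrative sketch:

```go
package example

import (
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

// Each attribute sets exactly one value field; combining setters on the
// same attribute produces an object the API does not consider valid.
var (
	driverVersion = resourceapply.DeviceAttribute().WithVersionValue("1.2.3")
	numaNode      = resourceapply.DeviceAttribute().WithIntValue(0)
)
```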
+func DeviceClaim() *DeviceClaimApplyConfiguration { + return &DeviceClaimApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimApplyConfiguration) WithRequests(values ...*DeviceRequestApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRequests") + } + b.Requests = append(b.Requests, *values[i]) + } + return b +} + +// WithConstraints adds the given value to the Constraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Constraints field. +func (b *DeviceClaimApplyConfiguration) WithConstraints(values ...*DeviceConstraintApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConstraints") + } + b.Constraints = append(b.Constraints, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceClaimApplyConfiguration) WithConfig(values ...*DeviceClaimConfigurationApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go new file mode 100644 index 000000000..045798856 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use +// with apply. +type DeviceClaimConfigurationApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClaimConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClaimConfiguration type for use with +// apply. 
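DeviceClaim collects the requests, cross-request constraints, and configuration for a claim. A hedged sketch; the `DeviceRequest` and `DeviceConstraint` builders live in sibling files of this package that are not part of this hunk, and the class and attribute names are invented:

```go
package example

import (
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

// twoGPUs requests two devices of an example class and requires that
// both allocated devices report the same vendor "model" attribute.
func twoGPUs() *resourceapply.DeviceClaimApplyConfiguration {
	return resourceapply.DeviceClaim().
		WithRequests(resourceapply.DeviceRequest().
			WithName("gpus").
			WithDeviceClassName("example.com-gpu").
			WithCount(2)).
		WithConstraints(resourceapply.DeviceConstraint().
			WithRequests("gpus").
			WithMatchAttribute("dra.example.com/model"))
}
```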
+func DeviceClaimConfiguration() *DeviceClaimConfigurationApplyConfiguration { + return &DeviceClaimConfigurationApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceClaimConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceClaimConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClaimConfigurationApplyConfiguration { + b.DeviceConfigurationApplyConfiguration.Opaque = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go new file mode 100644 index 000000000..ae3e396e0 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use +// with apply. +type DeviceClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"` +} + +// DeviceClass constructs a declarative configuration of the DeviceClass type for use with +// apply. +func DeviceClass(name string) *DeviceClassApplyConfiguration { + b := &DeviceClassApplyConfiguration{} + b.WithName(name) + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b +} + +// ExtractDeviceClass extracts the applied configuration owned by fieldManager from +// deviceClass. If no managedFields are found in deviceClass for fieldManager, a +// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. 
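DeviceClaimConfiguration inlines DeviceConfiguration, which is why the generated WithOpaque setter above writes through the embedded struct. A sketch of attaching opaque, driver-specific parameters to one request; the `OpaqueDeviceConfiguration` builder is defined in a sibling file of this package, and the driver name and JSON payload are placeholders:

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

// gpuTuning passes vendor-specific settings for the "gpus" request as raw JSON.
func gpuTuning() *resourceapply.DeviceClaimConfigurationApplyConfiguration {
	return resourceapply.DeviceClaimConfiguration().
		WithRequests("gpus").
		WithOpaque(resourceapply.OpaqueDeviceConfiguration().
			WithDriver("dra.example.com").
			WithParameters(runtime.RawExtension{
				Raw: []byte(`{"sharing":"timeslice"}`),
			}))
}
```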
It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// deviceClass must be a unmodified DeviceClass API object that was retrieved from the Kubernetes API. +// ExtractDeviceClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "") +} + +// ExtractDeviceClassStatus is the same as ExtractDeviceClass except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractDeviceClassStatus(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "status") +} + +func extractDeviceClass(deviceClass *resourcev1alpha3.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) { + b := &DeviceClassApplyConfiguration{} + err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1alpha3.DeviceClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(deviceClass.Name) + + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClassApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. 
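The comment above describes the extract/modify-in-place/apply workflow. A hedged sketch of what that looks like for a DeviceClass, assuming a typed clientset whose resource.k8s.io/v1alpha3 client exposes Get and Apply; the field-manager and label names are examples:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
	"k8s.io/client-go/kubernetes"
)

// relabel extracts the fields this manager owns, tweaks them, and applies them back.
func relabel(ctx context.Context, cs kubernetes.Interface, name string) error {
	const fieldManager = "example-controller" // illustrative manager name

	current, err := cs.ResourceV1alpha3().DeviceClasses().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Only the fields previously applied by fieldManager end up in dc.
	dc, err := resourceapply.ExtractDeviceClass(current, fieldManager)
	if err != nil {
		return err
	}
	dc.WithLabels(map[string]string{"example.com/tier": "gpu"})

	_, err = cs.ResourceV1alpha3().DeviceClasses().Apply(ctx, dc,
		metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}
```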
+func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *DeviceClassApplyConfiguration) WithAnnotations(entries map[string]string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *DeviceClassApplyConfiguration) WithFinalizers(values ...string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *DeviceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithSpec(value *DeviceClassSpecApplyConfiguration) *DeviceClassApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *DeviceClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go new file mode 100644 index 000000000..6daa4a97e --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use +// with apply. +type DeviceClassConfigurationApplyConfiguration struct { + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClassConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClassConfiguration type for use with +// apply. +func DeviceClassConfiguration() *DeviceClassConfigurationApplyConfiguration { + return &DeviceClassConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. 
+func (b *DeviceClassConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClassConfigurationApplyConfiguration { + b.DeviceConfigurationApplyConfiguration.Opaque = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go new file mode 100644 index 000000000..37db6a1cc --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use +// with apply. +type DeviceClassSpecApplyConfiguration struct { + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with +// apply. +func DeviceClassSpec() *DeviceClassSpecApplyConfiguration { + return &DeviceClassSpecApplyConfiguration{} +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *DeviceClassSpecApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassConfigurationApplyConfiguration) *DeviceClassSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go new file mode 100644 index 000000000..62c0d997d --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use +// with apply. +type DeviceConfigurationApplyConfiguration struct { + Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"` +} + +// DeviceConfigurationApplyConfiguration constructs a declarative configuration of the DeviceConfiguration type for use with +// apply. +func DeviceConfiguration() *DeviceConfigurationApplyConfiguration { + return &DeviceConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go new file mode 100644 index 000000000..712f431f4 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go @@ -0,0 +1,54 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" +) + +// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use +// with apply. +type DeviceConstraintApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + MatchAttribute *resourcev1alpha3.FullyQualifiedName `json:"matchAttribute,omitempty"` +} + +// DeviceConstraintApplyConfiguration constructs a declarative configuration of the DeviceConstraint type for use with +// apply. +func DeviceConstraint() *DeviceConstraintApplyConfiguration { + return &DeviceConstraintApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceConstraintApplyConfiguration) WithRequests(values ...string) *DeviceConstraintApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithMatchAttribute sets the MatchAttribute field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchAttribute field is set to the value of the last call. +func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value resourcev1alpha3.FullyQualifiedName) *DeviceConstraintApplyConfiguration { + b.MatchAttribute = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go new file mode 100644 index 000000000..e5c87efe4 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" +) + +// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use +// with apply. +type DeviceRequestApplyConfiguration struct { + Name *string `json:"name,omitempty"` + DeviceClassName *string `json:"deviceClassName,omitempty"` + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + AllocationMode *resourcev1alpha3.DeviceAllocationMode `json:"allocationMode,omitempty"` + Count *int64 `json:"count,omitempty"` + AdminAccess *bool `json:"adminAccess,omitempty"` +} + +// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with +// apply. +func DeviceRequest() *DeviceRequestApplyConfiguration { + return &DeviceRequestApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithName(value string) *DeviceRequestApplyConfiguration { + b.Name = &value + return b +} + +// WithDeviceClassName sets the DeviceClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeviceClassName field is set to the value of the last call. 
+func (b *DeviceRequestApplyConfiguration) WithDeviceClassName(value string) *DeviceRequestApplyConfiguration { + b.DeviceClassName = &value + return b +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *DeviceRequestApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceRequestApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} + +// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocationMode field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAllocationMode(value resourcev1alpha3.DeviceAllocationMode) *DeviceRequestApplyConfiguration { + b.AllocationMode = &value + return b +} + +// WithCount sets the Count field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Count field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithCount(value int64) *DeviceRequestApplyConfiguration { + b.Count = &value + return b +} + +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdminAccess field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestApplyConfiguration { + b.AdminAccess = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go new file mode 100644 index 000000000..4c3cffcf4 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use +// with apply. 
+type DeviceRequestAllocationResultApplyConfiguration struct { + Request *string `json:"request,omitempty"` + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` + AdminAccess *bool `json:"adminAccess,omitempty"` +} + +// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with +// apply. +func DeviceRequestAllocationResult() *DeviceRequestAllocationResultApplyConfiguration { + return &DeviceRequestAllocationResultApplyConfiguration{} +} + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Request field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithRequest(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Request = &value + return b +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDriver(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithPool(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Pool = &value + return b +} + +// WithDevice sets the Device field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Device field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDevice(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Device = &value + return b +} + +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdminAccess field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestAllocationResultApplyConfiguration { + b.AdminAccess = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go new file mode 100644 index 000000000..574299d15 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use +// with apply. +type DeviceSelectorApplyConfiguration struct { + CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"` +} + +// DeviceSelectorApplyConfiguration constructs a declarative configuration of the DeviceSelector type for use with +// apply. +func DeviceSelector() *DeviceSelectorApplyConfiguration { + return &DeviceSelectorApplyConfiguration{} +} + +// WithCEL sets the CEL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CEL field is set to the value of the last call. +func (b *DeviceSelectorApplyConfiguration) WithCEL(value *CELDeviceSelectorApplyConfiguration) *DeviceSelectorApplyConfiguration { + b.CEL = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/networkdevicedata.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/networkdevicedata.go new file mode 100644 index 000000000..9ea773ed4 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/networkdevicedata.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// NetworkDeviceDataApplyConfiguration represents a declarative configuration of the NetworkDeviceData type for use +// with apply. +type NetworkDeviceDataApplyConfiguration struct { + InterfaceName *string `json:"interfaceName,omitempty"` + IPs []string `json:"ips,omitempty"` + HardwareAddress *string `json:"hardwareAddress,omitempty"` +} + +// NetworkDeviceDataApplyConfiguration constructs a declarative configuration of the NetworkDeviceData type for use with +// apply. +func NetworkDeviceData() *NetworkDeviceDataApplyConfiguration { + return &NetworkDeviceDataApplyConfiguration{} +} + +// WithInterfaceName sets the InterfaceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InterfaceName field is set to the value of the last call. 
+func (b *NetworkDeviceDataApplyConfiguration) WithInterfaceName(value string) *NetworkDeviceDataApplyConfiguration { + b.InterfaceName = &value + return b +} + +// WithIPs adds the given value to the IPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IPs field. +func (b *NetworkDeviceDataApplyConfiguration) WithIPs(values ...string) *NetworkDeviceDataApplyConfiguration { + for i := range values { + b.IPs = append(b.IPs, values[i]) + } + return b +} + +// WithHardwareAddress sets the HardwareAddress field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HardwareAddress field is set to the value of the last call. +func (b *NetworkDeviceDataApplyConfiguration) WithHardwareAddress(value string) *NetworkDeviceDataApplyConfiguration { + b.HardwareAddress = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go new file mode 100644 index 000000000..caf9d059c --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use +// with apply. +type OpaqueDeviceConfigurationApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Parameters *runtime.RawExtension `json:"parameters,omitempty"` +} + +// OpaqueDeviceConfigurationApplyConfiguration constructs a declarative configuration of the OpaqueDeviceConfiguration type for use with +// apply. +func OpaqueDeviceConfiguration() *OpaqueDeviceConfigurationApplyConfiguration { + return &OpaqueDeviceConfigurationApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithDriver(value string) *OpaqueDeviceConfigurationApplyConfiguration { + b.Driver = &value + return b +} + +// WithParameters sets the Parameters field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Parameters field is set to the value of the last call. 
+func (b *OpaqueDeviceConfigurationApplyConfiguration) WithParameters(value runtime.RawExtension) *OpaqueDeviceConfigurationApplyConfiguration {
+	b.Parameters = &value
+	return b
+}
diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go
new file mode 100644
index 000000000..96cf63f1f
--- /dev/null
+++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaim.go
@@ -0,0 +1,264 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	internal "k8s.io/client-go/applyconfigurations/internal"
+	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use
+// with apply.
+type ResourceClaimApplyConfiguration struct {
+	v1.TypeMetaApplyConfiguration `json:",inline"`
+	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
+	Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with
+// apply.
+func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration {
+	b := &ResourceClaimApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("ResourceClaim")
+	b.WithAPIVersion("resource.k8s.io/v1alpha3")
+	return b
+}
+
+// ExtractResourceClaim extracts the applied configuration owned by fieldManager from
+// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a
+// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// resourceClaim must be an unmodified ResourceClaim API object that was retrieved from the Kubernetes API.
+// ExtractResourceClaim provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { + return extractResourceClaim(resourceClaim, fieldManager, "") +} + +// ExtractResourceClaimStatus is the same as ExtractResourceClaim except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceClaimStatus(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { + return extractResourceClaim(resourceClaim, fieldManager, "status") +} + +func extractResourceClaim(resourceClaim *resourcev1alpha3.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { + b := &ResourceClaimApplyConfiguration{} + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceClaim"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceClaim.Name) + b.WithNamespace(resourceClaim.Namespace) + + b.WithKind("ResourceClaim") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+	return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Labels[k] = v
+	}
+	return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+		b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+	}
+	for k, v := range entries {
+		b.ObjectMetaApplyConfiguration.Annotations[k] = v
+	}
+	return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimApplyConfiguration {
+	b.ensureObjectMetaApplyConfigurationExists()
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithOwnerReferences")
+		}
+		b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+	}
+	return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusApplyConfiguration) *ResourceClaimApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go new file mode 100644 index 000000000..96196d7c9 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimconsumerreference.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + types "k8s.io/apimachinery/pkg/types" +) + +// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use +// with apply. +type ResourceClaimConsumerReferenceApplyConfiguration struct { + APIGroup *string `json:"apiGroup,omitempty"` + Resource *string `json:"resource,omitempty"` + Name *string `json:"name,omitempty"` + UID *types.UID `json:"uid,omitempty"` +} + +// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with +// apply. 
+func ResourceClaimConsumerReference() *ResourceClaimConsumerReferenceApplyConfiguration { + return &ResourceClaimConsumerReferenceApplyConfiguration{} +} + +// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIGroup field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.APIGroup = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithResource(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithName(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithUID(value types.UID) *ResourceClaimConsumerReferenceApplyConfiguration { + b.UID = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go new file mode 100644 index 000000000..dfe8bdb14 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use +// with apply. +type ResourceClaimSpecApplyConfiguration struct { + Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with +// apply. 
+func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration { + return &ResourceClaimSpecApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimApplyConfiguration) *ResourceClaimSpecApplyConfiguration { + b.Devices = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go new file mode 100644 index 000000000..f0c32133a --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go @@ -0,0 +1,67 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use +// with apply. +type ResourceClaimStatusApplyConfiguration struct { + Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` + ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` + Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with +// apply. +func ResourceClaimStatus() *ResourceClaimStatusApplyConfiguration { + return &ResourceClaimStatusApplyConfiguration{} +} + +// WithAllocation sets the Allocation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Allocation field is set to the value of the last call. +func (b *ResourceClaimStatusApplyConfiguration) WithAllocation(value *AllocationResultApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + b.Allocation = value + return b +} + +// WithReservedFor adds the given value to the ReservedFor field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ReservedFor field. 
+func (b *ResourceClaimStatusApplyConfiguration) WithReservedFor(values ...*ResourceClaimConsumerReferenceApplyConfiguration) *ResourceClaimStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithReservedFor")
+		}
+		b.ReservedFor = append(b.ReservedFor, *values[i])
+	}
+	return b
+}
+
+// WithDevices adds the given value to the Devices field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Devices field.
+func (b *ResourceClaimStatusApplyConfiguration) WithDevices(values ...*AllocatedDeviceStatusApplyConfiguration) *ResourceClaimStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithDevices")
+		}
+		b.Devices = append(b.Devices, *values[i])
+	}
+	return b
+}
diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go
new file mode 100644
index 000000000..1eb55eee4
--- /dev/null
+++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplate.go
@@ -0,0 +1,255 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+	internal "k8s.io/client-go/applyconfigurations/internal"
+	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use
+// with apply.
+type ResourceClaimTemplateApplyConfiguration struct {
+	v1.TypeMetaApplyConfiguration `json:",inline"`
+	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with
+// apply.
+func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration {
+	b := &ResourceClaimTemplateApplyConfiguration{}
+	b.WithName(name)
+	b.WithNamespace(namespace)
+	b.WithKind("ResourceClaimTemplate")
+	b.WithAPIVersion("resource.k8s.io/v1alpha3")
+	return b
+}
+
+// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from
+// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a
+// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// resourceClaimTemplate must be an unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API.
+// ExtractResourceClaimTemplate provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
+	return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "")
+}
+
+// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
+	return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status")
+}
+
+func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) {
+	b := &ResourceClaimTemplateApplyConfiguration{}
+	err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceClaimTemplate"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(resourceClaimTemplate.Name)
+	b.WithNamespace(resourceClaimTemplate.Namespace)
+
+	b.WithKind("ResourceClaimTemplate")
+	b.WithAPIVersion("resource.k8s.io/v1alpha3")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration {
+	b.TypeMetaApplyConfiguration.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration {
+	b.TypeMetaApplyConfiguration.APIVersion = &value
+	return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimTemplateSpecApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go new file mode 100644 index 000000000..578f6bce1 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go @@ -0,0 +1,194 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use +// with apply. 
+type ResourceClaimTemplateSpecApplyConfiguration struct { + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with +// apply. +func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration { + return &ResourceClaimTemplateSpecApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go new file mode 100644 index 000000000..23825d137 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use +// with apply. +type ResourcePoolApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Generation *int64 `json:"generation,omitempty"` + ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"` +} + +// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with +// apply. +func ResourcePool() *ResourcePoolApplyConfiguration { + return &ResourcePoolApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithName(value string) *ResourcePoolApplyConfiguration { + b.Name = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithGeneration(value int64) *ResourcePoolApplyConfiguration { + b.Generation = &value + return b +} + +// WithResourceSliceCount sets the ResourceSliceCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceSliceCount field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithResourceSliceCount(value int64) *ResourcePoolApplyConfiguration { + b.ResourceSliceCount = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go new file mode 100644 index 000000000..615cf3e06 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslice.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
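As an illustration of the extract/modify-in-place/apply workflow that the ExtractResourceClaimTemplate doc comments above (and the ExtractResourceSlice ones just below) describe, here is a minimal sketch, assuming the typed clientset from k8s.io/client-go/kubernetes and a hypothetical field-manager name; the Get/Apply wiring is not part of this vendored diff:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
	"k8s.io/client-go/kubernetes"
)

// fieldManager is a hypothetical manager name, used for both Extract and Apply.
const fieldManager = "example-manager"

// relabelClaimTemplate re-applies only the fields this manager already owns,
// plus one extra label, without claiming fields owned by other managers.
func relabelClaimTemplate(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	// Fetch the live, unmodified object from the API server.
	live, err := cs.ResourceV1alpha3().ResourceClaimTemplates(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Extract the apply configuration currently owned by fieldManager.
	ac, err := resourceac.ExtractResourceClaimTemplate(live, fieldManager)
	if err != nil {
		return err
	}
	// Modify the extracted configuration in place, then apply it back.
	ac.WithLabels(map[string]string{"example.io/managed": "true"})
	_, err = cs.ResourceV1alpha3().ResourceClaimTemplates(ns).Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}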
+ +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use +// with apply. +type ResourceSliceApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with +// apply. +func ResourceSlice(name string) *ResourceSliceApplyConfiguration { + b := &ResourceSliceApplyConfiguration{} + b.WithName(name) + b.WithKind("ResourceSlice") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b +} + +// ExtractResourceSlice extracts the applied configuration owned by fieldManager from +// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a +// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API. +// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { + return extractResourceSlice(resourceSlice, fieldManager, "") +} + +// ExtractResourceSliceStatus is the same as ExtractResourceSlice except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceSliceStatus(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { + return extractResourceSlice(resourceSlice, fieldManager, "status") +} + +func extractResourceSlice(resourceSlice *resourcev1alpha3.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) { + b := &ResourceSliceApplyConfiguration{} + err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1alpha3.ResourceSlice"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceSlice.Name) + + b.WithKind("ResourceSlice") + b.WithAPIVersion("resource.k8s.io/v1alpha3") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithSpec(value *ResourceSliceSpecApplyConfiguration) *ResourceSliceApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go new file mode 100644 index 000000000..2ded75907 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use +// with apply. +type ResourceSliceSpecApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"` + NodeName *string `json:"nodeName,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + AllNodes *bool `json:"allNodes,omitempty"` + Devices []DeviceApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with +// apply. +func ResourceSliceSpec() *ResourceSliceSpecApplyConfiguration { + return &ResourceSliceSpecApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithDriver(value string) *ResourceSliceSpecApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithPool(value *ResourcePoolApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.Pool = value + return b +} + +// WithNodeName sets the NodeName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeName field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeName(value string) *ResourceSliceSpecApplyConfiguration { + b.NodeName = &value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithAllNodes sets the AllNodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllNodes field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithAllNodes(value bool) *ResourceSliceSpecApplyConfiguration { + b.AllNodes = &value + return b +} + +// WithDevices adds the given value to the Devices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Devices field. 
+func (b *ResourceSliceSpecApplyConfiguration) WithDevices(values ...*DeviceApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDevices") + } + b.Devices = append(b.Devices, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go new file mode 100644 index 000000000..cd5189771 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// AllocatedDeviceStatusApplyConfiguration represents a declarative configuration of the AllocatedDeviceStatus type for use +// with apply. +type AllocatedDeviceStatusApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` + Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` + Data *runtime.RawExtension `json:"data,omitempty"` + NetworkData *NetworkDeviceDataApplyConfiguration `json:"networkData,omitempty"` +} + +// AllocatedDeviceStatusApplyConfiguration constructs a declarative configuration of the AllocatedDeviceStatus type for use with +// apply. +func AllocatedDeviceStatus() *AllocatedDeviceStatusApplyConfiguration { + return &AllocatedDeviceStatusApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithDriver(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithPool(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Pool = &value + return b +} + +// WithDevice sets the Device field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Device field is set to the value of the last call. 
+func (b *AllocatedDeviceStatusApplyConfiguration) WithDevice(value string) *AllocatedDeviceStatusApplyConfiguration { + b.Device = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *AllocatedDeviceStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithData sets the Data field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Data field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithData(value runtime.RawExtension) *AllocatedDeviceStatusApplyConfiguration { + b.Data = &value + return b +} + +// WithNetworkData sets the NetworkData field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetworkData field is set to the value of the last call. +func (b *AllocatedDeviceStatusApplyConfiguration) WithNetworkData(value *NetworkDeviceDataApplyConfiguration) *AllocatedDeviceStatusApplyConfiguration { + b.NetworkData = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go new file mode 100644 index 000000000..549ef71af --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use +// with apply. +type AllocationResultApplyConfiguration struct { + Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` +} + +// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with +// apply. 
+func AllocationResult() *AllocationResultApplyConfiguration { + return &AllocationResultApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithDevices(value *DeviceAllocationResultApplyConfiguration) *AllocationResultApplyConfiguration { + b.Devices = value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *AllocationResultApplyConfiguration { + b.NodeSelector = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go new file mode 100644 index 000000000..691a8f15a --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go @@ -0,0 +1,64 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" +) + +// BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use +// with apply. +type BasicDeviceApplyConfiguration struct { + Attributes map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"` + Capacity map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration `json:"capacity,omitempty"` +} + +// BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with +// apply. +func BasicDevice() *BasicDeviceApplyConfiguration { + return &BasicDeviceApplyConfiguration{} +} + +// WithAttributes puts the entries into the Attributes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Attributes field, +// overwriting an existing map entries in Attributes field with the same key. 
+func (b *BasicDeviceApplyConfiguration) WithAttributes(entries map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration) *BasicDeviceApplyConfiguration { + if b.Attributes == nil && len(entries) > 0 { + b.Attributes = make(map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.Attributes[k] = v + } + return b +} + +// WithCapacity puts the entries into the Capacity field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Capacity field, +// overwriting an existing map entries in Capacity field with the same key. +func (b *BasicDeviceApplyConfiguration) WithCapacity(entries map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration) *BasicDeviceApplyConfiguration { + if b.Capacity == nil && len(entries) > 0 { + b.Capacity = make(map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration, len(entries)) + } + for k, v := range entries { + b.Capacity[k] = v + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go new file mode 100644 index 000000000..c4a28bbf8 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use +// with apply. +type CELDeviceSelectorApplyConfiguration struct { + Expression *string `json:"expression,omitempty"` +} + +// CELDeviceSelectorApplyConfiguration constructs a declarative configuration of the CELDeviceSelector type for use with +// apply. +func CELDeviceSelector() *CELDeviceSelectorApplyConfiguration { + return &CELDeviceSelectorApplyConfiguration{} +} + +// WithExpression sets the Expression field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Expression field is set to the value of the last call. +func (b *CELDeviceSelectorApplyConfiguration) WithExpression(value string) *CELDeviceSelectorApplyConfiguration { + b.Expression = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go new file mode 100644 index 000000000..f635267e2 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceApplyConfiguration represents a declarative configuration of the Device type for use +// with apply. +type DeviceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Basic *BasicDeviceApplyConfiguration `json:"basic,omitempty"` +} + +// DeviceApplyConfiguration constructs a declarative configuration of the Device type for use with +// apply. +func Device() *DeviceApplyConfiguration { + return &DeviceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithName(value string) *DeviceApplyConfiguration { + b.Name = &value + return b +} + +// WithBasic sets the Basic field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Basic field is set to the value of the last call. +func (b *DeviceApplyConfiguration) WithBasic(value *BasicDeviceApplyConfiguration) *DeviceApplyConfiguration { + b.Basic = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go new file mode 100644 index 000000000..b5218ba4a --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" +) + +// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use +// with apply. 
+type DeviceAllocationConfigurationApplyConfiguration struct { + Source *resourcev1beta1.AllocationConfigSource `json:"source,omitempty"` + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceAllocationConfigurationApplyConfiguration constructs a declarative configuration of the DeviceAllocationConfiguration type for use with +// apply. +func DeviceAllocationConfiguration() *DeviceAllocationConfigurationApplyConfiguration { + return &DeviceAllocationConfigurationApplyConfiguration{} +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithSource(value resourcev1beta1.AllocationConfigSource) *DeviceAllocationConfigurationApplyConfiguration { + b.Source = &value + return b +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceAllocationConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceAllocationConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceAllocationConfigurationApplyConfiguration { + b.DeviceConfigurationApplyConfiguration.Opaque = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go new file mode 100644 index 000000000..bf309cf23 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use +// with apply. 
+type DeviceAllocationResultApplyConfiguration struct { + Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"` + Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with +// apply. +func DeviceAllocationResult() *DeviceAllocationResultApplyConfiguration { + return &DeviceAllocationResultApplyConfiguration{} +} + +// WithResults adds the given value to the Results field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Results field. +func (b *DeviceAllocationResultApplyConfiguration) WithResults(values ...*DeviceRequestAllocationResultApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResults") + } + b.Results = append(b.Results, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceAllocationResultApplyConfiguration) WithConfig(values ...*DeviceAllocationConfigurationApplyConfiguration) *DeviceAllocationResultApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go new file mode 100644 index 000000000..6e88ae38a --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use +// with apply. +type DeviceAttributeApplyConfiguration struct { + IntValue *int64 `json:"int,omitempty"` + BoolValue *bool `json:"bool,omitempty"` + StringValue *string `json:"string,omitempty"` + VersionValue *string `json:"version,omitempty"` +} + +// DeviceAttributeApplyConfiguration constructs a declarative configuration of the DeviceAttribute type for use with +// apply. 
+func DeviceAttribute() *DeviceAttributeApplyConfiguration { + return &DeviceAttributeApplyConfiguration{} +} + +// WithIntValue sets the IntValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IntValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithIntValue(value int64) *DeviceAttributeApplyConfiguration { + b.IntValue = &value + return b +} + +// WithBoolValue sets the BoolValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BoolValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithBoolValue(value bool) *DeviceAttributeApplyConfiguration { + b.BoolValue = &value + return b +} + +// WithStringValue sets the StringValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StringValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithStringValue(value string) *DeviceAttributeApplyConfiguration { + b.StringValue = &value + return b +} + +// WithVersionValue sets the VersionValue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VersionValue field is set to the value of the last call. +func (b *DeviceAttributeApplyConfiguration) WithVersionValue(value string) *DeviceAttributeApplyConfiguration { + b.VersionValue = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go new file mode 100644 index 000000000..dcb3504b8 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" +) + +// DeviceCapacityApplyConfiguration represents a declarative configuration of the DeviceCapacity type for use +// with apply. +type DeviceCapacityApplyConfiguration struct { + Value *resource.Quantity `json:"value,omitempty"` +} + +// DeviceCapacityApplyConfiguration constructs a declarative configuration of the DeviceCapacity type for use with +// apply. 
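// Illustrative sketch, not part of the vendored generated code above: DeviceAttribute
// is effectively a one-of, so exactly one of the typed With*Value helpers should be
// used per attribute, while DeviceCapacity carries a resource.Quantity. The map shape
// and the attribute names below are placeholders for illustration only.
package example

import (
	resource "k8s.io/apimachinery/pkg/api/resource"
	resourcev1beta1ac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleAttributesAndCapacity() (map[string]*resourcev1beta1ac.DeviceAttributeApplyConfiguration, *resourcev1beta1ac.DeviceCapacityApplyConfiguration) {
	// One typed value per attribute: string, int, and bool variants shown.
	attrs := map[string]*resourcev1beta1ac.DeviceAttributeApplyConfiguration{
		"model":       resourcev1beta1ac.DeviceAttribute().WithStringValue("a100"),
		"numaNode":    resourcev1beta1ac.DeviceAttribute().WithIntValue(0),
		"adminAccess": resourcev1beta1ac.DeviceAttribute().WithBoolValue(true),
	}
	// Capacity values are parsed resource quantities.
	memory := resourcev1beta1ac.DeviceCapacity().WithValue(resource.MustParse("40Gi"))
	return attrs, memory
}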
+func DeviceCapacity() *DeviceCapacityApplyConfiguration { + return &DeviceCapacityApplyConfiguration{} +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *DeviceCapacityApplyConfiguration) WithValue(value resource.Quantity) *DeviceCapacityApplyConfiguration { + b.Value = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go new file mode 100644 index 000000000..95c1c2e6e --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use +// with apply. +type DeviceClaimApplyConfiguration struct { + Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"` + Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"` + Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with +// apply. +func DeviceClaim() *DeviceClaimApplyConfiguration { + return &DeviceClaimApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimApplyConfiguration) WithRequests(values ...*DeviceRequestApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRequests") + } + b.Requests = append(b.Requests, *values[i]) + } + return b +} + +// WithConstraints adds the given value to the Constraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Constraints field. 
+func (b *DeviceClaimApplyConfiguration) WithConstraints(values ...*DeviceConstraintApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConstraints") + } + b.Constraints = append(b.Constraints, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceClaimApplyConfiguration) WithConfig(values ...*DeviceClaimConfigurationApplyConfiguration) *DeviceClaimApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go new file mode 100644 index 000000000..beac5e9d9 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use +// with apply. +type DeviceClaimConfigurationApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClaimConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClaimConfiguration type for use with +// apply. +func DeviceClaimConfiguration() *DeviceClaimConfigurationApplyConfiguration { + return &DeviceClaimConfigurationApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceClaimConfigurationApplyConfiguration) WithRequests(values ...string) *DeviceClaimConfigurationApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. 
+func (b *DeviceClaimConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClaimConfigurationApplyConfiguration { + b.DeviceConfigurationApplyConfiguration.Opaque = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go new file mode 100644 index 000000000..c71e22259 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use +// with apply. +type DeviceClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"` +} + +// DeviceClass constructs a declarative configuration of the DeviceClass type for use with +// apply. +func DeviceClass(name string) *DeviceClassApplyConfiguration { + b := &DeviceClassApplyConfiguration{} + b.WithName(name) + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b +} + +// ExtractDeviceClass extracts the applied configuration owned by fieldManager from +// deviceClass. If no managedFields are found in deviceClass for fieldManager, a +// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// deviceClass must be a unmodified DeviceClass API object that was retrieved from the Kubernetes API. +// ExtractDeviceClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractDeviceClass(deviceClass *resourcev1beta1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "") +} + +// ExtractDeviceClassStatus is the same as ExtractDeviceClass except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractDeviceClassStatus(deviceClass *resourcev1beta1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) { + return extractDeviceClass(deviceClass, fieldManager, "status") +} + +func extractDeviceClass(deviceClass *resourcev1beta1.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) { + b := &DeviceClassApplyConfiguration{} + err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1beta1.DeviceClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(deviceClass.Name) + + b.WithKind("DeviceClass") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithKind(value string) *DeviceClassApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithAPIVersion(value string) *DeviceClassApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGenerateName(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithNamespace(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *DeviceClassApplyConfiguration) WithUID(value types.UID) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithResourceVersion(value string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithGeneration(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *DeviceClassApplyConfiguration) WithLabels(entries map[string]string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *DeviceClassApplyConfiguration) WithAnnotations(entries map[string]string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *DeviceClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *DeviceClassApplyConfiguration) WithFinalizers(values ...string) *DeviceClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *DeviceClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *DeviceClassApplyConfiguration) WithSpec(value *DeviceClassSpecApplyConfiguration) *DeviceClassApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *DeviceClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go new file mode 100644 index 000000000..3ce90eab5 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use +// with apply. +type DeviceClassConfigurationApplyConfiguration struct { + DeviceConfigurationApplyConfiguration `json:",inline"` +} + +// DeviceClassConfigurationApplyConfiguration constructs a declarative configuration of the DeviceClassConfiguration type for use with +// apply. +func DeviceClassConfiguration() *DeviceClassConfigurationApplyConfiguration { + return &DeviceClassConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. +func (b *DeviceClassConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceClassConfigurationApplyConfiguration { + b.DeviceConfigurationApplyConfiguration.Opaque = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go new file mode 100644 index 000000000..901b0800e --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use +// with apply. 
+type DeviceClassSpecApplyConfiguration struct { + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"` +} + +// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with +// apply. +func DeviceClassSpec() *DeviceClassSpecApplyConfiguration { + return &DeviceClassSpecApplyConfiguration{} +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *DeviceClassSpecApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} + +// WithConfig adds the given value to the Config field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Config field. +func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassConfigurationApplyConfiguration) *DeviceClassSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConfig") + } + b.Config = append(b.Config, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go new file mode 100644 index 000000000..b0f41f5a1 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use +// with apply. +type DeviceConfigurationApplyConfiguration struct { + Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"` +} + +// DeviceConfigurationApplyConfiguration constructs a declarative configuration of the DeviceConfiguration type for use with +// apply. +func DeviceConfiguration() *DeviceConfigurationApplyConfiguration { + return &DeviceConfigurationApplyConfiguration{} +} + +// WithOpaque sets the Opaque field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Opaque field is set to the value of the last call. 
+func (b *DeviceConfigurationApplyConfiguration) WithOpaque(value *OpaqueDeviceConfigurationApplyConfiguration) *DeviceConfigurationApplyConfiguration { + b.Opaque = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go new file mode 100644 index 000000000..0c5fc2525 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go @@ -0,0 +1,54 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" +) + +// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use +// with apply. +type DeviceConstraintApplyConfiguration struct { + Requests []string `json:"requests,omitempty"` + MatchAttribute *resourcev1beta1.FullyQualifiedName `json:"matchAttribute,omitempty"` +} + +// DeviceConstraintApplyConfiguration constructs a declarative configuration of the DeviceConstraint type for use with +// apply. +func DeviceConstraint() *DeviceConstraintApplyConfiguration { + return &DeviceConstraintApplyConfiguration{} +} + +// WithRequests adds the given value to the Requests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Requests field. +func (b *DeviceConstraintApplyConfiguration) WithRequests(values ...string) *DeviceConstraintApplyConfiguration { + for i := range values { + b.Requests = append(b.Requests, values[i]) + } + return b +} + +// WithMatchAttribute sets the MatchAttribute field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MatchAttribute field is set to the value of the last call. +func (b *DeviceConstraintApplyConfiguration) WithMatchAttribute(value resourcev1beta1.FullyQualifiedName) *DeviceConstraintApplyConfiguration { + b.MatchAttribute = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go new file mode 100644 index 000000000..ea454a275 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" +) + +// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use +// with apply. +type DeviceRequestApplyConfiguration struct { + Name *string `json:"name,omitempty"` + DeviceClassName *string `json:"deviceClassName,omitempty"` + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + AllocationMode *resourcev1beta1.DeviceAllocationMode `json:"allocationMode,omitempty"` + Count *int64 `json:"count,omitempty"` + AdminAccess *bool `json:"adminAccess,omitempty"` +} + +// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with +// apply. +func DeviceRequest() *DeviceRequestApplyConfiguration { + return &DeviceRequestApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithName(value string) *DeviceRequestApplyConfiguration { + b.Name = &value + return b +} + +// WithDeviceClassName sets the DeviceClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeviceClassName field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithDeviceClassName(value string) *DeviceRequestApplyConfiguration { + b.DeviceClassName = &value + return b +} + +// WithSelectors adds the given value to the Selectors field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Selectors field. +func (b *DeviceRequestApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceRequestApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSelectors") + } + b.Selectors = append(b.Selectors, *values[i]) + } + return b +} + +// WithAllocationMode sets the AllocationMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllocationMode field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAllocationMode(value resourcev1beta1.DeviceAllocationMode) *DeviceRequestApplyConfiguration { + b.AllocationMode = &value + return b +} + +// WithCount sets the Count field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Count field is set to the value of the last call. 
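// Illustrative sketch, not part of the vendored generated code above: composing a
// DeviceClaim apply configuration from the DeviceRequest and DeviceConstraint
// builders. DeviceAllocationModeExactCount and FullyQualifiedName come from
// k8s.io/api/resource/v1beta1; the class and attribute names are placeholders, and
// the resulting claim would normally be embedded in a ResourceClaimSpec builder
// generated in a sibling file.
package example

import (
	resourcev1beta1 "k8s.io/api/resource/v1beta1"
	resourcev1beta1ac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleDeviceClaim() *resourcev1beta1ac.DeviceClaimApplyConfiguration {
	// The variadic With* helpers for slice fields panic on nil elements, so pass
	// fully constructed builders.
	return resourcev1beta1ac.DeviceClaim().
		WithRequests(resourcev1beta1ac.DeviceRequest().
			WithName("gpu").
			WithDeviceClassName("gpu.example.com").
			WithAllocationMode(resourcev1beta1.DeviceAllocationModeExactCount).
			WithCount(1)).
		WithConstraints(resourcev1beta1ac.DeviceConstraint().
			WithRequests("gpu").
			WithMatchAttribute(resourcev1beta1.FullyQualifiedName("gpu.example.com/model")))
}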
+func (b *DeviceRequestApplyConfiguration) WithCount(value int64) *DeviceRequestApplyConfiguration { + b.Count = &value + return b +} + +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdminAccess field is set to the value of the last call. +func (b *DeviceRequestApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestApplyConfiguration { + b.AdminAccess = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go new file mode 100644 index 000000000..c28eb26ab --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use +// with apply. +type DeviceRequestAllocationResultApplyConfiguration struct { + Request *string `json:"request,omitempty"` + Driver *string `json:"driver,omitempty"` + Pool *string `json:"pool,omitempty"` + Device *string `json:"device,omitempty"` + AdminAccess *bool `json:"adminAccess,omitempty"` +} + +// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with +// apply. +func DeviceRequestAllocationResult() *DeviceRequestAllocationResultApplyConfiguration { + return &DeviceRequestAllocationResultApplyConfiguration{} +} + +// WithRequest sets the Request field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Request field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithRequest(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Request = &value + return b +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDriver(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Pool field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithPool(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Pool = &value + return b +} + +// WithDevice sets the Device field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Device field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithDevice(value string) *DeviceRequestAllocationResultApplyConfiguration { + b.Device = &value + return b +} + +// WithAdminAccess sets the AdminAccess field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AdminAccess field is set to the value of the last call. +func (b *DeviceRequestAllocationResultApplyConfiguration) WithAdminAccess(value bool) *DeviceRequestAllocationResultApplyConfiguration { + b.AdminAccess = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go new file mode 100644 index 000000000..bf60bf434 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use +// with apply. +type DeviceSelectorApplyConfiguration struct { + CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"` +} + +// DeviceSelectorApplyConfiguration constructs a declarative configuration of the DeviceSelector type for use with +// apply. +func DeviceSelector() *DeviceSelectorApplyConfiguration { + return &DeviceSelectorApplyConfiguration{} +} + +// WithCEL sets the CEL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CEL field is set to the value of the last call. +func (b *DeviceSelectorApplyConfiguration) WithCEL(value *CELDeviceSelectorApplyConfiguration) *DeviceSelectorApplyConfiguration { + b.CEL = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go new file mode 100644 index 000000000..c9d488019 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// NetworkDeviceDataApplyConfiguration represents a declarative configuration of the NetworkDeviceData type for use +// with apply. +type NetworkDeviceDataApplyConfiguration struct { + InterfaceName *string `json:"interfaceName,omitempty"` + IPs []string `json:"ips,omitempty"` + HardwareAddress *string `json:"hardwareAddress,omitempty"` +} + +// NetworkDeviceDataApplyConfiguration constructs a declarative configuration of the NetworkDeviceData type for use with +// apply. +func NetworkDeviceData() *NetworkDeviceDataApplyConfiguration { + return &NetworkDeviceDataApplyConfiguration{} +} + +// WithInterfaceName sets the InterfaceName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the InterfaceName field is set to the value of the last call. +func (b *NetworkDeviceDataApplyConfiguration) WithInterfaceName(value string) *NetworkDeviceDataApplyConfiguration { + b.InterfaceName = &value + return b +} + +// WithIPs adds the given value to the IPs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the IPs field. +func (b *NetworkDeviceDataApplyConfiguration) WithIPs(values ...string) *NetworkDeviceDataApplyConfiguration { + for i := range values { + b.IPs = append(b.IPs, values[i]) + } + return b +} + +// WithHardwareAddress sets the HardwareAddress field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HardwareAddress field is set to the value of the last call. +func (b *NetworkDeviceDataApplyConfiguration) WithHardwareAddress(value string) *NetworkDeviceDataApplyConfiguration { + b.HardwareAddress = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go new file mode 100644 index 000000000..0b52fa93a --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use +// with apply. +type OpaqueDeviceConfigurationApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Parameters *runtime.RawExtension `json:"parameters,omitempty"` +} + +// OpaqueDeviceConfigurationApplyConfiguration constructs a declarative configuration of the OpaqueDeviceConfiguration type for use with +// apply. +func OpaqueDeviceConfiguration() *OpaqueDeviceConfigurationApplyConfiguration { + return &OpaqueDeviceConfigurationApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithDriver(value string) *OpaqueDeviceConfigurationApplyConfiguration { + b.Driver = &value + return b +} + +// WithParameters sets the Parameters field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Parameters field is set to the value of the last call. +func (b *OpaqueDeviceConfigurationApplyConfiguration) WithParameters(value runtime.RawExtension) *OpaqueDeviceConfigurationApplyConfiguration { + b.Parameters = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go new file mode 100644 index 000000000..ee16718fd --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go @@ -0,0 +1,264 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use +// with apply. 
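// Illustrative sketch, not part of the vendored generated code above: the Parameters
// field of OpaqueDeviceConfiguration is a runtime.RawExtension, so driver-specific
// configuration is passed as raw JSON bytes. The driver name and payload below are
// placeholders.
package example

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
	resourcev1beta1ac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleOpaqueConfig() *resourcev1beta1ac.DeviceClassConfigurationApplyConfiguration {
	// DeviceClassConfiguration inlines DeviceConfiguration, so WithOpaque is
	// available directly on the outer builder.
	return resourcev1beta1ac.DeviceClassConfiguration().
		WithOpaque(resourcev1beta1ac.OpaqueDeviceConfiguration().
			WithDriver("gpu.example.com").
			WithParameters(runtime.RawExtension{Raw: []byte(`{"sharing":"timeslice"}`)}))
}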
+type ResourceClaimApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` + Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"` +} + +// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with +// apply. +func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration { + b := &ResourceClaimApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ResourceClaim") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b +} + +// ExtractResourceClaim extracts the applied configuration owned by fieldManager from +// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a +// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceClaim must be a unmodified ResourceClaim API object that was retrieved from the Kubernetes API. +// ExtractResourceClaim provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceClaim(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { + return extractResourceClaim(resourceClaim, fieldManager, "") +} + +// ExtractResourceClaimStatus is the same as ExtractResourceClaim except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceClaimStatus(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) { + return extractResourceClaim(resourceClaim, fieldManager, "status") +} + +func extractResourceClaim(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) { + b := &ResourceClaimApplyConfiguration{} + err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceClaim"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceClaim.Name) + b.WithNamespace(resourceClaim.Namespace) + + b.WithKind("ResourceClaim") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithKind(value string) *ResourceClaimApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *ResourceClaimApplyConfiguration) WithAPIVersion(value string) *ResourceClaimApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithName(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithGenerateName(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithNamespace(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithUID(value types.UID) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithResourceVersion(value string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithGeneration(value int64) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ResourceClaimApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ResourceClaimApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ResourceClaimApplyConfiguration) WithStatus(value *ResourceClaimStatusApplyConfiguration) *ResourceClaimApplyConfiguration { + b.Status = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go new file mode 100644 index 000000000..f6eefdda5 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + types "k8s.io/apimachinery/pkg/types" +) + +// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use +// with apply. +type ResourceClaimConsumerReferenceApplyConfiguration struct { + APIGroup *string `json:"apiGroup,omitempty"` + Resource *string `json:"resource,omitempty"` + Name *string `json:"name,omitempty"` + UID *types.UID `json:"uid,omitempty"` +} + +// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with +// apply. +func ResourceClaimConsumerReference() *ResourceClaimConsumerReferenceApplyConfiguration { + return &ResourceClaimConsumerReferenceApplyConfiguration{} +} + +// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIGroup field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithAPIGroup(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.APIGroup = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithResource(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.Resource = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithName(value string) *ResourceClaimConsumerReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimConsumerReferenceApplyConfiguration) WithUID(value types.UID) *ResourceClaimConsumerReferenceApplyConfiguration { + b.UID = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go new file mode 100644 index 000000000..c6b1b0b4b --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use +// with apply. +type ResourceClaimSpecApplyConfiguration struct { + Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with +// apply. +func ResourceClaimSpec() *ResourceClaimSpecApplyConfiguration { + return &ResourceClaimSpecApplyConfiguration{} +} + +// WithDevices sets the Devices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Devices field is set to the value of the last call. +func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimApplyConfiguration) *ResourceClaimSpecApplyConfiguration { + b.Devices = value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go new file mode 100644 index 000000000..bb3db18be --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go @@ -0,0 +1,67 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use +// with apply. +type ResourceClaimStatusApplyConfiguration struct { + Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` + ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` + Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with +// apply. +func ResourceClaimStatus() *ResourceClaimStatusApplyConfiguration { + return &ResourceClaimStatusApplyConfiguration{} +} + +// WithAllocation sets the Allocation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Allocation field is set to the value of the last call. +func (b *ResourceClaimStatusApplyConfiguration) WithAllocation(value *AllocationResultApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + b.Allocation = value + return b +} + +// WithReservedFor adds the given value to the ReservedFor field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the ReservedFor field. +func (b *ResourceClaimStatusApplyConfiguration) WithReservedFor(values ...*ResourceClaimConsumerReferenceApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithReservedFor") + } + b.ReservedFor = append(b.ReservedFor, *values[i]) + } + return b +} + +// WithDevices adds the given value to the Devices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Devices field. +func (b *ResourceClaimStatusApplyConfiguration) WithDevices(values ...*AllocatedDeviceStatusApplyConfiguration) *ResourceClaimStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDevices") + } + b.Devices = append(b.Devices, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go new file mode 100644 index 000000000..490ecf5e7 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go @@ -0,0 +1,255 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use +// with apply. +type ResourceClaimTemplateApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with +// apply. +func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyConfiguration { + b := &ResourceClaimTemplateApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ResourceClaimTemplate") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b +} + +// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from +// resourceClaimTemplate. 
If no managedFields are found in resourceClaimTemplate for fieldManager, a +// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API. +// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { + return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "") +} + +// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) { + return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status") +} + +func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) { + b := &ResourceClaimTemplateApplyConfiguration{} + err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceClaimTemplate"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceClaimTemplate.Name) + b.WithNamespace(resourceClaimTemplate.Namespace) + + b.WithKind("ResourceClaimTemplate") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithKind(value string) *ResourceClaimTemplateApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithAPIVersion(value string) *ResourceClaimTemplateApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithName(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimTemplateApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ResourceClaimTemplateApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ResourceClaimTemplateApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimTemplateApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimTemplateApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimTemplateApplyConfiguration) WithSpec(value *ResourceClaimTemplateSpecApplyConfiguration) *ResourceClaimTemplateApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimTemplateApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go new file mode 100644 index 000000000..9df32360f --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go @@ -0,0 +1,194 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use +// with apply. 
+type ResourceClaimTemplateSpecApplyConfiguration struct { + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with +// apply. +func ResourceClaimTemplateSpec() *ResourceClaimTemplateSpecApplyConfiguration { + return &ResourceClaimTemplateSpecApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithName(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGenerateName(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithNamespace(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithUID(value types.UID) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithResourceVersion(value string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithGeneration(value int64) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithLabels(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *ResourceClaimTemplateSpecApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithFinalizers(values ...string) *ResourceClaimTemplateSpecApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceClaimTemplateSpecApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceClaimTemplateSpecApplyConfiguration) WithSpec(value *ResourceClaimSpecApplyConfiguration) *ResourceClaimTemplateSpecApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceClaimTemplateSpecApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go new file mode 100644 index 000000000..33c155b52 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use +// with apply. +type ResourcePoolApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Generation *int64 `json:"generation,omitempty"` + ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"` +} + +// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with +// apply. +func ResourcePool() *ResourcePoolApplyConfiguration { + return &ResourcePoolApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithName(value string) *ResourcePoolApplyConfiguration { + b.Name = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithGeneration(value int64) *ResourcePoolApplyConfiguration { + b.Generation = &value + return b +} + +// WithResourceSliceCount sets the ResourceSliceCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceSliceCount field is set to the value of the last call. +func (b *ResourcePoolApplyConfiguration) WithResourceSliceCount(value int64) *ResourcePoolApplyConfiguration { + b.ResourceSliceCount = &value + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go new file mode 100644 index 000000000..d169ad101 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go @@ -0,0 +1,253 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use +// with apply. +type ResourceSliceApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with +// apply. +func ResourceSlice(name string) *ResourceSliceApplyConfiguration { + b := &ResourceSliceApplyConfiguration{} + b.WithName(name) + b.WithKind("ResourceSlice") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b +} + +// ExtractResourceSlice extracts the applied configuration owned by fieldManager from +// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a +// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API. +// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractResourceSlice(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { + return extractResourceSlice(resourceSlice, fieldManager, "") +} + +// ExtractResourceSliceStatus is the same as ExtractResourceSlice except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractResourceSliceStatus(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) { + return extractResourceSlice(resourceSlice, fieldManager, "status") +} + +func extractResourceSlice(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) { + b := &ResourceSliceApplyConfiguration{} + err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceSlice"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(resourceSlice.Name) + + b.WithKind("ResourceSlice") + b.WithAPIVersion("resource.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *ResourceSliceApplyConfiguration) WithKind(value string) *ResourceSliceApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithAPIVersion(value string) *ResourceSliceApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithName(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithGenerateName(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithNamespace(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithUID(value types.UID) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithResourceVersion(value string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ResourceSliceApplyConfiguration) WithGeneration(value int64) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ResourceSliceApplyConfiguration) WithLabels(entries map[string]string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *ResourceSliceApplyConfiguration) WithAnnotations(entries map[string]string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ResourceSliceApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ResourceSliceApplyConfiguration) WithFinalizers(values ...string) *ResourceSliceApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *ResourceSliceApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ResourceSliceApplyConfiguration) WithSpec(value *ResourceSliceSpecApplyConfiguration) *ResourceSliceApplyConfiguration { + b.Spec = value + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *ResourceSliceApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go new file mode 100644 index 000000000..75bbb53c8 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use +// with apply. +type ResourceSliceSpecApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` + Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"` + NodeName *string `json:"nodeName,omitempty"` + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + AllNodes *bool `json:"allNodes,omitempty"` + Devices []DeviceApplyConfiguration `json:"devices,omitempty"` +} + +// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with +// apply. +func ResourceSliceSpec() *ResourceSliceSpecApplyConfiguration { + return &ResourceSliceSpecApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithDriver(value string) *ResourceSliceSpecApplyConfiguration { + b.Driver = &value + return b +} + +// WithPool sets the Pool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Pool field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithPool(value *ResourcePoolApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.Pool = value + return b +} + +// WithNodeName sets the NodeName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeName field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeName(value string) *ResourceSliceSpecApplyConfiguration { + b.NodeName = &value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithAllNodes sets the AllNodes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllNodes field is set to the value of the last call. +func (b *ResourceSliceSpecApplyConfiguration) WithAllNodes(value bool) *ResourceSliceSpecApplyConfiguration { + b.AllNodes = &value + return b +} + +// WithDevices adds the given value to the Devices field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Devices field. 
+func (b *ResourceSliceSpecApplyConfiguration) WithDevices(values ...*DeviceApplyConfiguration) *ResourceSliceSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDevices") + } + b.Devices = append(b.Devices, *values[i]) + } + return b +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go index b57e8ba57..24f122cc0 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go @@ -21,25 +21,25 @@ package v1 import ( corev1 "k8s.io/api/core/v1" schedulingv1 "k8s.io/api/scheduling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. type PriorityClassApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Value *int32 `json:"value,omitempty"` - GlobalDefault *bool `json:"globalDefault,omitempty"` - Description *string `json:"description,omitempty"` - PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Value *int32 `json:"value,omitempty"` + GlobalDefault *bool `json:"globalDefault,omitempty"` + Description *string `json:"description,omitempty"` + PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -88,7 +88,7 @@ func extractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManage // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *PriorityClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *Priority // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *Priori // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC // If called multiple times, the UID field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClas // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,25 +150,25 @@ func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *Pri // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { +func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { +func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -214,13 +214,13 @@ func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]str // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PriorityClassApplyConfiguration { +func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,14 +231,14 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *PriorityClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go index 0cd09d5d1..37a50ef6a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go @@ -20,7 +20,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" - v1alpha1 "k8s.io/api/scheduling/v1alpha1" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. 
func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -60,18 +60,18 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { +func ExtractPriorityClass(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { return extractPriorityClass(priorityClass, fieldManager, "") } // ExtractPriorityClassStatus is the same as ExtractPriorityClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPriorityClassStatus(priorityClass *v1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { +func ExtractPriorityClassStatus(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { return extractPriorityClass(priorityClass, fieldManager, "status") } -func extractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { +func extractPriorityClass(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { b := &PriorityClassApplyConfiguration{} err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1alpha1.PriorityClass"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager st // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *PriorityClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *Priority // If called multiple times, the Name field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassA // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *Priori // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC // If called multiple times, the UID field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClas // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,7 +150,7 @@ func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *Pri // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -159,7 +159,7 @@ func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityC // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -168,7 +168,7 @@ func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Tim // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. 
func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -220,7 +220,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,7 +231,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go index 98cfb14c7..4b6d52039 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go @@ -20,7 +20,7 @@ package v1beta1 import ( corev1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/scheduling/v1beta1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// PriorityClassApplyConfiguration represents an declarative configuration of the PriorityClass type for use +// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use // with apply. 
type PriorityClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type PriorityClassApplyConfiguration struct { PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"` } -// PriorityClass constructs an declarative configuration of the PriorityClass type for use with +// PriorityClass constructs a declarative configuration of the PriorityClass type for use with // apply. func PriorityClass(name string) *PriorityClassApplyConfiguration { b := &PriorityClassApplyConfiguration{} @@ -60,18 +60,18 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { +func ExtractPriorityClass(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { return extractPriorityClass(priorityClass, fieldManager, "") } // ExtractPriorityClassStatus is the same as ExtractPriorityClass except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractPriorityClassStatus(priorityClass *v1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { +func ExtractPriorityClassStatus(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) { return extractPriorityClass(priorityClass, fieldManager, "status") } -func extractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { +func extractPriorityClass(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) { b := &PriorityClassApplyConfiguration{} err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1beta1.PriorityClass"), fieldManager, b, subresource) if err != nil { @@ -88,7 +88,7 @@ func extractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager str // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -96,7 +96,7 @@ func (b *PriorityClassApplyConfiguration) WithKind(value string) *PriorityClassA // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *PriorityClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -105,7 +105,7 @@ func (b *PriorityClassApplyConfiguration) WithAPIVersion(value string) *Priority // If called multiple times, the Name field is set to the value of the last call. 
func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -114,7 +114,7 @@ func (b *PriorityClassApplyConfiguration) WithName(value string) *PriorityClassA // If called multiple times, the GenerateName field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -123,7 +123,7 @@ func (b *PriorityClassApplyConfiguration) WithGenerateName(value string) *Priori // If called multiple times, the Namespace field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -132,7 +132,7 @@ func (b *PriorityClassApplyConfiguration) WithNamespace(value string) *PriorityC // If called multiple times, the UID field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -141,7 +141,7 @@ func (b *PriorityClassApplyConfiguration) WithUID(value types.UID) *PriorityClas // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -150,7 +150,7 @@ func (b *PriorityClassApplyConfiguration) WithResourceVersion(value string) *Pri // If called multiple times, the Generation field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -159,7 +159,7 @@ func (b *PriorityClassApplyConfiguration) WithGeneration(value int64) *PriorityC // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -168,7 +168,7 @@ func (b *PriorityClassApplyConfiguration) WithCreationTimestamp(value metav1.Tim // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -177,7 +177,7 @@ func (b *PriorityClassApplyConfiguration) WithDeletionTimestamp(value metav1.Tim // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -187,11 +187,11 @@ func (b *PriorityClassApplyConfiguration) WithDeletionGracePeriodSeconds(value i // overwriting an existing map entries in Labels field with the same key. func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -202,11 +202,11 @@ func (b *PriorityClassApplyConfiguration) WithLabels(entries map[string]string) // overwriting an existing map entries in Annotations field with the same key. func (b *PriorityClassApplyConfiguration) WithAnnotations(entries map[string]string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -220,7 +220,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -231,7 +231,7 @@ func (b *PriorityClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owne func (b *PriorityClassApplyConfiguration) WithFinalizers(values ...string) *PriorityClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -273,3 +273,9 @@ func (b *PriorityClassApplyConfiguration) WithPreemptionPolicy(value corev1.Pree b.PreemptionPolicy = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *PriorityClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go index aeead0861..6941e4cdc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go @@ -19,23 +19,23 @@ limitations under the License. 
package v1 import ( - apistoragev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + storagev1 "k8s.io/api/storage/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIDriverApplyConfiguration represents an declarative configuration of the CSIDriver type for use +// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use // with apply. type CSIDriverApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` } -// CSIDriver constructs an declarative configuration of the CSIDriver type for use with +// CSIDriver constructs a declarative configuration of the CSIDriver type for use with // apply. func CSIDriver(name string) *CSIDriverApplyConfiguration { b := &CSIDriverApplyConfiguration{} @@ -56,18 +56,18 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { +func ExtractCSIDriver(cSIDriver *storagev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { return extractCSIDriver(cSIDriver, fieldManager, "") } // ExtractCSIDriverStatus is the same as ExtractCSIDriver except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCSIDriverStatus(cSIDriver *apistoragev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { +func ExtractCSIDriverStatus(cSIDriver *storagev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) { return extractCSIDriver(cSIDriver, fieldManager, "status") } -func extractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) { +func extractCSIDriver(cSIDriver *storagev1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) { b := &CSIDriverApplyConfiguration{} err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1.CSIDriver"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string, su // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApp // If called multiple times, the Name field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverA // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverAppl // If called multiple times, the UID field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriv // If called multiple times, the Generation field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithGeneration(value int64) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIDriverApplyConfiguration { +func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIDriverApplyConfiguration { +func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSI // overwriting an existing map entries in Annotations field with the same key. func (b *CSIDriverApplyConfiguration) WithAnnotations(entries map[string]string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *CSIDriverApplyConfiguration) WithAnnotations(entries map[string]string) // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSIDriverApplyConfiguration { +func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *CSIDriverApplyConfiguration) WithFinalizers(values ...string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CSIDriverApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -245,3 +245,9 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIDriverApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go index a1ef00656..1b58c6db8 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" ) -// CSIDriverSpecApplyConfiguration represents an declarative configuration of the CSIDriverSpec type for use +// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use // with apply. type CSIDriverSpecApplyConfiguration struct { AttachRequired *bool `json:"attachRequired,omitempty"` PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"` - VolumeLifecycleModes []v1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"` + VolumeLifecycleModes []storagev1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"` StorageCapacity *bool `json:"storageCapacity,omitempty"` - FSGroupPolicy *v1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` + FSGroupPolicy *storagev1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"` RequiresRepublish *bool `json:"requiresRepublish,omitempty"` SELinuxMount *bool `json:"seLinuxMount,omitempty"` } -// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with +// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with // apply. 
func CSIDriverSpec() *CSIDriverSpecApplyConfiguration { return &CSIDriverSpecApplyConfiguration{} @@ -60,7 +60,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithPodInfoOnMount(value bool) *CSIDri // WithVolumeLifecycleModes adds the given value to the VolumeLifecycleModes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeLifecycleModes field. -func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...v1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration { +func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...storagev1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration { for i := range values { b.VolumeLifecycleModes = append(b.VolumeLifecycleModes, values[i]) } @@ -78,7 +78,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithStorageCapacity(value bool) *CSIDr // WithFSGroupPolicy sets the FSGroupPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FSGroupPolicy field is set to the value of the last call. -func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value v1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration { +func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value storagev1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration { b.FSGroupPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go index d8296e485..f31620709 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go @@ -19,23 +19,23 @@ limitations under the License. package v1 import ( - apistoragev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + storagev1 "k8s.io/api/storage/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSINodeApplyConfiguration represents an declarative configuration of the CSINode type for use +// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use // with apply. type CSINodeApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` } -// CSINode constructs an declarative configuration of the CSINode type for use with +// CSINode constructs a declarative configuration of the CSINode type for use with // apply. 
func CSINode(name string) *CSINodeApplyConfiguration { b := &CSINodeApplyConfiguration{} @@ -56,18 +56,18 @@ func CSINode(name string) *CSINodeApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCSINode(cSINode *apistoragev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { +func ExtractCSINode(cSINode *storagev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { return extractCSINode(cSINode, fieldManager, "") } // ExtractCSINodeStatus is the same as ExtractCSINode except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCSINodeStatus(cSINode *apistoragev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { +func ExtractCSINodeStatus(cSINode *storagev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) { return extractCSINode(cSINode, fieldManager, "status") } -func extractCSINode(cSINode *apistoragev1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) { +func extractCSINode(cSINode *storagev1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) { b := &CSINodeApplyConfiguration{} err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1.CSINode"), fieldManager, b, subresource) if err != nil { @@ -84,7 +84,7 @@ func extractCSINode(cSINode *apistoragev1.CSINode, fieldManager string, subresou // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApply // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,25 +146,25 @@ func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeAp // If called multiple times, the Generation field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithGeneration(value int64) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSINodeApplyConfiguration { +func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSINodeApplyConfiguration { +func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CS // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. 
func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINo // overwriting an existing map entries in Annotations field with the same key. func (b *CSINodeApplyConfiguration) WithAnnotations(entries map[string]string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -210,13 +210,13 @@ func (b *CSINodeApplyConfiguration) WithAnnotations(entries map[string]string) * // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSINodeApplyConfiguration { +func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,14 +227,14 @@ func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *CSINodeApplyConfiguration) WithFinalizers(values ...string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CSINodeApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -245,3 +245,9 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CSINodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go index 6219ef115..8c69e435e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// CSINodeDriverApplyConfiguration represents an declarative configuration of the CSINodeDriver type for use +// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use // with apply. type CSINodeDriverApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -27,7 +27,7 @@ type CSINodeDriverApplyConfiguration struct { Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"` } -// CSINodeDriverApplyConfiguration constructs an declarative configuration of the CSINodeDriver type for use with +// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with // apply. func CSINodeDriver() *CSINodeDriverApplyConfiguration { return &CSINodeDriverApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go index 30d1d4546..21d3ba7cc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// CSINodeSpecApplyConfiguration represents an declarative configuration of the CSINodeSpec type for use +// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use // with apply. type CSINodeSpecApplyConfiguration struct { Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"` } -// CSINodeSpecApplyConfiguration constructs an declarative configuration of the CSINodeSpec type for use with +// CSINodeSpecApplyConfiguration constructs a declarative configuration of the CSINodeSpec type for use with // apply. 
func CSINodeSpec() *CSINodeSpecApplyConfiguration { return &CSINodeSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go index c47c6b821..226fb1f70 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go @@ -21,25 +21,25 @@ package v1 import ( storagev1 "k8s.io/api/storage/v1" resource "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - NodeTopology *v1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"` - StorageClassName *string `json:"storageClassName,omitempty"` - Capacity *resource.Quantity `json:"capacity,omitempty"` - MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + NodeTopology *metav1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"` + StorageClassName *string `json:"storageClassName,omitempty"` + Capacity *resource.Quantity `json:"capacity,omitempty"` + MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -90,7 +90,7 @@ func extractCSIStorageCapacity(cSIStorageCapacity *storagev1.CSIStorageCapacity, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorageCapacityApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -98,7 +98,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorag // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. 
func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSIStorageCapacityApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -107,7 +107,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSI // If called multiple times, the Name field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -116,7 +116,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorag // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -125,7 +125,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -134,7 +134,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS // If called multiple times, the UID field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -143,7 +143,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStor // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -152,25 +152,25 @@ func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { +func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { +func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -179,7 +179,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -189,11 +189,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -204,11 +204,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,13 +216,13 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CSIStorageCapacityApplyConfiguration { +func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -233,21 +233,21 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *CSIStorageCapacityApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } // WithNodeTopology sets the NodeTopology field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the NodeTopology field is set to the value of the last call. -func (b *CSIStorageCapacityApplyConfiguration) WithNodeTopology(value *v1.LabelSelectorApplyConfiguration) *CSIStorageCapacityApplyConfiguration { +func (b *CSIStorageCapacityApplyConfiguration) WithNodeTopology(value *metav1.LabelSelectorApplyConfiguration) *CSIStorageCapacityApplyConfiguration { b.NodeTopology = value return b } @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go index 98c4c2233..cab39900e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go @@ -21,29 +21,29 @@ package v1 import ( corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageClassApplyConfiguration represents an declarative configuration of the StorageClass type for use +// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use // with apply. type StorageClassApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Provisioner *string `json:"provisioner,omitempty"` - Parameters map[string]string `json:"parameters,omitempty"` - ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"` - MountOptions []string `json:"mountOptions,omitempty"` - AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"` - VolumeBindingMode *storagev1.VolumeBindingMode `json:"volumeBindingMode,omitempty"` - AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Provisioner *string `json:"provisioner,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` + ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"` + MountOptions []string `json:"mountOptions,omitempty"` + AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"` + VolumeBindingMode *storagev1.VolumeBindingMode `json:"volumeBindingMode,omitempty"` + AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` } -// StorageClass constructs an declarative configuration of the StorageClass type for use with +// StorageClass constructs a declarative configuration of the StorageClass type for use with // apply. func StorageClass(name string) *StorageClassApplyConfiguration { b := &StorageClassApplyConfiguration{} @@ -92,7 +92,7 @@ func extractStorageClass(storageClass *storagev1.StorageClass, fieldManager stri // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. 
func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -100,7 +100,7 @@ func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -109,7 +109,7 @@ func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageCl // If called multiple times, the Name field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -118,7 +118,7 @@ func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -127,7 +127,7 @@ func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *Storage // If called multiple times, the Namespace field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -136,7 +136,7 @@ func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageCla // If called multiple times, the UID field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -145,7 +145,7 @@ func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -154,25 +154,25 @@ func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *Stor // If called multiple times, the Generation field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithGeneration(value int64) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. 
-func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageClassApplyConfiguration { +func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageClassApplyConfiguration { +func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -181,7 +181,7 @@ func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -191,11 +191,11 @@ func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -206,11 +206,11 @@ func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. func (b *StorageClassApplyConfiguration) WithAnnotations(entries map[string]string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -218,13 +218,13 @@ func (b *StorageClassApplyConfiguration) WithAnnotations(entries map[string]stri // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
-func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *StorageClassApplyConfiguration { +func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -235,14 +235,14 @@ func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *StorageClassApplyConfiguration) WithFinalizers(values ...string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *StorageClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -314,3 +314,9 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go index 6665a1ff2..77b96db2f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go @@ -18,14 +18,14 @@ limitations under the License. package v1 -// TokenRequestApplyConfiguration represents an declarative configuration of the TokenRequest type for use +// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use // with apply. type TokenRequestApplyConfiguration struct { Audience *string `json:"audience,omitempty"` ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"` } -// TokenRequestApplyConfiguration constructs an declarative configuration of the TokenRequest type for use with +// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with // apply. func TokenRequest() *TokenRequestApplyConfiguration { return &TokenRequestApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go index 4c74f09aa..b28b8c33f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go @@ -19,24 +19,24 @@ limitations under the License. 
package v1 import ( - apistoragev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + storagev1 "k8s.io/api/storage/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"` - Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` + metav1.TypeMetaApplyConfiguration `json:",inline"` + *metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"` + Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -57,18 +57,18 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { +func ExtractVolumeAttachment(volumeAttachment *storagev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { return extractVolumeAttachment(volumeAttachment, fieldManager, "") } // ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractVolumeAttachmentStatus(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { +func ExtractVolumeAttachmentStatus(volumeAttachment *storagev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) { return extractVolumeAttachment(volumeAttachment, fieldManager, "status") } -func extractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) { +func extractVolumeAttachment(volumeAttachment *storagev1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) { b := &VolumeAttachmentApplyConfiguration{} err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1.VolumeAttachment"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fi // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttachmentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttac // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *VolumeAttachmentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *Volum // If called multiple times, the Name field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttac // If called multiple times, the GenerateName field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *Vol // If called multiple times, the Namespace field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume // If called multiple times, the UID field is set to the value of the last call. 
func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAtt // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,25 +147,25 @@ func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) * // If called multiple times, the Generation field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { +func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value apismetav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { +func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value apismetav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(valu // overwriting an existing map entries in Labels field with the same key. 
func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]strin // overwriting an existing map entries in Annotations field with the same key. func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -211,13 +211,13 @@ func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string] // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttachmentApplyConfiguration { +func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*metav1.OwnerReferenceApplyConfiguration) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,14 +228,14 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } func (b *VolumeAttachmentApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + b.ObjectMetaApplyConfiguration = &metav1.ObjectMetaApplyConfiguration{} } } @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go index 2bf3f7720..1c865c001 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" + corev1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. type VolumeAttachmentSourceApplyConfiguration struct { - PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` - InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` + PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` + InlineVolumeSpec *corev1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} @@ -46,7 +46,7 @@ func (b *VolumeAttachmentSourceApplyConfiguration) WithPersistentVolumeName(valu // WithInlineVolumeSpec sets the InlineVolumeSpec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the InlineVolumeSpec field is set to the value of the last call. -func (b *VolumeAttachmentSourceApplyConfiguration) WithInlineVolumeSpec(value *v1.PersistentVolumeSpecApplyConfiguration) *VolumeAttachmentSourceApplyConfiguration { +func (b *VolumeAttachmentSourceApplyConfiguration) WithInlineVolumeSpec(value *corev1.PersistentVolumeSpecApplyConfiguration) *VolumeAttachmentSourceApplyConfiguration { b.InlineVolumeSpec = value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go index a55f7c8ea..896539235 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. 
type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go index 015b08e6e..14293376d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go index 4bf829f8a..c16c5c3af 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go @@ -19,17 +19,17 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { - Time *v1.Time `json:"time,omitempty"` - Message *string `json:"message,omitempty"` + Time *metav1.Time `json:"time,omitempty"` + Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. 
func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} @@ -38,7 +38,7 @@ func VolumeError() *VolumeErrorApplyConfiguration { // WithTime sets the Time field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Time field is set to the value of the last call. -func (b *VolumeErrorApplyConfiguration) WithTime(value v1.Time) *VolumeErrorApplyConfiguration { +func (b *VolumeErrorApplyConfiguration) WithTime(value metav1.Time) *VolumeErrorApplyConfiguration { b.Time = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go index 3c5fd3dc2..735853c48 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go @@ -18,13 +18,13 @@ limitations under the License. package v1 -// VolumeNodeResourcesApplyConfiguration represents an declarative configuration of the VolumeNodeResources type for use +// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use // with apply. type VolumeNodeResourcesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` } -// VolumeNodeResourcesApplyConfiguration constructs an declarative configuration of the VolumeNodeResources type for use with +// VolumeNodeResourcesApplyConfiguration constructs a declarative configuration of the VolumeNodeResources type for use with // apply. func VolumeNodeResources() *VolumeNodeResourcesApplyConfiguration { return &VolumeNodeResourcesApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go index 8b810fed1..518f7a7f6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" resource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct { MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. 
func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -61,18 +61,18 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { +func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "") } // ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { +func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status") } -func extractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { +func extractCSIStorageCapacity(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { b := &CSIStorageCapacityApplyConfiguration{} err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1alpha1.CSIStorageCapacity"), fieldManager, b, subresource) if err != nil { @@ -90,7 +90,7 @@ func extractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorageCapacityApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -98,7 +98,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorag // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSIStorageCapacityApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -107,7 +107,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSI // If called multiple times, the Name field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -116,7 +116,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorag // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -125,7 +125,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -134,7 +134,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS // If called multiple times, the UID field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -143,7 +143,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStor // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -152,7 +152,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -161,7 +161,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIS // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -170,7 +170,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -179,7 +179,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -189,11 +189,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -204,11 +204,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -222,7 +222,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -233,7 +233,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go index bcefb5778..b66cf0094 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -85,7 +85,7 @@ func extractVolumeAttachment(volumeAttachment *storagev1alpha1.VolumeAttachment, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttachmentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttac // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *VolumeAttachmentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *Volum // If called multiple times, the Name field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttac // If called multiple times, the GenerateName field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *Vol // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume // If called multiple times, the UID field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAtt // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) * // If called multiple times, the Generation field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *Volume // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(valu // overwriting an existing map entries in Labels field with the same key. 
func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]strin // overwriting an existing map entries in Annotations field with the same key. func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go index 82872cc35..be7da5dd1 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. 
type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go index 2710ff886..e97487a64 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go index 43803496e..a287fc6b2 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. 
func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go index 9d4c47625..898726b62 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttributesClassApplyConfiguration represents an declarative configuration of the VolumeAttributesClass type for use +// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use // with apply. type VolumeAttributesClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttributesClassApplyConfiguration struct { Parameters map[string]string `json:"parameters,omitempty"` } -// VolumeAttributesClass constructs an declarative configuration of the VolumeAttributesClass type for use with +// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with // apply. func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { b := &VolumeAttributesClassApplyConfiguration{} @@ -57,18 +57,18 @@ func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { +func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") } // ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { +func ExtractVolumeAttributesClassStatus(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") } -func extractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { +func extractVolumeAttributesClass(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { b := &VolumeAttributesClassApplyConfiguration{} err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttributesClass"), fieldManager, b, subresource) if err != nil { @@ -85,7 +85,7 @@ func extractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttribut // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *Volume // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) * // If called multiple times, the Name field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *Volume // If called multiple times, the GenerateName field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) // If called multiple times, the Namespace field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *V // If called multiple times, the UID field is set to the value of the last call. 
func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *Volu // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value stri // If called multiple times, the Generation field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *V // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value me // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value me // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds // overwriting an existing map entries in Labels field with the same key. func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string] // overwriting an existing map entries in Annotations field with the same key. 
func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ... if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ... func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -260,3 +260,9 @@ func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[str } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttributesClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go index cbff16fd0..ef8f6bbe6 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. 
func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go index 4266f0b6e..0fe9421de 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIDriverApplyConfiguration represents an declarative configuration of the CSIDriver type for use +// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use // with apply. type CSIDriverApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSIDriverApplyConfiguration struct { Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"` } -// CSIDriver constructs an declarative configuration of the CSIDriver type for use with +// CSIDriver constructs a declarative configuration of the CSIDriver type for use with // apply. func CSIDriver(name string) *CSIDriverApplyConfiguration { b := &CSIDriverApplyConfiguration{} @@ -84,7 +84,7 @@ func extractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *CSIDriverApplyConfiguration) WithKind(value string) *CSIDriverApplyConf // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *CSIDriverApplyConfiguration) WithAPIVersion(value string) *CSIDriverApp // If called multiple times, the Name field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *CSIDriverApplyConfiguration) WithName(value string) *CSIDriverApplyConf // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *CSIDriverApplyConfiguration) WithGenerateName(value string) *CSIDriverA // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *CSIDriverApplyConfiguration) WithNamespace(value string) *CSIDriverAppl // If called multiple times, the UID field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *CSIDriverApplyConfiguration) WithUID(value types.UID) *CSIDriverApplyCo // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *CSIDriverApplyConfiguration) WithResourceVersion(value string) *CSIDriv // If called multiple times, the Generation field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithGeneration(value int64) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *CSIDriverApplyConfiguration) WithGeneration(value int64) *CSIDriverAppl // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *CSIDriverApplyConfiguration) WithCreationTimestamp(value metav1.Time) * // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *CSIDriverApplyConfiguration) WithDeletionTimestamp(value metav1.Time) * // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *CSIDriverApplyConfiguration) WithDeletionGracePeriodSeconds(value int64 // overwriting an existing map entries in Labels field with the same key. 
func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *CSIDriverApplyConfiguration) WithLabels(entries map[string]string) *CSI // overwriting an existing map entries in Annotations field with the same key. func (b *CSIDriverApplyConfiguration) WithAnnotations(entries map[string]string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *CSIDriverApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRef func (b *CSIDriverApplyConfiguration) WithFinalizers(values ...string) *CSIDriverApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -245,3 +245,9 @@ func (b *CSIDriverApplyConfiguration) WithSpec(value *CSIDriverSpecApplyConfigur b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSIDriverApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go index 6097a615b..e62fe5888 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go @@ -19,23 +19,23 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" ) -// CSIDriverSpecApplyConfiguration represents an declarative configuration of the CSIDriverSpec type for use +// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use // with apply. 
type CSIDriverSpecApplyConfiguration struct { - AttachRequired *bool `json:"attachRequired,omitempty"` - PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"` - VolumeLifecycleModes []v1beta1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"` - StorageCapacity *bool `json:"storageCapacity,omitempty"` - FSGroupPolicy *v1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` - TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"` - RequiresRepublish *bool `json:"requiresRepublish,omitempty"` - SELinuxMount *bool `json:"seLinuxMount,omitempty"` + AttachRequired *bool `json:"attachRequired,omitempty"` + PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"` + VolumeLifecycleModes []storagev1beta1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"` + StorageCapacity *bool `json:"storageCapacity,omitempty"` + FSGroupPolicy *storagev1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` + TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"` + RequiresRepublish *bool `json:"requiresRepublish,omitempty"` + SELinuxMount *bool `json:"seLinuxMount,omitempty"` } -// CSIDriverSpecApplyConfiguration constructs an declarative configuration of the CSIDriverSpec type for use with +// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with // apply. func CSIDriverSpec() *CSIDriverSpecApplyConfiguration { return &CSIDriverSpecApplyConfiguration{} @@ -60,7 +60,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithPodInfoOnMount(value bool) *CSIDri // WithVolumeLifecycleModes adds the given value to the VolumeLifecycleModes field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the VolumeLifecycleModes field. -func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...v1beta1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration { +func (b *CSIDriverSpecApplyConfiguration) WithVolumeLifecycleModes(values ...storagev1beta1.VolumeLifecycleMode) *CSIDriverSpecApplyConfiguration { for i := range values { b.VolumeLifecycleModes = append(b.VolumeLifecycleModes, values[i]) } @@ -78,7 +78,7 @@ func (b *CSIDriverSpecApplyConfiguration) WithStorageCapacity(value bool) *CSIDr // WithFSGroupPolicy sets the FSGroupPolicy field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the FSGroupPolicy field is set to the value of the last call. 
-func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value v1beta1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration { +func (b *CSIDriverSpecApplyConfiguration) WithFSGroupPolicy(value storagev1beta1.FSGroupPolicy) *CSIDriverSpecApplyConfiguration { b.FSGroupPolicy = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go index 91588fd9f..4e7ad8997 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSINodeApplyConfiguration represents an declarative configuration of the CSINode type for use +// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use // with apply. type CSINodeApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -35,7 +35,7 @@ type CSINodeApplyConfiguration struct { Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"` } -// CSINode constructs an declarative configuration of the CSINode type for use with +// CSINode constructs a declarative configuration of the CSINode type for use with // apply. func CSINode(name string) *CSINodeApplyConfiguration { b := &CSINodeApplyConfiguration{} @@ -84,7 +84,7 @@ func extractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string, subres // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -92,7 +92,7 @@ func (b *CSINodeApplyConfiguration) WithKind(value string) *CSINodeApplyConfigur // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -101,7 +101,7 @@ func (b *CSINodeApplyConfiguration) WithAPIVersion(value string) *CSINodeApplyCo // If called multiple times, the Name field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -110,7 +110,7 @@ func (b *CSINodeApplyConfiguration) WithName(value string) *CSINodeApplyConfigur // If called multiple times, the GenerateName field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -119,7 +119,7 @@ func (b *CSINodeApplyConfiguration) WithGenerateName(value string) *CSINodeApply // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -128,7 +128,7 @@ func (b *CSINodeApplyConfiguration) WithNamespace(value string) *CSINodeApplyCon // If called multiple times, the UID field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -137,7 +137,7 @@ func (b *CSINodeApplyConfiguration) WithUID(value types.UID) *CSINodeApplyConfig // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -146,7 +146,7 @@ func (b *CSINodeApplyConfiguration) WithResourceVersion(value string) *CSINodeAp // If called multiple times, the Generation field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithGeneration(value int64) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -155,7 +155,7 @@ func (b *CSINodeApplyConfiguration) WithGeneration(value int64) *CSINodeApplyCon // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -164,7 +164,7 @@ func (b *CSINodeApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CS // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -173,7 +173,7 @@ func (b *CSINodeApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CS // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -183,11 +183,11 @@ func (b *CSINodeApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) // overwriting an existing map entries in Labels field with the same key. 
func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -198,11 +198,11 @@ func (b *CSINodeApplyConfiguration) WithLabels(entries map[string]string) *CSINo // overwriting an existing map entries in Annotations field with the same key. func (b *CSINodeApplyConfiguration) WithAnnotations(entries map[string]string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -216,7 +216,7 @@ func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -227,7 +227,7 @@ func (b *CSINodeApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerRefer func (b *CSINodeApplyConfiguration) WithFinalizers(values ...string) *CSINodeApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -245,3 +245,9 @@ func (b *CSINodeApplyConfiguration) WithSpec(value *CSINodeSpecApplyConfiguratio b.Spec = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *CSINodeApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go index 2c7de497b..65ad771bb 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// CSINodeDriverApplyConfiguration represents an declarative configuration of the CSINodeDriver type for use +// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use // with apply. 
type CSINodeDriverApplyConfiguration struct { Name *string `json:"name,omitempty"` @@ -27,7 +27,7 @@ type CSINodeDriverApplyConfiguration struct { Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"` } -// CSINodeDriverApplyConfiguration constructs an declarative configuration of the CSINodeDriver type for use with +// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with // apply. func CSINodeDriver() *CSINodeDriverApplyConfiguration { return &CSINodeDriverApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go index 94ff1b461..c9cbea1d9 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// CSINodeSpecApplyConfiguration represents an declarative configuration of the CSINodeSpec type for use +// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use // with apply. type CSINodeSpecApplyConfiguration struct { Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"` } -// CSINodeSpecApplyConfiguration constructs an declarative configuration of the CSINodeSpec type for use with +// CSINodeSpecApplyConfiguration constructs a declarative configuration of the CSINodeSpec type for use with // apply. func CSINodeSpec() *CSINodeSpecApplyConfiguration { return &CSINodeSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go index 2854a15da..c8acaf923 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go @@ -19,7 +19,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" resource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -28,7 +28,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// CSIStorageCapacityApplyConfiguration represents an declarative configuration of the CSIStorageCapacity type for use +// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use // with apply. type CSIStorageCapacityApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,7 +39,7 @@ type CSIStorageCapacityApplyConfiguration struct { MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"` } -// CSIStorageCapacity constructs an declarative configuration of the CSIStorageCapacity type for use with +// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with // apply. 
func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfiguration { b := &CSIStorageCapacityApplyConfiguration{} @@ -61,18 +61,18 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { +func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "") } // ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { +func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) { return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status") } -func extractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { +func extractCSIStorageCapacity(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) { b := &CSIStorageCapacityApplyConfiguration{} err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIStorageCapacity"), fieldManager, b, subresource) if err != nil { @@ -90,7 +90,7 @@ func extractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, f // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorageCapacityApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -98,7 +98,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithKind(value string) *CSIStorag // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSIStorageCapacityApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -107,7 +107,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithAPIVersion(value string) *CSI // If called multiple times, the Name field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -116,7 +116,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithName(value string) *CSIStorag // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -125,7 +125,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGenerateName(value string) *C // If called multiple times, the Namespace field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -134,7 +134,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithNamespace(value string) *CSIS // If called multiple times, the UID field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -143,7 +143,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithUID(value types.UID) *CSIStor // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -152,7 +152,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithResourceVersion(value string) // If called multiple times, the Generation field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -161,7 +161,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithGeneration(value int64) *CSIS // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -170,7 +170,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithCreationTimestamp(value metav // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -179,7 +179,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionTimestamp(value metav // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -189,11 +189,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithDeletionGracePeriodSeconds(va // overwriting an existing map entries in Labels field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -204,11 +204,11 @@ func (b *CSIStorageCapacityApplyConfiguration) WithLabels(entries map[string]str // overwriting an existing map entries in Annotations field with the same key. func (b *CSIStorageCapacityApplyConfiguration) WithAnnotations(entries map[string]string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -222,7 +222,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1 if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -233,7 +233,7 @@ func (b *CSIStorageCapacityApplyConfiguration) WithOwnerReferences(values ...*v1 func (b *CSIStorageCapacityApplyConfiguration) WithFinalizers(values ...string) *CSIStorageCapacityApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -275,3 +275,9 @@ func (b *CSIStorageCapacityApplyConfiguration) WithMaximumVolumeSize(value resou b.MaximumVolumeSize = &value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. 
+func (b *CSIStorageCapacityApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go index 02194f108..2d211754e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go @@ -20,7 +20,7 @@ package v1beta1 import ( corev1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -29,7 +29,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageClassApplyConfiguration represents an declarative configuration of the StorageClass type for use +// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use // with apply. type StorageClassApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -39,11 +39,11 @@ type StorageClassApplyConfiguration struct { ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"` MountOptions []string `json:"mountOptions,omitempty"` AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"` - VolumeBindingMode *v1beta1.VolumeBindingMode `json:"volumeBindingMode,omitempty"` + VolumeBindingMode *storagev1beta1.VolumeBindingMode `json:"volumeBindingMode,omitempty"` AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"` } -// StorageClass constructs an declarative configuration of the StorageClass type for use with +// StorageClass constructs a declarative configuration of the StorageClass type for use with // apply. func StorageClass(name string) *StorageClassApplyConfiguration { b := &StorageClassApplyConfiguration{} @@ -64,18 +64,18 @@ func StorageClass(name string) *StorageClassApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { +func ExtractStorageClass(storageClass *storagev1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { return extractStorageClass(storageClass, fieldManager, "") } // ExtractStorageClassStatus is the same as ExtractStorageClass except // that it extracts the status subresource applied configuration. // Experimental! 
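The bulk of the churn in these hunks is the switch from promoted-field access (b.Name = &value) to explicit embedded-struct selectors (b.ObjectMetaApplyConfiguration.Name = &value), alongside the new GetName accessors. For an embedded struct the two spellings select the same field; the regenerated code is presumably just being explicit now that each builder also carries its own getter. A minimal toy sketch, using made-up types rather than the client-go ones, showing the equivalence:

package main

import "fmt"

// Toy types only: Outer embeds *Inner the same way the generated apply
// configurations embed *ObjectMetaApplyConfiguration; none of these names
// come from client-go.
type Inner struct {
	Name *string
}

type Outer struct {
	*Inner
}

func main() {
	o := &Outer{Inner: &Inner{}}
	name := "demo"

	o.Name = &name       // promoted-field form, as in the old generated code
	o.Inner.Name = &name // explicit-selector form, as in the regenerated code

	fmt.Println(o.Name == o.Inner.Name) // true: both select the same field
}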
-func ExtractStorageClassStatus(storageClass *v1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { +func ExtractStorageClassStatus(storageClass *storagev1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) { return extractStorageClass(storageClass, fieldManager, "status") } -func extractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) { +func extractStorageClass(storageClass *storagev1beta1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) { b := &StorageClassApplyConfiguration{} err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.StorageClass"), fieldManager, b, subresource) if err != nil { @@ -92,7 +92,7 @@ func extractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -100,7 +100,7 @@ func (b *StorageClassApplyConfiguration) WithKind(value string) *StorageClassApp // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageClassApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -109,7 +109,7 @@ func (b *StorageClassApplyConfiguration) WithAPIVersion(value string) *StorageCl // If called multiple times, the Name field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -118,7 +118,7 @@ func (b *StorageClassApplyConfiguration) WithName(value string) *StorageClassApp // If called multiple times, the GenerateName field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -127,7 +127,7 @@ func (b *StorageClassApplyConfiguration) WithGenerateName(value string) *Storage // If called multiple times, the Namespace field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -136,7 +136,7 @@ func (b *StorageClassApplyConfiguration) WithNamespace(value string) *StorageCla // If called multiple times, the UID field is set to the value of the last call. 
func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -145,7 +145,7 @@ func (b *StorageClassApplyConfiguration) WithUID(value types.UID) *StorageClassA // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -154,7 +154,7 @@ func (b *StorageClassApplyConfiguration) WithResourceVersion(value string) *Stor // If called multiple times, the Generation field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithGeneration(value int64) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -163,7 +163,7 @@ func (b *StorageClassApplyConfiguration) WithGeneration(value int64) *StorageCla // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -172,7 +172,7 @@ func (b *StorageClassApplyConfiguration) WithCreationTimestamp(value metav1.Time // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -181,7 +181,7 @@ func (b *StorageClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -191,11 +191,11 @@ func (b *StorageClassApplyConfiguration) WithDeletionGracePeriodSeconds(value in // overwriting an existing map entries in Labels field with the same key. func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -206,11 +206,11 @@ func (b *StorageClassApplyConfiguration) WithLabels(entries map[string]string) * // overwriting an existing map entries in Annotations field with the same key. 
func (b *StorageClassApplyConfiguration) WithAnnotations(entries map[string]string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -224,7 +224,7 @@ func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -235,7 +235,7 @@ func (b *StorageClassApplyConfiguration) WithOwnerReferences(values ...*v1.Owner func (b *StorageClassApplyConfiguration) WithFinalizers(values ...string) *StorageClassApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -297,7 +297,7 @@ func (b *StorageClassApplyConfiguration) WithAllowVolumeExpansion(value bool) *S // WithVolumeBindingMode sets the VolumeBindingMode field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VolumeBindingMode field is set to the value of the last call. -func (b *StorageClassApplyConfiguration) WithVolumeBindingMode(value v1beta1.VolumeBindingMode) *StorageClassApplyConfiguration { +func (b *StorageClassApplyConfiguration) WithVolumeBindingMode(value storagev1beta1.VolumeBindingMode) *StorageClassApplyConfiguration { b.VolumeBindingMode = &value return b } @@ -314,3 +314,9 @@ func (b *StorageClassApplyConfiguration) WithAllowedTopologies(values ...*applyc } return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go index 89c99d560..e0f2df28e 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go @@ -18,14 +18,14 @@ limitations under the License. package v1beta1 -// TokenRequestApplyConfiguration represents an declarative configuration of the TokenRequest type for use +// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use // with apply. type TokenRequestApplyConfiguration struct { Audience *string `json:"audience,omitempty"` ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"` } -// TokenRequestApplyConfiguration constructs an declarative configuration of the TokenRequest type for use with +// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with // apply. 
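For orientation, the sketch below shows what the regenerated StorageClass builder looks like from the caller's side: it chains the constructor and With* methods that appear in the hunks above (WithProvisioner is not visible in this excerpt but follows the same generated per-field pattern) and reads the name back through the newly added GetName accessor. The storage-class values are invented for illustration.

package main

import (
	"encoding/json"
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	applystoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
)

func main() {
	// Build a declarative configuration; only the fields that are explicitly
	// set are serialized, which is what makes it suitable for server-side apply.
	sc := applystoragev1beta1.StorageClass("fast-local").
		WithProvisioner("kubernetes.io/no-provisioner").
		WithVolumeBindingMode(storagev1beta1.VolumeBindingWaitForFirstConsumer).
		WithLabels(map[string]string{"tier": "fast"})

	fmt.Println(*sc.GetName()) // "fast-local", via the new accessor

	out, err := json.MarshalIndent(sc, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}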
func TokenRequest() *TokenRequestApplyConfiguration { return &TokenRequestApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go index 9fccaf5cf..3f7110bf4 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttachmentApplyConfiguration represents an declarative configuration of the VolumeAttachment type for use +// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use // with apply. type VolumeAttachmentApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type VolumeAttachmentApplyConfiguration struct { Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"` } -// VolumeAttachment constructs an declarative configuration of the VolumeAttachment type for use with +// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with // apply. func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration { b := &VolumeAttachmentApplyConfiguration{} @@ -85,7 +85,7 @@ func extractVolumeAttachment(volumeAttachment *storagev1beta1.VolumeAttachment, // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttachmentApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithKind(value string) *VolumeAttac // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *VolumeAttachmentApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithAPIVersion(value string) *Volum // If called multiple times, the Name field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithName(value string) *VolumeAttac // If called multiple times, the GenerateName field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGenerateName(value string) *Vol // If called multiple times, the Namespace field is set to the value of the last call. 
func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithNamespace(value string) *Volume // If called multiple times, the UID field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithUID(value types.UID) *VolumeAtt // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithResourceVersion(value string) * // If called multiple times, the Generation field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithGeneration(value int64) *Volume // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithCreationTimestamp(value metav1. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionTimestamp(value metav1. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithDeletionGracePeriodSeconds(valu // overwriting an existing map entries in Labels field with the same key. 
func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *VolumeAttachmentApplyConfiguration) WithLabels(entries map[string]strin // overwriting an existing map entries in Annotations field with the same key. func (b *VolumeAttachmentApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *VolumeAttachmentApplyConfiguration) WithOwnerReferences(values ...*v1.O func (b *VolumeAttachmentApplyConfiguration) WithFinalizers(values ...string) *VolumeAttachmentApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *VolumeAttachmentApplyConfiguration) WithStatus(value *VolumeAttachmentS b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttachmentApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go index 9700b38ee..b08dd3148 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/client-go/applyconfigurations/core/v1" ) -// VolumeAttachmentSourceApplyConfiguration represents an declarative configuration of the VolumeAttachmentSource type for use +// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use // with apply. 
type VolumeAttachmentSourceApplyConfiguration struct { PersistentVolumeName *string `json:"persistentVolumeName,omitempty"` InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"` } -// VolumeAttachmentSourceApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSource type for use with +// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with // apply. func VolumeAttachmentSource() *VolumeAttachmentSourceApplyConfiguration { return &VolumeAttachmentSourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go index 1d5e304bb..3bdaeb45d 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// VolumeAttachmentSpecApplyConfiguration represents an declarative configuration of the VolumeAttachmentSpec type for use +// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use // with apply. type VolumeAttachmentSpecApplyConfiguration struct { Attacher *string `json:"attacher,omitempty"` @@ -26,7 +26,7 @@ type VolumeAttachmentSpecApplyConfiguration struct { NodeName *string `json:"nodeName,omitempty"` } -// VolumeAttachmentSpecApplyConfiguration constructs an declarative configuration of the VolumeAttachmentSpec type for use with +// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with // apply. func VolumeAttachmentSpec() *VolumeAttachmentSpecApplyConfiguration { return &VolumeAttachmentSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go index fa1855a24..f7046cdb3 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 -// VolumeAttachmentStatusApplyConfiguration represents an declarative configuration of the VolumeAttachmentStatus type for use +// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use // with apply. type VolumeAttachmentStatusApplyConfiguration struct { Attached *bool `json:"attached,omitempty"` @@ -27,7 +27,7 @@ type VolumeAttachmentStatusApplyConfiguration struct { DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"` } -// VolumeAttachmentStatusApplyConfiguration constructs an declarative configuration of the VolumeAttachmentStatus type for use with +// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with // apply. 
func VolumeAttachmentStatus() *VolumeAttachmentStatusApplyConfiguration { return &VolumeAttachmentStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 000000000..ab1bda330 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,268 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + storagev1beta1 "k8s.io/api/storage/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use +// with apply. +type VolumeAttributesClassApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DriverName *string `json:"driverName,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` +} + +// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with +// apply. +func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { + b := &VolumeAttributesClassApplyConfiguration{} + b.WithName(name) + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1beta1") + return b +} + +// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from +// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a +// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// volumeAttributesClass must be a unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API. +// ExtractVolumeAttributesClass provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
+func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") +} + +// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractVolumeAttributesClassStatus(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { + return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") +} + +func extractVolumeAttributesClass(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { + b := &VolumeAttributesClassApplyConfiguration{} + err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttributesClass"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(volumeAttributesClass.Name) + + b.WithKind("VolumeAttributesClass") + b.WithAPIVersion("storage.k8s.io/v1beta1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { + b.TypeMetaApplyConfiguration.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { + b.TypeMetaApplyConfiguration.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
+func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.ObjectMetaApplyConfiguration.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) + } + return b +} + +func (b *VolumeAttributesClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithDriverName sets the DriverName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverName field is set to the value of the last call. +func (b *VolumeAttributesClassApplyConfiguration) WithDriverName(value string) *VolumeAttributesClassApplyConfiguration { + b.DriverName = &value + return b +} + +// WithParameters puts the entries into the Parameters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Parameters field, +// overwriting an existing map entries in Parameters field with the same key. +func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[string]string) *VolumeAttributesClassApplyConfiguration { + if b.Parameters == nil && len(entries) > 0 { + b.Parameters = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Parameters[k] = v + } + return b +} + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *VolumeAttributesClassApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go index 3f255fce7..fec1c9ade 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go @@ -22,14 +22,14 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// VolumeErrorApplyConfiguration represents an declarative configuration of the VolumeError type for use +// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use // with apply. type VolumeErrorApplyConfiguration struct { Time *v1.Time `json:"time,omitempty"` Message *string `json:"message,omitempty"` } -// VolumeErrorApplyConfiguration constructs an declarative configuration of the VolumeError type for use with +// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with // apply. 
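The newly vendored VolumeAttributesClass file follows the same builder pattern. A small usage sketch under the same caveats (the driver name and parameters are placeholders; in a real program the resulting object would normally be handed to the typed client's Apply call rather than marshalled by hand):

package main

import (
	"encoding/json"
	"fmt"

	applystoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
)

func main() {
	// Declarative configuration built with the generated constructor and
	// With* methods from the new file above.
	vac := applystoragev1beta1.VolumeAttributesClass("gold").
		WithDriverName("ebs.csi.aws.com"). // placeholder driver name
		WithParameters(map[string]string{"iops": "16000", "throughput": "1000"})

	fmt.Println(*vac.GetName()) // "gold", via the new accessor

	// The JSON form is what a server-side apply patch would carry.
	out, err := json.MarshalIndent(vac, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}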
func VolumeError() *VolumeErrorApplyConfiguration { return &VolumeErrorApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go index 4b69b64c9..b42c9decc 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go @@ -18,13 +18,13 @@ limitations under the License. package v1beta1 -// VolumeNodeResourcesApplyConfiguration represents an declarative configuration of the VolumeNodeResources type for use +// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use // with apply. type VolumeNodeResourcesApplyConfiguration struct { Count *int32 `json:"count,omitempty"` } -// VolumeNodeResourcesApplyConfiguration constructs an declarative configuration of the VolumeNodeResources type for use with +// VolumeNodeResourcesApplyConfiguration constructs a declarative configuration of the VolumeNodeResources type for use with // apply. func VolumeNodeResources() *VolumeNodeResourcesApplyConfiguration { return &VolumeNodeResourcesApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go index c733ac5c0..c8f9f009a 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go @@ -18,7 +18,7 @@ limitations under the License. package v1alpha1 -// GroupVersionResourceApplyConfiguration represents an declarative configuration of the GroupVersionResource type for use +// GroupVersionResourceApplyConfiguration represents a declarative configuration of the GroupVersionResource type for use // with apply. type GroupVersionResourceApplyConfiguration struct { Group *string `json:"group,omitempty"` @@ -26,7 +26,7 @@ type GroupVersionResourceApplyConfiguration struct { Resource *string `json:"resource,omitempty"` } -// GroupVersionResourceApplyConfiguration constructs an declarative configuration of the GroupVersionResource type for use with +// GroupVersionResourceApplyConfiguration constructs a declarative configuration of the GroupVersionResource type for use with // apply. 
func GroupVersionResource() *GroupVersionResourceApplyConfiguration { return &GroupVersionResourceApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go index d0f863446..5ffd572ee 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go @@ -20,21 +20,21 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" - v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// MigrationConditionApplyConfiguration represents an declarative configuration of the MigrationCondition type for use +// MigrationConditionApplyConfiguration represents a declarative configuration of the MigrationCondition type for use // with apply. type MigrationConditionApplyConfiguration struct { - Type *v1alpha1.MigrationConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *storagemigrationv1alpha1.MigrationConditionType `json:"type,omitempty"` + Status *v1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } -// MigrationConditionApplyConfiguration constructs an declarative configuration of the MigrationCondition type for use with +// MigrationConditionApplyConfiguration constructs a declarative configuration of the MigrationCondition type for use with // apply. func MigrationCondition() *MigrationConditionApplyConfiguration { return &MigrationConditionApplyConfiguration{} @@ -43,7 +43,7 @@ func MigrationCondition() *MigrationConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *MigrationConditionApplyConfiguration) WithType(value v1alpha1.MigrationConditionType) *MigrationConditionApplyConfiguration { +func (b *MigrationConditionApplyConfiguration) WithType(value storagemigrationv1alpha1.MigrationConditionType) *MigrationConditionApplyConfiguration { b.Type = &value return b } diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go index cc57b2b12..a6dbc13a5 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// StorageVersionMigrationApplyConfiguration represents an declarative configuration of the StorageVersionMigration type for use +// StorageVersionMigrationApplyConfiguration represents a declarative configuration of the StorageVersionMigration type for use // with apply. type StorageVersionMigrationApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` @@ -36,7 +36,7 @@ type StorageVersionMigrationApplyConfiguration struct { Status *StorageVersionMigrationStatusApplyConfiguration `json:"status,omitempty"` } -// StorageVersionMigration constructs an declarative configuration of the StorageVersionMigration type for use with +// StorageVersionMigration constructs a declarative configuration of the StorageVersionMigration type for use with // apply. func StorageVersionMigration(name string) *StorageVersionMigrationApplyConfiguration { b := &StorageVersionMigrationApplyConfiguration{} @@ -85,7 +85,7 @@ func extractStorageVersionMigration(storageVersionMigration *storagemigrationv1a // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithKind(value string) *StorageVersionMigrationApplyConfiguration { - b.Kind = &value + b.TypeMetaApplyConfiguration.Kind = &value return b } @@ -93,7 +93,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithKind(value string) *Stor // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithAPIVersion(value string) *StorageVersionMigrationApplyConfiguration { - b.APIVersion = &value + b.TypeMetaApplyConfiguration.APIVersion = &value return b } @@ -102,7 +102,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithAPIVersion(value string) // If called multiple times, the Name field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithName(value string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value + b.ObjectMetaApplyConfiguration.Name = &value return b } @@ -111,7 +111,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithName(value string) *Stor // If called multiple times, the GenerateName field is set to the value of the last call. 
func (b *StorageVersionMigrationApplyConfiguration) WithGenerateName(value string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value + b.ObjectMetaApplyConfiguration.GenerateName = &value return b } @@ -120,7 +120,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithGenerateName(value strin // If called multiple times, the Namespace field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithNamespace(value string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value + b.ObjectMetaApplyConfiguration.Namespace = &value return b } @@ -129,7 +129,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithNamespace(value string) // If called multiple times, the UID field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithUID(value types.UID) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value + b.ObjectMetaApplyConfiguration.UID = &value return b } @@ -138,7 +138,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithUID(value types.UID) *St // If called multiple times, the ResourceVersion field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithResourceVersion(value string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value + b.ObjectMetaApplyConfiguration.ResourceVersion = &value return b } @@ -147,7 +147,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithResourceVersion(value st // If called multiple times, the Generation field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithGeneration(value int64) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value + b.ObjectMetaApplyConfiguration.Generation = &value return b } @@ -156,7 +156,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithGeneration(value int64) // If called multiple times, the CreationTimestamp field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value + b.ObjectMetaApplyConfiguration.CreationTimestamp = &value return b } @@ -165,7 +165,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithCreationTimestamp(value // If called multiple times, the DeletionTimestamp field is set to the value of the last call. func (b *StorageVersionMigrationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value + b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value return b } @@ -174,7 +174,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithDeletionTimestamp(value // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
func (b *StorageVersionMigrationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value + b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value return b } @@ -184,11 +184,11 @@ func (b *StorageVersionMigrationApplyConfiguration) WithDeletionGracePeriodSecon // overwriting an existing map entries in Labels field with the same key. func (b *StorageVersionMigrationApplyConfiguration) WithLabels(entries map[string]string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) } for k, v := range entries { - b.Labels[k] = v + b.ObjectMetaApplyConfiguration.Labels[k] = v } return b } @@ -199,11 +199,11 @@ func (b *StorageVersionMigrationApplyConfiguration) WithLabels(entries map[strin // overwriting an existing map entries in Annotations field with the same key. func (b *StorageVersionMigrationApplyConfiguration) WithAnnotations(entries map[string]string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) + if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { + b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) } for k, v := range entries { - b.Annotations[k] = v + b.ObjectMetaApplyConfiguration.Annotations[k] = v } return b } @@ -217,7 +217,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithOwnerReferences(values . if values[i] == nil { panic("nil value passed to WithOwnerReferences") } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) + b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) } return b } @@ -228,7 +228,7 @@ func (b *StorageVersionMigrationApplyConfiguration) WithOwnerReferences(values . func (b *StorageVersionMigrationApplyConfiguration) WithFinalizers(values ...string) *StorageVersionMigrationApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) + b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) } return b } @@ -254,3 +254,9 @@ func (b *StorageVersionMigrationApplyConfiguration) WithStatus(value *StorageVer b.Status = value return b } + +// GetName retrieves the value of the Name field in the declarative configuration. +func (b *StorageVersionMigrationApplyConfiguration) GetName() *string { + b.ensureObjectMetaApplyConfigurationExists() + return b.ObjectMetaApplyConfiguration.Name +} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go index 6c7c5b264..02ddb540f 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go @@ -18,14 +18,14 @@ limitations under the License. 
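A minimal sketch of how the generated builder above is consumed, assuming made-up name, label, and finalizer values; StorageVersionMigration(name), WithLabels, WithFinalizers, and the newly added GetName accessor are all visible in this file's diff.

package main

import (
	"fmt"

	svmapply "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1"
)

func main() {
	// Chained With* calls write through to the embedded TypeMeta/ObjectMeta apply
	// configurations, which is what the explicit
	// b.ObjectMetaApplyConfiguration.<Field> assignments above implement.
	svm := svmapply.StorageVersionMigration("example-migration").
		WithLabels(map[string]string{"app": "example"}).
		WithFinalizers("example.io/cleanup")

	// GetName (added in this update) reads the name back out of the embedded ObjectMeta.
	fmt.Println(*svm.GetName()) // example-migration
}
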
package v1alpha1 -// StorageVersionMigrationSpecApplyConfiguration represents an declarative configuration of the StorageVersionMigrationSpec type for use +// StorageVersionMigrationSpecApplyConfiguration represents a declarative configuration of the StorageVersionMigrationSpec type for use // with apply. type StorageVersionMigrationSpecApplyConfiguration struct { Resource *GroupVersionResourceApplyConfiguration `json:"resource,omitempty"` ContinueToken *string `json:"continueToken,omitempty"` } -// StorageVersionMigrationSpecApplyConfiguration constructs an declarative configuration of the StorageVersionMigrationSpec type for use with +// StorageVersionMigrationSpecApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationSpec type for use with // apply. func StorageVersionMigrationSpec() *StorageVersionMigrationSpecApplyConfiguration { return &StorageVersionMigrationSpecApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go b/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go index b8d397548..fc957cb15 100644 --- a/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go +++ b/constraint/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go @@ -18,14 +18,14 @@ limitations under the License. package v1alpha1 -// StorageVersionMigrationStatusApplyConfiguration represents an declarative configuration of the StorageVersionMigrationStatus type for use +// StorageVersionMigrationStatusApplyConfiguration represents a declarative configuration of the StorageVersionMigrationStatus type for use // with apply. type StorageVersionMigrationStatusApplyConfiguration struct { Conditions []MigrationConditionApplyConfiguration `json:"conditions,omitempty"` ResourceVersion *string `json:"resourceVersion,omitempty"` } -// StorageVersionMigrationStatusApplyConfiguration constructs an declarative configuration of the StorageVersionMigrationStatus type for use with +// StorageVersionMigrationStatusApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationStatus type for use with // apply. func StorageVersionMigrationStatus() *StorageVersionMigrationStatusApplyConfiguration { return &StorageVersionMigrationStatusApplyConfiguration{} diff --git a/constraint/vendor/k8s.io/client-go/discovery/fake/discovery.go b/constraint/vendor/k8s.io/client-go/discovery/fake/discovery.go deleted file mode 100644 index f8a78e1ef..000000000 --- a/constraint/vendor/k8s.io/client-go/discovery/fake/discovery.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - "fmt" - "net/http" - - openapi_v2 "github.com/google/gnostic-models/openapiv2" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/discovery" - "k8s.io/client-go/openapi" - kubeversion "k8s.io/client-go/pkg/version" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/testing" -) - -// FakeDiscovery implements discovery.DiscoveryInterface and sometimes calls testing.Fake.Invoke with an action, -// but doesn't respect the return value if any. There is a way to fake static values like ServerVersion by using the Faked... fields on the struct. -type FakeDiscovery struct { - *testing.Fake - FakedServerVersion *version.Info -} - -// ServerResourcesForGroupVersion returns the supported resources for a group -// and version. -func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { - action := testing.ActionImpl{ - Verb: "get", - Resource: schema.GroupVersionResource{Resource: "resource"}, - } - c.Invokes(action, nil) - for _, resourceList := range c.Resources { - if resourceList.GroupVersion == groupVersion { - return resourceList, nil - } - } - return nil, &errors.StatusError{ - ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Code: http.StatusNotFound, - Reason: metav1.StatusReasonNotFound, - Message: fmt.Sprintf("the server could not find the requested resource, GroupVersion %q not found", groupVersion), - }} -} - -// ServerGroupsAndResources returns the supported groups and resources for all groups and versions. -func (c *FakeDiscovery) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - sgs, err := c.ServerGroups() - if err != nil { - return nil, nil, err - } - resultGroups := []*metav1.APIGroup{} - for i := range sgs.Groups { - resultGroups = append(resultGroups, &sgs.Groups[i]) - } - - action := testing.ActionImpl{ - Verb: "get", - Resource: schema.GroupVersionResource{Resource: "resource"}, - } - c.Invokes(action, nil) - return resultGroups, c.Resources, nil -} - -// ServerPreferredResources returns the supported resources with the version -// preferred by the server. -func (c *FakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - return nil, nil -} - -// ServerPreferredNamespacedResources returns the supported namespaced resources -// with the version preferred by the server. -func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { - return nil, nil -} - -// ServerGroups returns the supported groups, with information like supported -// versions and the preferred version. 
-func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { - action := testing.ActionImpl{ - Verb: "get", - Resource: schema.GroupVersionResource{Resource: "group"}, - } - c.Invokes(action, nil) - - groups := map[string]*metav1.APIGroup{} - - for _, res := range c.Resources { - gv, err := schema.ParseGroupVersion(res.GroupVersion) - if err != nil { - return nil, err - } - group := groups[gv.Group] - if group == nil { - group = &metav1.APIGroup{ - Name: gv.Group, - PreferredVersion: metav1.GroupVersionForDiscovery{ - GroupVersion: res.GroupVersion, - Version: gv.Version, - }, - } - groups[gv.Group] = group - } - - group.Versions = append(group.Versions, metav1.GroupVersionForDiscovery{ - GroupVersion: res.GroupVersion, - Version: gv.Version, - }) - } - - list := &metav1.APIGroupList{} - for _, apiGroup := range groups { - list.Groups = append(list.Groups, *apiGroup) - } - - return list, nil - -} - -// ServerVersion retrieves and parses the server's version. -func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { - action := testing.ActionImpl{} - action.Verb = "get" - action.Resource = schema.GroupVersionResource{Resource: "version"} - _, err := c.Invokes(action, nil) - if err != nil { - return nil, err - } - - if c.FakedServerVersion != nil { - return c.FakedServerVersion, nil - } - - versionInfo := kubeversion.Get() - return &versionInfo, nil -} - -// OpenAPISchema retrieves and parses the swagger API schema the server supports. -func (c *FakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) { - return &openapi_v2.Document{}, nil -} - -func (c *FakeDiscovery) OpenAPIV3() openapi.Client { - panic("unimplemented") -} - -// RESTClient returns a RESTClient that is used to communicate with API server -// by this client implementation. -func (c *FakeDiscovery) RESTClient() restclient.Interface { - return nil -} - -func (c *FakeDiscovery) WithLegacy() discovery.DiscoveryInterface { - panic("unimplemented") -} diff --git a/constraint/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go b/constraint/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go deleted file mode 100644 index 62d01339d..000000000 --- a/constraint/vendor/k8s.io/client-go/dynamic/dynamicinformer/informer.go +++ /dev/null @@ -1,188 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dynamicinformer - -import ( - "context" - "sync" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/dynamic/dynamiclister" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" -) - -// NewDynamicSharedInformerFactory constructs a new instance of dynamicSharedInformerFactory for all namespaces. 
-func NewDynamicSharedInformerFactory(client dynamic.Interface, defaultResync time.Duration) DynamicSharedInformerFactory { - return NewFilteredDynamicSharedInformerFactory(client, defaultResync, metav1.NamespaceAll, nil) -} - -// NewFilteredDynamicSharedInformerFactory constructs a new instance of dynamicSharedInformerFactory. -// Listers obtained via this factory will be subject to the same filters as specified here. -func NewFilteredDynamicSharedInformerFactory(client dynamic.Interface, defaultResync time.Duration, namespace string, tweakListOptions TweakListOptionsFunc) DynamicSharedInformerFactory { - return &dynamicSharedInformerFactory{ - client: client, - defaultResync: defaultResync, - namespace: namespace, - informers: map[schema.GroupVersionResource]informers.GenericInformer{}, - startedInformers: make(map[schema.GroupVersionResource]bool), - tweakListOptions: tweakListOptions, - } -} - -type dynamicSharedInformerFactory struct { - client dynamic.Interface - defaultResync time.Duration - namespace string - - lock sync.Mutex - informers map[schema.GroupVersionResource]informers.GenericInformer - // startedInformers is used for tracking which informers have been started. - // This allows Start() to be called multiple times safely. - startedInformers map[schema.GroupVersionResource]bool - tweakListOptions TweakListOptionsFunc - - // wg tracks how many goroutines were started. - wg sync.WaitGroup - // shuttingDown is true when Shutdown has been called. It may still be running - // because it needs to wait for goroutines. - shuttingDown bool -} - -var _ DynamicSharedInformerFactory = &dynamicSharedInformerFactory{} - -func (f *dynamicSharedInformerFactory) ForResource(gvr schema.GroupVersionResource) informers.GenericInformer { - f.lock.Lock() - defer f.lock.Unlock() - - key := gvr - informer, exists := f.informers[key] - if exists { - return informer - } - - informer = NewFilteredDynamicInformer(f.client, gvr, f.namespace, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) - f.informers[key] = informer - - return informer -} - -// Start initializes all requested informers. -func (f *dynamicSharedInformerFactory) Start(stopCh <-chan struct{}) { - f.lock.Lock() - defer f.lock.Unlock() - - if f.shuttingDown { - return - } - - for informerType, informer := range f.informers { - if !f.startedInformers[informerType] { - f.wg.Add(1) - // We need a new variable in each loop iteration, - // otherwise the goroutine would use the loop variable - // and that keeps changing. - informer := informer.Informer() - go func() { - defer f.wg.Done() - informer.Run(stopCh) - }() - f.startedInformers[informerType] = true - } - } -} - -// WaitForCacheSync waits for all started informers' cache were synced. 
-func (f *dynamicSharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool { - informers := func() map[schema.GroupVersionResource]cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informers := map[schema.GroupVersionResource]cache.SharedIndexInformer{} - for informerType, informer := range f.informers { - if f.startedInformers[informerType] { - informers[informerType] = informer.Informer() - } - } - return informers - }() - - res := map[schema.GroupVersionResource]bool{} - for informType, informer := range informers { - res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) - } - return res -} - -func (f *dynamicSharedInformerFactory) Shutdown() { - // Will return immediately if there is nothing to wait for. - defer f.wg.Wait() - - f.lock.Lock() - defer f.lock.Unlock() - f.shuttingDown = true -} - -// NewFilteredDynamicInformer constructs a new informer for a dynamic type. -func NewFilteredDynamicInformer(client dynamic.Interface, gvr schema.GroupVersionResource, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions TweakListOptionsFunc) informers.GenericInformer { - return &dynamicInformer{ - gvr: gvr, - informer: cache.NewSharedIndexInformerWithOptions( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.Resource(gvr).Namespace(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.Resource(gvr).Namespace(namespace).Watch(context.TODO(), options) - }, - }, - &unstructured.Unstructured{}, - cache.SharedIndexInformerOptions{ - ResyncPeriod: resyncPeriod, - Indexers: indexers, - ObjectDescription: gvr.String(), - }, - ), - } -} - -type dynamicInformer struct { - informer cache.SharedIndexInformer - gvr schema.GroupVersionResource -} - -var _ informers.GenericInformer = &dynamicInformer{} - -func (d *dynamicInformer) Informer() cache.SharedIndexInformer { - return d.informer -} - -func (d *dynamicInformer) Lister() cache.GenericLister { - return dynamiclister.NewRuntimeObjectShim(dynamiclister.New(d.informer.GetIndexer(), d.gvr)) -} diff --git a/constraint/vendor/k8s.io/client-go/dynamic/dynamicinformer/interface.go b/constraint/vendor/k8s.io/client-go/dynamic/dynamicinformer/interface.go deleted file mode 100644 index 0419ef4f8..000000000 --- a/constraint/vendor/k8s.io/client-go/dynamic/dynamicinformer/interface.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package dynamicinformer - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/informers" -) - -// DynamicSharedInformerFactory provides access to a shared informer and lister for dynamic client -type DynamicSharedInformerFactory interface { - // Start initializes all requested informers. They are handled in goroutines - // which run until the stop channel gets closed. - Start(stopCh <-chan struct{}) - - // ForResource gives generic access to a shared informer of the matching type. - ForResource(gvr schema.GroupVersionResource) informers.GenericInformer - - // WaitForCacheSync blocks until all started informers' caches were synced - // or the stop channel gets closed. - WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool - - // Shutdown marks a factory as shutting down. At that point no new - // informers can be started anymore and Start will return without - // doing anything. - // - // In addition, Shutdown blocks until all goroutines have terminated. For that - // to happen, the close channel(s) that they were started with must be closed, - // either before Shutdown gets called or while it is waiting. - // - // Shutdown may be called multiple times, even concurrently. All such calls will - // block until all goroutines have terminated. - Shutdown() -} - -// TweakListOptionsFunc defines the signature of a helper function -// that wants to provide more listing options to API -type TweakListOptionsFunc func(*metav1.ListOptions) diff --git a/constraint/vendor/k8s.io/client-go/dynamic/dynamiclister/interface.go b/constraint/vendor/k8s.io/client-go/dynamic/dynamiclister/interface.go deleted file mode 100644 index c39cbee92..000000000 --- a/constraint/vendor/k8s.io/client-go/dynamic/dynamiclister/interface.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dynamiclister - -import ( - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" -) - -// Lister helps list resources. -type Lister interface { - // List lists all resources in the indexer. - List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) - // Get retrieves a resource from the indexer with the given name - Get(name string) (*unstructured.Unstructured, error) - // Namespace returns an object that can list and get resources in a given namespace. - Namespace(namespace string) NamespaceLister -} - -// NamespaceLister helps list and get resources. -type NamespaceLister interface { - // List lists all resources in the indexer for a given namespace. - List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) - // Get retrieves a resource from the indexer for a given namespace and name. 
- Get(name string) (*unstructured.Unstructured, error) -} diff --git a/constraint/vendor/k8s.io/client-go/dynamic/dynamiclister/lister.go b/constraint/vendor/k8s.io/client-go/dynamic/dynamiclister/lister.go deleted file mode 100644 index a50fc471e..000000000 --- a/constraint/vendor/k8s.io/client-go/dynamic/dynamiclister/lister.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dynamiclister - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" -) - -var _ Lister = &dynamicLister{} -var _ NamespaceLister = &dynamicNamespaceLister{} - -// dynamicLister implements the Lister interface. -type dynamicLister struct { - indexer cache.Indexer - gvr schema.GroupVersionResource -} - -// New returns a new Lister. -func New(indexer cache.Indexer, gvr schema.GroupVersionResource) Lister { - return &dynamicLister{indexer: indexer, gvr: gvr} -} - -// List lists all resources in the indexer. -func (l *dynamicLister) List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) { - err = cache.ListAll(l.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*unstructured.Unstructured)) - }) - return ret, err -} - -// Get retrieves a resource from the indexer with the given name -func (l *dynamicLister) Get(name string) (*unstructured.Unstructured, error) { - obj, exists, err := l.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(l.gvr.GroupResource(), name) - } - return obj.(*unstructured.Unstructured), nil -} - -// Namespace returns an object that can list and get resources from a given namespace. -func (l *dynamicLister) Namespace(namespace string) NamespaceLister { - return &dynamicNamespaceLister{indexer: l.indexer, namespace: namespace, gvr: l.gvr} -} - -// dynamicNamespaceLister implements the NamespaceLister interface. -type dynamicNamespaceLister struct { - indexer cache.Indexer - namespace string - gvr schema.GroupVersionResource -} - -// List lists all resources in the indexer for a given namespace. -func (l *dynamicNamespaceLister) List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) { - err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*unstructured.Unstructured)) - }) - return ret, err -} - -// Get retrieves a resource from the indexer for a given namespace and name. 
-func (l *dynamicNamespaceLister) Get(name string) (*unstructured.Unstructured, error) { - obj, exists, err := l.indexer.GetByKey(l.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(l.gvr.GroupResource(), name) - } - return obj.(*unstructured.Unstructured), nil -} diff --git a/constraint/vendor/k8s.io/client-go/dynamic/dynamiclister/shim.go b/constraint/vendor/k8s.io/client-go/dynamic/dynamiclister/shim.go deleted file mode 100644 index 92a5f54af..000000000 --- a/constraint/vendor/k8s.io/client-go/dynamic/dynamiclister/shim.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dynamiclister - -import ( - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/cache" -) - -var _ cache.GenericLister = &dynamicListerShim{} -var _ cache.GenericNamespaceLister = &dynamicNamespaceListerShim{} - -// dynamicListerShim implements the cache.GenericLister interface. -type dynamicListerShim struct { - lister Lister -} - -// NewRuntimeObjectShim returns a new shim for Lister. -// It wraps Lister so that it implements cache.GenericLister interface -func NewRuntimeObjectShim(lister Lister) cache.GenericLister { - return &dynamicListerShim{lister: lister} -} - -// List will return all objects across namespaces -func (s *dynamicListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { - objs, err := s.lister.List(selector) - if err != nil { - return nil, err - } - - ret = make([]runtime.Object, len(objs)) - for index, obj := range objs { - ret[index] = obj - } - return ret, err -} - -// Get will attempt to retrieve assuming that name==key -func (s *dynamicListerShim) Get(name string) (runtime.Object, error) { - return s.lister.Get(name) -} - -func (s *dynamicListerShim) ByNamespace(namespace string) cache.GenericNamespaceLister { - return &dynamicNamespaceListerShim{ - namespaceLister: s.lister.Namespace(namespace), - } -} - -// dynamicNamespaceListerShim implements the NamespaceLister interface. 
-// It wraps NamespaceLister so that it implements cache.GenericNamespaceLister interface -type dynamicNamespaceListerShim struct { - namespaceLister NamespaceLister -} - -// List will return all objects in this namespace -func (ns *dynamicNamespaceListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { - objs, err := ns.namespaceLister.List(selector) - if err != nil { - return nil, err - } - - ret = make([]runtime.Object, len(objs)) - for index, obj := range objs { - ret[index] = obj - } - return ret, err -} - -// Get will attempt to retrieve by namespace and name -func (ns *dynamicNamespaceListerShim) Get(name string) (runtime.Object, error) { - return ns.namespaceLister.Get(name) -} diff --git a/constraint/vendor/k8s.io/client-go/dynamic/fake/simple.go b/constraint/vendor/k8s.io/client-go/dynamic/fake/simple.go deleted file mode 100644 index 5d0a6f69f..000000000 --- a/constraint/vendor/k8s.io/client-go/dynamic/fake/simple.go +++ /dev/null @@ -1,539 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/testing" -) - -func NewSimpleDynamicClient(scheme *runtime.Scheme, objects ...runtime.Object) *FakeDynamicClient { - unstructuredScheme := runtime.NewScheme() - for gvk := range scheme.AllKnownTypes() { - if unstructuredScheme.Recognizes(gvk) { - continue - } - if strings.HasSuffix(gvk.Kind, "List") { - unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.UnstructuredList{}) - continue - } - unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.Unstructured{}) - } - - objects, err := convertObjectsToUnstructured(scheme, objects) - if err != nil { - panic(err) - } - - for _, obj := range objects { - gvk := obj.GetObjectKind().GroupVersionKind() - if !unstructuredScheme.Recognizes(gvk) { - unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.Unstructured{}) - } - gvk.Kind += "List" - if !unstructuredScheme.Recognizes(gvk) { - unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.UnstructuredList{}) - } - } - - return NewSimpleDynamicClientWithCustomListKinds(unstructuredScheme, nil, objects...) -} - -// NewSimpleDynamicClientWithCustomListKinds try not to use this. In general you want to have the scheme have the List types registered -// and allow the default guessing for resources match. Sometimes that doesn't work, so you can specify a custom mapping here. 
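A minimal test-style sketch of the custom GVR-to-list-kind mapping described in the comment above, using a hypothetical example.com/v1 Widget resource that is never registered in the scheme; note the fake package is being removed from this vendor tree, so the sketch assumes it is imported from upstream client-go directly.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	dynamicfake "k8s.io/client-go/dynamic/fake"
)

func main() {
	// Widget is not in the scheme, so the fake client needs an explicit
	// GVR -> list kind mapping in order to serve List calls.
	gvr := schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "widgets"}
	client := dynamicfake.NewSimpleDynamicClientWithCustomListKinds(
		runtime.NewScheme(),
		map[schema.GroupVersionResource]string{gvr: "WidgetList"},
	)

	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "example.com/v1",
		"kind":       "Widget",
		"metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
	}}
	if _, err := client.Resource(gvr).Namespace("default").Create(context.Background(), obj, metav1.CreateOptions{}); err != nil {
		panic(err)
	}

	list, err := client.Resource(gvr).Namespace("default").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(list.Items)) // 1
}
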
-func NewSimpleDynamicClientWithCustomListKinds(scheme *runtime.Scheme, gvrToListKind map[schema.GroupVersionResource]string, objects ...runtime.Object) *FakeDynamicClient { - // In order to use List with this client, you have to have your lists registered so that the object tracker will find them - // in the scheme to support the t.scheme.New(listGVK) call when it's building the return value. - // Since the base fake client needs the listGVK passed through the action (in cases where there are no instances, it - // cannot look up the actual hits), we need to know a mapping of GVR to listGVK here. For GETs and other types of calls, - // there is no return value that contains a GVK, so it doesn't have to know the mapping in advance. - - // first we attempt to invert known List types from the scheme to auto guess the resource with unsafe guesses - // this covers common usage of registering types in scheme and passing them - completeGVRToListKind := map[schema.GroupVersionResource]string{} - for listGVK := range scheme.AllKnownTypes() { - if !strings.HasSuffix(listGVK.Kind, "List") { - continue - } - nonListGVK := listGVK.GroupVersion().WithKind(listGVK.Kind[:len(listGVK.Kind)-4]) - plural, _ := meta.UnsafeGuessKindToResource(nonListGVK) - completeGVRToListKind[plural] = listGVK.Kind - } - - for gvr, listKind := range gvrToListKind { - if !strings.HasSuffix(listKind, "List") { - panic("coding error, listGVK must end in List or this fake client doesn't work right") - } - listGVK := gvr.GroupVersion().WithKind(listKind) - - // if we already have this type registered, just skip it - if _, err := scheme.New(listGVK); err == nil { - completeGVRToListKind[gvr] = listKind - continue - } - - scheme.AddKnownTypeWithName(listGVK, &unstructured.UnstructuredList{}) - completeGVRToListKind[gvr] = listKind - } - - codecs := serializer.NewCodecFactory(scheme) - o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - cs := &FakeDynamicClient{scheme: scheme, gvrToListKind: completeGVRToListKind, tracker: o} - cs.AddReactor("*", "*", testing.ObjectReaction(o)) - cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) - if err != nil { - return false, nil, err - } - return true, watch, nil - }) - - return cs -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. 
-type FakeDynamicClient struct { - testing.Fake - scheme *runtime.Scheme - gvrToListKind map[schema.GroupVersionResource]string - tracker testing.ObjectTracker -} - -type dynamicResourceClient struct { - client *FakeDynamicClient - namespace string - resource schema.GroupVersionResource - listKind string -} - -var ( - _ dynamic.Interface = &FakeDynamicClient{} - _ testing.FakeClient = &FakeDynamicClient{} -) - -func (c *FakeDynamicClient) Tracker() testing.ObjectTracker { - return c.tracker -} - -func (c *FakeDynamicClient) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface { - return &dynamicResourceClient{client: c, resource: resource, listKind: c.gvrToListKind[resource]} -} - -func (c *dynamicResourceClient) Namespace(ns string) dynamic.ResourceInterface { - ret := *c - ret.namespace = ns - return &ret -} - -func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Unstructured, opts metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { - var uncastRet runtime.Object - var err error - switch { - case len(c.namespace) == 0 && len(subresources) == 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewRootCreateAction(c.resource, obj), obj) - - case len(c.namespace) == 0 && len(subresources) > 0: - var accessor metav1.Object // avoid shadowing err - accessor, err = meta.Accessor(obj) - if err != nil { - return nil, err - } - name := accessor.GetName() - uncastRet, err = c.client.Fake. - Invokes(testing.NewRootCreateSubresourceAction(c.resource, name, strings.Join(subresources, "/"), obj), obj) - - case len(c.namespace) > 0 && len(subresources) == 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewCreateAction(c.resource, c.namespace, obj), obj) - - case len(c.namespace) > 0 && len(subresources) > 0: - var accessor metav1.Object // avoid shadowing err - accessor, err = meta.Accessor(obj) - if err != nil { - return nil, err - } - name := accessor.GetName() - uncastRet, err = c.client.Fake. - Invokes(testing.NewCreateSubresourceAction(c.resource, name, strings.Join(subresources, "/"), c.namespace, obj), obj) - - } - - if err != nil { - return nil, err - } - if uncastRet == nil { - return nil, err - } - - ret := &unstructured.Unstructured{} - if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { - return nil, err - } - return ret, err -} - -func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) { - var uncastRet runtime.Object - var err error - switch { - case len(c.namespace) == 0 && len(subresources) == 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewRootUpdateAction(c.resource, obj), obj) - - case len(c.namespace) == 0 && len(subresources) > 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(c.resource, strings.Join(subresources, "/"), obj), obj) - - case len(c.namespace) > 0 && len(subresources) == 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewUpdateAction(c.resource, c.namespace, obj), obj) - - case len(c.namespace) > 0 && len(subresources) > 0: - uncastRet, err = c.client.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(c.resource, strings.Join(subresources, "/"), c.namespace, obj), obj) - - } - - if err != nil { - return nil, err - } - if uncastRet == nil { - return nil, err - } - - ret := &unstructured.Unstructured{} - if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { - return nil, err - } - return ret, err -} - -func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) { - var uncastRet runtime.Object - var err error - switch { - case len(c.namespace) == 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(c.resource, "status", obj), obj) - - case len(c.namespace) > 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewUpdateSubresourceAction(c.resource, "status", c.namespace, obj), obj) - - } - - if err != nil { - return nil, err - } - if uncastRet == nil { - return nil, err - } - - ret := &unstructured.Unstructured{} - if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { - return nil, err - } - return ret, err -} - -func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { - var err error - switch { - case len(c.namespace) == 0 && len(subresources) == 0: - _, err = c.client.Fake. - Invokes(testing.NewRootDeleteAction(c.resource, name), &metav1.Status{Status: "dynamic delete fail"}) - - case len(c.namespace) == 0 && len(subresources) > 0: - _, err = c.client.Fake. - Invokes(testing.NewRootDeleteSubresourceAction(c.resource, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic delete fail"}) - - case len(c.namespace) > 0 && len(subresources) == 0: - _, err = c.client.Fake. - Invokes(testing.NewDeleteAction(c.resource, c.namespace, name), &metav1.Status{Status: "dynamic delete fail"}) - - case len(c.namespace) > 0 && len(subresources) > 0: - _, err = c.client.Fake. - Invokes(testing.NewDeleteSubresourceAction(c.resource, strings.Join(subresources, "/"), c.namespace, name), &metav1.Status{Status: "dynamic delete fail"}) - } - - return err -} - -func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var err error - switch { - case len(c.namespace) == 0: - action := testing.NewRootDeleteCollectionAction(c.resource, listOptions) - _, err = c.client.Fake.Invokes(action, &metav1.Status{Status: "dynamic deletecollection fail"}) - - case len(c.namespace) > 0: - action := testing.NewDeleteCollectionAction(c.resource, c.namespace, listOptions) - _, err = c.client.Fake.Invokes(action, &metav1.Status{Status: "dynamic deletecollection fail"}) - - } - - return err -} - -func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) { - var uncastRet runtime.Object - var err error - switch { - case len(c.namespace) == 0 && len(subresources) == 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewRootGetAction(c.resource, name), &metav1.Status{Status: "dynamic get fail"}) - - case len(c.namespace) == 0 && len(subresources) > 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewRootGetSubresourceAction(c.resource, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic get fail"}) - - case len(c.namespace) > 0 && len(subresources) == 0: - uncastRet, err = c.client.Fake. 
- Invokes(testing.NewGetAction(c.resource, c.namespace, name), &metav1.Status{Status: "dynamic get fail"}) - - case len(c.namespace) > 0 && len(subresources) > 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewGetSubresourceAction(c.resource, c.namespace, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic get fail"}) - } - - if err != nil { - return nil, err - } - if uncastRet == nil { - return nil, err - } - - ret := &unstructured.Unstructured{} - if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { - return nil, err - } - return ret, err -} - -func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { - if len(c.listKind) == 0 { - panic(fmt.Sprintf("coding error: you must register resource to list kind for every resource you're going to LIST when creating the client. See NewSimpleDynamicClientWithCustomListKinds or register the list into the scheme: %v out of %v", c.resource, c.client.gvrToListKind)) - } - listGVK := c.resource.GroupVersion().WithKind(c.listKind) - listForFakeClientGVK := c.resource.GroupVersion().WithKind(c.listKind[:len(c.listKind)-4]) /*base library appends List*/ - - var obj runtime.Object - var err error - switch { - case len(c.namespace) == 0: - obj, err = c.client.Fake. - Invokes(testing.NewRootListAction(c.resource, listForFakeClientGVK, opts), &metav1.Status{Status: "dynamic list fail"}) - - case len(c.namespace) > 0: - obj, err = c.client.Fake. - Invokes(testing.NewListAction(c.resource, listForFakeClientGVK, c.namespace, opts), &metav1.Status{Status: "dynamic list fail"}) - - } - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - - retUnstructured := &unstructured.Unstructured{} - if err := c.client.scheme.Convert(obj, retUnstructured, nil); err != nil { - return nil, err - } - entireList, err := retUnstructured.ToList() - if err != nil { - return nil, err - } - - list := &unstructured.UnstructuredList{} - list.SetRemainingItemCount(entireList.GetRemainingItemCount()) - list.SetResourceVersion(entireList.GetResourceVersion()) - list.SetContinue(entireList.GetContinue()) - list.GetObjectKind().SetGroupVersionKind(listGVK) - for i := range entireList.Items { - item := &entireList.Items[i] - metadata, err := meta.Accessor(item) - if err != nil { - return nil, err - } - if label.Matches(labels.Set(metadata.GetLabels())) { - list.Items = append(list.Items, *item) - } - } - return list, nil -} - -func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - switch { - case len(c.namespace) == 0: - return c.client.Fake. - InvokesWatch(testing.NewRootWatchAction(c.resource, opts)) - - case len(c.namespace) > 0: - return c.client.Fake. - InvokesWatch(testing.NewWatchAction(c.resource, c.namespace, opts)) - - } - - panic("math broke") -} - -// TODO: opts are currently ignored. -func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { - var uncastRet runtime.Object - var err error - switch { - case len(c.namespace) == 0 && len(subresources) == 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewRootPatchAction(c.resource, name, pt, data), &metav1.Status{Status: "dynamic patch fail"}) - - case len(c.namespace) == 0 && len(subresources) > 0: - uncastRet, err = c.client.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(c.resource, name, pt, data, subresources...), &metav1.Status{Status: "dynamic patch fail"}) - - case len(c.namespace) > 0 && len(subresources) == 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewPatchAction(c.resource, c.namespace, name, pt, data), &metav1.Status{Status: "dynamic patch fail"}) - - case len(c.namespace) > 0 && len(subresources) > 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewPatchSubresourceAction(c.resource, c.namespace, name, pt, data, subresources...), &metav1.Status{Status: "dynamic patch fail"}) - - } - - if err != nil { - return nil, err - } - if uncastRet == nil { - return nil, err - } - - ret := &unstructured.Unstructured{} - if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { - return nil, err - } - return ret, err -} - -// TODO: opts are currently ignored. -func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) { - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } - var uncastRet runtime.Object - switch { - case len(c.namespace) == 0 && len(subresources) == 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewRootPatchAction(c.resource, name, types.ApplyPatchType, outBytes), &metav1.Status{Status: "dynamic patch fail"}) - - case len(c.namespace) == 0 && len(subresources) > 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewRootPatchSubresourceAction(c.resource, name, types.ApplyPatchType, outBytes, subresources...), &metav1.Status{Status: "dynamic patch fail"}) - - case len(c.namespace) > 0 && len(subresources) == 0: - uncastRet, err = c.client.Fake. - Invokes(testing.NewPatchAction(c.resource, c.namespace, name, types.ApplyPatchType, outBytes), &metav1.Status{Status: "dynamic patch fail"}) - - case len(c.namespace) > 0 && len(subresources) > 0: - uncastRet, err = c.client.Fake. 
- Invokes(testing.NewPatchSubresourceAction(c.resource, c.namespace, name, types.ApplyPatchType, outBytes, subresources...), &metav1.Status{Status: "dynamic patch fail"}) - - } - - if err != nil { - return nil, err - } - if uncastRet == nil { - return nil, err - } - - ret := &unstructured.Unstructured{} - if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil { - return nil, err - } - return ret, nil -} - -func (c *dynamicResourceClient) ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions) (*unstructured.Unstructured, error) { - return c.Apply(ctx, name, obj, options, "status") -} - -func convertObjectsToUnstructured(s *runtime.Scheme, objs []runtime.Object) ([]runtime.Object, error) { - ul := make([]runtime.Object, 0, len(objs)) - - for _, obj := range objs { - u, err := convertToUnstructured(s, obj) - if err != nil { - return nil, err - } - - ul = append(ul, u) - } - return ul, nil -} - -func convertToUnstructured(s *runtime.Scheme, obj runtime.Object) (runtime.Object, error) { - var ( - err error - u unstructured.Unstructured - ) - - u.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - if err != nil { - return nil, fmt.Errorf("failed to convert to unstructured: %w", err) - } - - gvk := u.GroupVersionKind() - if gvk.Group == "" || gvk.Kind == "" { - gvks, _, err := s.ObjectKinds(obj) - if err != nil { - return nil, fmt.Errorf("failed to convert to unstructured - unable to get GVK %w", err) - } - apiv, k := gvks[0].ToAPIVersionAndKind() - u.SetAPIVersion(apiv) - u.SetKind(k) - } - return &u, nil -} diff --git a/constraint/vendor/k8s.io/client-go/dynamic/scheme.go b/constraint/vendor/k8s.io/client-go/dynamic/scheme.go index 3168c872c..28316f1dd 100644 --- a/constraint/vendor/k8s.io/client-go/dynamic/scheme.go +++ b/constraint/vendor/k8s.io/client-go/dynamic/scheme.go @@ -21,52 +21,66 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/cbor" "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/client-go/features" ) -var watchScheme = runtime.NewScheme() var basicScheme = runtime.NewScheme() -var deleteScheme = runtime.NewScheme() var parameterScheme = runtime.NewScheme() -var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme) var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme) var versionV1 = schema.GroupVersion{Version: "v1"} func init() { - metav1.AddToGroupVersion(watchScheme, versionV1) metav1.AddToGroupVersion(basicScheme, versionV1) metav1.AddToGroupVersion(parameterScheme, versionV1) - metav1.AddToGroupVersion(deleteScheme, versionV1) } -// basicNegotiatedSerializer is used to handle discovery and error handling serialization -type basicNegotiatedSerializer struct{} - -func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { - return []runtime.SerializerInfo{ +func newBasicNegotiatedSerializer() basicNegotiatedSerializer { + supportedMediaTypes := []runtime.SerializerInfo{ { MediaType: "application/json", MediaTypeType: "application", MediaTypeSubType: "json", EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, false), - PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, true), + Serializer: 
json.NewSerializerWithOptions(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, json.SerializerOptions{}), + PrettySerializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, json.SerializerOptions{Pretty: true}), StreamSerializer: &runtime.StreamSerializerInfo{ EncodesAsText: true, - Serializer: json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false), + Serializer: json.NewSerializerWithOptions(json.DefaultMetaFactory, basicScheme, basicScheme, json.SerializerOptions{}), Framer: json.Framer, }, }, } + if features.FeatureGates().Enabled(features.ClientsAllowCBOR) { + supportedMediaTypes = append(supportedMediaTypes, runtime.SerializerInfo{ + MediaType: "application/cbor", + MediaTypeType: "application", + MediaTypeSubType: "cbor", + Serializer: cbor.NewSerializer(unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}), + StreamSerializer: &runtime.StreamSerializerInfo{ + Serializer: cbor.NewSerializer(basicScheme, basicScheme, cbor.Transcode(false)), + Framer: cbor.NewFramer(), + }, + }) + } + return basicNegotiatedSerializer{supportedMediaTypes: supportedMediaTypes} +} + +type basicNegotiatedSerializer struct { + supportedMediaTypes []runtime.SerializerInfo +} + +func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { + return s.supportedMediaTypes } func (s basicNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { return runtime.WithVersionEncoder{ Version: gv, Encoder: encoder, - ObjectTyper: unstructuredTyper{basicScheme}, + ObjectTyper: permissiveTyper{basicScheme}, } } @@ -106,3 +120,25 @@ func (t unstructuredTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersio func (t unstructuredTyper) Recognizes(gvk schema.GroupVersionKind) bool { return true } + +// The dynamic client has historically accepted Unstructured objects with missing or empty +// apiVersion and/or kind as arguments to its write request methods. This typer will return the type +// of a runtime.Unstructured with no error, even if the type is missing or empty. 
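A minimal sketch of the write-path behavior the comment above preserves, assuming a hypothetical example.com/v1 widgets resource and an in-cluster config: the GroupVersionResource handed to Resource() selects the request URL, so the body may omit apiVersion/kind and the permissive typer still lets the client encode it (whether the server accepts such a body is a separate question).

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // illustrative; any *rest.Config works
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Hypothetical custom resource; the GVR, not the object body, picks the endpoint.
	gvr := schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "widgets"}
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		// apiVersion/kind intentionally left empty: the permissive typer reports the
		// (empty) GVK without error instead of failing the client-side encode.
		"metadata": map[string]interface{}{"name": "demo"},
		"spec":     map[string]interface{}{"size": int64(1)},
	}}

	created, err := client.Resource(gvr).Namespace("default").Create(context.Background(), obj, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(created.GetName())
}
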
+type permissiveTyper struct { + nested runtime.ObjectTyper +} + +func (t permissiveTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { + kinds, unversioned, err := t.nested.ObjectKinds(obj) + if err == nil { + return kinds, unversioned, nil + } + if _, ok := obj.(runtime.Unstructured); ok { + return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil + } + return nil, false, err +} + +func (t permissiveTyper) Recognizes(gvk schema.GroupVersionKind) bool { + return true +} diff --git a/constraint/vendor/k8s.io/client-go/dynamic/simple.go b/constraint/vendor/k8s.io/client-go/dynamic/simple.go index 4b5485953..62b2999ca 100644 --- a/constraint/vendor/k8s.io/client-go/dynamic/simple.go +++ b/constraint/vendor/k8s.io/client-go/dynamic/simple.go @@ -20,15 +20,20 @@ import ( "context" "fmt" "net/http" + "time" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/features" "k8s.io/client-go/rest" + "k8s.io/client-go/util/apply" + "k8s.io/client-go/util/consistencydetector" + "k8s.io/client-go/util/watchlist" + "k8s.io/klog/v2" ) type DynamicClient struct { @@ -41,9 +46,17 @@ var _ Interface = &DynamicClient{} // appropriate dynamic client defaults set. func ConfigFor(inConfig *rest.Config) *rest.Config { config := rest.CopyConfig(inConfig) - config.AcceptContentTypes = "application/json" + config.ContentType = "application/json" - config.NegotiatedSerializer = basicNegotiatedSerializer{} // this gets used for discovery and error handling types + config.AcceptContentTypes = "application/json" + if features.FeatureGates().Enabled(features.ClientsAllowCBOR) { + config.AcceptContentTypes = "application/json;q=0.9,application/cbor;q=1" + if features.FeatureGates().Enabled(features.ClientsPreferCBOR) { + config.ContentType = "application/cbor" + } + } + + config.NegotiatedSerializer = newBasicNegotiatedSerializer() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() } @@ -82,11 +95,10 @@ func NewForConfig(inConfig *rest.Config) (*DynamicClient, error) { // Note the http client provided takes precedence over the configured transport values. func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (*DynamicClient, error) { config := ConfigFor(inConfig) - // for serializing the options - config.GroupVersion = &schema.GroupVersion{} + config.GroupVersion = nil config.APIPath = "/if-you-see-this-search-for-the-break" - restClient, err := rest.RESTClientForConfigAndClient(config, h) + restClient, err := rest.UnversionedRESTClientForConfigAndClient(config, h) if err != nil { return nil, err } @@ -110,10 +122,6 @@ func (c *dynamicResourceClient) Namespace(ns string) ResourceInterface { } func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Unstructured, opts metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } name := "" if len(subresources) > 0 { accessor, err := meta.Accessor(obj) @@ -129,26 +137,17 @@ func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Un return nil, err } - result := c.client.client. + var out unstructured.Unstructured + if err := c.client.client. Post(). 
AbsPath(append(c.makeURLSegments(name), subresources...)...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(outBytes). + Body(obj). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { + Do(ctx).Into(&out); err != nil { return nil, err } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) { @@ -163,31 +162,18 @@ func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Un if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return nil, err } - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } - result := c.client.client. + var out unstructured.Unstructured + if err := c.client.client. Put(). AbsPath(append(c.makeURLSegments(name), subresources...)...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(outBytes). + Body(obj). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { + Do(ctx).Into(&out); err != nil { return nil, err } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) { @@ -202,31 +188,18 @@ func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructu if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return nil, err } - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } - result := c.client.client. + var out unstructured.Unstructured + if err := c.client.client. Put(). AbsPath(append(c.makeURLSegments(name), "status")...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(outBytes). + Body(obj). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { + Do(ctx).Into(&out); err != nil { return nil, err } - retBytes, err := result.Raw() - if err != nil { - return nil, err - } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { @@ -236,16 +209,11 @@ func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts me if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return err } - deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) - if err != nil { - return err - } result := c.client.client. Delete(). AbsPath(append(c.makeURLSegments(name), subresources...)...). 
- SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(deleteOptionsByte). + Body(&opts). Do(ctx) return result.Error() } @@ -255,16 +223,10 @@ func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav return err } - deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) - if err != nil { - return err - } - result := c.client.client. Delete(). AbsPath(c.makeURLSegments("")...). - SetHeader("Content-Type", runtime.ContentTypeJSON). - Body(deleteOptionsByte). + Body(&opts). SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1). Do(ctx) return result.Error() @@ -277,46 +239,69 @@ func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return nil, err } - result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx) - if err := result.Error(); err != nil { + var out unstructured.Unstructured + if err := c.client.client. + Get(). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Do(ctx).Into(&out); err != nil { return nil, err } - retBytes, err := result.Raw() - if err != nil { - return nil, err + return &out, nil +} + +func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil { + klog.Warningf("Failed preparing watchlist options for %v, falling back to the standard LIST semantics, err = %v", c.resource, watchListOptionsErr) + } else if hasWatchListOptionsPrepared { + result, err := c.watchList(ctx, watchListOptions) + if err == nil { + consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("watchlist request for %v", c.resource), c.list, opts, result) + return result, nil + } + klog.Warningf("The watchlist request for %v ended with an error, falling back to the standard LIST semantics, err = %v", c.resource, err) } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err + result, err := c.list(ctx, opts) + if err == nil { + consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("list request for %v", c.resource), c.list, opts, result) } - return uncastObj.(*unstructured.Unstructured), nil + return result, err } -func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { +func (c *dynamicResourceClient) list(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { if err := validateNamespaceWithOptionalName(c.namespace); err != nil { return nil, err } - result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() - if err != nil { + var out unstructured.UnstructuredList + if err := c.client.client. + Get(). + AbsPath(c.makeURLSegments("")...). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
+ Do(ctx).Into(&out); err != nil { return nil, err } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { + return &out, nil +} + +// watchList establishes a watch stream with the server and returns an unstructured list. +func (c *dynamicResourceClient) watchList(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { + if err := validateNamespaceWithOptionalName(c.namespace); err != nil { return nil, err } - if list, ok := uncastObj.(*unstructured.UnstructuredList); ok { - return list, nil - } - list, err := uncastObj.(*unstructured.Unstructured).ToList() - if err != nil { - return nil, err + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - return list, nil + + result := &unstructured.UnstructuredList{} + err := c.client.client.Get().AbsPath(c.makeURLSegments("")...). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). + WatchList(ctx). + Into(result) + + return result, err } func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { @@ -336,24 +321,16 @@ func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return nil, err } - result := c.client.client. + var out unstructured.Unstructured + if err := c.client.client. Patch(pt). AbsPath(append(c.makeURLSegments(name), subresources...)...). Body(data). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() - if err != nil { + Do(ctx).Into(&out); err != nil { return nil, err } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { - return nil, err - } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) { @@ -363,10 +340,6 @@ func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *uns if err := validateNamespaceWithOptionalName(c.namespace, name); err != nil { return nil, err } - outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - if err != nil { - return nil, err - } accessor, err := meta.Accessor(obj) if err != nil { return nil, err @@ -378,25 +351,21 @@ func (c *dynamicResourceClient) Apply(ctx context.Context, name string, obj *uns } patchOpts := opts.ToPatchOptions() - result := c.client.client. - Patch(types.ApplyPatchType). - AbsPath(append(c.makeURLSegments(name), subresources...)...). - Body(outBytes). - SpecificallyVersionedParams(&patchOpts, dynamicParameterCodec, versionV1). - Do(ctx) - if err := result.Error(); err != nil { - return nil, err - } - retBytes, err := result.Raw() + request, err := apply.NewRequest(c.client.client, obj.Object) if err != nil { return nil, err } - uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes) - if err != nil { + + var out unstructured.Unstructured + if err := request. + AbsPath(append(c.makeURLSegments(name), subresources...)...). + SpecificallyVersionedParams(&patchOpts, dynamicParameterCodec, versionV1). 
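The rewritten List on the dynamic client now tries the streaming watch-list path first whenever watchlist.PrepareWatchListOptionsFromListOptions yields options (i.e. the WatchListClient gate is on), falls back to a plain LIST on any failure, and cross-checks the result through the consistency detector. Nothing changes for callers; the following is a minimal caller-side sketch assuming a reachable cluster, with listDeployments and cfg being illustrative names only:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

// listDeployments lists deployments through the dynamic client. Whether the
// request is served by a watch-list stream or a regular LIST is decided inside
// the client (WatchListClient feature gate), not by this code.
func listDeployments(ctx context.Context, cfg *rest.Config) error {
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		return err
	}
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	list, err := client.Resource(gvr).Namespace("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range list.Items {
		fmt.Println(list.Items[i].GetName())
	}
	return nil
}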
+ Do(ctx).Into(&out); err != nil { return nil, err } - return uncastObj.(*unstructured.Unstructured), nil + return &out, nil } + func (c *dynamicResourceClient) ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, opts metav1.ApplyOptions) (*unstructured.Unstructured, error) { return c.Apply(ctx, name, obj, opts, "status") } diff --git a/constraint/vendor/k8s.io/client-go/features/envvar.go b/constraint/vendor/k8s.io/client-go/features/envvar.go index f9edfdf0d..8c3f887dc 100644 --- a/constraint/vendor/k8s.io/client-go/features/envvar.go +++ b/constraint/vendor/k8s.io/client-go/features/envvar.go @@ -47,6 +47,10 @@ var _ Gates = &envVarFeatureGates{} // // Please note that environmental variables can only be set to the boolean value. // Incorrect values will be ignored and logged. +// +// Features can also be set directly via the Set method. +// In that case, these features take precedence over +// features set via environmental variables. func newEnvVarFeatureGates(features map[Feature]FeatureSpec) *envVarFeatureGates { known := map[Feature]FeatureSpec{} for name, spec := range features { @@ -57,7 +61,8 @@ func newEnvVarFeatureGates(features map[Feature]FeatureSpec) *envVarFeatureGates callSiteName: naming.GetNameFromCallsite(internalPackages...), known: known, } - fg.enabled.Store(map[Feature]bool{}) + fg.enabledViaEnvVar.Store(map[Feature]bool{}) + fg.enabledViaSetMethod = map[Feature]bool{} return fg } @@ -74,17 +79,34 @@ type envVarFeatureGates struct { // known holds known feature gates known map[Feature]FeatureSpec - // enabled holds a map[Feature]bool + // enabledViaEnvVar holds a map[Feature]bool // with values explicitly set via env var - enabled atomic.Value + enabledViaEnvVar atomic.Value + + // lockEnabledViaSetMethod protects enabledViaSetMethod + lockEnabledViaSetMethod sync.RWMutex + + // enabledViaSetMethod holds values explicitly set + // via Set method, features stored in this map take + // precedence over features stored in enabledViaEnvVar + enabledViaSetMethod map[Feature]bool // readEnvVars holds the boolean value which // indicates whether readEnvVarsOnce has been called. readEnvVars atomic.Bool } -// Enabled returns true if the key is enabled. If the key is not known, this call will panic. +// Enabled returns true if the key is enabled. If the key is not known, this call will panic. func (f *envVarFeatureGates) Enabled(key Feature) bool { + if v, ok := f.wasFeatureEnabledViaSetMethod(key); ok { + // ensue that the state of all known features + // is loaded from environment variables + // on the first call to Enabled method. + if !f.hasAlreadyReadEnvVar() { + _ = f.getEnabledMapFromEnvVar() + } + return v + } if v, ok := f.getEnabledMapFromEnvVar()[key]; ok { return v } @@ -94,6 +116,26 @@ func (f *envVarFeatureGates) Enabled(key Feature) bool { panic(fmt.Errorf("feature %q is not registered in FeatureGates %q", key, f.callSiteName)) } +// Set sets the given feature to the given value. +// +// Features set via this method take precedence over +// the features set via environment variables. 
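The Set method described above gives programmatic control over client-go feature gates, and a value passed to Set wins over whatever the corresponding environment variable says. A minimal sketch of that precedence, written as if it lived inside the features package because newEnvVarFeatureGates, Feature, FeatureSpec and Alpha are package-level identifiers from this file; "MyFeature" is a made-up gate and the KUBE_FEATURE_<name> variable naming is assumed from client-go's convention:

package features

import (
	"fmt"
	"os"
)

// demonstrateSetPrecedence is illustrative only (not part of client-go): an
// explicit Set overrides the environment variable, per the Enabled logic above.
func demonstrateSetPrecedence() {
	gates := newEnvVarFeatureGates(map[Feature]FeatureSpec{
		"MyFeature": {Default: false, PreRelease: Alpha},
	})

	// The environment variable alone would enable the gate...
	_ = os.Setenv("KUBE_FEATURE_MyFeature", "true")

	// ...but Set takes precedence over it.
	if err := gates.Set("MyFeature", false); err != nil {
		panic(err)
	}

	fmt.Println(gates.Enabled("MyFeature")) // prints "false": Set wins
}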
+func (f *envVarFeatureGates) Set(featureName Feature, featureValue bool) error { + feature, ok := f.known[featureName] + if !ok { + return fmt.Errorf("feature %q is not registered in FeatureGates %q", featureName, f.callSiteName) + } + if feature.LockToDefault && feature.Default != featureValue { + return fmt.Errorf("cannot set feature gate %q to %v, feature is locked to %v", featureName, featureValue, feature.Default) + } + + f.lockEnabledViaSetMethod.Lock() + defer f.lockEnabledViaSetMethod.Unlock() + f.enabledViaSetMethod[featureName] = featureValue + + return nil +} + // getEnabledMapFromEnvVar will fill the enabled map on the first call. // This is the only time a known feature can be set to a value // read from the corresponding environmental variable. @@ -119,7 +161,7 @@ func (f *envVarFeatureGates) getEnabledMapFromEnvVar() map[Feature]bool { featureGatesState[feature] = boolVal } } - f.enabled.Store(featureGatesState) + f.enabledViaEnvVar.Store(featureGatesState) f.readEnvVars.Store(true) for feature, featureSpec := range f.known { @@ -130,7 +172,15 @@ func (f *envVarFeatureGates) getEnabledMapFromEnvVar() map[Feature]bool { klog.V(1).InfoS("Feature gate default state", "feature", feature, "enabled", featureSpec.Default) } }) - return f.enabled.Load().(map[Feature]bool) + return f.enabledViaEnvVar.Load().(map[Feature]bool) +} + +func (f *envVarFeatureGates) wasFeatureEnabledViaSetMethod(key Feature) (bool, bool) { + f.lockEnabledViaSetMethod.RLock() + defer f.lockEnabledViaSetMethod.RUnlock() + + value, found := f.enabledViaSetMethod[key] + return value, found } func (f *envVarFeatureGates) hasAlreadyReadEnvVar() bool { diff --git a/constraint/vendor/k8s.io/client-go/features/features.go b/constraint/vendor/k8s.io/client-go/features/features.go index afb67f509..5ccdcc55f 100644 --- a/constraint/vendor/k8s.io/client-go/features/features.go +++ b/constraint/vendor/k8s.io/client-go/features/features.go @@ -18,9 +18,9 @@ package features import ( "errors" + "sync/atomic" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "sync/atomic" ) // NOTE: types Feature, FeatureSpec, prerelease (and its values) diff --git a/constraint/vendor/k8s.io/client-go/features/known_features.go b/constraint/vendor/k8s.io/client-go/features/known_features.go index 0c972a46f..a74f6a833 100644 --- a/constraint/vendor/k8s.io/client-go/features/known_features.go +++ b/constraint/vendor/k8s.io/client-go/features/known_features.go @@ -28,6 +28,31 @@ const ( // of code conflicts because changes are more likely to be scattered // across the file. + // owner: @benluddy + // kep: https://kep.k8s.io/4222 + // alpha: 1.32 + // + // If disabled, clients configured to accept "application/cbor" will instead accept + // "application/json" with the same relative preference, and clients configured to write + // "application/cbor" or "application/apply-patch+cbor" will instead write + // "application/json" or "application/apply-patch+yaml", respectively. + ClientsAllowCBOR Feature = "ClientsAllowCBOR" + + // owner: @benluddy + // kep: https://kep.k8s.io/4222 + // alpha: 1.32 + // + // If enabled, and only if ClientsAllowCBOR is also enabled, the default request content + // type (if not explicitly configured) and the dynamic client's request content type both + // become "application/cbor" instead of "application/json". The default content type for + // apply patch requests becomes "application/apply-patch+cbor" instead of + // "application/apply-patch+yaml". 
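Both CBOR gates described here are alpha and disabled by default; together with the ConfigFor change to the dynamic client earlier in this diff, enabling them only alters content-negotiation headers. A hedged sketch of that effect, assuming the KUBE_FEATURE_<name> environment-variable convention and that the variables are set before client-go reads the gates for the first time (the values are cached afterwards):

package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumed convention: gates are toggled via KUBE_FEATURE_* variables,
	// set before any client-go code consults the feature gates.
	_ = os.Setenv("KUBE_FEATURE_ClientsAllowCBOR", "true")
	_ = os.Setenv("KUBE_FEATURE_ClientsPreferCBOR", "true")

	cfg := dynamic.ConfigFor(&rest.Config{Host: "https://example.invalid"})

	// With both gates on, ConfigFor sets:
	//   AcceptContentTypes = "application/json;q=0.9,application/cbor;q=1"
	//   ContentType        = "application/cbor"
	fmt.Println(cfg.AcceptContentTypes)
	fmt.Println(cfg.ContentType)
}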
+ ClientsPreferCBOR Feature = "ClientsPreferCBOR" + + // owner: @nilekhc + // alpha: v1.30 + InformerResourceVersion Feature = "InformerResourceVersion" + // owner: @p0lyn0mial // beta: v1.30 // @@ -37,10 +62,6 @@ const ( // The feature is disabled in Beta by default because // it will only be turned on for selected control plane component(s). WatchListClient Feature = "WatchListClient" - - // owner: @nilekhc - // alpha: v1.30 - InformerResourceVersion Feature = "InformerResourceVersion" ) // defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. @@ -49,6 +70,8 @@ const ( // After registering with the binary, the features are, by default, controllable using environment variables. // For more details, please see envVarFeatureGates implementation. var defaultKubernetesFeatureGates = map[Feature]FeatureSpec{ - WatchListClient: {Default: false, PreRelease: Beta}, + ClientsAllowCBOR: {Default: false, PreRelease: Alpha}, + ClientsPreferCBOR: {Default: false, PreRelease: Alpha}, InformerResourceVersion: {Default: false, PreRelease: Alpha}, + WatchListClient: {Default: false, PreRelease: Beta}, } diff --git a/constraint/vendor/k8s.io/client-go/gentype/fake.go b/constraint/vendor/k8s.io/client-go/gentype/fake.go new file mode 100644 index 000000000..bcb9ca27f --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/gentype/fake.go @@ -0,0 +1,304 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gentype + +import ( + "context" + json "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClient represents a fake client +type FakeClient[T objectWithMeta] struct { + *testing.Fake + ns string + resource schema.GroupVersionResource + kind schema.GroupVersionKind + newObject func() T +} + +// FakeClientWithList represents a fake client with support for lists. +type FakeClientWithList[T objectWithMeta, L runtime.Object] struct { + *FakeClient[T] + alsoFakeLister[T, L] +} + +// FakeClientWithApply represents a fake client with support for apply declarative configurations. +type FakeClientWithApply[T objectWithMeta, C namedObject] struct { + *FakeClient[T] + alsoFakeApplier[T, C] +} + +// FakeClientWithListAndApply represents a fake client with support for lists and apply declarative configurations. 
+type FakeClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject] struct { + *FakeClient[T] + alsoFakeLister[T, L] + alsoFakeApplier[T, C] +} + +// Helper types for composition +type alsoFakeLister[T objectWithMeta, L runtime.Object] struct { + client *FakeClient[T] + newList func() L + copyListMeta func(L, L) + getItems func(L) []T + setItems func(L, []T) +} + +type alsoFakeApplier[T objectWithMeta, C namedObject] struct { + client *FakeClient[T] +} + +// NewFakeClient constructs a fake client, namespaced or not, with no support for lists or apply. +// Non-namespaced clients are constructed by passing an empty namespace (""). +func NewFakeClient[T objectWithMeta]( + fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T, +) *FakeClient[T] { + return &FakeClient[T]{fake, namespace, resource, kind, emptyObjectCreator} +} + +// NewFakeClientWithList constructs a namespaced client with support for lists. +func NewFakeClientWithList[T objectWithMeta, L runtime.Object]( + fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T, + emptyListCreator func() L, listMetaCopier func(L, L), itemGetter func(L) []T, itemSetter func(L, []T), +) *FakeClientWithList[T, L] { + fakeClient := NewFakeClient[T](fake, namespace, resource, kind, emptyObjectCreator) + return &FakeClientWithList[T, L]{ + fakeClient, + alsoFakeLister[T, L]{fakeClient, emptyListCreator, listMetaCopier, itemGetter, itemSetter}, + } +} + +// NewFakeClientWithApply constructs a namespaced client with support for apply declarative configurations. +func NewFakeClientWithApply[T objectWithMeta, C namedObject]( + fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T, +) *FakeClientWithApply[T, C] { + fakeClient := NewFakeClient[T](fake, namespace, resource, kind, emptyObjectCreator) + return &FakeClientWithApply[T, C]{ + fakeClient, + alsoFakeApplier[T, C]{fakeClient}, + } +} + +// NewFakeClientWithListAndApply constructs a client with support for lists and applying declarative configurations. +func NewFakeClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject]( + fake *testing.Fake, namespace string, resource schema.GroupVersionResource, kind schema.GroupVersionKind, emptyObjectCreator func() T, + emptyListCreator func() L, listMetaCopier func(L, L), itemGetter func(L) []T, itemSetter func(L, []T), +) *FakeClientWithListAndApply[T, L, C] { + fakeClient := NewFakeClient[T](fake, namespace, resource, kind, emptyObjectCreator) + return &FakeClientWithListAndApply[T, L, C]{ + fakeClient, + alsoFakeLister[T, L]{fakeClient, emptyListCreator, listMetaCopier, itemGetter, itemSetter}, + alsoFakeApplier[T, C]{fakeClient}, + } +} + +// Get takes name of a resource, and returns the corresponding object, and an error if there is any. +func (c *FakeClient[T]) Get(ctx context.Context, name string, options metav1.GetOptions) (T, error) { + emptyResult := c.newObject() + + obj, err := c.Fake. 
+ Invokes(testing.NewGetActionWithOptions(c.resource, c.ns, name, options), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +func ToPointerSlice[T any](src []T) []*T { + if src == nil { + return nil + } + result := make([]*T, len(src)) + for i := range src { + result[i] = &src[i] + } + return result +} + +func FromPointerSlice[T any](src []*T) []T { + if src == nil { + return nil + } + result := make([]T, len(src)) + for i := range src { + result[i] = *src[i] + } + return result +} + +// List takes label and field selectors, and returns the list of resources that match those selectors. +func (l *alsoFakeLister[T, L]) List(ctx context.Context, opts metav1.ListOptions) (result L, err error) { + emptyResult := l.newList() + obj, err := l.client.Fake. + Invokes(testing.NewListActionWithOptions(l.client.resource, l.client.kind, l.client.ns, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + // Everything matches + return obj.(L), nil + } + list := l.newList() + l.copyListMeta(list, obj.(L)) + var items []T + for _, item := range l.getItems(obj.(L)) { + itemMeta, err := meta.Accessor(item) + if err != nil { + // No ObjectMeta, nothing can match + continue + } + if label.Matches(labels.Set(itemMeta.GetLabels())) { + items = append(items, item) + } + } + l.setItems(list, items) + return list, err +} + +// Watch returns a watch.Interface that watches the requested resources. +func (c *FakeClient[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchActionWithOptions(c.resource, c.ns, opts)) +} + +// Create takes the representation of a resource and creates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *FakeClient[T]) Create(ctx context.Context, resource T, opts metav1.CreateOptions) (result T, err error) { + emptyResult := c.newObject() + obj, err := c.Fake. + Invokes(testing.NewCreateActionWithOptions(c.resource, c.ns, resource, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +// Update takes the representation of a resource and updates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *FakeClient[T]) Update(ctx context.Context, resource T, opts metav1.UpdateOptions) (result T, err error) { + emptyResult := c.newObject() + obj, err := c.Fake. + Invokes(testing.NewUpdateActionWithOptions(c.resource, c.ns, resource, opts), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +// UpdateStatus updates the resource's status and returns the updated resource. +func (c *FakeClient[T]) UpdateStatus(ctx context.Context, resource T, opts metav1.UpdateOptions) (result T, err error) { + emptyResult := c.newObject() + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceActionWithOptions(c.resource, "status", c.ns, resource, opts), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +// Delete deletes the resource matching the given name. Returns an error if one occurs. +func (c *FakeClient[T]) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(c.resource, c.ns, name, opts), c.newObject()) + return err +} + +// DeleteCollection deletes a collection of objects. 
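For orientation, a generated typed fake can be assembled from these helpers roughly as below. newFakeConfigMaps is a hypothetical stand-in, not generated code; the real fakes under k8s.io/client-go/kubernetes/typed/.../fake are wired the same way and obtain the *testing.Fake from a fake clientset, whose object tracker provides the default reactors that make Create/Get/List round-trip:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/gentype"
	clienttesting "k8s.io/client-go/testing"
)

// newFakeConfigMaps builds a fake ConfigMap client from the gentype helpers;
// the three trailing closures adapt ConfigMapList to the generic lister.
func newFakeConfigMaps(fake *clienttesting.Fake, namespace string) *gentype.FakeClientWithList[*corev1.ConfigMap, *corev1.ConfigMapList] {
	return gentype.NewFakeClientWithList[*corev1.ConfigMap, *corev1.ConfigMapList](
		fake,
		namespace,
		schema.GroupVersionResource{Version: "v1", Resource: "configmaps"},
		schema.GroupVersionKind{Version: "v1", Kind: "ConfigMap"},
		func() *corev1.ConfigMap { return &corev1.ConfigMap{} },
		func() *corev1.ConfigMapList { return &corev1.ConfigMapList{} },
		func(dst, src *corev1.ConfigMapList) { dst.ListMeta = src.ListMeta },
		func(list *corev1.ConfigMapList) []*corev1.ConfigMap { return gentype.ToPointerSlice(list.Items) },
		func(list *corev1.ConfigMapList, items []*corev1.ConfigMap) { list.Items = gentype.FromPointerSlice(items) },
	)
}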
+func (l *alsoFakeLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + _, err := l.client.Fake. + Invokes(testing.NewDeleteCollectionActionWithOptions(l.client.resource, l.client.ns, opts, listOpts), l.newList()) + return err +} + +// Patch applies the patch and returns the patched resource. +func (c *FakeClient[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result T, err error) { + emptyResult := c.newObject() + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(c.resource, c.ns, name, pt, data, opts, subresources...), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied resource. +func (a *alsoFakeApplier[T, C]) Apply(ctx context.Context, configuration C, opts metav1.ApplyOptions) (result T, err error) { + if configuration == *new(C) { + return *new(T), fmt.Errorf("configuration provided to Apply must not be nil") + } + data, err := json.Marshal(configuration) + if err != nil { + return *new(T), err + } + name := configuration.GetName() + if name == nil { + return *new(T), fmt.Errorf("configuration.Name must be provided to Apply") + } + emptyResult := a.client.newObject() + obj, err := a.client.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(a.client.resource, a.client.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +// ApplyStatus applies the given apply declarative configuration to the resource's status and returns the updated resource. +func (a *alsoFakeApplier[T, C]) ApplyStatus(ctx context.Context, configuration C, opts metav1.ApplyOptions) (result T, err error) { + if configuration == *new(C) { + return *new(T), fmt.Errorf("configuration provided to Apply must not be nil") + } + data, err := json.Marshal(configuration) + if err != nil { + return *new(T), err + } + name := configuration.GetName() + if name == nil { + return *new(T), fmt.Errorf("configuration.Name must be provided to Apply") + } + emptyResult := a.client.newObject() + obj, err := a.client.Fake. + Invokes(testing.NewPatchSubresourceActionWithOptions(a.client.resource, a.client.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) + + if obj == nil { + return emptyResult, err + } + return obj.(T), err +} + +func (c *FakeClient[T]) Namespace() string { + return c.ns +} + +func (c *FakeClient[T]) Kind() schema.GroupVersionKind { + return c.kind +} + +func (c *FakeClient[T]) Resource() schema.GroupVersionResource { + return c.resource +} diff --git a/constraint/vendor/k8s.io/client-go/gentype/type.go b/constraint/vendor/k8s.io/client-go/gentype/type.go new file mode 100644 index 000000000..e6ed6aae7 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/gentype/type.go @@ -0,0 +1,387 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gentype + +import ( + "context" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + "k8s.io/client-go/util/apply" + "k8s.io/client-go/util/consistencydetector" + "k8s.io/client-go/util/watchlist" + "k8s.io/klog/v2" +) + +// objectWithMeta matches objects implementing both runtime.Object and metav1.Object. +type objectWithMeta interface { + runtime.Object + metav1.Object +} + +// namedObject matches comparable objects implementing GetName(); it is intended for use with apply declarative configurations. +type namedObject interface { + comparable + GetName() *string +} + +// Client represents a client, optionally namespaced, with no support for lists or apply declarative configurations. +type Client[T objectWithMeta] struct { + resource string + client rest.Interface + namespace string // "" for non-namespaced clients + newObject func() T + parameterCodec runtime.ParameterCodec + + prefersProtobuf bool +} + +// ClientWithList represents a client with support for lists. +type ClientWithList[T objectWithMeta, L runtime.Object] struct { + *Client[T] + alsoLister[T, L] +} + +// ClientWithApply represents a client with support for apply declarative configurations. +type ClientWithApply[T objectWithMeta, C namedObject] struct { + *Client[T] + alsoApplier[T, C] +} + +// ClientWithListAndApply represents a client with support for lists and apply declarative configurations. +type ClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject] struct { + *Client[T] + alsoLister[T, L] + alsoApplier[T, C] +} + +// Helper types for composition +type alsoLister[T objectWithMeta, L runtime.Object] struct { + client *Client[T] + newList func() L +} + +type alsoApplier[T objectWithMeta, C namedObject] struct { + client *Client[T] +} + +type Option[T objectWithMeta] func(*Client[T]) + +func PrefersProtobuf[T objectWithMeta]() Option[T] { + return func(c *Client[T]) { c.prefersProtobuf = true } +} + +// NewClient constructs a client, namespaced or not, with no support for lists or apply. +// Non-namespaced clients are constructed by passing an empty namespace (""). +func NewClient[T objectWithMeta]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + options ...Option[T], +) *Client[T] { + c := &Client[T]{ + resource: resource, + client: client, + parameterCodec: parameterCodec, + namespace: namespace, + newObject: emptyObjectCreator, + } + for _, option := range options { + option(c) + } + return c +} + +// NewClientWithList constructs a namespaced client with support for lists. +func NewClientWithList[T objectWithMeta, L runtime.Object]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + emptyListCreator func() L, options ...Option[T], +) *ClientWithList[T, L] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator, options...) + return &ClientWithList[T, L]{ + typeClient, + alsoLister[T, L]{typeClient, emptyListCreator}, + } +} + +// NewClientWithApply constructs a namespaced client with support for apply declarative configurations. 
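The non-fake side mirrors that pattern: a generated typed client reduces to a constructor over gentype, so Get, List, Watch, Create, Update, Delete and friends are implemented once below. An illustrative (not generated) ConfigMap constructor, with PrefersProtobuf as the optional knob defined above and newConfigMaps a hypothetical name:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/gentype"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
)

// newConfigMaps assembles a namespaced, list-capable ConfigMap client; real
// generated clients in k8s.io/client-go/kubernetes/typed/core/v1 follow the
// same shape.
func newConfigMaps(restClient rest.Interface, namespace string) *gentype.ClientWithList[*corev1.ConfigMap, *corev1.ConfigMapList] {
	return gentype.NewClientWithList[*corev1.ConfigMap, *corev1.ConfigMapList](
		"configmaps",
		restClient,
		scheme.ParameterCodec,
		namespace,
		func() *corev1.ConfigMap { return &corev1.ConfigMap{} },
		func() *corev1.ConfigMapList { return &corev1.ConfigMapList{} },
		gentype.PrefersProtobuf[*corev1.ConfigMap](),
	)
}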
+func NewClientWithApply[T objectWithMeta, C namedObject]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + options ...Option[T], +) *ClientWithApply[T, C] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator, options...) + return &ClientWithApply[T, C]{ + typeClient, + alsoApplier[T, C]{typeClient}, + } +} + +// NewClientWithListAndApply constructs a client with support for lists and applying declarative configurations. +func NewClientWithListAndApply[T objectWithMeta, L runtime.Object, C namedObject]( + resource string, client rest.Interface, parameterCodec runtime.ParameterCodec, namespace string, emptyObjectCreator func() T, + emptyListCreator func() L, options ...Option[T], +) *ClientWithListAndApply[T, L, C] { + typeClient := NewClient[T](resource, client, parameterCodec, namespace, emptyObjectCreator, options...) + return &ClientWithListAndApply[T, L, C]{ + typeClient, + alsoLister[T, L]{typeClient, emptyListCreator}, + alsoApplier[T, C]{typeClient}, + } +} + +// GetClient returns the REST interface. +func (c *Client[T]) GetClient() rest.Interface { + return c.client +} + +// GetNamespace returns the client's namespace, if any. +func (c *Client[T]) GetNamespace() string { + return c.namespace +} + +// Get takes name of the resource, and returns the corresponding object, and an error if there is any. +func (c *Client[T]) Get(ctx context.Context, name string, options metav1.GetOptions) (T, error) { + result := c.newObject() + err := c.client.Get(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + VersionedParams(&options, c.parameterCodec). + Do(ctx). + Into(result) + return result, err +} + +// List takes label and field selectors, and returns the list of resources that match those selectors. +func (l *alsoLister[T, L]) List(ctx context.Context, opts metav1.ListOptions) (L, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil { + klog.Warningf("Failed preparing watchlist options for $.type|resource$, falling back to the standard LIST semantics, err = %v", watchListOptionsErr) + } else if hasWatchListOptionsPrepared { + result, err := l.watchList(ctx, watchListOptions) + if err == nil { + consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, "watchlist request for "+l.client.resource, l.list, opts, result) + return result, nil + } + klog.Warningf("The watchlist request for %s ended with an error, falling back to the standard LIST semantics, err = %v", l.client.resource, err) + } + result, err := l.list(ctx, opts) + if err == nil { + consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, "list request for "+l.client.resource, l.list, opts, result) + } + return result, err +} + +func (l *alsoLister[T, L]) list(ctx context.Context, opts metav1.ListOptions) (L, error) { + list := l.newList() + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + err := l.client.client.Get(). + UseProtobufAsDefaultIfPreferred(l.client.prefersProtobuf). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&opts, l.client.parameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(list) + return list, err +} + +// watchList establishes a watch stream with the server and returns the list of resources. +func (l *alsoLister[T, L]) watchList(ctx context.Context, opts metav1.ListOptions) (result L, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = l.newList() + err = l.client.client.Get(). + UseProtobufAsDefaultIfPreferred(l.client.prefersProtobuf). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&opts, l.client.parameterCodec). + Timeout(timeout). + WatchList(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resources. +func (c *Client[T]) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + VersionedParams(&opts, c.parameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resource and creates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) Create(ctx context.Context, obj T, opts metav1.CreateOptions) (T, error) { + result := c.newObject() + err := c.client.Post(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// Update takes the representation of a resource and updates it. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) Update(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) { + result := c.newObject() + err := c.client.Put(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(obj.GetName()). + VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// UpdateStatus updates the status subresource of a resource. Returns the server's representation of the resource, and an error, if there is any. +func (c *Client[T]) UpdateStatus(ctx context.Context, obj T, opts metav1.UpdateOptions) (T, error) { + result := c.newObject() + err := c.client.Put(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(obj.GetName()). + SubResource("status"). + VersionedParams(&opts, c.parameterCodec). + Body(obj). + Do(ctx). + Into(result) + return result, err +} + +// Delete takes name of the resource and deletes it. Returns an error if one occurs. +func (c *Client[T]) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
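Using the hypothetical newConfigMaps constructor from the sketch above, the generic Get/List methods behave like any typed client, including the transparent watch-list attempt in List; listWebConfigMaps and the "app=web" selector are illustrative only:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listWebConfigMaps lists labelled ConfigMaps through the gentype-based client;
// CoreV1().RESTClient() supplies the rest.Interface the constructor needs.
func listWebConfigMaps(ctx context.Context, cfg *rest.Config) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	cms := newConfigMaps(clientset.CoreV1().RESTClient(), "default")

	list, err := cms.List(ctx, metav1.ListOptions{LabelSelector: "app=web"})
	if err != nil {
		return err
	}
	for i := range list.Items {
		fmt.Println(list.Items[i].Name)
	}
	return nil
}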
+func (l *alsoLister[T, L]) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return l.client.client.Delete(). + UseProtobufAsDefaultIfPreferred(l.client.prefersProtobuf). + NamespaceIfScoped(l.client.namespace, l.client.namespace != ""). + Resource(l.client.resource). + VersionedParams(&listOpts, l.client.parameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resource. +func (c *Client[T]) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (T, error) { + result := c.newObject() + err := c.client.Patch(pt). + UseProtobufAsDefaultIfPreferred(c.prefersProtobuf). + NamespaceIfScoped(c.namespace, c.namespace != ""). + Resource(c.resource). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, c.parameterCodec). + Body(data). + Do(ctx). + Into(result) + return result, err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied resource. +func (a *alsoApplier[T, C]) Apply(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) { + result := a.client.newObject() + if obj == *new(C) { + return *new(T), fmt.Errorf("object provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + if obj.GetName() == nil { + return *new(T), fmt.Errorf("obj.Name must be provided to Apply") + } + + request, err := apply.NewRequest(a.client.client, obj) + if err != nil { + return *new(T), err + } + + err = request. + UseProtobufAsDefaultIfPreferred(a.client.prefersProtobuf). + NamespaceIfScoped(a.client.namespace, a.client.namespace != ""). + Resource(a.client.resource). + Name(*obj.GetName()). + VersionedParams(&patchOpts, a.client.parameterCodec). + Do(ctx). + Into(result) + return result, err +} + +// Apply takes the given apply declarative configuration, applies it to the status subresource and returns the applied resource. +func (a *alsoApplier[T, C]) ApplyStatus(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) { + if obj == *new(C) { + return *new(T), fmt.Errorf("object provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + + if obj.GetName() == nil { + return *new(T), fmt.Errorf("obj.Name must be provided to Apply") + } + + request, err := apply.NewRequest(a.client.client, obj) + if err != nil { + return *new(T), err + } + + result := a.client.newObject() + err = request. + UseProtobufAsDefaultIfPreferred(a.client.prefersProtobuf). + NamespaceIfScoped(a.client.namespace, a.client.namespace != ""). + Resource(a.client.resource). + Name(*obj.GetName()). + SubResource("status"). + VersionedParams(&patchOpts, a.client.parameterCodec). + Do(ctx). + Into(result) + return result, err +} diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go index b768f6f7f..11c67480f 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/admissionregistration/v1" + admissionregistrationv1 "k8s.io/client-go/listers/admissionregistration/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // MutatingWebhookConfigurations. type MutatingWebhookConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.MutatingWebhookConfigurationLister + Lister() admissionregistrationv1.MutatingWebhookConfigurationLister } type mutatingWebhookConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(context.TODO(), options) }, }, - &admissionregistrationv1.MutatingWebhookConfiguration{}, + &apiadmissionregistrationv1.MutatingWebhookConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes } func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1.MutatingWebhookConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1.MutatingWebhookConfiguration{}, f.defaultInformer) } -func (f *mutatingWebhookConfigurationInformer) Lister() v1.MutatingWebhookConfigurationLister { - return v1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +func (f *mutatingWebhookConfigurationInformer) Lister() admissionregistrationv1.MutatingWebhookConfigurationLister { + return admissionregistrationv1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go index eaf9414e2..e6974238c 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/admissionregistration/v1" + admissionregistrationv1 "k8s.io/client-go/listers/admissionregistration/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicies. 
type ValidatingAdmissionPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ValidatingAdmissionPolicyLister + Lister() admissionregistrationv1.ValidatingAdmissionPolicyLister } type validatingAdmissionPolicyInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, r return client.AdmissionregistrationV1().ValidatingAdmissionPolicies().Watch(context.TODO(), options) }, }, - &admissionregistrationv1.ValidatingAdmissionPolicy{}, + &apiadmissionregistrationv1.ValidatingAdmissionPolicy{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.In } func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1.ValidatingAdmissionPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1.ValidatingAdmissionPolicy{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyInformer) Lister() v1.ValidatingAdmissionPolicyLister { - return v1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyInformer) Lister() admissionregistrationv1.ValidatingAdmissionPolicyLister { + return admissionregistrationv1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go index 8cd61bf28..34067ca38 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/admissionregistration/v1" + admissionregistrationv1 "k8s.io/client-go/listers/admissionregistration/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicyBindings. 
type ValidatingAdmissionPolicyBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ValidatingAdmissionPolicyBindingLister + Lister() admissionregistrationv1.ValidatingAdmissionPolicyBindingLister } type validatingAdmissionPolicyBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Inter return client.AdmissionregistrationV1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options) }, }, - &admissionregistrationv1.ValidatingAdmissionPolicyBinding{}, + &apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubern } func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyBindingInformer) Lister() v1.ValidatingAdmissionPolicyBindingLister { - return v1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyBindingInformer) Lister() admissionregistrationv1.ValidatingAdmissionPolicyBindingLister { + return admissionregistrationv1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go index 8ddcdf2d9..42ca69c22 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/admissionregistration/v1" + admissionregistrationv1 "k8s.io/client-go/listers/admissionregistration/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingWebhookConfigurations. 
type ValidatingWebhookConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ValidatingWebhookConfigurationLister + Lister() admissionregistrationv1.ValidatingWebhookConfigurationLister } type validatingWebhookConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interfa return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(context.TODO(), options) }, }, - &admissionregistrationv1.ValidatingWebhookConfiguration{}, + &apiadmissionregistrationv1.ValidatingWebhookConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernet } func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1.ValidatingWebhookConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1.ValidatingWebhookConfiguration{}, f.defaultInformer) } -func (f *validatingWebhookConfigurationInformer) Lister() v1.ValidatingWebhookConfigurationLister { - return v1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +func (f *validatingWebhookConfigurationInformer) Lister() admissionregistrationv1.ValidatingWebhookConfigurationLister { + return admissionregistrationv1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go index 738063ee7..68ae4e25c 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go @@ -24,6 +24,10 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // MutatingAdmissionPolicies returns a MutatingAdmissionPolicyInformer. + MutatingAdmissionPolicies() MutatingAdmissionPolicyInformer + // MutatingAdmissionPolicyBindings returns a MutatingAdmissionPolicyBindingInformer. + MutatingAdmissionPolicyBindings() MutatingAdmissionPolicyBindingInformer // ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer. ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer // ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer. @@ -41,6 +45,16 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// MutatingAdmissionPolicies returns a MutatingAdmissionPolicyInformer. +func (v *version) MutatingAdmissionPolicies() MutatingAdmissionPolicyInformer { + return &mutatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// MutatingAdmissionPolicyBindings returns a MutatingAdmissionPolicyBindingInformer. +func (v *version) MutatingAdmissionPolicyBindings() MutatingAdmissionPolicyBindingInformer { + return &mutatingAdmissionPolicyBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer. 
func (v *version) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer { return &validatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go new file mode 100644 index 000000000..5a23158bf --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + apiadmissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + admissionregistrationv1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// MutatingAdmissionPolicyInformer provides access to a shared informer and lister for +// MutatingAdmissionPolicies. +type MutatingAdmissionPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() admissionregistrationv1alpha1.MutatingAdmissionPolicyLister +} + +type mutatingAdmissionPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingAdmissionPolicyInformer constructs a new informer for MutatingAdmissionPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingAdmissionPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingAdmissionPolicyInformer constructs a new informer for MutatingAdmissionPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredMutatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().MutatingAdmissionPolicies().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().MutatingAdmissionPolicies().Watch(context.TODO(), options) + }, + }, + &apiadmissionregistrationv1alpha1.MutatingAdmissionPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingAdmissionPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingAdmissionPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiadmissionregistrationv1alpha1.MutatingAdmissionPolicy{}, f.defaultInformer) +} + +func (f *mutatingAdmissionPolicyInformer) Lister() admissionregistrationv1alpha1.MutatingAdmissionPolicyLister { + return admissionregistrationv1alpha1.NewMutatingAdmissionPolicyLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go new file mode 100644 index 000000000..efa143fe5 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + time "time" + + apiadmissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + admissionregistrationv1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// MutatingAdmissionPolicyBindingInformer provides access to a shared informer and lister for +// MutatingAdmissionPolicyBindings. 
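Beyond the import-alias churn in the surrounding generated files, the functional additions in this informer group are the new v1alpha1 MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding informers. A minimal sketch of wiring the first through the shared informer factory; watchMutatingAdmissionPolicies and cfg are illustrative names, and the cluster is assumed to serve the alpha admissionregistration.k8s.io/v1alpha1 API:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// watchMutatingAdmissionPolicies registers the new informer, starts the shared
// factory, waits for the cache to sync, and lists policies from the lister.
func watchMutatingAdmissionPolicies(ctx context.Context, cfg *rest.Config) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}

	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	policyInformer := factory.Admissionregistration().V1alpha1().MutatingAdmissionPolicies()

	// Touch Informer() before Start so the factory knows to run it.
	_ = policyInformer.Informer()
	factory.Start(ctx.Done())
	factory.WaitForCacheSync(ctx.Done())

	policies, err := policyInformer.Lister().List(labels.Everything())
	if err != nil {
		return err
	}
	for _, policy := range policies {
		fmt.Println(policy.Name)
	}
	return nil
}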
+type MutatingAdmissionPolicyBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingLister +} + +type mutatingAdmissionPolicyBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingAdmissionPolicyBindingInformer constructs a new informer for MutatingAdmissionPolicyBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingAdmissionPolicyBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingAdmissionPolicyBindingInformer constructs a new informer for MutatingAdmissionPolicyBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredMutatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().MutatingAdmissionPolicyBindings().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().MutatingAdmissionPolicyBindings().Watch(context.TODO(), options) + }, + }, + &apiadmissionregistrationv1alpha1.MutatingAdmissionPolicyBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingAdmissionPolicyBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingAdmissionPolicyBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiadmissionregistrationv1alpha1.MutatingAdmissionPolicyBinding{}, f.defaultInformer) +} + +func (f *mutatingAdmissionPolicyBindingInformer) Lister() admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingLister { + return admissionregistrationv1alpha1.NewMutatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go index 01b8a4ab8..aaae7b297 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" time "time" - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + apiadmissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicies. type ValidatingAdmissionPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ValidatingAdmissionPolicyLister + Lister() admissionregistrationv1alpha1.ValidatingAdmissionPolicyLister } type validatingAdmissionPolicyInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, r return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Watch(context.TODO(), options) }, }, - &admissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, + &apiadmissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.In } func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1alpha1.ValidatingAdmissionPolicy{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyInformer) Lister() v1alpha1.ValidatingAdmissionPolicyLister { - return v1alpha1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyInformer) Lister() admissionregistrationv1alpha1.ValidatingAdmissionPolicyLister { + return admissionregistrationv1alpha1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index bd531512b..d62c59061 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + apiadmissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicyBindings. 
type ValidatingAdmissionPolicyBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ValidatingAdmissionPolicyBindingLister + Lister() admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingLister } type validatingAdmissionPolicyBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Inter return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options) }, }, - &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, + &apiadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubern } func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyBindingInformer) Lister() v1alpha1.ValidatingAdmissionPolicyBindingLister { - return v1alpha1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyBindingInformer) Lister() admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingLister { + return admissionregistrationv1alpha1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 12c8ec1fb..c6ca36ea2 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // MutatingWebhookConfigurations. 
type MutatingWebhookConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.MutatingWebhookConfigurationLister + Lister() admissionregistrationv1beta1.MutatingWebhookConfigurationLister } type mutatingWebhookConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(context.TODO(), options) }, }, - &admissionregistrationv1beta1.MutatingWebhookConfiguration{}, + &apiadmissionregistrationv1beta1.MutatingWebhookConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes } func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1beta1.MutatingWebhookConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1beta1.MutatingWebhookConfiguration{}, f.defaultInformer) } -func (f *mutatingWebhookConfigurationInformer) Lister() v1beta1.MutatingWebhookConfigurationLister { - return v1beta1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +func (f *mutatingWebhookConfigurationInformer) Lister() admissionregistrationv1beta1.MutatingWebhookConfigurationLister { + return admissionregistrationv1beta1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go index d0e9cd64c..d5b4204f1 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicies. 
type ValidatingAdmissionPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ValidatingAdmissionPolicyLister + Lister() admissionregistrationv1beta1.ValidatingAdmissionPolicyLister } type validatingAdmissionPolicyInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, r return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().Watch(context.TODO(), options) }, }, - &admissionregistrationv1beta1.ValidatingAdmissionPolicy{}, + &apiadmissionregistrationv1beta1.ValidatingAdmissionPolicy{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.In } func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingAdmissionPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1beta1.ValidatingAdmissionPolicy{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyInformer) Lister() v1beta1.ValidatingAdmissionPolicyLister { - return v1beta1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyInformer) Lister() admissionregistrationv1beta1.ValidatingAdmissionPolicyLister { + return admissionregistrationv1beta1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 7641e9940..dbb5153ef 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingAdmissionPolicyBindings. 
type ValidatingAdmissionPolicyBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ValidatingAdmissionPolicyBindingLister + Lister() admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingLister } type validatingAdmissionPolicyBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Inter return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options) }, }, - &admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, + &apiadmissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubern } func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer) } -func (f *validatingAdmissionPolicyBindingInformer) Lister() v1beta1.ValidatingAdmissionPolicyBindingLister { - return v1beta1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) +func (f *validatingAdmissionPolicyBindingInformer) Lister() admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingLister { + return admissionregistrationv1beta1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 05eb05097..602b361af 100644 --- a/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + apiadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ValidatingWebhookConfigurations. 
type ValidatingWebhookConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ValidatingWebhookConfigurationLister + Lister() admissionregistrationv1beta1.ValidatingWebhookConfigurationLister } type validatingWebhookConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interfa return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(context.TODO(), options) }, }, - &admissionregistrationv1beta1.ValidatingWebhookConfiguration{}, + &apiadmissionregistrationv1beta1.ValidatingWebhookConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernet } func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiadmissionregistrationv1beta1.ValidatingWebhookConfiguration{}, f.defaultInformer) } -func (f *validatingWebhookConfigurationInformer) Lister() v1beta1.ValidatingWebhookConfigurationLister { - return v1beta1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +func (f *validatingWebhookConfigurationInformer) Lister() admissionregistrationv1beta1.ValidatingWebhookConfigurationLister { + return admissionregistrationv1beta1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go b/constraint/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go index 34175b522..a99dbd17d 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go +++ b/constraint/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + apiapiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/apiserverinternal/v1alpha1" + apiserverinternalv1alpha1 "k8s.io/client-go/listers/apiserverinternal/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StorageVersions. 
type StorageVersionInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.StorageVersionLister + Lister() apiserverinternalv1alpha1.StorageVersionLister } type storageVersionInformer struct { @@ -70,7 +70,7 @@ func NewFilteredStorageVersionInformer(client kubernetes.Interface, resyncPeriod return client.InternalV1alpha1().StorageVersions().Watch(context.TODO(), options) }, }, - &apiserverinternalv1alpha1.StorageVersion{}, + &apiapiserverinternalv1alpha1.StorageVersion{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *storageVersionInformer) defaultInformer(client kubernetes.Interface, re } func (f *storageVersionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apiserverinternalv1alpha1.StorageVersion{}, f.defaultInformer) + return f.factory.InformerFor(&apiapiserverinternalv1alpha1.StorageVersion{}, f.defaultInformer) } -func (f *storageVersionInformer) Lister() v1alpha1.StorageVersionLister { - return v1alpha1.NewStorageVersionLister(f.Informer().GetIndexer()) +func (f *storageVersionInformer) Lister() apiserverinternalv1alpha1.StorageVersionLister { + return apiserverinternalv1alpha1.NewStorageVersionLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go index 31e2b74d0..334a1b8f8 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - appsv1 "k8s.io/api/apps/v1" + apiappsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/apps/v1" + appsv1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ControllerRevisions. 
type ControllerRevisionInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ControllerRevisionLister + Lister() appsv1.ControllerRevisionLister } type controllerRevisionInformer struct { @@ -71,7 +71,7 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac return client.AppsV1().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, - &appsv1.ControllerRevision{}, + &apiappsv1.ControllerRevision{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface } func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1.ControllerRevision{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1.ControllerRevision{}, f.defaultInformer) } -func (f *controllerRevisionInformer) Lister() v1.ControllerRevisionLister { - return v1.NewControllerRevisionLister(f.Informer().GetIndexer()) +func (f *controllerRevisionInformer) Lister() appsv1.ControllerRevisionLister { + return appsv1.NewControllerRevisionLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go index da7fe9509..73adf8cbf 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - appsv1 "k8s.io/api/apps/v1" + apiappsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/apps/v1" + appsv1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // DaemonSets. type DaemonSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.DaemonSetLister + Lister() appsv1.DaemonSetLister } type daemonSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, return client.AppsV1().DaemonSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1.DaemonSet{}, + &apiappsv1.DaemonSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1.DaemonSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1.DaemonSet{}, f.defaultInformer) } -func (f *daemonSetInformer) Lister() v1.DaemonSetLister { - return v1.NewDaemonSetLister(f.Informer().GetIndexer()) +func (f *daemonSetInformer) Lister() appsv1.DaemonSetLister { + return appsv1.NewDaemonSetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1/deployment.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1/deployment.go index bd639bb3d..f9314844c 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1/deployment.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - appsv1 "k8s.io/api/apps/v1" + apiappsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/apps/v1" + appsv1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Deployments. type DeploymentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.DeploymentLister + Lister() appsv1.DeploymentLister } type deploymentInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string return client.AppsV1().Deployments(namespace).Watch(context.TODO(), options) }, }, - &appsv1.Deployment{}, + &apiappsv1.Deployment{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resync } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1.Deployment{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1.Deployment{}, f.defaultInformer) } -func (f *deploymentInformer) Lister() v1.DeploymentLister { - return v1.NewDeploymentLister(f.Informer().GetIndexer()) +func (f *deploymentInformer) Lister() appsv1.DeploymentLister { + return appsv1.NewDeploymentLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go index 6d81a471a..dfa8ae87a 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - appsv1 "k8s.io/api/apps/v1" + apiappsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/apps/v1" + appsv1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ReplicaSets. 
type ReplicaSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ReplicaSetLister + Lister() appsv1.ReplicaSetLister } type replicaSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string return client.AppsV1().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1.ReplicaSet{}, + &apiappsv1.ReplicaSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resync } func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1.ReplicaSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1.ReplicaSet{}, f.defaultInformer) } -func (f *replicaSetInformer) Lister() v1.ReplicaSetLister { - return v1.NewReplicaSetLister(f.Informer().GetIndexer()) +func (f *replicaSetInformer) Lister() appsv1.ReplicaSetLister { + return appsv1.NewReplicaSetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go index c99bbb73e..84ca50123 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - appsv1 "k8s.io/api/apps/v1" + apiappsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/apps/v1" + appsv1 "k8s.io/client-go/listers/apps/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StatefulSets. type StatefulSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.StatefulSetLister + Lister() appsv1.StatefulSetLister } type statefulSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin return client.AppsV1().StatefulSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1.StatefulSet{}, + &apiappsv1.StatefulSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1.StatefulSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1.StatefulSet{}, f.defaultInformer) } -func (f *statefulSetInformer) Lister() v1.StatefulSetLister { - return v1.NewStatefulSetLister(f.Informer().GetIndexer()) +func (f *statefulSetInformer) Lister() appsv1.StatefulSetLister { + return appsv1.NewStatefulSetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go index cb36bd7fd..c0a51dbe3 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - appsv1beta1 "k8s.io/api/apps/v1beta1" + apiappsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + appsv1beta1 "k8s.io/client-go/listers/apps/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ControllerRevisions. type ControllerRevisionInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ControllerRevisionLister + Lister() appsv1beta1.ControllerRevisionLister } type controllerRevisionInformer struct { @@ -71,7 +71,7 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac return client.AppsV1beta1().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta1.ControllerRevision{}, + &apiappsv1beta1.ControllerRevision{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface } func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta1.ControllerRevision{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta1.ControllerRevision{}, f.defaultInformer) } -func (f *controllerRevisionInformer) Lister() v1beta1.ControllerRevisionLister { - return v1beta1.NewControllerRevisionLister(f.Informer().GetIndexer()) +func (f *controllerRevisionInformer) Lister() appsv1beta1.ControllerRevisionLister { + return appsv1beta1.NewControllerRevisionLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go index e02a13c2f..027ae402d 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - appsv1beta1 "k8s.io/api/apps/v1beta1" + apiappsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + appsv1beta1 "k8s.io/client-go/listers/apps/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Deployments. 
type DeploymentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.DeploymentLister + Lister() appsv1beta1.DeploymentLister } type deploymentInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string return client.AppsV1beta1().Deployments(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta1.Deployment{}, + &apiappsv1beta1.Deployment{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resync } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta1.Deployment{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta1.Deployment{}, f.defaultInformer) } -func (f *deploymentInformer) Lister() v1beta1.DeploymentLister { - return v1beta1.NewDeploymentLister(f.Informer().GetIndexer()) +func (f *deploymentInformer) Lister() appsv1beta1.DeploymentLister { + return appsv1beta1.NewDeploymentLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go index b845cc99c..bc357d7e7 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - appsv1beta1 "k8s.io/api/apps/v1beta1" + apiappsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + appsv1beta1 "k8s.io/client-go/listers/apps/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StatefulSets. type StatefulSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.StatefulSetLister + Lister() appsv1beta1.StatefulSetLister } type statefulSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin return client.AppsV1beta1().StatefulSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta1.StatefulSet{}, + &apiappsv1beta1.StatefulSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta1.StatefulSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta1.StatefulSet{}, f.defaultInformer) } -func (f *statefulSetInformer) Lister() v1beta1.StatefulSetLister { - return v1beta1.NewStatefulSetLister(f.Informer().GetIndexer()) +func (f *statefulSetInformer) Lister() appsv1beta1.StatefulSetLister { + return appsv1beta1.NewStatefulSetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go index 4d0e91320..62a560fda 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta2 import ( - "context" + context "context" time "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" + apiappsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + appsv1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ControllerRevisions. type ControllerRevisionInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.ControllerRevisionLister + Lister() appsv1beta2.ControllerRevisionLister } type controllerRevisionInformer struct { @@ -71,7 +71,7 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac return client.AppsV1beta2().ControllerRevisions(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta2.ControllerRevision{}, + &apiappsv1beta2.ControllerRevision{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface } func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta2.ControllerRevision{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta2.ControllerRevision{}, f.defaultInformer) } -func (f *controllerRevisionInformer) Lister() v1beta2.ControllerRevisionLister { - return v1beta2.NewControllerRevisionLister(f.Informer().GetIndexer()) +func (f *controllerRevisionInformer) Lister() appsv1beta2.ControllerRevisionLister { + return appsv1beta2.NewControllerRevisionLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go index 280e2fe46..9d4c8ede9 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" time "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" + apiappsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + appsv1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // DaemonSets. 
type DaemonSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.DaemonSetLister + Lister() appsv1beta2.DaemonSetLister } type daemonSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, return client.AppsV1beta2().DaemonSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta2.DaemonSet{}, + &apiappsv1beta2.DaemonSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta2.DaemonSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta2.DaemonSet{}, f.defaultInformer) } -func (f *daemonSetInformer) Lister() v1beta2.DaemonSetLister { - return v1beta2.NewDaemonSetLister(f.Informer().GetIndexer()) +func (f *daemonSetInformer) Lister() appsv1beta2.DaemonSetLister { + return appsv1beta2.NewDaemonSetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go index 67bdb7972..be85192cf 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" time "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" + apiappsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + appsv1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Deployments. type DeploymentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.DeploymentLister + Lister() appsv1beta2.DeploymentLister } type deploymentInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string return client.AppsV1beta2().Deployments(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta2.Deployment{}, + &apiappsv1beta2.Deployment{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resync } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta2.Deployment{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta2.Deployment{}, f.defaultInformer) } -func (f *deploymentInformer) Lister() v1beta2.DeploymentLister { - return v1beta2.NewDeploymentLister(f.Informer().GetIndexer()) +func (f *deploymentInformer) Lister() appsv1beta2.DeploymentLister { + return appsv1beta2.NewDeploymentLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go index 85d12bb65..e5d279708 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta2 import ( - "context" + context "context" time "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" + apiappsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + appsv1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ReplicaSets. type ReplicaSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.ReplicaSetLister + Lister() appsv1beta2.ReplicaSetLister } type replicaSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string return client.AppsV1beta2().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta2.ReplicaSet{}, + &apiappsv1beta2.ReplicaSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resync } func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta2.ReplicaSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta2.ReplicaSet{}, f.defaultInformer) } -func (f *replicaSetInformer) Lister() v1beta2.ReplicaSetLister { - return v1beta2.NewReplicaSetLister(f.Informer().GetIndexer()) +func (f *replicaSetInformer) Lister() appsv1beta2.ReplicaSetLister { + return appsv1beta2.NewReplicaSetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go index 2fab6f7b2..d147fc885 100644 --- a/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" time "time" - appsv1beta2 "k8s.io/api/apps/v1beta2" + apiappsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + appsv1beta2 "k8s.io/client-go/listers/apps/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StatefulSets. 
type StatefulSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.StatefulSetLister + Lister() appsv1beta2.StatefulSetLister } type statefulSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin return client.AppsV1beta2().StatefulSets(namespace).Watch(context.TODO(), options) }, }, - &appsv1beta2.StatefulSet{}, + &apiappsv1beta2.StatefulSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&appsv1beta2.StatefulSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiappsv1beta2.StatefulSet{}, f.defaultInformer) } -func (f *statefulSetInformer) Lister() v1beta2.StatefulSetLister { - return v1beta2.NewStatefulSetLister(f.Informer().GetIndexer()) +func (f *statefulSetInformer) Lister() appsv1beta2.StatefulSetLister { + return appsv1beta2.NewStatefulSetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go index 44f041e90..fce275934 100644 --- a/constraint/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - autoscalingv1 "k8s.io/api/autoscaling/v1" + apiautoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/autoscaling/v1" + autoscalingv1 "k8s.io/client-go/listers/autoscaling/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // HorizontalPodAutoscalers. 
type HorizontalPodAutoscalerInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.HorizontalPodAutoscalerLister + Lister() autoscalingv1.HorizontalPodAutoscalerLister } type horizontalPodAutoscalerInformer struct { @@ -71,7 +71,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, - &autoscalingv1.HorizontalPodAutoscaler{}, + &apiautoscalingv1.HorizontalPodAutoscaler{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Inte } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscalingv1.HorizontalPodAutoscaler{}, f.defaultInformer) + return f.factory.InformerFor(&apiautoscalingv1.HorizontalPodAutoscaler{}, f.defaultInformer) } -func (f *horizontalPodAutoscalerInformer) Lister() v1.HorizontalPodAutoscalerLister { - return v1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +func (f *horizontalPodAutoscalerInformer) Lister() autoscalingv1.HorizontalPodAutoscalerLister { + return autoscalingv1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go index 5ddb3b015..92104f822 100644 --- a/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go @@ -19,16 +19,16 @@ limitations under the License. package v2 import ( - "context" + context "context" time "time" - autoscalingv2 "k8s.io/api/autoscaling/v2" + apiautoscalingv2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v2 "k8s.io/client-go/listers/autoscaling/v2" + autoscalingv2 "k8s.io/client-go/listers/autoscaling/v2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // HorizontalPodAutoscalers. 
type HorizontalPodAutoscalerInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.HorizontalPodAutoscalerLister + Lister() autoscalingv2.HorizontalPodAutoscalerLister } type horizontalPodAutoscalerInformer struct { @@ -71,7 +71,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam return client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, - &autoscalingv2.HorizontalPodAutoscaler{}, + &apiautoscalingv2.HorizontalPodAutoscaler{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Inte } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscalingv2.HorizontalPodAutoscaler{}, f.defaultInformer) + return f.factory.InformerFor(&apiautoscalingv2.HorizontalPodAutoscaler{}, f.defaultInformer) } -func (f *horizontalPodAutoscalerInformer) Lister() v2.HorizontalPodAutoscalerLister { - return v2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +func (f *horizontalPodAutoscalerInformer) Lister() autoscalingv2.HorizontalPodAutoscalerLister { + return autoscalingv2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go index 6385a2a19..b77602718 100644 --- a/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,16 +19,16 @@ limitations under the License. package v2beta1 import ( - "context" + context "context" time "time" - autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + apiautoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1" + autoscalingv2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // HorizontalPodAutoscalers. 
type HorizontalPodAutoscalerInformer interface { Informer() cache.SharedIndexInformer - Lister() v2beta1.HorizontalPodAutoscalerLister + Lister() autoscalingv2beta1.HorizontalPodAutoscalerLister } type horizontalPodAutoscalerInformer struct { @@ -71,7 +71,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, - &autoscalingv2beta1.HorizontalPodAutoscaler{}, + &apiautoscalingv2beta1.HorizontalPodAutoscaler{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Inte } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscalingv2beta1.HorizontalPodAutoscaler{}, f.defaultInformer) + return f.factory.InformerFor(&apiautoscalingv2beta1.HorizontalPodAutoscaler{}, f.defaultInformer) } -func (f *horizontalPodAutoscalerInformer) Lister() v2beta1.HorizontalPodAutoscalerLister { - return v2beta1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +func (f *horizontalPodAutoscalerInformer) Lister() autoscalingv2beta1.HorizontalPodAutoscalerLister { + return autoscalingv2beta1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go index f1ac3f073..1848429b1 100644 --- a/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,16 +19,16 @@ limitations under the License. package v2beta2 import ( - "context" + context "context" time "time" - autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + apiautoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v2beta2 "k8s.io/client-go/listers/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/client-go/listers/autoscaling/v2beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // HorizontalPodAutoscalers. 
type HorizontalPodAutoscalerInformer interface { Informer() cache.SharedIndexInformer - Lister() v2beta2.HorizontalPodAutoscalerLister + Lister() autoscalingv2beta2.HorizontalPodAutoscalerLister } type horizontalPodAutoscalerInformer struct { @@ -71,7 +71,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Watch(context.TODO(), options) }, }, - &autoscalingv2beta2.HorizontalPodAutoscaler{}, + &apiautoscalingv2beta2.HorizontalPodAutoscaler{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Inte } func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&autoscalingv2beta2.HorizontalPodAutoscaler{}, f.defaultInformer) + return f.factory.InformerFor(&apiautoscalingv2beta2.HorizontalPodAutoscaler{}, f.defaultInformer) } -func (f *horizontalPodAutoscalerInformer) Lister() v2beta2.HorizontalPodAutoscalerLister { - return v2beta2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +func (f *horizontalPodAutoscalerInformer) Lister() autoscalingv2beta2.HorizontalPodAutoscalerLister { + return autoscalingv2beta2.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go b/constraint/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go index fdfb65513..2a188acdd 100644 --- a/constraint/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go +++ b/constraint/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - batchv1 "k8s.io/api/batch/v1" + apibatchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/batch/v1" + batchv1 "k8s.io/client-go/listers/batch/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CronJobs. type CronJobInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.CronJobLister + Lister() batchv1.CronJobLister } type cronJobInformer struct { @@ -71,7 +71,7 @@ func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, r return client.BatchV1().CronJobs(namespace).Watch(context.TODO(), options) }, }, - &batchv1.CronJob{}, + &apibatchv1.CronJob{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *cronJobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batchv1.CronJob{}, f.defaultInformer) + return f.factory.InformerFor(&apibatchv1.CronJob{}, f.defaultInformer) } -func (f *cronJobInformer) Lister() v1.CronJobLister { - return v1.NewCronJobLister(f.Informer().GetIndexer()) +func (f *cronJobInformer) Lister() batchv1.CronJobLister { + return batchv1.NewCronJobLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/batch/v1/job.go b/constraint/vendor/k8s.io/client-go/informers/batch/v1/job.go index 4992f5228..439ec7a6a 100644 --- a/constraint/vendor/k8s.io/client-go/informers/batch/v1/job.go +++ b/constraint/vendor/k8s.io/client-go/informers/batch/v1/job.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - batchv1 "k8s.io/api/batch/v1" + apibatchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/batch/v1" + batchv1 "k8s.io/client-go/listers/batch/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Jobs. type JobInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.JobLister + Lister() batchv1.JobLister } type jobInformer struct { @@ -71,7 +71,7 @@ func NewFilteredJobInformer(client kubernetes.Interface, namespace string, resyn return client.BatchV1().Jobs(namespace).Watch(context.TODO(), options) }, }, - &batchv1.Job{}, + &apibatchv1.Job{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *jobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *jobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batchv1.Job{}, f.defaultInformer) + return f.factory.InformerFor(&apibatchv1.Job{}, f.defaultInformer) } -func (f *jobInformer) Lister() v1.JobLister { - return v1.NewJobLister(f.Informer().GetIndexer()) +func (f *jobInformer) Lister() batchv1.JobLister { + return batchv1.NewJobLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/constraint/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go index 820c93eaa..1f061e16c 100644 --- a/constraint/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go +++ b/constraint/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - batchv1beta1 "k8s.io/api/batch/v1beta1" + apibatchv1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/batch/v1beta1" + batchv1beta1 "k8s.io/client-go/listers/batch/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CronJobs. 
type CronJobInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.CronJobLister + Lister() batchv1beta1.CronJobLister } type cronJobInformer struct { @@ -71,7 +71,7 @@ func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, r return client.BatchV1beta1().CronJobs(namespace).Watch(context.TODO(), options) }, }, - &batchv1beta1.CronJob{}, + &apibatchv1beta1.CronJob{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *cronJobInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&batchv1beta1.CronJob{}, f.defaultInformer) + return f.factory.InformerFor(&apibatchv1beta1.CronJob{}, f.defaultInformer) } -func (f *cronJobInformer) Lister() v1beta1.CronJobLister { - return v1beta1.NewCronJobLister(f.Informer().GetIndexer()) +func (f *cronJobInformer) Lister() batchv1beta1.CronJobLister { + return batchv1beta1.NewCronJobLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go b/constraint/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go index 73d33a914..0bd32ab95 100644 --- a/constraint/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go +++ b/constraint/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - certificatesv1 "k8s.io/api/certificates/v1" + apicertificatesv1 "k8s.io/api/certificates/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/certificates/v1" + certificatesv1 "k8s.io/client-go/listers/certificates/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CertificateSigningRequests. 
type CertificateSigningRequestInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.CertificateSigningRequestLister + Lister() certificatesv1.CertificateSigningRequestLister } type certificateSigningRequestInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, r return client.CertificatesV1().CertificateSigningRequests().Watch(context.TODO(), options) }, }, - &certificatesv1.CertificateSigningRequest{}, + &apicertificatesv1.CertificateSigningRequest{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *certificateSigningRequestInformer) defaultInformer(client kubernetes.In } func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&certificatesv1.CertificateSigningRequest{}, f.defaultInformer) + return f.factory.InformerFor(&apicertificatesv1.CertificateSigningRequest{}, f.defaultInformer) } -func (f *certificateSigningRequestInformer) Lister() v1.CertificateSigningRequestLister { - return v1.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) +func (f *certificateSigningRequestInformer) Lister() certificatesv1.CertificateSigningRequestLister { + return certificatesv1.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go b/constraint/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go index e8b341587..046688961 100644 --- a/constraint/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go +++ b/constraint/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + apicertificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1" + certificatesv1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterTrustBundles. 
type ClusterTrustBundleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterTrustBundleLister + Lister() certificatesv1alpha1.ClusterTrustBundleLister } type clusterTrustBundleInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterTrustBundleInformer(client kubernetes.Interface, resyncPe return client.CertificatesV1alpha1().ClusterTrustBundles().Watch(context.TODO(), options) }, }, - &certificatesv1alpha1.ClusterTrustBundle{}, + &apicertificatesv1alpha1.ClusterTrustBundle{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterTrustBundleInformer) defaultInformer(client kubernetes.Interface } func (f *clusterTrustBundleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&certificatesv1alpha1.ClusterTrustBundle{}, f.defaultInformer) + return f.factory.InformerFor(&apicertificatesv1alpha1.ClusterTrustBundle{}, f.defaultInformer) } -func (f *clusterTrustBundleInformer) Lister() v1alpha1.ClusterTrustBundleLister { - return v1alpha1.NewClusterTrustBundleLister(f.Informer().GetIndexer()) +func (f *clusterTrustBundleInformer) Lister() certificatesv1alpha1.ClusterTrustBundleLister { + return certificatesv1alpha1.NewClusterTrustBundleLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/constraint/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go index 4e167ab8b..b3aff1cc8 100644 --- a/constraint/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go +++ b/constraint/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + apicertificatesv1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/certificates/v1beta1" + certificatesv1beta1 "k8s.io/client-go/listers/certificates/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CertificateSigningRequests. 
type CertificateSigningRequestInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.CertificateSigningRequestLister + Lister() certificatesv1beta1.CertificateSigningRequestLister } type certificateSigningRequestInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, r return client.CertificatesV1beta1().CertificateSigningRequests().Watch(context.TODO(), options) }, }, - &certificatesv1beta1.CertificateSigningRequest{}, + &apicertificatesv1beta1.CertificateSigningRequest{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *certificateSigningRequestInformer) defaultInformer(client kubernetes.In } func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&certificatesv1beta1.CertificateSigningRequest{}, f.defaultInformer) + return f.factory.InformerFor(&apicertificatesv1beta1.CertificateSigningRequest{}, f.defaultInformer) } -func (f *certificateSigningRequestInformer) Lister() v1beta1.CertificateSigningRequestLister { - return v1beta1.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) +func (f *certificateSigningRequestInformer) Lister() certificatesv1beta1.CertificateSigningRequestLister { + return certificatesv1beta1.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/coordination/interface.go b/constraint/vendor/k8s.io/client-go/informers/coordination/interface.go index 54cfd7b9f..d5bde12ac 100644 --- a/constraint/vendor/k8s.io/client-go/informers/coordination/interface.go +++ b/constraint/vendor/k8s.io/client-go/informers/coordination/interface.go @@ -20,6 +20,7 @@ package coordination import ( v1 "k8s.io/client-go/informers/coordination/v1" + v1alpha2 "k8s.io/client-go/informers/coordination/v1alpha2" v1beta1 "k8s.io/client-go/informers/coordination/v1beta1" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" ) @@ -28,6 +29,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface + // V1alpha2 provides access to shared informers for resources in V1alpha2. + V1alpha2() v1alpha2.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -48,6 +51,11 @@ func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } +// V1alpha2 returns a new v1alpha2.Interface. +func (g *group) V1alpha2() v1alpha2.Interface { + return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/constraint/vendor/k8s.io/client-go/informers/coordination/v1/lease.go b/constraint/vendor/k8s.io/client-go/informers/coordination/v1/lease.go index e538923a8..0627d7309 100644 --- a/constraint/vendor/k8s.io/client-go/informers/coordination/v1/lease.go +++ b/constraint/vendor/k8s.io/client-go/informers/coordination/v1/lease.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - coordinationv1 "k8s.io/api/coordination/v1" + apicoordinationv1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/coordination/v1" + coordinationv1 "k8s.io/client-go/listers/coordination/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Leases. type LeaseInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.LeaseLister + Lister() coordinationv1.LeaseLister } type leaseInformer struct { @@ -71,7 +71,7 @@ func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, res return client.CoordinationV1().Leases(namespace).Watch(context.TODO(), options) }, }, - &coordinationv1.Lease{}, + &apicoordinationv1.Lease{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *leaseInformer) defaultInformer(client kubernetes.Interface, resyncPerio } func (f *leaseInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&coordinationv1.Lease{}, f.defaultInformer) + return f.factory.InformerFor(&apicoordinationv1.Lease{}, f.defaultInformer) } -func (f *leaseInformer) Lister() v1.LeaseLister { - return v1.NewLeaseLister(f.Informer().GetIndexer()) +func (f *leaseInformer) Lister() coordinationv1.LeaseLister { + return coordinationv1.NewLeaseLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/coordination/v1alpha2/interface.go b/constraint/vendor/k8s.io/client-go/informers/coordination/v1alpha2/interface.go new file mode 100644 index 000000000..ba83768ad --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/coordination/v1alpha2/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // LeaseCandidates returns a LeaseCandidateInformer. + LeaseCandidates() LeaseCandidateInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// LeaseCandidates returns a LeaseCandidateInformer. 
+func (v *version) LeaseCandidates() LeaseCandidateInformer { + return &leaseCandidateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/constraint/vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go b/constraint/vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go new file mode 100644 index 000000000..f38adf652 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + context "context" + time "time" + + apicoordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + coordinationv1alpha2 "k8s.io/client-go/listers/coordination/v1alpha2" + cache "k8s.io/client-go/tools/cache" +) + +// LeaseCandidateInformer provides access to a shared informer and lister for +// LeaseCandidates. +type LeaseCandidateInformer interface { + Informer() cache.SharedIndexInformer + Lister() coordinationv1alpha2.LeaseCandidateLister +} + +type leaseCandidateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewLeaseCandidateInformer constructs a new informer for LeaseCandidate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewLeaseCandidateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredLeaseCandidateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredLeaseCandidateInformer constructs a new informer for LeaseCandidate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredLeaseCandidateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoordinationV1alpha2().LeaseCandidates(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoordinationV1alpha2().LeaseCandidates(namespace).Watch(context.TODO(), options) + }, + }, + &apicoordinationv1alpha2.LeaseCandidate{}, + resyncPeriod, + indexers, + ) +} + +func (f *leaseCandidateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredLeaseCandidateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *leaseCandidateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apicoordinationv1alpha2.LeaseCandidate{}, f.defaultInformer) +} + +func (f *leaseCandidateInformer) Lister() coordinationv1alpha2.LeaseCandidateLister { + return coordinationv1alpha2.NewLeaseCandidateLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go b/constraint/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go index 5a6959c0b..563a25c30 100644 --- a/constraint/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go +++ b/constraint/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - coordinationv1beta1 "k8s.io/api/coordination/v1beta1" + apicoordinationv1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/coordination/v1beta1" + coordinationv1beta1 "k8s.io/client-go/listers/coordination/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Leases. 
type LeaseInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.LeaseLister + Lister() coordinationv1beta1.LeaseLister } type leaseInformer struct { @@ -71,7 +71,7 @@ func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, res return client.CoordinationV1beta1().Leases(namespace).Watch(context.TODO(), options) }, }, - &coordinationv1beta1.Lease{}, + &apicoordinationv1beta1.Lease{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *leaseInformer) defaultInformer(client kubernetes.Interface, resyncPerio } func (f *leaseInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&coordinationv1beta1.Lease{}, f.defaultInformer) + return f.factory.InformerFor(&apicoordinationv1beta1.Lease{}, f.defaultInformer) } -func (f *leaseInformer) Lister() v1beta1.LeaseLister { - return v1beta1.NewLeaseLister(f.Informer().GetIndexer()) +func (f *leaseInformer) Lister() coordinationv1beta1.LeaseLister { + return coordinationv1beta1.NewLeaseLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go index ccdee535b..2a97c638f 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ComponentStatuses. type ComponentStatusInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ComponentStatusLister + Lister() corev1.ComponentStatusLister } type componentStatusInformer struct { @@ -70,7 +70,7 @@ func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPerio return client.CoreV1().ComponentStatuses().Watch(context.TODO(), options) }, }, - &corev1.ComponentStatus{}, + &apicorev1.ComponentStatus{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *componentStatusInformer) defaultInformer(client kubernetes.Interface, r } func (f *componentStatusInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.ComponentStatus{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.ComponentStatus{}, f.defaultInformer) } -func (f *componentStatusInformer) Lister() v1.ComponentStatusLister { - return v1.NewComponentStatusLister(f.Informer().GetIndexer()) +func (f *componentStatusInformer) Lister() corev1.ComponentStatusLister { + return corev1.NewComponentStatusLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/configmap.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/configmap.go index 625358178..07f5fb1f7 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/configmap.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/configmap.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ConfigMaps. type ConfigMapInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ConfigMapLister + Lister() corev1.ConfigMapLister } type configMapInformer struct { @@ -71,7 +71,7 @@ func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string, return client.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), options) }, }, - &corev1.ConfigMap{}, + &apicorev1.ConfigMap{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *configMapInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *configMapInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.ConfigMap{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.ConfigMap{}, f.defaultInformer) } -func (f *configMapInformer) Lister() v1.ConfigMapLister { - return v1.NewConfigMapLister(f.Informer().GetIndexer()) +func (f *configMapInformer) Lister() corev1.ConfigMapLister { + return corev1.NewConfigMapLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/endpoints.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/endpoints.go index cd0f25b7f..6171d5df6 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/endpoints.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/endpoints.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Endpoints. 
type EndpointsInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.EndpointsLister + Lister() corev1.EndpointsLister } type endpointsInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string, return client.CoreV1().Endpoints(namespace).Watch(context.TODO(), options) }, }, - &corev1.Endpoints{}, + &apicorev1.Endpoints{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *endpointsInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *endpointsInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Endpoints{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Endpoints{}, f.defaultInformer) } -func (f *endpointsInformer) Lister() v1.EndpointsLister { - return v1.NewEndpointsLister(f.Informer().GetIndexer()) +func (f *endpointsInformer) Lister() corev1.EndpointsLister { + return corev1.NewEndpointsLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/event.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/event.go index 8825e9b7a..55500679d 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/event.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/event.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Events. type EventInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.EventLister + Lister() corev1.EventLister } type eventInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res return client.CoreV1().Events(namespace).Watch(context.TODO(), options) }, }, - &corev1.Event{}, + &apicorev1.Event{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPerio } func (f *eventInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Event{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Event{}, f.defaultInformer) } -func (f *eventInformer) Lister() v1.EventLister { - return v1.NewEventLister(f.Informer().GetIndexer()) +func (f *eventInformer) Lister() corev1.EventLister { + return corev1.NewEventLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/limitrange.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/limitrange.go index 4cbfda1f7..2c2dec79f 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/limitrange.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/limitrange.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // LimitRanges. type LimitRangeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.LimitRangeLister + Lister() corev1.LimitRangeLister } type limitRangeInformer struct { @@ -71,7 +71,7 @@ func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string return client.CoreV1().LimitRanges(namespace).Watch(context.TODO(), options) }, }, - &corev1.LimitRange{}, + &apicorev1.LimitRange{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *limitRangeInformer) defaultInformer(client kubernetes.Interface, resync } func (f *limitRangeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.LimitRange{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.LimitRange{}, f.defaultInformer) } -func (f *limitRangeInformer) Lister() v1.LimitRangeLister { - return v1.NewLimitRangeLister(f.Informer().GetIndexer()) +func (f *limitRangeInformer) Lister() corev1.LimitRangeLister { + return corev1.NewLimitRangeLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/namespace.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/namespace.go index 506f930a7..09e0740ba 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/namespace.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/namespace.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Namespaces. 
type NamespaceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.NamespaceLister + Lister() corev1.NamespaceLister } type namespaceInformer struct { @@ -70,7 +70,7 @@ func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time return client.CoreV1().Namespaces().Watch(context.TODO(), options) }, }, - &corev1.Namespace{}, + &apicorev1.Namespace{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *namespaceInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *namespaceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Namespace{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Namespace{}, f.defaultInformer) } -func (f *namespaceInformer) Lister() v1.NamespaceLister { - return v1.NewNamespaceLister(f.Informer().GetIndexer()) +func (f *namespaceInformer) Lister() corev1.NamespaceLister { + return corev1.NewNamespaceLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/node.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/node.go index 9939fc2cb..608aa9fb7 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/node.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/node.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Nodes. type NodeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.NodeLister + Lister() corev1.NodeLister } type nodeInformer struct { @@ -70,7 +70,7 @@ func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Dura return client.CoreV1().Nodes().Watch(context.TODO(), options) }, }, - &corev1.Node{}, + &apicorev1.Node{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *nodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *nodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Node{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Node{}, f.defaultInformer) } -func (f *nodeInformer) Lister() v1.NodeLister { - return v1.NewNodeLister(f.Informer().GetIndexer()) +func (f *nodeInformer) Lister() corev1.NodeLister { + return corev1.NewNodeLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go index c82445997..19c0929e5 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PersistentVolumes. type PersistentVolumeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PersistentVolumeLister + Lister() corev1.PersistentVolumeLister } type persistentVolumeInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeri return client.CoreV1().PersistentVolumes().Watch(context.TODO(), options) }, }, - &corev1.PersistentVolume{}, + &apicorev1.PersistentVolume{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *persistentVolumeInformer) defaultInformer(client kubernetes.Interface, } func (f *persistentVolumeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.PersistentVolume{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.PersistentVolume{}, f.defaultInformer) } -func (f *persistentVolumeInformer) Lister() v1.PersistentVolumeLister { - return v1.NewPersistentVolumeLister(f.Informer().GetIndexer()) +func (f *persistentVolumeInformer) Lister() corev1.PersistentVolumeLister { + return corev1.NewPersistentVolumeLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go index 7a7df1cff..27c35fec1 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PersistentVolumeClaims. 
type PersistentVolumeClaimInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PersistentVolumeClaimLister + Lister() corev1.PersistentVolumeClaimLister } type persistentVolumeClaimInformer struct { @@ -71,7 +71,7 @@ func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, names return client.CoreV1().PersistentVolumeClaims(namespace).Watch(context.TODO(), options) }, }, - &corev1.PersistentVolumeClaim{}, + &apicorev1.PersistentVolumeClaim{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *persistentVolumeClaimInformer) defaultInformer(client kubernetes.Interf } func (f *persistentVolumeClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.PersistentVolumeClaim{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.PersistentVolumeClaim{}, f.defaultInformer) } -func (f *persistentVolumeClaimInformer) Lister() v1.PersistentVolumeClaimLister { - return v1.NewPersistentVolumeClaimLister(f.Informer().GetIndexer()) +func (f *persistentVolumeClaimInformer) Lister() corev1.PersistentVolumeClaimLister { + return corev1.NewPersistentVolumeClaimLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/pod.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/pod.go index 5c713a9b6..c661704bd 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/pod.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/pod.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Pods. type PodInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PodLister + Lister() corev1.PodLister } type podInformer struct { @@ -71,7 +71,7 @@ func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyn return client.CoreV1().Pods(namespace).Watch(context.TODO(), options) }, }, - &corev1.Pod{}, + &apicorev1.Pod{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *podInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Pod{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Pod{}, f.defaultInformer) } -func (f *podInformer) Lister() v1.PodLister { - return v1.NewPodLister(f.Informer().GetIndexer()) +func (f *podInformer) Lister() corev1.PodLister { + return corev1.NewPodLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go index 2a16e910d..0d16c5b4e 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PodTemplates. type PodTemplateInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PodTemplateLister + Lister() corev1.PodTemplateLister } type podTemplateInformer struct { @@ -71,7 +71,7 @@ func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace strin return client.CoreV1().PodTemplates(namespace).Watch(context.TODO(), options) }, }, - &corev1.PodTemplate{}, + &apicorev1.PodTemplate{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podTemplateInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *podTemplateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.PodTemplate{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.PodTemplate{}, f.defaultInformer) } -func (f *podTemplateInformer) Lister() v1.PodTemplateLister { - return v1.NewPodTemplateLister(f.Informer().GetIndexer()) +func (f *podTemplateInformer) Lister() corev1.PodTemplateLister { + return corev1.NewPodTemplateLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go index 930beb4cd..5866ec151 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ReplicationControllers. 
type ReplicationControllerInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ReplicationControllerLister + Lister() corev1.ReplicationControllerLister } type replicationControllerInformer struct { @@ -71,7 +71,7 @@ func NewFilteredReplicationControllerInformer(client kubernetes.Interface, names return client.CoreV1().ReplicationControllers(namespace).Watch(context.TODO(), options) }, }, - &corev1.ReplicationController{}, + &apicorev1.ReplicationController{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *replicationControllerInformer) defaultInformer(client kubernetes.Interf } func (f *replicationControllerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.ReplicationController{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.ReplicationController{}, f.defaultInformer) } -func (f *replicationControllerInformer) Lister() v1.ReplicationControllerLister { - return v1.NewReplicationControllerLister(f.Informer().GetIndexer()) +func (f *replicationControllerInformer) Lister() corev1.ReplicationControllerLister { + return corev1.NewReplicationControllerLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go index 619262a61..999b49546 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ResourceQuotas. type ResourceQuotaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ResourceQuotaLister + Lister() corev1.ResourceQuotaLister } type resourceQuotaInformer struct { @@ -71,7 +71,7 @@ func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace str return client.CoreV1().ResourceQuotas(namespace).Watch(context.TODO(), options) }, }, - &corev1.ResourceQuota{}, + &apicorev1.ResourceQuota{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *resourceQuotaInformer) defaultInformer(client kubernetes.Interface, res } func (f *resourceQuotaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.ResourceQuota{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.ResourceQuota{}, f.defaultInformer) } -func (f *resourceQuotaInformer) Lister() v1.ResourceQuotaLister { - return v1.NewResourceQuotaLister(f.Informer().GetIndexer()) +func (f *resourceQuotaInformer) Lister() corev1.ResourceQuotaLister { + return corev1.NewResourceQuotaLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/secret.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/secret.go index a6be07069..f3d371501 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/secret.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/secret.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Secrets. type SecretInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.SecretLister + Lister() corev1.SecretLister } type secretInformer struct { @@ -71,7 +71,7 @@ func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, re return client.CoreV1().Secrets(namespace).Watch(context.TODO(), options) }, }, - &corev1.Secret{}, + &apicorev1.Secret{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *secretInformer) defaultInformer(client kubernetes.Interface, resyncPeri } func (f *secretInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Secret{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Secret{}, f.defaultInformer) } -func (f *secretInformer) Lister() v1.SecretLister { - return v1.NewSecretLister(f.Informer().GetIndexer()) +func (f *secretInformer) Lister() corev1.SecretLister { + return corev1.NewSecretLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/service.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/service.go index 3d9ecc6e9..c4bc294a3 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/service.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/service.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Services. 
type ServiceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ServiceLister + Lister() corev1.ServiceLister } type serviceInformer struct { @@ -71,7 +71,7 @@ func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, r return client.CoreV1().Services(namespace).Watch(context.TODO(), options) }, }, - &corev1.Service{}, + &apicorev1.Service{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *serviceInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *serviceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.Service{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.Service{}, f.defaultInformer) } -func (f *serviceInformer) Lister() v1.ServiceLister { - return v1.NewServiceLister(f.Informer().GetIndexer()) +func (f *serviceInformer) Lister() corev1.ServiceLister { + return corev1.NewServiceLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go b/constraint/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go index 44371c9fa..b04b44cb4 100644 --- a/constraint/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go +++ b/constraint/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - corev1 "k8s.io/api/core/v1" + apicorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/core/v1" + corev1 "k8s.io/client-go/listers/core/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ServiceAccounts. type ServiceAccountInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ServiceAccountLister + Lister() corev1.ServiceAccountLister } type serviceAccountInformer struct { @@ -71,7 +71,7 @@ func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace st return client.CoreV1().ServiceAccounts(namespace).Watch(context.TODO(), options) }, }, - &corev1.ServiceAccount{}, + &apicorev1.ServiceAccount{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *serviceAccountInformer) defaultInformer(client kubernetes.Interface, re } func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&corev1.ServiceAccount{}, f.defaultInformer) + return f.factory.InformerFor(&apicorev1.ServiceAccount{}, f.defaultInformer) } -func (f *serviceAccountInformer) Lister() v1.ServiceAccountLister { - return v1.NewServiceAccountLister(f.Informer().GetIndexer()) +func (f *serviceAccountInformer) Lister() corev1.ServiceAccountLister { + return corev1.NewServiceAccountLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go b/constraint/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go index 6c6c3372b..ec09b2d26 100644 --- a/constraint/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go +++ b/constraint/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - discoveryv1 "k8s.io/api/discovery/v1" + apidiscoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/discovery/v1" + discoveryv1 "k8s.io/client-go/listers/discovery/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // EndpointSlices. type EndpointSliceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.EndpointSliceLister + Lister() discoveryv1.EndpointSliceLister } type endpointSliceInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace str return client.DiscoveryV1().EndpointSlices(namespace).Watch(context.TODO(), options) }, }, - &discoveryv1.EndpointSlice{}, + &apidiscoveryv1.EndpointSlice{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, res } func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&discoveryv1.EndpointSlice{}, f.defaultInformer) + return f.factory.InformerFor(&apidiscoveryv1.EndpointSlice{}, f.defaultInformer) } -func (f *endpointSliceInformer) Lister() v1.EndpointSliceLister { - return v1.NewEndpointSliceLister(f.Informer().GetIndexer()) +func (f *endpointSliceInformer) Lister() discoveryv1.EndpointSliceLister { + return discoveryv1.NewEndpointSliceLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go b/constraint/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go index 69ae38a91..3af1a3be9 100644 --- a/constraint/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go +++ b/constraint/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + apidiscoveryv1beta1 "k8s.io/api/discovery/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/discovery/v1beta1" + discoveryv1beta1 "k8s.io/client-go/listers/discovery/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // EndpointSlices. 
type EndpointSliceInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.EndpointSliceLister + Lister() discoveryv1beta1.EndpointSliceLister } type endpointSliceInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace str return client.DiscoveryV1beta1().EndpointSlices(namespace).Watch(context.TODO(), options) }, }, - &discoveryv1beta1.EndpointSlice{}, + &apidiscoveryv1beta1.EndpointSlice{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *endpointSliceInformer) defaultInformer(client kubernetes.Interface, res } func (f *endpointSliceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&discoveryv1beta1.EndpointSlice{}, f.defaultInformer) + return f.factory.InformerFor(&apidiscoveryv1beta1.EndpointSlice{}, f.defaultInformer) } -func (f *endpointSliceInformer) Lister() v1beta1.EndpointSliceLister { - return v1beta1.NewEndpointSliceLister(f.Informer().GetIndexer()) +func (f *endpointSliceInformer) Lister() discoveryv1beta1.EndpointSliceLister { + return discoveryv1beta1.NewEndpointSliceLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/events/v1/event.go b/constraint/vendor/k8s.io/client-go/informers/events/v1/event.go index f8d35ee15..518d79841 100644 --- a/constraint/vendor/k8s.io/client-go/informers/events/v1/event.go +++ b/constraint/vendor/k8s.io/client-go/informers/events/v1/event.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - eventsv1 "k8s.io/api/events/v1" + apieventsv1 "k8s.io/api/events/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/events/v1" + eventsv1 "k8s.io/client-go/listers/events/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Events. type EventInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.EventLister + Lister() eventsv1.EventLister } type eventInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res return client.EventsV1().Events(namespace).Watch(context.TODO(), options) }, }, - &eventsv1.Event{}, + &apieventsv1.Event{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPerio } func (f *eventInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&eventsv1.Event{}, f.defaultInformer) + return f.factory.InformerFor(&apieventsv1.Event{}, f.defaultInformer) } -func (f *eventInformer) Lister() v1.EventLister { - return v1.NewEventLister(f.Informer().GetIndexer()) +func (f *eventInformer) Lister() eventsv1.EventLister { + return eventsv1.NewEventLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/events/v1beta1/event.go b/constraint/vendor/k8s.io/client-go/informers/events/v1beta1/event.go index 025f6a5cf..5324599bb 100644 --- a/constraint/vendor/k8s.io/client-go/informers/events/v1beta1/event.go +++ b/constraint/vendor/k8s.io/client-go/informers/events/v1beta1/event.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - eventsv1beta1 "k8s.io/api/events/v1beta1" + apieventsv1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/events/v1beta1" + eventsv1beta1 "k8s.io/client-go/listers/events/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Events. type EventInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.EventLister + Lister() eventsv1beta1.EventLister } type eventInformer struct { @@ -71,7 +71,7 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res return client.EventsV1beta1().Events(namespace).Watch(context.TODO(), options) }, }, - &eventsv1beta1.Event{}, + &apieventsv1beta1.Event{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPerio } func (f *eventInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&eventsv1beta1.Event{}, f.defaultInformer) + return f.factory.InformerFor(&apieventsv1beta1.Event{}, f.defaultInformer) } -func (f *eventInformer) Lister() v1beta1.EventLister { - return v1beta1.NewEventLister(f.Informer().GetIndexer()) +func (f *eventInformer) Lister() eventsv1beta1.EventLister { + return eventsv1beta1.NewEventLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go index 050080a59..ea77575c9 100644 --- a/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + extensionsv1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // DaemonSets. 
type DaemonSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.DaemonSetLister + Lister() extensionsv1beta1.DaemonSetLister } type daemonSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(context.TODO(), options) }, }, - &extensionsv1beta1.DaemonSet{}, + &apiextensionsv1beta1.DaemonSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.DaemonSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiextensionsv1beta1.DaemonSet{}, f.defaultInformer) } -func (f *daemonSetInformer) Lister() v1beta1.DaemonSetLister { - return v1beta1.NewDaemonSetLister(f.Informer().GetIndexer()) +func (f *daemonSetInformer) Lister() extensionsv1beta1.DaemonSetLister { + return extensionsv1beta1.NewDaemonSetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go index 1b16c5cc9..1b2770ce0 100644 --- a/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + extensionsv1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Deployments. 
type DeploymentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.DeploymentLister + Lister() extensionsv1beta1.DeploymentLister } type deploymentInformer struct { @@ -71,7 +71,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string return client.ExtensionsV1beta1().Deployments(namespace).Watch(context.TODO(), options) }, }, - &extensionsv1beta1.Deployment{}, + &apiextensionsv1beta1.Deployment{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resync } func (f *deploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.Deployment{}, f.defaultInformer) + return f.factory.InformerFor(&apiextensionsv1beta1.Deployment{}, f.defaultInformer) } -func (f *deploymentInformer) Lister() v1beta1.DeploymentLister { - return v1beta1.NewDeploymentLister(f.Informer().GetIndexer()) +func (f *deploymentInformer) Lister() extensionsv1beta1.DeploymentLister { + return extensionsv1beta1.NewDeploymentLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go index f01a88761..63e734060 100644 --- a/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + extensionsv1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Ingresses. 
type IngressInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.IngressLister + Lister() extensionsv1beta1.IngressLister } type ingressInformer struct { @@ -71,7 +71,7 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r return client.ExtensionsV1beta1().Ingresses(namespace).Watch(context.TODO(), options) }, }, - &extensionsv1beta1.Ingress{}, + &apiextensionsv1beta1.Ingress{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *ingressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.Ingress{}, f.defaultInformer) + return f.factory.InformerFor(&apiextensionsv1beta1.Ingress{}, f.defaultInformer) } -func (f *ingressInformer) Lister() v1beta1.IngressLister { - return v1beta1.NewIngressLister(f.Informer().GetIndexer()) +func (f *ingressInformer) Lister() extensionsv1beta1.IngressLister { + return extensionsv1beta1.NewIngressLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go b/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go index 4a924619f..024653af2 100644 --- a/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go +++ b/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + extensionsv1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // NetworkPolicies. 
type NetworkPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.NetworkPolicyLister + Lister() extensionsv1beta1.NetworkPolicyLister } type networkPolicyInformer struct { @@ -71,7 +71,7 @@ func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace str return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(context.TODO(), options) }, }, - &extensionsv1beta1.NetworkPolicy{}, + &apiextensionsv1beta1.NetworkPolicy{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, res } func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.NetworkPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apiextensionsv1beta1.NetworkPolicy{}, f.defaultInformer) } -func (f *networkPolicyInformer) Lister() v1beta1.NetworkPolicyLister { - return v1beta1.NewNetworkPolicyLister(f.Informer().GetIndexer()) +func (f *networkPolicyInformer) Lister() extensionsv1beta1.NetworkPolicyLister { + return extensionsv1beta1.NewNetworkPolicyLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go index f7e224bcf..392ccef86 100644 --- a/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apiextensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + extensionsv1beta1 "k8s.io/client-go/listers/extensions/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ReplicaSets. 
type ReplicaSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ReplicaSetLister + Lister() extensionsv1beta1.ReplicaSetLister } type replicaSetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(context.TODO(), options) }, }, - &extensionsv1beta1.ReplicaSet{}, + &apiextensionsv1beta1.ReplicaSet{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resync } func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&extensionsv1beta1.ReplicaSet{}, f.defaultInformer) + return f.factory.InformerFor(&apiextensionsv1beta1.ReplicaSet{}, f.defaultInformer) } -func (f *replicaSetInformer) Lister() v1beta1.ReplicaSetLister { - return v1beta1.NewReplicaSetLister(f.Informer().GetIndexer()) +func (f *replicaSetInformer) Lister() extensionsv1beta1.ReplicaSetLister { + return extensionsv1beta1.NewReplicaSetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/factory.go b/constraint/vendor/k8s.io/client-go/informers/factory.go index f2fef0e0b..86c24551e 100644 --- a/constraint/vendor/k8s.io/client-go/informers/factory.go +++ b/constraint/vendor/k8s.io/client-go/informers/factory.go @@ -247,6 +247,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. At that point no new diff --git a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go index 30c41b189..945bc351e 100644 --- a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - flowcontrolv1 "k8s.io/api/flowcontrol/v1" + apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/flowcontrol/v1" + flowcontrolv1 "k8s.io/client-go/listers/flowcontrol/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // FlowSchemas. 
type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.FlowSchemaLister + Lister() flowcontrolv1.FlowSchemaLister } type flowSchemaInformer struct { @@ -70,7 +70,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim return client.FlowcontrolV1().FlowSchemas().Watch(context.TODO(), options) }, }, - &flowcontrolv1.FlowSchema{}, + &apiflowcontrolv1.FlowSchema{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resync } func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1.FlowSchema{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1.FlowSchema{}, f.defaultInformer) } -func (f *flowSchemaInformer) Lister() v1.FlowSchemaLister { - return v1.NewFlowSchemaLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() flowcontrolv1.FlowSchemaLister { + return flowcontrolv1.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go index 7092c2572..eec6388b2 100644 --- a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - flowcontrolv1 "k8s.io/api/flowcontrol/v1" + apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/flowcontrol/v1" + flowcontrolv1 "k8s.io/client-go/listers/flowcontrol/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityLevelConfigurations. 
type PriorityLevelConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PriorityLevelConfigurationLister + Lister() flowcontrolv1.PriorityLevelConfigurationLister } type priorityLevelConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, return client.FlowcontrolV1().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, - &flowcontrolv1.PriorityLevelConfiguration{}, + &apiflowcontrolv1.PriorityLevelConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.I } func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1.PriorityLevelConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1.PriorityLevelConfiguration{}, f.defaultInformer) } -func (f *priorityLevelConfigurationInformer) Lister() v1.PriorityLevelConfigurationLister { - return v1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +func (f *priorityLevelConfigurationInformer) Lister() flowcontrolv1.PriorityLevelConfigurationLister { + return flowcontrolv1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go index 13f4ff093..30d099773 100644 --- a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + apiflowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // FlowSchemas. 
type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.FlowSchemaLister + Lister() flowcontrolv1beta1.FlowSchemaLister } type flowSchemaInformer struct { @@ -70,7 +70,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim return client.FlowcontrolV1beta1().FlowSchemas().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta1.FlowSchema{}, + &apiflowcontrolv1beta1.FlowSchema{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resync } func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta1.FlowSchema{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta1.FlowSchema{}, f.defaultInformer) } -func (f *flowSchemaInformer) Lister() v1beta1.FlowSchemaLister { - return v1beta1.NewFlowSchemaLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() flowcontrolv1beta1.FlowSchemaLister { + return flowcontrolv1beta1.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go index fa4835906..2a8a867c4 100644 --- a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + apiflowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/client-go/listers/flowcontrol/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityLevelConfigurations. 
type PriorityLevelConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.PriorityLevelConfigurationLister + Lister() flowcontrolv1beta1.PriorityLevelConfigurationLister } type priorityLevelConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, return client.FlowcontrolV1beta1().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta1.PriorityLevelConfiguration{}, + &apiflowcontrolv1beta1.PriorityLevelConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.I } func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta1.PriorityLevelConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta1.PriorityLevelConfiguration{}, f.defaultInformer) } -func (f *priorityLevelConfigurationInformer) Lister() v1beta1.PriorityLevelConfigurationLister { - return v1beta1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +func (f *priorityLevelConfigurationInformer) Lister() flowcontrolv1beta1.PriorityLevelConfigurationLister { + return flowcontrolv1beta1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go index 6f6abecea..edfed12c5 100644 --- a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" time "time" - flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + apiflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // FlowSchemas. 
type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.FlowSchemaLister + Lister() flowcontrolv1beta2.FlowSchemaLister } type flowSchemaInformer struct { @@ -70,7 +70,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim return client.FlowcontrolV1beta2().FlowSchemas().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta2.FlowSchema{}, + &apiflowcontrolv1beta2.FlowSchema{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resync } func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta2.FlowSchema{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta2.FlowSchema{}, f.defaultInformer) } -func (f *flowSchemaInformer) Lister() v1beta2.FlowSchemaLister { - return v1beta2.NewFlowSchemaLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() flowcontrolv1beta2.FlowSchemaLister { + return flowcontrolv1beta2.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go index 306a90185..624e0373e 100644 --- a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta2 import ( - "context" + context "context" time "time" - flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + apiflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/client-go/listers/flowcontrol/v1beta2" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityLevelConfigurations. 
type PriorityLevelConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta2.PriorityLevelConfigurationLister + Lister() flowcontrolv1beta2.PriorityLevelConfigurationLister } type priorityLevelConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, return client.FlowcontrolV1beta2().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta2.PriorityLevelConfiguration{}, + &apiflowcontrolv1beta2.PriorityLevelConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.I } func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta2.PriorityLevelConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta2.PriorityLevelConfiguration{}, f.defaultInformer) } -func (f *priorityLevelConfigurationInformer) Lister() v1beta2.PriorityLevelConfigurationLister { - return v1beta2.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +func (f *priorityLevelConfigurationInformer) Lister() flowcontrolv1beta2.PriorityLevelConfigurationLister { + return flowcontrolv1beta2.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go index 56d8c8b11..bd3f5e6ed 100644 --- a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta3 import ( - "context" + context "context" time "time" - flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + apiflowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // FlowSchemas. 
type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta3.FlowSchemaLister + Lister() flowcontrolv1beta3.FlowSchemaLister } type flowSchemaInformer struct { @@ -70,7 +70,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim return client.FlowcontrolV1beta3().FlowSchemas().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta3.FlowSchema{}, + &apiflowcontrolv1beta3.FlowSchema{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resync } func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta3.FlowSchema{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta3.FlowSchema{}, f.defaultInformer) } -func (f *flowSchemaInformer) Lister() v1beta3.FlowSchemaLister { - return v1beta3.NewFlowSchemaLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() flowcontrolv1beta3.FlowSchemaLister { + return flowcontrolv1beta3.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go index 71f8d5b07..5695d5d4d 100644 --- a/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta3 import ( - "context" + context "context" time "time" - flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + apiflowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/client-go/listers/flowcontrol/v1beta3" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityLevelConfigurations. 
type PriorityLevelConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta3.PriorityLevelConfigurationLister + Lister() flowcontrolv1beta3.PriorityLevelConfigurationLister } type priorityLevelConfigurationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, return client.FlowcontrolV1beta3().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, - &flowcontrolv1beta3.PriorityLevelConfiguration{}, + &apiflowcontrolv1beta3.PriorityLevelConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.I } func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1beta3.PriorityLevelConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&apiflowcontrolv1beta3.PriorityLevelConfiguration{}, f.defaultInformer) } -func (f *priorityLevelConfigurationInformer) Lister() v1beta3.PriorityLevelConfigurationLister { - return v1beta3.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +func (f *priorityLevelConfigurationInformer) Lister() flowcontrolv1beta3.PriorityLevelConfigurationLister { + return flowcontrolv1beta3.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/generic.go b/constraint/vendor/k8s.io/client-go/informers/generic.go index d85117587..fd331686d 100644 --- a/constraint/vendor/k8s.io/client-go/informers/generic.go +++ b/constraint/vendor/k8s.io/client-go/informers/generic.go @@ -19,7 +19,7 @@ limitations under the License. package informers import ( - "fmt" + fmt "fmt" v1 "k8s.io/api/admissionregistration/v1" v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" @@ -38,6 +38,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" + v1alpha2 "k8s.io/api/coordination/v1alpha2" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -60,7 +61,8 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - v1alpha2 "k8s.io/api/resource/v1alpha2" + v1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1beta1 "k8s.io/api/resource/v1beta1" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -109,6 +111,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer()}, nil // Group=admissionregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("mutatingadmissionpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().MutatingAdmissionPolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("mutatingadmissionpolicybindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().MutatingAdmissionPolicyBindings().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: 
f.Admissionregistration().V1alpha1().ValidatingAdmissionPolicies().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"): @@ -198,6 +204,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case coordinationv1.SchemeGroupVersion.WithResource("leases"): return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1().Leases().Informer()}, nil + // Group=coordination.k8s.io, Version=v1alpha2 + case v1alpha2.SchemeGroupVersion.WithResource("leasecandidates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1alpha2().LeaseCandidates().Informer()}, nil + // Group=coordination.k8s.io, Version=v1beta1 case coordinationv1beta1.SchemeGroupVersion.WithResource("leases"): return &genericInformer{resource: resource.GroupResource(), informer: f.Coordination().V1beta1().Leases().Informer()}, nil @@ -307,10 +317,14 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ServiceCIDRs().Informer()}, nil // Group=networking.k8s.io, Version=v1beta1 + case networkingv1beta1.SchemeGroupVersion.WithResource("ipaddresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().IPAddresses().Informer()}, nil case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil case networkingv1beta1.SchemeGroupVersion.WithResource("ingressclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().IngressClasses().Informer()}, nil + case networkingv1beta1.SchemeGroupVersion.WithResource("servicecidrs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().ServiceCIDRs().Informer()}, nil // Group=node.k8s.io, Version=v1 case nodev1.SchemeGroupVersion.WithResource("runtimeclasses"): @@ -362,21 +376,25 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbacv1beta1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil - // Group=resource.k8s.io, Version=v1alpha2 - case v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().PodSchedulingContexts().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclaims"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaims().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclaimparameters"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaimParameters().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaimTemplates().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClasses().Informer()}, nil - case 
v1alpha2.SchemeGroupVersion.WithResource("resourceclassparameters"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClassParameters().Informer()}, nil - case v1alpha2.SchemeGroupVersion.WithResource("resourceslices"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceSlices().Informer()}, nil + // Group=resource.k8s.io, Version=v1alpha3 + case v1alpha3.SchemeGroupVersion.WithResource("deviceclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().DeviceClasses().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("resourceclaims"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceClaims().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceClaimTemplates().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("resourceslices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceSlices().Informer()}, nil + + // Group=resource.k8s.io, Version=v1beta1 + case resourcev1beta1.SchemeGroupVersion.WithResource("deviceclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1beta1().DeviceClasses().Informer()}, nil + case resourcev1beta1.SchemeGroupVersion.WithResource("resourceclaims"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1beta1().ResourceClaims().Informer()}, nil + case resourcev1beta1.SchemeGroupVersion.WithResource("resourceclaimtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1beta1().ResourceClaimTemplates().Informer()}, nil + case resourcev1beta1.SchemeGroupVersion.WithResource("resourceslices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1beta1().ResourceSlices().Informer()}, nil // Group=scheduling.k8s.io, Version=v1 case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"): @@ -421,6 +439,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().StorageClasses().Informer()}, nil case storagev1beta1.SchemeGroupVersion.WithResource("volumeattachments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().VolumeAttachments().Informer()}, nil + case storagev1beta1.SchemeGroupVersion.WithResource("volumeattributesclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().VolumeAttributesClasses().Informer()}, nil // Group=storagemigration.k8s.io, Version=v1alpha1 case storagemigrationv1alpha1.SchemeGroupVersion.WithResource("storageversionmigrations"): diff --git a/constraint/vendor/k8s.io/client-go/informers/networking/v1/ingress.go b/constraint/vendor/k8s.io/client-go/informers/networking/v1/ingress.go index 06c317ad3..a0deccf16 100644 --- a/constraint/vendor/k8s.io/client-go/informers/networking/v1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/informers/networking/v1/ingress.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - networkingv1 "k8s.io/api/networking/v1" + apinetworkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/networking/v1" + networkingv1 "k8s.io/client-go/listers/networking/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Ingresses. type IngressInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.IngressLister + Lister() networkingv1.IngressLister } type ingressInformer struct { @@ -71,7 +71,7 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r return client.NetworkingV1().Ingresses(namespace).Watch(context.TODO(), options) }, }, - &networkingv1.Ingress{}, + &apinetworkingv1.Ingress{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *ingressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1.Ingress{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1.Ingress{}, f.defaultInformer) } -func (f *ingressInformer) Lister() v1.IngressLister { - return v1.NewIngressLister(f.Informer().GetIndexer()) +func (f *ingressInformer) Lister() networkingv1.IngressLister { + return networkingv1.NewIngressLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go b/constraint/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go index 15514745b..7eb174516 100644 --- a/constraint/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - networkingv1 "k8s.io/api/networking/v1" + apinetworkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/networking/v1" + networkingv1 "k8s.io/client-go/listers/networking/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // IngressClasses. 
type IngressClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.IngressClassLister + Lister() networkingv1.IngressClassLister } type ingressClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredIngressClassInformer(client kubernetes.Interface, resyncPeriod t return client.NetworkingV1().IngressClasses().Watch(context.TODO(), options) }, }, - &networkingv1.IngressClass{}, + &apinetworkingv1.IngressClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *ingressClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *ingressClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1.IngressClass{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1.IngressClass{}, f.defaultInformer) } -func (f *ingressClassInformer) Lister() v1.IngressClassLister { - return v1.NewIngressClassLister(f.Informer().GetIndexer()) +func (f *ingressClassInformer) Lister() networkingv1.IngressClassLister { + return networkingv1.NewIngressClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/constraint/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go index a75c9ac21..d4bac2911 100644 --- a/constraint/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go +++ b/constraint/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - networkingv1 "k8s.io/api/networking/v1" + apinetworkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/networking/v1" + networkingv1 "k8s.io/client-go/listers/networking/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // NetworkPolicies. 
type NetworkPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.NetworkPolicyLister + Lister() networkingv1.NetworkPolicyLister } type networkPolicyInformer struct { @@ -71,7 +71,7 @@ func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace str return client.NetworkingV1().NetworkPolicies(namespace).Watch(context.TODO(), options) }, }, - &networkingv1.NetworkPolicy{}, + &apinetworkingv1.NetworkPolicy{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, res } func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1.NetworkPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1.NetworkPolicy{}, f.defaultInformer) } -func (f *networkPolicyInformer) Lister() v1.NetworkPolicyLister { - return v1.NewNetworkPolicyLister(f.Informer().GetIndexer()) +func (f *networkPolicyInformer) Lister() networkingv1.NetworkPolicyLister { + return networkingv1.NewNetworkPolicyLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go b/constraint/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go index a1083dbf0..f04c14535 100644 --- a/constraint/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go +++ b/constraint/vendor/k8s.io/client-go/informers/networking/v1alpha1/ipaddress.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + apinetworkingv1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" + networkingv1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // IPAddresses. 
type IPAddressInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.IPAddressLister + Lister() networkingv1alpha1.IPAddressLister } type iPAddressInformer struct { @@ -70,7 +70,7 @@ func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time return client.NetworkingV1alpha1().IPAddresses().Watch(context.TODO(), options) }, }, - &networkingv1alpha1.IPAddress{}, + &apinetworkingv1alpha1.IPAddress{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *iPAddressInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *iPAddressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1alpha1.IPAddress{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1alpha1.IPAddress{}, f.defaultInformer) } -func (f *iPAddressInformer) Lister() v1alpha1.IPAddressLister { - return v1alpha1.NewIPAddressLister(f.Informer().GetIndexer()) +func (f *iPAddressInformer) Lister() networkingv1alpha1.IPAddressLister { + return networkingv1alpha1.NewIPAddressLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go b/constraint/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go index 57e602143..86af6d226 100644 --- a/constraint/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go +++ b/constraint/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + apinetworkingv1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" + networkingv1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ServiceCIDRs. 
type ServiceCIDRInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ServiceCIDRLister + Lister() networkingv1alpha1.ServiceCIDRLister } type serviceCIDRInformer struct { @@ -70,7 +70,7 @@ func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod ti return client.NetworkingV1alpha1().ServiceCIDRs().Watch(context.TODO(), options) }, }, - &networkingv1alpha1.ServiceCIDR{}, + &apinetworkingv1alpha1.ServiceCIDR{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1alpha1.ServiceCIDR{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1alpha1.ServiceCIDR{}, f.defaultInformer) } -func (f *serviceCIDRInformer) Lister() v1alpha1.ServiceCIDRLister { - return v1alpha1.NewServiceCIDRLister(f.Informer().GetIndexer()) +func (f *serviceCIDRInformer) Lister() networkingv1alpha1.ServiceCIDRLister { + return networkingv1alpha1.NewServiceCIDRLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go b/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go index 8800d6c9c..aa337d8e7 100644 --- a/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - networkingv1beta1 "k8s.io/api/networking/v1beta1" + apinetworkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/networking/v1beta1" + networkingv1beta1 "k8s.io/client-go/listers/networking/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Ingresses. 
type IngressInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.IngressLister + Lister() networkingv1beta1.IngressLister } type ingressInformer struct { @@ -71,7 +71,7 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r return client.NetworkingV1beta1().Ingresses(namespace).Watch(context.TODO(), options) }, }, - &networkingv1beta1.Ingress{}, + &apinetworkingv1beta1.Ingress{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *ingressInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1beta1.Ingress{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1beta1.Ingress{}, f.defaultInformer) } -func (f *ingressInformer) Lister() v1beta1.IngressLister { - return v1beta1.NewIngressLister(f.Informer().GetIndexer()) +func (f *ingressInformer) Lister() networkingv1beta1.IngressLister { + return networkingv1beta1.NewIngressLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go b/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go index 17864299b..6ff9d5169 100644 --- a/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - networkingv1beta1 "k8s.io/api/networking/v1beta1" + apinetworkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/networking/v1beta1" + networkingv1beta1 "k8s.io/client-go/listers/networking/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // IngressClasses. 
type IngressClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.IngressClassLister + Lister() networkingv1beta1.IngressClassLister } type ingressClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredIngressClassInformer(client kubernetes.Interface, resyncPeriod t return client.NetworkingV1beta1().IngressClasses().Watch(context.TODO(), options) }, }, - &networkingv1beta1.IngressClass{}, + &apinetworkingv1beta1.IngressClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *ingressClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *ingressClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1beta1.IngressClass{}, f.defaultInformer) + return f.factory.InformerFor(&apinetworkingv1beta1.IngressClass{}, f.defaultInformer) } -func (f *ingressClassInformer) Lister() v1beta1.IngressClassLister { - return v1beta1.NewIngressClassLister(f.Informer().GetIndexer()) +func (f *ingressClassInformer) Lister() networkingv1beta1.IngressClassLister { + return networkingv1beta1.NewIngressClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go b/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go index 2dcc3129a..974a8fd5b 100644 --- a/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go +++ b/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/interface.go @@ -24,10 +24,14 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // IPAddresses returns a IPAddressInformer. + IPAddresses() IPAddressInformer // Ingresses returns a IngressInformer. Ingresses() IngressInformer // IngressClasses returns a IngressClassInformer. IngressClasses() IngressClassInformer + // ServiceCIDRs returns a ServiceCIDRInformer. + ServiceCIDRs() ServiceCIDRInformer } type version struct { @@ -41,6 +45,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// IPAddresses returns a IPAddressInformer. +func (v *version) IPAddresses() IPAddressInformer { + return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // Ingresses returns a IngressInformer. func (v *version) Ingresses() IngressInformer { return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} @@ -50,3 +59,8 @@ func (v *version) Ingresses() IngressInformer { func (v *version) IngressClasses() IngressClassInformer { return &ingressClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// ServiceCIDRs returns a ServiceCIDRInformer. +func (v *version) ServiceCIDRs() ServiceCIDRInformer { + return &serviceCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go b/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go new file mode 100644 index 000000000..401ecd7cb --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + time "time" + + apinetworkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + networkingv1beta1 "k8s.io/client-go/listers/networking/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// IPAddressInformer provides access to a shared informer and lister for +// IPAddresses. +type IPAddressInformer interface { + Informer() cache.SharedIndexInformer + Lister() networkingv1beta1.IPAddressLister +} + +type iPAddressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewIPAddressInformer constructs a new informer for IPAddress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIPAddressInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredIPAddressInformer constructs a new informer for IPAddress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().IPAddresses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().IPAddresses().Watch(context.TODO(), options) + }, + }, + &apinetworkingv1beta1.IPAddress{}, + resyncPeriod, + indexers, + ) +} + +func (f *iPAddressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIPAddressInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *iPAddressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apinetworkingv1beta1.IPAddress{}, f.defaultInformer) +} + +func (f *iPAddressInformer) Lister() networkingv1beta1.IPAddressLister { + return networkingv1beta1.NewIPAddressLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go b/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go new file mode 100644 index 000000000..ff40692f2 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + time "time" + + apinetworkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + networkingv1beta1 "k8s.io/client-go/listers/networking/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// ServiceCIDRInformer provides access to a shared informer and lister for +// ServiceCIDRs. +type ServiceCIDRInformer interface { + Informer() cache.SharedIndexInformer + Lister() networkingv1beta1.ServiceCIDRLister +} + +type serviceCIDRInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewServiceCIDRInformer constructs a new informer for ServiceCIDR type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceCIDRInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceCIDRInformer constructs a new informer for ServiceCIDR type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().ServiceCIDRs().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1beta1().ServiceCIDRs().Watch(context.TODO(), options) + }, + }, + &apinetworkingv1beta1.ServiceCIDR{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apinetworkingv1beta1.ServiceCIDR{}, f.defaultInformer) +} + +func (f *serviceCIDRInformer) Lister() networkingv1beta1.ServiceCIDRLister { + return networkingv1beta1.NewServiceCIDRLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go index 293f4e2e2..7fef7e332 100644 --- a/constraint/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - nodev1 "k8s.io/api/node/v1" + apinodev1 "k8s.io/api/node/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/node/v1" + nodev1 "k8s.io/client-go/listers/node/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RuntimeClasses. 
type RuntimeClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.RuntimeClassLister + Lister() nodev1.RuntimeClassLister } type runtimeClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t return client.NodeV1().RuntimeClasses().Watch(context.TODO(), options) }, }, - &nodev1.RuntimeClass{}, + &apinodev1.RuntimeClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&nodev1.RuntimeClass{}, f.defaultInformer) + return f.factory.InformerFor(&apinodev1.RuntimeClass{}, f.defaultInformer) } -func (f *runtimeClassInformer) Lister() v1.RuntimeClassLister { - return v1.NewRuntimeClassLister(f.Informer().GetIndexer()) +func (f *runtimeClassInformer) Lister() nodev1.RuntimeClassLister { + return nodev1.NewRuntimeClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go index d314a9573..aee61406f 100644 --- a/constraint/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - nodev1alpha1 "k8s.io/api/node/v1alpha1" + apinodev1alpha1 "k8s.io/api/node/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/node/v1alpha1" + nodev1alpha1 "k8s.io/client-go/listers/node/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RuntimeClasses. type RuntimeClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.RuntimeClassLister + Lister() nodev1alpha1.RuntimeClassLister } type runtimeClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t return client.NodeV1alpha1().RuntimeClasses().Watch(context.TODO(), options) }, }, - &nodev1alpha1.RuntimeClass{}, + &apinodev1alpha1.RuntimeClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&nodev1alpha1.RuntimeClass{}, f.defaultInformer) + return f.factory.InformerFor(&apinodev1alpha1.RuntimeClass{}, f.defaultInformer) } -func (f *runtimeClassInformer) Lister() v1alpha1.RuntimeClassLister { - return v1alpha1.NewRuntimeClassLister(f.Informer().GetIndexer()) +func (f *runtimeClassInformer) Lister() nodev1alpha1.RuntimeClassLister { + return nodev1alpha1.NewRuntimeClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go index 07619b230..ab9b8e0ee 100644 --- a/constraint/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - nodev1beta1 "k8s.io/api/node/v1beta1" + apinodev1beta1 "k8s.io/api/node/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/node/v1beta1" + nodev1beta1 "k8s.io/client-go/listers/node/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RuntimeClasses. type RuntimeClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.RuntimeClassLister + Lister() nodev1beta1.RuntimeClassLister } type runtimeClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t return client.NodeV1beta1().RuntimeClasses().Watch(context.TODO(), options) }, }, - &nodev1beta1.RuntimeClass{}, + &apinodev1beta1.RuntimeClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *runtimeClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *runtimeClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&nodev1beta1.RuntimeClass{}, f.defaultInformer) + return f.factory.InformerFor(&apinodev1beta1.RuntimeClass{}, f.defaultInformer) } -func (f *runtimeClassInformer) Lister() v1beta1.RuntimeClassLister { - return v1beta1.NewRuntimeClassLister(f.Informer().GetIndexer()) +func (f *runtimeClassInformer) Lister() nodev1beta1.RuntimeClassLister { + return nodev1beta1.NewRuntimeClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go b/constraint/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go index 436598512..baacb59da 100644 --- a/constraint/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go +++ b/constraint/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - policyv1 "k8s.io/api/policy/v1" + apipolicyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/policy/v1" + policyv1 "k8s.io/client-go/listers/policy/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PodDisruptionBudgets. 
type PodDisruptionBudgetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PodDisruptionBudgetLister + Lister() policyv1.PodDisruptionBudgetLister } type podDisruptionBudgetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespa return client.PolicyV1().PodDisruptionBudgets(namespace).Watch(context.TODO(), options) }, }, - &policyv1.PodDisruptionBudget{}, + &apipolicyv1.PodDisruptionBudget{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podDisruptionBudgetInformer) defaultInformer(client kubernetes.Interfac } func (f *podDisruptionBudgetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&policyv1.PodDisruptionBudget{}, f.defaultInformer) + return f.factory.InformerFor(&apipolicyv1.PodDisruptionBudget{}, f.defaultInformer) } -func (f *podDisruptionBudgetInformer) Lister() v1.PodDisruptionBudgetLister { - return v1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer()) +func (f *podDisruptionBudgetInformer) Lister() policyv1.PodDisruptionBudgetLister { + return policyv1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/constraint/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go index 4530343ec..081b2e08e 100644 --- a/constraint/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go +++ b/constraint/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - policyv1beta1 "k8s.io/api/policy/v1beta1" + apipolicyv1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/policy/v1beta1" + policyv1beta1 "k8s.io/client-go/listers/policy/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PodDisruptionBudgets. 
type PodDisruptionBudgetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.PodDisruptionBudgetLister + Lister() policyv1beta1.PodDisruptionBudgetLister } type podDisruptionBudgetInformer struct { @@ -71,7 +71,7 @@ func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespa return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(context.TODO(), options) }, }, - &policyv1beta1.PodDisruptionBudget{}, + &apipolicyv1beta1.PodDisruptionBudget{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *podDisruptionBudgetInformer) defaultInformer(client kubernetes.Interfac } func (f *podDisruptionBudgetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&policyv1beta1.PodDisruptionBudget{}, f.defaultInformer) + return f.factory.InformerFor(&apipolicyv1beta1.PodDisruptionBudget{}, f.defaultInformer) } -func (f *podDisruptionBudgetInformer) Lister() v1beta1.PodDisruptionBudgetLister { - return v1beta1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer()) +func (f *podDisruptionBudgetInformer) Lister() policyv1beta1.PodDisruptionBudgetLister { + return policyv1beta1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go index 0572be264..0606fb464 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - rbacv1 "k8s.io/api/rbac/v1" + apirbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/rbac/v1" + rbacv1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoles. type ClusterRoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ClusterRoleLister + Lister() rbacv1.ClusterRoleLister } type clusterRoleInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti return client.RbacV1().ClusterRoles().Watch(context.TODO(), options) }, }, - &rbacv1.ClusterRole{}, + &apirbacv1.ClusterRole{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1.ClusterRole{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1.ClusterRole{}, f.defaultInformer) } -func (f *clusterRoleInformer) Lister() v1.ClusterRoleLister { - return v1.NewClusterRoleLister(f.Informer().GetIndexer()) +func (f *clusterRoleInformer) Lister() rbacv1.ClusterRoleLister { + return rbacv1.NewClusterRoleLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go index 51026c055..dca087c9d 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - rbacv1 "k8s.io/api/rbac/v1" + apirbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/rbac/v1" + rbacv1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoleBindings. type ClusterRoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ClusterRoleBindingLister + Lister() rbacv1.ClusterRoleBindingLister } type clusterRoleBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe return client.RbacV1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, - &rbacv1.ClusterRoleBinding{}, + &apirbacv1.ClusterRoleBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface } func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1.ClusterRoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1.ClusterRoleBinding{}, f.defaultInformer) } -func (f *clusterRoleBindingInformer) Lister() v1.ClusterRoleBindingLister { - return v1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +func (f *clusterRoleBindingInformer) Lister() rbacv1.ClusterRoleBindingLister { + return rbacv1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1/role.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1/role.go index 986a5f29f..66f9c3f23 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1/role.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1/role.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - rbacv1 "k8s.io/api/rbac/v1" + apirbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/rbac/v1" + rbacv1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Roles. 
type RoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.RoleLister + Lister() rbacv1.RoleLister } type roleInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy return client.RbacV1().Roles(namespace).Watch(context.TODO(), options) }, }, - &rbacv1.Role{}, + &apirbacv1.Role{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *roleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1.Role{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1.Role{}, f.defaultInformer) } -func (f *roleInformer) Lister() v1.RoleLister { - return v1.NewRoleLister(f.Informer().GetIndexer()) +func (f *roleInformer) Lister() rbacv1.RoleLister { + return rbacv1.NewRoleLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go index 0264049fb..6d72601a4 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - rbacv1 "k8s.io/api/rbac/v1" + apirbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/rbac/v1" + rbacv1 "k8s.io/client-go/listers/rbac/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RoleBindings. type RoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.RoleBindingLister + Lister() rbacv1.RoleBindingLister } type roleBindingInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin return client.RbacV1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, - &rbacv1.RoleBinding{}, + &apirbacv1.RoleBinding{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1.RoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1.RoleBinding{}, f.defaultInformer) } -func (f *roleBindingInformer) Lister() v1.RoleBindingLister { - return v1.NewRoleBindingLister(f.Informer().GetIndexer()) +func (f *roleBindingInformer) Lister() rbacv1.RoleBindingLister { + return rbacv1.NewRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go index 70d9885f0..52249f6b4 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" time "time" - rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + apirbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoles. type ClusterRoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterRoleLister + Lister() rbacv1alpha1.ClusterRoleLister } type clusterRoleInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti return client.RbacV1alpha1().ClusterRoles().Watch(context.TODO(), options) }, }, - &rbacv1alpha1.ClusterRole{}, + &apirbacv1alpha1.ClusterRole{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1alpha1.ClusterRole{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1alpha1.ClusterRole{}, f.defaultInformer) } -func (f *clusterRoleInformer) Lister() v1alpha1.ClusterRoleLister { - return v1alpha1.NewClusterRoleLister(f.Informer().GetIndexer()) +func (f *clusterRoleInformer) Lister() rbacv1alpha1.ClusterRoleLister { + return rbacv1alpha1.NewClusterRoleLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go index 8c18f6792..c8f7c4c10 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + apirbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoleBindings. 
type ClusterRoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterRoleBindingLister + Lister() rbacv1alpha1.ClusterRoleBindingLister } type clusterRoleBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe return client.RbacV1alpha1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, - &rbacv1alpha1.ClusterRoleBinding{}, + &apirbacv1alpha1.ClusterRoleBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface } func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1alpha1.ClusterRoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1alpha1.ClusterRoleBinding{}, f.defaultInformer) } -func (f *clusterRoleBindingInformer) Lister() v1alpha1.ClusterRoleBindingLister { - return v1alpha1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +func (f *clusterRoleBindingInformer) Lister() rbacv1alpha1.ClusterRoleBindingLister { + return rbacv1alpha1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go index 7dc4551d9..dcdddc057 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + apirbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Roles. type RoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.RoleLister + Lister() rbacv1alpha1.RoleLister } type roleInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy return client.RbacV1alpha1().Roles(namespace).Watch(context.TODO(), options) }, }, - &rbacv1alpha1.Role{}, + &apirbacv1alpha1.Role{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *roleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1alpha1.Role{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1alpha1.Role{}, f.defaultInformer) } -func (f *roleInformer) Lister() v1alpha1.RoleLister { - return v1alpha1.NewRoleLister(f.Informer().GetIndexer()) +func (f *roleInformer) Lister() rbacv1alpha1.RoleLister { + return rbacv1alpha1.NewRoleLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go index d49ec8b36..9184a5baf 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1alpha1 import ( - "context" + context "context" time "time" - rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + apirbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RoleBindings. type RoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.RoleBindingLister + Lister() rbacv1alpha1.RoleBindingLister } type roleBindingInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin return client.RbacV1alpha1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, - &rbacv1alpha1.RoleBinding{}, + &apirbacv1alpha1.RoleBinding{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1alpha1.RoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1alpha1.RoleBinding{}, f.defaultInformer) } -func (f *roleBindingInformer) Lister() v1alpha1.RoleBindingLister { - return v1alpha1.NewRoleBindingLister(f.Informer().GetIndexer()) +func (f *roleBindingInformer) Lister() rbacv1alpha1.RoleBindingLister { + return rbacv1alpha1.NewRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go index e50e1d393..d86dd771a 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" + apirbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + rbacv1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoles. 
type ClusterRoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ClusterRoleLister + Lister() rbacv1beta1.ClusterRoleLister } type clusterRoleInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti return client.RbacV1beta1().ClusterRoles().Watch(context.TODO(), options) }, }, - &rbacv1beta1.ClusterRole{}, + &apirbacv1beta1.ClusterRole{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1beta1.ClusterRole{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1beta1.ClusterRole{}, f.defaultInformer) } -func (f *clusterRoleInformer) Lister() v1beta1.ClusterRoleLister { - return v1beta1.NewClusterRoleLister(f.Informer().GetIndexer()) +func (f *clusterRoleInformer) Lister() rbacv1beta1.ClusterRoleLister { + return rbacv1beta1.NewClusterRoleLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go index a7ea4cd38..70c1cd984 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" + apirbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + rbacv1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // ClusterRoleBindings. 
type ClusterRoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.ClusterRoleBindingLister + Lister() rbacv1beta1.ClusterRoleBindingLister } type clusterRoleBindingInformer struct { @@ -70,7 +70,7 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe return client.RbacV1beta1().ClusterRoleBindings().Watch(context.TODO(), options) }, }, - &rbacv1beta1.ClusterRoleBinding{}, + &apirbacv1beta1.ClusterRoleBinding{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface } func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1beta1.ClusterRoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1beta1.ClusterRoleBinding{}, f.defaultInformer) } -func (f *clusterRoleBindingInformer) Lister() v1beta1.ClusterRoleBindingLister { - return v1beta1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +func (f *clusterRoleBindingInformer) Lister() rbacv1beta1.ClusterRoleBindingLister { + return rbacv1beta1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go index e56961e81..2995e1e63 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" + apirbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + rbacv1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // Roles. type RoleInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.RoleLister + Lister() rbacv1beta1.RoleLister } type roleInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy return client.RbacV1beta1().Roles(namespace).Watch(context.TODO(), options) }, }, - &rbacv1beta1.Role{}, + &apirbacv1beta1.Role{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod } func (f *roleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1beta1.Role{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1beta1.Role{}, f.defaultInformer) } -func (f *roleInformer) Lister() v1beta1.RoleLister { - return v1beta1.NewRoleLister(f.Informer().GetIndexer()) +func (f *roleInformer) Lister() rbacv1beta1.RoleLister { + return rbacv1beta1.NewRoleLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go index d893882db..11854f38d 100644 --- a/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" + apirbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + rbacv1beta1 "k8s.io/client-go/listers/rbac/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // RoleBindings. type RoleBindingInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.RoleBindingLister + Lister() rbacv1beta1.RoleBindingLister } type roleBindingInformer struct { @@ -71,7 +71,7 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin return client.RbacV1beta1().RoleBindings(namespace).Watch(context.TODO(), options) }, }, - &rbacv1beta1.RoleBinding{}, + &apirbacv1beta1.RoleBinding{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyn } func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&rbacv1beta1.RoleBinding{}, f.defaultInformer) + return f.factory.InformerFor(&apirbacv1beta1.RoleBinding{}, f.defaultInformer) } -func (f *roleBindingInformer) Lister() v1beta1.RoleBindingLister { - return v1beta1.NewRoleBindingLister(f.Informer().GetIndexer()) +func (f *roleBindingInformer) Lister() rbacv1beta1.RoleBindingLister { + return rbacv1beta1.NewRoleBindingLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/interface.go b/constraint/vendor/k8s.io/client-go/informers/resource/interface.go index 3fcce8ae9..0d75732af 100644 --- a/constraint/vendor/k8s.io/client-go/informers/resource/interface.go +++ b/constraint/vendor/k8s.io/client-go/informers/resource/interface.go @@ -20,13 +20,16 @@ package resource import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - v1alpha2 "k8s.io/client-go/informers/resource/v1alpha2" + v1alpha3 "k8s.io/client-go/informers/resource/v1alpha3" + v1beta1 "k8s.io/client-go/informers/resource/v1beta1" ) // Interface provides access to each of this group's versions. type Interface interface { - // V1alpha2 provides access to shared informers for resources in V1alpha2. - V1alpha2() v1alpha2.Interface + // V1alpha3 provides access to shared informers for resources in V1alpha3. + V1alpha3() v1alpha3.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface } type group struct { @@ -40,7 +43,12 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// V1alpha2 returns a new v1alpha2.Interface. -func (g *group) V1alpha2() v1alpha2.Interface { - return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) +// V1alpha3 returns a new v1alpha3.Interface. +func (g *group) V1alpha3() v1alpha3.Interface { + return v1alpha3.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. 
+func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go deleted file mode 100644 index aa4a5ae7d..000000000 --- a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/interface.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // PodSchedulingContexts returns a PodSchedulingContextInformer. - PodSchedulingContexts() PodSchedulingContextInformer - // ResourceClaims returns a ResourceClaimInformer. - ResourceClaims() ResourceClaimInformer - // ResourceClaimParameters returns a ResourceClaimParametersInformer. - ResourceClaimParameters() ResourceClaimParametersInformer - // ResourceClaimTemplates returns a ResourceClaimTemplateInformer. - ResourceClaimTemplates() ResourceClaimTemplateInformer - // ResourceClasses returns a ResourceClassInformer. - ResourceClasses() ResourceClassInformer - // ResourceClassParameters returns a ResourceClassParametersInformer. - ResourceClassParameters() ResourceClassParametersInformer - // ResourceSlices returns a ResourceSliceInformer. - ResourceSlices() ResourceSliceInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// PodSchedulingContexts returns a PodSchedulingContextInformer. -func (v *version) PodSchedulingContexts() PodSchedulingContextInformer { - return &podSchedulingContextInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// ResourceClaims returns a ResourceClaimInformer. -func (v *version) ResourceClaims() ResourceClaimInformer { - return &resourceClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// ResourceClaimParameters returns a ResourceClaimParametersInformer. -func (v *version) ResourceClaimParameters() ResourceClaimParametersInformer { - return &resourceClaimParametersInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// ResourceClaimTemplates returns a ResourceClaimTemplateInformer. 
-func (v *version) ResourceClaimTemplates() ResourceClaimTemplateInformer { - return &resourceClaimTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// ResourceClasses returns a ResourceClassInformer. -func (v *version) ResourceClasses() ResourceClassInformer { - return &resourceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - -// ResourceClassParameters returns a ResourceClassParametersInformer. -func (v *version) ResourceClassParameters() ResourceClassParametersInformer { - return &resourceClassParametersInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// ResourceSlices returns a ResourceSliceInformer. -func (v *version) ResourceSlices() ResourceSliceInformer { - return &resourceSliceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go deleted file mode 100644 index b4aabb376..000000000 --- a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/podschedulingcontext.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - time "time" - - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" - cache "k8s.io/client-go/tools/cache" -) - -// PodSchedulingContextInformer provides access to a shared informer and lister for -// PodSchedulingContexts. -type PodSchedulingContextInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha2.PodSchedulingContextLister -} - -type podSchedulingContextInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPodSchedulingContextInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. 
This reduces memory footprint and number of connections to the server. -func NewFilteredPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().PodSchedulingContexts(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().PodSchedulingContexts(namespace).Watch(context.TODO(), options) - }, - }, - &resourcev1alpha2.PodSchedulingContext{}, - resyncPeriod, - indexers, - ) -} - -func (f *podSchedulingContextInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPodSchedulingContextInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *podSchedulingContextInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.PodSchedulingContext{}, f.defaultInformer) -} - -func (f *podSchedulingContextInformer) Lister() v1alpha2.PodSchedulingContextLister { - return v1alpha2.NewPodSchedulingContextLister(f.Informer().GetIndexer()) -} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go deleted file mode 100644 index 3af936891..000000000 --- a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaim.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - time "time" - - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" - cache "k8s.io/client-go/tools/cache" -) - -// ResourceClaimInformer provides access to a shared informer and lister for -// ResourceClaims. -type ResourceClaimInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClaimLister -} - -type resourceClaimInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewResourceClaimInformer constructs a new informer for ResourceClaim type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResourceClaimInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredResourceClaimInformer constructs a new informer for ResourceClaim type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClaims(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClaims(namespace).Watch(context.TODO(), options) - }, - }, - &resourcev1alpha2.ResourceClaim{}, - resyncPeriod, - indexers, - ) -} - -func (f *resourceClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResourceClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *resourceClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClaim{}, f.defaultInformer) -} - -func (f *resourceClaimInformer) Lister() v1alpha2.ResourceClaimLister { - return v1alpha2.NewResourceClaimLister(f.Informer().GetIndexer()) -} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimparameters.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimparameters.go deleted file mode 100644 index 3064ac9f5..000000000 --- a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimparameters.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha2 - -import ( - "context" - time "time" - - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" - cache "k8s.io/client-go/tools/cache" -) - -// ResourceClaimParametersInformer provides access to a shared informer and lister for -// ResourceClaimParameters. -type ResourceClaimParametersInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClaimParametersLister -} - -type resourceClaimParametersInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewResourceClaimParametersInformer constructs a new informer for ResourceClaimParameters type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewResourceClaimParametersInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResourceClaimParametersInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredResourceClaimParametersInformer constructs a new informer for ResourceClaimParameters type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredResourceClaimParametersInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClaimParameters(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClaimParameters(namespace).Watch(context.TODO(), options) - }, - }, - &resourcev1alpha2.ResourceClaimParameters{}, - resyncPeriod, - indexers, - ) -} - -func (f *resourceClaimParametersInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResourceClaimParametersInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *resourceClaimParametersInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClaimParameters{}, f.defaultInformer) -} - -func (f *resourceClaimParametersInformer) Lister() v1alpha2.ResourceClaimParametersLister { - return v1alpha2.NewResourceClaimParametersLister(f.Informer().GetIndexer()) -} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go deleted file mode 100644 index 13f4ad835..000000000 --- 
a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclaimtemplate.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - time "time" - - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" - cache "k8s.io/client-go/tools/cache" -) - -// ResourceClaimTemplateInformer provides access to a shared informer and lister for -// ResourceClaimTemplates. -type ResourceClaimTemplateInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClaimTemplateLister -} - -type resourceClaimTemplateInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResourceClaimTemplateInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) - }, - }, - &resourcev1alpha2.ResourceClaimTemplate{}, - resyncPeriod, - indexers, - ) -} - -func (f *resourceClaimTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResourceClaimTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *resourceClaimTemplateInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClaimTemplate{}, f.defaultInformer) -} - -func (f *resourceClaimTemplateInformer) Lister() v1alpha2.ResourceClaimTemplateLister { - return v1alpha2.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) -} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go deleted file mode 100644 index cb76d78fe..000000000 --- a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclass.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - time "time" - - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" - cache "k8s.io/client-go/tools/cache" -) - -// ResourceClassInformer provides access to a shared informer and lister for -// ResourceClasses. -type ResourceClassInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClassLister -} - -type resourceClassInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewResourceClassInformer constructs a new informer for ResourceClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. 
This reduces memory footprint and number of connections to the server. -func NewResourceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResourceClassInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredResourceClassInformer constructs a new informer for ResourceClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredResourceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClasses().List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClasses().Watch(context.TODO(), options) - }, - }, - &resourcev1alpha2.ResourceClass{}, - resyncPeriod, - indexers, - ) -} - -func (f *resourceClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResourceClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *resourceClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClass{}, f.defaultInformer) -} - -func (f *resourceClassInformer) Lister() v1alpha2.ResourceClassLister { - return v1alpha2.NewResourceClassLister(f.Informer().GetIndexer()) -} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclassparameters.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclassparameters.go deleted file mode 100644 index 71fbefe16..000000000 --- a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceclassparameters.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - time "time" - - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" - cache "k8s.io/client-go/tools/cache" -) - -// ResourceClassParametersInformer provides access to a shared informer and lister for -// ResourceClassParameters. 
-type ResourceClassParametersInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceClassParametersLister -} - -type resourceClassParametersInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewResourceClassParametersInformer constructs a new informer for ResourceClassParameters type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewResourceClassParametersInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResourceClassParametersInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredResourceClassParametersInformer constructs a new informer for ResourceClassParameters type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredResourceClassParametersInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClassParameters(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceClassParameters(namespace).Watch(context.TODO(), options) - }, - }, - &resourcev1alpha2.ResourceClassParameters{}, - resyncPeriod, - indexers, - ) -} - -func (f *resourceClassParametersInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResourceClassParametersInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *resourceClassParametersInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceClassParameters{}, f.defaultInformer) -} - -func (f *resourceClassParametersInformer) Lister() v1alpha2.ResourceClassParametersLister { - return v1alpha2.NewResourceClassParametersLister(f.Informer().GetIndexer()) -} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceslice.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceslice.go deleted file mode 100644 index da9d2a024..000000000 --- a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha2/resourceslice.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - time "time" - - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha2 "k8s.io/client-go/listers/resource/v1alpha2" - cache "k8s.io/client-go/tools/cache" -) - -// ResourceSliceInformer provides access to a shared informer and lister for -// ResourceSlices. -type ResourceSliceInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha2.ResourceSliceLister -} - -type resourceSliceInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewResourceSliceInformer constructs a new informer for ResourceSlice type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResourceSliceInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredResourceSliceInformer constructs a new informer for ResourceSlice type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceSlices().List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha2().ResourceSlices().Watch(context.TODO(), options) - }, - }, - &resourcev1alpha2.ResourceSlice{}, - resyncPeriod, - indexers, - ) -} - -func (f *resourceSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResourceSliceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *resourceSliceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&resourcev1alpha2.ResourceSlice{}, f.defaultInformer) -} - -func (f *resourceSliceInformer) Lister() v1alpha2.ResourceSliceLister { - return v1alpha2.NewResourceSliceLister(f.Informer().GetIndexer()) -} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go new file mode 100644 index 000000000..da322c8d0 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + context "context" + time "time" + + apiresourcev1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + cache "k8s.io/client-go/tools/cache" +) + +// DeviceClassInformer provides access to a shared informer and lister for +// DeviceClasses. +type DeviceClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1alpha3.DeviceClassLister +} + +type deviceClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewDeviceClassInformer constructs a new informer for DeviceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeviceClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredDeviceClassInformer constructs a new informer for DeviceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().DeviceClasses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().DeviceClasses().Watch(context.TODO(), options) + }, + }, + &apiresourcev1alpha3.DeviceClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *deviceClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeviceClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deviceClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1alpha3.DeviceClass{}, f.defaultInformer) +} + +func (f *deviceClassInformer) Lister() resourcev1alpha3.DeviceClassLister { + return resourcev1alpha3.NewDeviceClassLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go new file mode 100644 index 000000000..356c46179 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/interface.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // DeviceClasses returns a DeviceClassInformer. + DeviceClasses() DeviceClassInformer + // ResourceClaims returns a ResourceClaimInformer. + ResourceClaims() ResourceClaimInformer + // ResourceClaimTemplates returns a ResourceClaimTemplateInformer. + ResourceClaimTemplates() ResourceClaimTemplateInformer + // ResourceSlices returns a ResourceSliceInformer. + ResourceSlices() ResourceSliceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// DeviceClasses returns a DeviceClassInformer. 
+func (v *version) DeviceClasses() DeviceClassInformer { + return &deviceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ResourceClaims returns a ResourceClaimInformer. +func (v *version) ResourceClaims() ResourceClaimInformer { + return &resourceClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceClaimTemplates returns a ResourceClaimTemplateInformer. +func (v *version) ResourceClaimTemplates() ResourceClaimTemplateInformer { + return &resourceClaimTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceSlices returns a ResourceSliceInformer. +func (v *version) ResourceSlices() ResourceSliceInformer { + return &resourceSliceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go new file mode 100644 index 000000000..822d145bc --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaim.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + context "context" + time "time" + + apiresourcev1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimInformer provides access to a shared informer and lister for +// ResourceClaims. +type ResourceClaimInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1alpha3.ResourceClaimLister +} + +type resourceClaimInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceClaimInformer constructs a new informer for ResourceClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceClaimInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceClaimInformer constructs a new informer for ResourceClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().ResourceClaims(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().ResourceClaims(namespace).Watch(context.TODO(), options) + }, + }, + &apiresourcev1alpha3.ResourceClaim{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceClaimInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1alpha3.ResourceClaim{}, f.defaultInformer) +} + +func (f *resourceClaimInformer) Lister() resourcev1alpha3.ResourceClaimLister { + return resourcev1alpha3.NewResourceClaimLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go new file mode 100644 index 000000000..94680730a --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceclaimtemplate.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + context "context" + time "time" + + apiresourcev1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimTemplateInformer provides access to a shared informer and lister for +// ResourceClaimTemplates. +type ResourceClaimTemplateInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1alpha3.ResourceClaimTemplateLister +} + +type resourceClaimTemplateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceClaimTemplateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().ResourceClaimTemplates(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) + }, + }, + &apiresourcev1alpha3.ResourceClaimTemplate{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceClaimTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceClaimTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceClaimTemplateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1alpha3.ResourceClaimTemplate{}, f.defaultInformer) +} + +func (f *resourceClaimTemplateInformer) Lister() resourcev1alpha3.ResourceClaimTemplateLister { + return resourcev1alpha3.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go new file mode 100644 index 000000000..15394575f --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/resource/v1alpha3/resourceslice.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
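A minimal, non-authoritative sketch of how the tweakListOptions hook accepted by the NewFilteredXInformer constructors in these generated files is typically supplied: a factory-wide option mutates the metav1.ListOptions before each generated ListFunc/WatchFunc call, which is the usual way to scope informers to a label or field selector. The label value and resync period below are illustrative assumptions, not part of this diff.

// Sketch only; assumes the k8s.io/client-go informer factory options vendored here.
package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// newFilteredFactory builds a shared informer factory whose informers only
// List/Watch objects carrying an assumed label. The registered hook is what
// arrives as the tweakListOptions argument inside the generated constructors.
func newFilteredFactory(cfg *rest.Config) informers.SharedInformerFactory {
	client := kubernetes.NewForConfigOrDie(cfg)
	return informers.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Minute, // illustrative resync period
		informers.WithTweakListOptions(func(opts *metav1.ListOptions) {
			opts.LabelSelector = "example.com/managed=true" // assumed label
		}),
	)
}

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	_ = newFilteredFactory(cfg)
}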
+ +package v1alpha3 + +import ( + context "context" + time "time" + + apiresourcev1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceSliceInformer provides access to a shared informer and lister for +// ResourceSlices. +type ResourceSliceInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1alpha3.ResourceSliceLister +} + +type resourceSliceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewResourceSliceInformer constructs a new informer for ResourceSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceSliceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceSliceInformer constructs a new informer for ResourceSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().ResourceSlices().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1alpha3().ResourceSlices().Watch(context.TODO(), options) + }, + }, + &apiresourcev1alpha3.ResourceSlice{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceSliceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceSliceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1alpha3.ResourceSlice{}, f.defaultInformer) +} + +func (f *resourceSliceInformer) Lister() resourcev1alpha3.ResourceSliceLister { + return resourcev1alpha3.NewResourceSliceLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go new file mode 100644 index 000000000..9623788c4 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + time "time" + + apiresourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1beta1 "k8s.io/client-go/listers/resource/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// DeviceClassInformer provides access to a shared informer and lister for +// DeviceClasses. +type DeviceClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1beta1.DeviceClassLister +} + +type deviceClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewDeviceClassInformer constructs a new informer for DeviceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeviceClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredDeviceClassInformer constructs a new informer for DeviceClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().DeviceClasses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().DeviceClasses().Watch(context.TODO(), options) + }, + }, + &apiresourcev1beta1.DeviceClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *deviceClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeviceClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deviceClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1beta1.DeviceClass{}, f.defaultInformer) +} + +func (f *deviceClassInformer) Lister() resourcev1beta1.DeviceClassLister { + return resourcev1beta1.NewDeviceClassLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/interface.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/interface.go new file mode 100644 index 000000000..07330763b --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // DeviceClasses returns a DeviceClassInformer. + DeviceClasses() DeviceClassInformer + // ResourceClaims returns a ResourceClaimInformer. + ResourceClaims() ResourceClaimInformer + // ResourceClaimTemplates returns a ResourceClaimTemplateInformer. + ResourceClaimTemplates() ResourceClaimTemplateInformer + // ResourceSlices returns a ResourceSliceInformer. + ResourceSlices() ResourceSliceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// DeviceClasses returns a DeviceClassInformer. 
+func (v *version) DeviceClasses() DeviceClassInformer { + return &deviceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ResourceClaims returns a ResourceClaimInformer. +func (v *version) ResourceClaims() ResourceClaimInformer { + return &resourceClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceClaimTemplates returns a ResourceClaimTemplateInformer. +func (v *version) ResourceClaimTemplates() ResourceClaimTemplateInformer { + return &resourceClaimTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceSlices returns a ResourceSliceInformer. +func (v *version) ResourceSlices() ResourceSliceInformer { + return &resourceSliceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go new file mode 100644 index 000000000..107b7fda7 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + time "time" + + apiresourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1beta1 "k8s.io/client-go/listers/resource/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimInformer provides access to a shared informer and lister for +// ResourceClaims. +type ResourceClaimInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1beta1.ResourceClaimLister +} + +type resourceClaimInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceClaimInformer constructs a new informer for ResourceClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceClaimInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceClaimInformer constructs a new informer for ResourceClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().ResourceClaims(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().ResourceClaims(namespace).Watch(context.TODO(), options) + }, + }, + &apiresourcev1beta1.ResourceClaim{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceClaimInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1beta1.ResourceClaim{}, f.defaultInformer) +} + +func (f *resourceClaimInformer) Lister() resourcev1beta1.ResourceClaimLister { + return resourcev1beta1.NewResourceClaimLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaimtemplate.go new file mode 100644 index 000000000..9ae634ad0 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaimtemplate.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + time "time" + + apiresourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1beta1 "k8s.io/client-go/listers/resource/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimTemplateInformer provides access to a shared informer and lister for +// ResourceClaimTemplates. +type ResourceClaimTemplateInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1beta1.ResourceClaimTemplateLister +} + +type resourceClaimTemplateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceClaimTemplateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceClaimTemplateInformer constructs a new informer for ResourceClaimTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().ResourceClaimTemplates(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().ResourceClaimTemplates(namespace).Watch(context.TODO(), options) + }, + }, + &apiresourcev1beta1.ResourceClaimTemplate{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceClaimTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceClaimTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceClaimTemplateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1beta1.ResourceClaimTemplate{}, f.defaultInformer) +} + +func (f *resourceClaimTemplateInformer) Lister() resourcev1beta1.ResourceClaimTemplateLister { + return resourcev1beta1.NewResourceClaimTemplateLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go b/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go new file mode 100644 index 000000000..8ab6cb4fc --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
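A minimal, non-authoritative sketch of the consumer-side pattern the generated comments above recommend: obtaining these resource.k8s.io/v1beta1 informers through a shared informer factory, so callers reuse one cache and one watch connection per type instead of calling NewDeviceClassInformer and friends directly. The in-cluster config and resync period are illustrative assumptions, and Resource().V1beta1() is assumed to be exposed by the generated top-level factory in this client-go version.

// Sketch only; not part of the vendored diff.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// One factory per process; repeated calls below share a single informer.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	deviceClasses := factory.Resource().V1beta1().DeviceClasses()
	_ = factory.Resource().V1beta1().DeviceClasses() // same underlying informer

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// Read from the shared cache via the lister rather than hitting the API server.
	list, err := deviceClasses.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("observed %d DeviceClasses\n", len(list))
}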
+ +package v1beta1 + +import ( + context "context" + time "time" + + apiresourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + resourcev1beta1 "k8s.io/client-go/listers/resource/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceSliceInformer provides access to a shared informer and lister for +// ResourceSlices. +type ResourceSliceInformer interface { + Informer() cache.SharedIndexInformer + Lister() resourcev1beta1.ResourceSliceLister +} + +type resourceSliceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewResourceSliceInformer constructs a new informer for ResourceSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceSliceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceSliceInformer constructs a new informer for ResourceSlice type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().ResourceSlices().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResourceV1beta1().ResourceSlices().Watch(context.TODO(), options) + }, + }, + &apiresourcev1beta1.ResourceSlice{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceSliceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceSliceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceSliceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiresourcev1beta1.ResourceSlice{}, f.defaultInformer) +} + +func (f *resourceSliceInformer) Lister() resourcev1beta1.ResourceSliceLister { + return resourcev1beta1.NewResourceSliceLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go b/constraint/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go index 730616b4a..20b9fc0dc 100644 --- a/constraint/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - schedulingv1 "k8s.io/api/scheduling/v1" + apischedulingv1 "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/scheduling/v1" + schedulingv1 "k8s.io/client-go/listers/scheduling/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityClasses. type PriorityClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PriorityClassLister + Lister() schedulingv1.PriorityClassLister } type priorityClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod return client.SchedulingV1().PriorityClasses().Watch(context.TODO(), options) }, }, - &schedulingv1.PriorityClass{}, + &apischedulingv1.PriorityClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, res } func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&schedulingv1.PriorityClass{}, f.defaultInformer) + return f.factory.InformerFor(&apischedulingv1.PriorityClass{}, f.defaultInformer) } -func (f *priorityClassInformer) Lister() v1.PriorityClassLister { - return v1.NewPriorityClassLister(f.Informer().GetIndexer()) +func (f *priorityClassInformer) Lister() schedulingv1.PriorityClassLister { + return schedulingv1.NewPriorityClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/constraint/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go index f82b66436..904bc6c4e 100644 --- a/constraint/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + apischedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/scheduling/v1alpha1" + schedulingv1alpha1 "k8s.io/client-go/listers/scheduling/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityClasses. 
type PriorityClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.PriorityClassLister + Lister() schedulingv1alpha1.PriorityClassLister } type priorityClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod return client.SchedulingV1alpha1().PriorityClasses().Watch(context.TODO(), options) }, }, - &schedulingv1alpha1.PriorityClass{}, + &apischedulingv1alpha1.PriorityClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, res } func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&schedulingv1alpha1.PriorityClass{}, f.defaultInformer) + return f.factory.InformerFor(&apischedulingv1alpha1.PriorityClass{}, f.defaultInformer) } -func (f *priorityClassInformer) Lister() v1alpha1.PriorityClassLister { - return v1alpha1.NewPriorityClassLister(f.Informer().GetIndexer()) +func (f *priorityClassInformer) Lister() schedulingv1alpha1.PriorityClassLister { + return schedulingv1alpha1.NewPriorityClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go b/constraint/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go index fc7848891..299d37673 100644 --- a/constraint/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + apischedulingv1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/scheduling/v1beta1" + schedulingv1beta1 "k8s.io/client-go/listers/scheduling/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityClasses. 
type PriorityClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.PriorityClassLister + Lister() schedulingv1beta1.PriorityClassLister } type priorityClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod return client.SchedulingV1beta1().PriorityClasses().Watch(context.TODO(), options) }, }, - &schedulingv1beta1.PriorityClass{}, + &apischedulingv1beta1.PriorityClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, res } func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&schedulingv1beta1.PriorityClass{}, f.defaultInformer) + return f.factory.InformerFor(&apischedulingv1beta1.PriorityClass{}, f.defaultInformer) } -func (f *priorityClassInformer) Lister() v1beta1.PriorityClassLister { - return v1beta1.NewPriorityClassLister(f.Informer().GetIndexer()) +func (f *priorityClassInformer) Lister() schedulingv1beta1.PriorityClassLister { + return schedulingv1beta1.NewPriorityClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go index 6fd1e678d..79282873b 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - storagev1 "k8s.io/api/storage/v1" + apistoragev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" + storagev1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSIDrivers. type CSIDriverInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.CSIDriverLister + Lister() storagev1.CSIDriverLister } type cSIDriverInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time return client.StorageV1().CSIDrivers().Watch(context.TODO(), options) }, }, - &storagev1.CSIDriver{}, + &apistoragev1.CSIDriver{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *cSIDriverInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *cSIDriverInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.CSIDriver{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1.CSIDriver{}, f.defaultInformer) } -func (f *cSIDriverInformer) Lister() v1.CSIDriverLister { - return v1.NewCSIDriverLister(f.Informer().GetIndexer()) +func (f *cSIDriverInformer) Lister() storagev1.CSIDriverLister { + return storagev1.NewCSIDriverLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1/csinode.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1/csinode.go index 96416967f..00345f897 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1/csinode.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1/csinode.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1 import ( - "context" + context "context" time "time" - storagev1 "k8s.io/api/storage/v1" + apistoragev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" + storagev1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSINodes. type CSINodeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.CSINodeLister + Lister() storagev1.CSINodeLister } type cSINodeInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.D return client.StorageV1().CSINodes().Watch(context.TODO(), options) }, }, - &storagev1.CSINode{}, + &apistoragev1.CSINode{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *cSINodeInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *cSINodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.CSINode{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1.CSINode{}, f.defaultInformer) } -func (f *cSINodeInformer) Lister() v1.CSINodeLister { - return v1.NewCSINodeLister(f.Informer().GetIndexer()) +func (f *cSINodeInformer) Lister() storagev1.CSINodeLister { + return storagev1.NewCSINodeLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go index 9b9095f3a..5a72272fc 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - storagev1 "k8s.io/api/storage/v1" + apistoragev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" + storagev1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSIStorageCapacities. 
type CSIStorageCapacityInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.CSIStorageCapacityLister + Lister() storagev1.CSIStorageCapacityLister } type cSIStorageCapacityInformer struct { @@ -71,7 +71,7 @@ func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespac return client.StorageV1().CSIStorageCapacities(namespace).Watch(context.TODO(), options) }, }, - &storagev1.CSIStorageCapacity{}, + &apistoragev1.CSIStorageCapacity{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *cSIStorageCapacityInformer) defaultInformer(client kubernetes.Interface } func (f *cSIStorageCapacityInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.CSIStorageCapacity{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1.CSIStorageCapacity{}, f.defaultInformer) } -func (f *cSIStorageCapacityInformer) Lister() v1.CSIStorageCapacityLister { - return v1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) +func (f *cSIStorageCapacityInformer) Lister() storagev1.CSIStorageCapacityLister { + return storagev1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go index 8cde79d9a..6eecc50f7 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - storagev1 "k8s.io/api/storage/v1" + apistoragev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" + storagev1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StorageClasses. 
type StorageClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.StorageClassLister + Lister() storagev1.StorageClassLister } type storageClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod t return client.StorageV1().StorageClasses().Watch(context.TODO(), options) }, }, - &storagev1.StorageClass{}, + &apistoragev1.StorageClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *storageClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.StorageClass{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1.StorageClass{}, f.defaultInformer) } -func (f *storageClassInformer) Lister() v1.StorageClassLister { - return v1.NewStorageClassLister(f.Informer().GetIndexer()) +func (f *storageClassInformer) Lister() storagev1.StorageClassLister { + return storagev1.NewStorageClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go index be605ff48..deca09cda 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go @@ -19,16 +19,16 @@ limitations under the License. package v1 import ( - "context" + context "context" time "time" - storagev1 "k8s.io/api/storage/v1" + apistoragev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/storage/v1" + storagev1 "k8s.io/client-go/listers/storage/v1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // VolumeAttachments. 
type VolumeAttachmentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.VolumeAttachmentLister + Lister() storagev1.VolumeAttachmentLister } type volumeAttachmentInformer struct { @@ -70,7 +70,7 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri return client.StorageV1().VolumeAttachments().Watch(context.TODO(), options) }, }, - &storagev1.VolumeAttachment{}, + &apistoragev1.VolumeAttachment{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, } func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1.VolumeAttachment{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1.VolumeAttachment{}, f.defaultInformer) } -func (f *volumeAttachmentInformer) Lister() v1.VolumeAttachmentLister { - return v1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +func (f *volumeAttachmentInformer) Lister() storagev1.VolumeAttachmentLister { + return storagev1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go index e59dfab2d..2253f700e 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - storagev1alpha1 "k8s.io/api/storage/v1alpha1" + apistoragev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" + storagev1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSIStorageCapacities. 
type CSIStorageCapacityInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.CSIStorageCapacityLister + Lister() storagev1alpha1.CSIStorageCapacityLister } type cSIStorageCapacityInformer struct { @@ -71,7 +71,7 @@ func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespac return client.StorageV1alpha1().CSIStorageCapacities(namespace).Watch(context.TODO(), options) }, }, - &storagev1alpha1.CSIStorageCapacity{}, + &apistoragev1alpha1.CSIStorageCapacity{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *cSIStorageCapacityInformer) defaultInformer(client kubernetes.Interface } func (f *cSIStorageCapacityInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1alpha1.CSIStorageCapacity{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1alpha1.CSIStorageCapacity{}, f.defaultInformer) } -func (f *cSIStorageCapacityInformer) Lister() v1alpha1.CSIStorageCapacityLister { - return v1alpha1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) +func (f *cSIStorageCapacityInformer) Lister() storagev1alpha1.CSIStorageCapacityLister { + return storagev1alpha1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go index 445496dad..f31989953 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - storagev1alpha1 "k8s.io/api/storage/v1alpha1" + apistoragev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" + storagev1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // VolumeAttachments. 
type VolumeAttachmentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.VolumeAttachmentLister + Lister() storagev1alpha1.VolumeAttachmentLister } type volumeAttachmentInformer struct { @@ -70,7 +70,7 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri return client.StorageV1alpha1().VolumeAttachments().Watch(context.TODO(), options) }, }, - &storagev1alpha1.VolumeAttachment{}, + &apistoragev1alpha1.VolumeAttachment{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, } func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1alpha1.VolumeAttachment{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1alpha1.VolumeAttachment{}, f.defaultInformer) } -func (f *volumeAttachmentInformer) Lister() v1alpha1.VolumeAttachmentLister { - return v1alpha1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +func (f *volumeAttachmentInformer) Lister() storagev1alpha1.VolumeAttachmentLister { + return storagev1alpha1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go index 5e62e2f42..8a688312a 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - storagev1alpha1 "k8s.io/api/storage/v1alpha1" + apistoragev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" + storagev1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // VolumeAttributesClasses. 
type VolumeAttributesClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.VolumeAttributesClassLister + Lister() storagev1alpha1.VolumeAttributesClassLister } type volumeAttributesClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyn return client.StorageV1alpha1().VolumeAttributesClasses().Watch(context.TODO(), options) }, }, - &storagev1alpha1.VolumeAttributesClass{}, + &apistoragev1alpha1.VolumeAttributesClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *volumeAttributesClassInformer) defaultInformer(client kubernetes.Interf } func (f *volumeAttributesClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1alpha1.VolumeAttributesClass{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1alpha1.VolumeAttributesClass{}, f.defaultInformer) } -func (f *volumeAttributesClassInformer) Lister() v1alpha1.VolumeAttributesClassLister { - return v1alpha1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) +func (f *volumeAttributesClassInformer) Lister() storagev1alpha1.VolumeAttributesClassLister { + return storagev1alpha1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go index f138a915b..f538deed5 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSIDrivers. 
type CSIDriverInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.CSIDriverLister + Lister() storagev1beta1.CSIDriverLister } type cSIDriverInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time return client.StorageV1beta1().CSIDrivers().Watch(context.TODO(), options) }, }, - &storagev1beta1.CSIDriver{}, + &apistoragev1beta1.CSIDriver{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *cSIDriverInformer) defaultInformer(client kubernetes.Interface, resyncP } func (f *cSIDriverInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.CSIDriver{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.CSIDriver{}, f.defaultInformer) } -func (f *cSIDriverInformer) Lister() v1beta1.CSIDriverLister { - return v1beta1.NewCSIDriverLister(f.Informer().GetIndexer()) +func (f *cSIDriverInformer) Lister() storagev1beta1.CSIDriverLister { + return storagev1beta1.NewCSIDriverLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go index 6ba63172a..5d26cffdc 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSINodes. type CSINodeInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.CSINodeLister + Lister() storagev1beta1.CSINodeLister } type cSINodeInformer struct { @@ -70,7 +70,7 @@ func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.D return client.StorageV1beta1().CSINodes().Watch(context.TODO(), options) }, }, - &storagev1beta1.CSINode{}, + &apistoragev1beta1.CSINode{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *cSINodeInformer) defaultInformer(client kubernetes.Interface, resyncPer } func (f *cSINodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.CSINode{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.CSINode{}, f.defaultInformer) } -func (f *cSINodeInformer) Lister() v1beta1.CSINodeLister { - return v1beta1.NewCSINodeLister(f.Informer().GetIndexer()) +func (f *cSINodeInformer) Lister() storagev1beta1.CSINodeLister { + return storagev1beta1.NewCSINodeLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go index 8f0cc4668..9ad42e9f8 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // CSIStorageCapacities. type CSIStorageCapacityInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.CSIStorageCapacityLister + Lister() storagev1beta1.CSIStorageCapacityLister } type cSIStorageCapacityInformer struct { @@ -71,7 +71,7 @@ func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespac return client.StorageV1beta1().CSIStorageCapacities(namespace).Watch(context.TODO(), options) }, }, - &storagev1beta1.CSIStorageCapacity{}, + &apistoragev1beta1.CSIStorageCapacity{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *cSIStorageCapacityInformer) defaultInformer(client kubernetes.Interface } func (f *cSIStorageCapacityInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.CSIStorageCapacity{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.CSIStorageCapacity{}, f.defaultInformer) } -func (f *cSIStorageCapacityInformer) Lister() v1beta1.CSIStorageCapacityLister { - return v1beta1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) +func (f *cSIStorageCapacityInformer) Lister() storagev1beta1.CSIStorageCapacityLister { + return storagev1beta1.NewCSIStorageCapacityLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go index 77b77c08e..743395185 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go @@ -34,6 +34,8 @@ type Interface interface { StorageClasses() StorageClassInformer // VolumeAttachments returns a VolumeAttachmentInformer. VolumeAttachments() VolumeAttachmentInformer + // VolumeAttributesClasses returns a VolumeAttributesClassInformer. + VolumeAttributesClasses() VolumeAttributesClassInformer } type version struct { @@ -71,3 +73,8 @@ func (v *version) StorageClasses() StorageClassInformer { func (v *version) VolumeAttachments() VolumeAttachmentInformer { return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// VolumeAttributesClasses returns a VolumeAttributesClassInformer. +func (v *version) VolumeAttributesClasses() VolumeAttributesClassInformer { + return &volumeAttributesClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go index a6582bf3d..2d8649e9b 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go @@ -19,16 +19,16 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StorageClasses. type StorageClassInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.StorageClassLister + Lister() storagev1beta1.StorageClassLister } type storageClassInformer struct { @@ -70,7 +70,7 @@ func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod t return client.StorageV1beta1().StorageClasses().Watch(context.TODO(), options) }, }, - &storagev1beta1.StorageClass{}, + &apistoragev1beta1.StorageClass{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resy } func (f *storageClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.StorageClass{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.StorageClass{}, f.defaultInformer) } -func (f *storageClassInformer) Lister() v1beta1.StorageClassLister { - return v1beta1.NewStorageClassLister(f.Informer().GetIndexer()) +func (f *storageClassInformer) Lister() storagev1beta1.StorageClassLister { + return storagev1beta1.NewStorageClassLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go index e89424634..93d382693 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go @@ -19,16 +19,16 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" time "time" - storagev1beta1 "k8s.io/api/storage/v1beta1" + apistoragev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // VolumeAttachments. 
type VolumeAttachmentInformer interface { Informer() cache.SharedIndexInformer - Lister() v1beta1.VolumeAttachmentLister + Lister() storagev1beta1.VolumeAttachmentLister } type volumeAttachmentInformer struct { @@ -70,7 +70,7 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri return client.StorageV1beta1().VolumeAttachments().Watch(context.TODO(), options) }, }, - &storagev1beta1.VolumeAttachment{}, + &apistoragev1beta1.VolumeAttachment{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, } func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1beta1.VolumeAttachment{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragev1beta1.VolumeAttachment{}, f.defaultInformer) } -func (f *volumeAttachmentInformer) Lister() v1beta1.VolumeAttachmentLister { - return v1beta1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +func (f *volumeAttachmentInformer) Lister() storagev1beta1.VolumeAttachmentLister { + return storagev1beta1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 000000000..dd9734bdc --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + time "time" + + apistoragev1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + storagev1beta1 "k8s.io/client-go/listers/storage/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeAttributesClassInformer provides access to a shared informer and lister for +// VolumeAttributesClasses. +type VolumeAttributesClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() storagev1beta1.VolumeAttributesClassLister +} + +type volumeAttributesClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().VolumeAttributesClasses().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().VolumeAttributesClasses().Watch(context.TODO(), options) + }, + }, + &apistoragev1beta1.VolumeAttributesClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttributesClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttributesClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apistoragev1beta1.VolumeAttributesClass{}, f.defaultInformer) +} + +func (f *volumeAttributesClassInformer) Lister() storagev1beta1.VolumeAttributesClassLister { + return storagev1beta1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) +} diff --git a/constraint/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go b/constraint/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go index 70e7c7279..49d6dd2e5 100644 --- a/constraint/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go +++ b/constraint/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go @@ -19,16 +19,16 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + apistoragemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/storagemigration/v1alpha1" + storagemigrationv1alpha1 "k8s.io/client-go/listers/storagemigration/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // StorageVersionMigrations. 
type StorageVersionMigrationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.StorageVersionMigrationLister + Lister() storagemigrationv1alpha1.StorageVersionMigrationLister } type storageVersionMigrationInformer struct { @@ -70,7 +70,7 @@ func NewFilteredStorageVersionMigrationInformer(client kubernetes.Interface, res return client.StoragemigrationV1alpha1().StorageVersionMigrations().Watch(context.TODO(), options) }, }, - &storagemigrationv1alpha1.StorageVersionMigration{}, + &apistoragemigrationv1alpha1.StorageVersionMigration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *storageVersionMigrationInformer) defaultInformer(client kubernetes.Inte } func (f *storageVersionMigrationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagemigrationv1alpha1.StorageVersionMigration{}, f.defaultInformer) + return f.factory.InformerFor(&apistoragemigrationv1alpha1.StorageVersionMigration{}, f.defaultInformer) } -func (f *storageVersionMigrationInformer) Lister() v1alpha1.StorageVersionMigrationLister { - return v1alpha1.NewStorageVersionMigrationLister(f.Informer().GetIndexer()) +func (f *storageVersionMigrationInformer) Lister() storagemigrationv1alpha1.StorageVersionMigrationLister { + return storagemigrationv1alpha1.NewStorageVersionMigrationLister(f.Informer().GetIndexer()) } diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/clientset.go b/constraint/vendor/k8s.io/client-go/kubernetes/clientset.go index eaa206ff6..a6dbc23a9 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -19,8 +19,8 @@ limitations under the License. package kubernetes import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" discovery "k8s.io/client-go/discovery" admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" @@ -45,6 +45,7 @@ import ( certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" + coordinationv1alpha2 "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1" @@ -67,7 +68,8 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" + resourcev1alpha3 "k8s.io/client-go/kubernetes/typed/resource/v1alpha3" + resourcev1beta1 "k8s.io/client-go/kubernetes/typed/resource/v1beta1" schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" @@ -102,6 +104,7 @@ type Interface interface { CertificatesV1() certificatesv1.CertificatesV1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface + CoordinationV1alpha2() coordinationv1alpha2.CoordinationV1alpha2Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface @@ -125,7 
+128,8 @@ type Interface interface { RbacV1() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface - ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface + ResourceV1beta1() resourcev1beta1.ResourceV1beta1Interface + ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface SchedulingV1() schedulingv1.SchedulingV1Interface @@ -159,6 +163,7 @@ type Clientset struct { certificatesV1 *certificatesv1.CertificatesV1Client certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client + coordinationV1alpha2 *coordinationv1alpha2.CoordinationV1alpha2Client coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client coordinationV1 *coordinationv1.CoordinationV1Client coreV1 *corev1.CoreV1Client @@ -182,7 +187,8 @@ type Clientset struct { rbacV1 *rbacv1.RbacV1Client rbacV1beta1 *rbacv1beta1.RbacV1beta1Client rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - resourceV1alpha2 *resourcev1alpha2.ResourceV1alpha2Client + resourceV1beta1 *resourcev1beta1.ResourceV1beta1Client + resourceV1alpha3 *resourcev1alpha3.ResourceV1alpha3Client schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client schedulingV1 *schedulingv1.SchedulingV1Client @@ -297,6 +303,11 @@ func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1al return c.certificatesV1alpha1 } +// CoordinationV1alpha2 retrieves the CoordinationV1alpha2Client +func (c *Clientset) CoordinationV1alpha2() coordinationv1alpha2.CoordinationV1alpha2Interface { + return c.coordinationV1alpha2 +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 @@ -412,9 +423,14 @@ func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return c.rbacV1alpha1 } -// ResourceV1alpha2 retrieves the ResourceV1alpha2Client -func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { - return c.resourceV1alpha2 +// ResourceV1beta1 retrieves the ResourceV1beta1Client +func (c *Clientset) ResourceV1beta1() resourcev1beta1.ResourceV1beta1Interface { + return c.resourceV1beta1 +} + +// ResourceV1alpha3 retrieves the ResourceV1alpha3Client +func (c *Clientset) ResourceV1alpha3() resourcev1alpha3.ResourceV1alpha3Interface { + return c.resourceV1alpha3 } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client @@ -580,6 +596,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.coordinationV1alpha2, err = coordinationv1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -672,7 +692,11 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.resourceV1alpha2, err = resourcev1alpha2.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.resourceV1beta1, err = resourcev1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.resourceV1alpha3, err = 
resourcev1alpha3.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -746,6 +770,7 @@ func New(c rest.Interface) *Clientset { cs.certificatesV1 = certificatesv1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) + cs.coordinationV1alpha2 = coordinationv1alpha2.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) @@ -769,7 +794,8 @@ func New(c rest.Interface) *Clientset { cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) - cs.resourceV1alpha2 = resourcev1alpha2.New(c) + cs.resourceV1beta1 = resourcev1beta1.New(c) + cs.resourceV1alpha3 = resourcev1alpha3.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) cs.schedulingV1 = schedulingv1.New(c) diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/constraint/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go deleted file mode 100644 index a62b8f7c4..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ /dev/null @@ -1,442 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/discovery" - fakediscovery "k8s.io/client-go/discovery/fake" - clientset "k8s.io/client-go/kubernetes" - admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" - fakeadmissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake" - admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" - fakeadmissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake" - admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" - fakeadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake" - internalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" - fakeinternalv1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake" - appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - fakeappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1/fake" - appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" - fakeappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake" - appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" - fakeappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake" - authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" - fakeauthenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1/fake" - authenticationv1alpha1 "k8s.io/client-go/kubernetes/typed/authentication/v1alpha1" - fakeauthenticationv1alpha1 "k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake" - authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" - fakeauthenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake" - authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" - fakeauthorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1/fake" - authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" - fakeauthorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake" - autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" - fakeautoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake" - autoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2" - fakeautoscalingv2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake" - autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" - fakeautoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake" - autoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" - fakeautoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake" - batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" - fakebatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1/fake" - batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" - fakebatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake" - certificatesv1 "k8s.io/client-go/kubernetes/typed/certificates/v1" - fakecertificatesv1 "k8s.io/client-go/kubernetes/typed/certificates/v1/fake" - certificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" - fakecertificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake" - certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" - fakecertificatesv1beta1 
"k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake" - coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" - fakecoordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1/fake" - coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" - fakecoordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake" - corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake" - discoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1" - fakediscoveryv1 "k8s.io/client-go/kubernetes/typed/discovery/v1/fake" - discoveryv1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1" - fakediscoveryv1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake" - eventsv1 "k8s.io/client-go/kubernetes/typed/events/v1" - fakeeventsv1 "k8s.io/client-go/kubernetes/typed/events/v1/fake" - eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" - fakeeventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake" - extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - fakeextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake" - flowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" - fakeflowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake" - flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" - fakeflowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake" - flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" - fakeflowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake" - flowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" - fakeflowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake" - networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" - fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake" - networkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1" - fakenetworkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake" - networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" - fakenetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake" - nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" - fakenodev1 "k8s.io/client-go/kubernetes/typed/node/v1/fake" - nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" - fakenodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake" - nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" - fakenodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1/fake" - policyv1 "k8s.io/client-go/kubernetes/typed/policy/v1" - fakepolicyv1 "k8s.io/client-go/kubernetes/typed/policy/v1/fake" - policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" - fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake" - rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" - fakerbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1/fake" - rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" - fakerbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake" - rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - fakerbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake" - resourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" - fakeresourcev1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake" - schedulingv1 
"k8s.io/client-go/kubernetes/typed/scheduling/v1" - fakeschedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1/fake" - schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" - fakeschedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake" - schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" - fakeschedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake" - storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" - fakestoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1/fake" - storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" - fakestoragev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake" - storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" - fakestoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake" - storagemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1" - fakestoragemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake" - "k8s.io/client-go/testing" -) - -// NewSimpleClientset returns a clientset that will respond with the provided objects. -// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement -// for a real clientset and is mostly useful in simple unit tests. -func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - cs := &Clientset{tracker: o} - cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} - cs.AddReactor("*", "*", testing.ObjectReaction(o)) - cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) - if err != nil { - return false, nil, err - } - return true, watch, nil - }) - - return cs -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. 
-type Clientset struct { - testing.Fake - discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker -} - -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.discovery -} - -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} - -var ( - _ clientset.Interface = &Clientset{} - _ testing.FakeClient = &Clientset{} -) - -// AdmissionregistrationV1 retrieves the AdmissionregistrationV1Client -func (c *Clientset) AdmissionregistrationV1() admissionregistrationv1.AdmissionregistrationV1Interface { - return &fakeadmissionregistrationv1.FakeAdmissionregistrationV1{Fake: &c.Fake} -} - -// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client -func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { - return &fakeadmissionregistrationv1alpha1.FakeAdmissionregistrationV1alpha1{Fake: &c.Fake} -} - -// AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client -func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { - return &fakeadmissionregistrationv1beta1.FakeAdmissionregistrationV1beta1{Fake: &c.Fake} -} - -// InternalV1alpha1 retrieves the InternalV1alpha1Client -func (c *Clientset) InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface { - return &fakeinternalv1alpha1.FakeInternalV1alpha1{Fake: &c.Fake} -} - -// AppsV1 retrieves the AppsV1Client -func (c *Clientset) AppsV1() appsv1.AppsV1Interface { - return &fakeappsv1.FakeAppsV1{Fake: &c.Fake} -} - -// AppsV1beta1 retrieves the AppsV1beta1Client -func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { - return &fakeappsv1beta1.FakeAppsV1beta1{Fake: &c.Fake} -} - -// AppsV1beta2 retrieves the AppsV1beta2Client -func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { - return &fakeappsv1beta2.FakeAppsV1beta2{Fake: &c.Fake} -} - -// AuthenticationV1 retrieves the AuthenticationV1Client -func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { - return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake} -} - -// AuthenticationV1alpha1 retrieves the AuthenticationV1alpha1Client -func (c *Clientset) AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface { - return &fakeauthenticationv1alpha1.FakeAuthenticationV1alpha1{Fake: &c.Fake} -} - -// AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client -func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { - return &fakeauthenticationv1beta1.FakeAuthenticationV1beta1{Fake: &c.Fake} -} - -// AuthorizationV1 retrieves the AuthorizationV1Client -func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { - return &fakeauthorizationv1.FakeAuthorizationV1{Fake: &c.Fake} -} - -// AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client -func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { - return &fakeauthorizationv1beta1.FakeAuthorizationV1beta1{Fake: &c.Fake} -} - -// AutoscalingV1 retrieves the AutoscalingV1Client -func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { - return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake} -} - -// AutoscalingV2 retrieves the AutoscalingV2Client -func (c *Clientset) AutoscalingV2() autoscalingv2.AutoscalingV2Interface { - return &fakeautoscalingv2.FakeAutoscalingV2{Fake: &c.Fake} -} - -// AutoscalingV2beta1 
retrieves the AutoscalingV2beta1Client -func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { - return &fakeautoscalingv2beta1.FakeAutoscalingV2beta1{Fake: &c.Fake} -} - -// AutoscalingV2beta2 retrieves the AutoscalingV2beta2Client -func (c *Clientset) AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface { - return &fakeautoscalingv2beta2.FakeAutoscalingV2beta2{Fake: &c.Fake} -} - -// BatchV1 retrieves the BatchV1Client -func (c *Clientset) BatchV1() batchv1.BatchV1Interface { - return &fakebatchv1.FakeBatchV1{Fake: &c.Fake} -} - -// BatchV1beta1 retrieves the BatchV1beta1Client -func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { - return &fakebatchv1beta1.FakeBatchV1beta1{Fake: &c.Fake} -} - -// CertificatesV1 retrieves the CertificatesV1Client -func (c *Clientset) CertificatesV1() certificatesv1.CertificatesV1Interface { - return &fakecertificatesv1.FakeCertificatesV1{Fake: &c.Fake} -} - -// CertificatesV1beta1 retrieves the CertificatesV1beta1Client -func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface { - return &fakecertificatesv1beta1.FakeCertificatesV1beta1{Fake: &c.Fake} -} - -// CertificatesV1alpha1 retrieves the CertificatesV1alpha1Client -func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface { - return &fakecertificatesv1alpha1.FakeCertificatesV1alpha1{Fake: &c.Fake} -} - -// CoordinationV1beta1 retrieves the CoordinationV1beta1Client -func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { - return &fakecoordinationv1beta1.FakeCoordinationV1beta1{Fake: &c.Fake} -} - -// CoordinationV1 retrieves the CoordinationV1Client -func (c *Clientset) CoordinationV1() coordinationv1.CoordinationV1Interface { - return &fakecoordinationv1.FakeCoordinationV1{Fake: &c.Fake} -} - -// CoreV1 retrieves the CoreV1Client -func (c *Clientset) CoreV1() corev1.CoreV1Interface { - return &fakecorev1.FakeCoreV1{Fake: &c.Fake} -} - -// DiscoveryV1 retrieves the DiscoveryV1Client -func (c *Clientset) DiscoveryV1() discoveryv1.DiscoveryV1Interface { - return &fakediscoveryv1.FakeDiscoveryV1{Fake: &c.Fake} -} - -// DiscoveryV1beta1 retrieves the DiscoveryV1beta1Client -func (c *Clientset) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface { - return &fakediscoveryv1beta1.FakeDiscoveryV1beta1{Fake: &c.Fake} -} - -// EventsV1 retrieves the EventsV1Client -func (c *Clientset) EventsV1() eventsv1.EventsV1Interface { - return &fakeeventsv1.FakeEventsV1{Fake: &c.Fake} -} - -// EventsV1beta1 retrieves the EventsV1beta1Client -func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { - return &fakeeventsv1beta1.FakeEventsV1beta1{Fake: &c.Fake} -} - -// ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client -func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { - return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} -} - -// FlowcontrolV1 retrieves the FlowcontrolV1Client -func (c *Clientset) FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface { - return &fakeflowcontrolv1.FakeFlowcontrolV1{Fake: &c.Fake} -} - -// FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client -func (c *Clientset) FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface { - return &fakeflowcontrolv1beta1.FakeFlowcontrolV1beta1{Fake: &c.Fake} -} - -// FlowcontrolV1beta2 retrieves the FlowcontrolV1beta2Client -func (c *Clientset) FlowcontrolV1beta2() 
flowcontrolv1beta2.FlowcontrolV1beta2Interface { - return &fakeflowcontrolv1beta2.FakeFlowcontrolV1beta2{Fake: &c.Fake} -} - -// FlowcontrolV1beta3 retrieves the FlowcontrolV1beta3Client -func (c *Clientset) FlowcontrolV1beta3() flowcontrolv1beta3.FlowcontrolV1beta3Interface { - return &fakeflowcontrolv1beta3.FakeFlowcontrolV1beta3{Fake: &c.Fake} -} - -// NetworkingV1 retrieves the NetworkingV1Client -func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { - return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} -} - -// NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client -func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface { - return &fakenetworkingv1alpha1.FakeNetworkingV1alpha1{Fake: &c.Fake} -} - -// NetworkingV1beta1 retrieves the NetworkingV1beta1Client -func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { - return &fakenetworkingv1beta1.FakeNetworkingV1beta1{Fake: &c.Fake} -} - -// NodeV1 retrieves the NodeV1Client -func (c *Clientset) NodeV1() nodev1.NodeV1Interface { - return &fakenodev1.FakeNodeV1{Fake: &c.Fake} -} - -// NodeV1alpha1 retrieves the NodeV1alpha1Client -func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { - return &fakenodev1alpha1.FakeNodeV1alpha1{Fake: &c.Fake} -} - -// NodeV1beta1 retrieves the NodeV1beta1Client -func (c *Clientset) NodeV1beta1() nodev1beta1.NodeV1beta1Interface { - return &fakenodev1beta1.FakeNodeV1beta1{Fake: &c.Fake} -} - -// PolicyV1 retrieves the PolicyV1Client -func (c *Clientset) PolicyV1() policyv1.PolicyV1Interface { - return &fakepolicyv1.FakePolicyV1{Fake: &c.Fake} -} - -// PolicyV1beta1 retrieves the PolicyV1beta1Client -func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { - return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake} -} - -// RbacV1 retrieves the RbacV1Client -func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { - return &fakerbacv1.FakeRbacV1{Fake: &c.Fake} -} - -// RbacV1beta1 retrieves the RbacV1beta1Client -func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { - return &fakerbacv1beta1.FakeRbacV1beta1{Fake: &c.Fake} -} - -// RbacV1alpha1 retrieves the RbacV1alpha1Client -func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { - return &fakerbacv1alpha1.FakeRbacV1alpha1{Fake: &c.Fake} -} - -// ResourceV1alpha2 retrieves the ResourceV1alpha2Client -func (c *Clientset) ResourceV1alpha2() resourcev1alpha2.ResourceV1alpha2Interface { - return &fakeresourcev1alpha2.FakeResourceV1alpha2{Fake: &c.Fake} -} - -// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client -func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { - return &fakeschedulingv1alpha1.FakeSchedulingV1alpha1{Fake: &c.Fake} -} - -// SchedulingV1beta1 retrieves the SchedulingV1beta1Client -func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface { - return &fakeschedulingv1beta1.FakeSchedulingV1beta1{Fake: &c.Fake} -} - -// SchedulingV1 retrieves the SchedulingV1Client -func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { - return &fakeschedulingv1.FakeSchedulingV1{Fake: &c.Fake} -} - -// StorageV1beta1 retrieves the StorageV1beta1Client -func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { - return &fakestoragev1beta1.FakeStorageV1beta1{Fake: &c.Fake} -} - -// StorageV1 retrieves the StorageV1Client -func (c *Clientset) StorageV1() storagev1.StorageV1Interface { - return 
&fakestoragev1.FakeStorageV1{Fake: &c.Fake} -} - -// StorageV1alpha1 retrieves the StorageV1alpha1Client -func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { - return &fakestoragev1alpha1.FakeStorageV1alpha1{Fake: &c.Fake} -} - -// StoragemigrationV1alpha1 retrieves the StoragemigrationV1alpha1Client -func (c *Clientset) StoragemigrationV1alpha1() storagemigrationv1alpha1.StoragemigrationV1alpha1Interface { - return &fakestoragemigrationv1alpha1.FakeStoragemigrationV1alpha1{Fake: &c.Fake} -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/fake/doc.go deleted file mode 100644 index 9b99e7167..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated fake clientset. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/fake/register.go b/constraint/vendor/k8s.io/client-go/kubernetes/fake/register.go deleted file mode 100644 index 339983fe0..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/fake/register.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
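The hunks above remove the vendored copy of client-go's generated fake clientset (clientset.go, doc.go, and the start of register.go), including NewSimpleClientset and the Clientset type with its per-group getters. For orientation, here is a minimal sketch of how such a fake is typically driven in a unit test, in the spirit of the deleted NewSimpleClientset doc comment; the package name, test name, and the "demo"/"default" object names are illustrative assumptions, not code from this repository:

	package example_test

	import (
		"context"
		"testing"

		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes/fake"
	)

	func TestFakeClientsetSketch(t *testing.T) {
		// Seed the fake's object tracker; per the deleted doc comment it replays
		// creates, updates, and deletions as-is, with no defaulting or validation.
		seed := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
		cs := fake.NewSimpleClientset(seed)

		// Typed calls go through the same interfaces as the real clientset.
		got, err := cs.CoreV1().Pods("default").Get(context.Background(), "demo", metav1.GetOptions{})
		if err != nil || got.Name != "demo" {
			t.Fatalf("unexpected result: %+v, %v", got, err)
		}
	}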
- -package fake - -import ( - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - internalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" - appsv1 "k8s.io/api/apps/v1" - appsv1beta1 "k8s.io/api/apps/v1beta1" - appsv1beta2 "k8s.io/api/apps/v1beta2" - authenticationv1 "k8s.io/api/authentication/v1" - authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1" - authenticationv1beta1 "k8s.io/api/authentication/v1beta1" - authorizationv1 "k8s.io/api/authorization/v1" - authorizationv1beta1 "k8s.io/api/authorization/v1beta1" - autoscalingv1 "k8s.io/api/autoscaling/v1" - autoscalingv2 "k8s.io/api/autoscaling/v2" - autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" - autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" - batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" - certificatesv1 "k8s.io/api/certificates/v1" - certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" - certificatesv1beta1 "k8s.io/api/certificates/v1beta1" - coordinationv1 "k8s.io/api/coordination/v1" - coordinationv1beta1 "k8s.io/api/coordination/v1beta1" - corev1 "k8s.io/api/core/v1" - discoveryv1 "k8s.io/api/discovery/v1" - discoveryv1beta1 "k8s.io/api/discovery/v1beta1" - eventsv1 "k8s.io/api/events/v1" - eventsv1beta1 "k8s.io/api/events/v1beta1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - flowcontrolv1 "k8s.io/api/flowcontrol/v1" - flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" - flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" - flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" - networkingv1 "k8s.io/api/networking/v1" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - networkingv1beta1 "k8s.io/api/networking/v1beta1" - nodev1 "k8s.io/api/node/v1" - nodev1alpha1 "k8s.io/api/node/v1alpha1" - nodev1beta1 "k8s.io/api/node/v1beta1" - policyv1 "k8s.io/api/policy/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - rbacv1 "k8s.io/api/rbac/v1" - rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" - schedulingv1 "k8s.io/api/scheduling/v1" - schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" - schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" - storagev1 "k8s.io/api/storage/v1" - storagev1alpha1 "k8s.io/api/storage/v1alpha1" - storagev1beta1 "k8s.io/api/storage/v1beta1" - storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var scheme = runtime.NewScheme() -var codecs = serializer.NewCodecFactory(scheme) - -var localSchemeBuilder = runtime.SchemeBuilder{ - admissionregistrationv1.AddToScheme, - admissionregistrationv1alpha1.AddToScheme, - admissionregistrationv1beta1.AddToScheme, - internalv1alpha1.AddToScheme, - appsv1.AddToScheme, - appsv1beta1.AddToScheme, - appsv1beta2.AddToScheme, - authenticationv1.AddToScheme, - authenticationv1alpha1.AddToScheme, - authenticationv1beta1.AddToScheme, - authorizationv1.AddToScheme, - authorizationv1beta1.AddToScheme, - autoscalingv1.AddToScheme, - autoscalingv2.AddToScheme, - autoscalingv2beta1.AddToScheme, - autoscalingv2beta2.AddToScheme, - batchv1.AddToScheme, - batchv1beta1.AddToScheme, - certificatesv1.AddToScheme, - 
certificatesv1beta1.AddToScheme, - certificatesv1alpha1.AddToScheme, - coordinationv1beta1.AddToScheme, - coordinationv1.AddToScheme, - corev1.AddToScheme, - discoveryv1.AddToScheme, - discoveryv1beta1.AddToScheme, - eventsv1.AddToScheme, - eventsv1beta1.AddToScheme, - extensionsv1beta1.AddToScheme, - flowcontrolv1.AddToScheme, - flowcontrolv1beta1.AddToScheme, - flowcontrolv1beta2.AddToScheme, - flowcontrolv1beta3.AddToScheme, - networkingv1.AddToScheme, - networkingv1alpha1.AddToScheme, - networkingv1beta1.AddToScheme, - nodev1.AddToScheme, - nodev1alpha1.AddToScheme, - nodev1beta1.AddToScheme, - policyv1.AddToScheme, - policyv1beta1.AddToScheme, - rbacv1.AddToScheme, - rbacv1beta1.AddToScheme, - rbacv1alpha1.AddToScheme, - resourcev1alpha2.AddToScheme, - schedulingv1alpha1.AddToScheme, - schedulingv1beta1.AddToScheme, - schedulingv1.AddToScheme, - storagev1beta1.AddToScheme, - storagev1.AddToScheme, - storagev1alpha1.AddToScheme, - storagemigrationv1alpha1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(scheme)) -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/constraint/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 8ebfb7cea..a9a5d8eb7 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -41,6 +41,7 @@ import ( certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -63,7 +64,8 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + resourcev1beta1 "k8s.io/api/resource/v1beta1" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -103,6 +105,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ certificatesv1.AddToScheme, certificatesv1beta1.AddToScheme, certificatesv1alpha1.AddToScheme, + coordinationv1alpha2.AddToScheme, coordinationv1beta1.AddToScheme, coordinationv1.AddToScheme, corev1.AddToScheme, @@ -126,7 +129,8 @@ var localSchemeBuilder = runtime.SchemeBuilder{ rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, - resourcev1alpha2.AddToScheme, + resourcev1beta1.AddToScheme, + resourcev1alpha3.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, schedulingv1.AddToScheme, diff --git 
a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go index a81b2b682..74d2967f6 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/admissionregistration_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/client-go/kubernetes/scheme" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *AdmissionregistrationV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := admissionregistrationv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go deleted file mode 100644 index b7487c2fb..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_admissionregistration_client.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAdmissionregistrationV1 struct { - *testing.Fake -} - -func (c *FakeAdmissionregistrationV1) MutatingWebhookConfigurations() v1.MutatingWebhookConfigurationInterface { - return &FakeMutatingWebhookConfigurations{c} -} - -func (c *FakeAdmissionregistrationV1) ValidatingAdmissionPolicies() v1.ValidatingAdmissionPolicyInterface { - return &FakeValidatingAdmissionPolicies{c} -} - -func (c *FakeAdmissionregistrationV1) ValidatingAdmissionPolicyBindings() v1.ValidatingAdmissionPolicyBindingInterface { - return &FakeValidatingAdmissionPolicyBindings{c} -} - -func (c *FakeAdmissionregistrationV1) ValidatingWebhookConfigurations() v1.ValidatingWebhookConfigurationInterface { - return &FakeValidatingWebhookConfigurations{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAdmissionregistrationV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go deleted file mode 100644 index b88598b71..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_mutatingwebhookconfiguration.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" - testing "k8s.io/client-go/testing" -) - -// FakeMutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface -type FakeMutatingWebhookConfigurations struct { - Fake *FakeAdmissionregistrationV1 -} - -var mutatingwebhookconfigurationsResource = v1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations") - -var mutatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration") - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1.MutatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.MutatingWebhookConfiguration), err -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1.MutatingWebhookConfigurationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.MutatingWebhookConfigurationList{ListMeta: obj.(*v1.MutatingWebhookConfigurationList).ListMeta} - for _, item := range obj.(*v1.MutatingWebhookConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1.MutatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.MutatingWebhookConfiguration), err -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1.MutatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.MutatingWebhookConfiguration), err -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(mutatingwebhookconfigurationsResource, name, opts), &v1.MutatingWebhookConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.MutatingWebhookConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1.MutatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.MutatingWebhookConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1.MutatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.MutatingWebhookConfiguration), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go deleted file mode 100644 index c947e6572..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" - testing "k8s.io/client-go/testing" -) - -// FakeValidatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface -type FakeValidatingAdmissionPolicies struct { - Fake *FakeAdmissionregistrationV1 -} - -var validatingadmissionpoliciesResource = v1.SchemeGroupVersion.WithResource("validatingadmissionpolicies") - -var validatingadmissionpoliciesKind = v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy") - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1.ValidatingAdmissionPolicyList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ValidatingAdmissionPolicyList{ListMeta: obj.(*v1.ValidatingAdmissionPolicyList).ListMeta} - for _, item := range obj.(*v1.ValidatingAdmissionPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. -func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. 
-func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpoliciesResource, name, opts), &v1.ValidatingAdmissionPolicy{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ValidatingAdmissionPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. -func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. -func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} - -// ApplyStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicy), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go deleted file mode 100644 index 9ace73593..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" - testing "k8s.io/client-go/testing" -) - -// FakeValidatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface -type FakeValidatingAdmissionPolicyBindings struct { - Fake *FakeAdmissionregistrationV1 -} - -var validatingadmissionpolicybindingsResource = v1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings") - -var validatingadmissionpolicybindingsKind = v1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding") - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyBindingList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1.ValidatingAdmissionPolicyBindingList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ValidatingAdmissionPolicyBindingList{ListMeta: obj.(*v1.ValidatingAdmissionPolicyBindingList).ListMeta} - for _, item := range obj.(*v1.ValidatingAdmissionPolicyBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), err -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), err -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpolicybindingsResource, name, opts), &v1.ValidatingAdmissionPolicyBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ValidatingAdmissionPolicyBindingList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go deleted file mode 100644 index a6951c736..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingwebhookconfiguration.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/admissionregistration/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" - testing "k8s.io/client-go/testing" -) - -// FakeValidatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface -type FakeValidatingWebhookConfigurations struct { - Fake *FakeAdmissionregistrationV1 -} - -var validatingwebhookconfigurationsResource = v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations") - -var validatingwebhookconfigurationsKind = v1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration") - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1.ValidatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingWebhookConfiguration), err -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1.ValidatingWebhookConfigurationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ValidatingWebhookConfigurationList{ListMeta: obj.(*v1.ValidatingWebhookConfigurationList).ListMeta} - for _, item := range obj.(*v1.ValidatingWebhookConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1.ValidatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingWebhookConfiguration), err -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
-func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1.ValidatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingWebhookConfiguration), err -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingwebhookconfigurationsResource, name, opts), &v1.ValidatingWebhookConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ValidatingWebhookConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1.ValidatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingWebhookConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1.ValidatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ValidatingWebhookConfiguration), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go index edbc826d1..d46a3c987 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. @@ -41,157 +38,38 @@ type MutatingWebhookConfigurationsGetter interface { // MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. type MutatingWebhookConfigurationInterface interface { - Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.MutatingWebhookConfiguration, error) - Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.MutatingWebhookConfiguration, error) + Create(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (*admissionregistrationv1.MutatingWebhookConfiguration, error) + Update(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (*admissionregistrationv1.MutatingWebhookConfiguration, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.MutatingWebhookConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.MutatingWebhookConfigurationList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) - Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) + Apply(ctx context.Context, mutatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.MutatingWebhookConfiguration, err error) MutatingWebhookConfigurationExpansion } // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface type mutatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*admissionregistrationv1.MutatingWebhookConfiguration, 
*admissionregistrationv1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration] } // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatingWebhookConfigurations { return &mutatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*admissionregistrationv1.MutatingWebhookConfiguration, *admissionregistrationv1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration]( + "mutatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1.MutatingWebhookConfiguration { + return &admissionregistrationv1.MutatingWebhookConfiguration{} + }, + func() *admissionregistrationv1.MutatingWebhookConfigurationList { + return &admissionregistrationv1.MutatingWebhookConfigurationList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1.MutatingWebhookConfiguration](), + ), } } - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.MutatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Post(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. 
Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Put(). - Resource("mutatingwebhookconfigurations"). - Name(mutatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) { - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("mutatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1.MutatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("mutatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go index 0b0b05acd..2d56ab168 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. @@ -41,203 +38,42 @@ type ValidatingAdmissionPoliciesGetter interface { // ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. type ValidatingAdmissionPolicyInterface interface { - Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicy, error) - Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) - UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicy, error) + Create(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingAdmissionPolicy, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingAdmissionPolicyList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) - Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) - ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - client rest.Interface + *gentype.ClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicy, *admissionregistrationv1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicy, *admissionregistrationv1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1.ValidatingAdmissionPolicy { + return &admissionregistrationv1.ValidatingAdmissionPolicy{} + }, + func() *admissionregistrationv1.ValidatingAdmissionPolicyList { + return &admissionregistrationv1.ValidatingAdmissionPolicyList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1.ValidatingAdmissionPolicy](), + ), } } - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *validatingAdmissionPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ValidatingAdmissionPolicyList{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. -func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. 
-func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Post(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1.ValidatingAdmissionPolicy, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. -func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicy, err error) { - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. 
-func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - - result = &v1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go index 83a8ef163..d3eaa0d2d 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. @@ -41,157 +38,38 @@ type ValidatingAdmissionPolicyBindingsGetter interface { // ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. 
type ValidatingAdmissionPolicyBindingInterface interface { - Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (*v1.ValidatingAdmissionPolicyBinding, error) - Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (*v1.ValidatingAdmissionPolicyBinding, error) + Create(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingAdmissionPolicyBinding, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingAdmissionPolicyBindingList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.ValidatingAdmissionPolicyBindingList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) - Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingAdmissionPolicyBinding, err error) ValidatingAdmissionPolicyBindingExpansion } // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface type validatingAdmissionPolicyBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration] } // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1Client) *validatingAdmissionPolicyBindings { return &validatingAdmissionPolicyBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*admissionregistrationv1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() 
*admissionregistrationv1.ValidatingAdmissionPolicyBinding { + return &admissionregistrationv1.ValidatingAdmissionPolicyBinding{} + }, + func() *admissionregistrationv1.ValidatingAdmissionPolicyBindingList { + return &admissionregistrationv1.ValidatingAdmissionPolicyBindingList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1.ValidatingAdmissionPolicyBinding](), + ), } } - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1.ValidatingAdmissionPolicyBinding{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingAdmissionPolicyBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ValidatingAdmissionPolicyBindingList{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.CreateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1.ValidatingAdmissionPolicyBinding{} - err = c.client.Post(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1.ValidatingAdmissionPolicyBinding, opts metav1.UpdateOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1.ValidatingAdmissionPolicyBinding{} - err = c.client.Put(). - Resource("validatingadmissionpolicybindings"). - Name(validatingAdmissionPolicyBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). 
- Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicybindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - result = &v1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicybindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go index 065e3c834..f8f60f681 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/admissionregistration/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + applyconfigurationsadmissionregistrationv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. @@ -41,157 +38,38 @@ type ValidatingWebhookConfigurationsGetter interface { // ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. type ValidatingWebhookConfigurationInterface interface { - Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.ValidatingWebhookConfiguration, error) - Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.ValidatingWebhookConfiguration, error) + Create(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) + Update(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*admissionregistrationv1.ValidatingWebhookConfigurationList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) - Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) + Apply(ctx context.Context, validatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *admissionregistrationv1.ValidatingWebhookConfiguration, err error) ValidatingWebhookConfigurationExpansion } // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface type validatingWebhookConfigurations struct { - client rest.Interface + 
*gentype.ClientWithListAndApply[*admissionregistrationv1.ValidatingWebhookConfiguration, *admissionregistrationv1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration] } // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *validatingWebhookConfigurations { return &validatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*admissionregistrationv1.ValidatingWebhookConfiguration, *admissionregistrationv1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration]( + "validatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1.ValidatingWebhookConfiguration { + return &admissionregistrationv1.ValidatingWebhookConfiguration{} + }, + func() *admissionregistrationv1.ValidatingWebhookConfigurationList { + return &admissionregistrationv1.ValidatingWebhookConfigurationList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1.ValidatingWebhookConfiguration](), + ), } } - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ValidatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Post(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Put(). - Resource("validatingwebhookconfigurations"). - Name(validatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) { - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("validatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1.ValidatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go index f6102d25a..f8a67c6d8 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -19,15 +19,17 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) type AdmissionregistrationV1alpha1Interface interface { RESTClient() rest.Interface + MutatingAdmissionPoliciesGetter + MutatingAdmissionPolicyBindingsGetter ValidatingAdmissionPoliciesGetter ValidatingAdmissionPolicyBindingsGetter } @@ -37,6 +39,14 @@ type AdmissionregistrationV1alpha1Client struct { restClient rest.Interface } +func (c *AdmissionregistrationV1alpha1Client) MutatingAdmissionPolicies() MutatingAdmissionPolicyInterface { + return newMutatingAdmissionPolicies(c) +} + +func (c *AdmissionregistrationV1alpha1Client) MutatingAdmissionPolicyBindings() MutatingAdmissionPolicyBindingInterface { + return newMutatingAdmissionPolicyBindings(c) +} + func (c *AdmissionregistrationV1alpha1Client) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface { return newValidatingAdmissionPolicies(c) } @@ -90,10 +100,10 @@ func New(c rest.Interface) *AdmissionregistrationV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := admissionregistrationv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. 
-package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go deleted file mode 100644 index dc0e30ca4..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAdmissionregistrationV1alpha1 struct { - *testing.Fake -} - -func (c *FakeAdmissionregistrationV1alpha1) ValidatingAdmissionPolicies() v1alpha1.ValidatingAdmissionPolicyInterface { - return &FakeValidatingAdmissionPolicies{c} -} - -func (c *FakeAdmissionregistrationV1alpha1) ValidatingAdmissionPolicyBindings() v1alpha1.ValidatingAdmissionPolicyBindingInterface { - return &FakeValidatingAdmissionPolicyBindings{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAdmissionregistrationV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go deleted file mode 100644 index f4358ce46..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicy.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeValidatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface -type FakeValidatingAdmissionPolicies struct { - Fake *FakeAdmissionregistrationV1alpha1 -} - -var validatingadmissionpoliciesResource = v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicies") - -var validatingadmissionpoliciesKind = v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy") - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1alpha1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1alpha1.ValidatingAdmissionPolicyList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ValidatingAdmissionPolicyList{ListMeta: obj.(*v1alpha1.ValidatingAdmissionPolicyList).ListMeta} - for _, item := range obj.(*v1alpha1.ValidatingAdmissionPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. -func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. 
-func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1alpha1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpoliciesResource, name, opts), &v1alpha1.ValidatingAdmissionPolicy{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ValidatingAdmissionPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. -func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1alpha1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. -func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1alpha1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go deleted file mode 100644 index c520655f9..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_validatingadmissionpolicybinding.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeValidatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface -type FakeValidatingAdmissionPolicyBindings struct { - Fake *FakeAdmissionregistrationV1alpha1 -} - -var validatingadmissionpolicybindingsResource = v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings") - -var validatingadmissionpolicybindingsKind = v1alpha1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding") - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. 
-func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1alpha1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyBindingList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1alpha1.ValidatingAdmissionPolicyBindingList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ValidatingAdmissionPolicyBindingList{ListMeta: obj.(*v1alpha1.ValidatingAdmissionPolicyBindingList).ListMeta} - for _, item := range obj.(*v1alpha1.ValidatingAdmissionPolicyBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1alpha1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1alpha1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpolicybindingsResource, name, opts), &v1alpha1.ValidatingAdmissionPolicyBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ValidatingAdmissionPolicyBindingList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1alpha1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1alpha1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go index 94562da59..676578c63 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -18,6 +18,10 @@ limitations under the License. 
package v1alpha1 +type MutatingAdmissionPolicyExpansion interface{} + +type MutatingAdmissionPolicyBindingExpansion interface{} + type ValidatingAdmissionPolicyExpansion interface{} type ValidatingAdmissionPolicyBindingExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicy.go new file mode 100644 index 000000000..4a781a602 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicy.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// MutatingAdmissionPoliciesGetter has a method to return a MutatingAdmissionPolicyInterface. +// A group's client should implement this interface. +type MutatingAdmissionPoliciesGetter interface { + MutatingAdmissionPolicies() MutatingAdmissionPolicyInterface +} + +// MutatingAdmissionPolicyInterface has methods to work with MutatingAdmissionPolicy resources. 
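This file is new in the vendor drop: a gentype-based typed client for the alpha MutatingAdmissionPolicy resource (its binding counterpart follows). A rough usage sketch, assuming the clientset wiring elsewhere in this bump and a cluster that enables the alpha API; the kubeconfig path is a placeholder:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path; in-cluster config would work the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The new getter hangs off AdmissionregistrationV1alpha1(), next to the
	// existing ValidatingAdmissionPolicies() and ValidatingAdmissionPolicyBindings().
	policies, err := clientset.AdmissionregistrationV1alpha1().
		MutatingAdmissionPolicies().
		List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range policies.Items {
		fmt.Println("mutating admission policy:", p.Name)
	}
}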
+type MutatingAdmissionPolicyInterface interface { + Create(ctx context.Context, mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, opts v1.CreateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error) + Update(ctx context.Context, mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicy, err error) + Apply(ctx context.Context, mutatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicy, err error) + MutatingAdmissionPolicyExpansion +} + +// mutatingAdmissionPolicies implements MutatingAdmissionPolicyInterface +type mutatingAdmissionPolicies struct { + *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicy, *admissionregistrationv1alpha1.MutatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration] +} + +// newMutatingAdmissionPolicies returns a MutatingAdmissionPolicies +func newMutatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *mutatingAdmissionPolicies { + return &mutatingAdmissionPolicies{ + gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicy, *admissionregistrationv1alpha1.MutatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyApplyConfiguration]( + "mutatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1alpha1.MutatingAdmissionPolicy { + return &admissionregistrationv1alpha1.MutatingAdmissionPolicy{} + }, + func() *admissionregistrationv1alpha1.MutatingAdmissionPolicyList { + return &admissionregistrationv1alpha1.MutatingAdmissionPolicyList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1alpha1.MutatingAdmissionPolicy](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go new file mode 100644 index 000000000..78057e200 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + context "context" + + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// MutatingAdmissionPolicyBindingsGetter has a method to return a MutatingAdmissionPolicyBindingInterface. +// A group's client should implement this interface. +type MutatingAdmissionPolicyBindingsGetter interface { + MutatingAdmissionPolicyBindings() MutatingAdmissionPolicyBindingInterface +} + +// MutatingAdmissionPolicyBindingInterface has methods to work with MutatingAdmissionPolicyBinding resources. +type MutatingAdmissionPolicyBindingInterface interface { + Create(ctx context.Context, mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, opts v1.CreateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error) + Update(ctx context.Context, mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, mutatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, err error) + MutatingAdmissionPolicyBindingExpansion +} + +// mutatingAdmissionPolicyBindings implements MutatingAdmissionPolicyBindingInterface +type mutatingAdmissionPolicyBindings struct { + *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration] +} + +// newMutatingAdmissionPolicyBindings returns a MutatingAdmissionPolicyBindings +func newMutatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *mutatingAdmissionPolicyBindings { + return &mutatingAdmissionPolicyBindings{ + 
gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.MutatingAdmissionPolicyBindingApplyConfiguration]( + "mutatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding { + return &admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding{} + }, + func() *admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList { + return &admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go index 1d994b5ab..ce2328b12 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. @@ -41,203 +38,42 @@ type ValidatingAdmissionPoliciesGetter interface { // ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. type ValidatingAdmissionPolicyInterface interface { - Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) - Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) - UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) + Create(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ValidatingAdmissionPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) - Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) - ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - client rest.Interface + *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicy { + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicy{} + }, + func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicyList { + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy](), + ), } } - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ValidatingAdmissionPolicyList{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. -func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. 
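The deleted block above is the hand-written REST plumbing for Get/List/Watch; the gentype-backed client keeps the same exported interface, so Watch can still be consumed the usual way. A short sketch, assuming an already constructed clientset:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchPolicies streams events for ValidatingAdmissionPolicies until ctx is done.
// The Watch signature is unchanged by the gentype rewrite in this file.
func watchPolicies(ctx context.Context, clientset kubernetes.Interface) error {
	w, err := clientset.AdmissionregistrationV1alpha1().
		ValidatingAdmissionPolicies().
		Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case ev, ok := <-w.ResultChan():
			if !ok {
				return nil // watch channel closed; caller may re-list and re-watch
			}
			fmt.Printf("%s %T\n", ev.Type, ev.Object)
		}
	}
}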
-func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Post(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1alpha1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. -func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. 
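Server-side apply also survives the rewrite: Apply still takes a ValidatingAdmissionPolicyApplyConfiguration plus a field manager. A minimal sketch with a hypothetical policy name and manager, leaving the spec empty for brevity (a real policy needs matchConstraints and validations):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyadmissionv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

// applyDemoPolicy declares a ValidatingAdmissionPolicy by name via server-side apply.
func applyDemoPolicy(ctx context.Context, clientset kubernetes.Interface) error {
	policy := applyadmissionv1alpha1.ValidatingAdmissionPolicy("demo-policy")

	applied, err := clientset.AdmissionregistrationV1alpha1().
		ValidatingAdmissionPolicies().
		Apply(ctx, policy, metav1.ApplyOptions{FieldManager: "demo-manager", Force: true})
	if err != nil {
		return err
	}
	fmt.Println("applied policy:", applied.Name)
	return nil
}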
-func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - - result = &v1alpha1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index 39823ca82..6236ea90c 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + applyconfigurationsadmissionregistrationv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. 
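The binding client gets the same treatment in the hunk below: the rest.Interface field is replaced by an embedded gentype.ClientWithListAndApply while the exported interface is untouched, so call sites compile as before. A sketch with a hypothetical binding name:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printBoundPolicy fetches a ValidatingAdmissionPolicyBinding and reports which
// policy it binds; Get behaves as before despite the internal rewrite.
func printBoundPolicy(ctx context.Context, clientset kubernetes.Interface) error {
	binding, err := clientset.AdmissionregistrationV1alpha1().
		ValidatingAdmissionPolicyBindings().
		Get(ctx, "demo-binding", metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("binding %s -> policy %s\n", binding.Name, binding.Spec.PolicyName)
	return nil
}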
@@ -41,157 +38,38 @@ type ValidatingAdmissionPolicyBindingsGetter interface { // ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. type ValidatingAdmissionPolicyBindingInterface interface { - Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) - Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + Create(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ValidatingAdmissionPolicyBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) - Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, err error) ValidatingAdmissionPolicyBindingExpansion } // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface type validatingAdmissionPolicyBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration] } // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1alpha1Client) *validatingAdmissionPolicyBindings { return &validatingAdmissionPolicyBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, 
*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding { + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{} + }, + func() *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList { + return &admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding](), + ), } } - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ValidatingAdmissionPolicyBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ValidatingAdmissionPolicyBindingList{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Post(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. 
-func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1alpha1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Put(). - Resource("validatingadmissionpolicybindings"). - Name(validatingAdmissionPolicyBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicybindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - result = &v1alpha1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicybindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go index 5a0a17d9b..16c42b0ec 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *AdmissionregistrationV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := admissionregistrationv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go deleted file mode 100644 index badfbf034..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAdmissionregistrationV1beta1 struct { - *testing.Fake -} - -func (c *FakeAdmissionregistrationV1beta1) MutatingWebhookConfigurations() v1beta1.MutatingWebhookConfigurationInterface { - return &FakeMutatingWebhookConfigurations{c} -} - -func (c *FakeAdmissionregistrationV1beta1) ValidatingAdmissionPolicies() v1beta1.ValidatingAdmissionPolicyInterface { - return &FakeValidatingAdmissionPolicies{c} -} - -func (c *FakeAdmissionregistrationV1beta1) ValidatingAdmissionPolicyBindings() v1beta1.ValidatingAdmissionPolicyBindingInterface { - return &FakeValidatingAdmissionPolicyBindings{c} -} - -func (c *FakeAdmissionregistrationV1beta1) ValidatingWebhookConfigurations() v1beta1.ValidatingWebhookConfigurationInterface { - return &FakeValidatingWebhookConfigurations{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAdmissionregistrationV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go deleted file mode 100644 index 9d85aff37..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeMutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface -type FakeMutatingWebhookConfigurations struct { - Fake *FakeAdmissionregistrationV1beta1 -} - -var mutatingwebhookconfigurationsResource = v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations") - -var mutatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration") - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. 
-func (c *FakeMutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MutatingWebhookConfiguration), err -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *FakeMutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1beta1.MutatingWebhookConfigurationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.MutatingWebhookConfigurationList{ListMeta: obj.(*v1beta1.MutatingWebhookConfigurationList).ListMeta} - for _, item := range obj.(*v1beta1.MutatingWebhookConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *FakeMutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts)) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MutatingWebhookConfiguration), err -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *FakeMutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MutatingWebhookConfiguration), err -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeMutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(mutatingwebhookconfigurationsResource, name, opts), &v1beta1.MutatingWebhookConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. 
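The deleted List above filtered the tracker's objects by label selector; the generated replacement fakes are expected to keep that behaviour. A test sketch under that assumption (names and labels are made up):

package fake_test

import (
	"context"
	"testing"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sfake "k8s.io/client-go/kubernetes/fake"
)

func TestFakeMutatingWebhookConfigurationsLabelSelector(t *testing.T) {
	a := &admissionregistrationv1beta1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "hook-a", Labels: map[string]string{"team": "platform"}},
	}
	b := &admissionregistrationv1beta1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "hook-b"},
	}
	clientset := k8sfake.NewSimpleClientset(a, b)

	// Only the labelled configuration should come back from the fake's List.
	list, err := clientset.AdmissionregistrationV1beta1().
		MutatingWebhookConfigurations().
		List(context.Background(), metav1.ListOptions{LabelSelector: "team=platform"})
	if err != nil {
		t.Fatalf("List returned error: %v", err)
	}
	if len(list.Items) != 1 || list.Items[0].Name != "hook-a" {
		t.Fatalf("unexpected list result: %+v", list.Items)
	}
}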
-func (c *FakeMutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.MutatingWebhookConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *FakeMutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.MutatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MutatingWebhookConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *FakeMutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.MutatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.MutatingWebhookConfiguration), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go deleted file mode 100644 index 90cb4ff6c..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicy.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeValidatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface -type FakeValidatingAdmissionPolicies struct { - Fake *FakeAdmissionregistrationV1beta1 -} - -var validatingadmissionpoliciesResource = v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicies") - -var validatingadmissionpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicy") - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *FakeValidatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpoliciesResource, name), &v1beta1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *FakeValidatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpoliciesResource, validatingadmissionpoliciesKind, opts), &v1beta1.ValidatingAdmissionPolicyList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ValidatingAdmissionPolicyList{ListMeta: obj.(*v1beta1.ValidatingAdmissionPolicyList).ListMeta} - for _, item := range obj.(*v1beta1.ValidatingAdmissionPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. -func (c *FakeValidatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpoliciesResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. 
-func (c *FakeValidatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpoliciesResource, validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeValidatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(validatingadmissionpoliciesResource, "status", validatingAdmissionPolicy), &v1beta1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *FakeValidatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpoliciesResource, name, opts), &v1beta1.ValidatingAdmissionPolicy{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpoliciesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ValidatingAdmissionPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. -func (c *FakeValidatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, name, pt, data, subresources...), &v1beta1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. -func (c *FakeValidatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeValidatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpoliciesResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.ValidatingAdmissionPolicy{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go deleted file mode 100644 index f771f81f3..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingadmissionpolicybinding.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeValidatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface -type FakeValidatingAdmissionPolicyBindings struct { - Fake *FakeAdmissionregistrationV1beta1 -} - -var validatingadmissionpolicybindingsResource = v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings") - -var validatingadmissionpolicybindingsKind = v1beta1.SchemeGroupVersion.WithKind("ValidatingAdmissionPolicyBinding") - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. 
-func (c *FakeValidatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingadmissionpolicybindingsResource, name), &v1beta1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *FakeValidatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingadmissionpolicybindingsResource, validatingadmissionpolicybindingsKind, opts), &v1beta1.ValidatingAdmissionPolicyBindingList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ValidatingAdmissionPolicyBindingList{ListMeta: obj.(*v1beta1.ValidatingAdmissionPolicyBindingList).ListMeta} - for _, item := range obj.(*v1beta1.ValidatingAdmissionPolicyBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *FakeValidatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingadmissionpolicybindingsResource, opts)) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1beta1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *FakeValidatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingadmissionpolicybindingsResource, validatingAdmissionPolicyBinding), &v1beta1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *FakeValidatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(validatingadmissionpolicybindingsResource, name, opts), &v1beta1.ValidatingAdmissionPolicyBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingadmissionpolicybindingsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ValidatingAdmissionPolicyBindingList{}) - return err -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, name, pt, data, subresources...), &v1beta1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *FakeValidatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingadmissionpolicybindingsResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingAdmissionPolicyBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go deleted file mode 100644 index 41e3a7c1e..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeValidatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface -type FakeValidatingWebhookConfigurations struct { - Fake *FakeAdmissionregistrationV1beta1 -} - -var validatingwebhookconfigurationsResource = v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations") - -var validatingwebhookconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration") - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *FakeValidatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), err -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *FakeValidatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1beta1.ValidatingWebhookConfigurationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ValidatingWebhookConfigurationList{ListMeta: obj.(*v1beta1.ValidatingWebhookConfigurationList).ListMeta} - for _, item := range obj.(*v1beta1.ValidatingWebhookConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *FakeValidatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts)) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), err -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. 
Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *FakeValidatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), err -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *FakeValidatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(validatingwebhookconfigurationsResource, name, opts), &v1beta1.ValidatingWebhookConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeValidatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ValidatingWebhookConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *FakeValidatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, pt, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. -func (c *FakeValidatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.ValidatingWebhookConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index ca6bb8bd5..17e3541cc 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. @@ -41,157 +38,38 @@ type MutatingWebhookConfigurationsGetter interface { // MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. 
type MutatingWebhookConfigurationInterface interface { - Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.MutatingWebhookConfiguration, error) - Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.MutatingWebhookConfiguration, error) + Create(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error) + Update(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.MutatingWebhookConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) - Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.MutatingWebhookConfiguration, err error) + Apply(ctx context.Context, mutatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.MutatingWebhookConfiguration, err error) MutatingWebhookConfigurationExpansion } // mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface type mutatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.MutatingWebhookConfiguration, *admissionregistrationv1beta1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration] } // newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations { return &mutatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.MutatingWebhookConfiguration, *admissionregistrationv1beta1.MutatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration]( + "mutatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1beta1.MutatingWebhookConfiguration { + return &admissionregistrationv1beta1.MutatingWebhookConfiguration{} + }, + func() 
*admissionregistrationv1beta1.MutatingWebhookConfigurationList { + return &admissionregistrationv1beta1.MutatingWebhookConfigurationList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1beta1.MutatingWebhookConfiguration](), + ), } } - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.MutatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Post(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Put(). - Resource("mutatingwebhookconfigurations"). - Name(mutatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(mutatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). 
- Resource("mutatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("mutatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Apply(ctx context.Context, mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - if mutatingWebhookConfiguration == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(mutatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := mutatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("mutatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("mutatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go index bea51b587..2c663ba1e 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface. @@ -41,203 +38,42 @@ type ValidatingAdmissionPoliciesGetter interface { // ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources. type ValidatingAdmissionPolicyInterface interface { - Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) - Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) - UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error) + Create(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error) + Update(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) - Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) - ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error) + Apply(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error) ValidatingAdmissionPolicyExpansion } // validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface type validatingAdmissionPolicies struct { - client rest.Interface + *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicy, *admissionregistrationv1beta1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration] } // newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies func newValidatingAdmissionPolicies(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicies { return &validatingAdmissionPolicies{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicy, *admissionregistrationv1beta1.ValidatingAdmissionPolicyList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration]( + "validatingadmissionpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1beta1.ValidatingAdmissionPolicy { + return &admissionregistrationv1beta1.ValidatingAdmissionPolicy{} + }, + func() *admissionregistrationv1beta1.ValidatingAdmissionPolicyList { + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1beta1.ValidatingAdmissionPolicy](), + ), } } - -// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any. -func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors. -func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ValidatingAdmissionPolicyList{} - err = c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies. -func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. 
-func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Post(). - Resource("validatingadmissionpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any. -func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Put(). - Resource("validatingadmissionpolicies"). - Name(validatingAdmissionPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicy. -func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy. 
-func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) { - if validatingAdmissionPolicy == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicy) - if err != nil { - return nil, err - } - - name := validatingAdmissionPolicy.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply") - } - - result = &v1beta1.ValidatingAdmissionPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicies"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index bba37bb04..196cc8f0a 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface. 
@@ -41,157 +38,38 @@ type ValidatingAdmissionPolicyBindingsGetter interface { // ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources. type ValidatingAdmissionPolicyBindingInterface interface { - Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error) - Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error) + Create(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error) + Update(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) - Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, err error) + Apply(ctx context.Context, validatingAdmissionPolicyBinding *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, err error) ValidatingAdmissionPolicyBindingExpansion } // validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface type validatingAdmissionPolicyBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration] } // newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicyBindings { return &validatingAdmissionPolicyBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, 
*admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration]( + "validatingadmissionpolicybindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding { + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{} + }, + func() *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList { + return &admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding](), + ), } } - -// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any. -func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors. -func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ValidatingAdmissionPolicyBindingList{} - err = c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings. -func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. -func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Post(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any. 
-func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Put(). - Resource("validatingadmissionpolicybindings"). - Name(validatingAdmissionPolicyBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingAdmissionPolicyBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs. -func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingadmissionpolicybindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(pt). - Resource("validatingadmissionpolicybindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding. -func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) { - if validatingAdmissionPolicyBinding == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingAdmissionPolicyBinding) - if err != nil { - return nil, err - } - name := validatingAdmissionPolicyBinding.Name - if name == nil { - return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply") - } - result = &v1beta1.ValidatingAdmissionPolicyBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingadmissionpolicybindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 5ba5974d7..9f28346e8 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + applyconfigurationsadmissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. @@ -41,157 +38,38 @@ type ValidatingWebhookConfigurationsGetter interface { // ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. type ValidatingWebhookConfigurationInterface interface { - Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.ValidatingWebhookConfiguration, error) - Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + Create(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error) + Update(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*admissionregistrationv1beta1.ValidatingWebhookConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) - Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) + Patch(ctx context.Context, name string, pt 
types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admissionregistrationv1beta1.ValidatingWebhookConfiguration, err error) + Apply(ctx context.Context, validatingWebhookConfiguration *applyconfigurationsadmissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *admissionregistrationv1beta1.ValidatingWebhookConfiguration, err error) ValidatingWebhookConfigurationExpansion } // validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface type validatingWebhookConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*admissionregistrationv1beta1.ValidatingWebhookConfiguration, *admissionregistrationv1beta1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration] } // newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations { return &validatingWebhookConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*admissionregistrationv1beta1.ValidatingWebhookConfiguration, *admissionregistrationv1beta1.ValidatingWebhookConfigurationList, *applyconfigurationsadmissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration]( + "validatingwebhookconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *admissionregistrationv1beta1.ValidatingWebhookConfiguration { + return &admissionregistrationv1beta1.ValidatingWebhookConfiguration{} + }, + func() *admissionregistrationv1beta1.ValidatingWebhookConfigurationList { + return &admissionregistrationv1beta1.ValidatingWebhookConfigurationList{} + }, + gentype.PrefersProtobuf[*admissionregistrationv1beta1.ValidatingWebhookConfiguration](), + ), } } - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ValidatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("validatingwebhookconfigurations"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Post(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Put(). - Resource("validatingwebhookconfigurations"). - Name(validatingWebhookConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(validatingWebhookConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("validatingwebhookconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied validatingWebhookConfiguration. 
-func (c *validatingWebhookConfigurations) Apply(ctx context.Context, validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - if validatingWebhookConfiguration == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(validatingWebhookConfiguration) - if err != nil { - return nil, err - } - name := validatingWebhookConfiguration.Name - if name == nil { - return nil, fmt.Errorf("validatingWebhookConfiguration.Name must be provided to Apply") - } - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("validatingwebhookconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go index 1794cb941..b76fadf91 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/apiserverinternal_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *InternalV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := apiserverinternalv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. 
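Note that the exported ValidatingWebhookConfigurationInterface in this hunk keeps the same method set; only its implementation moves onto gentype.ClientWithListAndApply, and the pruned fake packages appear to be dropped simply because nothing in this module imports them. A minimal consumer-side sketch, assuming a *rest.Config named cfg is already in hand, showing that the call shape for existing callers stays the same:

    // Sketch only: cfg is assumed to be an existing *rest.Config (e.g. built via clientcmd).
    package example

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func printWebhookConfigs(ctx context.Context, cfg *rest.Config) error {
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            return err
        }
        // Same call as before this change; the gentype wrapper now handles the REST plumbing.
        list, err := cs.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(ctx, metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, item := range list.Items {
            fmt.Println(item.Name)
        }
        return nil
    }
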
-package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go deleted file mode 100644 index 0960a5e81..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_apiserverinternal_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeInternalV1alpha1 struct { - *testing.Fake -} - -func (c *FakeInternalV1alpha1) StorageVersions() v1alpha1.StorageVersionInterface { - return &FakeStorageVersions{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeInternalV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go deleted file mode 100644 index 738c68038..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake/fake_storageversion.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - apiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeStorageVersions implements StorageVersionInterface -type FakeStorageVersions struct { - Fake *FakeInternalV1alpha1 -} - -var storageversionsResource = v1alpha1.SchemeGroupVersion.WithResource("storageversions") - -var storageversionsKind = v1alpha1.SchemeGroupVersion.WithKind("StorageVersion") - -// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. -func (c *FakeStorageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageversionsResource, name), &v1alpha1.StorageVersion{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. -func (c *FakeStorageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageversionsResource, storageversionsKind, opts), &v1alpha1.StorageVersionList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.StorageVersionList{ListMeta: obj.(*v1alpha1.StorageVersionList).ListMeta} - for _, item := range obj.(*v1alpha1.StorageVersionList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested storageVersions. -func (c *FakeStorageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageversionsResource, opts)) -} - -// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *FakeStorageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *FakeStorageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(storageversionsResource, storageVersion), &v1alpha1.StorageVersion{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStorageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(storageversionsResource, "status", storageVersion), &v1alpha1.StorageVersion{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. -func (c *FakeStorageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(storageversionsResource, name, opts), &v1alpha1.StorageVersion{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStorageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageversionsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionList{}) - return err -} - -// Patch applies the patch and returns the patched storageVersion. -func (c *FakeStorageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, name, pt, data, subresources...), &v1alpha1.StorageVersion{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersion. -func (c *FakeStorageVersions) Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, *name, types.ApplyPatchType, data), &v1alpha1.StorageVersion{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersion), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeStorageVersions) ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(storageversionsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.StorageVersion{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersion), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go index 18789c7f8..cea897b3d 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/storageversion.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - apiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" + applyconfigurationsapiserverinternalv1alpha1 "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageVersionsGetter has a method to return a StorageVersionInterface. @@ -41,203 +38,40 @@ type StorageVersionsGetter interface { // StorageVersionInterface has methods to work with StorageVersion resources. type StorageVersionInterface interface { - Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (*v1alpha1.StorageVersion, error) - Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) - UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (*v1alpha1.StorageVersion, error) + Create(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersion, opts v1.CreateOptions) (*apiserverinternalv1alpha1.StorageVersion, error) + Update(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersion, opts v1.UpdateOptions) (*apiserverinternalv1alpha1.StorageVersion, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersion, opts v1.UpdateOptions) (*apiserverinternalv1alpha1.StorageVersion, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersion, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*apiserverinternalv1alpha1.StorageVersion, error) + List(ctx context.Context, opts v1.ListOptions) (*apiserverinternalv1alpha1.StorageVersionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) - Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) - ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *apiserverinternalv1alpha1.StorageVersion, err error) + Apply(ctx context.Context, storageVersion *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *apiserverinternalv1alpha1.StorageVersion, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, storageVersion *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *apiserverinternalv1alpha1.StorageVersion, err error) StorageVersionExpansion } // storageVersions implements StorageVersionInterface type storageVersions struct { - client rest.Interface + *gentype.ClientWithListAndApply[*apiserverinternalv1alpha1.StorageVersion, *apiserverinternalv1alpha1.StorageVersionList, *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration] } // newStorageVersions returns a StorageVersions func newStorageVersions(c *InternalV1alpha1Client) *storageVersions { return &storageVersions{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*apiserverinternalv1alpha1.StorageVersion, *apiserverinternalv1alpha1.StorageVersionList, *applyconfigurationsapiserverinternalv1alpha1.StorageVersionApplyConfiguration]( + "storageversions", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *apiserverinternalv1alpha1.StorageVersion { return &apiserverinternalv1alpha1.StorageVersion{} }, + func() *apiserverinternalv1alpha1.StorageVersionList { + return &apiserverinternalv1alpha1.StorageVersionList{} + }, + gentype.PrefersProtobuf[*apiserverinternalv1alpha1.StorageVersion](), + ), } } - -// Get takes name of the storageVersion, and returns the corresponding storageVersion object, and an error if there is any. -func (c *storageVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Get(). - Resource("storageversions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageVersions that match those selectors. -func (c *storageVersions) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.StorageVersionList{} - err = c.client.Get(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageVersions. -func (c *storageVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageVersion and creates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *storageVersions) Create(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.CreateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Post(). - Resource("storageversions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageVersion and updates it. Returns the server's representation of the storageVersion, and an error, if there is any. -func (c *storageVersions) Update(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Put(). - Resource("storageversions"). - Name(storageVersion.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *storageVersions) UpdateStatus(ctx context.Context, storageVersion *v1alpha1.StorageVersion, opts v1.UpdateOptions) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Put(). - Resource("storageversions"). - Name(storageVersion.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersion). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageVersion and deletes it. Returns an error if one occurs. -func (c *storageVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageversions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageversions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched storageVersion. -func (c *storageVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersion, err error) { - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(pt). - Resource("storageversions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersion. -func (c *storageVersions) Apply(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *storageVersions) ApplyStatus(ctx context.Context, storageVersion *apiserverinternalv1alpha1.StorageVersionApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersion, err error) { - if storageVersion == nil { - return nil, fmt.Errorf("storageVersion provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersion) - if err != nil { - return nil, err - } - - name := storageVersion.Name - if name == nil { - return nil, fmt.Errorf("storageVersion.Name must be provided to Apply") - } - - result = &v1alpha1.StorageVersion{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversions"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index 397542eeb..cb0bf87ba 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/apps/v1" - "k8s.io/client-go/kubernetes/scheme" + appsv1 "k8s.io/api/apps/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -105,10 +105,10 @@ func New(c rest.Interface) *AppsV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := appsv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index f4b198265..8bf810810 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -41,168 +38,34 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. 
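Two details in this hunk carry over to every client below: setConfigDefaults now builds its serializer through rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs), and the gentype constructor takes the namespace as its fourth argument (empty for cluster-scoped resources, forwarded for namespaced ones such as controllerrevisions), with gentype.PrefersProtobuf signalling that these built-in types may negotiate protobuf encoding. A minimal sketch of reaching the namespaced client through the standard clientset; the namespace "kube-system" is only an example:

    // Sketch only: "kube-system" is an illustrative namespace.
    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func controllerRevisionNames(ctx context.Context, cs kubernetes.Interface) ([]string, error) {
        // ControllerRevisions is namespaced; the namespace passed here is what ends up as
        // the fourth argument of gentype.NewClientWithListAndApply in the hunk above.
        list, err := cs.AppsV1().ControllerRevisions("kube-system").List(ctx, metav1.ListOptions{})
        if err != nil {
            return nil, err
        }
        names := make([]string, 0, len(list.Items))
        for _, cr := range list.Items {
            names = append(names, cr.Name)
        }
        return names, nil
    }
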
type ControllerRevisionInterface interface { - Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (*v1.ControllerRevision, error) - Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (*v1.ControllerRevision, error) + Create(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts metav1.CreateOptions) (*appsv1.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *appsv1.ControllerRevision, opts metav1.UpdateOptions) (*appsv1.ControllerRevision, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ControllerRevision, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ControllerRevisionList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.ControllerRevision, error) + List(ctx context.Context, opts metav1.ListOptions) (*appsv1.ControllerRevisionList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) - Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.ControllerRevision, err error) + Apply(ctx context.Context, controllerRevision *applyconfigurationsappsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.ControllerRevision, err error) ControllerRevisionExpansion } // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1.ControllerRevision, *appsv1.ControllerRevisionList, *applyconfigurationsappsv1.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1.ControllerRevision, *appsv1.ControllerRevisionList, *applyconfigurationsappsv1.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1.ControllerRevision { return &appsv1.ControllerRevision{} }, + func() *appsv1.ControllerRevisionList { return &appsv1.ControllerRevisionList{} }, + gentype.PrefersProtobuf[*appsv1.ControllerRevision](), + ), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. 
-func (c *controllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index 53e539287..6354da219 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -41,216 +38,38 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. type DaemonSetInterface interface { - Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error) - Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) - UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) + Create(ctx context.Context, daemonSet *appsv1.DaemonSet, opts metav1.CreateOptions) (*appsv1.DaemonSet, error) + Update(ctx context.Context, daemonSet *appsv1.DaemonSet, opts metav1.UpdateOptions) (*appsv1.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, daemonSet *appsv1.DaemonSet, opts metav1.UpdateOptions) (*appsv1.DaemonSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DaemonSet, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.DaemonSetList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.DaemonSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*appsv1.DaemonSetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) - Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) - ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.DaemonSet, err error) + Apply(ctx context.Context, daemonSet *applyconfigurationsappsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, daemonSet *applyconfigurationsappsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1.DaemonSet, *appsv1.DaemonSetList, *applyconfigurationsappsv1.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1.DaemonSet, *appsv1.DaemonSetList, *applyconfigurationsappsv1.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1.DaemonSet { return &appsv1.DaemonSet{} }, + func() *appsv1.DaemonSetList { return &appsv1.DaemonSetList{} }, + gentype.PrefersProtobuf[*appsv1.DaemonSet](), + ), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. 
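Apply and ApplyStatus survive the migration with unchanged signatures, so server-side-apply flows keep working against the gentype-backed client. A minimal sketch that patches a label onto a DaemonSet through its apply configuration; the field manager name and label key are illustrative assumptions, not anything defined in this change:

    // Sketch only: the field manager name and label key are illustrative.
    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
        "k8s.io/client-go/kubernetes"
    )

    func labelDaemonSet(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
        // The apply configuration carries only the fields this manager owns; the API
        // server merges them with fields owned by other managers.
        ds := appsv1ac.DaemonSet(name, ns).
            WithLabels(map[string]string{"example.com/managed": "true"})
        _, err := cs.AppsV1().DaemonSets(ns).Apply(ctx, ds, metav1.ApplyOptions{
            FieldManager: "example-controller",
            Force:        true,
        })
        return err
    }
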
-func (c *daemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index ccc2049ff..cc06ccf3a 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -19,20 +19,19 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" + fmt "fmt" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" + apply "k8s.io/client-go/util/apply" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -43,17 +42,19 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. 
type DeploymentInterface interface { - Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error) - Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) - UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) + Create(ctx context.Context, deployment *appsv1.Deployment, opts metav1.CreateOptions) (*appsv1.Deployment, error) + Update(ctx context.Context, deployment *appsv1.Deployment, opts metav1.UpdateOptions) (*appsv1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, deployment *appsv1.Deployment, opts metav1.UpdateOptions) (*appsv1.Deployment, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Deployment, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.DeploymentList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.Deployment, error) + List(ctx context.Context, opts metav1.ListOptions) (*appsv1.DeploymentList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) - Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) - ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.Deployment, err error) + Apply(ctx context.Context, deployment *applyconfigurationsappsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, deployment *applyconfigurationsappsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.Deployment, err error) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,30 @@ type DeploymentInterface interface { // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1.Deployment, *appsv1.DeploymentList, *applyconfigurationsappsv1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1.Deployment, *appsv1.DeploymentList, *applyconfigurationsappsv1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1.Deployment { return &appsv1.Deployment{} }, + func() *appsv1.DeploymentList { return &appsv1.DeploymentList{} }, + gentype.PrefersProtobuf[*appsv1.Deployment](), + ), } } -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. 
Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
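Context for this deployment.go hunk: the client-go v0.32 code generator makes the typed client embed gentype.ClientWithListAndApply, which supplies Get/List/Watch/Create/Update/UpdateStatus/Delete/DeleteCollection/Patch/Apply/ApplyStatus, so the hand-rolled REST plumbing above is deleted and only the scale subresource helpers stay in this file. The exported DeploymentInterface keeps the same method set, so downstream callers should compile unchanged. A minimal sketch of caller code that is indifferent to the rewrite (the function name and error handling are illustrative, not taken from this repository):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printDeploymentReplicas touches only DeploymentInterface methods whose
// signatures are identical before and after the gentype-based rewrite.
func printDeploymentReplicas(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	dep, err := cs.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if dep.Spec.Replicas != nil {
		fmt.Printf("%s/%s desires %d replicas\n", ns, name, *dep.Spec.Replicas)
	}
	return nil
}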
-func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *deployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -278,8 +100,9 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -297,19 +120,19 @@ func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, sca return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = request. + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go deleted file mode 100644 index 458df0fa3..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/apps/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAppsV1 struct { - *testing.Fake -} - -func (c *FakeAppsV1) ControllerRevisions(namespace string) v1.ControllerRevisionInterface { - return &FakeControllerRevisions{c, namespace} -} - -func (c *FakeAppsV1) DaemonSets(namespace string) v1.DaemonSetInterface { - return &FakeDaemonSets{c, namespace} -} - -func (c *FakeAppsV1) Deployments(namespace string) v1.DeploymentInterface { - return &FakeDeployments{c, namespace} -} - -func (c *FakeAppsV1) ReplicaSets(namespace string) v1.ReplicaSetInterface { - return &FakeReplicaSets{c, namespace} -} - -func (c *FakeAppsV1) StatefulSets(namespace string) v1.StatefulSetInterface { - return &FakeStatefulSets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAppsV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go deleted file mode 100644 index f691ba9ac..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
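The fake apps/v1 clients (FakeAppsV1 above, plus the per-resource fakes deleted below) drop out of vendor/ here, presumably because nothing in this module imports them after the client-go bump and `go mod vendor` prunes unused packages; client-go v0.32 itself still ships them, regenerated on top of gentype fakes. Test code that reaches these fakes through the fake clientset should keep working. A minimal sketch, assuming a test package outside this repository (fake.NewSimpleClientset still exists in v0.32, alongside the newer fake.NewClientset):

package example_test

import (
	"context"
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestDeploymentRoundTrip(t *testing.T) {
	// The fake clientset backs AppsV1().Deployments() with an in-memory tracker,
	// whether the fakes are hand-rolled (old) or gentype-based (new).
	cs := fake.NewSimpleClientset()

	if _, err := cs.AppsV1().Deployments("default").Create(
		context.TODO(),
		&appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "demo"}},
		metav1.CreateOptions{},
	); err != nil {
		t.Fatalf("create: %v", err)
	}

	got, err := cs.AppsV1().Deployments("default").Get(context.TODO(), "demo", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("get: %v", err)
	}
	if got.Name != "demo" {
		t.Fatalf("unexpected name %q", got.Name)
	}
}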
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" - testing "k8s.io/client-go/testing" -) - -// FakeControllerRevisions implements ControllerRevisionInterface -type FakeControllerRevisions struct { - Fake *FakeAppsV1 - ns string -} - -var controllerrevisionsResource = v1.SchemeGroupVersion.WithResource("controllerrevisions") - -var controllerrevisionsKind = v1.SchemeGroupVersion.WithKind("ControllerRevision") - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ControllerRevision), err -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *FakeControllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1.ControllerRevisionList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ControllerRevisionList{ListMeta: obj.(*v1.ControllerRevisionList).ListMeta} - for _, item := range obj.(*v1.ControllerRevisionList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *FakeControllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) - -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ControllerRevision), err -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ControllerRevision), err -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. 
-func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &v1.ControllerRevision{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ControllerRevisionList{}) - return err -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ControllerRevision), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1.ControllerRevisionApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ControllerRevision), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go deleted file mode 100644 index 3e0df7235..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
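All of the deleted fakes in this part of the diff share one shape: every method funnels into c.Fake.Invokes(testing.New…Action(...)), so tests can override behaviour by registering reactors on the embedded testing.Fake. That extension point survives the move to gentype-generated fakes in client-go v0.32. A hedged sketch of a reactor-based test hook (the injected error and the choice of daemonsets are illustrative):

package example_test

import (
	"context"
	"errors"
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func TestCreateDaemonSetRejected(t *testing.T) {
	cs := fake.NewSimpleClientset()

	// PrependReactor intercepts matching actions before the default
	// object-tracker reaction runs; here every daemonsets create fails.
	cs.PrependReactor("create", "daemonsets",
		func(action k8stesting.Action) (bool, runtime.Object, error) {
			return true, nil, errors.New("injected failure")
		})

	_, err := cs.AppsV1().DaemonSets("default").Create(
		context.TODO(),
		&appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "demo"}},
		metav1.CreateOptions{})
	if err == nil {
		t.Fatal("expected the injected failure")
	}
}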
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" - testing "k8s.io/client-go/testing" -) - -// FakeDaemonSets implements DaemonSetInterface -type FakeDaemonSets struct { - Fake *FakeAppsV1 - ns string -} - -var daemonsetsResource = v1.SchemeGroupVersion.WithResource("daemonsets") - -var daemonsetsKind = v1.SchemeGroupVersion.WithKind("DaemonSet") - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *FakeDaemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.DaemonSet), err -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *FakeDaemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1.DaemonSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.DaemonSetList{ListMeta: obj.(*v1.DaemonSetList).ListMeta} - for _, item := range obj.(*v1.DaemonSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.DaemonSet), err -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.DaemonSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.DaemonSet), err -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &v1.DaemonSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.DaemonSetList{}) - return err -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.DaemonSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.DaemonSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1.DaemonSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.DaemonSet), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go deleted file mode 100644 index da1896fe6..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go +++ /dev/null @@ -1,232 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/apps/v1" - autoscalingv1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" - applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" - testing "k8s.io/client-go/testing" -) - -// FakeDeployments implements DeploymentInterface -type FakeDeployments struct { - Fake *FakeAppsV1 - ns string -} - -var deploymentsResource = v1.SchemeGroupVersion.WithResource("deployments") - -var deploymentsKind = v1.SchemeGroupVersion.WithKind("Deployment") - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Deployment), err -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1.DeploymentList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.DeploymentList{ListMeta: obj.(*v1.DeploymentList).ListMeta} - for _, item := range obj.(*v1.DeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Deployment), err -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Deployment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Deployment), err -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1.Deployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.DeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Deployment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Deployment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Deployment), err -} - -// GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. -func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} - -// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} - -// ApplyScale takes top resource name and the apply declarative configuration for scale, -// applies it and returns the applied scale, and an error, if there is any. -func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { - if scale == nil { - return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") - } - data, err := json.Marshal(scale) - if err != nil { - return nil, err - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go deleted file mode 100644 index dedf19b42..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go +++ /dev/null @@ -1,232 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/apps/v1" - autoscalingv1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" - applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" - testing "k8s.io/client-go/testing" -) - -// FakeReplicaSets implements ReplicaSetInterface -type FakeReplicaSets struct { - Fake *FakeAppsV1 - ns string -} - -var replicasetsResource = v1.SchemeGroupVersion.WithResource("replicasets") - -var replicasetsKind = v1.SchemeGroupVersion.WithKind("ReplicaSet") - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *FakeReplicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicaSet), err -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *FakeReplicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1.ReplicaSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ReplicaSetList{ListMeta: obj.(*v1.ReplicaSetList).ListMeta} - for _, item := range obj.(*v1.ReplicaSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) - -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicaSet), err -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicaSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicaSet), err -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &v1.ReplicaSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ReplicaSetList{}) - return err -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicaSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicaSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicaSet), err -} - -// GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} - -// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} - -// ApplyScale takes top resource name and the apply declarative configuration for scale, -// applies it and returns the applied scale, and an error, if there is any. -func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { - if scale == nil { - return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") - } - data, err := json.Marshal(scale) - if err != nil { - return nil, err - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go deleted file mode 100644 index f1d7d96e8..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go +++ /dev/null @@ -1,232 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/apps/v1" - autoscalingv1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" - applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" - testing "k8s.io/client-go/testing" -) - -// FakeStatefulSets implements StatefulSetInterface -type FakeStatefulSets struct { - Fake *FakeAppsV1 - ns string -} - -var statefulsetsResource = v1.SchemeGroupVersion.WithResource("statefulsets") - -var statefulsetsKind = v1.SchemeGroupVersion.WithKind("StatefulSet") - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. 
-func (c *FakeStatefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.StatefulSet), err -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *FakeStatefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1.StatefulSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.StatefulSetList{ListMeta: obj.(*v1.StatefulSetList).ListMeta} - for _, item := range obj.(*v1.StatefulSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.StatefulSet), err -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.StatefulSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.StatefulSet), err -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &v1.StatefulSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
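The Apply/ApplyStatus methods being deleted in these fakes (and in the real clients earlier in the diff) all encode an *ApplyConfiguration and submit it as an apply patch; in v0.32 that plumbing moves into gentype and k8s.io/client-go/util/apply, but the caller-facing pattern of building an apply configuration and handing it to Apply is unchanged. A minimal sketch of that pattern, with an illustrative field manager name:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleViaApply declares the desired replica count with server-side apply;
// the typed client turns this into an ApplyPatchType request.
func scaleViaApply(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
	cfg := appsv1ac.Deployment(name, ns).
		WithSpec(appsv1ac.DeploymentSpec().WithReplicas(replicas))

	_, err := cs.AppsV1().Deployments(ns).Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}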
-func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.StatefulSetList{}) - return err -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.StatefulSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.StatefulSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.StatefulSet), err -} - -// GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} - -// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} - -// ApplyScale takes top resource name and the apply declarative configuration for scale, -// applies it and returns the applied scale, and an error, if there is any. -func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) { - if scale == nil { - return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") - } - data, err := json.Marshal(scale) - if err != nil { - return nil, err - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, "status"), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index 917ed521f..db0fed952 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -19,20 +19,19 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" + fmt "fmt" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" + apply "k8s.io/client-go/util/apply" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -43,17 +42,19 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (*v1.ReplicaSet, error) - Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) - UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error) + Create(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts metav1.CreateOptions) (*appsv1.ReplicaSet, error) + Update(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts metav1.UpdateOptions) (*appsv1.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, replicaSet *appsv1.ReplicaSet, opts metav1.UpdateOptions) (*appsv1.ReplicaSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicaSet, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicaSetList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.ReplicaSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*appsv1.ReplicaSetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) - Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) - ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.ReplicaSet, err error) + Apply(ctx context.Context, replicaSet *applyconfigurationsappsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, replicaSet *applyconfigurationsappsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.ReplicaSet, err error) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,30 @@ type ReplicaSetInterface interface { // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1.ReplicaSet, *appsv1.ReplicaSetList, *applyconfigurationsappsv1.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1.ReplicaSet, *appsv1.ReplicaSetList, *applyconfigurationsappsv1.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1.ReplicaSet { return &appsv1.ReplicaSet{} }, + func() *appsv1.ReplicaSetList { return &appsv1.ReplicaSetList{} }, + gentype.PrefersProtobuf[*appsv1.ReplicaSet](), + ), } } -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *replicaSets) Apply(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -278,8 +100,9 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). 
@@ -297,19 +120,19 @@ func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, sca return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = request. + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index d1fbb915d..e52cc6159 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -19,20 +19,19 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" + fmt "fmt" - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" + apply "k8s.io/client-go/util/apply" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -43,17 +42,19 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. type StatefulSetInterface interface { - Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error) - Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) - UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error) + Create(ctx context.Context, statefulSet *appsv1.StatefulSet, opts metav1.CreateOptions) (*appsv1.StatefulSet, error) + Update(ctx context.Context, statefulSet *appsv1.StatefulSet, opts metav1.UpdateOptions) (*appsv1.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, statefulSet *appsv1.StatefulSet, opts metav1.UpdateOptions) (*appsv1.StatefulSet, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StatefulSet, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.StatefulSetList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*appsv1.StatefulSet, error) + List(ctx context.Context, opts metav1.ListOptions) (*appsv1.StatefulSetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) - Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) - ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *appsv1.StatefulSet, err error) + Apply(ctx context.Context, statefulSet *applyconfigurationsappsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, statefulSet *applyconfigurationsappsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *appsv1.StatefulSet, err error) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error) @@ -63,209 +64,30 @@ type StatefulSetInterface interface { // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1.StatefulSet, *appsv1.StatefulSetList, *applyconfigurationsappsv1.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1.StatefulSet, *appsv1.StatefulSetList, *applyconfigurationsappsv1.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1.StatefulSet { return &appsv1.StatefulSet{} }, + func() *appsv1.StatefulSetList { return &appsv1.StatefulSetList{} }, + gentype.PrefersProtobuf[*appsv1.StatefulSet](), + ), } } -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -278,8 +100,9 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). 
@@ -297,19 +120,19 @@ func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, s return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } result = &autoscalingv1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + err = request. + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index 6b7148c5a..72bde633b 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + appsv1beta1 "k8s.io/api/apps/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -95,10 +95,10 @@ func New(c rest.Interface) *AppsV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := appsv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index 0c3f49ba1..1bd92695b 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -41,168 +38,34 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. 
type ControllerRevisionInterface interface { - Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (*v1beta1.ControllerRevision, error) - Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (*v1beta1.ControllerRevision, error) + Create(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevision, opts v1.CreateOptions) (*appsv1beta1.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevision, opts v1.UpdateOptions) (*appsv1beta1.ControllerRevision, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ControllerRevision, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta1.ControllerRevision, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta1.ControllerRevisionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) - Apply(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ControllerRevision, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta1.ControllerRevision, err error) + Apply(ctx context.Context, controllerRevision *applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.ControllerRevision, err error) ControllerRevisionExpansion } // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1beta1.ControllerRevision, *appsv1beta1.ControllerRevisionList, *applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1beta1.ControllerRevision, *appsv1beta1.ControllerRevisionList, *applyconfigurationsappsv1beta1.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1beta1.ControllerRevision { return &appsv1beta1.ControllerRevision{} }, + func() *appsv1beta1.ControllerRevisionList { return &appsv1beta1.ControllerRevisionList{} }, + gentype.PrefersProtobuf[*appsv1beta1.ControllerRevision](), + ), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). 
- Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1beta1.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 281758c43..e01dd5a2f 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -41,216 +38,38 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) - Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) - UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + Create(ctx context.Context, deployment *appsv1beta1.Deployment, opts v1.CreateOptions) (*appsv1beta1.Deployment, error) + Update(ctx context.Context, deployment *appsv1beta1.Deployment, opts v1.UpdateOptions) (*appsv1beta1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, deployment *appsv1beta1.Deployment, opts v1.UpdateOptions) (*appsv1beta1.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta1.Deployment, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta1.DeploymentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) - Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) - ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta1.Deployment, err error) + Apply(ctx context.Context, deployment *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, deployment *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.Deployment, err error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1beta1.Deployment, *appsv1beta1.DeploymentList, *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1beta1.Deployment, *appsv1beta1.DeploymentList, *applyconfigurationsappsv1beta1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1beta1.Deployment { return &appsv1beta1.Deployment{} }, + func() *appsv1beta1.DeploymentList { return &appsv1beta1.DeploymentList{} }, + gentype.PrefersProtobuf[*appsv1beta1.Deployment](), + ), } } - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go deleted file mode 100644 index 8e65d78d2..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAppsV1beta1 struct { - *testing.Fake -} - -func (c *FakeAppsV1beta1) ControllerRevisions(namespace string) v1beta1.ControllerRevisionInterface { - return &FakeControllerRevisions{c, namespace} -} - -func (c *FakeAppsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterface { - return &FakeDeployments{c, namespace} -} - -func (c *FakeAppsV1beta1) StatefulSets(namespace string) v1beta1.StatefulSetInterface { - return &FakeStatefulSets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAppsV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go deleted file mode 100644 index 1954c9470..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeControllerRevisions implements ControllerRevisionInterface -type FakeControllerRevisions struct { - Fake *FakeAppsV1beta1 - ns string -} - -var controllerrevisionsResource = v1beta1.SchemeGroupVersion.WithResource("controllerrevisions") - -var controllerrevisionsKind = v1beta1.SchemeGroupVersion.WithKind("ControllerRevision") - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ControllerRevision), err -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta1.ControllerRevisionList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ControllerRevisionList{ListMeta: obj.(*v1beta1.ControllerRevisionList).ListMeta} - for _, item := range obj.(*v1beta1.ControllerRevisionList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) - -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ControllerRevision), err -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ControllerRevision), err -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &v1beta1.ControllerRevision{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ControllerRevisionList{}) - return err -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta1.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ControllerRevision), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta1.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ControllerRevision), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go deleted file mode 100644 index 9614852f7..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeDeployments implements DeploymentInterface -type FakeDeployments struct { - Fake *FakeAppsV1beta1 - ns string -} - -var deploymentsResource = v1beta1.SchemeGroupVersion.WithResource("deployments") - -var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment") - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.DeploymentList{ListMeta: obj.(*v1beta1.DeploymentList).ListMeta} - for _, item := range obj.(*v1beta1.DeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1beta1.Deployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go deleted file mode 100644 index 2124515cf..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeStatefulSets implements StatefulSetInterface -type FakeStatefulSets struct { - Fake *FakeAppsV1beta1 - ns string -} - -var statefulsetsResource = v1beta1.SchemeGroupVersion.WithResource("statefulsets") - -var statefulsetsKind = v1beta1.SchemeGroupVersion.WithKind("StatefulSet") - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta1.StatefulSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.StatefulSetList{ListMeta: obj.(*v1beta1.StatefulSetList).ListMeta} - for _, item := range obj.(*v1beta1.StatefulSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &v1beta1.StatefulSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.StatefulSetList{}) - return err -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StatefulSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StatefulSet), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index 3f1aebcff..b88acdeb0 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + applyconfigurationsappsv1beta1 "k8s.io/client-go/applyconfigurations/apps/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -41,216 +38,38 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. type StatefulSetInterface interface { - Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (*v1beta1.StatefulSet, error) - Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) - UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error) + Create(ctx context.Context, statefulSet *appsv1beta1.StatefulSet, opts v1.CreateOptions) (*appsv1beta1.StatefulSet, error) + Update(ctx context.Context, statefulSet *appsv1beta1.StatefulSet, opts v1.UpdateOptions) (*appsv1beta1.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSet, opts v1.UpdateOptions) (*appsv1beta1.StatefulSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StatefulSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StatefulSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta1.StatefulSet, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta1.StatefulSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) - Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) - ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta1.StatefulSet, err error) + Apply(ctx context.Context, statefulSet *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, statefulSet *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta1.StatefulSet, err error) StatefulSetExpansion } // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1beta1.StatefulSet, *appsv1beta1.StatefulSetList, *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1beta1.StatefulSet, *appsv1beta1.StatefulSetList, *applyconfigurationsappsv1beta1.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1beta1.StatefulSet { return &appsv1beta1.StatefulSet{} }, + func() *appsv1beta1.StatefulSetList { return &appsv1beta1.StatefulSetList{} }, + gentype.PrefersProtobuf[*appsv1beta1.StatefulSet](), + ), } } - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. 
-func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. 
-func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1beta1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta1.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1beta1.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 968abc56f..e13d12a76 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta2 import ( - "net/http" + http "net/http" - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/client-go/kubernetes/scheme" + appsv1beta2 "k8s.io/api/apps/v1beta2" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -105,10 +105,10 @@ func New(c rest.Interface) *AppsV1beta2Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta2.SchemeGroupVersion + gv := appsv1beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index e1643277a..a170805b3 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta2 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. @@ -41,168 +38,34 @@ type ControllerRevisionsGetter interface { // ControllerRevisionInterface has methods to work with ControllerRevision resources. 
type ControllerRevisionInterface interface { - Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (*v1beta2.ControllerRevision, error) - Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (*v1beta2.ControllerRevision, error) + Create(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevision, opts v1.CreateOptions) (*appsv1beta2.ControllerRevision, error) + Update(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevision, opts v1.UpdateOptions) (*appsv1beta2.ControllerRevision, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ControllerRevision, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.ControllerRevision, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.ControllerRevisionList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) - Apply(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ControllerRevision, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.ControllerRevision, err error) + Apply(ctx context.Context, controllerRevision *applyconfigurationsappsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.ControllerRevision, err error) ControllerRevisionExpansion } // controllerRevisions implements ControllerRevisionInterface type controllerRevisions struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1beta2.ControllerRevision, *appsv1beta2.ControllerRevisionList, *applyconfigurationsappsv1beta2.ControllerRevisionApplyConfiguration] } // newControllerRevisions returns a ControllerRevisions func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions { return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1beta2.ControllerRevision, *appsv1beta2.ControllerRevisionList, *applyconfigurationsappsv1beta2.ControllerRevisionApplyConfiguration]( + "controllerrevisions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1beta2.ControllerRevision { return &appsv1beta2.ControllerRevision{} }, + func() *appsv1beta2.ControllerRevisionList { return &appsv1beta2.ControllerRevisionList{} }, + gentype.PrefersProtobuf[*appsv1beta2.ControllerRevision](), + ), } } - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(controllerRevision). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). 
- Resource("controllerrevisions"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *controllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - result = &v1beta2.ControllerRevision{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index 1391df87d..f078121b0 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta2 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -41,216 +38,38 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. type DaemonSetInterface interface { - Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (*v1beta2.DaemonSet, error) - Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) - UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) + Create(ctx context.Context, daemonSet *appsv1beta2.DaemonSet, opts v1.CreateOptions) (*appsv1beta2.DaemonSet, error) + Update(ctx context.Context, daemonSet *appsv1beta2.DaemonSet, opts v1.UpdateOptions) (*appsv1beta2.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSet, opts v1.UpdateOptions) (*appsv1beta2.DaemonSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.DaemonSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DaemonSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.DaemonSet, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.DaemonSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) - Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) - ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.DaemonSet, err error) + Apply(ctx context.Context, daemonSet *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, daemonSet *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1beta2.DaemonSet, *appsv1beta2.DaemonSetList, *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1beta2.DaemonSet, *appsv1beta2.DaemonSetList, *applyconfigurationsappsv1beta2.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1beta2.DaemonSet { return &appsv1beta2.DaemonSet{} }, + func() *appsv1beta2.DaemonSetList { return &appsv1beta2.DaemonSetList{} }, + gentype.PrefersProtobuf[*appsv1beta2.DaemonSet](), + ), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1beta2.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1beta2.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 5bda0d92c..1be57edb2 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta2 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -41,216 +38,38 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. 
type DeploymentInterface interface { - Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (*v1beta2.Deployment, error) - Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) - UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) + Create(ctx context.Context, deployment *appsv1beta2.Deployment, opts v1.CreateOptions) (*appsv1beta2.Deployment, error) + Update(ctx context.Context, deployment *appsv1beta2.Deployment, opts v1.UpdateOptions) (*appsv1beta2.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, deployment *appsv1beta2.Deployment, opts v1.UpdateOptions) (*appsv1beta2.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.Deployment, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DeploymentList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.Deployment, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.DeploymentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) - Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) - ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.Deployment, err error) + Apply(ctx context.Context, deployment *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, deployment *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.Deployment, err error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1beta2.Deployment, *appsv1beta2.DeploymentList, *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *AppsV1beta2Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1beta2.Deployment, *appsv1beta2.DeploymentList, *applyconfigurationsappsv1beta2.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1beta2.Deployment { return &appsv1beta2.Deployment{} }, + func() *appsv1beta2.DeploymentList { return &appsv1beta2.DeploymentList{} }, + gentype.PrefersProtobuf[*appsv1beta2.Deployment](), + ), } } - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
-func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta2.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *deployments) ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta2.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go deleted file mode 100644 index 0ec34a2cd..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAppsV1beta2 struct { - *testing.Fake -} - -func (c *FakeAppsV1beta2) ControllerRevisions(namespace string) v1beta2.ControllerRevisionInterface { - return &FakeControllerRevisions{c, namespace} -} - -func (c *FakeAppsV1beta2) DaemonSets(namespace string) v1beta2.DaemonSetInterface { - return &FakeDaemonSets{c, namespace} -} - -func (c *FakeAppsV1beta2) Deployments(namespace string) v1beta2.DeploymentInterface { - return &FakeDeployments{c, namespace} -} - -func (c *FakeAppsV1beta2) ReplicaSets(namespace string) v1beta2.ReplicaSetInterface { - return &FakeReplicaSets{c, namespace} -} - -func (c *FakeAppsV1beta2) StatefulSets(namespace string) v1beta2.StatefulSetInterface { - return &FakeStatefulSets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAppsV1beta2) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go deleted file mode 100644 index 1bf7fb331..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" - testing "k8s.io/client-go/testing" -) - -// FakeControllerRevisions implements ControllerRevisionInterface -type FakeControllerRevisions struct { - Fake *FakeAppsV1beta2 - ns string -} - -var controllerrevisionsResource = v1beta2.SchemeGroupVersion.WithResource("controllerrevisions") - -var controllerrevisionsKind = v1beta2.SchemeGroupVersion.WithKind("ControllerRevision") - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *FakeControllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ControllerRevision), err -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *FakeControllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta2.ControllerRevisionList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.ControllerRevisionList{ListMeta: obj.(*v1beta2.ControllerRevisionList).ListMeta} - for _, item := range obj.(*v1beta2.ControllerRevisionList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *FakeControllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts)) - -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ControllerRevision), err -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *FakeControllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ControllerRevision), err -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *FakeControllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(controllerrevisionsResource, c.ns, name, opts), &v1beta2.ControllerRevision{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeControllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.ControllerRevisionList{}) - return err -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *FakeControllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, pt, data, subresources...), &v1beta2.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ControllerRevision), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied controllerRevision. -func (c *FakeControllerRevisions) Apply(ctx context.Context, controllerRevision *appsv1beta2.ControllerRevisionApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ControllerRevision, err error) { - if controllerRevision == nil { - return nil, fmt.Errorf("controllerRevision provided to Apply must not be nil") - } - data, err := json.Marshal(controllerRevision) - if err != nil { - return nil, err - } - name := controllerRevision.Name - if name == nil { - return nil, fmt.Errorf("controllerRevision.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.ControllerRevision{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ControllerRevision), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go deleted file mode 100644 index 8f5cfa5a8..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" - testing "k8s.io/client-go/testing" -) - -// FakeDaemonSets implements DaemonSetInterface -type FakeDaemonSets struct { - Fake *FakeAppsV1beta2 - ns string -} - -var daemonsetsResource = v1beta2.SchemeGroupVersion.WithResource("daemonsets") - -var daemonsetsKind = v1beta2.SchemeGroupVersion.WithKind("DaemonSet") - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta2.DaemonSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.DaemonSetList{ListMeta: obj.(*v1beta2.DaemonSetList).ListMeta} - for _, item := range obj.(*v1beta2.DaemonSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta2.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &v1beta2.DaemonSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.DaemonSetList{}) - return err -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.DaemonSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *appsv1beta2.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.DaemonSet), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go deleted file mode 100644 index c9e8ab48b..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" - testing "k8s.io/client-go/testing" -) - -// FakeDeployments implements DeploymentInterface -type FakeDeployments struct { - Fake *FakeAppsV1beta2 - ns string -} - -var deploymentsResource = v1beta2.SchemeGroupVersion.WithResource("deployments") - -var deploymentsKind = v1beta2.SchemeGroupVersion.WithKind("Deployment") - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.Deployment), err -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta2.DeploymentList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.DeploymentList{ListMeta: obj.(*v1beta2.DeploymentList).ListMeta} - for _, item := range obj.(*v1beta2.DeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.Deployment), err -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. 
-func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.Deployment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta2.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.Deployment), err -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1beta2.Deployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.DeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta2.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.Deployment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *FakeDeployments) Apply(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.Deployment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *appsv1beta2.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.Deployment), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go deleted file mode 100644 index 46e1a78a7..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" - testing "k8s.io/client-go/testing" -) - -// FakeReplicaSets implements ReplicaSetInterface -type FakeReplicaSets struct { - Fake *FakeAppsV1beta2 - ns string -} - -var replicasetsResource = v1beta2.SchemeGroupVersion.WithResource("replicasets") - -var replicasetsKind = v1beta2.SchemeGroupVersion.WithKind("ReplicaSet") - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta2.ReplicaSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.ReplicaSetList{ListMeta: obj.(*v1beta2.ReplicaSetList).ListMeta} - for _, item := range obj.(*v1beta2.ReplicaSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) - -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
-func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta2.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &v1beta2.ReplicaSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.ReplicaSetList{}) - return err -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta2.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ReplicaSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.ReplicaSet), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go deleted file mode 100644 index 684f79925..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" - testing "k8s.io/client-go/testing" -) - -// FakeStatefulSets implements StatefulSetInterface -type FakeStatefulSets struct { - Fake *FakeAppsV1beta2 - ns string -} - -var statefulsetsResource = v1beta2.SchemeGroupVersion.WithResource("statefulsets") - -var statefulsetsKind = v1beta2.SchemeGroupVersion.WithKind("StatefulSet") - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *FakeStatefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *FakeStatefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta2.StatefulSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.StatefulSetList{ListMeta: obj.(*v1beta2.StatefulSetList).ListMeta} - for _, item := range obj.(*v1beta2.StatefulSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *FakeStatefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *FakeStatefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStatefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta2.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *FakeStatefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(statefulsetsResource, c.ns, name, opts), &v1beta2.StatefulSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStatefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.StatefulSetList{}) - return err -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *FakeStatefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, pt, data, subresources...), &v1beta2.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *FakeStatefulSets) Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta2.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeStatefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta2.StatefulSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.StatefulSet), err -} - -// GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeStatefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &v1beta2.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.Scale), err -} - -// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeStatefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &v1beta2.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.Scale), err -} - -// ApplyScale takes top resource name and the apply declarative configuration for scale, -// applies it and returns the applied scale, and an error, if there is any. -func (c *FakeStatefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Scale, err error) { - if scale == nil { - return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") - } - data, err := json.Marshal(scale) - if err != nil { - return nil, err - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, statefulSetName, types.ApplyPatchType, data, "status"), &v1beta2.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta2.Scale), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 988d898f7..12bac0923 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta2 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -41,216 +38,38 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (*v1beta2.ReplicaSet, error) - Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) - UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error) + Create(ctx context.Context, replicaSet *appsv1beta2.ReplicaSet, opts v1.CreateOptions) (*appsv1beta2.ReplicaSet, error) + Update(ctx context.Context, replicaSet *appsv1beta2.ReplicaSet, opts v1.UpdateOptions) (*appsv1beta2.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSet, opts v1.UpdateOptions) (*appsv1beta2.ReplicaSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ReplicaSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ReplicaSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.ReplicaSet, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.ReplicaSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) - Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) - ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.ReplicaSet, err error) + Apply(ctx context.Context, replicaSet *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, replicaSet *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.ReplicaSet, err error) ReplicaSetExpansion } // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1beta2.ReplicaSet, *appsv1beta2.ReplicaSetList, *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1beta2.ReplicaSet, *appsv1beta2.ReplicaSetList, *applyconfigurationsappsv1beta2.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1beta2.ReplicaSet { return &appsv1beta2.ReplicaSet{} }, + func() *appsv1beta2.ReplicaSetList { return &appsv1beta2.ReplicaSetList{} }, + gentype.PrefersProtobuf[*appsv1beta2.ReplicaSet](), + ), } } - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *replicaSets) Apply(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *appsv1beta2.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index 0416675d6..c71e93494 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -19,18 +19,17 @@ limitations under the License. package v1beta2 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" + fmt "fmt" - v1beta2 "k8s.io/api/apps/v1beta2" + appsv1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - appsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + applyconfigurationsappsv1beta2 "k8s.io/client-go/applyconfigurations/apps/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" + apply "k8s.io/client-go/util/apply" ) // StatefulSetsGetter has a method to return a StatefulSetInterface. @@ -41,229 +40,52 @@ type StatefulSetsGetter interface { // StatefulSetInterface has methods to work with StatefulSet resources. 
type StatefulSetInterface interface { - Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (*v1beta2.StatefulSet, error) - Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) - UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error) + Create(ctx context.Context, statefulSet *appsv1beta2.StatefulSet, opts v1.CreateOptions) (*appsv1beta2.StatefulSet, error) + Update(ctx context.Context, statefulSet *appsv1beta2.StatefulSet, opts v1.UpdateOptions) (*appsv1beta2.StatefulSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSet, opts v1.UpdateOptions) (*appsv1beta2.StatefulSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.StatefulSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.StatefulSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*appsv1beta2.StatefulSet, error) + List(ctx context.Context, opts v1.ListOptions) (*appsv1beta2.StatefulSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) - Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) - ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) - GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) - UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error) - ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta2.Scale, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *appsv1beta2.StatefulSet, err error) + Apply(ctx context.Context, statefulSet *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.StatefulSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, statefulSet *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.StatefulSet, err error) + GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*appsv1beta2.Scale, error) + UpdateScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.Scale, opts v1.UpdateOptions) (*appsv1beta2.Scale, error) + ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsappsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (*appsv1beta2.Scale, error) StatefulSetExpansion } // statefulSets implements StatefulSetInterface type statefulSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*appsv1beta2.StatefulSet, *appsv1beta2.StatefulSetList, *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration] } // newStatefulSets returns a StatefulSets func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets { return &statefulSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*appsv1beta2.StatefulSet, *appsv1beta2.StatefulSetList, *applyconfigurationsappsv1beta2.StatefulSetApplyConfiguration]( + "statefulsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *appsv1beta2.StatefulSet { return &appsv1beta2.StatefulSet{} }, + func() *appsv1beta2.StatefulSetList { return &appsv1beta2.StatefulSetList{} }, + gentype.PrefersProtobuf[*appsv1beta2.StatefulSet](), + ), } } -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(statefulSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied statefulSet. -func (c *statefulSets) Apply(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - result = &v1beta2.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *statefulSets) ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error) { - if statefulSet == nil { - return nil, fmt.Errorf("statefulSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(statefulSet) - if err != nil { - return nil, err - } - - name := statefulSet.Name - if name == nil { - return nil, fmt.Errorf("statefulSet.Name must be provided to Apply") - } - - result = &v1beta2.StatefulSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("statefulsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any. -func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { - result = &v1beta2.Scale{} - err = c.client.Get(). - Namespace(c.ns). +// GetScale takes name of the statefulSet, and returns the corresponding appsv1beta2.Scale object, and an error if there is any. +func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *appsv1beta2.Scale, err error) { + result = &appsv1beta2.Scale{} + err = c.GetClient().Get(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -274,10 +96,11 @@ func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, opt } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) { - result = &v1beta2.Scale{} - err = c.client.Put(). - Namespace(c.ns). +func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.Scale, opts v1.UpdateOptions) (result *appsv1beta2.Scale, err error) { + result = &appsv1beta2.Scale{} + err = c.GetClient().Put(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). @@ -290,24 +113,24 @@ func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. -func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Scale, err error) { +func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsappsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *appsv1beta2.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } - result = &v1beta2.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). + result = &appsv1beta2.Scale{} + err = request. + UseProtobufAsDefault(). 
+ Namespace(c.GetNamespace()). Resource("statefulsets"). Name(statefulSetName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go index 81be8b2e0..bd5df7798 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/authentication/v1" - "k8s.io/client-go/kubernetes/scheme" + authenticationv1 "k8s.io/api/authentication/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *AuthenticationV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := authenticationv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go deleted file mode 100644 index 865239ff6..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/authentication/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAuthenticationV1 struct { - *testing.Fake -} - -func (c *FakeAuthenticationV1) SelfSubjectReviews() v1.SelfSubjectReviewInterface { - return &FakeSelfSubjectReviews{c} -} - -func (c *FakeAuthenticationV1) TokenReviews() v1.TokenReviewInterface { - return &FakeTokenReviews{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAuthenticationV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go deleted file mode 100644 index e683b3eaa..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_selfsubjectreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1 "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeSelfSubjectReviews implements SelfSubjectReviewInterface -type FakeSelfSubjectReviews struct { - Fake *FakeAuthenticationV1 -} - -var selfsubjectreviewsResource = v1.SchemeGroupVersion.WithResource("selfsubjectreviews") - -var selfsubjectreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectReview") - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1.SelfSubjectReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1.SelfSubjectReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go deleted file mode 100644 index 500e87d06..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1 "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeTokenReviews implements TokenReviewInterface -type FakeTokenReviews struct { - Fake *FakeAuthenticationV1 -} - -var tokenreviewsResource = v1.SchemeGroupVersion.WithResource("tokenreviews") - -var tokenreviewsKind = v1.SchemeGroupVersion.WithKind("TokenReview") - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1.TokenReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1.TokenReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go index bfb9603d6..9113b6a6d 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authentication/v1" + authenticationv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. @@ -35,30 +35,25 @@ type SelfSubjectReviewsGetter interface { // SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. type SelfSubjectReviewInterface interface { - Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (*v1.SelfSubjectReview, error) + Create(ctx context.Context, selfSubjectReview *authenticationv1.SelfSubjectReview, opts metav1.CreateOptions) (*authenticationv1.SelfSubjectReview, error) SelfSubjectReviewExpansion } // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - client rest.Interface + *gentype.Client[*authenticationv1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1Client) *selfSubjectReviews { return &selfSubjectReviews{ - client: c.RESTClient(), + gentype.NewClient[*authenticationv1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authenticationv1.SelfSubjectReview { return &authenticationv1.SelfSubjectReview{} }, + gentype.PrefersProtobuf[*authenticationv1.SelfSubjectReview](), + ), } } - -// Create takes the representation of a selfSubjectReview and creates it. 
Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) { - result = &v1.SelfSubjectReview{} - err = c.client.Post(). - Resource("selfsubjectreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go index ca7cd47d2..ce8b62d1b 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authentication/v1" + authenticationv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // TokenReviewsGetter has a method to return a TokenReviewInterface. @@ -35,30 +35,25 @@ type TokenReviewsGetter interface { // TokenReviewInterface has methods to work with TokenReview resources. type TokenReviewInterface interface { - Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (*v1.TokenReview, error) + Create(ctx context.Context, tokenReview *authenticationv1.TokenReview, opts metav1.CreateOptions) (*authenticationv1.TokenReview, error) TokenReviewExpansion } // tokenReviews implements TokenReviewInterface type tokenReviews struct { - client rest.Interface + *gentype.Client[*authenticationv1.TokenReview] } // newTokenReviews returns a TokenReviews func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { return &tokenReviews{ - client: c.RESTClient(), + gentype.NewClient[*authenticationv1.TokenReview]( + "tokenreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authenticationv1.TokenReview { return &authenticationv1.TokenReview{} }, + gentype.PrefersProtobuf[*authenticationv1.TokenReview](), + ), } } - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) { - result = &v1.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(tokenReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go index 187392661..821265859 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/authentication_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/authentication/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *AuthenticationV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := authenticationv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_authentication_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_authentication_client.go deleted file mode 100644 index 1a1a04f41..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_authentication_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/authentication/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAuthenticationV1alpha1 struct { - *testing.Fake -} - -func (c *FakeAuthenticationV1alpha1) SelfSubjectReviews() v1alpha1.SelfSubjectReviewInterface { - return &FakeSelfSubjectReviews{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeAuthenticationV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go deleted file mode 100644 index a20b3dd76..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake/fake_selfsubjectreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1alpha1 "k8s.io/api/authentication/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeSelfSubjectReviews implements SelfSubjectReviewInterface -type FakeSelfSubjectReviews struct { - Fake *FakeAuthenticationV1alpha1 -} - -var selfsubjectreviewsResource = v1alpha1.SchemeGroupVersion.WithResource("selfsubjectreviews") - -var selfsubjectreviewsKind = v1alpha1.SchemeGroupVersion.WithKind("SelfSubjectReview") - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1alpha1.SelfSubjectReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.SelfSubjectReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go index 7f8b12a46..8d5b176f7 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/selfsubjectreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" - v1alpha1 "k8s.io/api/authentication/v1alpha1" + authenticationv1alpha1 "k8s.io/api/authentication/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. @@ -35,30 +35,25 @@ type SelfSubjectReviewsGetter interface { // SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. 
type SelfSubjectReviewInterface interface { - Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (*v1alpha1.SelfSubjectReview, error) + Create(ctx context.Context, selfSubjectReview *authenticationv1alpha1.SelfSubjectReview, opts v1.CreateOptions) (*authenticationv1alpha1.SelfSubjectReview, error) SelfSubjectReviewExpansion } // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - client rest.Interface + *gentype.Client[*authenticationv1alpha1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1alpha1Client) *selfSubjectReviews { return &selfSubjectReviews{ - client: c.RESTClient(), + gentype.NewClient[*authenticationv1alpha1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authenticationv1alpha1.SelfSubjectReview { return &authenticationv1alpha1.SelfSubjectReview{} }, + gentype.PrefersProtobuf[*authenticationv1alpha1.SelfSubjectReview](), + ), } } - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1alpha1.SelfSubjectReview, opts v1.CreateOptions) (result *v1alpha1.SelfSubjectReview, err error) { - result = &v1alpha1.SelfSubjectReview{} - err = c.client.Post(). - Resource("selfsubjectreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go index 7823729e0..7b22e46e3 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/authentication/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *AuthenticationV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := authenticationv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go deleted file mode 100644 index 1d72cf22f..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAuthenticationV1beta1 struct { - *testing.Fake -} - -func (c *FakeAuthenticationV1beta1) SelfSubjectReviews() v1beta1.SelfSubjectReviewInterface { - return &FakeSelfSubjectReviews{c} -} - -func (c *FakeAuthenticationV1beta1) TokenReviews() v1beta1.TokenReviewInterface { - return &FakeTokenReviews{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAuthenticationV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go deleted file mode 100644 index 4a9db85cf..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_selfsubjectreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - v1beta1 "k8s.io/api/authentication/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeSelfSubjectReviews implements SelfSubjectReviewInterface -type FakeSelfSubjectReviews struct { - Fake *FakeAuthenticationV1beta1 -} - -var selfsubjectreviewsResource = v1beta1.SchemeGroupVersion.WithResource("selfsubjectreviews") - -var selfsubjectreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectReview") - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *FakeSelfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectreviewsResource, selfSubjectReview), &v1beta1.SelfSubjectReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.SelfSubjectReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go deleted file mode 100644 index b1988a67a..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1beta1 "k8s.io/api/authentication/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeTokenReviews implements TokenReviewInterface -type FakeTokenReviews struct { - Fake *FakeAuthenticationV1beta1 -} - -var tokenreviewsResource = v1beta1.SchemeGroupVersion.WithResource("tokenreviews") - -var tokenreviewsKind = v1beta1.SchemeGroupVersion.WithKind("TokenReview") - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *FakeTokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(tokenreviewsResource, tokenReview), &v1beta1.TokenReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.TokenReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go index 9d54826a3..e29f81451 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/selfsubjectreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authentication/v1beta1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface. @@ -35,30 +35,25 @@ type SelfSubjectReviewsGetter interface { // SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources. type SelfSubjectReviewInterface interface { - Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectReview, error) + Create(ctx context.Context, selfSubjectReview *authenticationv1beta1.SelfSubjectReview, opts v1.CreateOptions) (*authenticationv1beta1.SelfSubjectReview, error) SelfSubjectReviewExpansion } // selfSubjectReviews implements SelfSubjectReviewInterface type selfSubjectReviews struct { - client rest.Interface + *gentype.Client[*authenticationv1beta1.SelfSubjectReview] } // newSelfSubjectReviews returns a SelfSubjectReviews func newSelfSubjectReviews(c *AuthenticationV1beta1Client) *selfSubjectReviews { return &selfSubjectReviews{ - client: c.RESTClient(), + gentype.NewClient[*authenticationv1beta1.SelfSubjectReview]( + "selfsubjectreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authenticationv1beta1.SelfSubjectReview { return &authenticationv1beta1.SelfSubjectReview{} }, + gentype.PrefersProtobuf[*authenticationv1beta1.SelfSubjectReview](), + ), } } - -// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any. -func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1beta1.SelfSubjectReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectReview, err error) { - result = &v1beta1.SelfSubjectReview{} - err = c.client.Post(). - Resource("selfsubjectreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go index 5da122433..5e1e002be 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -19,12 +19,12 @@ limitations under the License. 
package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authentication/v1beta1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // TokenReviewsGetter has a method to return a TokenReviewInterface. @@ -35,30 +35,25 @@ type TokenReviewsGetter interface { // TokenReviewInterface has methods to work with TokenReview resources. type TokenReviewInterface interface { - Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (*v1beta1.TokenReview, error) + Create(ctx context.Context, tokenReview *authenticationv1beta1.TokenReview, opts v1.CreateOptions) (*authenticationv1beta1.TokenReview, error) TokenReviewExpansion } // tokenReviews implements TokenReviewInterface type tokenReviews struct { - client rest.Interface + *gentype.Client[*authenticationv1beta1.TokenReview] } // newTokenReviews returns a TokenReviews func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { return &tokenReviews{ - client: c.RESTClient(), + gentype.NewClient[*authenticationv1beta1.TokenReview]( + "tokenreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authenticationv1beta1.TokenReview { return &authenticationv1beta1.TokenReview{} }, + gentype.PrefersProtobuf[*authenticationv1beta1.TokenReview](), + ), } } - -// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, and an error, if there is any. -func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) { - result = &v1beta1.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(tokenReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go index edfc90346..71fb89b38 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/authorization/v1" - "k8s.io/client-go/kubernetes/scheme" + authorizationv1 "k8s.io/api/authorization/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *AuthorizationV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := authorizationv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go deleted file mode 100644 index f7e823450..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/authorization/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAuthorizationV1 struct { - *testing.Fake -} - -func (c *FakeAuthorizationV1) LocalSubjectAccessReviews(namespace string) v1.LocalSubjectAccessReviewInterface { - return &FakeLocalSubjectAccessReviews{c, namespace} -} - -func (c *FakeAuthorizationV1) SelfSubjectAccessReviews() v1.SelfSubjectAccessReviewInterface { - return &FakeSelfSubjectAccessReviews{c} -} - -func (c *FakeAuthorizationV1) SelfSubjectRulesReviews() v1.SelfSubjectRulesReviewInterface { - return &FakeSelfSubjectRulesReviews{c} -} - -func (c *FakeAuthorizationV1) SubjectAccessReviews() v1.SubjectAccessReviewInterface { - return &FakeSubjectAccessReviews{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAuthorizationV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go deleted file mode 100644 index 43ea05328..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1 "k8s.io/api/authorization/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface -type FakeLocalSubjectAccessReviews struct { - Fake *FakeAuthorizationV1 - ns string -} - -var localsubjectaccessreviewsResource = v1.SchemeGroupVersion.WithResource("localsubjectaccessreviews") - -var localsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("LocalSubjectAccessReview") - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1.LocalSubjectAccessReview{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.LocalSubjectAccessReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go deleted file mode 100644 index 27642266d..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1 "k8s.io/api/authorization/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface -type FakeSelfSubjectAccessReviews struct { - Fake *FakeAuthorizationV1 -} - -var selfsubjectaccessreviewsResource = v1.SchemeGroupVersion.WithResource("selfsubjectaccessreviews") - -var selfsubjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectAccessReview") - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1.SelfSubjectAccessReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1.SelfSubjectAccessReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go deleted file mode 100644 index cd6c682d1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - v1 "k8s.io/api/authorization/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface -type FakeSelfSubjectRulesReviews struct { - Fake *FakeAuthorizationV1 -} - -var selfsubjectrulesreviewsResource = v1.SchemeGroupVersion.WithResource("selfsubjectrulesreviews") - -var selfsubjectrulesreviewsKind = v1.SchemeGroupVersion.WithKind("SelfSubjectRulesReview") - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1.SelfSubjectRulesReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1.SelfSubjectRulesReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go deleted file mode 100644 index 09dab6480..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1 "k8s.io/api/authorization/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeSubjectAccessReviews implements SubjectAccessReviewInterface -type FakeSubjectAccessReviews struct { - Fake *FakeAuthorizationV1 -} - -var subjectaccessreviewsResource = v1.SchemeGroupVersion.WithResource("subjectaccessreviews") - -var subjectaccessreviewsKind = v1.SchemeGroupVersion.WithKind("SubjectAccessReview") - -// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1.SubjectAccessReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1.SubjectAccessReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go index 84b2efe16..24327e87f 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authorization/v1" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. @@ -35,33 +35,25 @@ type LocalSubjectAccessReviewsGetter interface { // LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. type LocalSubjectAccessReviewInterface interface { - Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (*v1.LocalSubjectAccessReview, error) + Create(ctx context.Context, localSubjectAccessReview *authorizationv1.LocalSubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.LocalSubjectAccessReview, error) LocalSubjectAccessReviewExpansion } // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface type localSubjectAccessReviews struct { - client rest.Interface - ns string + *gentype.Client[*authorizationv1.LocalSubjectAccessReview] } // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*authorizationv1.LocalSubjectAccessReview]( + "localsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *authorizationv1.LocalSubjectAccessReview { return &authorizationv1.LocalSubjectAccessReview{} }, + gentype.PrefersProtobuf[*authorizationv1.LocalSubjectAccessReview](), + ), } } - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) { - result = &v1.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(localSubjectAccessReview). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go index 2006196c1..014faeffb 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authorization/v1" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. @@ -35,30 +35,25 @@ type SelfSubjectAccessReviewsGetter interface { // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. type SelfSubjectAccessReviewInterface interface { - Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*v1.SelfSubjectAccessReview, error) + Create(ctx context.Context, selfSubjectAccessReview *authorizationv1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.SelfSubjectAccessReview, error) SelfSubjectAccessReviewExpansion } // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { - client rest.Interface + *gentype.Client[*authorizationv1.SelfSubjectAccessReview] } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*authorizationv1.SelfSubjectAccessReview]( + "selfsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authorizationv1.SelfSubjectAccessReview { return &authorizationv1.SelfSubjectAccessReview{} }, + gentype.PrefersProtobuf[*authorizationv1.SelfSubjectAccessReview](), + ), } } - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) { - result = &v1.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go index 25d99f7b5..a14b2d7d5 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go @@ -19,12 +19,12 @@ limitations under the License. 
package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authorization/v1" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. @@ -35,30 +35,25 @@ type SelfSubjectRulesReviewsGetter interface { // SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. type SelfSubjectRulesReviewInterface interface { - Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (*v1.SelfSubjectRulesReview, error) + Create(ctx context.Context, selfSubjectRulesReview *authorizationv1.SelfSubjectRulesReview, opts metav1.CreateOptions) (*authorizationv1.SelfSubjectRulesReview, error) SelfSubjectRulesReviewExpansion } // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type selfSubjectRulesReviews struct { - client rest.Interface + *gentype.Client[*authorizationv1.SelfSubjectRulesReview] } // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews { return &selfSubjectRulesReviews{ - client: c.RESTClient(), + gentype.NewClient[*authorizationv1.SelfSubjectRulesReview]( + "selfsubjectrulesreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authorizationv1.SelfSubjectRulesReview { return &authorizationv1.SelfSubjectRulesReview{} }, + gentype.PrefersProtobuf[*authorizationv1.SelfSubjectRulesReview](), + ), } } - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) { - result = &v1.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectRulesReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go index 8ac0566a2..bdc9955ad 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1 import ( - "context" + context "context" - v1 "k8s.io/api/authorization/v1" + authorizationv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. @@ -35,30 +35,25 @@ type SubjectAccessReviewsGetter interface { // SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. 
type SubjectAccessReviewInterface interface { - Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (*v1.SubjectAccessReview, error) + Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, opts metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, error) SubjectAccessReviewExpansion } // subjectAccessReviews implements SubjectAccessReviewInterface type subjectAccessReviews struct { - client rest.Interface + *gentype.Client[*authorizationv1.SubjectAccessReview] } // newSubjectAccessReviews returns a SubjectAccessReviews func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { return &subjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*authorizationv1.SubjectAccessReview]( + "subjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authorizationv1.SubjectAccessReview { return &authorizationv1.SubjectAccessReview{} }, + gentype.PrefersProtobuf[*authorizationv1.SubjectAccessReview](), + ), } } - -// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) { - result = &v1.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(subjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go index 23b0edf27..f33619eb3 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/authorization/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *AuthorizationV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := authorizationv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go deleted file mode 100644 index 8e328a57b..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAuthorizationV1beta1 struct { - *testing.Fake -} - -func (c *FakeAuthorizationV1beta1) LocalSubjectAccessReviews(namespace string) v1beta1.LocalSubjectAccessReviewInterface { - return &FakeLocalSubjectAccessReviews{c, namespace} -} - -func (c *FakeAuthorizationV1beta1) SelfSubjectAccessReviews() v1beta1.SelfSubjectAccessReviewInterface { - return &FakeSelfSubjectAccessReviews{c} -} - -func (c *FakeAuthorizationV1beta1) SelfSubjectRulesReviews() v1beta1.SelfSubjectRulesReviewInterface { - return &FakeSelfSubjectRulesReviews{c} -} - -func (c *FakeAuthorizationV1beta1) SubjectAccessReviews() v1beta1.SubjectAccessReviewInterface { - return &FakeSubjectAccessReviews{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAuthorizationV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go deleted file mode 100644 index 104e979d1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1beta1 "k8s.io/api/authorization/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface -type FakeLocalSubjectAccessReviews struct { - Fake *FakeAuthorizationV1beta1 - ns string -} - -var localsubjectaccessreviewsResource = v1beta1.SchemeGroupVersion.WithResource("localsubjectaccessreviews") - -var localsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("LocalSubjectAccessReview") - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *FakeLocalSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(localsubjectaccessreviewsResource, c.ns, localSubjectAccessReview), &v1beta1.LocalSubjectAccessReview{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.LocalSubjectAccessReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go deleted file mode 100644 index 517e48b76..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1beta1 "k8s.io/api/authorization/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface -type FakeSelfSubjectAccessReviews struct { - Fake *FakeAuthorizationV1beta1 -} - -var selfsubjectaccessreviewsResource = v1beta1.SchemeGroupVersion.WithResource("selfsubjectaccessreviews") - -var selfsubjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectAccessReview") - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. 
-func (c *FakeSelfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectaccessreviewsResource, selfSubjectAccessReview), &v1beta1.SelfSubjectAccessReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.SelfSubjectAccessReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go deleted file mode 100644 index 3aed050fc..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1beta1 "k8s.io/api/authorization/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface -type FakeSelfSubjectRulesReviews struct { - Fake *FakeAuthorizationV1beta1 -} - -var selfsubjectrulesreviewsResource = v1beta1.SchemeGroupVersion.WithResource("selfsubjectrulesreviews") - -var selfsubjectrulesreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SelfSubjectRulesReview") - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *FakeSelfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(selfsubjectrulesreviewsResource, selfSubjectRulesReview), &v1beta1.SelfSubjectRulesReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.SelfSubjectRulesReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go deleted file mode 100644 index e9bfa521a..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1beta1 "k8s.io/api/authorization/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - testing "k8s.io/client-go/testing" -) - -// FakeSubjectAccessReviews implements SubjectAccessReviewInterface -type FakeSubjectAccessReviews struct { - Fake *FakeAuthorizationV1beta1 -} - -var subjectaccessreviewsResource = v1beta1.SchemeGroupVersion.WithResource("subjectaccessreviews") - -var subjectaccessreviewsKind = v1beta1.SchemeGroupVersion.WithKind("SubjectAccessReview") - -// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *FakeSubjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(subjectaccessreviewsResource, subjectAccessReview), &v1beta1.SubjectAccessReview{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.SubjectAccessReview), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go index 78584ba94..8dcc984f7 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authorization/v1beta1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. @@ -35,33 +35,27 @@ type LocalSubjectAccessReviewsGetter interface { // LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. 
type LocalSubjectAccessReviewInterface interface { - Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.LocalSubjectAccessReview, error) + Create(ctx context.Context, localSubjectAccessReview *authorizationv1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (*authorizationv1beta1.LocalSubjectAccessReview, error) LocalSubjectAccessReviewExpansion } // localSubjectAccessReviews implements LocalSubjectAccessReviewInterface type localSubjectAccessReviews struct { - client rest.Interface - ns string + *gentype.Client[*authorizationv1beta1.LocalSubjectAccessReview] } // newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews { return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*authorizationv1beta1.LocalSubjectAccessReview]( + "localsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *authorizationv1beta1.LocalSubjectAccessReview { + return &authorizationv1beta1.LocalSubjectAccessReview{} + }, + gentype.PrefersProtobuf[*authorizationv1beta1.LocalSubjectAccessReview](), + ), } } - -// Create takes the representation of a localSubjectAccessReview and creates it. Returns the server's representation of the localSubjectAccessReview, and an error, if there is any. -func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) { - result = &v1beta1.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(localSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go index 0286c93fe..b1f111f3f 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authorization/v1beta1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. @@ -35,30 +35,27 @@ type SelfSubjectAccessReviewsGetter interface { // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. 
type SelfSubjectAccessReviewInterface interface { - Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectAccessReview, error) + Create(ctx context.Context, selfSubjectAccessReview *authorizationv1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (*authorizationv1beta1.SelfSubjectAccessReview, error) SelfSubjectAccessReviewExpansion } // selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface type selfSubjectAccessReviews struct { - client rest.Interface + *gentype.Client[*authorizationv1beta1.SelfSubjectAccessReview] } // newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews { return &selfSubjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*authorizationv1beta1.SelfSubjectAccessReview]( + "selfsubjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authorizationv1beta1.SelfSubjectAccessReview { + return &authorizationv1beta1.SelfSubjectAccessReview{} + }, + gentype.PrefersProtobuf[*authorizationv1beta1.SelfSubjectAccessReview](), + ), } } - -// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any. -func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) { - result = &v1beta1.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go index d772973ec..11a11b8e6 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authorization/v1beta1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. @@ -35,30 +35,27 @@ type SelfSubjectRulesReviewsGetter interface { // SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. 
type SelfSubjectRulesReviewInterface interface { - Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectRulesReview, error) + Create(ctx context.Context, selfSubjectRulesReview *authorizationv1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (*authorizationv1beta1.SelfSubjectRulesReview, error) SelfSubjectRulesReviewExpansion } // selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface type selfSubjectRulesReviews struct { - client rest.Interface + *gentype.Client[*authorizationv1beta1.SelfSubjectRulesReview] } // newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews { return &selfSubjectRulesReviews{ - client: c.RESTClient(), + gentype.NewClient[*authorizationv1beta1.SelfSubjectRulesReview]( + "selfsubjectrulesreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authorizationv1beta1.SelfSubjectRulesReview { + return &authorizationv1beta1.SelfSubjectRulesReview{} + }, + gentype.PrefersProtobuf[*authorizationv1beta1.SelfSubjectRulesReview](), + ), } } - -// Create takes the representation of a selfSubjectRulesReview and creates it. Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any. -func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) { - result = &v1beta1.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(selfSubjectRulesReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go index aebe8398c..b62537521 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -19,12 +19,12 @@ limitations under the License. package v1beta1 import ( - "context" + context "context" - v1beta1 "k8s.io/api/authorization/v1beta1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. @@ -35,30 +35,25 @@ type SubjectAccessReviewsGetter interface { // SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. 
type SubjectAccessReviewInterface interface { - Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SubjectAccessReview, error) + Create(ctx context.Context, subjectAccessReview *authorizationv1beta1.SubjectAccessReview, opts v1.CreateOptions) (*authorizationv1beta1.SubjectAccessReview, error) SubjectAccessReviewExpansion } // subjectAccessReviews implements SubjectAccessReviewInterface type subjectAccessReviews struct { - client rest.Interface + *gentype.Client[*authorizationv1beta1.SubjectAccessReview] } // newSubjectAccessReviews returns a SubjectAccessReviews func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews { return &subjectAccessReviews{ - client: c.RESTClient(), + gentype.NewClient[*authorizationv1beta1.SubjectAccessReview]( + "subjectaccessreviews", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *authorizationv1beta1.SubjectAccessReview { return &authorizationv1beta1.SubjectAccessReview{} }, + gentype.PrefersProtobuf[*authorizationv1beta1.SubjectAccessReview](), + ), } } - -// Create takes the representation of a subjectAccessReview and creates it. Returns the server's representation of the subjectAccessReview, and an error, if there is any. -func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) { - result = &v1beta1.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(subjectAccessReview). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go index f3a2752cb..6ceaaf82a 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/autoscaling/v1" - "k8s.io/client-go/kubernetes/scheme" + autoscalingv1 "k8s.io/api/autoscaling/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := autoscalingv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go deleted file mode 100644 index 99e26fcf3..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAutoscalingV1 struct { - *testing.Fake -} - -func (c *FakeAutoscalingV1) HorizontalPodAutoscalers(namespace string) v1.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAutoscalingV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go deleted file mode 100644 index a2c95b753..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - autoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" - testing "k8s.io/client-go/testing" -) - -// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type FakeHorizontalPodAutoscalers struct { - Fake *FakeAutoscalingV1 - ns string -} - -var horizontalpodautoscalersResource = v1.SchemeGroupVersion.WithResource("horizontalpodautoscalers") - -var horizontalpodautoscalersKind = v1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v1.HorizontalPodAutoscalerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.HorizontalPodAutoscalerList{ListMeta: obj.(*v1.HorizontalPodAutoscalerList).ListMeta} - for _, item := range obj.(*v1.HorizontalPodAutoscalerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) - -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v1.HorizontalPodAutoscaler{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.HorizontalPodAutoscalerList{}) - return err -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.HorizontalPodAutoscaler), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 19afde66d..c5c69b7c6 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/autoscaling/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - autoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -41,216 +38,38 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v1.HorizontalPodAutoscaler, error) - Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) - UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*autoscalingv1.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts metav1.ListOptions) (*autoscalingv1.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) - Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) - ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error) + Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*autoscalingv1.HorizontalPodAutoscaler, *autoscalingv1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*autoscalingv1.HorizontalPodAutoscaler, *autoscalingv1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv1.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *autoscalingv1.HorizontalPodAutoscaler { return &autoscalingv1.HorizontalPodAutoscaler{} }, + func() *autoscalingv1.HorizontalPodAutoscalerList { return &autoscalingv1.HorizontalPodAutoscalerList{} }, + gentype.PrefersProtobuf[*autoscalingv1.HorizontalPodAutoscaler](), + ), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. 
-func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. 
Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscalerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go index 04d5d0f94..78a2609bf 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/autoscaling_client.go @@ -19,10 +19,10 @@ limitations under the License. package v2 import ( - "net/http" + http "net/http" - v2 "k8s.io/api/autoscaling/v2" - "k8s.io/client-go/kubernetes/scheme" + autoscalingv2 "k8s.io/api/autoscaling/v2" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV2Client { } func setConfigDefaults(config *rest.Config) error { - gv := v2.SchemeGroupVersion + gv := autoscalingv2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_autoscaling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_autoscaling_client.go deleted file mode 100644 index d4b907f4b..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_autoscaling_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAutoscalingV2 struct { - *testing.Fake -} - -func (c *FakeAutoscalingV2) HorizontalPodAutoscalers(namespace string) v2.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeAutoscalingV2) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go deleted file mode 100644 index cfcc20823..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake/fake_horizontalpodautoscaler.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v2 "k8s.io/api/autoscaling/v2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" - testing "k8s.io/client-go/testing" -) - -// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type FakeHorizontalPodAutoscalers struct { - Fake *FakeAutoscalingV2 - ns string -} - -var horizontalpodautoscalersResource = v2.SchemeGroupVersion.WithResource("horizontalpodautoscalers") - -var horizontalpodautoscalersKind = v2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2.HorizontalPodAutoscalerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v2.HorizontalPodAutoscalerList{ListMeta: obj.(*v2.HorizontalPodAutoscalerList).ListMeta} - for _, item := range obj.(*v2.HorizontalPodAutoscalerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) - -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v2.HorizontalPodAutoscaler{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v2.HorizontalPodAutoscalerList{}) - return err -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2.HorizontalPodAutoscaler), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go index 3a077d71d..9eb4a6d93 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2/horizontalpodautoscaler.go @@ -19,18 +19,15 @@ limitations under the License. 
package v2 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v2 "k8s.io/api/autoscaling/v2" + autoscalingv2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - autoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" + applyconfigurationsautoscalingv2 "k8s.io/client-go/applyconfigurations/autoscaling/v2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. @@ -41,216 +38,38 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2.HorizontalPodAutoscaler, error) - Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) - UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*autoscalingv2.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.HorizontalPodAutoscaler, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.HorizontalPodAutoscalerList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*autoscalingv2.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts v1.ListOptions) (*autoscalingv2.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) - Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) - ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv2.HorizontalPodAutoscaler, err error) + Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*autoscalingv2.HorizontalPodAutoscaler, *autoscalingv2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*autoscalingv2.HorizontalPodAutoscaler, *autoscalingv2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *autoscalingv2.HorizontalPodAutoscaler { return &autoscalingv2.HorizontalPodAutoscaler{} }, + func() *autoscalingv2.HorizontalPodAutoscalerList { return &autoscalingv2.HorizontalPodAutoscalerList{} }, + gentype.PrefersProtobuf[*autoscalingv2.HorizontalPodAutoscaler](), + ), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). 
- Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.HorizontalPodAutoscaler, err error) { - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go index d1dde5ed1..1fcda17c8 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v2beta1 import ( - "net/http" + http "net/http" - v2beta1 "k8s.io/api/autoscaling/v2beta1" - "k8s.io/client-go/kubernetes/scheme" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV2beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v2beta1.SchemeGroupVersion + gv := autoscalingv2beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go deleted file mode 100644 index be8e0f48e..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAutoscalingV2beta1 struct { - *testing.Fake -} - -func (c *FakeAutoscalingV2beta1) HorizontalPodAutoscalers(namespace string) v2beta1.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeAutoscalingV2beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go deleted file mode 100644 index 0b2658e64..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v2beta1 "k8s.io/api/autoscaling/v2beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" - testing "k8s.io/client-go/testing" -) - -// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type FakeHorizontalPodAutoscalers struct { - Fake *FakeAutoscalingV2beta1 - ns string -} - -var horizontalpodautoscalersResource = v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers") - -var horizontalpodautoscalersKind = v2beta1.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta1.HorizontalPodAutoscalerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v2beta1.HorizontalPodAutoscalerList{ListMeta: obj.(*v2beta1.HorizontalPodAutoscalerList).ListMeta} - for _, item := range obj.(*v2beta1.HorizontalPodAutoscalerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
-func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) - -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v2beta1.HorizontalPodAutoscaler{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v2beta1.HorizontalPodAutoscalerList{}) - return err -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2beta1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2beta1.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta1.HorizontalPodAutoscaler), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 5080912a1..c1dc75ccc 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,18 +19,15 @@ limitations under the License. package v2beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v2beta1 "k8s.io/api/autoscaling/v2beta1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - autoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" + applyconfigurationsautoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. 
@@ -41,216 +38,42 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta1.HorizontalPodAutoscaler, error) - Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) - UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error) - List(ctx context.Context, opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts v1.ListOptions) (*autoscalingv2beta1.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) - Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) - ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv2beta1.HorizontalPodAutoscaler, err error) + Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta1.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta1.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*autoscalingv2beta1.HorizontalPodAutoscaler, *autoscalingv2beta1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*autoscalingv2beta1.HorizontalPodAutoscaler, *autoscalingv2beta1.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *autoscalingv2beta1.HorizontalPodAutoscaler { + return &autoscalingv2beta1.HorizontalPodAutoscaler{} + }, + func() *autoscalingv2beta1.HorizontalPodAutoscalerList { + return &autoscalingv2beta1.HorizontalPodAutoscalerList{} + }, + gentype.PrefersProtobuf[*autoscalingv2beta1.HorizontalPodAutoscaler](), + ), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. 
-func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go index cae1b4e43..62f5b743c 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v2beta2 import ( - "net/http" + http "net/http" - v2beta2 "k8s.io/api/autoscaling/v2beta2" - "k8s.io/client-go/kubernetes/scheme" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *AutoscalingV2beta2Client { } func setConfigDefaults(config *rest.Config) error { - gv := v2beta2.SchemeGroupVersion + gv := autoscalingv2beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go deleted file mode 100644 index 8c36e0e81..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeAutoscalingV2beta2 struct { - *testing.Fake -} - -func (c *FakeAutoscalingV2beta2) HorizontalPodAutoscalers(namespace string) v2beta2.HorizontalPodAutoscalerInterface { - return &FakeHorizontalPodAutoscalers{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeAutoscalingV2beta2) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go deleted file mode 100644 index 0a7c93c3d..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v2beta2 "k8s.io/api/autoscaling/v2beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" - testing "k8s.io/client-go/testing" -) - -// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type FakeHorizontalPodAutoscalers struct { - Fake *FakeAutoscalingV2beta2 - ns string -} - -var horizontalpodautoscalersResource = v2beta2.SchemeGroupVersion.WithResource("horizontalpodautoscalers") - -var horizontalpodautoscalersKind = v2beta2.SchemeGroupVersion.WithKind("HorizontalPodAutoscaler") - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *FakeHorizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *FakeHorizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta2.HorizontalPodAutoscalerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v2beta2.HorizontalPodAutoscalerList{ListMeta: obj.(*v2beta2.HorizontalPodAutoscalerList).ListMeta} - for _, item := range obj.(*v2beta2.HorizontalPodAutoscalerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
-func (c *FakeHorizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts)) - -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *FakeHorizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeHorizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *FakeHorizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(horizontalpodautoscalersResource, c.ns, name, opts), &v2beta2.HorizontalPodAutoscaler{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeHorizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v2beta2.HorizontalPodAutoscalerList{}) - return err -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, pt, data, subresources...), &v2beta2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. -func (c *FakeHorizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data), &v2beta2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeHorizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v2beta2.HorizontalPodAutoscaler{}) - - if obj == nil { - return nil, err - } - return obj.(*v2beta2.HorizontalPodAutoscaler), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index 0ddb9108b..017b3e1fc 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,18 +19,15 @@ limitations under the License. package v2beta2 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v2beta2 "k8s.io/api/autoscaling/v2beta2" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - autoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" + applyconfigurationsautoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. 
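Note (illustrative aside, not part of the vendored diff): in the client-go version being vendored here, the generated typed clients drop their hand-written REST plumbing (client rest.Interface + ns string and one method per verb) and instead embed gentype.ClientWithListAndApply, as the hunks below show for HorizontalPodAutoscalers, CronJobs and Jobs. The exported interfaces are unchanged, so downstream callers keep compiling. A minimal sketch under assumptions — a reachable cluster, a kubeconfig at the default location, and placeholder names "default"/"my-hpa":

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a rest.Config from the default kubeconfig location (assumed to exist).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Same call shape before and after this vendor bump: Get/List/Watch/Patch/Apply
	// are now served by the embedded gentype client rather than per-verb methods.
	hpa, err := clientset.AutoscalingV2beta2().
		HorizontalPodAutoscalers("default").
		Get(context.TODO(), "my-hpa", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(hpa.Name, hpa.Spec.MaxReplicas)
}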
@@ -41,216 +38,42 @@ type HorizontalPodAutoscalersGetter interface { // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. type HorizontalPodAutoscalerInterface interface { - Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error) - Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) - UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error) + Create(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) + Update(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error) - List(ctx context.Context, opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) + List(ctx context.Context, opts v1.ListOptions) (*autoscalingv2beta2.HorizontalPodAutoscalerList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) - Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) - ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *autoscalingv2beta2.HorizontalPodAutoscaler, err error) + Apply(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta2.HorizontalPodAutoscaler, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, horizontalPodAutoscaler *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *autoscalingv2beta2.HorizontalPodAutoscaler, err error) HorizontalPodAutoscalerExpansion } // horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface type horizontalPodAutoscalers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*autoscalingv2beta2.HorizontalPodAutoscaler, *autoscalingv2beta2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration] } // newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) *horizontalPodAutoscalers { return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*autoscalingv2beta2.HorizontalPodAutoscaler, *autoscalingv2beta2.HorizontalPodAutoscalerList, *applyconfigurationsautoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration]( + "horizontalpodautoscalers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *autoscalingv2beta2.HorizontalPodAutoscaler { + return &autoscalingv2beta2.HorizontalPodAutoscaler{} + }, + func() *autoscalingv2beta2.HorizontalPodAutoscalerList { + return &autoscalingv2beta2.HorizontalPodAutoscalerList{} + }, + gentype.PrefersProtobuf[*autoscalingv2beta2.HorizontalPodAutoscaler](), + ), } } - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2beta2.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. 
-func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(horizontalPodAutoscaler). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Apply(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *horizontalPodAutoscalers) ApplyStatus(ctx context.Context, horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscalerApplyConfiguration, opts v1.ApplyOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - if horizontalPodAutoscaler == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(horizontalPodAutoscaler) - if err != nil { - return nil, err - } - - name := horizontalPodAutoscaler.Name - if name == nil { - return nil, fmt.Errorf("horizontalPodAutoscaler.Name must be provided to Apply") - } - - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go index eee144f71..614d049f3 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/batch/v1" - "k8s.io/client-go/kubernetes/scheme" + batchv1 "k8s.io/api/batch/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *BatchV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := batchv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go index 925026321..29ef3e9b7 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/cronjob.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CronJobsGetter has a method to return a CronJobInterface. @@ -41,216 +38,38 @@ type CronJobsGetter interface { // CronJobInterface has methods to work with CronJob resources. type CronJobInterface interface { - Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (*v1.CronJob, error) - Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) - UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) + Create(ctx context.Context, cronJob *batchv1.CronJob, opts metav1.CreateOptions) (*batchv1.CronJob, error) + Update(ctx context.Context, cronJob *batchv1.CronJob, opts metav1.UpdateOptions) (*batchv1.CronJob, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, cronJob *batchv1.CronJob, opts metav1.UpdateOptions) (*batchv1.CronJob, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CronJob, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CronJobList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*batchv1.CronJob, error) + List(ctx context.Context, opts metav1.ListOptions) (*batchv1.CronJobList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) - Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) - ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *batchv1.CronJob, err error) + Apply(ctx context.Context, cronJob *applyconfigurationsbatchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.CronJob, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, cronJob *applyconfigurationsbatchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.CronJob, err error) CronJobExpansion } // cronJobs implements CronJobInterface type cronJobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*batchv1.CronJob, *batchv1.CronJobList, *applyconfigurationsbatchv1.CronJobApplyConfiguration] } // newCronJobs returns a CronJobs func newCronJobs(c *BatchV1Client, namespace string) *cronJobs { return &cronJobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*batchv1.CronJob, *batchv1.CronJobList, *applyconfigurationsbatchv1.CronJobApplyConfiguration]( + "cronjobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *batchv1.CronJob { return &batchv1.CronJob{} }, + func() *batchv1.CronJobList { return &batchv1.CronJobList{} }, + gentype.PrefersProtobuf[*batchv1.CronJob](), + ), } } - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) { - result = &v1.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *cronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - result = &v1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *cronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - - result = &v1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go deleted file mode 100644 index 43d5b0d30..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/batch/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeBatchV1 struct { - *testing.Fake -} - -func (c *FakeBatchV1) CronJobs(namespace string) v1.CronJobInterface { - return &FakeCronJobs{c, namespace} -} - -func (c *FakeBatchV1) Jobs(namespace string) v1.JobInterface { - return &FakeJobs{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeBatchV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go deleted file mode 100644 index 0cbcce6d8..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_cronjob.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" - testing "k8s.io/client-go/testing" -) - -// FakeCronJobs implements CronJobInterface -type FakeCronJobs struct { - Fake *FakeBatchV1 - ns string -} - -var cronjobsResource = v1.SchemeGroupVersion.WithResource("cronjobs") - -var cronjobsKind = v1.SchemeGroupVersion.WithKind("CronJob") - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *FakeCronJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CronJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CronJob), err -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *FakeCronJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CronJobList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1.CronJobList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.CronJobList{ListMeta: obj.(*v1.CronJobList).ListMeta} - for _, item := range obj.(*v1.CronJobList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *FakeCronJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) - -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1.CronJob, opts metav1.CreateOptions) (result *v1.CronJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CronJob), err -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (result *v1.CronJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CronJob), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1.CronJob, opts metav1.UpdateOptions) (*v1.CronJob, error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CronJob), err -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(cronjobsResource, c.ns, name, opts), &v1.CronJob{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.CronJobList{}) - return err -} - -// Patch applies the patch and returns the patched cronJob. -func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CronJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CronJob), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. -func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data), &v1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CronJob), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1.CronJobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CronJob), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go deleted file mode 100644 index cf1a913bd..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" - testing "k8s.io/client-go/testing" -) - -// FakeJobs implements JobInterface -type FakeJobs struct { - Fake *FakeBatchV1 - ns string -} - -var jobsResource = v1.SchemeGroupVersion.WithResource("jobs") - -var jobsKind = v1.SchemeGroupVersion.WithKind("Job") - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *FakeJobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(jobsResource, c.ns, name), &v1.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Job), err -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *FakeJobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(jobsResource, jobsKind, c.ns, opts), &v1.JobList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.JobList{ListMeta: obj.(*v1.JobList).ListMeta} - for _, item := range obj.(*v1.JobList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *FakeJobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(jobsResource, c.ns, opts)) - -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *FakeJobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &v1.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Job), err -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *FakeJobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &v1.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Job), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeJobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &v1.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Job), err -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *FakeJobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(jobsResource, c.ns, name, opts), &v1.Job{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeJobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(jobsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.JobList{}) - return err -} - -// Patch applies the patch and returns the patched job. -func (c *FakeJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, pt, data, subresources...), &v1.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Job), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied job. -func (c *FakeJobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Job), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeJobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Job{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Job), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index c076c80af..d77aa0f03 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/batch/v1" + batchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - batchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // JobsGetter has a method to return a JobInterface. @@ -41,216 +38,38 @@ type JobsGetter interface { // JobInterface has methods to work with Job resources. type JobInterface interface { - Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error) - Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) - UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error) + Create(ctx context.Context, job *batchv1.Job, opts metav1.CreateOptions) (*batchv1.Job, error) + Update(ctx context.Context, job *batchv1.Job, opts metav1.UpdateOptions) (*batchv1.Job, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, job *batchv1.Job, opts metav1.UpdateOptions) (*batchv1.Job, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Job, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.JobList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*batchv1.Job, error) + List(ctx context.Context, opts metav1.ListOptions) (*batchv1.JobList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) - Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) - ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *batchv1.Job, err error) + Apply(ctx context.Context, job *applyconfigurationsbatchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.Job, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, job *applyconfigurationsbatchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *batchv1.Job, err error) JobExpansion } // jobs implements JobInterface type jobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*batchv1.Job, *batchv1.JobList, *applyconfigurationsbatchv1.JobApplyConfiguration] } // newJobs returns a Jobs func newJobs(c *BatchV1Client, namespace string) *jobs { return &jobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*batchv1.Job, *batchv1.JobList, *applyconfigurationsbatchv1.JobApplyConfiguration]( + "jobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *batchv1.Job { return &batchv1.Job{} }, + func() *batchv1.JobList { return &batchv1.JobList{} }, + gentype.PrefersProtobuf[*batchv1.Job](), + ), } } - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Post(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *jobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). 
- SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(job). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched job. -func (c *jobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("jobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied job. -func (c *jobs) Apply(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - result = &v1.Job{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("jobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *jobs) ApplyStatus(ctx context.Context, job *batchv1.JobApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Job, err error) { - if job == nil { - return nil, fmt.Errorf("job provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(job) - if err != nil { - return nil, err - } - - name := job.Name - if name == nil { - return nil, fmt.Errorf("job.Name must be provided to Apply") - } - - result = &v1.Job{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("jobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go index ebbf063ec..2da9e4135 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/batch/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + batchv1beta1 "k8s.io/api/batch/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *BatchV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := batchv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index d687339ae..3091020ba 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/batch/v1beta1" + batchv1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - batchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" + applyconfigurationsbatchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CronJobsGetter has a method to return a CronJobInterface. @@ -41,216 +38,38 @@ type CronJobsGetter interface { // CronJobInterface has methods to work with CronJob resources. type CronJobInterface interface { - Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (*v1beta1.CronJob, error) - Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) - UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) + Create(ctx context.Context, cronJob *batchv1beta1.CronJob, opts v1.CreateOptions) (*batchv1beta1.CronJob, error) + Update(ctx context.Context, cronJob *batchv1beta1.CronJob, opts v1.UpdateOptions) (*batchv1beta1.CronJob, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, cronJob *batchv1beta1.CronJob, opts v1.UpdateOptions) (*batchv1beta1.CronJob, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CronJob, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CronJobList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*batchv1beta1.CronJob, error) + List(ctx context.Context, opts v1.ListOptions) (*batchv1beta1.CronJobList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) - Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) - ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *batchv1beta1.CronJob, err error) + Apply(ctx context.Context, cronJob *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *batchv1beta1.CronJob, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, cronJob *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *batchv1beta1.CronJob, err error) CronJobExpansion } // cronJobs implements CronJobInterface type cronJobs struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*batchv1beta1.CronJob, *batchv1beta1.CronJobList, *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration] } // newCronJobs returns a CronJobs func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs { return &cronJobs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*batchv1beta1.CronJob, *batchv1beta1.CronJobList, *applyconfigurationsbatchv1beta1.CronJobApplyConfiguration]( + "cronjobs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *batchv1beta1.CronJob { return &batchv1beta1.CronJob{} }, + func() *batchv1beta1.CronJobList { return &batchv1beta1.CronJobList{} }, + gentype.PrefersProtobuf[*batchv1beta1.CronJob](), + ), } } - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cronJob). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. 
-func (c *cronJobs) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - result = &v1beta1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *cronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - - result = &v1beta1.CronJob{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("cronjobs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go deleted file mode 100644 index 6f350aed9..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeBatchV1beta1 struct { - *testing.Fake -} - -func (c *FakeBatchV1beta1) CronJobs(namespace string) v1beta1.CronJobInterface { - return &FakeCronJobs{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeBatchV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go deleted file mode 100644 index 9d078f55a..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/batch/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - batchv1beta1 "k8s.io/client-go/applyconfigurations/batch/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeCronJobs implements CronJobInterface -type FakeCronJobs struct { - Fake *FakeBatchV1beta1 - ns string -} - -var cronjobsResource = v1beta1.SchemeGroupVersion.WithResource("cronjobs") - -var cronjobsKind = v1beta1.SchemeGroupVersion.WithKind("CronJob") - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *FakeCronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CronJob), err -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *FakeCronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1beta1.CronJobList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.CronJobList{ListMeta: obj.(*v1beta1.CronJobList).ListMeta} - for _, item := range obj.(*v1beta1.CronJobList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *FakeCronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts)) - -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CronJob), err -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *FakeCronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CronJob), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1beta1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CronJob), err -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *FakeCronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(cronjobsResource, c.ns, name, opts), &v1beta1.CronJob{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.CronJobList{}) - return err -} - -// Patch applies the patch and returns the patched cronJob. -func (c *FakeCronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, pt, data, subresources...), &v1beta1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CronJob), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cronJob. 
-func (c *FakeCronJobs) Apply(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CronJob), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeCronJobs) ApplyStatus(ctx context.Context, cronJob *batchv1beta1.CronJobApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CronJob, err error) { - if cronJob == nil { - return nil, fmt.Errorf("cronJob provided to Apply must not be nil") - } - data, err := json.Marshal(cronJob) - if err != nil { - return nil, err - } - name := cronJob.Name - if name == nil { - return nil, fmt.Errorf("cronJob.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.CronJob{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CronJob), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go index 6d87c539e..60337cd23 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificates_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/certificates/v1" - "k8s.io/client-go/kubernetes/scheme" + certificatesv1 "k8s.io/api/certificates/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *CertificatesV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := certificatesv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go index 0d6b68b29..6863a22d1 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/certificatesigningrequest.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/certificates/v1" + certificatesv1 "k8s.io/api/certificates/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - certificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" + applyconfigurationscertificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. @@ -41,213 +38,51 @@ type CertificateSigningRequestsGetter interface { // CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. type CertificateSigningRequestInterface interface { - Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (*v1.CertificateSigningRequest, error) - Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) - UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) + Create(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.CreateOptions) (*certificatesv1.CertificateSigningRequest, error) + Update(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CertificateSigningRequest, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CertificateSigningRequestList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*certificatesv1.CertificateSigningRequest, error) + List(ctx context.Context, opts metav1.ListOptions) (*certificatesv1.CertificateSigningRequestList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) - Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) - ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) - UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *certificatesv1.CertificateSigningRequest, err error) + Apply(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *certificatesv1.CertificateSigningRequest, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *certificatesv1.CertificateSigningRequest, err error) + UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (*certificatesv1.CertificateSigningRequest, error) CertificateSigningRequestExpansion } // certificateSigningRequests implements CertificateSigningRequestInterface type certificateSigningRequests struct { - client rest.Interface + *gentype.ClientWithListAndApply[*certificatesv1.CertificateSigningRequest, *certificatesv1.CertificateSigningRequestList, *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration] } // newCertificateSigningRequests returns a CertificateSigningRequests func newCertificateSigningRequests(c *CertificatesV1Client) *certificateSigningRequests { return &certificateSigningRequests{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*certificatesv1.CertificateSigningRequest, *certificatesv1.CertificateSigningRequestList, *applyconfigurationscertificatesv1.CertificateSigningRequestApplyConfiguration]( + "certificatesigningrequests", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *certificatesv1.CertificateSigningRequest { return &certificatesv1.CertificateSigningRequest{} }, + func() *certificatesv1.CertificateSigningRequestList { + return &certificatesv1.CertificateSigningRequestList{} + }, + gentype.PrefersProtobuf[*certificatesv1.CertificateSigningRequest](), + ), } } -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. 
-func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. 
-func (c *certificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *certificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - - result = &v1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // UpdateApproval takes the top resource name and the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - result = &v1.CertificateSigningRequest{} - err = c.client.Put(). +func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *certificatesv1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificatesv1.CertificateSigningRequest, err error) { + result = &certificatesv1.CertificateSigningRequest{} + err = c.GetClient().Put(). + UseProtobufAsDefault(). Resource("certificatesigningrequests"). Name(certificateSigningRequestName). SubResource("approval"). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificates_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificates_client.go deleted file mode 100644 index 4779d6169..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificates_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/certificates/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeCertificatesV1 struct { - *testing.Fake -} - -func (c *FakeCertificatesV1) CertificateSigningRequests() v1.CertificateSigningRequestInterface { - return &FakeCertificateSigningRequests{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeCertificatesV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go deleted file mode 100644 index adb7db0bf..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1/fake/fake_certificatesigningrequest.go +++ /dev/null @@ -1,188 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/certificates/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - certificatesv1 "k8s.io/client-go/applyconfigurations/certificates/v1" - testing "k8s.io/client-go/testing" -) - -// FakeCertificateSigningRequests implements CertificateSigningRequestInterface -type FakeCertificateSigningRequests struct { - Fake *FakeCertificatesV1 -} - -var certificatesigningrequestsResource = v1.SchemeGroupVersion.WithResource("certificatesigningrequests") - -var certificatesigningrequestsKind = v1.SchemeGroupVersion.WithKind("CertificateSigningRequest") - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CertificateSigningRequestList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1.CertificateSigningRequestList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.CertificateSigningRequestList{ListMeta: obj.(*v1.CertificateSigningRequestList).ListMeta} - for _, item := range obj.(*v1.CertificateSigningRequestList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.CreateOptions) (result *v1.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (*v1.CertificateSigningRequest, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(certificatesigningrequestsResource, name, opts), &v1.CertificateSigningRequest{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.CertificateSigningRequestList{}) - return err -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data), &v1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1.CertificateSigningRequestApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, "status"), &v1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CertificateSigningRequest), err -} - -// UpdateApproval takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequestName string, certificateSigningRequest *v1.CertificateSigningRequest, opts metav1.UpdateOptions) (result *v1.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "approval", certificateSigningRequest), &v1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CertificateSigningRequest), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go index a9050af94..36e08253a 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/certificates/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *CertificatesV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := certificatesv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go index 970fb15e6..df215ff53 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/clustertrustbundle.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/certificates/v1alpha1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + applyconfigurationscertificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterTrustBundlesGetter has a method to return a ClusterTrustBundleInterface. @@ -41,157 +38,36 @@ type ClusterTrustBundlesGetter interface { // ClusterTrustBundleInterface has methods to work with ClusterTrustBundle resources. type ClusterTrustBundleInterface interface { - Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*v1alpha1.ClusterTrustBundle, error) - Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*v1alpha1.ClusterTrustBundle, error) + Create(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (*certificatesv1alpha1.ClusterTrustBundle, error) + Update(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (*certificatesv1alpha1.ClusterTrustBundle, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTrustBundle, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTrustBundleList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*certificatesv1alpha1.ClusterTrustBundle, error) + List(ctx context.Context, opts v1.ListOptions) (*certificatesv1alpha1.ClusterTrustBundleList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) - Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *certificatesv1alpha1.ClusterTrustBundle, err error) + Apply(ctx context.Context, clusterTrustBundle *applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1alpha1.ClusterTrustBundle, err error) ClusterTrustBundleExpansion } // clusterTrustBundles implements ClusterTrustBundleInterface type clusterTrustBundles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*certificatesv1alpha1.ClusterTrustBundle, *certificatesv1alpha1.ClusterTrustBundleList, *applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration] } // newClusterTrustBundles returns a ClusterTrustBundles func newClusterTrustBundles(c *CertificatesV1alpha1Client) *clusterTrustBundles { return &clusterTrustBundles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*certificatesv1alpha1.ClusterTrustBundle, *certificatesv1alpha1.ClusterTrustBundleList, 
*applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration]( + "clustertrustbundles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *certificatesv1alpha1.ClusterTrustBundle { return &certificatesv1alpha1.ClusterTrustBundle{} }, + func() *certificatesv1alpha1.ClusterTrustBundleList { + return &certificatesv1alpha1.ClusterTrustBundleList{} + }, + gentype.PrefersProtobuf[*certificatesv1alpha1.ClusterTrustBundle](), + ), } } - -// Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. -func (c *clusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Get(). - Resource("clustertrustbundles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. -func (c *clusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterTrustBundleList{} - err = c.client.Get(). - Resource("clustertrustbundles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterTrustBundles. -func (c *clusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clustertrustbundles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. -func (c *clusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Post(). - Resource("clustertrustbundles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterTrustBundle). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. -func (c *clusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Put(). - Resource("clustertrustbundles"). - Name(clusterTrustBundle.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterTrustBundle). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterTrustBundle and deletes it. Returns an error if one occurs. -func (c *clusterTrustBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clustertrustbundles"). - Name(name). - Body(&opts). - Do(ctx). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clustertrustbundles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterTrustBundle. -func (c *clusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Patch(pt). - Resource("clustertrustbundles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterTrustBundle. -func (c *clusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - if clusterTrustBundle == nil { - return nil, fmt.Errorf("clusterTrustBundle provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterTrustBundle) - if err != nil { - return nil, err - } - name := clusterTrustBundle.Name - if name == nil { - return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") - } - result = &v1alpha1.ClusterTrustBundle{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clustertrustbundles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go deleted file mode 100644 index 8ff02cdbb..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeCertificatesV1alpha1 struct { - *testing.Fake -} - -func (c *FakeCertificatesV1alpha1) ClusterTrustBundles() v1alpha1.ClusterTrustBundleInterface { - return &FakeClusterTrustBundles{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeCertificatesV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go deleted file mode 100644 index 2f849cbd7..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_clustertrustbundle.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/certificates/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeClusterTrustBundles implements ClusterTrustBundleInterface -type FakeClusterTrustBundles struct { - Fake *FakeCertificatesV1alpha1 -} - -var clustertrustbundlesResource = v1alpha1.SchemeGroupVersion.WithResource("clustertrustbundles") - -var clustertrustbundlesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterTrustBundle") - -// Get takes name of the clusterTrustBundle, and returns the corresponding clusterTrustBundle object, and an error if there is any. -func (c *FakeClusterTrustBundles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(clustertrustbundlesResource, name), &v1alpha1.ClusterTrustBundle{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterTrustBundle), err -} - -// List takes label and field selectors, and returns the list of ClusterTrustBundles that match those selectors. -func (c *FakeClusterTrustBundles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTrustBundleList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clustertrustbundlesResource, clustertrustbundlesKind, opts), &v1alpha1.ClusterTrustBundleList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterTrustBundleList{ListMeta: obj.(*v1alpha1.ClusterTrustBundleList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterTrustBundleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterTrustBundles. -func (c *FakeClusterTrustBundles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clustertrustbundlesResource, opts)) -} - -// Create takes the representation of a clusterTrustBundle and creates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. -func (c *FakeClusterTrustBundles) Create(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.CreateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clustertrustbundlesResource, clusterTrustBundle), &v1alpha1.ClusterTrustBundle{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterTrustBundle), err -} - -// Update takes the representation of a clusterTrustBundle and updates it. Returns the server's representation of the clusterTrustBundle, and an error, if there is any. -func (c *FakeClusterTrustBundles) Update(ctx context.Context, clusterTrustBundle *v1alpha1.ClusterTrustBundle, opts v1.UpdateOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clustertrustbundlesResource, clusterTrustBundle), &v1alpha1.ClusterTrustBundle{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterTrustBundle), err -} - -// Delete takes name of the clusterTrustBundle and deletes it. Returns an error if one occurs. -func (c *FakeClusterTrustBundles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clustertrustbundlesResource, name, opts), &v1alpha1.ClusterTrustBundle{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterTrustBundles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clustertrustbundlesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterTrustBundleList{}) - return err -} - -// Patch applies the patch and returns the patched clusterTrustBundle. -func (c *FakeClusterTrustBundles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTrustBundle, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clustertrustbundlesResource, name, pt, data, subresources...), &v1alpha1.ClusterTrustBundle{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterTrustBundle), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterTrustBundle. -func (c *FakeClusterTrustBundles) Apply(ctx context.Context, clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterTrustBundle, err error) { - if clusterTrustBundle == nil { - return nil, fmt.Errorf("clusterTrustBundle provided to Apply must not be nil") - } - data, err := json.Marshal(clusterTrustBundle) - if err != nil { - return nil, err - } - name := clusterTrustBundle.Name - if name == nil { - return nil, fmt.Errorf("clusterTrustBundle.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clustertrustbundlesResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterTrustBundle{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterTrustBundle), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go index fa97b441d..f040e7664 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/certificates/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *CertificatesV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := certificatesv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index ec0b9d266..4c6e28c65 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/certificates/v1beta1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" + applyconfigurationscertificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. @@ -41,203 +38,42 @@ type CertificateSigningRequestsGetter interface { // CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. type CertificateSigningRequestInterface interface { - Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequest, error) - Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) - UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) + Create(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*certificatesv1beta1.CertificateSigningRequest, error) + Update(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*certificatesv1beta1.CertificateSigningRequest, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*certificatesv1beta1.CertificateSigningRequest, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*certificatesv1beta1.CertificateSigningRequest, error) + List(ctx context.Context, opts v1.ListOptions) (*certificatesv1beta1.CertificateSigningRequestList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) - Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) - ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *certificatesv1beta1.CertificateSigningRequest, err error) + Apply(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1beta1.CertificateSigningRequest, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, certificateSigningRequest *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1beta1.CertificateSigningRequest, err error) CertificateSigningRequestExpansion } // certificateSigningRequests implements CertificateSigningRequestInterface type certificateSigningRequests struct { - client rest.Interface + *gentype.ClientWithListAndApply[*certificatesv1beta1.CertificateSigningRequest, *certificatesv1beta1.CertificateSigningRequestList, *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration] } // newCertificateSigningRequests returns a CertificateSigningRequests func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests { return &certificateSigningRequests{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*certificatesv1beta1.CertificateSigningRequest, *certificatesv1beta1.CertificateSigningRequestList, *applyconfigurationscertificatesv1beta1.CertificateSigningRequestApplyConfiguration]( + "certificatesigningrequests", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *certificatesv1beta1.CertificateSigningRequest { + return &certificatesv1beta1.CertificateSigningRequest{} + }, + func() *certificatesv1beta1.CertificateSigningRequestList { + return &certificatesv1beta1.CertificateSigningRequestList{} + }, + gentype.PrefersProtobuf[*certificatesv1beta1.CertificateSigningRequest](), + ), } } - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. 
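// Editor's note on the hunk above: the hand-rolled REST plumbing for
// certificateSigningRequests is replaced by client-go's generic
// gentype.ClientWithListAndApply helper; the deleted per-verb methods below were the
// old implementations. The typed interface is unchanged (the new client additionally
// prefers protobuf on the wire), so callers compile and behave as before. A minimal,
// self-contained usage sketch, not part of this diff; the kubeconfig path and CSR
// name are illustrative:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build the clientset the usual way; the generated typed client now delegates
	// Get/List/Watch/Create/... to gentype internally.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Issues GET /apis/certificates.k8s.io/v1beta1/certificatesigningrequests/example-csr,
	// the same request the deleted hand-written Get method constructed.
	csr, err := cs.CertificatesV1beta1().CertificateSigningRequests().
		Get(context.TODO(), "example-csr", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(csr.Name)
}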
-func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(certificateSigningRequest). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. 
-func (c *certificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *certificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(types.ApplyPatchType). - Resource("certificatesigningrequests"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go index 473789141..4e631b0a4 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go @@ -30,7 +30,7 @@ type CertificateSigningRequestExpansion interface { func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequest *certificates.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificates.CertificateSigningRequest, err error) { result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). + err = c.GetClient().Put(). Resource("certificatesigningrequests"). Name(certificateSigningRequest.Name). VersionedParams(&opts, scheme.ParameterCodec). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go deleted file mode 100644 index 29d8b088e..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeCertificatesV1beta1 struct { - *testing.Fake -} - -func (c *FakeCertificatesV1beta1) CertificateSigningRequests() v1beta1.CertificateSigningRequestInterface { - return &FakeCertificateSigningRequests{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeCertificatesV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go deleted file mode 100644 index 76bb38e7b..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/certificates/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeCertificateSigningRequests implements CertificateSigningRequestInterface -type FakeCertificateSigningRequests struct { - Fake *FakeCertificatesV1beta1 -} - -var certificatesigningrequestsResource = v1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests") - -var certificatesigningrequestsKind = v1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequest") - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *FakeCertificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *FakeCertificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1beta1.CertificateSigningRequestList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.CertificateSigningRequestList{ListMeta: obj.(*v1beta1.CertificateSigningRequestList).ListMeta} - for _, item := range obj.(*v1beta1.CertificateSigningRequestList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *FakeCertificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *FakeCertificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. 
-func (c *FakeCertificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeCertificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1beta1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *FakeCertificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(certificatesigningrequestsResource, name, opts), &v1beta1.CertificateSigningRequest{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCertificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestList{}) - return err -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, pt, data, subresources...), &v1beta1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequest. -func (c *FakeCertificateSigningRequests) Apply(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data), &v1beta1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeCertificateSigningRequests) ApplyStatus(ctx context.Context, certificateSigningRequest *certificatesv1beta1.CertificateSigningRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequest, err error) { - if certificateSigningRequest == nil { - return nil, fmt.Errorf("certificateSigningRequest provided to Apply must not be nil") - } - data, err := json.Marshal(certificateSigningRequest) - if err != nil { - return nil, err - } - name := certificateSigningRequest.Name - if name == nil { - return nil, fmt.Errorf("certificateSigningRequest.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CertificateSigningRequest), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go deleted file mode 100644 index 2c3eaf971..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - certificates "k8s.io/api/certificates/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - core "k8s.io/client-go/testing" -) - -func (c *FakeCertificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequest *certificates.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificates.CertificateSigningRequest, err error) { - obj, err := c.Fake. 
- Invokes(core.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "approval", certificateSigningRequest), &certificates.CertificateSigningRequest{}) - if obj == nil { - return nil, err - } - return obj.(*certificates.CertificateSigningRequest), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go index e19469d53..427cb7e93 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/coordination/v1" - "k8s.io/client-go/kubernetes/scheme" + coordinationv1 "k8s.io/api/coordination/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *CoordinationV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := coordinationv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_coordination_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_coordination_client.go deleted file mode 100644 index 6920275b2..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_coordination_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/coordination/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeCoordinationV1 struct { - *testing.Fake -} - -func (c *FakeCoordinationV1) Leases(namespace string) v1.LeaseInterface { - return &FakeLeases{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeCoordinationV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go deleted file mode 100644 index 6dc7c4c17..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/coordination/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - coordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" - testing "k8s.io/client-go/testing" -) - -// FakeLeases implements LeaseInterface -type FakeLeases struct { - Fake *FakeCoordinationV1 - ns string -} - -var leasesResource = v1.SchemeGroupVersion.WithResource("leases") - -var leasesKind = v1.SchemeGroupVersion.WithKind("Lease") - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *FakeLeases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1.Lease{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Lease), err -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *FakeLeases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1.LeaseList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.LeaseList{ListMeta: obj.(*v1.LeaseList).ListMeta} - for _, item := range obj.(*v1.LeaseList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *FakeLeases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts)) - -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1.Lease{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Lease), err -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1.Lease{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Lease), err -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *FakeLeases) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(leasesResource, c.ns, name, opts), &v1.Lease{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeLeases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.LeaseList{}) - return err -} - -// Patch applies the patch and returns the patched lease. -func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1.Lease{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Lease), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Lease{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Lease), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go index 9e6b169a8..6e7784d6a 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/coordination/v1" + coordinationv1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - coordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" + applyconfigurationscoordinationv1 "k8s.io/client-go/applyconfigurations/coordination/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LeasesGetter has a method to return a LeaseInterface. @@ -41,168 +38,34 @@ type LeasesGetter interface { // LeaseInterface has methods to work with Lease resources. type LeaseInterface interface { - Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (*v1.Lease, error) - Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (*v1.Lease, error) + Create(ctx context.Context, lease *coordinationv1.Lease, opts metav1.CreateOptions) (*coordinationv1.Lease, error) + Update(ctx context.Context, lease *coordinationv1.Lease, opts metav1.UpdateOptions) (*coordinationv1.Lease, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Lease, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.LeaseList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*coordinationv1.Lease, error) + List(ctx context.Context, opts metav1.ListOptions) (*coordinationv1.LeaseList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) - Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *coordinationv1.Lease, err error) + Apply(ctx context.Context, lease *applyconfigurationscoordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *coordinationv1.Lease, err error) LeaseExpansion } // leases implements LeaseInterface type leases struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*coordinationv1.Lease, *coordinationv1.LeaseList, *applyconfigurationscoordinationv1.LeaseApplyConfiguration] } // newLeases returns a Leases func newLeases(c *CoordinationV1Client, namespace string) *leases { return &leases{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*coordinationv1.Lease, *coordinationv1.LeaseList, *applyconfigurationscoordinationv1.LeaseApplyConfiguration]( + "leases", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *coordinationv1.Lease { return &coordinationv1.Lease{} }, + func() *coordinationv1.LeaseList { return &coordinationv1.LeaseList{} }, + gentype.PrefersProtobuf[*coordinationv1.Lease](), + ), } } - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). 
- Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. 
-func (c *leases) Apply(ctx context.Context, lease *coordinationv1.LeaseApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - result = &v1.Lease{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("leases"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/coordination_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/coordination_client.go new file mode 100644 index 000000000..4c286d463 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/coordination_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + http "net/http" + + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CoordinationV1alpha2Interface interface { + RESTClient() rest.Interface + LeaseCandidatesGetter +} + +// CoordinationV1alpha2Client is used to interact with features provided by the coordination.k8s.io group. +type CoordinationV1alpha2Client struct { + restClient rest.Interface +} + +func (c *CoordinationV1alpha2Client) LeaseCandidates(namespace string) LeaseCandidateInterface { + return newLeaseCandidates(c, namespace) +} + +// NewForConfig creates a new CoordinationV1alpha2Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CoordinationV1alpha2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CoordinationV1alpha2Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoordinationV1alpha2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CoordinationV1alpha2Client{client}, nil +} + +// NewForConfigOrDie creates a new CoordinationV1alpha2Client for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *rest.Config) *CoordinationV1alpha2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoordinationV1alpha2Client for the given RESTClient. +func New(c rest.Interface) *CoordinationV1alpha2Client { + return &CoordinationV1alpha2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := coordinationv1alpha2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoordinationV1alpha2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/doc.go similarity index 100% rename from constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/doc.go rename to constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/doc.go diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/generated_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/generated_expansion.go new file mode 100644 index 000000000..52af4786c --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +type LeaseCandidateExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/leasecandidate.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/leasecandidate.go new file mode 100644 index 000000000..c994a8893 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/leasecandidate.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + context "context" + + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationscoordinationv1alpha2 "k8s.io/client-go/applyconfigurations/coordination/v1alpha2" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// LeaseCandidatesGetter has a method to return a LeaseCandidateInterface. +// A group's client should implement this interface. +type LeaseCandidatesGetter interface { + LeaseCandidates(namespace string) LeaseCandidateInterface +} + +// LeaseCandidateInterface has methods to work with LeaseCandidate resources. +type LeaseCandidateInterface interface { + Create(ctx context.Context, leaseCandidate *coordinationv1alpha2.LeaseCandidate, opts v1.CreateOptions) (*coordinationv1alpha2.LeaseCandidate, error) + Update(ctx context.Context, leaseCandidate *coordinationv1alpha2.LeaseCandidate, opts v1.UpdateOptions) (*coordinationv1alpha2.LeaseCandidate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*coordinationv1alpha2.LeaseCandidate, error) + List(ctx context.Context, opts v1.ListOptions) (*coordinationv1alpha2.LeaseCandidateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *coordinationv1alpha2.LeaseCandidate, err error) + Apply(ctx context.Context, leaseCandidate *applyconfigurationscoordinationv1alpha2.LeaseCandidateApplyConfiguration, opts v1.ApplyOptions) (result *coordinationv1alpha2.LeaseCandidate, err error) + LeaseCandidateExpansion +} + +// leaseCandidates implements LeaseCandidateInterface +type leaseCandidates struct { + *gentype.ClientWithListAndApply[*coordinationv1alpha2.LeaseCandidate, *coordinationv1alpha2.LeaseCandidateList, *applyconfigurationscoordinationv1alpha2.LeaseCandidateApplyConfiguration] +} + +// newLeaseCandidates returns a LeaseCandidates +func newLeaseCandidates(c *CoordinationV1alpha2Client, namespace string) *leaseCandidates { + return &leaseCandidates{ + gentype.NewClientWithListAndApply[*coordinationv1alpha2.LeaseCandidate, *coordinationv1alpha2.LeaseCandidateList, *applyconfigurationscoordinationv1alpha2.LeaseCandidateApplyConfiguration]( + "leasecandidates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *coordinationv1alpha2.LeaseCandidate { return &coordinationv1alpha2.LeaseCandidate{} }, + func() *coordinationv1alpha2.LeaseCandidateList { return &coordinationv1alpha2.LeaseCandidateList{} }, + gentype.PrefersProtobuf[*coordinationv1alpha2.LeaseCandidate](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go index 27d674e23..1f1afba24 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/coordination/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + coordinationv1beta1 "k8s.io/api/coordination/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *CoordinationV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := coordinationv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go deleted file mode 100644 index f583b466e..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeCoordinationV1beta1 struct { - *testing.Fake -} - -func (c *FakeCoordinationV1beta1) Leases(namespace string) v1beta1.LeaseInterface { - return &FakeLeases{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeCoordinationV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go deleted file mode 100644 index 9a4a0d7eb..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/coordination/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeLeases implements LeaseInterface -type FakeLeases struct { - Fake *FakeCoordinationV1beta1 - ns string -} - -var leasesResource = v1beta1.SchemeGroupVersion.WithResource("leases") - -var leasesKind = v1beta1.SchemeGroupVersion.WithKind("Lease") - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *FakeLeases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1beta1.Lease{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Lease), err -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *FakeLeases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1beta1.LeaseList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.LeaseList{ListMeta: obj.(*v1beta1.LeaseList).ListMeta} - for _, item := range obj.(*v1beta1.LeaseList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *FakeLeases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts)) - -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1beta1.Lease{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Lease), err -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *FakeLeases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1beta1.Lease{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Lease), err -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *FakeLeases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(leasesResource, c.ns, name, opts), &v1beta1.Lease{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeLeases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.LeaseList{}) - return err -} - -// Patch applies the patch and returns the patched lease. -func (c *FakeLeases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, pt, data, subresources...), &v1beta1.Lease{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Lease), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *FakeLeases) Apply(ctx context.Context, lease *coordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Lease{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Lease), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 1bbd57bdd..18ca9823c 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/coordination/v1beta1" + coordinationv1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - coordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" + applyconfigurationscoordinationv1beta1 "k8s.io/client-go/applyconfigurations/coordination/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LeasesGetter has a method to return a LeaseInterface. @@ -41,168 +38,34 @@ type LeasesGetter interface { // LeaseInterface has methods to work with Lease resources. type LeaseInterface interface { - Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (*v1beta1.Lease, error) - Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (*v1beta1.Lease, error) + Create(ctx context.Context, lease *coordinationv1beta1.Lease, opts v1.CreateOptions) (*coordinationv1beta1.Lease, error) + Update(ctx context.Context, lease *coordinationv1beta1.Lease, opts v1.UpdateOptions) (*coordinationv1beta1.Lease, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Lease, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.LeaseList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*coordinationv1beta1.Lease, error) + List(ctx context.Context, opts v1.ListOptions) (*coordinationv1beta1.LeaseList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) - Apply(ctx context.Context, lease *coordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Lease, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *coordinationv1beta1.Lease, err error) + Apply(ctx context.Context, lease *applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *coordinationv1beta1.Lease, err error) LeaseExpansion } // leases implements LeaseInterface type leases struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*coordinationv1beta1.Lease, *coordinationv1beta1.LeaseList, *applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration] } // newLeases returns a Leases func newLeases(c *CoordinationV1beta1Client, namespace string) *leases { return &leases{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*coordinationv1beta1.Lease, *coordinationv1beta1.LeaseList, *applyconfigurationscoordinationv1beta1.LeaseApplyConfiguration]( + "leases", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *coordinationv1beta1.Lease { return &coordinationv1beta1.Lease{} }, + func() *coordinationv1beta1.LeaseList { return &coordinationv1beta1.LeaseList{} }, + gentype.PrefersProtobuf[*coordinationv1beta1.Lease](), + ), } } - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. 
-func (c *leases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(lease). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied lease. -func (c *leases) Apply(ctx context.Context, lease *coordinationv1beta1.LeaseApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Lease, err error) { - if lease == nil { - return nil, fmt.Errorf("lease provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(lease) - if err != nil { - return nil, err - } - name := lease.Name - if name == nil { - return nil, fmt.Errorf("lease.Name must be provided to Apply") - } - result = &v1beta1.Lease{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("leases"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index 0fef56429..b8e58cd15 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ComponentStatusesGetter has a method to return a ComponentStatusInterface. @@ -41,157 +38,34 @@ type ComponentStatusesGetter interface { // ComponentStatusInterface has methods to work with ComponentStatus resources. 
type ComponentStatusInterface interface { - Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (*v1.ComponentStatus, error) - Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (*v1.ComponentStatus, error) + Create(ctx context.Context, componentStatus *corev1.ComponentStatus, opts metav1.CreateOptions) (*corev1.ComponentStatus, error) + Update(ctx context.Context, componentStatus *corev1.ComponentStatus, opts metav1.UpdateOptions) (*corev1.ComponentStatus, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ComponentStatus, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ComponentStatusList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ComponentStatus, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ComponentStatusList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) - Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ComponentStatus, err error) + Apply(ctx context.Context, componentStatus *applyconfigurationscorev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ComponentStatus, err error) ComponentStatusExpansion } // componentStatuses implements ComponentStatusInterface type componentStatuses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*corev1.ComponentStatus, *corev1.ComponentStatusList, *applyconfigurationscorev1.ComponentStatusApplyConfiguration] } // newComponentStatuses returns a ComponentStatuses func newComponentStatuses(c *CoreV1Client) *componentStatuses { return &componentStatuses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*corev1.ComponentStatus, *corev1.ComponentStatusList, *applyconfigurationscorev1.ComponentStatusApplyConfiguration]( + "componentstatuses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *corev1.ComponentStatus { return &corev1.ComponentStatus{} }, + func() *corev1.ComponentStatusList { return &corev1.ComponentStatusList{} }, + gentype.PrefersProtobuf[*corev1.ComponentStatus](), + ), } } - -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *componentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Get(). - Resource("componentstatuses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. 
-func (c *componentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ComponentStatusList{} - err = c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *componentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Post(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(componentStatus). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Put(). - Resource("componentstatuses"). - Name(componentStatus.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(componentStatus). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *componentStatuses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *componentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("componentstatuses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched componentStatus. -func (c *componentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Patch(pt). - Resource("componentstatuses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied componentStatus. 
-func (c *componentStatuses) Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error) { - if componentStatus == nil { - return nil, fmt.Errorf("componentStatus provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(componentStatus) - if err != nil { - return nil, err - } - name := componentStatus.Name - if name == nil { - return nil, fmt.Errorf("componentStatus.Name must be provided to Apply") - } - result = &v1.ComponentStatus{} - err = c.client.Patch(types.ApplyPatchType). - Resource("componentstatuses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index b68177720..74d321193 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ConfigMapsGetter has a method to return a ConfigMapInterface. @@ -41,168 +38,34 @@ type ConfigMapsGetter interface { // ConfigMapInterface has methods to work with ConfigMap resources. 
type ConfigMapInterface interface { - Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error) - Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error) + Create(ctx context.Context, configMap *corev1.ConfigMap, opts metav1.CreateOptions) (*corev1.ConfigMap, error) + Update(ctx context.Context, configMap *corev1.ConfigMap, opts metav1.UpdateOptions) (*corev1.ConfigMap, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ConfigMap, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ConfigMapList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) - Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ConfigMap, err error) + Apply(ctx context.Context, configMap *applyconfigurationscorev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ConfigMap, err error) ConfigMapExpansion } // configMaps implements ConfigMapInterface type configMaps struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.ConfigMap, *corev1.ConfigMapList, *applyconfigurationscorev1.ConfigMapApplyConfiguration] } // newConfigMaps returns a ConfigMaps func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { return &configMaps{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.ConfigMap, *corev1.ConfigMapList, *applyconfigurationscorev1.ConfigMapApplyConfiguration]( + "configmaps", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.ConfigMap { return &corev1.ConfigMap{} }, + func() *corev1.ConfigMapList { return &corev1.ConfigMapList{} }, + gentype.PrefersProtobuf[*corev1.ConfigMap](), + ), } } - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Post(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(configMap). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Put(). - Namespace(c.ns). - Resource("configmaps"). - Name(configMap.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(configMap). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched configMap. -func (c *configMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied configMap. -func (c *configMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error) { - if configMap == nil { - return nil, fmt.Errorf("configMap provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(configMap) - if err != nil { - return nil, err - } - name := configMap.Name - if name == nil { - return nil, fmt.Errorf("configMap.Name must be provided to Apply") - } - result = &v1.ConfigMap{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("configmaps"). 
- Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go index 6e59e4cc6..abf85cba6 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes/scheme" + corev1 "k8s.io/api/core/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -160,10 +160,10 @@ func New(c rest.Interface) *CoreV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := corev1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/api" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index cdf464b06..b96a8b385 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointsGetter has a method to return a EndpointsInterface. @@ -41,168 +38,34 @@ type EndpointsGetter interface { // EndpointsInterface has methods to work with Endpoints resources. 
type EndpointsInterface interface { - Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (*v1.Endpoints, error) - Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (*v1.Endpoints, error) + Create(ctx context.Context, endpoints *corev1.Endpoints, opts metav1.CreateOptions) (*corev1.Endpoints, error) + Update(ctx context.Context, endpoints *corev1.Endpoints, opts metav1.UpdateOptions) (*corev1.Endpoints, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Endpoints, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointsList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Endpoints, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.EndpointsList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) - Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Endpoints, err error) + Apply(ctx context.Context, endpoints *applyconfigurationscorev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Endpoints, err error) EndpointsExpansion } // endpoints implements EndpointsInterface type endpoints struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.Endpoints, *corev1.EndpointsList, *applyconfigurationscorev1.EndpointsApplyConfiguration] } // newEndpoints returns a Endpoints func newEndpoints(c *CoreV1Client, namespace string) *endpoints { return &endpoints{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.Endpoints, *corev1.EndpointsList, *applyconfigurationscorev1.EndpointsApplyConfiguration]( + "endpoints", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.Endpoints { return &corev1.Endpoints{} }, + func() *corev1.EndpointsList { return &corev1.EndpointsList{} }, + gentype.PrefersProtobuf[*corev1.Endpoints](), + ), } } - -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *endpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EndpointsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpoints. -func (c *endpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpoints). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpoints"). - Name(endpoints.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpoints). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *endpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpoints. -func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpoints. -func (c *endpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error) { - if endpoints == nil { - return nil, fmt.Errorf("endpoints provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpoints) - if err != nil { - return nil, err - } - name := endpoints.Name - if name == nil { - return nil, fmt.Errorf("endpoints.Name must be provided to Apply") - } - result = &v1.Endpoints{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpoints"). - Name(*name). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 8274d85ff..dd0cc80b8 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -41,168 +38,34 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. type EventInterface interface { - Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (*v1.Event, error) - Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (*v1.Event, error) + Create(ctx context.Context, event *corev1.Event, opts metav1.CreateOptions) (*corev1.Event, error) + Update(ctx context.Context, event *corev1.Event, opts metav1.UpdateOptions) (*corev1.Event, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Event, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.EventList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Event, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.EventList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) - Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Event, err error) + Apply(ctx context.Context, event *applyconfigurationscorev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Event, err error) EventExpansion } // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.Event, *corev1.EventList, *applyconfigurationscorev1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *CoreV1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.Event, *corev1.EventList, *applyconfigurationscorev1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.Event { return &corev1.Event{} }, + func() *corev1.EventList { return &corev1.EventList{} }, + gentype.PrefersProtobuf[*corev1.Event](), + ), } } - -// Get takes name of the event, and returns the 
corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *events) Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go index a3fdf57a9..424357232 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -48,11 +48,11 @@ type EventExpansion interface { // event; it must either match this event client's namespace, or this event // client must have been created with the "" namespace. func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Post(). + err := e.GetClient().Post(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Body(event). @@ -67,11 +67,11 @@ func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { // created with the "" namespace. Update also requires the ResourceVersion to be set in the event // object. func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Put(). + err := e.GetClient().Put(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). @@ -87,11 +87,11 @@ func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { // match this event client's namespace, or this event client must have been // created with the "" namespace. 
func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { - if e.ns != "" && incompleteEvent.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) + if e.GetNamespace() != "" && incompleteEvent.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.GetNamespace()) } result := &v1.Event{} - err := e.client.Patch(types.StrategicMergePatchType). + err := e.GetClient().Patch(types.StrategicMergePatchType). NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). Resource("events"). Name(incompleteEvent.Name). @@ -109,8 +109,8 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev if err != nil { return nil, err } - if len(e.ns) > 0 && ref.Namespace != e.ns { - return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + if len(e.GetNamespace()) > 0 && ref.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.GetNamespace()) } stringRefKind := string(ref.Kind) var refKind *string diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go deleted file mode 100644 index 39d4c3282..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
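Aside (not part of this change): the typed events client and its expansion methods shown above (CreateWithEventNamespace, UpdateWithEventNamespace, PatchWithEventNamespace, Search) are normally reached through a kubernetes.Clientset rather than called directly. The sketch below is illustrative only; it assumes a rest.Config obtained elsewhere (for example from a kubeconfig or rest.InClusterConfig) and uses only standard client-go calls.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listRecentEvents is a hypothetical helper showing the call path into the
// generated typed client: List ultimately builds a REST request through the
// same chain the generated code implements,
// client.Get().Namespace(ns).Resource("events")...Do(ctx).Into(result).
func listRecentEvents(cfg *rest.Config, namespace string) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	events, err := cs.CoreV1().Events(namespace).List(context.Background(), metav1.ListOptions{Limit: 10})
	if err != nil {
		return err
	}
	for _, ev := range events.Items {
		// Each item carries the involved object's namespace plus a reason and
		// human-readable message.
		fmt.Printf("%s/%s (%s): %s\n", ev.Namespace, ev.Name, ev.Reason, ev.Message)
	}
	return nil
}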
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeComponentStatuses implements ComponentStatusInterface -type FakeComponentStatuses struct { - Fake *FakeCoreV1 -} - -var componentstatusesResource = v1.SchemeGroupVersion.WithResource("componentstatuses") - -var componentstatusesKind = v1.SchemeGroupVersion.WithKind("ComponentStatus") - -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *FakeComponentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(componentstatusesResource, name), &v1.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ComponentStatus), err -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *FakeComponentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(componentstatusesResource, componentstatusesKind, opts), &v1.ComponentStatusList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ComponentStatusList{ListMeta: obj.(*v1.ComponentStatusList).ListMeta} - for _, item := range obj.(*v1.ComponentStatusList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *FakeComponentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(componentstatusesResource, opts)) -} - -// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *FakeComponentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ComponentStatus), err -} - -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *FakeComponentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &v1.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ComponentStatus), err -} - -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *FakeComponentStatuses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(componentstatusesResource, name, opts), &v1.ComponentStatus{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeComponentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(componentstatusesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ComponentStatusList{}) - return err -} - -// Patch applies the patch and returns the patched componentStatus. -func (c *FakeComponentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, pt, data, subresources...), &v1.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ComponentStatus), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied componentStatus. -func (c *FakeComponentStatuses) Apply(ctx context.Context, componentStatus *corev1.ComponentStatusApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ComponentStatus, err error) { - if componentStatus == nil { - return nil, fmt.Errorf("componentStatus provided to Apply must not be nil") - } - data, err := json.Marshal(componentStatus) - if err != nil { - return nil, err - } - name := componentStatus.Name - if name == nil { - return nil, fmt.Errorf("componentStatus.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, *name, types.ApplyPatchType, data), &v1.ComponentStatus{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ComponentStatus), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go deleted file mode 100644 index 6e8a38bd8..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeConfigMaps implements ConfigMapInterface -type FakeConfigMaps struct { - Fake *FakeCoreV1 - ns string -} - -var configmapsResource = v1.SchemeGroupVersion.WithResource("configmaps") - -var configmapsKind = v1.SchemeGroupVersion.WithKind("ConfigMap") - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *FakeConfigMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &v1.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ConfigMap), err -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *FakeConfigMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(configmapsResource, configmapsKind, c.ns, opts), &v1.ConfigMapList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ConfigMapList{ListMeta: obj.(*v1.ConfigMapList).ListMeta} - for _, item := range obj.(*v1.ConfigMapList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *FakeConfigMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(configmapsResource, c.ns, opts)) - -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *FakeConfigMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ConfigMap), err -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *FakeConfigMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &v1.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ConfigMap), err -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *FakeConfigMaps) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(configmapsResource, c.ns, name, opts), &v1.ConfigMap{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeConfigMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(configmapsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ConfigMapList{}) - return err -} - -// Patch applies the patch and returns the patched configMap. -func (c *FakeConfigMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, pt, data, subresources...), &v1.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ConfigMap), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied configMap. -func (c *FakeConfigMaps) Apply(ctx context.Context, configMap *corev1.ConfigMapApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ConfigMap, err error) { - if configMap == nil { - return nil, fmt.Errorf("configMap provided to Apply must not be nil") - } - data, err := json.Marshal(configMap) - if err != nil { - return nil, err - } - name := configMap.Name - if name == nil { - return nil, fmt.Errorf("configMap.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ConfigMap{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ConfigMap), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go deleted file mode 100644 index 5ad90943c..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/core/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeCoreV1 struct { - *testing.Fake -} - -func (c *FakeCoreV1) ComponentStatuses() v1.ComponentStatusInterface { - return &FakeComponentStatuses{c} -} - -func (c *FakeCoreV1) ConfigMaps(namespace string) v1.ConfigMapInterface { - return &FakeConfigMaps{c, namespace} -} - -func (c *FakeCoreV1) Endpoints(namespace string) v1.EndpointsInterface { - return &FakeEndpoints{c, namespace} -} - -func (c *FakeCoreV1) Events(namespace string) v1.EventInterface { - return &FakeEvents{c, namespace} -} - -func (c *FakeCoreV1) LimitRanges(namespace string) v1.LimitRangeInterface { - return &FakeLimitRanges{c, namespace} -} - -func (c *FakeCoreV1) Namespaces() v1.NamespaceInterface { - return &FakeNamespaces{c} -} - -func (c *FakeCoreV1) Nodes() v1.NodeInterface { - return &FakeNodes{c} -} - -func (c *FakeCoreV1) PersistentVolumes() v1.PersistentVolumeInterface { - return &FakePersistentVolumes{c} -} - -func (c *FakeCoreV1) PersistentVolumeClaims(namespace string) v1.PersistentVolumeClaimInterface { - return &FakePersistentVolumeClaims{c, namespace} -} - -func (c *FakeCoreV1) Pods(namespace string) v1.PodInterface { - return &FakePods{c, namespace} -} - -func (c *FakeCoreV1) PodTemplates(namespace string) v1.PodTemplateInterface { - return &FakePodTemplates{c, namespace} -} - -func (c *FakeCoreV1) ReplicationControllers(namespace string) v1.ReplicationControllerInterface { - return &FakeReplicationControllers{c, namespace} -} - -func (c *FakeCoreV1) ResourceQuotas(namespace string) v1.ResourceQuotaInterface { - return &FakeResourceQuotas{c, namespace} -} - -func (c *FakeCoreV1) Secrets(namespace string) v1.SecretInterface { - return &FakeSecrets{c, namespace} -} - -func (c *FakeCoreV1) Services(namespace string) v1.ServiceInterface { - return &FakeServices{c, namespace} -} - -func (c *FakeCoreV1) ServiceAccounts(namespace string) v1.ServiceAccountInterface { - return &FakeServiceAccounts{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeCoreV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go deleted file mode 100644 index 6b2f6c249..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
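Aside (not part of this change): FakeCoreV1 and the per-resource fakes above (FakeComponentStatuses, FakeConfigMaps, and so on) back client-go's in-memory test clientset. The sketch below is illustrative only; it assumes k8s.io/client-go is available and uses fake.NewSimpleClientset, which exposes FakeCoreV1 behind the ordinary CoreV1() accessor.

package example_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestConfigMapLookup is a hypothetical test showing how the generated fakes
// are normally consumed: requests are served from an in-memory object
// tracker, so no API server is required.
func TestConfigMapLookup(t *testing.T) {
	ctx := context.Background()

	// Seed the fake clientset with a pre-existing object.
	seed := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Data:       map[string]string{"key": "value"},
	}
	cs := fake.NewSimpleClientset(seed)

	// The fake satisfies the same ConfigMapInterface as the real typed client,
	// so production code can be exercised unchanged.
	got, err := cs.CoreV1().ConfigMaps("default").Get(ctx, "demo", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Data["key"] != "value" {
		t.Fatalf("unexpected data: %v", got.Data)
	}
}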
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeEndpoints implements EndpointsInterface -type FakeEndpoints struct { - Fake *FakeCoreV1 - ns string -} - -var endpointsResource = v1.SchemeGroupVersion.WithResource("endpoints") - -var endpointsKind = v1.SchemeGroupVersion.WithKind("Endpoints") - -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *FakeEndpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Endpoints), err -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &v1.EndpointsList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.EndpointsList{ListMeta: obj.(*v1.EndpointsList).ListMeta} - for _, item := range obj.(*v1.EndpointsList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested endpoints. -func (c *FakeEndpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts)) - -} - -// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *FakeEndpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Endpoints), err -} - -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *FakeEndpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Endpoints), err -} - -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *FakeEndpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(endpointsResource, c.ns, name, opts), &v1.Endpoints{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.EndpointsList{}) - return err -} - -// Patch applies the patch and returns the patched endpoints. -func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &v1.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Endpoints), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpoints. -func (c *FakeEndpoints) Apply(ctx context.Context, endpoints *corev1.EndpointsApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Endpoints, err error) { - if endpoints == nil { - return nil, fmt.Errorf("endpoints provided to Apply must not be nil") - } - data, err := json.Marshal(endpoints) - if err != nil { - return nil, err - } - name := endpoints.Name - if name == nil { - return nil, fmt.Errorf("endpoints.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Endpoints{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Endpoints), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go deleted file mode 100644 index 9ad879b39..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeEvents implements EventInterface -type FakeEvents struct { - Fake *FakeCoreV1 - ns string -} - -var eventsResource = v1.SchemeGroupVersion.WithResource("events") - -var eventsKind = v1.SchemeGroupVersion.WithKind("Event") - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1.EventList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.EventList{ListMeta: obj.(*v1.EventList).ListMeta} - for _, item := range obj.(*v1.EventList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) - -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &v1.Event{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.EventList{}) - return err -} - -// Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. 
-func (c *FakeEvents) Apply(ctx context.Context, event *corev1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go deleted file mode 100644 index 48282f86e..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - core "k8s.io/client-go/testing" -) - -func (c *FakeEvents) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - var action core.CreateActionImpl - if c.ns != "" { - action = core.NewCreateAction(eventsResource, c.ns, event) - } else { - action = core.NewCreateAction(eventsResource, event.GetNamespace(), event) - } - obj, err := c.Fake.Invokes(action, event) - if obj == nil { - return nil, err - } - - return obj.(*v1.Event), err -} - -// Update replaces an existing event. Returns the copy of the event the server returns, or an error. -func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - var action core.UpdateActionImpl - if c.ns != "" { - action = core.NewUpdateAction(eventsResource, c.ns, event) - } else { - action = core.NewUpdateAction(eventsResource, event.GetNamespace(), event) - } - obj, err := c.Fake.Invokes(action, event) - if obj == nil { - return nil, err - } - - return obj.(*v1.Event), err -} - -// PatchWithEventNamespace patches an existing event. Returns the copy of the event the server returns, or an error. -// TODO: Should take a PatchType as an argument probably. -func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) { - // TODO: Should be configurable to support additional patch strategies. 
- pt := types.StrategicMergePatchType - var action core.PatchActionImpl - if c.ns != "" { - action = core.NewPatchAction(eventsResource, c.ns, event.Name, pt, data) - } else { - action = core.NewPatchAction(eventsResource, event.GetNamespace(), event.Name, pt, data) - } - obj, err := c.Fake.Invokes(action, event) - if obj == nil { - return nil, err - } - - return obj.(*v1.Event), err -} - -// Search returns a list of events matching the specified object. -func (c *FakeEvents) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { - var action core.ListActionImpl - if c.ns != "" { - action = core.NewListAction(eventsResource, eventsKind, c.ns, metav1.ListOptions{}) - } else { - action = core.NewListAction(eventsResource, eventsKind, v1.NamespaceDefault, metav1.ListOptions{}) - } - obj, err := c.Fake.Invokes(action, &v1.EventList{}) - if obj == nil { - return nil, err - } - - return obj.(*v1.EventList), err -} - -func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { - action := core.GenericActionImpl{} - action.Verb = "get-field-selector" - action.Resource = eventsResource - - c.Fake.Invokes(action, nil) - return fields.Everything() -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go deleted file mode 100644 index f18b5741c..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeLimitRanges implements LimitRangeInterface -type FakeLimitRanges struct { - Fake *FakeCoreV1 - ns string -} - -var limitrangesResource = v1.SchemeGroupVersion.WithResource("limitranges") - -var limitrangesKind = v1.SchemeGroupVersion.WithKind("LimitRange") - -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *FakeLimitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &v1.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.LimitRange), err -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. 
-func (c *FakeLimitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(limitrangesResource, limitrangesKind, c.ns, opts), &v1.LimitRangeList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.LimitRangeList{ListMeta: obj.(*v1.LimitRangeList).ListMeta} - for _, item := range obj.(*v1.LimitRangeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *FakeLimitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(limitrangesResource, c.ns, opts)) - -} - -// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *FakeLimitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.LimitRange), err -} - -// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *FakeLimitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &v1.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.LimitRange), err -} - -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *FakeLimitRanges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(limitrangesResource, c.ns, name, opts), &v1.LimitRange{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeLimitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(limitrangesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.LimitRangeList{}) - return err -} - -// Patch applies the patch and returns the patched limitRange. -func (c *FakeLimitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, pt, data, subresources...), &v1.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.LimitRange), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied limitRange. 
-func (c *FakeLimitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error) { - if limitRange == nil { - return nil, fmt.Errorf("limitRange provided to Apply must not be nil") - } - data, err := json.Marshal(limitRange) - if err != nil { - return nil, err - } - name := limitRange.Name - if name == nil { - return nil, fmt.Errorf("limitRange.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, *name, types.ApplyPatchType, data), &v1.LimitRange{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.LimitRange), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go deleted file mode 100644 index 52fcff591..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeNamespaces implements NamespaceInterface -type FakeNamespaces struct { - Fake *FakeCoreV1 -} - -var namespacesResource = v1.SchemeGroupVersion.WithResource("namespaces") - -var namespacesKind = v1.SchemeGroupVersion.WithKind("Namespace") - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *FakeNamespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(namespacesResource, name), &v1.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Namespace), err -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &v1.NamespaceList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.NamespaceList{ListMeta: obj.(*v1.NamespaceList).ListMeta} - for _, item := range obj.(*v1.NamespaceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested namespaces. 
-func (c *FakeNamespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts)) -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *FakeNamespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Namespace), err -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *FakeNamespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Namespace), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNamespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &v1.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Namespace), err -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *FakeNamespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(namespacesResource, name, opts), &v1.Namespace{}) - return err -} - -// Patch applies the patch and returns the patched namespace. -func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &v1.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Namespace), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied namespace. -func (c *FakeNamespaces) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data), &v1.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Namespace), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeNamespaces) ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, *name, types.ApplyPatchType, data, "status"), &v1.Namespace{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Namespace), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go deleted file mode 100644 index d86b328a4..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - core "k8s.io/client-go/testing" -) - -func (c *FakeNamespaces) Finalize(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) { - action := core.CreateActionImpl{} - action.Verb = "create" - action.Resource = namespacesResource - action.Subresource = "finalize" - action.Object = namespace - - obj, err := c.Fake.Invokes(action, namespace) - if obj == nil { - return nil, err - } - - return obj.(*v1.Namespace), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go deleted file mode 100644 index 5df40f8d1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeNodes implements NodeInterface -type FakeNodes struct { - Fake *FakeCoreV1 -} - -var nodesResource = v1.SchemeGroupVersion.WithResource("nodes") - -var nodesKind = v1.SchemeGroupVersion.WithKind("Node") - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *FakeNodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(nodesResource, name), &v1.Node{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Node), err -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &v1.NodeList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.NodeList{ListMeta: obj.(*v1.NodeList).ListMeta} - for _, item := range obj.(*v1.NodeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *FakeNodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(nodesResource, opts)) -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *FakeNodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(nodesResource, node), &v1.Node{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Node), err -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *FakeNodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(nodesResource, node), &v1.Node{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Node), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Node), err -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *FakeNodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(nodesResource, name, opts), &v1.Node{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(nodesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.NodeList{}) - return err -} - -// Patch applies the patch and returns the patched node. -func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &v1.Node{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Node), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied node. -func (c *FakeNodes) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data), &v1.Node{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Node), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeNodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data, "status"), &v1.Node{}) - if obj == nil { - return nil, err - } - return obj.(*v1.Node), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go deleted file mode 100644 index eccf9fec6..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - v1 "k8s.io/api/core/v1" - types "k8s.io/apimachinery/pkg/types" - core "k8s.io/client-go/testing" -) - -// TODO: Should take a PatchType as an argument probably. 
-func (c *FakeNodes) PatchStatus(_ context.Context, nodeName string, data []byte) (*v1.Node, error) { - // TODO: Should be configurable to support additional patch strategies. - pt := types.StrategicMergePatchType - obj, err := c.Fake.Invokes( - core.NewRootPatchSubresourceAction(nodesResource, nodeName, pt, data, "status"), &v1.Node{}) - if obj == nil { - return nil, err - } - - return obj.(*v1.Node), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go deleted file mode 100644 index 5b06d0b19..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakePersistentVolumes implements PersistentVolumeInterface -type FakePersistentVolumes struct { - Fake *FakeCoreV1 -} - -var persistentvolumesResource = v1.SchemeGroupVersion.WithResource("persistentvolumes") - -var persistentvolumesKind = v1.SchemeGroupVersion.WithKind("PersistentVolume") - -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *FakePersistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &v1.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolume), err -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *FakePersistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(persistentvolumesResource, persistentvolumesKind, opts), &v1.PersistentVolumeList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PersistentVolumeList{ListMeta: obj.(*v1.PersistentVolumeList).ListMeta} - for _, item := range obj.(*v1.PersistentVolumeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. 
-func (c *FakePersistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(persistentvolumesResource, opts)) -} - -// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *FakePersistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolume), err -} - -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *FakePersistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &v1.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolume), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &v1.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolume), err -} - -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *FakePersistentVolumes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(persistentvolumesResource, name, opts), &v1.PersistentVolume{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePersistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(persistentvolumesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PersistentVolumeList{}) - return err -} - -// Patch applies the patch and returns the patched persistentVolume. -func (c *FakePersistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, pt, data, subresources...), &v1.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolume), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolume. 
-func (c *FakePersistentVolumes) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data), &v1.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolume), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePersistentVolumes) ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, *name, types.ApplyPatchType, data, "status"), &v1.PersistentVolume{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolume), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go deleted file mode 100644 index b860e5367..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakePersistentVolumeClaims implements PersistentVolumeClaimInterface -type FakePersistentVolumeClaims struct { - Fake *FakeCoreV1 - ns string -} - -var persistentvolumeclaimsResource = v1.SchemeGroupVersion.WithResource("persistentvolumeclaims") - -var persistentvolumeclaimsKind = v1.SchemeGroupVersion.WithKind("PersistentVolumeClaim") - -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *FakePersistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &v1.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *FakePersistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), &v1.PersistentVolumeClaimList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PersistentVolumeClaimList{ListMeta: obj.(*v1.PersistentVolumeClaimList).ListMeta} - for _, item := range obj.(*v1.PersistentVolumeClaimList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *FakePersistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts)) - -} - -// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *FakePersistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *FakePersistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePersistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &v1.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *FakePersistentVolumeClaims) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(persistentvolumeclaimsResource, c.ns, name, opts), &v1.PersistentVolumeClaim{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePersistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PersistentVolumeClaimList{}) - return err -} - -// Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *FakePersistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, pt, data, subresources...), &v1.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolumeClaim. -func (c *FakePersistentVolumeClaims) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolumeClaim), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakePersistentVolumeClaims) ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.PersistentVolumeClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PersistentVolumeClaim), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go deleted file mode 100644 index 23634c7d0..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakePods implements PodInterface -type FakePods struct { - Fake *FakeCoreV1 - ns string -} - -var podsResource = v1.SchemeGroupVersion.WithResource("pods") - -var podsKind = v1.SchemeGroupVersion.WithKind("Pod") - -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *FakePods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(podsResource, c.ns, name), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &v1.PodList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PodList{ListMeta: obj.(*v1.PodList).ListMeta} - for _, item := range obj.(*v1.PodList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested pods. 
-func (c *FakePods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts)) - -} - -// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} - -// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} - -// Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *FakePods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podsResource, c.ns, name, opts), &v1.Pod{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PodList{}) - return err -} - -// Patch applies the patch and returns the patched pod. -func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied pod. -func (c *FakePods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakePods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} - -// UpdateEphemeralContainers takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, pod), &v1.Pod{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Pod), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go deleted file mode 100644 index c814cadb0..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - "fmt" - "io" - "net/http" - "strings" - - v1 "k8s.io/api/core/v1" - policyv1 "k8s.io/api/policy/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" - restclient "k8s.io/client-go/rest" - fakerest "k8s.io/client-go/rest/fake" - core "k8s.io/client-go/testing" -) - -func (c *FakePods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error { - action := core.CreateActionImpl{} - action.Verb = "create" - action.Namespace = binding.Namespace - action.Resource = podsResource - action.Subresource = "binding" - action.Object = binding - - _, err := c.Fake.Invokes(action, binding) - return err -} - -func (c *FakePods) GetBinding(name string) (result *v1.Binding, err error) { - obj, err := c.Fake. 
- Invokes(core.NewGetSubresourceAction(podsResource, c.ns, "binding", name), &v1.Binding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Binding), err -} - -func (c *FakePods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { - action := core.GenericActionImpl{} - action.Verb = "get" - action.Namespace = c.ns - action.Resource = podsResource - action.Subresource = "log" - action.Value = opts - - _, _ = c.Fake.Invokes(action, &v1.Pod{}) - fakeClient := &fakerest.RESTClient{ - Client: fakerest.CreateHTTPClient(func(request *http.Request) (*http.Response, error) { - resp := &http.Response{ - StatusCode: http.StatusOK, - Body: io.NopCloser(strings.NewReader("fake logs")), - } - return resp, nil - }), - NegotiatedSerializer: scheme.Codecs.WithoutConversion(), - GroupVersion: podsKind.GroupVersion(), - VersionedAPIPath: fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/log", c.ns, name), - } - return fakeClient.Request() -} - -func (c *FakePods) Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error { - return c.EvictV1beta1(ctx, eviction) -} - -func (c *FakePods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) error { - action := core.CreateActionImpl{} - action.Verb = "create" - action.Namespace = c.ns - action.Resource = podsResource - action.Subresource = "eviction" - action.Object = eviction - - _, err := c.Fake.Invokes(action, eviction) - return err -} - -func (c *FakePods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error { - action := core.CreateActionImpl{} - action.Verb = "create" - action.Namespace = c.ns - action.Resource = podsResource - action.Subresource = "eviction" - action.Object = eviction - - _, err := c.Fake.Invokes(action, eviction) - return err -} - -func (c *FakePods) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - return c.Fake.InvokesProxy(core.NewProxyGetAction(podsResource, c.ns, scheme, name, port, path, params)) -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go deleted file mode 100644 index 9fa97ab40..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakePodTemplates implements PodTemplateInterface -type FakePodTemplates struct { - Fake *FakeCoreV1 - ns string -} - -var podtemplatesResource = v1.SchemeGroupVersion.WithResource("podtemplates") - -var podtemplatesKind = v1.SchemeGroupVersion.WithKind("PodTemplate") - -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *FakePodTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &v1.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodTemplate), err -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *FakePodTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(podtemplatesResource, podtemplatesKind, c.ns, opts), &v1.PodTemplateList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PodTemplateList{ListMeta: obj.(*v1.PodTemplateList).ListMeta} - for _, item := range obj.(*v1.PodTemplateList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *FakePodTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(podtemplatesResource, c.ns, opts)) - -} - -// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *FakePodTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodTemplate), err -} - -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *FakePodTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &v1.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodTemplate), err -} - -// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *FakePodTemplates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podtemplatesResource, c.ns, name, opts), &v1.PodTemplate{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakePodTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PodTemplateList{}) - return err -} - -// Patch applies the patch and returns the patched podTemplate. -func (c *FakePodTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, pt, data, subresources...), &v1.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodTemplate), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podTemplate. -func (c *FakePodTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error) { - if podTemplate == nil { - return nil, fmt.Errorf("podTemplate provided to Apply must not be nil") - } - data, err := json.Marshal(podTemplate) - if err != nil { - return nil, err - } - name := podTemplate.Name - if name == nil { - return nil, fmt.Errorf("podTemplate.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1.PodTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodTemplate), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go deleted file mode 100644 index 1e469c9b1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - autoscalingv1 "k8s.io/api/autoscaling/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeReplicationControllers implements ReplicationControllerInterface -type FakeReplicationControllers struct { - Fake *FakeCoreV1 - ns string -} - -var replicationcontrollersResource = v1.SchemeGroupVersion.WithResource("replicationcontrollers") - -var replicationcontrollersKind = v1.SchemeGroupVersion.WithKind("ReplicationController") - -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. 
-func (c *FakeReplicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &v1.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicationController), err -} - -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *FakeReplicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), &v1.ReplicationControllerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ReplicationControllerList{ListMeta: obj.(*v1.ReplicationControllerList).ListMeta} - for _, item := range obj.(*v1.ReplicationControllerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *FakeReplicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(replicationcontrollersResource, c.ns, opts)) - -} - -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *FakeReplicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicationController), err -} - -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *FakeReplicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &v1.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicationController), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &v1.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicationController), err -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. 
-func (c *FakeReplicationControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicationcontrollersResource, c.ns, name, opts), &v1.ReplicationController{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeReplicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ReplicationControllerList{}) - return err -} - -// Patch applies the patch and returns the patched replicationController. -func (c *FakeReplicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, pt, data, subresources...), &v1.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicationController), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicationController. -func (c *FakeReplicationControllers) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data), &v1.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicationController), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeReplicationControllers) ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ReplicationController{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ReplicationController), err -} - -// GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} - -// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*autoscalingv1.Scale), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go deleted file mode 100644 index 87664985c..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeResourceQuotas implements ResourceQuotaInterface -type FakeResourceQuotas struct { - Fake *FakeCoreV1 - ns string -} - -var resourcequotasResource = v1.SchemeGroupVersion.WithResource("resourcequotas") - -var resourcequotasKind = v1.SchemeGroupVersion.WithKind("ResourceQuota") - -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *FakeResourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &v1.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ResourceQuota), err -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *FakeResourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(resourcequotasResource, resourcequotasKind, c.ns, opts), &v1.ResourceQuotaList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ResourceQuotaList{ListMeta: obj.(*v1.ResourceQuotaList).ListMeta} - for _, item := range obj.(*v1.ResourceQuotaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *FakeResourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(resourcequotasResource, c.ns, opts)) - -} - -// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *FakeResourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ResourceQuota), err -} - -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *FakeResourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &v1.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ResourceQuota), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &v1.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ResourceQuota), err -} - -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *FakeResourceQuotas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourcequotasResource, c.ns, name, opts), &v1.ResourceQuota{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ResourceQuotaList{}) - return err -} - -// Patch applies the patch and returns the patched resourceQuota. -func (c *FakeResourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, pt, data, subresources...), &v1.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ResourceQuota), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceQuota. -func (c *FakeResourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data), &v1.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ResourceQuota), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeResourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.ResourceQuota{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ResourceQuota), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go deleted file mode 100644 index 90035a703..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeSecrets implements SecretInterface -type FakeSecrets struct { - Fake *FakeCoreV1 - ns string -} - -var secretsResource = v1.SchemeGroupVersion.WithResource("secrets") - -var secretsKind = v1.SchemeGroupVersion.WithKind("Secret") - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *FakeSecrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(secretsResource, c.ns, name), &v1.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Secret), err -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &v1.SecretList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.SecretList{ListMeta: obj.(*v1.SecretList).ListMeta} - for _, item := range obj.(*v1.SecretList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *FakeSecrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts)) - -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *FakeSecrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Secret), err -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *FakeSecrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Secret), err -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *FakeSecrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(secretsResource, c.ns, name, opts), &v1.Secret{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.SecretList{}) - return err -} - -// Patch applies the patch and returns the patched secret. -func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &v1.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Secret), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied secret. -func (c *FakeSecrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error) { - if secret == nil { - return nil, fmt.Errorf("secret provided to Apply must not be nil") - } - data, err := json.Marshal(secret) - if err != nil { - return nil, err - } - name := secret.Name - if name == nil { - return nil, fmt.Errorf("secret.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Secret{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Secret), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go deleted file mode 100644 index 514ab19e3..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeServices implements ServiceInterface -type FakeServices struct { - Fake *FakeCoreV1 - ns string -} - -var servicesResource = v1.SchemeGroupVersion.WithResource("services") - -var servicesKind = v1.SchemeGroupVersion.WithKind("Service") - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *FakeServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Service), err -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &v1.ServiceList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ServiceList{ListMeta: obj.(*v1.ServiceList).ListMeta} - for _, item := range obj.(*v1.ServiceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *FakeServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts)) - -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *FakeServices) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Service), err -} - -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *FakeServices) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Service), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Service), err -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *FakeServices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(servicesResource, c.ns, name, opts), &v1.Service{}) - - return err -} - -// Patch applies the patch and returns the patched service. -func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &v1.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Service), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied service. 
-func (c *FakeServices) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Service), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeServices) ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Service{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Service), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go deleted file mode 100644 index 92e4930d7..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - restclient "k8s.io/client-go/rest" - core "k8s.io/client-go/testing" -) - -func (c *FakeServices) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - return c.Fake.InvokesProxy(core.NewProxyGetAction(servicesResource, c.ns, scheme, name, port, path, params)) -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go deleted file mode 100644 index 115ff0712..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - authenticationv1 "k8s.io/api/authentication/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" - testing "k8s.io/client-go/testing" -) - -// FakeServiceAccounts implements ServiceAccountInterface -type FakeServiceAccounts struct { - Fake *FakeCoreV1 - ns string -} - -var serviceaccountsResource = v1.SchemeGroupVersion.WithResource("serviceaccounts") - -var serviceaccountsKind = v1.SchemeGroupVersion.WithKind("ServiceAccount") - -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *FakeServiceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &v1.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ServiceAccount), err -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *FakeServiceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(serviceaccountsResource, serviceaccountsKind, c.ns, opts), &v1.ServiceAccountList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ServiceAccountList{ListMeta: obj.(*v1.ServiceAccountList).ListMeta} - for _, item := range obj.(*v1.ServiceAccountList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *FakeServiceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(serviceaccountsResource, c.ns, opts)) - -} - -// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *FakeServiceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ServiceAccount), err -} - -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. 
-func (c *FakeServiceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &v1.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ServiceAccount), err -} - -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *FakeServiceAccounts) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(serviceaccountsResource, c.ns, name, opts), &v1.ServiceAccount{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeServiceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ServiceAccountList{}) - return err -} - -// Patch applies the patch and returns the patched serviceAccount. -func (c *FakeServiceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, pt, data, subresources...), &v1.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ServiceAccount), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceAccount. -func (c *FakeServiceAccounts) Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error) { - if serviceAccount == nil { - return nil, fmt.Errorf("serviceAccount provided to Apply must not be nil") - } - data, err := json.Marshal(serviceAccount) - if err != nil { - return nil, err - } - name := serviceAccount.Name - if name == nil { - return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, *name, types.ApplyPatchType, data), &v1.ServiceAccount{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.ServiceAccount), err -} - -// CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. -func (c *FakeServiceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateSubresourceAction(serviceaccountsResource, serviceAccountName, "token", c.ns, tokenRequest), &authenticationv1.TokenRequest{}) - - if obj == nil { - return nil, err - } - return obj.(*authenticationv1.TokenRequest), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index e6883b607..51fa11d1b 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // LimitRangesGetter has a method to return a LimitRangeInterface. @@ -41,168 +38,34 @@ type LimitRangesGetter interface { // LimitRangeInterface has methods to work with LimitRange resources. type LimitRangeInterface interface { - Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (*v1.LimitRange, error) - Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (*v1.LimitRange, error) + Create(ctx context.Context, limitRange *corev1.LimitRange, opts metav1.CreateOptions) (*corev1.LimitRange, error) + Update(ctx context.Context, limitRange *corev1.LimitRange, opts metav1.UpdateOptions) (*corev1.LimitRange, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.LimitRange, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.LimitRangeList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.LimitRange, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.LimitRangeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) - Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.LimitRange, err error) + Apply(ctx context.Context, limitRange *applyconfigurationscorev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.LimitRange, err error) LimitRangeExpansion } // limitRanges implements LimitRangeInterface type limitRanges struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.LimitRange, *corev1.LimitRangeList, *applyconfigurationscorev1.LimitRangeApplyConfiguration] } // newLimitRanges returns a LimitRanges func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { return &limitRanges{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.LimitRange, *corev1.LimitRangeList, *applyconfigurationscorev1.LimitRangeApplyConfiguration]( + "limitranges", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.LimitRange { return &corev1.LimitRange{} }, + func() *corev1.LimitRangeList { return &corev1.LimitRangeList{} }, + gentype.PrefersProtobuf[*corev1.LimitRange](), + ), } } - -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *limitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Get(). 
- Namespace(c.ns). - Resource("limitranges"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.LimitRangeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *limitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Post(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(limitRange). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Put(). - Namespace(c.ns). - Resource("limitranges"). - Name(limitRange.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(limitRange). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *limitRanges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *limitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched limitRange. -func (c *limitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied limitRange. -func (c *limitRanges) Apply(ctx context.Context, limitRange *corev1.LimitRangeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.LimitRange, err error) { - if limitRange == nil { - return nil, fmt.Errorf("limitRange provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(limitRange) - if err != nil { - return nil, err - } - name := limitRange.Name - if name == nil { - return nil, fmt.Errorf("limitRange.Name must be provided to Apply") - } - result = &v1.LimitRange{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("limitranges"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index 06c77b4c4..323934938 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NamespacesGetter has a method to return a NamespaceInterface. @@ -41,187 +38,37 @@ type NamespacesGetter interface { // NamespaceInterface has methods to work with Namespace resources. type NamespaceInterface interface { - Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error) - Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) - UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error) + Create(ctx context.Context, namespace *corev1.Namespace, opts metav1.CreateOptions) (*corev1.Namespace, error) + Update(ctx context.Context, namespace *corev1.Namespace, opts metav1.UpdateOptions) (*corev1.Namespace, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, namespace *corev1.Namespace, opts metav1.UpdateOptions) (*corev1.Namespace, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.NamespaceList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Namespace, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.NamespaceList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) - Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) - ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Namespace, err error) + Apply(ctx context.Context, namespace *applyconfigurationscorev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Namespace, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, namespace *applyconfigurationscorev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Namespace, err error) NamespaceExpansion } // namespaces implements NamespaceInterface type namespaces struct { - client rest.Interface + *gentype.ClientWithListAndApply[*corev1.Namespace, *corev1.NamespaceList, *applyconfigurationscorev1.NamespaceApplyConfiguration] } // newNamespaces returns a Namespaces func newNamespaces(c *CoreV1Client) *namespaces { return &namespaces{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*corev1.Namespace, *corev1.NamespaceList, *applyconfigurationscorev1.NamespaceApplyConfiguration]( + "namespaces", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *corev1.Namespace { return &corev1.Namespace{} }, + func() *corev1.NamespaceList { return &corev1.NamespaceList{} }, + gentype.PrefersProtobuf[*corev1.Namespace](), + ), } } - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Get(). - Resource("namespaces"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NamespaceList{} - err = c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested namespaces. 
-func (c *namespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *namespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(namespace). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched namespace. -func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Patch(pt). - Resource("namespaces"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied namespace. -func (c *namespaces) Apply(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - result = &v1.Namespace{} - err = c.client.Patch(types.ApplyPatchType). - Resource("namespaces"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *namespaces) ApplyStatus(ctx context.Context, namespace *corev1.NamespaceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Namespace, err error) { - if namespace == nil { - return nil, fmt.Errorf("namespace provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(namespace) - if err != nil { - return nil, err - } - - name := namespace.Name - if name == nil { - return nil, fmt.Errorf("namespace.Name must be provided to Apply") - } - - result = &v1.Namespace{} - err = c.client.Patch(types.ApplyPatchType). - Resource("namespaces"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go index be1116db1..4f720fb92 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go @@ -32,6 +32,6 @@ type NamespaceExpansion interface { // Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. func (c *namespaces) Finalize(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) { result = &v1.Namespace{} - err = c.client.Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result) + err = c.GetClient().Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result) return } diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index d9725b2f9..1851b025f 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NodesGetter has a method to return a NodeInterface. @@ -41,203 +38,38 @@ type NodesGetter interface { // NodeInterface has methods to work with Node resources. 
type NodeInterface interface { - Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error) - Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) - UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) + Create(ctx context.Context, node *corev1.Node, opts metav1.CreateOptions) (*corev1.Node, error) + Update(ctx context.Context, node *corev1.Node, opts metav1.UpdateOptions) (*corev1.Node, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, node *corev1.Node, opts metav1.UpdateOptions) (*corev1.Node, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Node, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.NodeList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Node, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.NodeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) - Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) - ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Node, err error) + Apply(ctx context.Context, node *applyconfigurationscorev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Node, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, node *applyconfigurationscorev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Node, err error) NodeExpansion } // nodes implements NodeInterface type nodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*corev1.Node, *corev1.NodeList, *applyconfigurationscorev1.NodeApplyConfiguration] } // newNodes returns a Nodes func newNodes(c *CoreV1Client) *nodes { return &nodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*corev1.Node, *corev1.NodeList, *applyconfigurationscorev1.NodeApplyConfiguration]( + "nodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *corev1.Node { return &corev1.Node{} }, + func() *corev1.NodeList { return &corev1.NodeList{} }, + gentype.PrefersProtobuf[*corev1.Node](), + ), } } - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. 
-func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NodeList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Post(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(node). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *nodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("nodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("nodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched node. -func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Patch(pt). - Resource("nodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied node. 
-func (c *nodes) Apply(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - result = &v1.Node{} - err = c.client.Patch(types.ApplyPatchType). - Resource("nodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *nodes) ApplyStatus(ctx context.Context, node *corev1.NodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Node, err error) { - if node == nil { - return nil, fmt.Errorf("node provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(node) - if err != nil { - return nil, err - } - - name := node.Name - if name == nil { - return nil, fmt.Errorf("node.Name must be provided to Apply") - } - - result = &v1.Node{} - err = c.client.Patch(types.ApplyPatchType). - Resource("nodes"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go index bdf7bfed8..df86253b0 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go @@ -34,7 +34,7 @@ type NodeExpansion interface { // the node that the server returns, or an error. func (c *nodes) PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error) { result := &v1.Node{} - err := c.client.Patch(types.StrategicMergePatchType). + err := c.GetClient().Patch(types.StrategicMergePatchType). Resource("nodes"). Name(nodeName). SubResource("status"). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index a8e229597..077a1ba4f 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PersistentVolumesGetter has a method to return a PersistentVolumeInterface. @@ -41,203 +38,38 @@ type PersistentVolumesGetter interface { // PersistentVolumeInterface has methods to work with PersistentVolume resources. 
type PersistentVolumeInterface interface { - Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (*v1.PersistentVolume, error) - Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) - UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error) + Create(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts metav1.CreateOptions) (*corev1.PersistentVolume, error) + Update(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts metav1.UpdateOptions) (*corev1.PersistentVolume, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, persistentVolume *corev1.PersistentVolume, opts metav1.UpdateOptions) (*corev1.PersistentVolume, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolume, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.PersistentVolume, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.PersistentVolumeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) - Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) - ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.PersistentVolume, err error) + Apply(ctx context.Context, persistentVolume *applyconfigurationscorev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolume, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, persistentVolume *applyconfigurationscorev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolume, err error) PersistentVolumeExpansion } // persistentVolumes implements PersistentVolumeInterface type persistentVolumes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*corev1.PersistentVolume, *corev1.PersistentVolumeList, *applyconfigurationscorev1.PersistentVolumeApplyConfiguration] } // newPersistentVolumes returns a PersistentVolumes func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { return &persistentVolumes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*corev1.PersistentVolume, *corev1.PersistentVolumeList, *applyconfigurationscorev1.PersistentVolumeApplyConfiguration]( + "persistentvolumes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *corev1.PersistentVolume { return &corev1.PersistentVolume{} }, + func() *corev1.PersistentVolumeList { return &corev1.PersistentVolumeList{} }, + gentype.PrefersProtobuf[*corev1.PersistentVolume](), + ), } } - -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *persistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Get(). - Resource("persistentvolumes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PersistentVolumeList{} - err = c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *persistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Post(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Put(). 
- Resource("persistentvolumes"). - Name(persistentVolume.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *persistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolume). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *persistentVolumes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("persistentvolumes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched persistentVolume. -func (c *persistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Patch(pt). - Resource("persistentvolumes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolume. -func (c *persistentVolumes) Apply(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - result = &v1.PersistentVolume{} - err = c.client.Patch(types.ApplyPatchType). - Resource("persistentvolumes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *persistentVolumes) ApplyStatus(ctx context.Context, persistentVolume *corev1.PersistentVolumeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolume, err error) { - if persistentVolume == nil { - return nil, fmt.Errorf("persistentVolume provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolume) - if err != nil { - return nil, err - } - - name := persistentVolume.Name - if name == nil { - return nil, fmt.Errorf("persistentVolume.Name must be provided to Apply") - } - - result = &v1.PersistentVolume{} - err = c.client.Patch(types.ApplyPatchType). - Resource("persistentvolumes"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index 2e7f4fb44..cbe75e812 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. @@ -41,216 +38,38 @@ type PersistentVolumeClaimsGetter interface { // PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. type PersistentVolumeClaimInterface interface { - Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error) - Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) - UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error) + Create(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts metav1.CreateOptions) (*corev1.PersistentVolumeClaim, error) + Update(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*corev1.PersistentVolumeClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*corev1.PersistentVolumeClaim, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolumeClaim, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.PersistentVolumeClaim, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.PersistentVolumeClaimList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) - Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) - ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.PersistentVolumeClaim, err error) + Apply(ctx context.Context, persistentVolumeClaim *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolumeClaim, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, persistentVolumeClaim *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PersistentVolumeClaim, err error) PersistentVolumeClaimExpansion } // persistentVolumeClaims implements PersistentVolumeClaimInterface type persistentVolumeClaims struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaimList, *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration] } // newPersistentVolumeClaims returns a PersistentVolumeClaims func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims { return &persistentVolumeClaims{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaimList, *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration]( + "persistentvolumeclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.PersistentVolumeClaim { return &corev1.PersistentVolumeClaim{} }, + func() *corev1.PersistentVolumeClaimList { return &corev1.PersistentVolumeClaimList{} }, + gentype.PrefersProtobuf[*corev1.PersistentVolumeClaim](), + ), } } - -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PersistentVolumeClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *persistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *persistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(persistentVolumeClaim). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *persistentVolumeClaims) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *persistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *persistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied persistentVolumeClaim. -func (c *persistentVolumeClaims) Apply(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *persistentVolumeClaims) ApplyStatus(ctx context.Context, persistentVolumeClaim *corev1.PersistentVolumeClaimApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PersistentVolumeClaim, err error) { - if persistentVolumeClaim == nil { - return nil, fmt.Errorf("persistentVolumeClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(persistentVolumeClaim) - if err != nil { - return nil, err - } - - name := persistentVolumeClaim.Name - if name == nil { - return nil, fmt.Errorf("persistentVolumeClaim.Name must be provided to Apply") - } - - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 63122cf3f..072a55941 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -19,18 +19,15 @@ limitations under the License. 
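// Illustrative sketch (not part of the vendored files): the pod.go hunk that
// continues below follows the same rewrite as persistentvolumeclaim.go above —
// the hand-written REST methods are deleted and the struct embeds client-go's
// generic typed client, which supplies Create/Update/UpdateStatus/Delete/
// DeleteCollection/Get/List/Watch/Patch/Apply/ApplyStatus, leaving only
// subresource helpers hand-written. Caller-side code is unchanged; a minimal
// example assuming a configured *kubernetes.Clientset (cs, ns and
// "example-pod" are illustrative names, not taken from this diff):
package example // hypothetical package, for illustration only

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getPod exercises the regenerated PodInterface; only the generated
// internals differ after the gentype migration, not the call sites.
func getPod(ctx context.Context, cs *kubernetes.Clientset, ns string) (*corev1.Pod, error) {
	return cs.CoreV1().Pods(ns).Get(ctx, "example-pod", metav1.GetOptions{})
}

// The regenerated pod.go hunk continues below.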
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodsGetter has a method to return a PodInterface. @@ -41,117 +38,54 @@ type PodsGetter interface { // PodInterface has methods to work with Pod resources. type PodInterface interface { - Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error) - Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) - UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + Create(ctx context.Context, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) + Update(ctx context.Context, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Pod, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PodList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Pod, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) - Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) - ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) - UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Pod, err error) + Apply(ctx context.Context, pod *applyconfigurationscorev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Pod, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, pod *applyconfigurationscorev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Pod, err error) + UpdateEphemeralContainers(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error) + UpdateResize(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error) PodExpansion } // pods implements PodInterface type pods struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.Pod, *corev1.PodList, *applyconfigurationscorev1.PodApplyConfiguration] } // newPods returns a Pods func newPods(c *CoreV1Client, namespace string) *pods { return &pods{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.Pod, *corev1.PodList, *applyconfigurationscorev1.PodApplyConfiguration]( + "pods", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.Pod { return &corev1.Pod{} }, + func() *corev1.PodList { return &corev1.PodList{} }, + gentype.PrefersProtobuf[*corev1.Pod](), + ), } } -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pod). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). +// UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (result *corev1.Pod, err error) { + result = &corev1.Pod{} + err = c.GetClient().Put(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("pods"). - Name(pod.Name). - SubResource("status"). + Name(podName). + SubResource("ephemeralcontainers"). VersionedParams(&opts, scheme.ParameterCodec). Body(pod). Do(ctx). @@ -159,112 +93,15 @@ func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.Update return } -// Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *pods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched pod. -func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pods"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied pod. -func (c *pods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - result = &v1.Pod{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("pods"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *pods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) { - if pod == nil { - return nil, fmt.Errorf("pod provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(pod) - if err != nil { - return nil, err - } - - name := pod.Name - if name == nil { - return nil, fmt.Errorf("pod.Name must be provided to Apply") - } - - result = &v1.Pod{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("pods"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). +// UpdateResize takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) UpdateResize(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (result *corev1.Pod, err error) { + result = &corev1.Pod{} + err = c.GetClient().Put(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("pods"). Name(podName). - SubResource("ephemeralcontainers"). + SubResource("resize"). VersionedParams(&opts, scheme.ParameterCodec). Body(pod). Do(ctx). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go index 8b6e0e932..a2d4d70d4 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go @@ -47,33 +47,33 @@ type PodExpansion interface { // Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). func (c *pods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error() } // Evict submits a policy/v1beta1 Eviction request to the pod's eviction subresource. // Equivalent to calling EvictV1beta1. // Deprecated: Use EvictV1() (supported in 1.22+) or EvictV1beta1(). 
func (c *pods) Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } func (c *pods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } func (c *pods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error() } // Get constructs a request for getting the logs for a pod func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { - return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) + return c.GetClient().Get().Namespace(c.GetNamespace()).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) } // ProxyGet returns a response of the pod by calling it through the proxy. func (c *pods) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). - Namespace(c.ns). + request := c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("pods"). SubResource("proxy"). Name(net.JoinSchemeNamePort(scheme, name, port)). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index ff90fc0e6..b0cfa1bc1 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodTemplatesGetter has a method to return a PodTemplateInterface. @@ -41,168 +38,34 @@ type PodTemplatesGetter interface { // PodTemplateInterface has methods to work with PodTemplate resources. 
type PodTemplateInterface interface { - Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (*v1.PodTemplate, error) - Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (*v1.PodTemplate, error) + Create(ctx context.Context, podTemplate *corev1.PodTemplate, opts metav1.CreateOptions) (*corev1.PodTemplate, error) + Update(ctx context.Context, podTemplate *corev1.PodTemplate, opts metav1.UpdateOptions) (*corev1.PodTemplate, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodTemplate, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PodTemplateList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.PodTemplate, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.PodTemplateList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) - Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.PodTemplate, err error) + Apply(ctx context.Context, podTemplate *applyconfigurationscorev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.PodTemplate, err error) PodTemplateExpansion } // podTemplates implements PodTemplateInterface type podTemplates struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.PodTemplate, *corev1.PodTemplateList, *applyconfigurationscorev1.PodTemplateApplyConfiguration] } // newPodTemplates returns a PodTemplates func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { return &podTemplates{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.PodTemplate, *corev1.PodTemplateList, *applyconfigurationscorev1.PodTemplateApplyConfiguration]( + "podtemplates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.PodTemplate { return &corev1.PodTemplate{} }, + func() *corev1.PodTemplateList { return &corev1.PodTemplateList{} }, + gentype.PrefersProtobuf[*corev1.PodTemplate](), + ), } } - -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *podTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *podTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podTemplate). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podtemplates"). - Name(podTemplate.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podTemplate). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *podTemplates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podTemplate. -func (c *podTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podTemplate. 
-func (c *podTemplates) Apply(ctx context.Context, podTemplate *corev1.PodTemplateApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodTemplate, err error) { - if podTemplate == nil { - return nil, fmt.Errorf("podTemplate provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podTemplate) - if err != nil { - return nil, err - } - name := podTemplate.Name - if name == nil { - return nil, fmt.Errorf("podTemplate.Name must be provided to Apply") - } - result = &v1.PodTemplate{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podtemplates"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index 49c75d967..f8a7c9285 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -19,19 +19,16 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" autoscalingv1 "k8s.io/api/autoscaling/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ReplicationControllersGetter has a method to return a ReplicationControllerInterface. @@ -42,17 +39,19 @@ type ReplicationControllersGetter interface { // ReplicationControllerInterface has methods to work with ReplicationController resources. type ReplicationControllerInterface interface { - Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (*v1.ReplicationController, error) - Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) - UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error) + Create(ctx context.Context, replicationController *corev1.ReplicationController, opts metav1.CreateOptions) (*corev1.ReplicationController, error) + Update(ctx context.Context, replicationController *corev1.ReplicationController, opts metav1.UpdateOptions) (*corev1.ReplicationController, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, replicationController *corev1.ReplicationController, opts metav1.UpdateOptions) (*corev1.ReplicationController, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicationController, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicationControllerList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ReplicationController, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ReplicationControllerList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) - Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) - ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ReplicationController, err error) + Apply(ctx context.Context, replicationController *applyconfigurationscorev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ReplicationController, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, replicationController *applyconfigurationscorev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ReplicationController, err error) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) @@ -61,209 +60,30 @@ type ReplicationControllerInterface interface { // replicationControllers implements ReplicationControllerInterface type replicationControllers struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.ReplicationController, *corev1.ReplicationControllerList, *applyconfigurationscorev1.ReplicationControllerApplyConfiguration] } // newReplicationControllers returns a ReplicationControllers func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers { return &replicationControllers{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.ReplicationController, *corev1.ReplicationControllerList, *applyconfigurationscorev1.ReplicationControllerApplyConfiguration]( + "replicationcontrollers", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.ReplicationController { return &corev1.ReplicationController{} }, + func() *corev1.ReplicationControllerList { return &corev1.ReplicationControllerList{} }, + gentype.PrefersProtobuf[*corev1.ReplicationController](), + ), } } -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. 
-func (c *replicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *replicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ReplicationControllerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *replicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicationController). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. 
-func (c *replicationControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicationController. -func (c *replicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicationController. -func (c *replicationControllers) Apply(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - result = &v1.ReplicationController{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *replicationControllers) ApplyStatus(ctx context.Context, replicationController *corev1.ReplicationControllerApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicationController, err error) { - if replicationController == nil { - return nil, fmt.Errorf("replicationController provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicationController) - if err != nil { - return nil, err - } - - name := replicationController.Name - if name == nil { - return nil, fmt.Errorf("replicationController.Name must be provided to Apply") - } - - result = &v1.ReplicationController{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. 
func (c *replicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). + err = c.GetClient().Get(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("replicationcontrollers"). Name(replicationControllerName). SubResource("scale"). @@ -276,8 +96,9 @@ func (c *replicationControllers) GetScale(ctx context.Context, replicationContro // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). + err = c.GetClient().Put(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("replicationcontrollers"). Name(replicationControllerName). SubResource("scale"). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 8444d164e..a0435accc 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ResourceQuotasGetter has a method to return a ResourceQuotaInterface. @@ -41,216 +38,38 @@ type ResourceQuotasGetter interface { // ResourceQuotaInterface has methods to work with ResourceQuota resources. type ResourceQuotaInterface interface { - Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (*v1.ResourceQuota, error) - Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) - UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error) + Create(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts metav1.CreateOptions) (*corev1.ResourceQuota, error) + Update(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts metav1.UpdateOptions) (*corev1.ResourceQuota, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, resourceQuota *corev1.ResourceQuota, opts metav1.UpdateOptions) (*corev1.ResourceQuota, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ResourceQuota, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ResourceQuotaList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ResourceQuota, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ResourceQuotaList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) - Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) - ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ResourceQuota, err error) + Apply(ctx context.Context, resourceQuota *applyconfigurationscorev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ResourceQuota, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, resourceQuota *applyconfigurationscorev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ResourceQuota, err error) ResourceQuotaExpansion } // resourceQuotas implements ResourceQuotaInterface type resourceQuotas struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.ResourceQuota, *corev1.ResourceQuotaList, *applyconfigurationscorev1.ResourceQuotaApplyConfiguration] } // newResourceQuotas returns a ResourceQuotas func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { return &resourceQuotas{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.ResourceQuota, *corev1.ResourceQuotaList, *applyconfigurationscorev1.ResourceQuotaApplyConfiguration]( + "resourcequotas", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.ResourceQuota { return &corev1.ResourceQuota{} }, + func() *corev1.ResourceQuotaList { return &corev1.ResourceQuotaList{} }, + gentype.PrefersProtobuf[*corev1.ResourceQuota](), + ), } } - -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *resourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. 
-func (c *resourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ResourceQuotaList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *resourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceQuota). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *resourceQuotas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceQuota. 
-func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceQuota. -func (c *resourceQuotas) Apply(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - result = &v1.ResourceQuota{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourcequotas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceQuotas) ApplyStatus(ctx context.Context, resourceQuota *corev1.ResourceQuotaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ResourceQuota, err error) { - if resourceQuota == nil { - return nil, fmt.Errorf("resourceQuota provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceQuota) - if err != nil { - return nil, err - } - - name := resourceQuota.Name - if name == nil { - return nil, fmt.Errorf("resourceQuota.Name must be provided to Apply") - } - - result = &v1.ResourceQuota{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourcequotas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 4aba33038..a7ab56a27 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // SecretsGetter has a method to return a SecretInterface. @@ -41,168 +38,34 @@ type SecretsGetter interface { // SecretInterface has methods to work with Secret resources. 
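// Illustrative sketch (not part of the vendored files): the SecretInterface
// that follows keeps only the plain verbs — no UpdateStatus/ApplyStatus, since
// Secret has no Status member — and its Apply path is now backed by the
// embedded generic client. A caller-side example of that Apply path, assuming
// a configured *kubernetes.Clientset; "example-secret" and the field manager
// name are illustrative only:
package example // hypothetical package, for illustration only

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applySecret server-side-applies a Secret through the regenerated typed client.
func applySecret(ctx context.Context, cs *kubernetes.Clientset, ns string) (*corev1.Secret, error) {
	cfg := applyconfigurationscorev1.Secret("example-secret", ns).
		WithStringData(map[string]string{"token": "changeme"})
	return cs.CoreV1().Secrets(ns).Apply(ctx, cfg, metav1.ApplyOptions{
		FieldManager: "example-manager",
		Force:        true,
	})
}

// The secret.go and service.go hunks continue below.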
type SecretInterface interface { - Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (*v1.Secret, error) - Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error) + Create(ctx context.Context, secret *corev1.Secret, opts metav1.CreateOptions) (*corev1.Secret, error) + Update(ctx context.Context, secret *corev1.Secret, opts metav1.UpdateOptions) (*corev1.Secret, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Secret, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.SecretList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) - Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Secret, err error) + Apply(ctx context.Context, secret *applyconfigurationscorev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Secret, err error) SecretExpansion } // secrets implements SecretInterface type secrets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.Secret, *corev1.SecretList, *applyconfigurationscorev1.SecretApplyConfiguration] } // newSecrets returns a Secrets func newSecrets(c *CoreV1Client, namespace string) *secrets { return &secrets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.Secret, *corev1.SecretList, *applyconfigurationscorev1.SecretApplyConfiguration]( + "secrets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.Secret { return &corev1.Secret{} }, + func() *corev1.SecretList { return &corev1.SecretList{} }, + gentype.PrefersProtobuf[*corev1.Secret](), + ), } } - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. 
-func (c *secrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Post(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(secret). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Put(). - Namespace(c.ns). - Resource("secrets"). - Name(secret.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(secret). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *secrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("secrets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied secret. -func (c *secrets) Apply(ctx context.Context, secret *corev1.SecretApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Secret, err error) { - if secret == nil { - return nil, fmt.Errorf("secret provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(secret) - if err != nil { - return nil, err - } - name := secret.Name - if name == nil { - return nil, fmt.Errorf("secret.Name must be provided to Apply") - } - result = &v1.Secret{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("secrets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index 3fe22ba44..f145a137c 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServicesGetter has a method to return a ServiceInterface. @@ -41,199 +38,37 @@ type ServicesGetter interface { // ServiceInterface has methods to work with Service resources. type ServiceInterface interface { - Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error) - Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) - UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) + Create(ctx context.Context, service *corev1.Service, opts metav1.CreateOptions) (*corev1.Service, error) + Update(ctx context.Context, service *corev1.Service, opts metav1.UpdateOptions) (*corev1.Service, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, service *corev1.Service, opts metav1.UpdateOptions) (*corev1.Service, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Service, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ServiceList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) - Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) - ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.Service, err error) + Apply(ctx context.Context, service *applyconfigurationscorev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Service, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, service *applyconfigurationscorev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.Service, err error) ServiceExpansion } // services implements ServiceInterface type services struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*corev1.Service, *corev1.ServiceList, *applyconfigurationscorev1.ServiceApplyConfiguration] } // newServices returns a Services func newServices(c *CoreV1Client, namespace string) *services { return &services{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.Service, *corev1.ServiceList, *applyconfigurationscorev1.ServiceApplyConfiguration]( + "services", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.Service { return &corev1.Service{} }, + func() *corev1.ServiceList { return &corev1.ServiceList{} }, + gentype.PrefersProtobuf[*corev1.Service](), + ), } } - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
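As with secrets, the per-verb service methods deleted below are covered by the embedded generic client; UpdateStatus and ApplyStatus stay on the interface because Service has a Status member. The apply configurations now come in under the applyconfigurationscorev1 alias, but caller-side server-side apply is unchanged. A sketch of that path; the service name, label, and field manager are made up for illustration:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

func applyServiceLabels(ctx context.Context, cs kubernetes.Interface) error {
	// Declarative apply configuration; only the fields set here are owned
	// by the given field manager.
	svc := applycorev1.Service("my-svc", "default").
		WithLabels(map[string]string{"team": "platform"})

	_, err := cs.CoreV1().Services("default").Apply(ctx, svc, metav1.ApplyOptions{
		FieldManager: "example-controller",
		Force:        true,
	})
	return err
}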
-func (c *services) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(service). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched service. -func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("services"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied service. -func (c *services) Apply(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - result = &v1.Service{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("services"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *services) ApplyStatus(ctx context.Context, service *corev1.ServiceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Service, err error) { - if service == nil { - return nil, fmt.Errorf("service provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(service) - if err != nil { - return nil, err - } - - name := service.Name - if name == nil { - return nil, fmt.Errorf("service.Name must be provided to Apply") - } - - result = &v1.Service{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("services"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go index 4937fd1a3..9a6f78138 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go @@ -28,8 +28,8 @@ type ServiceExpansion interface { // ProxyGet returns a response of the service by calling it through the proxy. func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). 
- Namespace(c.ns). + request := c.GetClient().Get(). + Namespace(c.GetNamespace()). Resource("services"). SubResource("proxy"). Name(net.JoinSchemeNamePort(scheme, name, port)). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index bdf589b96..8458b6d9b 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -19,19 +19,16 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" authenticationv1 "k8s.io/api/authentication/v1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - corev1 "k8s.io/client-go/applyconfigurations/core/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServiceAccountsGetter has a method to return a ServiceAccountInterface. @@ -42,15 +39,15 @@ type ServiceAccountsGetter interface { // ServiceAccountInterface has methods to work with ServiceAccount resources. type ServiceAccountInterface interface { - Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (*v1.ServiceAccount, error) - Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (*v1.ServiceAccount, error) + Create(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts metav1.CreateOptions) (*corev1.ServiceAccount, error) + Update(ctx context.Context, serviceAccount *corev1.ServiceAccount, opts metav1.UpdateOptions) (*corev1.ServiceAccount, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServiceAccount, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceAccountList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1.ServiceAccount, error) + List(ctx context.Context, opts metav1.ListOptions) (*corev1.ServiceAccountList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) - Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *corev1.ServiceAccount, err error) + Apply(ctx context.Context, serviceAccount *applyconfigurationscorev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *corev1.ServiceAccount, err error) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (*authenticationv1.TokenRequest, error) ServiceAccountExpansion @@ -58,163 +55,30 @@ type ServiceAccountInterface interface { // serviceAccounts implements ServiceAccountInterface type serviceAccounts struct { - client rest.Interface - ns 
string + *gentype.ClientWithListAndApply[*corev1.ServiceAccount, *corev1.ServiceAccountList, *applyconfigurationscorev1.ServiceAccountApplyConfiguration] } // newServiceAccounts returns a ServiceAccounts func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { return &serviceAccounts{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*corev1.ServiceAccount, *corev1.ServiceAccountList, *applyconfigurationscorev1.ServiceAccountApplyConfiguration]( + "serviceaccounts", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, + func() *corev1.ServiceAccountList { return &corev1.ServiceAccountList{} }, + gentype.PrefersProtobuf[*corev1.ServiceAccount](), + ), } } -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *serviceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ServiceAccountList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *serviceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Post(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceAccount). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Put(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(serviceAccount.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceAccount). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. 
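For service accounts, the only method body that keeps hand-maintained request-building code is CreateToken, which in the hunk further below switches to c.GetClient().Post() with UseProtobufAsDefault() and c.GetNamespace(). From the caller's side the token subresource works as before. A sketch; the namespace, service account name, and expiration are illustrative:

package example

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func requestToken(ctx context.Context, cs kubernetes.Interface) error {
	exp := int64(3600) // one hour; illustrative
	tr := &authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{
			ExpirationSeconds: &exp,
		},
	}
	// The token subresource method now reaches the REST client through the
	// embedded generic client's GetClient()/GetNamespace() accessors.
	tok, err := cs.CoreV1().ServiceAccounts("default").CreateToken(ctx, "default", tr, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	fmt.Println(len(tok.Status.Token))
	return nil
}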
-func (c *serviceAccounts) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched serviceAccount. -func (c *serviceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceAccount. -func (c *serviceAccounts) Apply(ctx context.Context, serviceAccount *corev1.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ServiceAccount, err error) { - if serviceAccount == nil { - return nil, fmt.Errorf("serviceAccount provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceAccount) - if err != nil { - return nil, err - } - name := serviceAccount.Name - if name == nil { - return nil, fmt.Errorf("serviceAccount.Name must be provided to Apply") - } - result = &v1.ServiceAccount{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - // CreateToken takes the representation of a tokenRequest and creates it. Returns the server's representation of the tokenRequest, and an error, if there is any. func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) { result = &authenticationv1.TokenRequest{} - err = c.client.Post(). - Namespace(c.ns). + err = c.GetClient().Post(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("serviceaccounts"). Name(serviceAccountName). SubResource("token"). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go index 9041443b3..fbc685df8 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/discovery_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/discovery/v1" - "k8s.io/client-go/kubernetes/scheme" + discoveryv1 "k8s.io/api/discovery/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *DiscoveryV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := discoveryv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go index 63e616b03..75b9a559e 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/endpointslice.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/discovery/v1" + discoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - discoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" + applyconfigurationsdiscoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointSlicesGetter has a method to return a EndpointSliceInterface. @@ -41,168 +38,34 @@ type EndpointSlicesGetter interface { // EndpointSliceInterface has methods to work with EndpointSlice resources. 
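In setConfigDefaults above, the negotiated serializer is now built via rest.CodecFactoryForGeneratedClient, which together with gentype.PrefersProtobuf appears to be what lets the generated clients prefer protobuf for built-in types (a reading of the change; the diff itself does not state the rationale). Constructing and using the typed discovery client is unchanged. A sketch assuming an in-cluster config and an illustrative namespace:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	discoveryv1client "k8s.io/client-go/kubernetes/typed/discovery/v1"
	"k8s.io/client-go/rest"
)

func listSlices(ctx context.Context) error {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return err
	}
	// NewForConfig runs setConfigDefaults, including the new codec factory.
	client, err := discoveryv1client.NewForConfig(cfg)
	if err != nil {
		return err
	}
	slices, err := client.EndpointSlices("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println(len(slices.Items))
	return nil
}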
type EndpointSliceInterface interface { - Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (*v1.EndpointSlice, error) - Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (*v1.EndpointSlice, error) + Create(ctx context.Context, endpointSlice *discoveryv1.EndpointSlice, opts metav1.CreateOptions) (*discoveryv1.EndpointSlice, error) + Update(ctx context.Context, endpointSlice *discoveryv1.EndpointSlice, opts metav1.UpdateOptions) (*discoveryv1.EndpointSlice, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.EndpointSlice, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointSliceList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*discoveryv1.EndpointSlice, error) + List(ctx context.Context, opts metav1.ListOptions) (*discoveryv1.EndpointSliceList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) - Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *discoveryv1.EndpointSlice, err error) + Apply(ctx context.Context, endpointSlice *applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *discoveryv1.EndpointSlice, err error) EndpointSliceExpansion } // endpointSlices implements EndpointSliceInterface type endpointSlices struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*discoveryv1.EndpointSlice, *discoveryv1.EndpointSliceList, *applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration] } // newEndpointSlices returns a EndpointSlices func newEndpointSlices(c *DiscoveryV1Client, namespace string) *endpointSlices { return &endpointSlices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*discoveryv1.EndpointSlice, *discoveryv1.EndpointSliceList, *applyconfigurationsdiscoveryv1.EndpointSliceApplyConfiguration]( + "endpointslices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *discoveryv1.EndpointSlice { return &discoveryv1.EndpointSlice{} }, + func() *discoveryv1.EndpointSliceList { return &discoveryv1.EndpointSliceList{} }, + gentype.PrefersProtobuf[*discoveryv1.EndpointSlice](), + ), } } - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *endpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. 
-func (c *endpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EndpointSliceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *endpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpointslices"). - Name(endpointSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *endpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { - result = &v1.EndpointSlice{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. 
-func (c *endpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - result = &v1.EndpointSlice{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpointslices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_discovery_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_discovery_client.go deleted file mode 100644 index 1ca9b23f5..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_discovery_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/discovery/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeDiscoveryV1 struct { - *testing.Fake -} - -func (c *FakeDiscoveryV1) EndpointSlices(namespace string) v1.EndpointSliceInterface { - return &FakeEndpointSlices{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeDiscoveryV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go deleted file mode 100644 index d159b5ea9..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1/fake/fake_endpointslice.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/discovery/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - discoveryv1 "k8s.io/client-go/applyconfigurations/discovery/v1" - testing "k8s.io/client-go/testing" -) - -// FakeEndpointSlices implements EndpointSliceInterface -type FakeEndpointSlices struct { - Fake *FakeDiscoveryV1 - ns string -} - -var endpointslicesResource = v1.SchemeGroupVersion.WithResource("endpointslices") - -var endpointslicesKind = v1.SchemeGroupVersion.WithKind("EndpointSlice") - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1.EndpointSlice{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.EndpointSlice), err -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1.EndpointSliceList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.EndpointSliceList{ListMeta: obj.(*v1.EndpointSliceList).ListMeta} - for _, item := range obj.(*v1.EndpointSliceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *FakeEndpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) - -} - -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. 
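The fake discovery clients are dropped from vendor/ here, presumably because nothing in this module imports them any longer (an assumption; the diff does not say why). Tests that need a fake typed client can still go through the top-level fake clientset. A sketch, with an illustrative EndpointSlice name:

package example

import (
	"context"
	"testing"

	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestListEndpointSlices(t *testing.T) {
	// Seed the fake clientset with one EndpointSlice object.
	cs := fake.NewSimpleClientset(&discoveryv1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{Name: "my-svc-abc12", Namespace: "default"},
	})

	slices, err := cs.DiscoveryV1().EndpointSlices("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(slices.Items) != 1 {
		t.Fatalf("expected 1 slice, got %d", len(slices.Items))
	}
}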
-func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.EndpointSlice), err -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.EndpointSlice), err -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &v1.EndpointSlice{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.EndpointSliceList{}) - return err -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1.EndpointSlice{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.EndpointSlice), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. -func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1.EndpointSliceApplyConfiguration, opts metav1.ApplyOptions) (result *v1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data), &v1.EndpointSlice{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.EndpointSlice), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go index 193d5e9eb..908446c6d 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/discovery_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/discovery/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *DiscoveryV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := discoveryv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go index 2ade83302..4ef2752e7 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/discovery/v1beta1" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - discoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" + applyconfigurationsdiscoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EndpointSlicesGetter has a method to return a EndpointSliceInterface. @@ -41,168 +38,34 @@ type EndpointSlicesGetter interface { // EndpointSliceInterface has methods to work with EndpointSlice resources. 
type EndpointSliceInterface interface { - Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (*v1beta1.EndpointSlice, error) - Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (*v1beta1.EndpointSlice, error) + Create(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSlice, opts v1.CreateOptions) (*discoveryv1beta1.EndpointSlice, error) + Update(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSlice, opts v1.UpdateOptions) (*discoveryv1beta1.EndpointSlice, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.EndpointSlice, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EndpointSliceList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*discoveryv1beta1.EndpointSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*discoveryv1beta1.EndpointSliceList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) - Apply(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.EndpointSlice, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *discoveryv1beta1.EndpointSlice, err error) + Apply(ctx context.Context, endpointSlice *applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *discoveryv1beta1.EndpointSlice, err error) EndpointSliceExpansion } // endpointSlices implements EndpointSliceInterface type endpointSlices struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*discoveryv1beta1.EndpointSlice, *discoveryv1beta1.EndpointSliceList, *applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration] } // newEndpointSlices returns a EndpointSlices func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices { return &endpointSlices{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*discoveryv1beta1.EndpointSlice, *discoveryv1beta1.EndpointSliceList, *applyconfigurationsdiscoveryv1beta1.EndpointSliceApplyConfiguration]( + "endpointslices", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *discoveryv1beta1.EndpointSlice { return &discoveryv1beta1.EndpointSlice{} }, + func() *discoveryv1beta1.EndpointSliceList { return &discoveryv1beta1.EndpointSliceList{} }, + gentype.PrefersProtobuf[*discoveryv1beta1.EndpointSlice](), + ), } } - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. 
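The same rewrite is applied below to the deprecated discovery.k8s.io/v1beta1 EndpointSlice client; new code should target discovery.k8s.io/v1, since the v1beta1 API was removed from the API server in Kubernetes 1.25. A common caller-side pattern is to look up the slices backing a Service via the well-known label, sketched here with an assumed namespace and service name:

package example

import (
	"context"
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func slicesForService(ctx context.Context, cs kubernetes.Interface, namespace, service string) error {
	// EndpointSlices carry a label pointing back at their owning Service.
	slices, err := cs.DiscoveryV1().EndpointSlices(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: discoveryv1.LabelServiceName + "=" + service,
	})
	if err != nil {
		return err
	}
	for _, s := range slices.Items {
		fmt.Println(s.Name, len(s.Endpoints))
	}
	return nil
}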
-func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.EndpointSliceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpointslices"). - Name(endpointSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(endpointSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *endpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("endpointslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { - result = &v1beta1.EndpointSlice{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpointslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. 
-func (c *endpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - result = &v1beta1.EndpointSlice{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("endpointslices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go deleted file mode 100644 index e285de647..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_discovery_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/discovery/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeDiscoveryV1beta1 struct { - *testing.Fake -} - -func (c *FakeDiscoveryV1beta1) EndpointSlices(namespace string) v1beta1.EndpointSliceInterface { - return &FakeEndpointSlices{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeDiscoveryV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go deleted file mode 100644 index 268371811..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake/fake_endpointslice.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/discovery/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - discoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeEndpointSlices implements EndpointSliceInterface -type FakeEndpointSlices struct { - Fake *FakeDiscoveryV1beta1 - ns string -} - -var endpointslicesResource = v1beta1.SchemeGroupVersion.WithResource("endpointslices") - -var endpointslicesKind = v1beta1.SchemeGroupVersion.WithKind("EndpointSlice") - -// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any. -func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.EndpointSlice), err -} - -// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors. -func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1beta1.EndpointSliceList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.EndpointSliceList{ListMeta: obj.(*v1beta1.EndpointSliceList).ListMeta} - for _, item := range obj.(*v1beta1.EndpointSliceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested endpointSlices. -func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts)) - -} - -// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any. 
-func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.EndpointSlice), err -} - -// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any. -func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.EndpointSlice), err -} - -// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs. -func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &v1beta1.EndpointSlice{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.EndpointSliceList{}) - return err -} - -// Patch applies the patch and returns the patched endpointSlice. -func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1beta1.EndpointSlice{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.EndpointSlice), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied endpointSlice. -func (c *FakeEndpointSlices) Apply(ctx context.Context, endpointSlice *discoveryv1beta1.EndpointSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.EndpointSlice, err error) { - if endpointSlice == nil { - return nil, fmt.Errorf("endpointSlice provided to Apply must not be nil") - } - data, err := json.Marshal(endpointSlice) - if err != nil { - return nil, err - } - name := endpointSlice.Name - if name == nil { - return nil, fmt.Errorf("endpointSlice.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.EndpointSlice{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.EndpointSlice), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go index c9f2bbed5..fd3358476 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/event.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/events/v1" + eventsv1 "k8s.io/api/events/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - eventsv1 "k8s.io/client-go/applyconfigurations/events/v1" + applyconfigurationseventsv1 "k8s.io/client-go/applyconfigurations/events/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -41,168 +38,34 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. type EventInterface interface { - Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (*v1.Event, error) - Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (*v1.Event, error) + Create(ctx context.Context, event *eventsv1.Event, opts metav1.CreateOptions) (*eventsv1.Event, error) + Update(ctx context.Context, event *eventsv1.Event, opts metav1.UpdateOptions) (*eventsv1.Event, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Event, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.EventList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*eventsv1.Event, error) + List(ctx context.Context, opts metav1.ListOptions) (*eventsv1.EventList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) - Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *eventsv1.Event, err error) + Apply(ctx context.Context, event *applyconfigurationseventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *eventsv1.Event, err error) EventExpansion } // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*eventsv1.Event, *eventsv1.EventList, *applyconfigurationseventsv1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *EventsV1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*eventsv1.Event, *eventsv1.EventList, *applyconfigurationseventsv1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *eventsv1.Event { return &eventsv1.Event{} }, + func() *eventsv1.EventList { return &eventsv1.EventList{} }, + gentype.PrefersProtobuf[*eventsv1.Event](), + ), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. 
-func (c *events) Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go index 8c73918d1..959ff5f81 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/events_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/events/v1" - "k8s.io/client-go/kubernetes/scheme" + eventsv1 "k8s.io/api/events/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *EventsV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := eventsv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go deleted file mode 100644 index 0928781f1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_event.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/events/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - eventsv1 "k8s.io/client-go/applyconfigurations/events/v1" - testing "k8s.io/client-go/testing" -) - -// FakeEvents implements EventInterface -type FakeEvents struct { - Fake *FakeEventsV1 - ns string -} - -var eventsResource = v1.SchemeGroupVersion.WithResource("events") - -var eventsKind = v1.SchemeGroupVersion.WithKind("Event") - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *FakeEvents) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *FakeEvents) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1.EventList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.EventList{ListMeta: obj.(*v1.EventList).ListMeta} - for _, item := range obj.(*v1.EventList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *FakeEvents) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) - -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *FakeEvents) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &v1.Event{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEvents) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.EventList{}) - return err -} - -// Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1.EventApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Event), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_events_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_events_client.go deleted file mode 100644 index 95ef2b307..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1/fake/fake_events_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/events/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeEventsV1 struct { - *testing.Fake -} - -func (c *FakeEventsV1) Events(namespace string) v1.EventInterface { - return &FakeEvents{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeEventsV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index dfdf8b897..c18a1aeb6 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/events/v1beta1" + eventsv1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - eventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" + applyconfigurationseventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // EventsGetter has a method to return a EventInterface. @@ -41,168 +38,34 @@ type EventsGetter interface { // EventInterface has methods to work with Event resources. type EventInterface interface { - Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (*v1beta1.Event, error) - Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (*v1beta1.Event, error) + Create(ctx context.Context, event *eventsv1beta1.Event, opts v1.CreateOptions) (*eventsv1beta1.Event, error) + Update(ctx context.Context, event *eventsv1beta1.Event, opts v1.UpdateOptions) (*eventsv1beta1.Event, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Event, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EventList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*eventsv1beta1.Event, error) + List(ctx context.Context, opts v1.ListOptions) (*eventsv1beta1.EventList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) - Apply(ctx context.Context, event *eventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Event, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *eventsv1beta1.Event, err error) + Apply(ctx context.Context, event *applyconfigurationseventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *eventsv1beta1.Event, err error) EventExpansion } // events implements EventInterface type events struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*eventsv1beta1.Event, *eventsv1beta1.EventList, *applyconfigurationseventsv1beta1.EventApplyConfiguration] } // newEvents returns a Events func newEvents(c *EventsV1beta1Client, namespace string) *events { return &events{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*eventsv1beta1.Event, *eventsv1beta1.EventList, *applyconfigurationseventsv1beta1.EventApplyConfiguration]( + "events", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *eventsv1beta1.Event { return 
&eventsv1beta1.Event{} }, + func() *eventsv1beta1.EventList { return &eventsv1beta1.EventList{} }, + gentype.PrefersProtobuf[*eventsv1beta1.Event](), + ), } } - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(event). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched event. 
-func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *events) Apply(ctx context.Context, event *eventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - result = &v1beta1.Event{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("events"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go index 562f8d5e4..4ddbaa31a 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go @@ -44,11 +44,11 @@ type EventExpansion interface { // it must either match this event client's namespace, or this event client must // have been created with the "" namespace. func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Post(). + err := e.GetClient().Post(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Body(event). @@ -64,11 +64,11 @@ func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // created with the "" namespace. // Update also requires the ResourceVersion to be set in the event object. func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't update an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Put(). + err := e.GetClient().Put(). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). @@ -84,11 +84,11 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, // The namespace must either match this event client's namespace, or this event client must // have been created with the "" namespace. 
func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + if e.GetNamespace() != "" && event.Namespace != e.GetNamespace() { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.GetNamespace()) } result := &v1beta1.Event{} - err := e.client.Patch(types.StrategicMergePatchType). + err := e.GetClient().Patch(types.StrategicMergePatchType). NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). Resource("events"). Name(event.Name). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go index 66506bf88..0bfc3cb60 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/events/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + eventsv1beta1 "k8s.io/api/events/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *EventsV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := eventsv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go deleted file mode 100644 index 522b4dc06..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/events/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - eventsv1beta1 "k8s.io/client-go/applyconfigurations/events/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeEvents implements EventInterface -type FakeEvents struct { - Fake *FakeEventsV1beta1 - ns string -} - -var eventsResource = v1beta1.SchemeGroupVersion.WithResource("events") - -var eventsKind = v1beta1.SchemeGroupVersion.WithKind("Event") - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *FakeEvents) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1beta1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Event), err -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *FakeEvents) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1beta1.EventList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.EventList{ListMeta: obj.(*v1beta1.EventList).ListMeta} - for _, item := range obj.(*v1beta1.EventList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *FakeEvents) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts)) - -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1beta1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Event), err -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *FakeEvents) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1beta1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Event), err -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. 
-func (c *FakeEvents) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(eventsResource, c.ns, name, opts), &v1beta1.Event{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeEvents) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.EventList{}) - return err -} - -// Patch applies the patch and returns the patched event. -func (c *FakeEvents) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, pt, data, subresources...), &v1beta1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Event), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied event. -func (c *FakeEvents) Apply(ctx context.Context, event *eventsv1beta1.EventApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Event, err error) { - if event == nil { - return nil, fmt.Errorf("event provided to Apply must not be nil") - } - data, err := json.Marshal(event) - if err != nil { - return nil, err - } - name := event.Name - if name == nil { - return nil, fmt.Errorf("event.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Event{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Event), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go deleted file mode 100644 index 19c1b4415..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - v1beta1 "k8s.io/api/events/v1beta1" - types "k8s.io/apimachinery/pkg/types" - core "k8s.io/client-go/testing" -) - -// CreateWithEventNamespace creats a new event. Returns the copy of the event the server returns, or an error. -func (c *FakeEvents) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - action := core.NewRootCreateAction(eventsResource, event) - if c.ns != "" { - action = core.NewCreateAction(eventsResource, c.ns, event) - } - obj, err := c.Fake.Invokes(action, event) - if obj == nil { - return nil, err - } - - return obj.(*v1beta1.Event), err -} - -// UpdateWithEventNamespace replaces an existing event. Returns the copy of the event the server returns, or an error. 
-func (c *FakeEvents) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event, error) { - action := core.NewRootUpdateAction(eventsResource, event) - if c.ns != "" { - action = core.NewUpdateAction(eventsResource, c.ns, event) - } - obj, err := c.Fake.Invokes(action, event) - if obj == nil { - return nil, err - } - - return obj.(*v1beta1.Event), err -} - -// PatchWithEventNamespace patches an existing event. Returns the copy of the event the server returns, or an error. -func (c *FakeEvents) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) { - pt := types.StrategicMergePatchType - action := core.NewRootPatchAction(eventsResource, event.Name, pt, data) - if c.ns != "" { - action = core.NewPatchAction(eventsResource, c.ns, event.Name, pt, data) - } - obj, err := c.Fake.Invokes(action, event) - if obj == nil { - return nil, err - } - - return obj.(*v1beta1.Event), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go deleted file mode 100644 index 875c774e3..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeEventsV1beta1 struct { - *testing.Fake -} - -func (c *FakeEventsV1beta1) Events(namespace string) v1beta1.EventInterface { - return &FakeEvents{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeEventsV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index ffe219fda..c04be73a3 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // DaemonSetsGetter has a method to return a DaemonSetInterface. @@ -41,216 +38,38 @@ type DaemonSetsGetter interface { // DaemonSetInterface has methods to work with DaemonSet resources. type DaemonSetInterface interface { - Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (*v1beta1.DaemonSet, error) - Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) - UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) + Create(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSet, opts v1.CreateOptions) (*extensionsv1beta1.DaemonSet, error) + Update(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSet, opts v1.UpdateOptions) (*extensionsv1beta1.DaemonSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSet, opts v1.UpdateOptions) (*extensionsv1beta1.DaemonSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.DaemonSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DaemonSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.DaemonSet, error) + List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.DaemonSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) - Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) - ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.DaemonSet, err error) + Apply(ctx context.Context, daemonSet *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.DaemonSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, daemonSet *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.DaemonSet, err error) DaemonSetExpansion } // daemonSets implements DaemonSetInterface type daemonSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*extensionsv1beta1.DaemonSet, *extensionsv1beta1.DaemonSetList, *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration] } // newDaemonSets returns a DaemonSets func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { return &daemonSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*extensionsv1beta1.DaemonSet, *extensionsv1beta1.DaemonSetList, *applyconfigurationsextensionsv1beta1.DaemonSetApplyConfiguration]( + "daemonsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *extensionsv1beta1.DaemonSet { return &extensionsv1beta1.DaemonSet{} }, + func() *extensionsv1beta1.DaemonSetList { return &extensionsv1beta1.DaemonSetList{} }, + gentype.PrefersProtobuf[*extensionsv1beta1.DaemonSet](), + ), } } - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(daemonSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *daemonSets) Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - result = &v1beta1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *daemonSets) ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - - result = &v1beta1.DaemonSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("daemonsets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index c41d8dbc2..1bcf3cbc8 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -19,18 +19,17 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" + fmt "fmt" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" + apply "k8s.io/client-go/util/apply" ) // DeploymentsGetter has a method to return a DeploymentInterface. @@ -41,229 +40,52 @@ type DeploymentsGetter interface { // DeploymentInterface has methods to work with Deployment resources. type DeploymentInterface interface { - Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error) - Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) - UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) + Create(ctx context.Context, deployment *extensionsv1beta1.Deployment, opts v1.CreateOptions) (*extensionsv1beta1.Deployment, error) + Update(ctx context.Context, deployment *extensionsv1beta1.Deployment, opts v1.UpdateOptions) (*extensionsv1beta1.Deployment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, deployment *extensionsv1beta1.Deployment, opts v1.UpdateOptions) (*extensionsv1beta1.Deployment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.Deployment, error) + List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.DeploymentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) - Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) - ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) - GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) - ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta1.Scale, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.Deployment, err error) + Apply(ctx context.Context, deployment *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Deployment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, deployment *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Deployment, err error) + GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*extensionsv1beta1.Scale, error) + UpdateScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (*extensionsv1beta1.Scale, error) + ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*extensionsv1beta1.Scale, error) DeploymentExpansion } // deployments implements DeploymentInterface type deployments struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*extensionsv1beta1.Deployment, *extensionsv1beta1.DeploymentList, *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration] } // newDeployments returns a Deployments func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { return &deployments{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*extensionsv1beta1.Deployment, *extensionsv1beta1.DeploymentList, *applyconfigurationsextensionsv1beta1.DeploymentApplyConfiguration]( + "deployments", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *extensionsv1beta1.Deployment { return &extensionsv1beta1.Deployment{} }, + func() *extensionsv1beta1.DeploymentList { return &extensionsv1beta1.DeploymentList{} }, + gentype.PrefersProtobuf[*extensionsv1beta1.Deployment](), + ), } } -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(deployment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(deployment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *deployments) Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *deployments) ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - - result = &v1beta1.Deployment{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("deployments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). +// GetScale takes name of the deployment, and returns the corresponding extensionsv1beta1.Scale object, and an error if there is any. +func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *extensionsv1beta1.Scale, err error) { + result = &extensionsv1beta1.Scale{} + err = c.GetClient().Get(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -274,10 +96,11 @@ func (c *deployments) GetScale(ctx context.Context, deploymentName string, optio } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). +func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (result *extensionsv1beta1.Scale, err error) { + result = &extensionsv1beta1.Scale{} + err = c.GetClient().Put(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). @@ -290,24 +113,24 @@ func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, sc // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. -func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { +func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } - result = &v1beta1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). 
+ result = &extensionsv1beta1.Scale{} + err = request. + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("deployments"). Name(deploymentName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go index 5c409ac99..bd75b8a38 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go @@ -31,5 +31,5 @@ type DeploymentExpansion interface { // Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. func (c *deployments) Rollback(ctx context.Context, deploymentRollback *v1beta1.DeploymentRollback, opts metav1.CreateOptions) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error() + return c.GetClient().Post().Namespace(c.GetNamespace()).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error() } diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 4725d2cd1..88f2279bb 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -105,10 +105,10 @@ func New(c rest.Interface) *ExtensionsV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := extensionsv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go deleted file mode 100644 index abe3d2da1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeDaemonSets implements DaemonSetInterface -type FakeDaemonSets struct { - Fake *FakeExtensionsV1beta1 - ns string -} - -var daemonsetsResource = v1beta1.SchemeGroupVersion.WithResource("daemonsets") - -var daemonsetsKind = v1beta1.SchemeGroupVersion.WithKind("DaemonSet") - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *FakeDaemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *FakeDaemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta1.DaemonSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.DaemonSetList{ListMeta: obj.(*v1beta1.DaemonSetList).ListMeta} - for _, item := range obj.(*v1beta1.DaemonSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *FakeDaemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts)) - -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. 
-func (c *FakeDaemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *FakeDaemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDaemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *FakeDaemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(daemonsetsResource, c.ns, name, opts), &v1beta1.DaemonSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDaemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{}) - return err -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *FakeDaemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, pt, data, subresources...), &v1beta1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied daemonSet. -func (c *FakeDaemonSets) Apply(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.DaemonSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeDaemonSets) ApplyStatus(ctx context.Context, daemonSet *extensionsv1beta1.DaemonSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.DaemonSet, err error) { - if daemonSet == nil { - return nil, fmt.Errorf("daemonSet provided to Apply must not be nil") - } - data, err := json.Marshal(daemonSet) - if err != nil { - return nil, err - } - name := daemonSet.Name - if name == nil { - return nil, fmt.Errorf("daemonSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.DaemonSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.DaemonSet), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go deleted file mode 100644 index e399361a9..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeDeployments implements DeploymentInterface -type FakeDeployments struct { - Fake *FakeExtensionsV1beta1 - ns string -} - -var deploymentsResource = v1beta1.SchemeGroupVersion.WithResource("deployments") - -var deploymentsKind = v1beta1.SchemeGroupVersion.WithKind("Deployment") - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *FakeDeployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *FakeDeployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.DeploymentList{ListMeta: obj.(*v1beta1.DeploymentList).ListMeta} - for _, item := range obj.(*v1beta1.DeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *FakeDeployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *FakeDeployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeDeployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *FakeDeployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(deploymentsResource, c.ns, name, opts), &v1beta1.Deployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeDeployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched deployment. -func (c *FakeDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, pt, data, subresources...), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied deployment. -func (c *FakeDeployments) Apply(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeDeployments) ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error) { - if deployment == nil { - return nil, fmt.Errorf("deployment provided to Apply must not be nil") - } - data, err := json.Marshal(deployment) - if err != nil { - return nil, err - } - name := deployment.Name - if name == nil { - return nil, fmt.Errorf("deployment.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Deployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Deployment), err -} - -// GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any. -func (c *FakeDeployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &v1beta1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Scale), err -} - -// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeDeployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &v1beta1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Scale), err -} - -// ApplyScale takes top resource name and the apply declarative configuration for scale, -// applies it and returns the applied scale, and an error, if there is any. -func (c *FakeDeployments) ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { - if scale == nil { - return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") - } - data, err := json.Marshal(scale) - if err != nil { - return nil, err - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, deploymentName, types.ApplyPatchType, data, "status"), &v1beta1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Scale), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go deleted file mode 100644 index 6ea1acd85..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - core "k8s.io/client-go/testing" -) - -func (c *FakeDeployments) Rollback(ctx context.Context, deploymentRollback *v1beta1.DeploymentRollback, opts metav1.CreateOptions) error { - action := core.CreateActionImpl{} - action.Verb = "create" - action.Resource = deploymentsResource - action.Subresource = "rollback" - action.Object = deploymentRollback - - _, err := c.Fake.Invokes(action, deploymentRollback) - return err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go deleted file mode 100644 index a54c182ea..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeExtensionsV1beta1 struct { - *testing.Fake -} - -func (c *FakeExtensionsV1beta1) DaemonSets(namespace string) v1beta1.DaemonSetInterface { - return &FakeDaemonSets{c, namespace} -} - -func (c *FakeExtensionsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterface { - return &FakeDeployments{c, namespace} -} - -func (c *FakeExtensionsV1beta1) Ingresses(namespace string) v1beta1.IngressInterface { - return &FakeIngresses{c, namespace} -} - -func (c *FakeExtensionsV1beta1) NetworkPolicies(namespace string) v1beta1.NetworkPolicyInterface { - return &FakeNetworkPolicies{c, namespace} -} - -func (c *FakeExtensionsV1beta1) ReplicaSets(namespace string) v1beta1.ReplicaSetInterface { - return &FakeReplicaSets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeExtensionsV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go deleted file mode 100644 index 48ae51e80..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeIngresses implements IngressInterface -type FakeIngresses struct { - Fake *FakeExtensionsV1beta1 - ns string -} - -var ingressesResource = v1beta1.SchemeGroupVersion.WithResource("ingresses") - -var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress") - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.IngressList{ListMeta: obj.(*v1beta1.IngressList).ListMeta} - for _, item := range obj.(*v1beta1.IngressList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) - -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &v1beta1.Ingress{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) - return err -} - -// Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. 
-func (c *FakeIngresses) Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go deleted file mode 100644 index a32022140..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeNetworkPolicies implements NetworkPolicyInterface -type FakeNetworkPolicies struct { - Fake *FakeExtensionsV1beta1 - ns string -} - -var networkpoliciesResource = v1beta1.SchemeGroupVersion.WithResource("networkpolicies") - -var networkpoliciesKind = v1beta1.SchemeGroupVersion.WithKind("NetworkPolicy") - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. 
-func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1beta1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.NetworkPolicy), err -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *FakeNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1beta1.NetworkPolicyList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.NetworkPolicyList{ListMeta: obj.(*v1beta1.NetworkPolicyList).ListMeta} - for _, item := range obj.(*v1beta1.NetworkPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) - -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.NetworkPolicy), err -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1beta1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.NetworkPolicy), err -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(networkpoliciesResource, c.ns, name, opts), &v1beta1.NetworkPolicy{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.NetworkPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1beta1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.NetworkPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. -func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.NetworkPolicy), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go deleted file mode 100644 index 42da6fa8b..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeReplicaSets implements ReplicaSetInterface -type FakeReplicaSets struct { - Fake *FakeExtensionsV1beta1 - ns string -} - -var replicasetsResource = v1beta1.SchemeGroupVersion.WithResource("replicasets") - -var replicasetsKind = v1beta1.SchemeGroupVersion.WithKind("ReplicaSet") - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *FakeReplicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *FakeReplicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta1.ReplicaSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ReplicaSetList{ListMeta: obj.(*v1beta1.ReplicaSetList).ListMeta} - for _, item := range obj.(*v1beta1.ReplicaSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *FakeReplicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts)) - -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *FakeReplicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *FakeReplicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicasetsResource, c.ns, name, opts), &v1beta1.ReplicaSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeReplicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{}) - return err -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *FakeReplicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, pt, data, subresources...), &v1beta1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *FakeReplicaSets) Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeReplicaSets) ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.ReplicaSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ReplicaSet), err -} - -// GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any. -func (c *FakeReplicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &v1beta1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Scale), err -} - -// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *FakeReplicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &v1beta1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Scale), err -} - -// ApplyScale takes top resource name and the apply declarative configuration for scale, -// applies it and returns the applied scale, and an error, if there is any. -func (c *FakeReplicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { - if scale == nil { - return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") - } - data, err := json.Marshal(scale) - if err != nil { - return nil, err - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, replicaSetName, types.ApplyPatchType, data, "status"), &v1beta1.Scale{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Scale), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index dd4012cc2..9a24621ea 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -41,216 +38,38 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. type IngressInterface interface { - Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) - Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) - UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + Create(ctx context.Context, ingress *extensionsv1beta1.Ingress, opts v1.CreateOptions) (*extensionsv1beta1.Ingress, error) + Update(ctx context.Context, ingress *extensionsv1beta1.Ingress, opts v1.UpdateOptions) (*extensionsv1beta1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ingress *extensionsv1beta1.Ingress, opts v1.UpdateOptions) (*extensionsv1beta1.Ingress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.Ingress, error) + List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.IngressList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) - Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) - ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.Ingress, err error) + Apply(ctx context.Context, ingress *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, ingress *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*extensionsv1beta1.Ingress, *extensionsv1beta1.IngressList, *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*extensionsv1beta1.Ingress, *extensionsv1beta1.IngressList, *applyconfigurationsextensionsv1beta1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *extensionsv1beta1.Ingress { return &extensionsv1beta1.Ingress{} }, + func() *extensionsv1beta1.IngressList { return &extensionsv1beta1.IngressList{} }, + gentype.PrefersProtobuf[*extensionsv1beta1.Ingress](), + ), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *extensionsv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go index 978b26db0..ce6a45a27 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. @@ -41,168 +38,34 @@ type NetworkPoliciesGetter interface { // NetworkPolicyInterface has methods to work with NetworkPolicy resources. 
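// The embedded gentype.ClientWithListAndApply now supplies the Get/List/Watch/Create/
// Update/Delete/Patch/Apply plumbing that the removed hand-written methods used to
// provide, so consumers keep programming against the same IngressInterface. A minimal
// usage sketch; the kubeconfig path, namespace and object name below are illustrative
// assumptions, not values taken from this module:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig location, shown only to make the sketch self-contained.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Same call shape as before the gentype rewrite.
	ing, err := cs.ExtensionsV1beta1().Ingresses("default").Get(context.TODO(), "example", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(ing.Name)
}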
type NetworkPolicyInterface interface { - Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (*v1beta1.NetworkPolicy, error) - Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error) + Create(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicy, opts v1.CreateOptions) (*extensionsv1beta1.NetworkPolicy, error) + Update(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicy, opts v1.UpdateOptions) (*extensionsv1beta1.NetworkPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NetworkPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.NetworkPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.NetworkPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) - Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.NetworkPolicy, err error) + Apply(ctx context.Context, networkPolicy *applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.NetworkPolicy, err error) NetworkPolicyExpansion } // networkPolicies implements NetworkPolicyInterface type networkPolicies struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*extensionsv1beta1.NetworkPolicy, *extensionsv1beta1.NetworkPolicyList, *applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration] } // newNetworkPolicies returns a NetworkPolicies func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPolicies { return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*extensionsv1beta1.NetworkPolicy, *extensionsv1beta1.NetworkPolicyList, *applyconfigurationsextensionsv1beta1.NetworkPolicyApplyConfiguration]( + "networkpolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *extensionsv1beta1.NetworkPolicy { return &extensionsv1beta1.NetworkPolicy{} }, + func() *extensionsv1beta1.NetworkPolicyList { return &extensionsv1beta1.NetworkPolicyList{} }, + gentype.PrefersProtobuf[*extensionsv1beta1.NetworkPolicy](), + ), } } - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. 
-func (c *networkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) { - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. 
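// Apply on the typed client performs server-side apply: the apply configuration is
// marshalled and sent as an ApplyPatchType patch by the shared gentype client, which
// is what the removed hand-written method did explicitly. A sketch, assuming the
// generated NetworkPolicy apply-configuration constructor and an existing
// kubernetes.Interface; the object name, namespace and field manager are illustrative:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyextv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// applyNetworkPolicy declares the desired state and lets the API server merge it.
func applyNetworkPolicy(ctx context.Context, cs kubernetes.Interface) error {
	np := applyextv1beta1.NetworkPolicy("deny-all", "default")
	_, err := cs.ExtensionsV1beta1().NetworkPolicies("default").Apply(
		ctx, np, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}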
-func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - result = &v1beta1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 3c907a3a0..f918be417 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -19,18 +19,17 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" + fmt "fmt" - v1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - extensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + applyconfigurationsextensionsv1beta1 "k8s.io/client-go/applyconfigurations/extensions/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" + apply "k8s.io/client-go/util/apply" ) // ReplicaSetsGetter has a method to return a ReplicaSetInterface. @@ -41,229 +40,52 @@ type ReplicaSetsGetter interface { // ReplicaSetInterface has methods to work with ReplicaSet resources. type ReplicaSetInterface interface { - Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (*v1beta1.ReplicaSet, error) - Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) - UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error) + Create(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSet, opts v1.CreateOptions) (*extensionsv1beta1.ReplicaSet, error) + Update(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSet, opts v1.UpdateOptions) (*extensionsv1beta1.ReplicaSet, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSet, opts v1.UpdateOptions) (*extensionsv1beta1.ReplicaSet, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ReplicaSet, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*extensionsv1beta1.ReplicaSet, error) + List(ctx context.Context, opts v1.ListOptions) (*extensionsv1beta1.ReplicaSetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) - Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) - ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) - GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error) - ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta1.Scale, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *extensionsv1beta1.ReplicaSet, err error) + Apply(ctx context.Context, replicaSet *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.ReplicaSet, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, replicaSet *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.ReplicaSet, err error) + GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*extensionsv1beta1.Scale, error) + UpdateScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (*extensionsv1beta1.Scale, error) + ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*extensionsv1beta1.Scale, error) ReplicaSetExpansion } // replicaSets implements ReplicaSetInterface type replicaSets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*extensionsv1beta1.ReplicaSet, *extensionsv1beta1.ReplicaSetList, *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration] } // newReplicaSets returns a ReplicaSets func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { return &replicaSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*extensionsv1beta1.ReplicaSet, *extensionsv1beta1.ReplicaSetList, *applyconfigurationsextensionsv1beta1.ReplicaSetApplyConfiguration]( + "replicasets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *extensionsv1beta1.ReplicaSet { return &extensionsv1beta1.ReplicaSet{} }, + func() *extensionsv1beta1.ReplicaSetList { return &extensionsv1beta1.ReplicaSetList{} }, + gentype.PrefersProtobuf[*extensionsv1beta1.ReplicaSet](), + ), } } -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicaSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied replicaSet. -func (c *replicaSets) Apply(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
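// The scale subresource helpers further down (GetScale/UpdateScale/ApplyScale) remain
// written out explicitly because the generic gentype client only covers the main
// resource verbs. A sketch of the read-modify-write pattern they enable; the function
// name and arguments are illustrative:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleReplicaSet reads the current scale, adjusts the replica count, and writes it back.
func scaleReplicaSet(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
	scale, err := cs.ExtensionsV1beta1().ReplicaSets(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas
	_, err = cs.ExtensionsV1beta1().ReplicaSets(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}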
-func (c *replicaSets) ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error) { - if replicaSet == nil { - return nil, fmt.Errorf("replicaSet provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(replicaSet) - if err != nil { - return nil, err - } - - name := replicaSet.Name - if name == nil { - return nil, fmt.Errorf("replicaSet.Name must be provided to Apply") - } - - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("replicasets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). +// GetScale takes name of the replicaSet, and returns the corresponding extensionsv1beta1.Scale object, and an error if there is any. +func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *extensionsv1beta1.Scale, err error) { + result = &extensionsv1beta1.Scale{} + err = c.GetClient().Get(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -274,10 +96,11 @@ func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, optio } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). +func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.Scale, opts v1.UpdateOptions) (result *extensionsv1beta1.Scale, err error) { + result = &extensionsv1beta1.Scale{} + err = c.GetClient().Put(). + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). @@ -290,24 +113,24 @@ func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, sc // ApplyScale takes top resource name and the apply declarative configuration for scale, // applies it and returns the applied scale, and an error, if there is any. -func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) { +func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsextensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *extensionsv1beta1.Scale, err error) { if scale == nil { return nil, fmt.Errorf("scale provided to ApplyScale must not be nil") } patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(scale) + request, err := apply.NewRequest(c.GetClient(), scale) if err != nil { return nil, err } - result = &v1beta1.Scale{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). 
+ result = &extensionsv1beta1.Scale{} + err = request. + UseProtobufAsDefault(). + Namespace(c.GetNamespace()). Resource("replicasets"). Name(replicaSetName). SubResource("scale"). VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). Do(ctx). Into(result) return diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go deleted file mode 100644 index d15f4b242..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowcontrol_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeFlowcontrolV1 struct { - *testing.Fake -} - -func (c *FakeFlowcontrolV1) FlowSchemas() v1.FlowSchemaInterface { - return &FakeFlowSchemas{c} -} - -func (c *FakeFlowcontrolV1) PriorityLevelConfigurations() v1.PriorityLevelConfigurationInterface { - return &FakePriorityLevelConfigurations{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeFlowcontrolV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go deleted file mode 100644 index 922a60d89..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_flowschema.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" - testing "k8s.io/client-go/testing" -) - -// FakeFlowSchemas implements FlowSchemaInterface -type FakeFlowSchemas struct { - Fake *FakeFlowcontrolV1 -} - -var flowschemasResource = v1.SchemeGroupVersion.WithResource("flowschemas") - -var flowschemasKind = v1.SchemeGroupVersion.WithKind("FlowSchema") - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1.FlowSchema), err -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *FakeFlowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1.FlowSchemaList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.FlowSchemaList{ListMeta: obj.(*v1.FlowSchemaList).ListMeta} - for _, item := range obj.(*v1.FlowSchemaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1.FlowSchema), err -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1.FlowSchema), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1.FlowSchema), err -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1.FlowSchema{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.FlowSchemaList{}) - return err -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1.FlowSchema), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1.FlowSchema), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1.FlowSchema), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go deleted file mode 100644 index 27d958674..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake/fake_prioritylevelconfiguration.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" - testing "k8s.io/client-go/testing" -) - -// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface -type FakePriorityLevelConfigurations struct { - Fake *FakeFlowcontrolV1 -} - -var prioritylevelconfigurationsResource = v1.SchemeGroupVersion.WithResource("prioritylevelconfigurations") - -var prioritylevelconfigurationsKind = v1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1.PriorityLevelConfigurationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PriorityLevelConfigurationList{ListMeta: obj.(*v1.PriorityLevelConfigurationList).ListMeta} - for _, item := range obj.(*v1.PriorityLevelConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. 
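// These per-group hand-written fakes are removed from the vendor tree. Test code that
// needs an in-memory flowcontrol client can still go through the aggregate fake
// clientset. A sketch, with the FlowSchema name purely illustrative:

package example

import (
	"context"

	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// seedFlowSchema creates a FlowSchema in an in-memory clientset backed by an object tracker.
func seedFlowSchema(ctx context.Context) (*flowcontrolv1.FlowSchema, error) {
	cs := fake.NewSimpleClientset()
	fs := &flowcontrolv1.FlowSchema{ObjectMeta: metav1.ObjectMeta{Name: "example-schema"}}
	return cs.FlowcontrolV1().FlowSchemas().Create(ctx, fs, metav1.CreateOptions{})
}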
-func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1.PriorityLevelConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PriorityLevelConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityLevelConfiguration), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go index 3d7d93ef1..3b19586e9 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/flowcontrol/v1" - "k8s.io/client-go/kubernetes/scheme" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := flowcontrolv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go index bd36c5e6a..56d4d8065 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + applyconfigurationsflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -41,203 +38,38 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (*v1.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) - UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) + Create(ctx context.Context, flowSchema *flowcontrolv1.FlowSchema, opts metav1.CreateOptions) (*flowcontrolv1.FlowSchema, error) + Update(ctx context.Context, flowSchema *flowcontrolv1.FlowSchema, opts metav1.UpdateOptions) (*flowcontrolv1.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchema, opts metav1.UpdateOptions) (*flowcontrolv1.FlowSchema, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.FlowSchema, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.FlowSchemaList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*flowcontrolv1.FlowSchema, error) + List(ctx context.Context, opts metav1.ListOptions) (*flowcontrolv1.FlowSchemaList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *flowcontrolv1.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*flowcontrolv1.FlowSchema, *flowcontrolv1.FlowSchemaList, *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*flowcontrolv1.FlowSchema, *flowcontrolv1.FlowSchemaList, *applyconfigurationsflowcontrolv1.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *flowcontrolv1.FlowSchema { return &flowcontrolv1.FlowSchema{} }, + func() *flowcontrolv1.FlowSchemaList { return &flowcontrolv1.FlowSchemaList{} }, + gentype.PrefersProtobuf[*flowcontrolv1.FlowSchema](), + ), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. 
-func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go index 797fe9403..5d25f393a 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + applyconfigurationsflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -41,203 +38,40 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
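Annotation: the hunks above delete the hand-written Get/List/Watch/Create/Update/Patch/Apply bodies of the generated v1 flowcontrol clients and replace them with an embedded gentype.ClientWithListAndApply, the generic typed client that client-gen emits as of client-go v0.32; the per-verb REST logic now lives in k8s.io/client-go/gentype, and the generated files keep only the interface and the constructor. Call sites are unaffected. A minimal sketch of unchanged caller code, assuming an already-built *rest.Config (function name is illustrative):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listFlowSchemas lists cluster-scoped FlowSchemas through the typed client.
// The client now delegates to gentype.ClientWithListAndApply internally, but the
// method set and semantics seen here are the same as before the regeneration.
func listFlowSchemas(ctx context.Context, cfg *rest.Config) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	fss, err := clientset.FlowcontrolV1().FlowSchemas().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, fs := range fss.Items {
		fmt.Println(fs.Name)
	}
	return nil
}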
type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*v1.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) - UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) + Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*flowcontrolv1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*flowcontrolv1.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*flowcontrolv1.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityLevelConfiguration, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityLevelConfigurationList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*flowcontrolv1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts metav1.ListOptions) (*flowcontrolv1.PriorityLevelConfigurationList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) - ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *flowcontrolv1.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *flowcontrolv1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*flowcontrolv1.PriorityLevelConfiguration, *flowcontrolv1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*flowcontrolv1.PriorityLevelConfiguration, *flowcontrolv1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *flowcontrolv1.PriorityLevelConfiguration { return &flowcontrolv1.PriorityLevelConfiguration{} }, + func() *flowcontrolv1.PriorityLevelConfigurationList { + return &flowcontrolv1.PriorityLevelConfigurationList{} + }, + gentype.PrefersProtobuf[*flowcontrolv1.PriorityLevelConfiguration](), + ), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
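Annotation: the Apply/ApplyStatus methods being rewritten in this hunk take the declarative apply configurations from k8s.io/client-go/applyconfigurations, and that contract is unchanged by the move to the generic client. A hedged sketch of server-side apply through the typed client; the object name, field manager, and Exempt enablement type are illustrative, not taken from this diff:

package example

import (
	"context"

	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyflowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
	"k8s.io/client-go/kubernetes"
)

// applyPriorityLevel declares the desired state and lets the API server merge it.
// Only the fields set on the apply configuration are owned by this field manager.
func applyPriorityLevel(ctx context.Context, cs kubernetes.Interface) (*flowcontrolv1.PriorityLevelConfiguration, error) {
	plc := applyflowcontrolv1.PriorityLevelConfiguration("example-priority-level").
		WithSpec(applyflowcontrolv1.PriorityLevelConfigurationSpec().
			WithType(flowcontrolv1.PriorityLevelEnablementExempt))
	return cs.FlowcontrolV1().PriorityLevelConfigurations().Apply(ctx, plc,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
}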
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go deleted file mode 100644 index 1bd58d088..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowcontrol_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeFlowcontrolV1beta1 struct { - *testing.Fake -} - -func (c *FakeFlowcontrolV1beta1) FlowSchemas() v1beta1.FlowSchemaInterface { - return &FakeFlowSchemas{c} -} - -func (c *FakeFlowcontrolV1beta1) PriorityLevelConfigurations() v1beta1.PriorityLevelConfigurationInterface { - return &FakePriorityLevelConfigurations{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeFlowcontrolV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go deleted file mode 100644 index be7a7e390..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_flowschema.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/flowcontrol/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeFlowSchemas implements FlowSchemaInterface -type FakeFlowSchemas struct { - Fake *FakeFlowcontrolV1beta1 -} - -var flowschemasResource = v1beta1.SchemeGroupVersion.WithResource("flowschemas") - -var flowschemasKind = v1beta1.SchemeGroupVersion.WithKind("FlowSchema") - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. 
-func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta1.FlowSchemaList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.FlowSchemaList{ListMeta: obj.(*v1beta1.FlowSchemaList).ListMeta} - for _, item := range obj.(*v1beta1.FlowSchemaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1beta1.FlowSchema{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.FlowSchemaList{}) - return err -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.FlowSchema), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.FlowSchema), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go deleted file mode 100644 index 698a168b3..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake/fake_prioritylevelconfiguration.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
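Annotation: the v1beta1 fake client files (doc.go, fake_flowcontrol_client.go, fake_flowschema.go, fake_prioritylevelconfiguration.go) are removed from the vendor tree here, presumably because nothing in this module imports them and go mod vendor prunes unused packages. Tests that need a flowcontrol client can still use the top-level fake clientset; a small sketch, with the seeded object name chosen purely for illustration:

package example

import (
	"context"
	"testing"

	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestListFlowSchemas(t *testing.T) {
	// Seed the in-memory object tracker with one FlowSchema; no API server is involved.
	cs := fake.NewSimpleClientset(&flowcontrolv1.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{Name: "exempt"},
	})

	fss, err := cs.FlowcontrolV1().FlowSchemas().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(fss.Items) != 1 {
		t.Fatalf("expected 1 FlowSchema, got %d", len(fss.Items))
	}
}

In client-go releases of this vintage, fake.NewClientset is the newer constructor that also tracks apply-configuration field ownership; NewSimpleClientset still works but is marked deprecated upstream.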
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/flowcontrol/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface -type FakePriorityLevelConfigurations struct { - Fake *FakeFlowcontrolV1beta1 -} - -var prioritylevelconfigurationsResource = v1beta1.SchemeGroupVersion.WithResource("prioritylevelconfigurations") - -var prioritylevelconfigurationsKind = v1beta1.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta1.PriorityLevelConfigurationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.PriorityLevelConfigurationList{ListMeta: obj.(*v1beta1.PriorityLevelConfigurationList).ListMeta} - for _, item := range obj.(*v1beta1.PriorityLevelConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1beta1.PriorityLevelConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.PriorityLevelConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityLevelConfiguration), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go index c29cfca95..ac3f5ffe8 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowcontrol_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := flowcontrolv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go index a9d38becf..f0def3947 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/flowschema.go @@ -19,18 +19,15 @@ limitations under the License. 
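Annotation: in flowcontrol_client.go above, setConfigDefaults switches from scheme.Codecs.WithoutConversion() to rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion(), the codec factory that client-go v0.32 pairs with the gentype.PrefersProtobuf option so generated clients can negotiate protobuf content types where the scheme supports them. A sketch of the equivalent manual configuration for this group/version, mirroring the generated defaults (the function name and the trailing RESTClientFor call are illustrative):

package example

import (
	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
	"k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

// restClientForFlowcontrolV1beta1 builds a REST client with the same defaults
// that the generated FlowcontrolV1beta1Client applies in setConfigDefaults.
func restClientForFlowcontrolV1beta1(c *rest.Config) (rest.Interface, error) {
	config := *c // copy so the caller's config is not mutated
	gv := flowcontrolv1beta1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return rest.RESTClientFor(&config)
}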
package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + applyconfigurationsflowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -41,203 +38,38 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (*v1beta1.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) - UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (*v1beta1.FlowSchema, error) + Create(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchema, opts v1.CreateOptions) (*flowcontrolv1beta1.FlowSchema, error) + Update(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta1.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta1.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.FlowSchema, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.FlowSchemaList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta1.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta1.FlowSchemaList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta1.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*flowcontrolv1beta1.FlowSchema, *flowcontrolv1beta1.FlowSchemaList, *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta1Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*flowcontrolv1beta1.FlowSchema, *flowcontrolv1beta1.FlowSchemaList, *applyconfigurationsflowcontrolv1beta1.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *flowcontrolv1beta1.FlowSchema { return &flowcontrolv1beta1.FlowSchema{} }, + func() *flowcontrolv1beta1.FlowSchemaList { return &flowcontrolv1beta1.FlowSchemaList{} }, + gentype.PrefersProtobuf[*flowcontrolv1beta1.FlowSchema](), + ), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.CreateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). 
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta1.FlowSchema, opts v1.UpdateOptions) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.FlowSchema, err error) { - result = &v1beta1.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta1.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go index 41f35cbcc..15ee1b8b5 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + applyconfigurationsflowcontrolv1beta1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -41,203 +38,42 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta1.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) - UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta1.PriorityLevelConfiguration, error) + Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
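Annotation: the regenerated interfaces in this and the following hunks keep UpdateStatus (and its +genclient:noStatus escape hatch) because FlowSchema and PriorityLevelConfiguration carry a Status member, so writes to status go through the dedicated subresource. A hedged read-modify-write sketch of that path, using the v1 group for brevity; the condition values are illustrative, not taken from this diff:

package example

import (
	"context"

	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// markFlowSchemaFound appends a condition and writes it through the status
// subresource, which is what UpdateStatus targets (PUT .../flowschemas/<name>/status).
func markFlowSchemaFound(ctx context.Context, cs kubernetes.Interface, name string) error {
	fs, err := cs.FlowcontrolV1().FlowSchemas().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fs.Status.Conditions = append(fs.Status.Conditions, flowcontrolv1.FlowSchemaCondition{
		Type:               flowcontrolv1.FlowSchemaConditionDangling,
		Status:             flowcontrolv1.ConditionFalse,
		LastTransitionTime: metav1.Now(),
		Reason:             "Found",
		Message:            "referenced priority level configuration exists",
	})
	_, err = cs.FlowcontrolV1().FlowSchemas().UpdateStatus(ctx, fs, metav1.UpdateOptions{})
	return err
}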
+ UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityLevelConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityLevelConfigurationList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta1.PriorityLevelConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) - ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta1.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*flowcontrolv1beta1.PriorityLevelConfiguration, *flowcontrolv1beta1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*flowcontrolv1beta1.PriorityLevelConfiguration, *flowcontrolv1beta1.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *flowcontrolv1beta1.PriorityLevelConfiguration { + return &flowcontrolv1beta1.PriorityLevelConfiguration{} + }, + func() *flowcontrolv1beta1.PriorityLevelConfigurationList { + return &flowcontrolv1beta1.PriorityLevelConfigurationList{} + }, + gentype.PrefersProtobuf[*flowcontrolv1beta1.PriorityLevelConfiguration](), + ), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
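Annotation: the deleted Watch body in this hunk shows the old pattern of setting opts.Watch and returning a watch.Interface; the generic client preserves that contract, so watch consumers do not change. A short consumer sketch (the function name and printed format are illustrative, and the v1 group is used for brevity):

package example

import (
	"context"
	"fmt"

	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchFlowSchemas drains watch events until the context is cancelled or the
// server closes the stream; each event carries the typed object.
func watchFlowSchemas(ctx context.Context, cs kubernetes.Interface) error {
	w, err := cs.FlowcontrolV1().FlowSchemas().Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		if fs, ok := ev.Object.(*flowcontrolv1.FlowSchema); ok {
			fmt.Printf("%s %s\n", ev.Type, fs.Name)
		}
	}
	return nil
}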
-func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityLevelConfiguration, err error) { - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta1.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowcontrol_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowcontrol_client.go deleted file mode 100644 index 9f36b3b7a..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowcontrol_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeFlowcontrolV1beta2 struct { - *testing.Fake -} - -func (c *FakeFlowcontrolV1beta2) FlowSchemas() v1beta2.FlowSchemaInterface { - return &FakeFlowSchemas{c} -} - -func (c *FakeFlowcontrolV1beta2) PriorityLevelConfigurations() v1beta2.PriorityLevelConfigurationInterface { - return &FakePriorityLevelConfigurations{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeFlowcontrolV1beta2) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go deleted file mode 100644 index 7ce6d2116..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_flowschema.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta2 "k8s.io/api/flowcontrol/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" - testing "k8s.io/client-go/testing" -) - -// FakeFlowSchemas implements FlowSchemaInterface -type FakeFlowSchemas struct { - Fake *FakeFlowcontrolV1beta2 -} - -var flowschemasResource = v1beta2.SchemeGroupVersion.WithResource("flowschemas") - -var flowschemasKind = v1beta2.SchemeGroupVersion.WithKind("FlowSchema") - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta2.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. 
-func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta2.FlowSchemaList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.FlowSchemaList{ListMeta: obj.(*v1beta2.FlowSchemaList).ListMeta} - for _, item := range obj.(*v1beta2.FlowSchemaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta2.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta2.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta2.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1beta2.FlowSchema{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.FlowSchemaList{}) - return err -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta2.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta2.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.FlowSchema), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta2.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.FlowSchema), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go deleted file mode 100644 index 7340f8a09..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake/fake_prioritylevelconfiguration.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta2 "k8s.io/api/flowcontrol/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" - testing "k8s.io/client-go/testing" -) - -// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface -type FakePriorityLevelConfigurations struct { - Fake *FakeFlowcontrolV1beta2 -} - -var prioritylevelconfigurationsResource = v1beta2.SchemeGroupVersion.WithResource("prioritylevelconfigurations") - -var prioritylevelconfigurationsKind = v1beta2.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta2.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta2.PriorityLevelConfigurationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta2.PriorityLevelConfigurationList{ListMeta: obj.(*v1beta2.PriorityLevelConfigurationList).ListMeta} - for _, item := range obj.(*v1beta2.PriorityLevelConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta2.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1beta2.PriorityLevelConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta2.PriorityLevelConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta2.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta2.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta2.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta2.PriorityLevelConfiguration), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go index f3cca4fc7..7652d4f39 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowcontrol_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - "net/http" + http "net/http" - v1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/client-go/kubernetes/scheme" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1beta2Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta2.SchemeGroupVersion + gv := flowcontrolv1beta2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go index 3a1f12b6a..780cf030e 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/flowschema.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1beta2 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + applyconfigurationsflowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -41,203 +38,38 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (*v1beta2.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) - UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (*v1beta2.FlowSchema, error) + Create(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchema, opts v1.CreateOptions) (*flowcontrolv1beta2.FlowSchema, error) + Update(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta2.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta2.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.FlowSchema, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.FlowSchemaList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta2.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta2.FlowSchemaList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta2.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*flowcontrolv1beta2.FlowSchema, *flowcontrolv1beta2.FlowSchemaList, *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta2Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*flowcontrolv1beta2.FlowSchema, *flowcontrolv1beta2.FlowSchemaList, *applyconfigurationsflowcontrolv1beta2.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *flowcontrolv1beta2.FlowSchema { return &flowcontrolv1beta2.FlowSchema{} }, + func() *flowcontrolv1beta2.FlowSchemaList { return &flowcontrolv1beta2.FlowSchemaList{} }, + gentype.PrefersProtobuf[*flowcontrolv1beta2.FlowSchema](), + ), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.CreateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). 
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta2.FlowSchema, opts v1.UpdateOptions) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.FlowSchema, err error) { - result = &v1beta2.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta2.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta2.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta2.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go index f028869f1..65b9feafa 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta2 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta2 "k8s.io/api/flowcontrol/v1beta2" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + applyconfigurationsflowcontrolv1beta2 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -41,203 +38,42 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta2.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) - UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta2.PriorityLevelConfiguration, error) + Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.PriorityLevelConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta2.PriorityLevelConfigurationList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta2.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta2.PriorityLevelConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) - ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta2.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta2.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*flowcontrolv1beta2.PriorityLevelConfiguration, *flowcontrolv1beta2.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta2Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*flowcontrolv1beta2.PriorityLevelConfiguration, *flowcontrolv1beta2.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *flowcontrolv1beta2.PriorityLevelConfiguration { + return &flowcontrolv1beta2.PriorityLevelConfiguration{} + }, + func() *flowcontrolv1beta2.PriorityLevelConfigurationList { + return &flowcontrolv1beta2.PriorityLevelConfigurationList{} + }, + gentype.PrefersProtobuf[*flowcontrolv1beta2.PriorityLevelConfiguration](), + ), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta2.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta2.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PriorityLevelConfiguration, err error) { - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta2.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowcontrol_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowcontrol_client.go deleted file mode 100644 index 1cb0198d0..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowcontrol_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeFlowcontrolV1beta3 struct { - *testing.Fake -} - -func (c *FakeFlowcontrolV1beta3) FlowSchemas() v1beta3.FlowSchemaInterface { - return &FakeFlowSchemas{c} -} - -func (c *FakeFlowcontrolV1beta3) PriorityLevelConfigurations() v1beta3.PriorityLevelConfigurationInterface { - return &FakePriorityLevelConfigurations{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeFlowcontrolV1beta3) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go deleted file mode 100644 index 1371f6ed6..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_flowschema.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta3 "k8s.io/api/flowcontrol/v1beta3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" - testing "k8s.io/client-go/testing" -) - -// FakeFlowSchemas implements FlowSchemaInterface -type FakeFlowSchemas struct { - Fake *FakeFlowcontrolV1beta3 -} - -var flowschemasResource = v1beta3.SchemeGroupVersion.WithResource("flowschemas") - -var flowschemasKind = v1beta3.SchemeGroupVersion.WithKind("FlowSchema") - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *FakeFlowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(flowschemasResource, name), &v1beta3.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. 
-func (c *FakeFlowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.FlowSchemaList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(flowschemasResource, flowschemasKind, opts), &v1beta3.FlowSchemaList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta3.FlowSchemaList{ListMeta: obj.(*v1beta3.FlowSchemaList).ListMeta} - for _, item := range obj.(*v1beta3.FlowSchemaList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *FakeFlowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(flowschemasResource, opts)) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (result *v1beta3.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(flowschemasResource, flowSchema), &v1beta3.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *FakeFlowSchemas) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(flowschemasResource, flowSchema), &v1beta3.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFlowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(flowschemasResource, "status", flowSchema), &v1beta3.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *FakeFlowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(flowschemasResource, name, opts), &v1beta3.FlowSchema{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeFlowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(flowschemasResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta3.FlowSchemaList{}) - return err -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *FakeFlowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, name, pt, data, subresources...), &v1beta3.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *FakeFlowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data), &v1beta3.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.FlowSchema), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeFlowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(flowschemasResource, *name, types.ApplyPatchType, data, "status"), &v1beta3.FlowSchema{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.FlowSchema), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go deleted file mode 100644 index a0e266fec..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake/fake_prioritylevelconfiguration.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta3 "k8s.io/api/flowcontrol/v1beta3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" - testing "k8s.io/client-go/testing" -) - -// FakePriorityLevelConfigurations implements PriorityLevelConfigurationInterface -type FakePriorityLevelConfigurations struct { - Fake *FakeFlowcontrolV1beta3 -} - -var prioritylevelconfigurationsResource = v1beta3.SchemeGroupVersion.WithResource("prioritylevelconfigurations") - -var prioritylevelconfigurationsKind = v1beta3.SchemeGroupVersion.WithKind("PriorityLevelConfiguration") - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *FakePriorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(prioritylevelconfigurationsResource, name), &v1beta3.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *FakePriorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(prioritylevelconfigurationsResource, prioritylevelconfigurationsKind, opts), &v1beta3.PriorityLevelConfigurationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta3.PriorityLevelConfigurationList{ListMeta: obj.(*v1beta3.PriorityLevelConfigurationList).ListMeta} - for _, item := range obj.(*v1beta3.PriorityLevelConfigurationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *FakePriorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(prioritylevelconfigurationsResource, opts)) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *FakePriorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *FakePriorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(prioritylevelconfigurationsResource, priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePriorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(prioritylevelconfigurationsResource, "status", priorityLevelConfiguration), &v1beta3.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *FakePriorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(prioritylevelconfigurationsResource, name, opts), &v1beta3.PriorityLevelConfiguration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(prioritylevelconfigurationsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta3.PriorityLevelConfigurationList{}) - return err -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, name, pt, data, subresources...), &v1beta3.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *FakePriorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data), &v1beta3.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePriorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(prioritylevelconfigurationsResource, *name, types.ApplyPatchType, data, "status"), &v1beta3.PriorityLevelConfiguration{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta3.PriorityLevelConfiguration), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go index 461120bd3..b32dc911c 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowcontrol_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta3 import ( - "net/http" + http "net/http" - v1beta3 "k8s.io/api/flowcontrol/v1beta3" - "k8s.io/client-go/kubernetes/scheme" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *FlowcontrolV1beta3Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta3.SchemeGroupVersion + gv := flowcontrolv1beta3.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go index 5fa39d6ba..1e0d9feb1 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/flowschema.go @@ -19,18 +19,15 @@ limitations under the License. 
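In the flowcontrol_client.go hunk above, setConfigDefaults now wraps the codec factory with rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs) so generated clients can pick up scheme-aware serializer negotiation (e.g. the opt-in CBOR support in newer client-go) instead of using scheme.Codecs directly. Callers are unaffected; a minimal sketch, assuming a kubeconfig at a placeholder path and the default "exempt" FlowSchema as an example name:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path; in-cluster config would work the same way.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// NewForConfig runs setConfigDefaults internally; the serializer change is
	// not visible at this level.
	fc, err := flowcontrolclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fs, err := fc.FlowSchemas().Get(context.TODO(), "exempt", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(fs.Spec.PriorityLevelConfiguration.Name)
}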
package v1beta3 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + applyconfigurationsflowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // FlowSchemasGetter has a method to return a FlowSchemaInterface. @@ -41,203 +38,38 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (*v1beta3.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) - UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (*v1beta3.FlowSchema, error) + Create(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchema, opts v1.CreateOptions) (*flowcontrolv1beta3.FlowSchema, error) + Update(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta3.FlowSchema, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchema, opts v1.UpdateOptions) (*flowcontrolv1beta3.FlowSchema, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.FlowSchema, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta3.FlowSchemaList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta3.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta3.FlowSchemaList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta3.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.FlowSchema, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, flowSchema *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.FlowSchema, err error) FlowSchemaExpansion } // flowSchemas implements FlowSchemaInterface type flowSchemas struct { - client rest.Interface + *gentype.ClientWithListAndApply[*flowcontrolv1beta3.FlowSchema, *flowcontrolv1beta3.FlowSchemaList, *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration] } // newFlowSchemas returns a FlowSchemas func newFlowSchemas(c *FlowcontrolV1beta3Client) *flowSchemas { return &flowSchemas{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*flowcontrolv1beta3.FlowSchema, *flowcontrolv1beta3.FlowSchemaList, *applyconfigurationsflowcontrolv1beta3.FlowSchemaApplyConfiguration]( + "flowschemas", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *flowcontrolv1beta3.FlowSchema { return &flowcontrolv1beta3.FlowSchema{} }, + func() *flowcontrolv1beta3.FlowSchemaList { return &flowcontrolv1beta3.FlowSchemaList{} }, + gentype.PrefersProtobuf[*flowcontrolv1beta3.FlowSchema](), + ), } } - -// Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Get(). - Resource("flowschemas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. -func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.FlowSchemaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta3.FlowSchemaList{} - err = c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.CreateOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Post(). - Resource("flowschemas"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). 
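With this rewrite the generated flowSchemas type no longer carries hand-written REST plumbing; it embeds gentype.ClientWithListAndApply, a generic client parameterized by the object, list, and apply-configuration types, and the constructor simply wires in the resource name, REST client, parameter codec, and empty-object factories. The public FlowSchemaInterface is unchanged, so existing call sites keep working, including server-side apply. A minimal apply sketch, assuming a kubeconfig at a placeholder path and placeholder names "demo", "global-default", and "demo-manager":

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Declare only the fields this manager owns; the generic client turns this
	// into an ApplyPatchType PATCH, as the old hand-written Apply did.
	fs := flowcontrolac.FlowSchema("demo").
		WithSpec(flowcontrolac.FlowSchemaSpec().
			WithPriorityLevelConfiguration(
				flowcontrolac.PriorityLevelConfigurationReference().WithName("global-default")))

	applied, err := cs.FlowcontrolV1beta3().FlowSchemas().
		Apply(context.TODO(), fs, metav1.ApplyOptions{FieldManager: "demo-manager", Force: true})
	if err != nil {
		panic(err)
	}
	fmt.Println(applied.ResourceVersion)
}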
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1beta3.FlowSchema, opts v1.UpdateOptions) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Put(). - Resource("flowschemas"). - Name(flowSchema.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(flowSchema). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("flowschemas"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("flowschemas"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.FlowSchema, err error) { - result = &v1beta3.FlowSchema{} - err = c.client.Patch(pt). - Resource("flowschemas"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - result = &v1beta3.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1beta3.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.FlowSchema, err error) { - if flowSchema == nil { - return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(flowSchema) - if err != nil { - return nil, err - } - - name := flowSchema.Name - if name == nil { - return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") - } - - result = &v1beta3.FlowSchema{} - err = c.client.Patch(types.ApplyPatchType). - Resource("flowschemas"). - Name(*name). - SubResource("status"). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go index 49f05257c..91bbf3fb1 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta3 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta3 "k8s.io/api/flowcontrol/v1beta3" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + applyconfigurationsflowcontrolv1beta3 "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityLevelConfigurationsGetter has a method to return a PriorityLevelConfigurationInterface. @@ -41,203 +38,42 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1beta3.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) - UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1beta3.PriorityLevelConfiguration, error) + Create(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.PriorityLevelConfiguration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta3.PriorityLevelConfigurationList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*flowcontrolv1beta3.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*flowcontrolv1beta3.PriorityLevelConfigurationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) - ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *flowcontrolv1beta3.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.PriorityLevelConfiguration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, priorityLevelConfiguration *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *flowcontrolv1beta3.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } // priorityLevelConfigurations implements PriorityLevelConfigurationInterface type priorityLevelConfigurations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*flowcontrolv1beta3.PriorityLevelConfiguration, *flowcontrolv1beta3.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration] } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations func newPriorityLevelConfigurations(c *FlowcontrolV1beta3Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*flowcontrolv1beta3.PriorityLevelConfiguration, *flowcontrolv1beta3.PriorityLevelConfigurationList, *applyconfigurationsflowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration]( + "prioritylevelconfigurations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *flowcontrolv1beta3.PriorityLevelConfiguration { + return &flowcontrolv1beta3.PriorityLevelConfiguration{} + }, + func() *flowcontrolv1beta3.PriorityLevelConfigurationList { + return &flowcontrolv1beta3.PriorityLevelConfigurationList{} + }, + gentype.PrefersProtobuf[*flowcontrolv1beta3.PriorityLevelConfiguration](), + ), } } - -// Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.PriorityLevelConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta3.PriorityLevelConfigurationList{} - err = c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
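prioritylevelconfiguration.go gets the same treatment: the typed client becomes a thin wrapper over gentype.ClientWithListAndApply, and the gentype.PrefersProtobuf option asks the API server for protobuf encoding of these built-in types where available. Read paths are unchanged; a minimal sketch, assuming a kubeconfig at a placeholder path:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	plcs, err := cs.FlowcontrolV1beta3().PriorityLevelConfigurations().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, plc := range plcs.Items {
		fmt.Println(plc.Name, plc.Spec.Type)
	}
}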
-func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Post(). - Resource("prioritylevelconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1beta3.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Put(). - Resource("prioritylevelconfigurations"). - Name(priorityLevelConfiguration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityLevelConfiguration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("prioritylevelconfigurations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.PriorityLevelConfiguration, err error) { - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Patch(pt). - Resource("prioritylevelconfigurations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. 
-func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1beta3.PriorityLevelConfiguration, err error) { - if priorityLevelConfiguration == nil { - return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityLevelConfiguration) - if err != nil { - return nil, err - } - - name := priorityLevelConfiguration.Name - if name == nil { - return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") - } - - result = &v1beta3.PriorityLevelConfiguration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("prioritylevelconfigurations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go deleted file mode 100644 index 002de0dd8..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingress.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
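The networking/v1 fake package (doc.go, fake_ingress.go, fake_ingressclass.go, fake_networking_client.go, fake_networkpolicy.go) is removed in the same way; tests that need these types can go through the aggregated fake clientset, which still implements NetworkingV1(). A minimal sketch, assuming client-go v0.32 and placeholder names "demo" and the "default" namespace:

package main

import (
	"context"
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	cs := fake.NewClientset()

	ing := &networkingv1.Ingress{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	if _, err := cs.NetworkingV1().Ingresses("default").Create(context.TODO(), ing, metav1.CreateOptions{}); err != nil {
		panic(err)
	}

	list, err := cs.NetworkingV1().Ingresses("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(list.Items)) // 1
}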
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" - testing "k8s.io/client-go/testing" -) - -// FakeIngresses implements IngressInterface -type FakeIngresses struct { - Fake *FakeNetworkingV1 - ns string -} - -var ingressesResource = v1.SchemeGroupVersion.WithResource("ingresses") - -var ingressesKind = v1.SchemeGroupVersion.WithKind("Ingress") - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *FakeIngresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Ingress), err -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *FakeIngresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1.IngressList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.IngressList{ListMeta: obj.(*v1.IngressList).ListMeta} - for _, item := range obj.(*v1.IngressList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *FakeIngresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) - -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Ingress), err -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Ingress), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Ingress), err -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *FakeIngresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &v1.Ingress{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.IngressList{}) - return err -} - -// Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Ingress), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Ingress), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Ingress), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go deleted file mode 100644 index 208a97508..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_ingressclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" - testing "k8s.io/client-go/testing" -) - -// FakeIngressClasses implements IngressClassInterface -type FakeIngressClasses struct { - Fake *FakeNetworkingV1 -} - -var ingressclassesResource = v1.SchemeGroupVersion.WithResource("ingressclasses") - -var ingressclassesKind = v1.SchemeGroupVersion.WithKind("IngressClass") - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *FakeIngressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(ingressclassesResource, name), &v1.IngressClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.IngressClass), err -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *FakeIngressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &v1.IngressClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.IngressClassList{ListMeta: obj.(*v1.IngressClassList).ListMeta} - for _, item := range obj.(*v1.IngressClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *FakeIngressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(ingressclassesResource, opts)) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. 
-func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &v1.IngressClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.IngressClass), err -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &v1.IngressClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.IngressClass), err -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(ingressclassesResource, name, opts), &v1.IngressClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(ingressclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.IngressClassList{}) - return err -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &v1.IngressClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.IngressClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. -func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, *name, types.ApplyPatchType, data), &v1.IngressClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.IngressClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go deleted file mode 100644 index ed1639e2f..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/networking/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeNetworkingV1 struct { - *testing.Fake -} - -func (c *FakeNetworkingV1) Ingresses(namespace string) v1.IngressInterface { - return &FakeIngresses{c, namespace} -} - -func (c *FakeNetworkingV1) IngressClasses() v1.IngressClassInterface { - return &FakeIngressClasses{c} -} - -func (c *FakeNetworkingV1) NetworkPolicies(namespace string) v1.NetworkPolicyInterface { - return &FakeNetworkPolicies{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeNetworkingV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go deleted file mode 100644 index dde09774c..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" - testing "k8s.io/client-go/testing" -) - -// FakeNetworkPolicies implements NetworkPolicyInterface -type FakeNetworkPolicies struct { - Fake *FakeNetworkingV1 - ns string -} - -var networkpoliciesResource = v1.SchemeGroupVersion.WithResource("networkpolicies") - -var networkpoliciesKind = v1.SchemeGroupVersion.WithKind("NetworkPolicy") - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.NetworkPolicy), err -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. 
-func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1.NetworkPolicyList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.NetworkPolicyList{ListMeta: obj.(*v1.NetworkPolicyList).ListMeta} - for _, item := range obj.(*v1.NetworkPolicyList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts)) - -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.NetworkPolicy), err -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.NetworkPolicy), err -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(networkpoliciesResource, c.ns, name, opts), &v1.NetworkPolicy{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.NetworkPolicyList{}) - return err -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.NetworkPolicy), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. 
-func (c *FakeNetworkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, *name, types.ApplyPatchType, data), &v1.NetworkPolicy{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.NetworkPolicy), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go index 9923d6cba..76d416249 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingress.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/networking/v1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -41,216 +38,38 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. type IngressInterface interface { - Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error) - Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) - UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) + Create(ctx context.Context, ingress *networkingv1.Ingress, opts metav1.CreateOptions) (*networkingv1.Ingress, error) + Update(ctx context.Context, ingress *networkingv1.Ingress, opts metav1.UpdateOptions) (*networkingv1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ingress *networkingv1.Ingress, opts metav1.UpdateOptions) (*networkingv1.Ingress, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Ingress, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkingv1.Ingress, error) + List(ctx context.Context, opts metav1.ListOptions) (*networkingv1.IngressList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) - Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) - ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkingv1.Ingress, err error) + Apply(ctx context.Context, ingress *applyconfigurationsnetworkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, ingress *applyconfigurationsnetworkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*networkingv1.Ingress, *networkingv1.IngressList, *applyconfigurationsnetworkingv1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *NetworkingV1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*networkingv1.Ingress, *networkingv1.IngressList, *applyconfigurationsnetworkingv1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *networkingv1.Ingress { return &networkingv1.Ingress{} }, + func() *networkingv1.IngressList { return &networkingv1.IngressList{} }, + gentype.PrefersProtobuf[*networkingv1.Ingress](), + ), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) { - result = &v1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. 
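Note: the ingress.go hunk above swaps the hand-written REST verb bodies for an embedded gentype.ClientWithListAndApply; the exported IngressInterface keeps the same method set, so downstream callers compile unchanged. A minimal caller-side sketch, assuming a reachable cluster, a kubeconfig at the default path, and an illustrative namespace/object name not taken from this repository:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a rest.Config from the default kubeconfig location (assumption:
	// $HOME/.kube/config points at a reachable cluster).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Get/List/Watch/Patch/Apply are now served by the embedded generic client,
	// but the call site is identical to what it was before this vendor bump.
	ing, err := cs.NetworkingV1().Ingresses("default").Get(context.TODO(), "example-ingress", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(ing.Name)
}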
-func (c *ingresses) Apply(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *networkingv1.IngressApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go index 16c8e48bf..3bbbf9e15 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/ingressclass.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/networking/v1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressClassesGetter has a method to return a IngressClassInterface. @@ -41,157 +38,34 @@ type IngressClassesGetter interface { // IngressClassInterface has methods to work with IngressClass resources. 
type IngressClassInterface interface { - Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (*v1.IngressClass, error) - Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (*v1.IngressClass, error) + Create(ctx context.Context, ingressClass *networkingv1.IngressClass, opts metav1.CreateOptions) (*networkingv1.IngressClass, error) + Update(ctx context.Context, ingressClass *networkingv1.IngressClass, opts metav1.UpdateOptions) (*networkingv1.IngressClass, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.IngressClass, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressClassList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkingv1.IngressClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*networkingv1.IngressClassList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) - Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkingv1.IngressClass, err error) + Apply(ctx context.Context, ingressClass *applyconfigurationsnetworkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.IngressClass, err error) IngressClassExpansion } // ingressClasses implements IngressClassInterface type ingressClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*networkingv1.IngressClass, *networkingv1.IngressClassList, *applyconfigurationsnetworkingv1.IngressClassApplyConfiguration] } // newIngressClasses returns a IngressClasses func newIngressClasses(c *NetworkingV1Client) *ingressClasses { return &ingressClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*networkingv1.IngressClass, *networkingv1.IngressClassList, *applyconfigurationsnetworkingv1.IngressClassApplyConfiguration]( + "ingressclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *networkingv1.IngressClass { return &networkingv1.IngressClass{} }, + func() *networkingv1.IngressClassList { return &networkingv1.IngressClassList{} }, + gentype.PrefersProtobuf[*networkingv1.IngressClass](), + ), } } - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *ingressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Get(). - Resource("ingressclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. 
-func (c *ingressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.IngressClassList{} - err = c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *ingressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Post(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Put(). - Resource("ingressclasses"). - Name(ingressClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *ingressClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("ingressclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ingressclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) { - result = &v1.IngressClass{} - err = c.client.Patch(pt). - Resource("ingressclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. 
-func (c *ingressClasses) Apply(ctx context.Context, ingressClass *networkingv1.IngressClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - result = &v1.IngressClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("ingressclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go index 3b72a7ae9..692b52f02 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/networking/v1" - "k8s.io/client-go/kubernetes/scheme" + networkingv1 "k8s.io/api/networking/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -95,10 +95,10 @@ func New(c rest.Interface) *NetworkingV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := networkingv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index d7454ce14..2758c2bfb 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/networking/v1" + networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. @@ -41,168 +38,34 @@ type NetworkPoliciesGetter interface { // NetworkPolicyInterface has methods to work with NetworkPolicy resources. 
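Note: setConfigDefaults in networking_client.go now builds its NegotiatedSerializer with rest.CodecFactoryForGeneratedClient, which pairs with the gentype.PrefersProtobuf option used by the clients above so requests can negotiate protobuf where the types support it. For comparison, a caller can still pin the wire format on a plain rest.Config by hand; a hedged sketch, purely illustrative and not part of this change:

package example

import rest "k8s.io/client-go/rest"

// forceProtobuf sends protobuf request bodies and asks for protobuf responses,
// while still listing JSON as an acceptable response media type.
func forceProtobuf(cfg *rest.Config) {
	cfg.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
	cfg.ContentType = "application/vnd.kubernetes.protobuf"
}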
type NetworkPolicyInterface interface { - Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error) - Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error) + Create(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts metav1.CreateOptions) (*networkingv1.NetworkPolicy, error) + Update(ctx context.Context, networkPolicy *networkingv1.NetworkPolicy, opts metav1.UpdateOptions) (*networkingv1.NetworkPolicy, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkPolicyList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*networkingv1.NetworkPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*networkingv1.NetworkPolicyList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) - Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *networkingv1.NetworkPolicy, err error) + Apply(ctx context.Context, networkPolicy *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *networkingv1.NetworkPolicy, err error) NetworkPolicyExpansion } // networkPolicies implements NetworkPolicyInterface type networkPolicies struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*networkingv1.NetworkPolicy, *networkingv1.NetworkPolicyList, *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration] } // newNetworkPolicies returns a NetworkPolicies func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies { return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*networkingv1.NetworkPolicy, *networkingv1.NetworkPolicyList, *applyconfigurationsnetworkingv1.NetworkPolicyApplyConfiguration]( + "networkpolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *networkingv1.NetworkPolicy { return &networkingv1.NetworkPolicy{} }, + func() *networkingv1.NetworkPolicyList { return &networkingv1.NetworkPolicyList{} }, + gentype.PrefersProtobuf[*networkingv1.NetworkPolicy](), + ), } } - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. 
-func (c *networkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(networkPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied networkPolicy. 
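Note: the removed List/Watch bodies above are where per-resource TimeoutSeconds and selector plumbing used to live; that logic now sits in the shared gentype implementation, and callers keep expressing it through metav1.ListOptions. A minimal sketch, with an assumed namespace and label selector:

package example

import (
	"context"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listWebPolicies lists NetworkPolicies labeled app=web, bounding the request
// to 30 seconds on the server side via ListOptions.TimeoutSeconds.
func listWebPolicies(ctx context.Context, cs kubernetes.Interface) (*networkingv1.NetworkPolicyList, error) {
	timeout := int64(30)
	return cs.NetworkingV1().NetworkPolicies("default").List(ctx, metav1.ListOptions{
		LabelSelector:  "app=web",
		TimeoutSeconds: &timeout,
	})
}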
-func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) { - if networkPolicy == nil { - return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(networkPolicy) - if err != nil { - return nil, err - } - name := networkPolicy.Name - if name == nil { - return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply") - } - result = &v1.NetworkPolicy{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("networkpolicies"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go deleted file mode 100644 index 4db8df68c..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_ipaddress.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeIPAddresses implements IPAddressInterface -type FakeIPAddresses struct { - Fake *FakeNetworkingV1alpha1 -} - -var ipaddressesResource = v1alpha1.SchemeGroupVersion.WithResource("ipaddresses") - -var ipaddressesKind = v1alpha1.SchemeGroupVersion.WithKind("IPAddress") - -// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. -func (c *FakeIPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(ipaddressesResource, name), &v1alpha1.IPAddress{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAddress), err -} - -// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. -func (c *FakeIPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(ipaddressesResource, ipaddressesKind, opts), &v1alpha1.IPAddressList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.IPAddressList{ListMeta: obj.(*v1alpha1.IPAddressList).ListMeta} - for _, item := range obj.(*v1alpha1.IPAddressList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested iPAddresses. -func (c *FakeIPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(ipaddressesResource, opts)) -} - -// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *FakeIPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(ipaddressesResource, iPAddress), &v1alpha1.IPAddress{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAddress), err -} - -// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *FakeIPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(ipaddressesResource, iPAddress), &v1alpha1.IPAddress{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAddress), err -} - -// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. -func (c *FakeIPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(ipaddressesResource, name, opts), &v1alpha1.IPAddress{}) - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeIPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(ipaddressesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.IPAddressList{}) - return err -} - -// Patch applies the patch and returns the patched iPAddress. -func (c *FakeIPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ipaddressesResource, name, pt, data, subresources...), &v1alpha1.IPAddress{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAddress), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. -func (c *FakeIPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) { - if iPAddress == nil { - return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") - } - data, err := json.Marshal(iPAddress) - if err != nil { - return nil, err - } - name := iPAddress.Name - if name == nil { - return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ipaddressesResource, *name, types.ApplyPatchType, data), &v1alpha1.IPAddress{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAddress), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go deleted file mode 100644 index 80ad184bb..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeNetworkingV1alpha1 struct { - *testing.Fake -} - -func (c *FakeNetworkingV1alpha1) IPAddresses() v1alpha1.IPAddressInterface { - return &FakeIPAddresses{c} -} - -func (c *FakeNetworkingV1alpha1) ServiceCIDRs() v1alpha1.ServiceCIDRInterface { - return &FakeServiceCIDRs{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeNetworkingV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go deleted file mode 100644 index 653ef631a..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_servicecidr.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeServiceCIDRs implements ServiceCIDRInterface -type FakeServiceCIDRs struct { - Fake *FakeNetworkingV1alpha1 -} - -var servicecidrsResource = v1alpha1.SchemeGroupVersion.WithResource("servicecidrs") - -var servicecidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR") - -// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. -func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(servicecidrsResource, name), &v1alpha1.ServiceCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. -func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(servicecidrsResource, servicecidrsKind, opts), &v1alpha1.ServiceCIDRList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ServiceCIDRList{ListMeta: obj.(*v1alpha1.ServiceCIDRList).ListMeta} - for _, item := range obj.(*v1alpha1.ServiceCIDRList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested serviceCIDRs. -func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(servicecidrsResource, opts)) -} - -// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. 
-func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(servicecidrsResource, "status", serviceCIDR), &v1alpha1.ServiceCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. -func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(servicecidrsResource, name, opts), &v1alpha1.ServiceCIDR{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(servicecidrsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ServiceCIDRList{}) - return err -} - -// Patch applies the patch and returns the patched serviceCIDR. -func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, name, pt, data, subresources...), &v1alpha1.ServiceCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. -func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ServiceCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} - -// ApplyStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ServiceCIDR{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ServiceCIDR), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go index fff193d68..e96a564ab 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/ipaddress.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/networking/v1alpha1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + applyconfigurationsnetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IPAddressesGetter has a method to return a IPAddressInterface. @@ -41,157 +38,34 @@ type IPAddressesGetter interface { // IPAddressInterface has methods to work with IPAddress resources. 
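Note: the handwritten fakes under networking/v1alpha1/fake are dropped from the vendor tree in this change. Tests that need an in-memory client would typically still go through the aggregate fake clientset; a minimal sketch against the networking/v1 NetworkPolicy client, with illustrative object names (newer client-go also provides fake.NewClientset alongside NewSimpleClientset):

package example

import (
	"context"
	"testing"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestNetworkPolicyFake seeds the fake clientset with one object and reads it
// back through the same typed interface the real clientset exposes.
func TestNetworkPolicyFake(t *testing.T) {
	cs := fake.NewSimpleClientset(&networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-dns", Namespace: "default"},
	})
	got, err := cs.NetworkingV1().NetworkPolicies("default").Get(context.TODO(), "allow-dns", metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if got.Name != "allow-dns" {
		t.Fatalf("unexpected object: %+v", got)
	}
}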
type IPAddressInterface interface { - Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (*v1alpha1.IPAddress, error) - Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (*v1alpha1.IPAddress, error) + Create(ctx context.Context, iPAddress *networkingv1alpha1.IPAddress, opts v1.CreateOptions) (*networkingv1alpha1.IPAddress, error) + Update(ctx context.Context, iPAddress *networkingv1alpha1.IPAddress, opts v1.UpdateOptions) (*networkingv1alpha1.IPAddress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAddress, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAddressList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1alpha1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1alpha1.IPAddressList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) - Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1alpha1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1alpha1.IPAddress, err error) IPAddressExpansion } // iPAddresses implements IPAddressInterface type iPAddresses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*networkingv1alpha1.IPAddress, *networkingv1alpha1.IPAddressList, *applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration] } // newIPAddresses returns a IPAddresses func newIPAddresses(c *NetworkingV1alpha1Client) *iPAddresses { return &iPAddresses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*networkingv1alpha1.IPAddress, *networkingv1alpha1.IPAddressList, *applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration]( + "ipaddresses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *networkingv1alpha1.IPAddress { return &networkingv1alpha1.IPAddress{} }, + func() *networkingv1alpha1.IPAddressList { return &networkingv1alpha1.IPAddressList{} }, + gentype.PrefersProtobuf[*networkingv1alpha1.IPAddress](), + ), } } - -// Get takes name of the iPAddress, and returns the corresponding iPAddress object, and an error if there is any. -func (c *iPAddresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Get(). - Resource("ipaddresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IPAddresses that match those selectors. -func (c *iPAddresses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAddressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.IPAddressList{} - err = c.client.Get(). 
- Resource("ipaddresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested iPAddresses. -func (c *iPAddresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ipaddresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a iPAddress and creates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *iPAddresses) Create(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.CreateOptions) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Post(). - Resource("ipaddresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAddress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a iPAddress and updates it. Returns the server's representation of the iPAddress, and an error, if there is any. -func (c *iPAddresses) Update(ctx context.Context, iPAddress *v1alpha1.IPAddress, opts v1.UpdateOptions) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Put(). - Resource("ipaddresses"). - Name(iPAddress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAddress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the iPAddress and deletes it. Returns an error if one occurs. -func (c *iPAddresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ipaddresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *iPAddresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ipaddresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched iPAddress. -func (c *iPAddresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAddress, err error) { - result = &v1alpha1.IPAddress{} - err = c.client.Patch(pt). - Resource("ipaddresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied iPAddress. -func (c *iPAddresses) Apply(ctx context.Context, iPAddress *networkingv1alpha1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.IPAddress, err error) { - if iPAddress == nil { - return nil, fmt.Errorf("iPAddress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(iPAddress) - if err != nil { - return nil, err - } - name := iPAddress.Name - if name == nil { - return nil, fmt.Errorf("iPAddress.Name must be provided to Apply") - } - result = &v1alpha1.IPAddress{} - err = c.client.Patch(types.ApplyPatchType). 
- Resource("ipaddresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go index c730e6246..9e1b3064d 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *NetworkingV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := networkingv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go index 100f290a1..38cc26010 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/networking/v1alpha1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + applyconfigurationsnetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. @@ -41,203 +38,38 @@ type ServiceCIDRsGetter interface { // ServiceCIDRInterface has methods to work with ServiceCIDR resources. type ServiceCIDRInterface interface { - Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error) - Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) - UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) + Create(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDR, opts v1.CreateOptions) (*networkingv1alpha1.ServiceCIDR, error) + Update(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1alpha1.ServiceCIDR, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1alpha1.ServiceCIDR, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ServiceCIDR, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ServiceCIDRList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1alpha1.ServiceCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1alpha1.ServiceCIDRList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) - Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) - ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1alpha1.ServiceCIDR, err error) + Apply(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1alpha1.ServiceCIDR, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1alpha1.ServiceCIDR, err error) ServiceCIDRExpansion } // serviceCIDRs implements ServiceCIDRInterface type serviceCIDRs struct { - client rest.Interface + *gentype.ClientWithListAndApply[*networkingv1alpha1.ServiceCIDR, *networkingv1alpha1.ServiceCIDRList, *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration] } // newServiceCIDRs returns a ServiceCIDRs func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs { return &serviceCIDRs{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*networkingv1alpha1.ServiceCIDR, *networkingv1alpha1.ServiceCIDRList, *applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration]( + "servicecidrs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *networkingv1alpha1.ServiceCIDR { return &networkingv1alpha1.ServiceCIDR{} }, + func() *networkingv1alpha1.ServiceCIDRList { return &networkingv1alpha1.ServiceCIDRList{} }, + gentype.PrefersProtobuf[*networkingv1alpha1.ServiceCIDR](), + ), } } - -// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. -func (c *serviceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Get(). - Resource("servicecidrs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. 
-func (c *serviceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ServiceCIDRList{} - err = c.client.Get(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceCIDRs. -func (c *serviceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *serviceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Post(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *serviceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Put(). - Resource("servicecidrs"). - Name(serviceCIDR.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *serviceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Put(). - Resource("servicecidrs"). - Name(serviceCIDR.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. -func (c *serviceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("servicecidrs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("servicecidrs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched serviceCIDR. 
-func (c *serviceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(pt). - Resource("servicecidrs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. -func (c *serviceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("servicecidrs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *serviceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("servicecidrs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. 
-package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go deleted file mode 100644 index 7a3b861be..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/networking/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeIngresses implements IngressInterface -type FakeIngresses struct { - Fake *FakeNetworkingV1beta1 - ns string -} - -var ingressesResource = v1beta1.SchemeGroupVersion.WithResource("ingresses") - -var ingressesKind = v1beta1.SchemeGroupVersion.WithKind("Ingress") - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.IngressList{ListMeta: obj.(*v1beta1.IngressList).ListMeta} - for _, item := range obj.(*v1beta1.IngressList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts)) - -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *FakeIngresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &v1beta1.Ingress{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.IngressList{}) - return err -} - -// Patch applies the patch and returns the patched ingress. -func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *FakeIngresses) Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.Ingress{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Ingress), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go deleted file mode 100644 index 1804e61fc..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingressclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/networking/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeIngressClasses implements IngressClassInterface -type FakeIngressClasses struct { - Fake *FakeNetworkingV1beta1 -} - -var ingressclassesResource = v1beta1.SchemeGroupVersion.WithResource("ingressclasses") - -var ingressclassesKind = v1beta1.SchemeGroupVersion.WithKind("IngressClass") - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *FakeIngressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(ingressclassesResource, name), &v1beta1.IngressClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.IngressClass), err -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. -func (c *FakeIngressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &v1beta1.IngressClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.IngressClassList{ListMeta: obj.(*v1beta1.IngressClassList).ListMeta} - for _, item := range obj.(*v1beta1.IngressClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *FakeIngressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(ingressclassesResource, opts)) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &v1beta1.IngressClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.IngressClass), err -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &v1beta1.IngressClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.IngressClass), err -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(ingressclassesResource, name, opts), &v1beta1.IngressClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(ingressclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.IngressClassList{}) - return err -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &v1beta1.IngressClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.IngressClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. 
-func (c *FakeIngressClasses) Apply(ctx context.Context, ingressClass *networkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, *name, types.ApplyPatchType, data), &v1beta1.IngressClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.IngressClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go deleted file mode 100644 index b8792a306..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeNetworkingV1beta1 struct { - *testing.Fake -} - -func (c *FakeNetworkingV1beta1) Ingresses(namespace string) v1beta1.IngressInterface { - return &FakeIngresses{c, namespace} -} - -func (c *FakeNetworkingV1beta1) IngressClasses() v1beta1.IngressClassInterface { - return &FakeIngressClasses{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeNetworkingV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go index f74c7257a..ac1ffbb98 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go @@ -18,6 +18,10 @@ limitations under the License. 
package v1beta1 +type IPAddressExpansion interface{} + type IngressExpansion interface{} type IngressClassExpansion interface{} + +type ServiceCIDRExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go index b309281af..b2d5aa2ce 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/networking/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressesGetter has a method to return a IngressInterface. @@ -41,216 +38,38 @@ type IngressesGetter interface { // IngressInterface has methods to work with Ingress resources. type IngressInterface interface { - Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error) - Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) - UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error) + Create(ctx context.Context, ingress *networkingv1beta1.Ingress, opts v1.CreateOptions) (*networkingv1beta1.Ingress, error) + Update(ctx context.Context, ingress *networkingv1beta1.Ingress, opts v1.UpdateOptions) (*networkingv1beta1.Ingress, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ingress *networkingv1beta1.Ingress, opts v1.UpdateOptions) (*networkingv1beta1.Ingress, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.Ingress, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.IngressList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) - Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) - ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.Ingress, err error) + Apply(ctx context.Context, ingress *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.Ingress, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, ingress *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.Ingress, err error) IngressExpansion } // ingresses implements IngressInterface type ingresses struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*networkingv1beta1.Ingress, *networkingv1beta1.IngressList, *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration] } // newIngresses returns a Ingresses func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { return &ingresses{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*networkingv1beta1.Ingress, *networkingv1beta1.IngressList, *applyconfigurationsnetworkingv1beta1.IngressApplyConfiguration]( + "ingresses", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *networkingv1beta1.Ingress { return &networkingv1beta1.Ingress{} }, + func() *networkingv1beta1.IngressList { return &networkingv1beta1.IngressList{} }, + gentype.PrefersProtobuf[*networkingv1beta1.Ingress](), + ), } } - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingress). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. -func (c *ingresses) Apply(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *ingresses) ApplyStatus(ctx context.Context, ingress *networkingv1beta1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Ingress, err error) { - if ingress == nil { - return nil, fmt.Errorf("ingress provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingress) - if err != nil { - return nil, err - } - - name := ingress.Name - if name == nil { - return nil, fmt.Errorf("ingress.Name must be provided to Apply") - } - - result = &v1beta1.Ingress{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("ingresses"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go index 50ccdfdbb..dd37fc5cd 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/networking/v1beta1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - networkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // IngressClassesGetter has a method to return a IngressClassInterface. @@ -41,157 +38,34 @@ type IngressClassesGetter interface { // IngressClassInterface has methods to work with IngressClass resources. 
type IngressClassInterface interface { - Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (*v1beta1.IngressClass, error) - Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (*v1beta1.IngressClass, error) + Create(ctx context.Context, ingressClass *networkingv1beta1.IngressClass, opts v1.CreateOptions) (*networkingv1beta1.IngressClass, error) + Update(ctx context.Context, ingressClass *networkingv1beta1.IngressClass, opts v1.UpdateOptions) (*networkingv1beta1.IngressClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.IngressClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.IngressClass, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.IngressClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) - Apply(ctx context.Context, ingressClass *networkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IngressClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.IngressClass, err error) + Apply(ctx context.Context, ingressClass *applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.IngressClass, err error) IngressClassExpansion } // ingressClasses implements IngressClassInterface type ingressClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*networkingv1beta1.IngressClass, *networkingv1beta1.IngressClassList, *applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration] } // newIngressClasses returns a IngressClasses func newIngressClasses(c *NetworkingV1beta1Client) *ingressClasses { return &ingressClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*networkingv1beta1.IngressClass, *networkingv1beta1.IngressClassList, *applyconfigurationsnetworkingv1beta1.IngressClassApplyConfiguration]( + "ingressclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *networkingv1beta1.IngressClass { return &networkingv1beta1.IngressClass{} }, + func() *networkingv1beta1.IngressClassList { return &networkingv1beta1.IngressClassList{} }, + gentype.PrefersProtobuf[*networkingv1beta1.IngressClass](), + ), } } - -// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any. -func (c *ingressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Get(). - Resource("ingressclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IngressClasses that match those selectors. 
-func (c *ingressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressClassList{} - err = c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingressClasses. -func (c *ingressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Post(). - Resource("ingressclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any. -func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Put(). - Resource("ingressclasses"). - Name(ingressClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ingressClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs. -func (c *ingressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ingressclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ingressclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ingressClass. -func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) { - result = &v1beta1.IngressClass{} - err = c.client.Patch(pt). - Resource("ingressclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied ingressClass. 
-func (c *ingressClasses) Apply(ctx context.Context, ingressClass *networkingv1beta1.IngressClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.IngressClass, err error) { - if ingressClass == nil { - return nil, fmt.Errorf("ingressClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(ingressClass) - if err != nil { - return nil, err - } - name := ingressClass.Name - if name == nil { - return nil, fmt.Errorf("ingressClass.Name must be provided to Apply") - } - result = &v1beta1.IngressClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("ingressclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go new file mode 100644 index 000000000..0b7ffff72 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// IPAddressesGetter has a method to return a IPAddressInterface. +// A group's client should implement this interface. +type IPAddressesGetter interface { + IPAddresses() IPAddressInterface +} + +// IPAddressInterface has methods to work with IPAddress resources. 
+type IPAddressInterface interface { + Create(ctx context.Context, iPAddress *networkingv1beta1.IPAddress, opts v1.CreateOptions) (*networkingv1beta1.IPAddress, error) + Update(ctx context.Context, iPAddress *networkingv1beta1.IPAddress, opts v1.UpdateOptions) (*networkingv1beta1.IPAddress, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.IPAddress, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.IPAddressList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.IPAddress, err error) + Apply(ctx context.Context, iPAddress *applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.IPAddress, err error) + IPAddressExpansion +} + +// iPAddresses implements IPAddressInterface +type iPAddresses struct { + *gentype.ClientWithListAndApply[*networkingv1beta1.IPAddress, *networkingv1beta1.IPAddressList, *applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration] +} + +// newIPAddresses returns a IPAddresses +func newIPAddresses(c *NetworkingV1beta1Client) *iPAddresses { + return &iPAddresses{ + gentype.NewClientWithListAndApply[*networkingv1beta1.IPAddress, *networkingv1beta1.IPAddressList, *applyconfigurationsnetworkingv1beta1.IPAddressApplyConfiguration]( + "ipaddresses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *networkingv1beta1.IPAddress { return &networkingv1beta1.IPAddress{} }, + func() *networkingv1beta1.IPAddressList { return &networkingv1beta1.IPAddressList{} }, + gentype.PrefersProtobuf[*networkingv1beta1.IPAddress](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go index 851634ed0..cb4b0c601 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -19,17 +19,19 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) type NetworkingV1beta1Interface interface { RESTClient() rest.Interface + IPAddressesGetter IngressesGetter IngressClassesGetter + ServiceCIDRsGetter } // NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group. 
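Note on the NetworkingV1beta1 surface added in the hunk above and wired up in the hunk that follows: with this client-go bump the v1beta1 networking client exposes IPAddresses() and ServiceCIDRs() alongside Ingresses() and IngressClasses() (ServiceCIDR and IPAddress graduated from v1alpha1 to v1beta1 in Kubernetes 1.31). A minimal usage sketch, illustrative only and not part of the vendored patch; it assumes a kubeconfig at the default location and a cluster that actually serves the networking.k8s.io/v1beta1 ServiceCIDR API:

// servicecidr_list_sketch.go -- illustrative only, not part of the vendored diff.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes ~/.kube/config points at a cluster with the v1beta1 ServiceCIDR API enabled.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// ServiceCIDRs() is one of the two getters added to NetworkingV1beta1Interface above.
	cidrs, err := cs.NetworkingV1beta1().ServiceCIDRs().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range cidrs.Items {
		fmt.Println(c.Name, c.Spec.CIDRs)
	}
}

The call shape is the same for IPAddresses(), which is likewise cluster-scoped.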
@@ -37,6 +39,10 @@ type NetworkingV1beta1Client struct { restClient rest.Interface } +func (c *NetworkingV1beta1Client) IPAddresses() IPAddressInterface { + return newIPAddresses(c) +} + func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface { return newIngresses(c, namespace) } @@ -45,6 +51,10 @@ func (c *NetworkingV1beta1Client) IngressClasses() IngressClassInterface { return newIngressClasses(c) } +func (c *NetworkingV1beta1Client) ServiceCIDRs() ServiceCIDRInterface { + return newServiceCIDRs(c) +} + // NewForConfig creates a new NetworkingV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). @@ -90,10 +100,10 @@ func New(c rest.Interface) *NetworkingV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := networkingv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go new file mode 100644 index 000000000..6ad1daf74 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + + networkingv1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. +// A group's client should implement this interface. +type ServiceCIDRsGetter interface { + ServiceCIDRs() ServiceCIDRInterface +} + +// ServiceCIDRInterface has methods to work with ServiceCIDR resources. +type ServiceCIDRInterface interface { + Create(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDR, opts v1.CreateOptions) (*networkingv1beta1.ServiceCIDR, error) + Update(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1beta1.ServiceCIDR, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, serviceCIDR *networkingv1beta1.ServiceCIDR, opts v1.UpdateOptions) (*networkingv1beta1.ServiceCIDR, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*networkingv1beta1.ServiceCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*networkingv1beta1.ServiceCIDRList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *networkingv1beta1.ServiceCIDR, err error) + Apply(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.ServiceCIDR, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, serviceCIDR *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *networkingv1beta1.ServiceCIDR, err error) + ServiceCIDRExpansion +} + +// serviceCIDRs implements ServiceCIDRInterface +type serviceCIDRs struct { + *gentype.ClientWithListAndApply[*networkingv1beta1.ServiceCIDR, *networkingv1beta1.ServiceCIDRList, *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration] +} + +// newServiceCIDRs returns a ServiceCIDRs +func newServiceCIDRs(c *NetworkingV1beta1Client) *serviceCIDRs { + return &serviceCIDRs{ + gentype.NewClientWithListAndApply[*networkingv1beta1.ServiceCIDR, *networkingv1beta1.ServiceCIDRList, *applyconfigurationsnetworkingv1beta1.ServiceCIDRApplyConfiguration]( + "servicecidrs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *networkingv1beta1.ServiceCIDR { return &networkingv1beta1.ServiceCIDR{} }, + func() *networkingv1beta1.ServiceCIDRList { return &networkingv1beta1.ServiceCIDRList{} }, + gentype.PrefersProtobuf[*networkingv1beta1.ServiceCIDR](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go deleted file mode 100644 index dea10cbad..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_node_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/node/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeNodeV1 struct { - *testing.Fake -} - -func (c *FakeNodeV1) RuntimeClasses() v1.RuntimeClassInterface { - return &FakeRuntimeClasses{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeNodeV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go deleted file mode 100644 index 35cfbcae4..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/fake/fake_runtimeclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/node/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - nodev1 "k8s.io/client-go/applyconfigurations/node/v1" - testing "k8s.io/client-go/testing" -) - -// FakeRuntimeClasses implements RuntimeClassInterface -type FakeRuntimeClasses struct { - Fake *FakeNodeV1 -} - -var runtimeclassesResource = v1.SchemeGroupVersion.WithResource("runtimeclasses") - -var runtimeclassesKind = v1.SchemeGroupVersion.WithKind("RuntimeClass") - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.RuntimeClass), err -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *FakeRuntimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1.RuntimeClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.RuntimeClassList{ListMeta: obj.(*v1.RuntimeClassList).ListMeta} - for _, item := range obj.(*v1.RuntimeClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.RuntimeClass), err -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.RuntimeClass), err -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &v1.RuntimeClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.RuntimeClassList{}) - return err -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.RuntimeClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. 
-func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.RuntimeClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go index 844f9fc70..3bde21171 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/node_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/node/v1" - "k8s.io/client-go/kubernetes/scheme" + nodev1 "k8s.io/api/node/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *NodeV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := nodev1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go index 5ec38b203..77311fab7 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1/runtimeclass.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/node/v1" + nodev1 "k8s.io/api/node/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - nodev1 "k8s.io/client-go/applyconfigurations/node/v1" + applyconfigurationsnodev1 "k8s.io/client-go/applyconfigurations/node/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. @@ -41,157 +38,34 @@ type RuntimeClassesGetter interface { // RuntimeClassInterface has methods to work with RuntimeClass resources. 
type RuntimeClassInterface interface { - Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (*v1.RuntimeClass, error) - Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (*v1.RuntimeClass, error) + Create(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts metav1.CreateOptions) (*nodev1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *nodev1.RuntimeClass, opts metav1.UpdateOptions) (*nodev1.RuntimeClass, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RuntimeClass, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.RuntimeClassList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*nodev1.RuntimeClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*nodev1.RuntimeClassList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) - Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *nodev1.RuntimeClass, err error) + Apply(ctx context.Context, runtimeClass *applyconfigurationsnodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *nodev1.RuntimeClass, err error) RuntimeClassExpansion } // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*nodev1.RuntimeClass, *nodev1.RuntimeClassList, *applyconfigurationsnodev1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*nodev1.RuntimeClass, *nodev1.RuntimeClassList, *applyconfigurationsnodev1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *nodev1.RuntimeClass { return &nodev1.RuntimeClass{} }, + func() *nodev1.RuntimeClassList { return &nodev1.RuntimeClassList{} }, + gentype.PrefersProtobuf[*nodev1.RuntimeClass](), + ), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.CreateOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1.RuntimeClass, opts metav1.UpdateOptions) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RuntimeClass, err error) { - result = &v1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1.RuntimeClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). 
- Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go deleted file mode 100644 index 21ab9de33..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeNodeV1alpha1 struct { - *testing.Fake -} - -func (c *FakeNodeV1alpha1) RuntimeClasses() v1alpha1.RuntimeClassInterface { - return &FakeRuntimeClasses{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeNodeV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go deleted file mode 100644 index 2ff7d3f97..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/node/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - nodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeRuntimeClasses implements RuntimeClassInterface -type FakeRuntimeClasses struct { - Fake *FakeNodeV1alpha1 -} - -var runtimeclassesResource = v1alpha1.SchemeGroupVersion.WithResource("runtimeclasses") - -var runtimeclassesKind = v1alpha1.SchemeGroupVersion.WithKind("RuntimeClass") - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1alpha1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RuntimeClass), err -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1alpha1.RuntimeClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.RuntimeClassList{ListMeta: obj.(*v1alpha1.RuntimeClassList).ListMeta} - for _, item := range obj.(*v1alpha1.RuntimeClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RuntimeClass), err -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1alpha1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RuntimeClass), err -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &v1alpha1.RuntimeClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.RuntimeClassList{}) - return err -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1alpha1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RuntimeClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RuntimeClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go index 2a197d58e..e47ef3548 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/node/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *NodeV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := nodev1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go index 039a7ace1..f9da4f07b 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/node/v1alpha1" + nodev1alpha1 "k8s.io/api/node/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - nodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" + applyconfigurationsnodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. @@ -41,157 +38,34 @@ type RuntimeClassesGetter interface { // RuntimeClassInterface has methods to work with RuntimeClass resources. 
type RuntimeClassInterface interface { - Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (*v1alpha1.RuntimeClass, error) - Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (*v1alpha1.RuntimeClass, error) + Create(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClass, opts v1.CreateOptions) (*nodev1alpha1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClass, opts v1.UpdateOptions) (*nodev1alpha1.RuntimeClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RuntimeClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*nodev1alpha1.RuntimeClass, error) + List(ctx context.Context, opts v1.ListOptions) (*nodev1alpha1.RuntimeClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) - Apply(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RuntimeClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nodev1alpha1.RuntimeClass, err error) + Apply(ctx context.Context, runtimeClass *applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *nodev1alpha1.RuntimeClass, err error) RuntimeClassExpansion } // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*nodev1alpha1.RuntimeClass, *nodev1alpha1.RuntimeClassList, *applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*nodev1alpha1.RuntimeClass, *nodev1alpha1.RuntimeClassList, *applyconfigurationsnodev1alpha1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *nodev1alpha1.RuntimeClass { return &nodev1alpha1.RuntimeClass{} }, + func() *nodev1alpha1.RuntimeClassList { return &nodev1alpha1.RuntimeClassList{} }, + gentype.PrefersProtobuf[*nodev1alpha1.RuntimeClass](), + ), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. 
-func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. 
-func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1alpha1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1alpha1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go deleted file mode 100644 index 36976ce54..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeNodeV1beta1 struct { - *testing.Fake -} - -func (c *FakeNodeV1beta1) RuntimeClasses() v1beta1.RuntimeClassInterface { - return &FakeRuntimeClasses{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *FakeNodeV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go deleted file mode 100644 index e6552f9ac..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/node/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - nodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeRuntimeClasses implements RuntimeClassInterface -type FakeRuntimeClasses struct { - Fake *FakeNodeV1beta1 -} - -var runtimeclassesResource = v1beta1.SchemeGroupVersion.WithResource("runtimeclasses") - -var runtimeclassesKind = v1beta1.SchemeGroupVersion.WithKind("RuntimeClass") - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *FakeRuntimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(runtimeclassesResource, name), &v1beta1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RuntimeClass), err -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *FakeRuntimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(runtimeclassesResource, runtimeclassesKind, opts), &v1beta1.RuntimeClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.RuntimeClassList{ListMeta: obj.(*v1beta1.RuntimeClassList).ListMeta} - for _, item := range obj.(*v1beta1.RuntimeClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *FakeRuntimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(runtimeclassesResource, opts)) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
-func (c *FakeRuntimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RuntimeClass), err -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *FakeRuntimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(runtimeclassesResource, runtimeClass), &v1beta1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RuntimeClass), err -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *FakeRuntimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(runtimeclassesResource, name, opts), &v1beta1.RuntimeClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRuntimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(runtimeclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.RuntimeClassList{}) - return err -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *FakeRuntimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, name, pt, data, subresources...), &v1beta1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RuntimeClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. -func (c *FakeRuntimeClasses) Apply(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(runtimeclassesResource, *name, types.ApplyPatchType, data), &v1beta1.RuntimeClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RuntimeClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go index 4f6802ffa..c7864a479 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/node/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + nodev1beta1 "k8s.io/api/node/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *NodeV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := nodev1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go index f8990adf1..18089defd 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/node/v1beta1" + nodev1beta1 "k8s.io/api/node/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - nodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" + applyconfigurationsnodev1beta1 "k8s.io/client-go/applyconfigurations/node/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RuntimeClassesGetter has a method to return a RuntimeClassInterface. @@ -41,157 +38,34 @@ type RuntimeClassesGetter interface { // RuntimeClassInterface has methods to work with RuntimeClass resources. 
type RuntimeClassInterface interface { - Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (*v1beta1.RuntimeClass, error) - Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (*v1beta1.RuntimeClass, error) + Create(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClass, opts v1.CreateOptions) (*nodev1beta1.RuntimeClass, error) + Update(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClass, opts v1.UpdateOptions) (*nodev1beta1.RuntimeClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RuntimeClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*nodev1beta1.RuntimeClass, error) + List(ctx context.Context, opts v1.ListOptions) (*nodev1beta1.RuntimeClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) - Apply(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RuntimeClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *nodev1beta1.RuntimeClass, err error) + Apply(ctx context.Context, runtimeClass *applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *nodev1beta1.RuntimeClass, err error) RuntimeClassExpansion } // runtimeClasses implements RuntimeClassInterface type runtimeClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*nodev1beta1.RuntimeClass, *nodev1beta1.RuntimeClassList, *applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration] } // newRuntimeClasses returns a RuntimeClasses func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { return &runtimeClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*nodev1beta1.RuntimeClass, *nodev1beta1.RuntimeClassList, *applyconfigurationsnodev1beta1.RuntimeClassApplyConfiguration]( + "runtimeclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *nodev1beta1.RuntimeClass { return &nodev1beta1.RuntimeClass{} }, + func() *nodev1beta1.RuntimeClassList { return &nodev1beta1.RuntimeClassList{} }, + gentype.PrefersProtobuf[*nodev1beta1.RuntimeClass](), + ), } } - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RuntimeClassList{} - err = c.client.Get(). 
- Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(runtimeClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied runtimeClass. 
-func (c *runtimeClasses) Apply(ctx context.Context, runtimeClass *nodev1beta1.RuntimeClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RuntimeClass, err error) { - if runtimeClass == nil { - return nil, fmt.Errorf("runtimeClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(runtimeClass) - if err != nil { - return nil, err - } - name := runtimeClass.Name - if name == nil { - return nil, fmt.Errorf("runtimeClass.Name must be provided to Apply") - } - result = &v1beta1.RuntimeClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("runtimeclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go index cd1aac9c2..6cf60806a 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go @@ -19,7 +19,9 @@ limitations under the License. package v1 import ( - rest "k8s.io/client-go/rest" + policyv1 "k8s.io/api/policy/v1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" ) // EvictionsGetter has a method to return a EvictionInterface. @@ -35,14 +37,19 @@ type EvictionInterface interface { // evictions implements EvictionInterface type evictions struct { - client rest.Interface - ns string + *gentype.Client[*policyv1.Eviction] } // newEvictions returns a Evictions func newEvictions(c *PolicyV1Client, namespace string) *evictions { return &evictions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*policyv1.Eviction]( + "evictions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *policyv1.Eviction { return &policyv1.Eviction{} }, + gentype.PrefersProtobuf[*policyv1.Eviction](), + ), } } diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go index 853187feb..2c7e95b72 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go @@ -28,7 +28,7 @@ type EvictionExpansion interface { } func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error { - return c.client.Post(). + return c.GetClient().Post(). AbsPath("/api/v1"). Namespace(eviction.Namespace). Resource("pods"). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction.go deleted file mode 100644 index a579067ce..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -// FakeEvictions implements EvictionInterface -type FakeEvictions struct { - Fake *FakePolicyV1 - ns string -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction_expansion.go deleted file mode 100644 index 1b6b4ade1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_eviction_expansion.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - policy "k8s.io/api/policy/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - core "k8s.io/client-go/testing" -) - -func (c *FakeEvictions) Evict(ctx context.Context, eviction *policy.Eviction) error { - action := core.CreateActionImpl{} - action.Verb = "create" - action.Namespace = c.ns - action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} - action.Subresource = "eviction" - action.Object = eviction - - _, err := c.Fake.Invokes(action, eviction) - return err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go deleted file mode 100644 index 7b5f51caf..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_poddisruptionbudget.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/policy/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - policyv1 "k8s.io/client-go/applyconfigurations/policy/v1" - testing "k8s.io/client-go/testing" -) - -// FakePodDisruptionBudgets implements PodDisruptionBudgetInterface -type FakePodDisruptionBudgets struct { - Fake *FakePolicyV1 - ns string -} - -var poddisruptionbudgetsResource = v1.SchemeGroupVersion.WithResource("poddisruptionbudgets") - -var poddisruptionbudgetsKind = v1.SchemeGroupVersion.WithKind("PodDisruptionBudget") - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1.PodDisruptionBudgetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PodDisruptionBudgetList{ListMeta: obj.(*v1.PodDisruptionBudgetList).ListMeta} - for _, item := range obj.(*v1.PodDisruptionBudgetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) - -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. 
-func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(poddisruptionbudgetsResource, c.ns, name, opts), &v1.PodDisruptionBudget{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PodDisruptionBudgetList{}) - return err -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data), &v1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodDisruptionBudget), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.PodDisruptionBudget), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_policy_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_policy_client.go deleted file mode 100644 index d5bb3d549..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/fake/fake_policy_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/policy/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakePolicyV1 struct { - *testing.Fake -} - -func (c *FakePolicyV1) Evictions(namespace string) v1.EvictionInterface { - return &FakeEvictions{c, namespace} -} - -func (c *FakePolicyV1) PodDisruptionBudgets(namespace string) v1.PodDisruptionBudgetInterface { - return &FakePodDisruptionBudgets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakePolicyV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go index 58db3acf9..d45ed21f5 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/poddisruptionbudget.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/policy/v1" + policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - policyv1 "k8s.io/client-go/applyconfigurations/policy/v1" + applyconfigurationspolicyv1 "k8s.io/client-go/applyconfigurations/policy/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. @@ -41,216 +38,38 @@ type PodDisruptionBudgetsGetter interface { // PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. type PodDisruptionBudgetInterface interface { - Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (*v1.PodDisruptionBudget, error) - Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) - UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (*v1.PodDisruptionBudget, error) + Create(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.CreateOptions) (*policyv1.PodDisruptionBudget, error) + Update(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1.PodDisruptionBudget, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudget, opts metav1.UpdateOptions) (*policyv1.PodDisruptionBudget, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodDisruptionBudget, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PodDisruptionBudgetList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*policyv1.PodDisruptionBudget, error) + List(ctx context.Context, opts metav1.ListOptions) (*policyv1.PodDisruptionBudgetList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) - Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) - ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *policyv1.PodDisruptionBudget, err error) + Apply(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *policyv1.PodDisruptionBudget, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *policyv1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } // podDisruptionBudgets implements PodDisruptionBudgetInterface type podDisruptionBudgets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*policyv1.PodDisruptionBudget, *policyv1.PodDisruptionBudgetList, *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration] } // newPodDisruptionBudgets returns a PodDisruptionBudgets func newPodDisruptionBudgets(c *PolicyV1Client, namespace string) *podDisruptionBudgets { return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*policyv1.PodDisruptionBudget, *policyv1.PodDisruptionBudgetList, *applyconfigurationspolicyv1.PodDisruptionBudgetApplyConfiguration]( + "poddisruptionbudgets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *policyv1.PodDisruptionBudget { return &policyv1.PodDisruptionBudget{} }, + func() *policyv1.PodDisruptionBudgetList { return &policyv1.PodDisruptionBudgetList{} }, + gentype.PrefersProtobuf[*policyv1.PodDisruptionBudget](), + ), } } - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodDisruptionBudgetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.CreateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. 
Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1.PodDisruptionBudget, opts metav1.UpdateOptions) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodDisruptionBudget, err error) { - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. -func (c *podDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1.PodDisruptionBudgetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - - result = &v1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go index 9bfd98aa9..8d84f460b 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/policy/v1" - "k8s.io/client-go/kubernetes/scheme" + policyv1 "k8s.io/api/policy/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *PolicyV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := policyv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go index 12e8e76ed..de4c35e76 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go @@ -19,7 +19,9 @@ limitations under the License. package v1beta1 import ( - rest "k8s.io/client-go/rest" + policyv1beta1 "k8s.io/api/policy/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" ) // EvictionsGetter has a method to return a EvictionInterface. 
@@ -35,14 +37,19 @@ type EvictionInterface interface { // evictions implements EvictionInterface type evictions struct { - client rest.Interface - ns string + *gentype.Client[*policyv1beta1.Eviction] } // newEvictions returns a Evictions func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions { return &evictions{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClient[*policyv1beta1.Eviction]( + "evictions", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *policyv1beta1.Eviction { return &policyv1beta1.Eviction{} }, + gentype.PrefersProtobuf[*policyv1beta1.Eviction](), + ), } } diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go index c003671f5..d7c28987c 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go @@ -28,7 +28,7 @@ type EvictionExpansion interface { } func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error { - return c.client.Post(). + return c.GetClient().Post(). AbsPath("/api/v1"). Namespace(eviction.Namespace). Resource("pods"). diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go deleted file mode 100644 index b8f6f3eae..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -// FakeEvictions implements EvictionInterface -type FakeEvictions struct { - Fake *FakePolicyV1beta1 - ns string -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go deleted file mode 100644 index f97522bb3..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - "context" - - policy "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/runtime/schema" - core "k8s.io/client-go/testing" -) - -func (c *FakeEvictions) Evict(ctx context.Context, eviction *policy.Eviction) error { - action := core.CreateActionImpl{} - action.Verb = "create" - action.Namespace = c.ns - action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} - action.Subresource = "eviction" - action.Object = eviction - - _, err := c.Fake.Invokes(action, eviction) - return err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go deleted file mode 100644 index bcee8e777..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakePodDisruptionBudgets implements PodDisruptionBudgetInterface -type FakePodDisruptionBudgets struct { - Fake *FakePolicyV1beta1 - ns string -} - -var poddisruptionbudgetsResource = v1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets") - -var poddisruptionbudgetsKind = v1beta1.SchemeGroupVersion.WithKind("PodDisruptionBudget") - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *FakePodDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *FakePodDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1beta1.PodDisruptionBudgetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.PodDisruptionBudgetList{ListMeta: obj.(*v1beta1.PodDisruptionBudgetList).ListMeta} - for _, item := range obj.(*v1beta1.PodDisruptionBudgetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *FakePodDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts)) - -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *FakePodDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *FakePodDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(poddisruptionbudgetsResource, c.ns, name, opts), &v1beta1.PodDisruptionBudget{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{}) - return err -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, pt, data, subresources...), &v1beta1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. -func (c *FakePodDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *FakePodDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.PodDisruptionBudget{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PodDisruptionBudget), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go deleted file mode 100644 index 90670b113..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakePolicyV1beta1 struct { - *testing.Fake -} - -func (c *FakePolicyV1beta1) Evictions(namespace string) v1beta1.EvictionInterface { - return &FakeEvictions{c, namespace} -} - -func (c *FakePolicyV1beta1) PodDisruptionBudgets(namespace string) v1beta1.PodDisruptionBudgetInterface { - return &FakePodDisruptionBudgets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakePolicyV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 168728992..00e044961 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/policy/v1beta1" + policyv1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" + applyconfigurationspolicyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. @@ -41,216 +38,38 @@ type PodDisruptionBudgetsGetter interface { // PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. type PodDisruptionBudgetInterface interface { - Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*v1beta1.PodDisruptionBudget, error) - Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) - UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error) + Create(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*policyv1beta1.PodDisruptionBudget, error) + Update(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*policyv1beta1.PodDisruptionBudget, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*policyv1beta1.PodDisruptionBudget, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*policyv1beta1.PodDisruptionBudget, error) + List(ctx context.Context, opts v1.ListOptions) (*policyv1beta1.PodDisruptionBudgetList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) - Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) - ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *policyv1beta1.PodDisruptionBudget, err error) + Apply(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *policyv1beta1.PodDisruptionBudget, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, podDisruptionBudget *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *policyv1beta1.PodDisruptionBudget, err error) PodDisruptionBudgetExpansion } // podDisruptionBudgets implements PodDisruptionBudgetInterface type podDisruptionBudgets struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*policyv1beta1.PodDisruptionBudget, *policyv1beta1.PodDisruptionBudgetList, *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration] } // newPodDisruptionBudgets returns a PodDisruptionBudgets func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets { return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*policyv1beta1.PodDisruptionBudget, *policyv1beta1.PodDisruptionBudgetList, *applyconfigurationspolicyv1beta1.PodDisruptionBudgetApplyConfiguration]( + "poddisruptionbudgets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *policyv1beta1.PodDisruptionBudget { return &policyv1beta1.PodDisruptionBudget{} }, + func() *policyv1beta1.PodDisruptionBudgetList { return &policyv1beta1.PodDisruptionBudgetList{} }, + gentype.PrefersProtobuf[*policyv1beta1.PodDisruptionBudget](), + ), } } - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podDisruptionBudget). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podDisruptionBudget. -func (c *podDisruptionBudgets) Apply(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). 
- Resource("poddisruptionbudgets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podDisruptionBudgets) ApplyStatus(ctx context.Context, podDisruptionBudget *policyv1beta1.PodDisruptionBudgetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodDisruptionBudget, err error) { - if podDisruptionBudget == nil { - return nil, fmt.Errorf("podDisruptionBudget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podDisruptionBudget) - if err != nil { - return nil, err - } - - name := podDisruptionBudget.Name - if name == nil { - return nil, fmt.Errorf("podDisruptionBudget.Name must be provided to Apply") - } - - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index fdb509321..d8e78627e 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + policyv1beta1 "k8s.io/api/policy/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -90,10 +90,10 @@ func New(c rest.Interface) *PolicyV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := policyv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index 000d737f0..cccad0487 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/rbac/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -41,157 +38,34 @@ type ClusterRolesGetter interface { // ClusterRoleInterface has methods to work with ClusterRole resources. 
type ClusterRoleInterface interface { - Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (*v1.ClusterRole, error) - Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (*v1.ClusterRole, error) + Create(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts metav1.CreateOptions) (*rbacv1.ClusterRole, error) + Update(ctx context.Context, clusterRole *rbacv1.ClusterRole, opts metav1.UpdateOptions) (*rbacv1.ClusterRole, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRole, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.ClusterRole, error) + List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.ClusterRoleList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) - Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRole, err error) + Apply(ctx context.Context, clusterRole *applyconfigurationsrbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.ClusterRole, err error) ClusterRoleExpansion } // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*rbacv1.ClusterRole, *rbacv1.ClusterRoleList, *applyconfigurationsrbacv1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*rbacv1.ClusterRole, *rbacv1.ClusterRoleList, *applyconfigurationsrbacv1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *rbacv1.ClusterRole { return &rbacv1.ClusterRole{} }, + func() *rbacv1.ClusterRoleList { return &rbacv1.ClusterRoleList{} }, + gentype.PrefersProtobuf[*rbacv1.ClusterRole](), + ), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). 
- VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index 31db43d98..4a75fdcb5 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/rbac/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -41,157 +38,34 @@ type ClusterRoleBindingsGetter interface { // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. type ClusterRoleBindingInterface interface { - Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (*v1.ClusterRoleBinding, error) - Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (*v1.ClusterRoleBinding, error) + Create(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts metav1.CreateOptions) (*rbacv1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBinding, opts metav1.UpdateOptions) (*rbacv1.ClusterRoleBinding, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRoleBinding, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.ClusterRoleBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.ClusterRoleBindingList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) - Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error) + Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.ClusterRoleBinding, err error) ClusterRoleBindingExpansion } // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*rbacv1.ClusterRoleBinding, *rbacv1.ClusterRoleBindingList, *applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a 
ClusterRoleBindings func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*rbacv1.ClusterRoleBinding, *rbacv1.ClusterRoleBindingList, *applyconfigurationsrbacv1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *rbacv1.ClusterRoleBinding { return &rbacv1.ClusterRoleBinding{} }, + func() *rbacv1.ClusterRoleBindingList { return &rbacv1.ClusterRoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1.ClusterRoleBinding](), + ), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). 
- Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go deleted file mode 100644 index 5add33ddf..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" - testing "k8s.io/client-go/testing" -) - -// FakeClusterRoles implements ClusterRoleInterface -type FakeClusterRoles struct { - Fake *FakeRbacV1 -} - -var clusterrolesResource = v1.SchemeGroupVersion.WithResource("clusterroles") - -var clusterrolesKind = v1.SchemeGroupVersion.WithKind("ClusterRole") - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *FakeClusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ClusterRole), err -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *FakeClusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1.ClusterRoleList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ClusterRoleList{ListMeta: obj.(*v1.ClusterRoleList).ListMeta} - for _, item := range obj.(*v1.ClusterRoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *FakeClusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ClusterRole), err -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ClusterRole), err -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &v1.ClusterRole{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ClusterRoleList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ClusterRole), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ClusterRole), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go deleted file mode 100644 index d42e93e65..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" - testing "k8s.io/client-go/testing" -) - -// FakeClusterRoleBindings implements ClusterRoleBindingInterface -type FakeClusterRoleBindings struct { - Fake *FakeRbacV1 -} - -var clusterrolebindingsResource = v1.SchemeGroupVersion.WithResource("clusterrolebindings") - -var clusterrolebindingsKind = v1.SchemeGroupVersion.WithKind("ClusterRoleBinding") - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ClusterRoleBinding), err -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *FakeClusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1.ClusterRoleBindingList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.ClusterRoleBindingList{ListMeta: obj.(*v1.ClusterRoleBindingList).ListMeta} - for _, item := range obj.(*v1.ClusterRoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ClusterRoleBinding), err -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ClusterRoleBinding), err -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. 
-func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &v1.ClusterRoleBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.ClusterRoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ClusterRoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1.ClusterRoleBinding), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go deleted file mode 100644 index 426fd70d6..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/rbac/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeRbacV1 struct { - *testing.Fake -} - -func (c *FakeRbacV1) ClusterRoles() v1.ClusterRoleInterface { - return &FakeClusterRoles{c} -} - -func (c *FakeRbacV1) ClusterRoleBindings() v1.ClusterRoleBindingInterface { - return &FakeClusterRoleBindings{c} -} - -func (c *FakeRbacV1) Roles(namespace string) v1.RoleInterface { - return &FakeRoles{c, namespace} -} - -func (c *FakeRbacV1) RoleBindings(namespace string) v1.RoleBindingInterface { - return &FakeRoleBindings{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeRbacV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go deleted file mode 100644 index a3bc5da66..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" - testing "k8s.io/client-go/testing" -) - -// FakeRoles implements RoleInterface -type FakeRoles struct { - Fake *FakeRbacV1 - ns string -} - -var rolesResource = v1.SchemeGroupVersion.WithResource("roles") - -var rolesKind = v1.SchemeGroupVersion.WithKind("Role") - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *FakeRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Role), err -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *FakeRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1.RoleList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.RoleList{ListMeta: obj.(*v1.RoleList).ListMeta} - for _, item := range obj.(*v1.RoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *FakeRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) - -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Role), err -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Role), err -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *FakeRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &v1.Role{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.RoleList{}) - return err -} - -// Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Role), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.Role), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go deleted file mode 100644 index eeb37e9db..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" - testing "k8s.io/client-go/testing" -) - -// FakeRoleBindings implements RoleBindingInterface -type FakeRoleBindings struct { - Fake *FakeRbacV1 - ns string -} - -var rolebindingsResource = v1.SchemeGroupVersion.WithResource("rolebindings") - -var rolebindingsKind = v1.SchemeGroupVersion.WithKind("RoleBinding") - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *FakeRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.RoleBinding), err -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *FakeRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1.RoleBindingList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.RoleBindingList{ListMeta: obj.(*v1.RoleBindingList).ListMeta} - for _, item := range obj.(*v1.RoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) - -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. 
-func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.RoleBinding), err -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.RoleBinding), err -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &v1.RoleBinding{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.RoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.RoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.RoleBinding), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go index a02f0357d..c586ee638 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/rbac/v1" - "k8s.io/client-go/kubernetes/scheme" + rbacv1 "k8s.io/api/rbac/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *RbacV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := rbacv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index 93810a3ff..c3a9ba135 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/rbac/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -41,168 +38,34 @@ type RolesGetter interface { // RoleInterface has methods to work with Role resources. 
type RoleInterface interface { - Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (*v1.Role, error) - Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (*v1.Role, error) + Create(ctx context.Context, role *rbacv1.Role, opts metav1.CreateOptions) (*rbacv1.Role, error) + Update(ctx context.Context, role *rbacv1.Role, opts metav1.UpdateOptions) (*rbacv1.Role, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Role, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.Role, error) + List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.RoleList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) - Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.Role, err error) + Apply(ctx context.Context, role *applyconfigurationsrbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.Role, err error) RoleExpansion } // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*rbacv1.Role, *rbacv1.RoleList, *applyconfigurationsrbacv1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*rbacv1.Role, *rbacv1.RoleList, *applyconfigurationsrbacv1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *rbacv1.Role { return &rbacv1.Role{} }, + func() *rbacv1.RoleList { return &rbacv1.RoleList{} }, + gentype.PrefersProtobuf[*rbacv1.Role](), + ), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). 
- Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *roles) Apply(ctx context.Context, role *rbacv1.RoleApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index 2ace93860..1f5a39490 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/rbac/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + applyconfigurationsrbacv1 "k8s.io/client-go/applyconfigurations/rbac/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -41,168 +38,34 @@ type RoleBindingsGetter interface { // RoleBindingInterface has methods to work with RoleBinding resources. type RoleBindingInterface interface { - Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (*v1.RoleBinding, error) - Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (*v1.RoleBinding, error) + Create(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts metav1.CreateOptions) (*rbacv1.RoleBinding, error) + Update(ctx context.Context, roleBinding *rbacv1.RoleBinding, opts metav1.UpdateOptions) (*rbacv1.RoleBinding, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RoleBinding, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleBindingList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*rbacv1.RoleBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*rbacv1.RoleBindingList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) - Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *rbacv1.RoleBinding, err error) + Apply(ctx context.Context, roleBinding *applyconfigurationsrbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *rbacv1.RoleBinding, err error) RoleBindingExpansion } // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*rbacv1.RoleBinding, *rbacv1.RoleBindingList, *applyconfigurationsrbacv1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*rbacv1.RoleBinding, *rbacv1.RoleBindingList, *applyconfigurationsrbacv1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *rbacv1.RoleBinding { return &rbacv1.RoleBinding{} }, + func() *rbacv1.RoleBindingList { return &rbacv1.RoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1.RoleBinding](), + ), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. 
-func (c *roleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. 
-func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (result *v1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index d6d30e99e..3874f9dee 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -41,157 +38,34 @@ type ClusterRolesGetter interface { // ClusterRoleInterface has methods to work with ClusterRole resources. 
type ClusterRoleInterface interface { - Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (*v1alpha1.ClusterRole, error) - Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (*v1alpha1.ClusterRole, error) + Create(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRole, opts v1.CreateOptions) (*rbacv1alpha1.ClusterRole, error) + Update(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRole, opts v1.UpdateOptions) (*rbacv1alpha1.ClusterRole, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRole, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.ClusterRole, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.ClusterRoleList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) - Apply(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRole, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.ClusterRole, err error) + Apply(ctx context.Context, clusterRole *applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.ClusterRole, err error) ClusterRoleExpansion } // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*rbacv1alpha1.ClusterRole, *rbacv1alpha1.ClusterRoleList, *applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*rbacv1alpha1.ClusterRole, *rbacv1alpha1.ClusterRoleList, *applyconfigurationsrbacv1alpha1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *rbacv1alpha1.ClusterRole { return &rbacv1alpha1.ClusterRole{} }, + func() *rbacv1alpha1.ClusterRoleList { return &rbacv1alpha1.ClusterRoleList{} }, + gentype.PrefersProtobuf[*rbacv1alpha1.ClusterRole](), + ), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. 
-func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1alpha1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 2eded92ac..434f875f8 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -41,157 +38,34 @@ type ClusterRoleBindingsGetter interface { // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
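Note: the rewrite in this file repeats for every typed client below. The hand-written Get/List/Watch/Create/Update/Delete/DeleteCollection/Patch/Apply bodies are deleted, and the resource struct instead embeds *gentype.ClientWithListAndApply, constructed (as the '+' lines show) from the resource name, the REST client, scheme.ParameterCodec, the namespace ("" for cluster-scoped resources such as ClusterRole), factory funcs for the object and list types, and gentype.PrefersProtobuf. The toy program below is only a self-contained analogy for that structure, not client-go's actual gentype API; every identifier in it is illustrative.

package main

import "fmt"

// objectList stands in for a typed List object.
type objectList[T any] struct{ Items []T }

// client carries the per-verb plumbing once, parameterized by object and list type.
type client[T any, L any] struct {
	resource string
	newObj   func() *T
	newList  func() *L
}

// Get is the only verb sketched here; List, Watch, Create, etc. would follow the same shape.
func (c *client[T, L]) Get(name string) *T {
	fmt.Printf("GET %s/%s\n", c.resource, name)
	return c.newObj()
}

type clusterRole struct{ Name string }

type clusterRoleList = objectList[clusterRole]

// clusterRoles only wires up the generic client instead of re-implementing each verb.
type clusterRoles struct {
	*client[clusterRole, clusterRoleList]
}

func main() {
	c := clusterRoles{&client[clusterRole, clusterRoleList]{
		resource: "clusterroles",
		newObj:   func() *clusterRole { return &clusterRole{} },
		newList:  func() *clusterRoleList { return &clusterRoleList{} },
	}}
	_ = c.Get("cluster-admin")
}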
type ClusterRoleBindingInterface interface { - Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (*v1alpha1.ClusterRoleBinding, error) - Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1alpha1.ClusterRoleBinding, error) + Create(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (*rbacv1alpha1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (*rbacv1alpha1.ClusterRoleBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.ClusterRoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.ClusterRoleBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) - Apply(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.ClusterRoleBinding, err error) + Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.ClusterRoleBinding, err error) ClusterRoleBindingExpansion } // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*rbacv1alpha1.ClusterRoleBinding, *rbacv1alpha1.ClusterRoleBindingList, *applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*rbacv1alpha1.ClusterRoleBinding, *rbacv1alpha1.ClusterRoleBindingList, *applyconfigurationsrbacv1alpha1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *rbacv1alpha1.ClusterRoleBinding { return &rbacv1alpha1.ClusterRoleBinding{} }, + func() *rbacv1alpha1.ClusterRoleBindingList { return &rbacv1alpha1.ClusterRoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1alpha1.ClusterRoleBinding](), + ), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go deleted file mode 100644 index 534a1990f..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
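Note: the entire kubernetes/typed/rbac/v1alpha1/fake package is dropped from vendor/ in the deletions that follow. Most likely `go mod vendor` pruned it because nothing in this module imports the typed rbac fakes after the client-go bump; upstream, the fakes still exist and are regenerated on top of the new gentype helpers. Test code usually reaches them through the aggregate fake clientset, whose call sites are unchanged. A minimal sketch, assuming a test-style program (names other than the client-go calls are illustrative):

package main

import (
	"context"
	"fmt"

	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// fake.NewSimpleClientset backs every typed fake, including RbacV1alpha1();
	// newer client-go releases also expose fake.NewClientset with apply support.
	cs := fake.NewSimpleClientset()
	cr := &rbacv1alpha1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	if _, err := cs.RbacV1alpha1().ClusterRoles().Create(context.TODO(), cr, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
	got, _ := cs.RbacV1alpha1().ClusterRoles().Get(context.TODO(), "demo", metav1.GetOptions{})
	fmt.Println(got.Name)
}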
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/rbac/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeClusterRoles implements ClusterRoleInterface -type FakeClusterRoles struct { - Fake *FakeRbacV1alpha1 -} - -var clusterrolesResource = v1alpha1.SchemeGroupVersion.WithResource("clusterroles") - -var clusterrolesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRole") - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRole), err -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1alpha1.ClusterRoleList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterRoleList{ListMeta: obj.(*v1alpha1.ClusterRoleList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterRoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRole), err -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRole), err -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &v1alpha1.ClusterRole{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1alpha1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRole), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1alpha1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRole), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go deleted file mode 100644 index 0a4359392..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/rbac/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeClusterRoleBindings implements ClusterRoleBindingInterface -type FakeClusterRoleBindings struct { - Fake *FakeRbacV1alpha1 -} - -var clusterrolebindingsResource = v1alpha1.SchemeGroupVersion.WithResource("clusterrolebindings") - -var clusterrolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterRoleBinding") - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1alpha1.ClusterRoleBindingList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterRoleBindingList{ListMeta: obj.(*v1alpha1.ClusterRoleBindingList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterRoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &v1alpha1.ClusterRoleBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1alpha1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1alpha1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterRoleBinding), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go deleted file mode 100644 index 3447e9be8..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeRbacV1alpha1 struct { - *testing.Fake -} - -func (c *FakeRbacV1alpha1) ClusterRoles() v1alpha1.ClusterRoleInterface { - return &FakeClusterRoles{c} -} - -func (c *FakeRbacV1alpha1) ClusterRoleBindings() v1alpha1.ClusterRoleBindingInterface { - return &FakeClusterRoleBindings{c} -} - -func (c *FakeRbacV1alpha1) Roles(namespace string) v1alpha1.RoleInterface { - return &FakeRoles{c, namespace} -} - -func (c *FakeRbacV1alpha1) RoleBindings(namespace string) v1alpha1.RoleBindingInterface { - return &FakeRoleBindings{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeRbacV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go deleted file mode 100644 index a0e28348a..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/rbac/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeRoles implements RoleInterface -type FakeRoles struct { - Fake *FakeRbacV1alpha1 - ns string -} - -var rolesResource = v1alpha1.SchemeGroupVersion.WithResource("roles") - -var rolesKind = v1alpha1.SchemeGroupVersion.WithKind("Role") - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1alpha1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Role), err -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1alpha1.RoleList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.RoleList{ListMeta: obj.(*v1alpha1.RoleList).ListMeta} - for _, item := range obj.(*v1alpha1.RoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) - -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Role), err -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Role), err -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &v1alpha1.Role{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.RoleList{}) - return err -} - -// Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Role), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Role), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go deleted file mode 100644 index 76649f5c2..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/rbac/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeRoleBindings implements RoleBindingInterface -type FakeRoleBindings struct { - Fake *FakeRbacV1alpha1 - ns string -} - -var rolebindingsResource = v1alpha1.SchemeGroupVersion.WithResource("rolebindings") - -var rolebindingsKind = v1alpha1.SchemeGroupVersion.WithKind("RoleBinding") - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RoleBinding), err -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1alpha1.RoleBindingList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.RoleBindingList{ListMeta: obj.(*v1alpha1.RoleBindingList).ListMeta} - for _, item := range obj.(*v1alpha1.RoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) - -} - -// Create takes the representation of a roleBinding and creates it. 
Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RoleBinding), err -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RoleBinding), err -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &v1alpha1.RoleBinding{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1alpha1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RoleBinding), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go index cc5b309e9..df46fc3aa 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *RbacV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := rbacv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index 43c16fde7..3a47f673e 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -41,168 +38,34 @@ type RolesGetter interface { // RoleInterface has methods to work with Role resources. 
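Note: two smaller changes ride along with the gentype migration. First, setConfigDefaults in rbac_client.go now builds its negotiated serializer via rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs) instead of scheme.Codecs.WithoutConversion(); as I read it, this gives generated clients a codec factory derived from the clientset scheme so newer wire encodings can be negotiated, while behaving like the old factory otherwise. Second, for namespaced resources such as Role and RoleBinding, the namespace is threaded through the gentype constructor (fourth argument) rather than stored in a separate ns field. The sketch below simply mirrors the new setConfigDefaults hunk above in readable form; the package wrapper and function name are illustrative.

package main

import (
	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
	scheme "k8s.io/client-go/kubernetes/scheme"
	rest "k8s.io/client-go/rest"
)

func setRbacV1alpha1Defaults(config *rest.Config) error {
	gv := rbacv1alpha1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	// Replaces scheme.Codecs.WithoutConversion(): the codec factory is now
	// built specifically for generated clients from the clientset scheme.
	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return nil
}

func main() {
	_ = setRbacV1alpha1Defaults(&rest.Config{Host: "https://example.invalid"})
}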
type RoleInterface interface { - Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (*v1alpha1.Role, error) - Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (*v1alpha1.Role, error) + Create(ctx context.Context, role *rbacv1alpha1.Role, opts v1.CreateOptions) (*rbacv1alpha1.Role, error) + Update(ctx context.Context, role *rbacv1alpha1.Role, opts v1.UpdateOptions) (*rbacv1alpha1.Role, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Role, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.Role, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.RoleList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) - Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Role, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.Role, err error) + Apply(ctx context.Context, role *applyconfigurationsrbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.Role, err error) RoleExpansion } // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*rbacv1alpha1.Role, *rbacv1alpha1.RoleList, *applyconfigurationsrbacv1alpha1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1alpha1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*rbacv1alpha1.Role, *rbacv1alpha1.RoleList, *applyconfigurationsrbacv1alpha1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *rbacv1alpha1.Role { return &rbacv1alpha1.Role{} }, + func() *rbacv1alpha1.RoleList { return &rbacv1alpha1.RoleList{} }, + gentype.PrefersProtobuf[*rbacv1alpha1.Role](), + ), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. 
-func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) { - result = &v1alpha1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *roles) Apply(ctx context.Context, role *rbacv1alpha1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1alpha1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index 3129c9b4e..a6293171d 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + applyconfigurationsrbacv1alpha1 "k8s.io/client-go/applyconfigurations/rbac/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -41,168 +38,34 @@ type RoleBindingsGetter interface { // RoleBindingInterface has methods to work with RoleBinding resources. type RoleBindingInterface interface { - Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (*v1alpha1.RoleBinding, error) - Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (*v1alpha1.RoleBinding, error) + Create(ctx context.Context, roleBinding *rbacv1alpha1.RoleBinding, opts v1.CreateOptions) (*rbacv1alpha1.RoleBinding, error) + Update(ctx context.Context, roleBinding *rbacv1alpha1.RoleBinding, opts v1.UpdateOptions) (*rbacv1alpha1.RoleBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RoleBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1alpha1.RoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1alpha1.RoleBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) - Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1alpha1.RoleBinding, err error) + Apply(ctx context.Context, roleBinding *applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1alpha1.RoleBinding, err error) RoleBindingExpansion } // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*rbacv1alpha1.RoleBinding, *rbacv1alpha1.RoleBindingList, *applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*rbacv1alpha1.RoleBinding, 
*rbacv1alpha1.RoleBindingList, *applyconfigurationsrbacv1alpha1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *rbacv1alpha1.RoleBinding { return &rbacv1alpha1.RoleBinding{} }, + func() *rbacv1alpha1.RoleBindingList { return &rbacv1alpha1.RoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1alpha1.RoleBinding](), + ), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) { - result = &v1alpha1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1alpha1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1alpha1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index a3d67f031..92388f2f1 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/rbac/v1beta1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRolesGetter has a method to return a ClusterRoleInterface. @@ -41,157 +38,34 @@ type ClusterRolesGetter interface { // ClusterRoleInterface has methods to work with ClusterRole resources. 
type ClusterRoleInterface interface { - Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (*v1beta1.ClusterRole, error) - Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (*v1beta1.ClusterRole, error) + Create(ctx context.Context, clusterRole *rbacv1beta1.ClusterRole, opts v1.CreateOptions) (*rbacv1beta1.ClusterRole, error) + Update(ctx context.Context, clusterRole *rbacv1beta1.ClusterRole, opts v1.UpdateOptions) (*rbacv1beta1.ClusterRole, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRole, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.ClusterRole, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.ClusterRoleList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) - Apply(ctx context.Context, clusterRole *rbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRole, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.ClusterRole, err error) + Apply(ctx context.Context, clusterRole *applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.ClusterRole, err error) ClusterRoleExpansion } // clusterRoles implements ClusterRoleInterface type clusterRoles struct { - client rest.Interface + *gentype.ClientWithListAndApply[*rbacv1beta1.ClusterRole, *rbacv1beta1.ClusterRoleList, *applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration] } // newClusterRoles returns a ClusterRoles func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { return &clusterRoles{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*rbacv1beta1.ClusterRole, *rbacv1beta1.ClusterRoleList, *applyconfigurationsrbacv1beta1.ClusterRoleApplyConfiguration]( + "clusterroles", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *rbacv1beta1.ClusterRole { return &rbacv1beta1.ClusterRole{} }, + func() *rbacv1beta1.ClusterRoleList { return &rbacv1beta1.ClusterRoleList{} }, + gentype.PrefersProtobuf[*rbacv1beta1.ClusterRole](), + ), } } - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRole). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) { - result = &v1beta1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. 
-func (c *clusterRoles) Apply(ctx context.Context, clusterRole *rbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - result = &v1beta1.ClusterRole{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterroles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index ae39cbb9a..beb50f7b7 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/rbac/v1beta1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. @@ -41,157 +38,34 @@ type ClusterRoleBindingsGetter interface { // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
type ClusterRoleBindingInterface interface { - Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (*v1beta1.ClusterRoleBinding, error) - Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1beta1.ClusterRoleBinding, error) + Create(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, opts v1.CreateOptions) (*rbacv1beta1.ClusterRoleBinding, error) + Update(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (*rbacv1beta1.ClusterRoleBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRoleBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.ClusterRoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.ClusterRoleBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) - Apply(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.ClusterRoleBinding, err error) + Apply(ctx context.Context, clusterRoleBinding *applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.ClusterRoleBinding, err error) ClusterRoleBindingExpansion } // clusterRoleBindings implements ClusterRoleBindingInterface type clusterRoleBindings struct { - client rest.Interface + *gentype.ClientWithListAndApply[*rbacv1beta1.ClusterRoleBinding, *rbacv1beta1.ClusterRoleBindingList, *applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration] } // newClusterRoleBindings returns a ClusterRoleBindings func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { return &clusterRoleBindings{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*rbacv1beta1.ClusterRoleBinding, *rbacv1beta1.ClusterRoleBindingList, *applyconfigurationsrbacv1beta1.ClusterRoleBindingApplyConfiguration]( + "clusterrolebindings", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *rbacv1beta1.ClusterRoleBinding { return &rbacv1beta1.ClusterRoleBinding{} }, + func() *rbacv1beta1.ClusterRoleBindingList { return &rbacv1beta1.ClusterRoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1beta1.ClusterRoleBinding](), + ), } } - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. 
-func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterRoleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *clusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - result = &v1beta1.ClusterRoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Resource("clusterrolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go deleted file mode 100644 index 2a94a4315..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/rbac/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeClusterRoles implements ClusterRoleInterface -type FakeClusterRoles struct { - Fake *FakeRbacV1beta1 -} - -var clusterrolesResource = v1beta1.SchemeGroupVersion.WithResource("clusterroles") - -var clusterrolesKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRole") - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *FakeClusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1beta1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRole), err -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *FakeClusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1beta1.ClusterRoleList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ClusterRoleList{ListMeta: obj.(*v1beta1.ClusterRoleList).ListMeta} - for _, item := range obj.(*v1beta1.ClusterRoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *FakeClusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRole), err -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *FakeClusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRole), err -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewRootDeleteActionWithOptions(clusterrolesResource, name, opts), &v1beta1.ClusterRole{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *FakeClusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, pt, data, subresources...), &v1beta1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRole), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRole. -func (c *FakeClusterRoles) Apply(ctx context.Context, clusterRole *rbacv1beta1.ClusterRoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRole, err error) { - if clusterRole == nil { - return nil, fmt.Errorf("clusterRole provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRole) - if err != nil { - return nil, err - } - name := clusterRole.Name - if name == nil { - return nil, fmt.Errorf("clusterRole.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, *name, types.ApplyPatchType, data), &v1beta1.ClusterRole{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRole), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go deleted file mode 100644 index c9fd7c0cd..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/rbac/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeClusterRoleBindings implements ClusterRoleBindingInterface -type FakeClusterRoleBindings struct { - Fake *FakeRbacV1beta1 -} - -var clusterrolebindingsResource = v1beta1.SchemeGroupVersion.WithResource("clusterrolebindings") - -var clusterrolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("ClusterRoleBinding") - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *FakeClusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *FakeClusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1beta1.ClusterRoleBindingList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ClusterRoleBindingList{ListMeta: obj.(*v1beta1.ClusterRoleBindingList).ListMeta} - for _, item := range obj.(*v1beta1.ClusterRoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *FakeClusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *FakeClusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *FakeClusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clusterrolebindingsResource, name, opts), &v1beta1.ClusterRoleBinding{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *FakeClusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, pt, data, subresources...), &v1beta1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied clusterRoleBinding. -func (c *FakeClusterRoleBindings) Apply(ctx context.Context, clusterRoleBinding *rbacv1beta1.ClusterRoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ClusterRoleBinding, err error) { - if clusterRoleBinding == nil { - return nil, fmt.Errorf("clusterRoleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(clusterRoleBinding) - if err != nil { - return nil, err - } - name := clusterRoleBinding.Name - if name == nil { - return nil, fmt.Errorf("clusterRoleBinding.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, *name, types.ApplyPatchType, data), &v1beta1.ClusterRoleBinding{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ClusterRoleBinding), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go deleted file mode 100644 index bdbc246b7..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeRbacV1beta1 struct { - *testing.Fake -} - -func (c *FakeRbacV1beta1) ClusterRoles() v1beta1.ClusterRoleInterface { - return &FakeClusterRoles{c} -} - -func (c *FakeRbacV1beta1) ClusterRoleBindings() v1beta1.ClusterRoleBindingInterface { - return &FakeClusterRoleBindings{c} -} - -func (c *FakeRbacV1beta1) Roles(namespace string) v1beta1.RoleInterface { - return &FakeRoles{c, namespace} -} - -func (c *FakeRbacV1beta1) RoleBindings(namespace string) v1beta1.RoleBindingInterface { - return &FakeRoleBindings{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeRbacV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go deleted file mode 100644 index 4158cf1d5..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/rbac/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeRoles implements RoleInterface -type FakeRoles struct { - Fake *FakeRbacV1beta1 - ns string -} - -var rolesResource = v1beta1.SchemeGroupVersion.WithResource("roles") - -var rolesKind = v1beta1.SchemeGroupVersion.WithKind("Role") - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *FakeRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1beta1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Role), err -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *FakeRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1beta1.RoleList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.RoleList{ListMeta: obj.(*v1beta1.RoleList).ListMeta} - for _, item := range obj.(*v1beta1.RoleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *FakeRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) - -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1beta1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Role), err -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *FakeRoles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1beta1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Role), err -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *FakeRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolesResource, c.ns, name, opts), &v1beta1.Role{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.RoleList{}) - return err -} - -// Patch applies the patch and returns the patched role. -func (c *FakeRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, pt, data, subresources...), &v1beta1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Role), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *FakeRoles) Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.Role{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.Role), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go deleted file mode 100644 index 4616f0fd1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/rbac/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeRoleBindings implements RoleBindingInterface -type FakeRoleBindings struct { - Fake *FakeRbacV1beta1 - ns string -} - -var rolebindingsResource = v1beta1.SchemeGroupVersion.WithResource("rolebindings") - -var rolebindingsKind = v1beta1.SchemeGroupVersion.WithKind("RoleBinding") - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *FakeRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RoleBinding), err -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *FakeRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1beta1.RoleBindingList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.RoleBindingList{ListMeta: obj.(*v1beta1.RoleBindingList).ListMeta} - for _, item := range obj.(*v1beta1.RoleBindingList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *FakeRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) - -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. 
-func (c *FakeRoleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RoleBinding), err -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *FakeRoleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RoleBinding), err -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *FakeRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(rolebindingsResource, c.ns, name, opts), &v1beta1.RoleBinding{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.RoleBindingList{}) - return err -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *FakeRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, pt, data, subresources...), &v1beta1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RoleBinding), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *FakeRoleBindings) Apply(ctx context.Context, roleBinding *rbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.RoleBinding{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.RoleBinding), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go index 8dac5c1d4..5739bb289 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -100,10 +100,10 @@ func New(c rest.Interface) *RbacV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := rbacv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index e789e42fe..700fc6d22 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/rbac/v1beta1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RolesGetter has a method to return a RoleInterface. @@ -41,168 +38,34 @@ type RolesGetter interface { // RoleInterface has methods to work with Role resources. 
type RoleInterface interface { - Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (*v1beta1.Role, error) - Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (*v1beta1.Role, error) + Create(ctx context.Context, role *rbacv1beta1.Role, opts v1.CreateOptions) (*rbacv1beta1.Role, error) + Update(ctx context.Context, role *rbacv1beta1.Role, opts v1.UpdateOptions) (*rbacv1beta1.Role, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Role, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.Role, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.RoleList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) - Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Role, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.Role, err error) + Apply(ctx context.Context, role *applyconfigurationsrbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.Role, err error) RoleExpansion } // roles implements RoleInterface type roles struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*rbacv1beta1.Role, *rbacv1beta1.RoleList, *applyconfigurationsrbacv1beta1.RoleApplyConfiguration] } // newRoles returns a Roles func newRoles(c *RbacV1beta1Client, namespace string) *roles { return &roles{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*rbacv1beta1.Role, *rbacv1beta1.RoleList, *applyconfigurationsrbacv1beta1.RoleApplyConfiguration]( + "roles", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *rbacv1beta1.Role { return &rbacv1beta1.Role{} }, + func() *rbacv1beta1.RoleList { return &rbacv1beta1.RoleList{} }, + gentype.PrefersProtobuf[*rbacv1beta1.Role](), + ), } } - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. 
-func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(role). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) { - result = &v1beta1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied role. -func (c *roles) Apply(ctx context.Context, role *rbacv1beta1.RoleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Role, err error) { - if role == nil { - return nil, fmt.Errorf("role provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(role) - if err != nil { - return nil, err - } - name := role.Name - if name == nil { - return nil, fmt.Errorf("role.Name must be provided to Apply") - } - result = &v1beta1.Role{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("roles"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 1461ba3b6..0f423a0d9 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/rbac/v1beta1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + applyconfigurationsrbacv1beta1 "k8s.io/client-go/applyconfigurations/rbac/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // RoleBindingsGetter has a method to return a RoleBindingInterface. @@ -41,168 +38,34 @@ type RoleBindingsGetter interface { // RoleBindingInterface has methods to work with RoleBinding resources. type RoleBindingInterface interface { - Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (*v1beta1.RoleBinding, error) - Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (*v1beta1.RoleBinding, error) + Create(ctx context.Context, roleBinding *rbacv1beta1.RoleBinding, opts v1.CreateOptions) (*rbacv1beta1.RoleBinding, error) + Update(ctx context.Context, roleBinding *rbacv1beta1.RoleBinding, opts v1.UpdateOptions) (*rbacv1beta1.RoleBinding, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RoleBinding, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleBindingList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*rbacv1beta1.RoleBinding, error) + List(ctx context.Context, opts v1.ListOptions) (*rbacv1beta1.RoleBindingList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) - Apply(ctx context.Context, roleBinding *rbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RoleBinding, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *rbacv1beta1.RoleBinding, err error) + Apply(ctx context.Context, roleBinding *applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *rbacv1beta1.RoleBinding, err error) RoleBindingExpansion } // roleBindings implements RoleBindingInterface type roleBindings struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*rbacv1beta1.RoleBinding, *rbacv1beta1.RoleBindingList, *applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration] } // newRoleBindings returns a RoleBindings func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { return &roleBindings{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*rbacv1beta1.RoleBinding, *rbacv1beta1.RoleBindingList, 
*applyconfigurationsrbacv1beta1.RoleBindingApplyConfiguration]( + "rolebindings", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *rbacv1beta1.RoleBinding { return &rbacv1beta1.RoleBinding{} }, + func() *rbacv1beta1.RoleBindingList { return &rbacv1beta1.RoleBindingList{} }, + gentype.PrefersProtobuf[*rbacv1beta1.RoleBinding](), + ), } } - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(roleBinding). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) { - result = &v1beta1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied roleBinding. -func (c *roleBindings) Apply(ctx context.Context, roleBinding *rbacv1beta1.RoleBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.RoleBinding, err error) { - if roleBinding == nil { - return nil, fmt.Errorf("roleBinding provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(roleBinding) - if err != nil { - return nil, err - } - name := roleBinding.Name - if name == nil { - return nil, fmt.Errorf("roleBinding.Name must be provided to Apply") - } - result = &v1beta1.RoleBinding{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("rolebindings"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go deleted file mode 100644 index 54882f817..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakePodSchedulingContexts implements PodSchedulingContextInterface -type FakePodSchedulingContexts struct { - Fake *FakeResourceV1alpha2 - ns string -} - -var podschedulingcontextsResource = v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts") - -var podschedulingcontextsKind = v1alpha2.SchemeGroupVersion.WithKind("PodSchedulingContext") - -// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. -func (c *FakePodSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(podschedulingcontextsResource, c.ns, name), &v1alpha2.PodSchedulingContext{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.PodSchedulingContext), err -} - -// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. -func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(podschedulingcontextsResource, podschedulingcontextsKind, c.ns, opts), &v1alpha2.PodSchedulingContextList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.PodSchedulingContextList{ListMeta: obj.(*v1alpha2.PodSchedulingContextList).ListMeta} - for _, item := range obj.(*v1alpha2.PodSchedulingContextList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podSchedulingContexts. -func (c *FakePodSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(podschedulingcontextsResource, c.ns, opts)) - -} - -// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *FakePodSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(podschedulingcontextsResource, c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.PodSchedulingContext), err -} - -// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *FakePodSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(podschedulingcontextsResource, c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.PodSchedulingContext), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(podschedulingcontextsResource, "status", c.ns, podSchedulingContext), &v1alpha2.PodSchedulingContext{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.PodSchedulingContext), err -} - -// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. -func (c *FakePodSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podschedulingcontextsResource, c.ns, name, opts), &v1alpha2.PodSchedulingContext{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(podschedulingcontextsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.PodSchedulingContextList{}) - return err -} - -// Patch applies the patch and returns the patched podSchedulingContext. -func (c *FakePodSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, name, pt, data, subresources...), &v1alpha2.PodSchedulingContext{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.PodSchedulingContext), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. -func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.PodSchedulingContext{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.PodSchedulingContext), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.PodSchedulingContext{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.PodSchedulingContext), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go deleted file mode 100644 index 6f69d0fa7..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resource_client.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
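Aside (illustrative note, not part of the vendored diff): the fake clients being deleted in this group all follow the same shape, namely each method builds a testing action (NewGetAction, NewListAction, NewPatchSubresourceAction, and so on) and routes it through c.Fake.Invokes, which the fake clientset answers from its in-memory object tracker. That pattern is unchanged for the API groups that remain in client-go. A minimal sketch against the still-present rbac/v1 fakes, written as a hypothetical test of my own (none of these names come from the diff):

    package example

    import (
        "context"
        "testing"

        rbacv1 "k8s.io/api/rbac/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func TestFakeClientPattern(t *testing.T) {
        // Seed the fake clientset's object tracker with one RoleBinding.
        cs := fake.NewSimpleClientset(&rbacv1.RoleBinding{
            ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
        })
        // Get is dispatched through Invokes, exactly like the deleted fakes above.
        got, err := cs.RbacV1().RoleBindings("default").Get(context.TODO(), "example", metav1.GetOptions{})
        if err != nil {
            t.Fatal(err)
        }
        if got.Name != "example" {
            t.Fatalf("unexpected name %q", got.Name)
        }
    }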
- -package fake - -import ( - v1alpha2 "k8s.io/client-go/kubernetes/typed/resource/v1alpha2" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeResourceV1alpha2 struct { - *testing.Fake -} - -func (c *FakeResourceV1alpha2) PodSchedulingContexts(namespace string) v1alpha2.PodSchedulingContextInterface { - return &FakePodSchedulingContexts{c, namespace} -} - -func (c *FakeResourceV1alpha2) ResourceClaims(namespace string) v1alpha2.ResourceClaimInterface { - return &FakeResourceClaims{c, namespace} -} - -func (c *FakeResourceV1alpha2) ResourceClaimParameters(namespace string) v1alpha2.ResourceClaimParametersInterface { - return &FakeResourceClaimParameters{c, namespace} -} - -func (c *FakeResourceV1alpha2) ResourceClaimTemplates(namespace string) v1alpha2.ResourceClaimTemplateInterface { - return &FakeResourceClaimTemplates{c, namespace} -} - -func (c *FakeResourceV1alpha2) ResourceClasses() v1alpha2.ResourceClassInterface { - return &FakeResourceClasses{c} -} - -func (c *FakeResourceV1alpha2) ResourceClassParameters(namespace string) v1alpha2.ResourceClassParametersInterface { - return &FakeResourceClassParameters{c, namespace} -} - -func (c *FakeResourceV1alpha2) ResourceSlices() v1alpha2.ResourceSliceInterface { - return &FakeResourceSlices{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeResourceV1alpha2) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go deleted file mode 100644 index 087e51f71..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakeResourceClaims implements ResourceClaimInterface -type FakeResourceClaims struct { - Fake *FakeResourceV1alpha2 - ns string -} - -var resourceclaimsResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaims") - -var resourceclaimsKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaim") - -// Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *FakeResourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewGetAction(resourceclaimsResource, c.ns, name), &v1alpha2.ResourceClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaim), err -} - -// List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *FakeResourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(resourceclaimsResource, resourceclaimsKind, c.ns, opts), &v1alpha2.ResourceClaimList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.ResourceClaimList{ListMeta: obj.(*v1alpha2.ResourceClaimList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClaimList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClaims. -func (c *FakeResourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(resourceclaimsResource, c.ns, opts)) - -} - -// Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *FakeResourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaim), err -} - -// Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *FakeResourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourceclaimsResource, c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaim), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(resourceclaimsResource, "status", c.ns, resourceClaim), &v1alpha2.ResourceClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaim), err -} - -// Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs. -func (c *FakeResourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimsResource, c.ns, name, opts), &v1alpha2.ResourceClaim{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeResourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourceclaimsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClaim. -func (c *FakeResourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaim), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. -func (c *FakeResourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { - if resourceClaim == nil { - return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClaim) - if err != nil { - return nil, err - } - name := resourceClaim.Name - if name == nil { - return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaim), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeResourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { - if resourceClaim == nil { - return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClaim) - if err != nil { - return nil, err - } - name := resourceClaim.Name - if name == nil { - return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha2.ResourceClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaim), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimparameters.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimparameters.go deleted file mode 100644 index da32b5cae..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimparameters.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakeResourceClaimParameters implements ResourceClaimParametersInterface -type FakeResourceClaimParameters struct { - Fake *FakeResourceV1alpha2 - ns string -} - -var resourceclaimparametersResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaimparameters") - -var resourceclaimparametersKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimParameters") - -// Get takes name of the resourceClaimParameters, and returns the corresponding resourceClaimParameters object, and an error if there is any. -func (c *FakeResourceClaimParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(resourceclaimparametersResource, c.ns, name), &v1alpha2.ResourceClaimParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimParameters), err -} - -// List takes label and field selectors, and returns the list of ResourceClaimParameters that match those selectors. -func (c *FakeResourceClaimParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimParametersList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(resourceclaimparametersResource, resourceclaimparametersKind, c.ns, opts), &v1alpha2.ResourceClaimParametersList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.ResourceClaimParametersList{ListMeta: obj.(*v1alpha2.ResourceClaimParametersList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClaimParametersList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClaimParameters. -func (c *FakeResourceClaimParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(resourceclaimparametersResource, c.ns, opts)) - -} - -// Create takes the representation of a resourceClaimParameters and creates it. Returns the server's representation of the resourceClaimParameters, and an error, if there is any. -func (c *FakeResourceClaimParameters) Create(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclaimparametersResource, c.ns, resourceClaimParameters), &v1alpha2.ResourceClaimParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimParameters), err -} - -// Update takes the representation of a resourceClaimParameters and updates it. Returns the server's representation of the resourceClaimParameters, and an error, if there is any. 
-func (c *FakeResourceClaimParameters) Update(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourceclaimparametersResource, c.ns, resourceClaimParameters), &v1alpha2.ResourceClaimParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimParameters), err -} - -// Delete takes name of the resourceClaimParameters and deletes it. Returns an error if one occurs. -func (c *FakeResourceClaimParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimparametersResource, c.ns, name, opts), &v1alpha2.ResourceClaimParameters{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceClaimParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourceclaimparametersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimParametersList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClaimParameters. -func (c *FakeResourceClaimParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimparametersResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaimParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimParameters), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimParameters. -func (c *FakeResourceClaimParameters) Apply(ctx context.Context, resourceClaimParameters *resourcev1alpha2.ResourceClaimParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - if resourceClaimParameters == nil { - return nil, fmt.Errorf("resourceClaimParameters provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClaimParameters) - if err != nil { - return nil, err - } - name := resourceClaimParameters.Name - if name == nil { - return nil, fmt.Errorf("resourceClaimParameters.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimparametersResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaimParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimParameters), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go deleted file mode 100644 index 2a1b4554e..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakeResourceClaimTemplates implements ResourceClaimTemplateInterface -type FakeResourceClaimTemplates struct { - Fake *FakeResourceV1alpha2 - ns string -} - -var resourceclaimtemplatesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates") - -var resourceclaimtemplatesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClaimTemplate") - -// Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. -func (c *FakeResourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(resourceclaimtemplatesResource, c.ns, name), &v1alpha2.ResourceClaimTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimTemplate), err -} - -// List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. -func (c *FakeResourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(resourceclaimtemplatesResource, resourceclaimtemplatesKind, c.ns, opts), &v1alpha2.ResourceClaimTemplateList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.ResourceClaimTemplateList{ListMeta: obj.(*v1alpha2.ResourceClaimTemplateList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClaimTemplateList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClaimTemplates. -func (c *FakeResourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(resourceclaimtemplatesResource, c.ns, opts)) - -} - -// Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *FakeResourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha2.ResourceClaimTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimTemplate), err -} - -// Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *FakeResourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourceclaimtemplatesResource, c.ns, resourceClaimTemplate), &v1alpha2.ResourceClaimTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimTemplate), err -} - -// Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs. -func (c *FakeResourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclaimtemplatesResource, c.ns, name, opts), &v1alpha2.ResourceClaimTemplate{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourceclaimtemplatesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClaimTemplateList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClaimTemplate. -func (c *FakeResourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClaimTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimTemplate), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *FakeResourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - if resourceClaimTemplate == nil { - return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClaimTemplate) - if err != nil { - return nil, err - } - name := resourceClaimTemplate.Name - if name == nil { - return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourceclaimtemplatesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClaimTemplate{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClaimTemplate), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go deleted file mode 100644 index 4d247c513..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakeResourceClasses implements ResourceClassInterface -type FakeResourceClasses struct { - Fake *FakeResourceV1alpha2 -} - -var resourceclassesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclasses") - -var resourceclassesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClass") - -// Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *FakeResourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(resourceclassesResource, name), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *FakeResourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(resourceclassesResource, resourceclassesKind, opts), &v1alpha2.ResourceClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.ResourceClassList{ListMeta: obj.(*v1alpha2.ResourceClassList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClasses. -func (c *FakeResourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. 
- InvokesWatch(testing.NewRootWatchAction(resourceclassesResource, opts)) -} - -// Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *FakeResourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(resourceclassesResource, resourceClass), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *FakeResourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(resourceclassesResource, resourceClass), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// Delete takes name of the resourceClass and deletes it. Returns an error if one occurs. -func (c *FakeResourceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(resourceclassesResource, name, opts), &v1alpha2.ResourceClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(resourceclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClassList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClass. -func (c *FakeResourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, name, pt, data, subresources...), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. -func (c *FakeResourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { - if resourceClass == nil { - return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClass) - if err != nil { - return nil, err - } - name := resourceClass.Name - if name == nil { - return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(resourceclassesResource, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclassparameters.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclassparameters.go deleted file mode 100644 index c11762963..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclassparameters.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakeResourceClassParameters implements ResourceClassParametersInterface -type FakeResourceClassParameters struct { - Fake *FakeResourceV1alpha2 - ns string -} - -var resourceclassparametersResource = v1alpha2.SchemeGroupVersion.WithResource("resourceclassparameters") - -var resourceclassparametersKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceClassParameters") - -// Get takes name of the resourceClassParameters, and returns the corresponding resourceClassParameters object, and an error if there is any. -func (c *FakeResourceClassParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClassParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(resourceclassparametersResource, c.ns, name), &v1alpha2.ResourceClassParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClassParameters), err -} - -// List takes label and field selectors, and returns the list of ResourceClassParameters that match those selectors. -func (c *FakeResourceClassParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassParametersList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(resourceclassparametersResource, resourceclassparametersKind, c.ns, opts), &v1alpha2.ResourceClassParametersList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.ResourceClassParametersList{ListMeta: obj.(*v1alpha2.ResourceClassParametersList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceClassParametersList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceClassParameters. -func (c *FakeResourceClassParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(resourceclassparametersResource, c.ns, opts)) - -} - -// Create takes the representation of a resourceClassParameters and creates it. Returns the server's representation of the resourceClassParameters, and an error, if there is any. -func (c *FakeResourceClassParameters) Create(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClassParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(resourceclassparametersResource, c.ns, resourceClassParameters), &v1alpha2.ResourceClassParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClassParameters), err -} - -// Update takes the representation of a resourceClassParameters and updates it. Returns the server's representation of the resourceClassParameters, and an error, if there is any. -func (c *FakeResourceClassParameters) Update(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClassParameters, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(resourceclassparametersResource, c.ns, resourceClassParameters), &v1alpha2.ResourceClassParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClassParameters), err -} - -// Delete takes name of the resourceClassParameters and deletes it. Returns an error if one occurs. -func (c *FakeResourceClassParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(resourceclassparametersResource, c.ns, name, opts), &v1alpha2.ResourceClassParameters{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceClassParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resourceclassparametersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceClassParametersList{}) - return err -} - -// Patch applies the patch and returns the patched resourceClassParameters. -func (c *FakeResourceClassParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClassParameters, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resourceclassparametersResource, c.ns, name, pt, data, subresources...), &v1alpha2.ResourceClassParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClassParameters), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClassParameters. -func (c *FakeResourceClassParameters) Apply(ctx context.Context, resourceClassParameters *resourcev1alpha2.ResourceClassParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClassParameters, err error) { - if resourceClassParameters == nil { - return nil, fmt.Errorf("resourceClassParameters provided to Apply must not be nil") - } - data, err := json.Marshal(resourceClassParameters) - if err != nil { - return nil, err - } - name := resourceClassParameters.Name - if name == nil { - return nil, fmt.Errorf("resourceClassParameters.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(resourceclassparametersResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha2.ResourceClassParameters{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceClassParameters), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceslice.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceslice.go deleted file mode 100644 index 325e729e9..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceslice.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - testing "k8s.io/client-go/testing" -) - -// FakeResourceSlices implements ResourceSliceInterface -type FakeResourceSlices struct { - Fake *FakeResourceV1alpha2 -} - -var resourceslicesResource = v1alpha2.SchemeGroupVersion.WithResource("resourceslices") - -var resourceslicesKind = v1alpha2.SchemeGroupVersion.WithKind("ResourceSlice") - -// Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any. -func (c *FakeResourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(resourceslicesResource, name), &v1alpha2.ResourceSlice{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceSlice), err -} - -// List takes label and field selectors, and returns the list of ResourceSlices that match those selectors. 
-func (c *FakeResourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceSliceList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(resourceslicesResource, resourceslicesKind, opts), &v1alpha2.ResourceSliceList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha2.ResourceSliceList{ListMeta: obj.(*v1alpha2.ResourceSliceList).ListMeta} - for _, item := range obj.(*v1alpha2.ResourceSliceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resourceSlices. -func (c *FakeResourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(resourceslicesResource, opts)) -} - -// Create takes the representation of a resourceSlice and creates it. Returns the server's representation of the resourceSlice, and an error, if there is any. -func (c *FakeResourceSlices) Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (result *v1alpha2.ResourceSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(resourceslicesResource, resourceSlice), &v1alpha2.ResourceSlice{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceSlice), err -} - -// Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any. -func (c *FakeResourceSlices) Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha2.ResourceSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(resourceslicesResource, resourceSlice), &v1alpha2.ResourceSlice{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceSlice), err -} - -// Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs. -func (c *FakeResourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(resourceslicesResource, name, opts), &v1alpha2.ResourceSlice{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(resourceslicesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha2.ResourceSliceList{}) - return err -} - -// Patch applies the patch and returns the patched resourceSlice. -func (c *FakeResourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(resourceslicesResource, name, pt, data, subresources...), &v1alpha2.ResourceSlice{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceSlice), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice. 
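Aside (illustrative note, not part of the vendored diff): the Apply and ApplyStatus methods removed throughout this group marshal a generated ApplyConfiguration and submit it as a server-side apply patch (types.ApplyPatchType) under a field manager. The same pattern remains available for the API groups that stay in client-go; a minimal sketch using the rbac/v1 apply configurations, where "example", "default" and "example-manager" are placeholders of mine rather than anything from the diff:

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
        "k8s.io/client-go/kubernetes"
    )

    func applyRoleBinding(ctx context.Context, cs kubernetes.Interface) error {
        // Declare only the fields we own; the server merges them via server-side apply.
        rb := rbacv1ac.RoleBinding("example", "default").
            WithRoleRef(rbacv1ac.RoleRef().
                WithAPIGroup("rbac.authorization.k8s.io").
                WithKind("ClusterRole").
                WithName("view")).
            WithSubjects(rbacv1ac.Subject().
                WithKind("ServiceAccount").
                WithName("default").
                WithNamespace("default"))
        _, err := cs.RbacV1().RoleBindings("default").Apply(ctx, rb,
            metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
        return err
    }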
-func (c *FakeResourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error) { - if resourceSlice == nil { - return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil") - } - data, err := json.Marshal(resourceSlice) - if err != nil { - return nil, err - } - name := resourceSlice.Name - if name == nil { - return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(resourceslicesResource, *name, types.ApplyPatchType, data), &v1alpha2.ResourceSlice{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha2.ResourceSlice), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go deleted file mode 100644 index d11410bb9..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/generated_expansion.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -type PodSchedulingContextExpansion interface{} - -type ResourceClaimExpansion interface{} - -type ResourceClaimParametersExpansion interface{} - -type ResourceClaimTemplateExpansion interface{} - -type ResourceClassExpansion interface{} - -type ResourceClassParametersExpansion interface{} - -type ResourceSliceExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go deleted file mode 100644 index 72e81a29e..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
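Aside (illustrative note, not part of the vendored diff): every file under typed/resource/v1alpha2 is deleted here because the newer client-go this vendor update tracks no longer ships the resource.k8s.io v1alpha2 group; to the best of my reading, the dynamic-resource-allocation APIs moved on to v1alpha3 (with v1beta1 following), and the classic PodSchedulingContext flow went away with that move. A minimal sketch of listing claims through the successor group, assuming the v1alpha3 typed client is present and treating the namespace as a placeholder:

    package example

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func listResourceClaims(ctx context.Context, cs kubernetes.Interface) error {
        // resource.k8s.io/v1alpha3 succeeds the v1alpha2 group removed in this diff.
        claims, err := cs.ResourceV1alpha3().ResourceClaims("default").List(ctx, metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, claim := range claims.Items {
            fmt.Println(claim.Name)
        }
        return nil
    }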
- -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. -// A group's client should implement this interface. -type PodSchedulingContextsGetter interface { - PodSchedulingContexts(namespace string) PodSchedulingContextInterface -} - -// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. -type PodSchedulingContextInterface interface { - Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (*v1alpha2.PodSchedulingContext, error) - Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) - UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (*v1alpha2.PodSchedulingContext, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.PodSchedulingContext, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.PodSchedulingContextList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) - Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) - ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) - PodSchedulingContextExpansion -} - -// podSchedulingContexts implements PodSchedulingContextInterface -type podSchedulingContexts struct { - client rest.Interface - ns string -} - -// newPodSchedulingContexts returns a PodSchedulingContexts -func newPodSchedulingContexts(c *ResourceV1alpha2Client, namespace string) *podSchedulingContexts { - return &podSchedulingContexts{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. -func (c *podSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. 
-func (c *podSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.PodSchedulingContextList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.PodSchedulingContextList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSchedulingContexts. -func (c *podSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *podSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSchedulingContext). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *podSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(podSchedulingContext.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSchedulingContext). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *podSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha2.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(podSchedulingContext.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(podSchedulingContext). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. -func (c *podSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *podSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("podschedulingcontexts"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched podSchedulingContext. -func (c *podSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.PodSchedulingContext, err error) { - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. -func (c *podSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *podSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha2.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - - result = &v1alpha2.PodSchedulingContext{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("podschedulingcontexts"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go deleted file mode 100644 index 8e258b3e1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resource_client.go +++ /dev/null @@ -1,137 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "net/http" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type ResourceV1alpha2Interface interface { - RESTClient() rest.Interface - PodSchedulingContextsGetter - ResourceClaimsGetter - ResourceClaimParametersGetter - ResourceClaimTemplatesGetter - ResourceClassesGetter - ResourceClassParametersGetter - ResourceSlicesGetter -} - -// ResourceV1alpha2Client is used to interact with features provided by the resource.k8s.io group. -type ResourceV1alpha2Client struct { - restClient rest.Interface -} - -func (c *ResourceV1alpha2Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { - return newPodSchedulingContexts(c, namespace) -} - -func (c *ResourceV1alpha2Client) ResourceClaims(namespace string) ResourceClaimInterface { - return newResourceClaims(c, namespace) -} - -func (c *ResourceV1alpha2Client) ResourceClaimParameters(namespace string) ResourceClaimParametersInterface { - return newResourceClaimParameters(c, namespace) -} - -func (c *ResourceV1alpha2Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { - return newResourceClaimTemplates(c, namespace) -} - -func (c *ResourceV1alpha2Client) ResourceClasses() ResourceClassInterface { - return newResourceClasses(c) -} - -func (c *ResourceV1alpha2Client) ResourceClassParameters(namespace string) ResourceClassParametersInterface { - return newResourceClassParameters(c, namespace) -} - -func (c *ResourceV1alpha2Client) ResourceSlices() ResourceSliceInterface { - return newResourceSlices(c) -} - -// NewForConfig creates a new ResourceV1alpha2Client for the given config. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*ResourceV1alpha2Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(&config) - if err != nil { - return nil, err - } - return NewForConfigAndClient(&config, httpClient) -} - -// NewForConfigAndClient creates a new ResourceV1alpha2Client for the given config and http client. -// Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha2Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientForConfigAndClient(&config, h) - if err != nil { - return nil, err - } - return &ResourceV1alpha2Client{client}, nil -} - -// NewForConfigOrDie creates a new ResourceV1alpha2Client for the given config and -// panics if there is an error in the config. 
-func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha2Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ResourceV1alpha2Client for the given RESTClient. -func New(c rest.Interface) *ResourceV1alpha2Client { - return &ResourceV1alpha2Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha2.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *ResourceV1alpha2Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go deleted file mode 100644 index cfb27c9db..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClaimsGetter has a method to return a ResourceClaimInterface. -// A group's client should implement this interface. -type ResourceClaimsGetter interface { - ResourceClaims(namespace string) ResourceClaimInterface -} - -// ResourceClaimInterface has methods to work with ResourceClaim resources. 
-type ResourceClaimInterface interface { - Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (*v1alpha2.ResourceClaim, error) - Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) - UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (*v1alpha2.ResourceClaim, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) - Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) - ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) - ResourceClaimExpansion -} - -// resourceClaims implements ResourceClaimInterface -type resourceClaims struct { - client rest.Interface - ns string -} - -// newResourceClaims returns a ResourceClaims -func newResourceClaims(c *ResourceV1alpha2Client, namespace string) *resourceClaims { - return &resourceClaims{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceClaim, and returns the corresponding resourceClaim object, and an error if there is any. -func (c *resourceClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClaims that match those selectors. -func (c *resourceClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClaims. -func (c *resourceClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClaim and creates it. Returns the server's representation of the resourceClaim, and an error, if there is any. 
-func (c *resourceClaims) Create(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.CreateOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaim). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClaim and updates it. Returns the server's representation of the resourceClaim, and an error, if there is any. -func (c *resourceClaims) Update(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(resourceClaim.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaim). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resourceClaims) UpdateStatus(ctx context.Context, resourceClaim *v1alpha2.ResourceClaim, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(resourceClaim.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaim). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClaim and deletes it. Returns an error if one occurs. -func (c *resourceClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaims"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaims"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClaim. -func (c *resourceClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaim, err error) { - result = &v1alpha2.ResourceClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourceclaims"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaim. 
-func (c *resourceClaims) Apply(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { - if resourceClaim == nil { - return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaim) - if err != nil { - return nil, err - } - name := resourceClaim.Name - if name == nil { - return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaims"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *resourceClaims) ApplyStatus(ctx context.Context, resourceClaim *resourcev1alpha2.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaim, err error) { - if resourceClaim == nil { - return nil, fmt.Errorf("resourceClaim provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaim) - if err != nil { - return nil, err - } - - name := resourceClaim.Name - if name == nil { - return nil, fmt.Errorf("resourceClaim.Name must be provided to Apply") - } - - result = &v1alpha2.ResourceClaim{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaims"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimparameters.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimparameters.go deleted file mode 100644 index d08afcb61..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimparameters.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClaimParametersGetter has a method to return a ResourceClaimParametersInterface. -// A group's client should implement this interface. 
-type ResourceClaimParametersGetter interface { - ResourceClaimParameters(namespace string) ResourceClaimParametersInterface -} - -// ResourceClaimParametersInterface has methods to work with ResourceClaimParameters resources. -type ResourceClaimParametersInterface interface { - Create(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.CreateOptions) (*v1alpha2.ResourceClaimParameters, error) - Update(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimParameters, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimParameters, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimParametersList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimParameters, err error) - Apply(ctx context.Context, resourceClaimParameters *resourcev1alpha2.ResourceClaimParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimParameters, err error) - ResourceClaimParametersExpansion -} - -// resourceClaimParameters implements ResourceClaimParametersInterface -type resourceClaimParameters struct { - client rest.Interface - ns string -} - -// newResourceClaimParameters returns a ResourceClaimParameters -func newResourceClaimParameters(c *ResourceV1alpha2Client, namespace string) *resourceClaimParameters { - return &resourceClaimParameters{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceClaimParameters, and returns the corresponding resourceClaimParameters object, and an error if there is any. -func (c *resourceClaimParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - result = &v1alpha2.ResourceClaimParameters{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClaimParameters that match those selectors. -func (c *resourceClaimParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimParametersList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClaimParametersList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClaimParameters. -func (c *resourceClaimParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a resourceClaimParameters and creates it. Returns the server's representation of the resourceClaimParameters, and an error, if there is any. -func (c *resourceClaimParameters) Create(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - result = &v1alpha2.ResourceClaimParameters{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaimParameters). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClaimParameters and updates it. Returns the server's representation of the resourceClaimParameters, and an error, if there is any. -func (c *resourceClaimParameters) Update(ctx context.Context, resourceClaimParameters *v1alpha2.ResourceClaimParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - result = &v1alpha2.ResourceClaimParameters{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - Name(resourceClaimParameters.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaimParameters). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClaimParameters and deletes it. Returns an error if one occurs. -func (c *resourceClaimParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClaimParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaimparameters"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClaimParameters. -func (c *resourceClaimParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimParameters, err error) { - result = &v1alpha2.ResourceClaimParameters{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourceclaimparameters"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimParameters. 
-func (c *resourceClaimParameters) Apply(ctx context.Context, resourceClaimParameters *resourcev1alpha2.ResourceClaimParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimParameters, err error) { - if resourceClaimParameters == nil { - return nil, fmt.Errorf("resourceClaimParameters provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaimParameters) - if err != nil { - return nil, err - } - name := resourceClaimParameters.Name - if name == nil { - return nil, fmt.Errorf("resourceClaimParameters.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClaimParameters{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaimparameters"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go deleted file mode 100644 index 3f4e32006..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface. -// A group's client should implement this interface. -type ResourceClaimTemplatesGetter interface { - ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface -} - -// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. 
-type ResourceClaimTemplateInterface interface { - Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (*v1alpha2.ResourceClaimTemplate, error) - Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (*v1alpha2.ResourceClaimTemplate, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClaimTemplate, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClaimTemplateList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) - Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) - ResourceClaimTemplateExpansion -} - -// resourceClaimTemplates implements ResourceClaimTemplateInterface -type resourceClaimTemplates struct { - client rest.Interface - ns string -} - -// newResourceClaimTemplates returns a ResourceClaimTemplates -func newResourceClaimTemplates(c *ResourceV1alpha2Client, namespace string) *resourceClaimTemplates { - return &resourceClaimTemplates{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceClaimTemplate, and returns the corresponding resourceClaimTemplate object, and an error if there is any. -func (c *resourceClaimTemplates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClaimTemplates that match those selectors. -func (c *resourceClaimTemplates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClaimTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClaimTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClaimTemplates. -func (c *resourceClaimTemplates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClaimTemplate and creates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. 
-func (c *resourceClaimTemplates) Create(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.CreateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaimTemplate). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClaimTemplate and updates it. Returns the server's representation of the resourceClaimTemplate, and an error, if there is any. -func (c *resourceClaimTemplates) Update(ctx context.Context, resourceClaimTemplate *v1alpha2.ResourceClaimTemplate, opts v1.UpdateOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(resourceClaimTemplate.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClaimTemplate). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClaimTemplate and deletes it. Returns an error if one occurs. -func (c *resourceClaimTemplates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClaimTemplates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClaimTemplate. -func (c *resourceClaimTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClaimTemplate, err error) { - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClaimTemplate. -func (c *resourceClaimTemplates) Apply(ctx context.Context, resourceClaimTemplate *resourcev1alpha2.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClaimTemplate, err error) { - if resourceClaimTemplate == nil { - return nil, fmt.Errorf("resourceClaimTemplate provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClaimTemplate) - if err != nil { - return nil, err - } - name := resourceClaimTemplate.Name - if name == nil { - return nil, fmt.Errorf("resourceClaimTemplate.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClaimTemplate{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclaimtemplates"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go deleted file mode 100644 index 95a4ac566..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClassesGetter has a method to return a ResourceClassInterface. -// A group's client should implement this interface. -type ResourceClassesGetter interface { - ResourceClasses() ResourceClassInterface -} - -// ResourceClassInterface has methods to work with ResourceClass resources. -type ResourceClassInterface interface { - Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (*v1alpha2.ResourceClass, error) - Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (*v1alpha2.ResourceClass, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) - Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) - ResourceClassExpansion -} - -// resourceClasses implements ResourceClassInterface -type resourceClasses struct { - client rest.Interface -} - -// newResourceClasses returns a ResourceClasses -func newResourceClasses(c *ResourceV1alpha2Client) *resourceClasses { - return &resourceClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the resourceClass, and returns the corresponding resourceClass object, and an error if there is any. -func (c *resourceClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Get(). - Resource("resourceclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClasses that match those selectors. -func (c *resourceClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClassList{} - err = c.client.Get(). - Resource("resourceclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClasses. -func (c *resourceClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("resourceclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClass and creates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Create(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.CreateOptions) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Post(). - Resource("resourceclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClass and updates it. Returns the server's representation of the resourceClass, and an error, if there is any. -func (c *resourceClasses) Update(ctx context.Context, resourceClass *v1alpha2.ResourceClass, opts v1.UpdateOptions) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Put(). - Resource("resourceclasses"). - Name(resourceClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClass and deletes it. Returns an error if one occurs. -func (c *resourceClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("resourceclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("resourceclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClass. -func (c *resourceClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClass, err error) { - result = &v1alpha2.ResourceClass{} - err = c.client.Patch(pt). - Resource("resourceclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClass. 
-func (c *resourceClasses) Apply(ctx context.Context, resourceClass *resourcev1alpha2.ResourceClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClass, err error) { - if resourceClass == nil { - return nil, fmt.Errorf("resourceClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClass) - if err != nil { - return nil, err - } - name := resourceClass.Name - if name == nil { - return nil, fmt.Errorf("resourceClass.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("resourceclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclassparameters.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclassparameters.go deleted file mode 100644 index 8ac9be078..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclassparameters.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceClassParametersGetter has a method to return a ResourceClassParametersInterface. -// A group's client should implement this interface. -type ResourceClassParametersGetter interface { - ResourceClassParameters(namespace string) ResourceClassParametersInterface -} - -// ResourceClassParametersInterface has methods to work with ResourceClassParameters resources. 
-type ResourceClassParametersInterface interface { - Create(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.CreateOptions) (*v1alpha2.ResourceClassParameters, error) - Update(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.UpdateOptions) (*v1alpha2.ResourceClassParameters, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceClassParameters, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceClassParametersList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClassParameters, err error) - Apply(ctx context.Context, resourceClassParameters *resourcev1alpha2.ResourceClassParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClassParameters, err error) - ResourceClassParametersExpansion -} - -// resourceClassParameters implements ResourceClassParametersInterface -type resourceClassParameters struct { - client rest.Interface - ns string -} - -// newResourceClassParameters returns a ResourceClassParameters -func newResourceClassParameters(c *ResourceV1alpha2Client, namespace string) *resourceClassParameters { - return &resourceClassParameters{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceClassParameters, and returns the corresponding resourceClassParameters object, and an error if there is any. -func (c *resourceClassParameters) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceClassParameters, err error) { - result = &v1alpha2.ResourceClassParameters{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclassparameters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceClassParameters that match those selectors. -func (c *resourceClassParameters) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceClassParametersList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceClassParametersList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourceclassparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceClassParameters. -func (c *resourceClassParameters) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourceclassparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceClassParameters and creates it. Returns the server's representation of the resourceClassParameters, and an error, if there is any. 
-func (c *resourceClassParameters) Create(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.CreateOptions) (result *v1alpha2.ResourceClassParameters, err error) { - result = &v1alpha2.ResourceClassParameters{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourceclassparameters"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClassParameters). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceClassParameters and updates it. Returns the server's representation of the resourceClassParameters, and an error, if there is any. -func (c *resourceClassParameters) Update(ctx context.Context, resourceClassParameters *v1alpha2.ResourceClassParameters, opts v1.UpdateOptions) (result *v1alpha2.ResourceClassParameters, err error) { - result = &v1alpha2.ResourceClassParameters{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourceclassparameters"). - Name(resourceClassParameters.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceClassParameters). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceClassParameters and deletes it. Returns an error if one occurs. -func (c *resourceClassParameters) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclassparameters"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceClassParameters) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("resourceclassparameters"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceClassParameters. -func (c *resourceClassParameters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceClassParameters, err error) { - result = &v1alpha2.ResourceClassParameters{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourceclassparameters"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceClassParameters. -func (c *resourceClassParameters) Apply(ctx context.Context, resourceClassParameters *resourcev1alpha2.ResourceClassParametersApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceClassParameters, err error) { - if resourceClassParameters == nil { - return nil, fmt.Errorf("resourceClassParameters provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceClassParameters) - if err != nil { - return nil, err - } - name := resourceClassParameters.Name - if name == nil { - return nil, fmt.Errorf("resourceClassParameters.Name must be provided to Apply") - } - result = &v1alpha2.ResourceClassParameters{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("resourceclassparameters"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceslice.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceslice.go deleted file mode 100644 index 302f370d5..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceslice.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha2 "k8s.io/api/resource/v1alpha2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceSlicesGetter has a method to return a ResourceSliceInterface. -// A group's client should implement this interface. -type ResourceSlicesGetter interface { - ResourceSlices() ResourceSliceInterface -} - -// ResourceSliceInterface has methods to work with ResourceSlice resources. -type ResourceSliceInterface interface { - Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (*v1alpha2.ResourceSlice, error) - Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (*v1alpha2.ResourceSlice, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha2.ResourceSlice, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha2.ResourceSliceList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error) - Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error) - ResourceSliceExpansion -} - -// resourceSlices implements ResourceSliceInterface -type resourceSlices struct { - client rest.Interface -} - -// newResourceSlices returns a ResourceSlices -func newResourceSlices(c *ResourceV1alpha2Client) *resourceSlices { - return &resourceSlices{ - client: c.RESTClient(), - } -} - -// Get takes name of the resourceSlice, and returns the corresponding resourceSlice object, and an error if there is any. -func (c *resourceSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha2.ResourceSlice, err error) { - result = &v1alpha2.ResourceSlice{} - err = c.client.Get(). - Resource("resourceslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceSlices that match those selectors. -func (c *resourceSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha2.ResourceSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha2.ResourceSliceList{} - err = c.client.Get(). - Resource("resourceslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceSlices. -func (c *resourceSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("resourceslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a resourceSlice and creates it. Returns the server's representation of the resourceSlice, and an error, if there is any. -func (c *resourceSlices) Create(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.CreateOptions) (result *v1alpha2.ResourceSlice, err error) { - result = &v1alpha2.ResourceSlice{} - err = c.client.Post(). - Resource("resourceslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a resourceSlice and updates it. Returns the server's representation of the resourceSlice, and an error, if there is any. -func (c *resourceSlices) Update(ctx context.Context, resourceSlice *v1alpha2.ResourceSlice, opts v1.UpdateOptions) (result *v1alpha2.ResourceSlice, err error) { - result = &v1alpha2.ResourceSlice{} - err = c.client.Put(). - Resource("resourceslices"). - Name(resourceSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(resourceSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the resourceSlice and deletes it. Returns an error if one occurs. -func (c *resourceSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("resourceslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("resourceslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched resourceSlice. -func (c *resourceSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha2.ResourceSlice, err error) { - result = &v1alpha2.ResourceSlice{} - err = c.client.Patch(pt). - Resource("resourceslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied resourceSlice. 
-func (c *resourceSlices) Apply(ctx context.Context, resourceSlice *resourcev1alpha2.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha2.ResourceSlice, err error) { - if resourceSlice == nil { - return nil, fmt.Errorf("resourceSlice provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(resourceSlice) - if err != nil { - return nil, err - } - name := resourceSlice.Name - if name == nil { - return nil, fmt.Errorf("resourceSlice.Name must be provided to Apply") - } - result = &v1alpha2.ResourceSlice{} - err = c.client.Patch(types.ApplyPatchType). - Resource("resourceslices"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go new file mode 100644 index 000000000..6cdf57c53 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + context "context" + + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// DeviceClassesGetter has a method to return a DeviceClassInterface. +// A group's client should implement this interface. +type DeviceClassesGetter interface { + DeviceClasses() DeviceClassInterface +} + +// DeviceClassInterface has methods to work with DeviceClass resources. 
+type DeviceClassInterface interface { + Create(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClass, opts v1.CreateOptions) (*resourcev1alpha3.DeviceClass, error) + Update(ctx context.Context, deviceClass *resourcev1alpha3.DeviceClass, opts v1.UpdateOptions) (*resourcev1alpha3.DeviceClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.DeviceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.DeviceClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.DeviceClass, err error) + Apply(ctx context.Context, deviceClass *applyconfigurationsresourcev1alpha3.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.DeviceClass, err error) + DeviceClassExpansion +} + +// deviceClasses implements DeviceClassInterface +type deviceClasses struct { + *gentype.ClientWithListAndApply[*resourcev1alpha3.DeviceClass, *resourcev1alpha3.DeviceClassList, *applyconfigurationsresourcev1alpha3.DeviceClassApplyConfiguration] +} + +// newDeviceClasses returns a DeviceClasses +func newDeviceClasses(c *ResourceV1alpha3Client) *deviceClasses { + return &deviceClasses{ + gentype.NewClientWithListAndApply[*resourcev1alpha3.DeviceClass, *resourcev1alpha3.DeviceClassList, *applyconfigurationsresourcev1alpha3.DeviceClassApplyConfiguration]( + "deviceclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *resourcev1alpha3.DeviceClass { return &resourcev1alpha3.DeviceClass{} }, + func() *resourcev1alpha3.DeviceClassList { return &resourcev1alpha3.DeviceClassList{} }, + gentype.PrefersProtobuf[*resourcev1alpha3.DeviceClass](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go new file mode 100644 index 000000000..fdb23fd37 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha3 diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go new file mode 100644 index 000000000..cd8862ea8 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +type DeviceClassExpansion interface{} + +type ResourceClaimExpansion interface{} + +type ResourceClaimTemplateExpansion interface{} + +type ResourceSliceExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go new file mode 100644 index 000000000..acc9b97c2 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go @@ -0,0 +1,122 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + http "net/http" + + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type ResourceV1alpha3Interface interface { + RESTClient() rest.Interface + DeviceClassesGetter + ResourceClaimsGetter + ResourceClaimTemplatesGetter + ResourceSlicesGetter +} + +// ResourceV1alpha3Client is used to interact with features provided by the resource.k8s.io group. +type ResourceV1alpha3Client struct { + restClient rest.Interface +} + +func (c *ResourceV1alpha3Client) DeviceClasses() DeviceClassInterface { + return newDeviceClasses(c) +} + +func (c *ResourceV1alpha3Client) ResourceClaims(namespace string) ResourceClaimInterface { + return newResourceClaims(c, namespace) +} + +func (c *ResourceV1alpha3Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { + return newResourceClaimTemplates(c, namespace) +} + +func (c *ResourceV1alpha3Client) ResourceSlices() ResourceSliceInterface { + return newResourceSlices(c) +} + +// NewForConfig creates a new ResourceV1alpha3Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ResourceV1alpha3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ResourceV1alpha3Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1alpha3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ResourceV1alpha3Client{client}, nil +} + +// NewForConfigOrDie creates a new ResourceV1alpha3Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ResourceV1alpha3Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ResourceV1alpha3Client for the given RESTClient. +func New(c rest.Interface) *ResourceV1alpha3Client { + return &ResourceV1alpha3Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := resourcev1alpha3.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ResourceV1alpha3Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go new file mode 100644 index 000000000..a95ac56d3 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + context "context" + + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimsGetter has a method to return a ResourceClaimInterface. +// A group's client should implement this interface. +type ResourceClaimsGetter interface { + ResourceClaims(namespace string) ResourceClaimInterface +} + +// ResourceClaimInterface has methods to work with ResourceClaim resources. +type ResourceClaimInterface interface { + Create(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaim, opts v1.CreateOptions) (*resourcev1alpha3.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaim, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, resourceClaim *resourcev1alpha3.ResourceClaim, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceClaim, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.ResourceClaimList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceClaim, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, resourceClaim *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceClaim, err error) + ResourceClaimExpansion +} + +// resourceClaims implements ResourceClaimInterface +type resourceClaims struct { + *gentype.ClientWithListAndApply[*resourcev1alpha3.ResourceClaim, *resourcev1alpha3.ResourceClaimList, *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration] +} + +// newResourceClaims returns a ResourceClaims +func newResourceClaims(c *ResourceV1alpha3Client, namespace string) *resourceClaims { + return &resourceClaims{ + gentype.NewClientWithListAndApply[*resourcev1alpha3.ResourceClaim, *resourcev1alpha3.ResourceClaimList, *applyconfigurationsresourcev1alpha3.ResourceClaimApplyConfiguration]( + "resourceclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *resourcev1alpha3.ResourceClaim { return &resourcev1alpha3.ResourceClaim{} }, + func() *resourcev1alpha3.ResourceClaimList { return &resourcev1alpha3.ResourceClaimList{} }, + gentype.PrefersProtobuf[*resourcev1alpha3.ResourceClaim](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go new file mode 100644 index 000000000..a8ba1f696 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha3 + +import ( + context "context" + + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface. +// A group's client should implement this interface. +type ResourceClaimTemplatesGetter interface { + ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface +} + +// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. +type ResourceClaimTemplateInterface interface { + Create(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, opts v1.CreateOptions) (*resourcev1alpha3.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *resourcev1alpha3.ResourceClaimTemplate, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceClaimTemplate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.ResourceClaimTemplateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *applyconfigurationsresourcev1alpha3.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceClaimTemplate, err error) + ResourceClaimTemplateExpansion +} + +// resourceClaimTemplates implements ResourceClaimTemplateInterface +type resourceClaimTemplates struct { + *gentype.ClientWithListAndApply[*resourcev1alpha3.ResourceClaimTemplate, *resourcev1alpha3.ResourceClaimTemplateList, *applyconfigurationsresourcev1alpha3.ResourceClaimTemplateApplyConfiguration] +} + +// newResourceClaimTemplates returns a ResourceClaimTemplates +func newResourceClaimTemplates(c *ResourceV1alpha3Client, namespace string) *resourceClaimTemplates { + return &resourceClaimTemplates{ + gentype.NewClientWithListAndApply[*resourcev1alpha3.ResourceClaimTemplate, *resourcev1alpha3.ResourceClaimTemplateList, *applyconfigurationsresourcev1alpha3.ResourceClaimTemplateApplyConfiguration]( + "resourceclaimtemplates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *resourcev1alpha3.ResourceClaimTemplate { return &resourcev1alpha3.ResourceClaimTemplate{} }, + func() *resourcev1alpha3.ResourceClaimTemplateList { + return &resourcev1alpha3.ResourceClaimTemplateList{} + }, + gentype.PrefersProtobuf[*resourcev1alpha3.ResourceClaimTemplate](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go new file mode 100644 index 000000000..91dfce5ec --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + context "context" + + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceSlicesGetter has a method to return a ResourceSliceInterface. +// A group's client should implement this interface. +type ResourceSlicesGetter interface { + ResourceSlices() ResourceSliceInterface +} + +// ResourceSliceInterface has methods to work with ResourceSlice resources. +type ResourceSliceInterface interface { + Create(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSlice, opts v1.CreateOptions) (*resourcev1alpha3.ResourceSlice, error) + Update(ctx context.Context, resourceSlice *resourcev1alpha3.ResourceSlice, opts v1.UpdateOptions) (*resourcev1alpha3.ResourceSlice, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.ResourceSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.ResourceSliceList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.ResourceSlice, err error) + Apply(ctx context.Context, resourceSlice *applyconfigurationsresourcev1alpha3.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.ResourceSlice, err error) + ResourceSliceExpansion +} + +// resourceSlices implements ResourceSliceInterface +type resourceSlices struct { + *gentype.ClientWithListAndApply[*resourcev1alpha3.ResourceSlice, *resourcev1alpha3.ResourceSliceList, *applyconfigurationsresourcev1alpha3.ResourceSliceApplyConfiguration] +} + +// newResourceSlices returns a ResourceSlices +func newResourceSlices(c *ResourceV1alpha3Client) *resourceSlices { + return &resourceSlices{ + gentype.NewClientWithListAndApply[*resourcev1alpha3.ResourceSlice, *resourcev1alpha3.ResourceSliceList, *applyconfigurationsresourcev1alpha3.ResourceSliceApplyConfiguration]( + "resourceslices", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *resourcev1alpha3.ResourceSlice { return &resourcev1alpha3.ResourceSlice{} }, + func() *resourcev1alpha3.ResourceSliceList { return &resourcev1alpha3.ResourceSliceList{} }, + gentype.PrefersProtobuf[*resourcev1alpha3.ResourceSlice](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/deviceclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/deviceclass.go new file mode 100644 index 
000000000..e41416d39 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/deviceclass.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + + resourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// DeviceClassesGetter has a method to return a DeviceClassInterface. +// A group's client should implement this interface. +type DeviceClassesGetter interface { + DeviceClasses() DeviceClassInterface +} + +// DeviceClassInterface has methods to work with DeviceClass resources. +type DeviceClassInterface interface { + Create(ctx context.Context, deviceClass *resourcev1beta1.DeviceClass, opts v1.CreateOptions) (*resourcev1beta1.DeviceClass, error) + Update(ctx context.Context, deviceClass *resourcev1beta1.DeviceClass, opts v1.UpdateOptions) (*resourcev1beta1.DeviceClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.DeviceClass, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.DeviceClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.DeviceClass, err error) + Apply(ctx context.Context, deviceClass *applyconfigurationsresourcev1beta1.DeviceClassApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.DeviceClass, err error) + DeviceClassExpansion +} + +// deviceClasses implements DeviceClassInterface +type deviceClasses struct { + *gentype.ClientWithListAndApply[*resourcev1beta1.DeviceClass, *resourcev1beta1.DeviceClassList, *applyconfigurationsresourcev1beta1.DeviceClassApplyConfiguration] +} + +// newDeviceClasses returns a DeviceClasses +func newDeviceClasses(c *ResourceV1beta1Client) *deviceClasses { + return &deviceClasses{ + gentype.NewClientWithListAndApply[*resourcev1beta1.DeviceClass, *resourcev1beta1.DeviceClassList, *applyconfigurationsresourcev1beta1.DeviceClassApplyConfiguration]( + "deviceclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *resourcev1beta1.DeviceClass { return &resourcev1beta1.DeviceClass{} }, + func() *resourcev1beta1.DeviceClassList { return &resourcev1beta1.DeviceClassList{} }, + gentype.PrefersProtobuf[*resourcev1beta1.DeviceClass](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/doc.go 
b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/doc.go new file mode 100644 index 000000000..771101956 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/generated_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/generated_expansion.go new file mode 100644 index 000000000..d5fcfc214 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type DeviceClassExpansion interface{} + +type ResourceClaimExpansion interface{} + +type ResourceClaimTemplateExpansion interface{} + +type ResourceSliceExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resource_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resource_client.go new file mode 100644 index 000000000..c6a3b2836 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resource_client.go @@ -0,0 +1,122 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + http "net/http" + + resourcev1beta1 "k8s.io/api/resource/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type ResourceV1beta1Interface interface { + RESTClient() rest.Interface + DeviceClassesGetter + ResourceClaimsGetter + ResourceClaimTemplatesGetter + ResourceSlicesGetter +} + +// ResourceV1beta1Client is used to interact with features provided by the resource.k8s.io group. +type ResourceV1beta1Client struct { + restClient rest.Interface +} + +func (c *ResourceV1beta1Client) DeviceClasses() DeviceClassInterface { + return newDeviceClasses(c) +} + +func (c *ResourceV1beta1Client) ResourceClaims(namespace string) ResourceClaimInterface { + return newResourceClaims(c, namespace) +} + +func (c *ResourceV1beta1Client) ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface { + return newResourceClaimTemplates(c, namespace) +} + +func (c *ResourceV1beta1Client) ResourceSlices() ResourceSliceInterface { + return newResourceSlices(c) +} + +// NewForConfig creates a new ResourceV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ResourceV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ResourceV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResourceV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ResourceV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new ResourceV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ResourceV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ResourceV1beta1Client for the given RESTClient. +func New(c rest.Interface) *ResourceV1beta1Client { + return &ResourceV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := resourcev1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ResourceV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaim.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaim.go new file mode 100644 index 000000000..3172ab5df --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaim.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + + resourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimsGetter has a method to return a ResourceClaimInterface. +// A group's client should implement this interface. +type ResourceClaimsGetter interface { + ResourceClaims(namespace string) ResourceClaimInterface +} + +// ResourceClaimInterface has methods to work with ResourceClaim resources. +type ResourceClaimInterface interface { + Create(ctx context.Context, resourceClaim *resourcev1beta1.ResourceClaim, opts v1.CreateOptions) (*resourcev1beta1.ResourceClaim, error) + Update(ctx context.Context, resourceClaim *resourcev1beta1.ResourceClaim, opts v1.UpdateOptions) (*resourcev1beta1.ResourceClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, resourceClaim *resourcev1beta1.ResourceClaim, opts v1.UpdateOptions) (*resourcev1beta1.ResourceClaim, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.ResourceClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.ResourceClaimList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.ResourceClaim, err error) + Apply(ctx context.Context, resourceClaim *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceClaim, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, resourceClaim *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceClaim, err error) + ResourceClaimExpansion +} + +// resourceClaims implements ResourceClaimInterface +type resourceClaims struct { + *gentype.ClientWithListAndApply[*resourcev1beta1.ResourceClaim, *resourcev1beta1.ResourceClaimList, *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration] +} + +// newResourceClaims returns a ResourceClaims +func newResourceClaims(c *ResourceV1beta1Client, namespace string) *resourceClaims { + return &resourceClaims{ + gentype.NewClientWithListAndApply[*resourcev1beta1.ResourceClaim, *resourcev1beta1.ResourceClaimList, *applyconfigurationsresourcev1beta1.ResourceClaimApplyConfiguration]( + "resourceclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *resourcev1beta1.ResourceClaim { return &resourcev1beta1.ResourceClaim{} }, + func() *resourcev1beta1.ResourceClaimList { return &resourcev1beta1.ResourceClaimList{} }, + gentype.PrefersProtobuf[*resourcev1beta1.ResourceClaim](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaimtemplate.go new file mode 100644 index 000000000..26c6fe829 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaimtemplate.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + + resourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceClaimTemplatesGetter has a method to return a ResourceClaimTemplateInterface. +// A group's client should implement this interface. +type ResourceClaimTemplatesGetter interface { + ResourceClaimTemplates(namespace string) ResourceClaimTemplateInterface +} + +// ResourceClaimTemplateInterface has methods to work with ResourceClaimTemplate resources. 
+type ResourceClaimTemplateInterface interface { + Create(ctx context.Context, resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, opts v1.CreateOptions) (*resourcev1beta1.ResourceClaimTemplate, error) + Update(ctx context.Context, resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, opts v1.UpdateOptions) (*resourcev1beta1.ResourceClaimTemplate, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.ResourceClaimTemplate, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.ResourceClaimTemplateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.ResourceClaimTemplate, err error) + Apply(ctx context.Context, resourceClaimTemplate *applyconfigurationsresourcev1beta1.ResourceClaimTemplateApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceClaimTemplate, err error) + ResourceClaimTemplateExpansion +} + +// resourceClaimTemplates implements ResourceClaimTemplateInterface +type resourceClaimTemplates struct { + *gentype.ClientWithListAndApply[*resourcev1beta1.ResourceClaimTemplate, *resourcev1beta1.ResourceClaimTemplateList, *applyconfigurationsresourcev1beta1.ResourceClaimTemplateApplyConfiguration] +} + +// newResourceClaimTemplates returns a ResourceClaimTemplates +func newResourceClaimTemplates(c *ResourceV1beta1Client, namespace string) *resourceClaimTemplates { + return &resourceClaimTemplates{ + gentype.NewClientWithListAndApply[*resourcev1beta1.ResourceClaimTemplate, *resourcev1beta1.ResourceClaimTemplateList, *applyconfigurationsresourcev1beta1.ResourceClaimTemplateApplyConfiguration]( + "resourceclaimtemplates", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *resourcev1beta1.ResourceClaimTemplate { return &resourcev1beta1.ResourceClaimTemplate{} }, + func() *resourcev1beta1.ResourceClaimTemplateList { return &resourcev1beta1.ResourceClaimTemplateList{} }, + gentype.PrefersProtobuf[*resourcev1beta1.ResourceClaimTemplate](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceslice.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceslice.go new file mode 100644 index 000000000..c4e985ea4 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceslice.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + context "context" + + resourcev1beta1 "k8s.io/api/resource/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsresourcev1beta1 "k8s.io/client-go/applyconfigurations/resource/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// ResourceSlicesGetter has a method to return a ResourceSliceInterface. +// A group's client should implement this interface. +type ResourceSlicesGetter interface { + ResourceSlices() ResourceSliceInterface +} + +// ResourceSliceInterface has methods to work with ResourceSlice resources. +type ResourceSliceInterface interface { + Create(ctx context.Context, resourceSlice *resourcev1beta1.ResourceSlice, opts v1.CreateOptions) (*resourcev1beta1.ResourceSlice, error) + Update(ctx context.Context, resourceSlice *resourcev1beta1.ResourceSlice, opts v1.UpdateOptions) (*resourcev1beta1.ResourceSlice, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1beta1.ResourceSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*resourcev1beta1.ResourceSliceList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1beta1.ResourceSlice, err error) + Apply(ctx context.Context, resourceSlice *applyconfigurationsresourcev1beta1.ResourceSliceApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1beta1.ResourceSlice, err error) + ResourceSliceExpansion +} + +// resourceSlices implements ResourceSliceInterface +type resourceSlices struct { + *gentype.ClientWithListAndApply[*resourcev1beta1.ResourceSlice, *resourcev1beta1.ResourceSliceList, *applyconfigurationsresourcev1beta1.ResourceSliceApplyConfiguration] +} + +// newResourceSlices returns a ResourceSlices +func newResourceSlices(c *ResourceV1beta1Client) *resourceSlices { + return &resourceSlices{ + gentype.NewClientWithListAndApply[*resourcev1beta1.ResourceSlice, *resourcev1beta1.ResourceSliceList, *applyconfigurationsresourcev1beta1.ResourceSliceApplyConfiguration]( + "resourceslices", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *resourcev1beta1.ResourceSlice { return &resourcev1beta1.ResourceSlice{} }, + func() *resourcev1beta1.ResourceSliceList { return &resourcev1beta1.ResourceSliceList{} }, + gentype.PrefersProtobuf[*resourcev1beta1.ResourceSlice](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go deleted file mode 100644 index 40ab9fb40..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/scheduling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" - testing "k8s.io/client-go/testing" -) - -// FakePriorityClasses implements PriorityClassInterface -type FakePriorityClasses struct { - Fake *FakeSchedulingV1 -} - -var priorityclassesResource = v1.SchemeGroupVersion.WithResource("priorityclasses") - -var priorityclassesKind = v1.SchemeGroupVersion.WithKind("PriorityClass") - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *FakePriorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityClass), err -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *FakePriorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1.PriorityClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.PriorityClassList{ListMeta: obj.(*v1.PriorityClassList).ListMeta} - for _, item := range obj.(*v1.PriorityClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *FakePriorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) -} - -// Create takes the representation of a priorityClass and creates it. 
Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityClass), err -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityClass), err -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &v1.PriorityClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.PriorityClassList{}) - return err -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.PriorityClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_scheduling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_scheduling_client.go deleted file mode 100644 index a64ac945b..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_scheduling_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeSchedulingV1 struct { - *testing.Fake -} - -func (c *FakeSchedulingV1) PriorityClasses() v1.PriorityClassInterface { - return &FakePriorityClasses{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeSchedulingV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go index c68ec5da4..3642b404a 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/scheduling/v1" + schedulingv1 "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - schedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" + applyconfigurationsschedulingv1 "k8s.io/client-go/applyconfigurations/scheduling/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. @@ -41,157 +38,34 @@ type PriorityClassesGetter interface { // PriorityClassInterface has methods to work with PriorityClass resources. 
type PriorityClassInterface interface { - Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (*v1.PriorityClass, error) - Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (*v1.PriorityClass, error) + Create(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts metav1.CreateOptions) (*schedulingv1.PriorityClass, error) + Update(ctx context.Context, priorityClass *schedulingv1.PriorityClass, opts metav1.UpdateOptions) (*schedulingv1.PriorityClass, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityClass, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityClassList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*schedulingv1.PriorityClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1.PriorityClassList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) - Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *schedulingv1.PriorityClass, err error) + Apply(ctx context.Context, priorityClass *applyconfigurationsschedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *schedulingv1.PriorityClass, err error) PriorityClassExpansion } // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*schedulingv1.PriorityClass, *schedulingv1.PriorityClassList, *applyconfigurationsschedulingv1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*schedulingv1.PriorityClass, *schedulingv1.PriorityClassList, *applyconfigurationsschedulingv1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *schedulingv1.PriorityClass { return &schedulingv1.PriorityClass{} }, + func() *schedulingv1.PriorityClassList { return &schedulingv1.PriorityClassList{} }, + gentype.PrefersProtobuf[*schedulingv1.PriorityClass](), + ), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. 
-func (c *priorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. 
-func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1.PriorityClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go index 11fc4b9f3..bbb46a9de 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/scheduling/v1" - "k8s.io/client-go/kubernetes/scheme" + schedulingv1 "k8s.io/api/scheduling/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *SchedulingV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := schedulingv1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go deleted file mode 100644 index 3c8404a72..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/scheduling/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakePriorityClasses implements PriorityClassInterface -type FakePriorityClasses struct { - Fake *FakeSchedulingV1alpha1 -} - -var priorityclassesResource = v1alpha1.SchemeGroupVersion.WithResource("priorityclasses") - -var priorityclassesKind = v1alpha1.SchemeGroupVersion.WithKind("PriorityClass") - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1alpha1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PriorityClass), err -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1alpha1.PriorityClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.PriorityClassList{ListMeta: obj.(*v1alpha1.PriorityClassList).ListMeta} - for _, item := range obj.(*v1alpha1.PriorityClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PriorityClass), err -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. 
-func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PriorityClass), err -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &v1alpha1.PriorityClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.PriorityClassList{}) - return err -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1alpha1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PriorityClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PriorityClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go deleted file mode 100644 index 974ba193f..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeSchedulingV1alpha1 struct { - *testing.Fake -} - -func (c *FakeSchedulingV1alpha1) PriorityClasses() v1alpha1.PriorityClassInterface { - return &FakePriorityClasses{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeSchedulingV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index a9b8c19c7..e7125f9fc 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/scheduling/v1alpha1" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" + applyconfigurationsschedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. @@ -41,157 +38,34 @@ type PriorityClassesGetter interface { // PriorityClassInterface has methods to work with PriorityClass resources. 
type PriorityClassInterface interface { - Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (*v1alpha1.PriorityClass, error) - Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (*v1alpha1.PriorityClass, error) + Create(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClass, opts v1.CreateOptions) (*schedulingv1alpha1.PriorityClass, error) + Update(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClass, opts v1.UpdateOptions) (*schedulingv1alpha1.PriorityClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*schedulingv1alpha1.PriorityClass, error) + List(ctx context.Context, opts v1.ListOptions) (*schedulingv1alpha1.PriorityClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) - Apply(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *schedulingv1alpha1.PriorityClass, err error) + Apply(ctx context.Context, priorityClass *applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *schedulingv1alpha1.PriorityClass, err error) PriorityClassExpansion } // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*schedulingv1alpha1.PriorityClass, *schedulingv1alpha1.PriorityClassList, *applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*schedulingv1alpha1.PriorityClass, *schedulingv1alpha1.PriorityClassList, *applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *schedulingv1alpha1.PriorityClass { return &schedulingv1alpha1.PriorityClass{} }, + func() *schedulingv1alpha1.PriorityClassList { return &schedulingv1alpha1.PriorityClassList{} }, + gentype.PrefersProtobuf[*schedulingv1alpha1.PriorityClass](), + ), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. 
-func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) { - result = &v1alpha1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. 
-func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1alpha1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1alpha1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go index 47fb774a3..056ab855e 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/scheduling/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *SchedulingV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := schedulingv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go deleted file mode 100644 index 4cf2e26c7..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/scheduling/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - schedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakePriorityClasses implements PriorityClassInterface -type FakePriorityClasses struct { - Fake *FakeSchedulingV1beta1 -} - -var priorityclassesResource = v1beta1.SchemeGroupVersion.WithResource("priorityclasses") - -var priorityclassesKind = v1beta1.SchemeGroupVersion.WithKind("PriorityClass") - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *FakePriorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1beta1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityClass), err -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *FakePriorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1beta1.PriorityClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.PriorityClassList{ListMeta: obj.(*v1beta1.PriorityClassList).ListMeta} - for _, item := range obj.(*v1beta1.PriorityClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *FakePriorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts)) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *FakePriorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityClass), err -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. 
-func (c *FakePriorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityClass), err -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *FakePriorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(priorityclassesResource, name, opts), &v1beta1.PriorityClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePriorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.PriorityClassList{}) - return err -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *FakePriorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, pt, data, subresources...), &v1beta1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. -func (c *FakePriorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, *name, types.ApplyPatchType, data), &v1beta1.PriorityClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.PriorityClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go deleted file mode 100644 index 4a6878a45..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeSchedulingV1beta1 struct { - *testing.Fake -} - -func (c *FakeSchedulingV1beta1) PriorityClasses() v1beta1.PriorityClassInterface { - return &FakePriorityClasses{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeSchedulingV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 155476e4c..dcba291e3 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/scheduling/v1beta1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - schedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" + applyconfigurationsschedulingv1beta1 "k8s.io/client-go/applyconfigurations/scheduling/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // PriorityClassesGetter has a method to return a PriorityClassInterface. @@ -41,157 +38,34 @@ type PriorityClassesGetter interface { // PriorityClassInterface has methods to work with PriorityClass resources. 
type PriorityClassInterface interface { - Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (*v1beta1.PriorityClass, error) - Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (*v1beta1.PriorityClass, error) + Create(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClass, opts v1.CreateOptions) (*schedulingv1beta1.PriorityClass, error) + Update(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClass, opts v1.UpdateOptions) (*schedulingv1beta1.PriorityClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*schedulingv1beta1.PriorityClass, error) + List(ctx context.Context, opts v1.ListOptions) (*schedulingv1beta1.PriorityClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) - Apply(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *schedulingv1beta1.PriorityClass, err error) + Apply(ctx context.Context, priorityClass *applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *schedulingv1beta1.PriorityClass, err error) PriorityClassExpansion } // priorityClasses implements PriorityClassInterface type priorityClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*schedulingv1beta1.PriorityClass, *schedulingv1beta1.PriorityClassList, *applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration] } // newPriorityClasses returns a PriorityClasses func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses { return &priorityClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*schedulingv1beta1.PriorityClass, *schedulingv1beta1.PriorityClassList, *applyconfigurationsschedulingv1beta1.PriorityClassApplyConfiguration]( + "priorityclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *schedulingv1beta1.PriorityClass { return &schedulingv1beta1.PriorityClass{} }, + func() *schedulingv1beta1.PriorityClassList { return &schedulingv1beta1.PriorityClassList{} }, + gentype.PrefersProtobuf[*schedulingv1beta1.PriorityClass](), + ), } } - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. 
-func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(priorityClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied priorityClass. 
-func (c *priorityClasses) Apply(ctx context.Context, priorityClass *schedulingv1beta1.PriorityClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PriorityClass, err error) { - if priorityClass == nil { - return nil, fmt.Errorf("priorityClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(priorityClass) - if err != nil { - return nil, err - } - name := priorityClass.Name - if name == nil { - return nil, fmt.Errorf("priorityClass.Name must be provided to Apply") - } - result = &v1beta1.PriorityClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("priorityclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go index dbaf69414..9e383398e 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/scheduling/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *SchedulingV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := schedulingv1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go index d9dc4151e..9eb82f9ed 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIDriversGetter has a method to return a CSIDriverInterface. @@ -41,157 +38,34 @@ type CSIDriversGetter interface { // CSIDriverInterface has methods to work with CSIDriver resources. 
type CSIDriverInterface interface { - Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (*v1.CSIDriver, error) - Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (*v1.CSIDriver, error) + Create(ctx context.Context, cSIDriver *storagev1.CSIDriver, opts metav1.CreateOptions) (*storagev1.CSIDriver, error) + Update(ctx context.Context, cSIDriver *storagev1.CSIDriver, opts metav1.UpdateOptions) (*storagev1.CSIDriver, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSIDriver, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CSIDriverList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.CSIDriver, error) + List(ctx context.Context, opts metav1.ListOptions) (*storagev1.CSIDriverList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) - Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.CSIDriver, err error) + Apply(ctx context.Context, cSIDriver *applyconfigurationsstoragev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.CSIDriver, err error) CSIDriverExpansion } // cSIDrivers implements CSIDriverInterface type cSIDrivers struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagev1.CSIDriver, *storagev1.CSIDriverList, *applyconfigurationsstoragev1.CSIDriverApplyConfiguration] } // newCSIDrivers returns a CSIDrivers func newCSIDrivers(c *StorageV1Client) *cSIDrivers { return &cSIDrivers{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagev1.CSIDriver, *storagev1.CSIDriverList, *applyconfigurationsstoragev1.CSIDriverApplyConfiguration]( + "csidrivers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1.CSIDriver { return &storagev1.CSIDriver{} }, + func() *storagev1.CSIDriverList { return &storagev1.CSIDriverList{} }, + gentype.PrefersProtobuf[*storagev1.CSIDriver](), + ), } } - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Get(). - Resource("csidrivers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSIDriverList{} - err = c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. 
-func (c *cSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Post(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Put(). - Resource("csidrivers"). - Name(cSIDriver.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("csidrivers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csidrivers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) { - result = &v1.CSIDriver{} - err = c.client.Patch(pt). - Resource("csidrivers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *cSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - result = &v1.CSIDriver{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csidrivers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go index 17dbc8c1c..a4fe6a0ee 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSINodesGetter has a method to return a CSINodeInterface. @@ -41,157 +38,34 @@ type CSINodesGetter interface { // CSINodeInterface has methods to work with CSINode resources. type CSINodeInterface interface { - Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (*v1.CSINode, error) - Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (*v1.CSINode, error) + Create(ctx context.Context, cSINode *storagev1.CSINode, opts metav1.CreateOptions) (*storagev1.CSINode, error) + Update(ctx context.Context, cSINode *storagev1.CSINode, opts metav1.UpdateOptions) (*storagev1.CSINode, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSINode, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CSINodeList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.CSINode, error) + List(ctx context.Context, opts metav1.ListOptions) (*storagev1.CSINodeList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) - Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.CSINode, err error) + Apply(ctx context.Context, cSINode *applyconfigurationsstoragev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.CSINode, err error) CSINodeExpansion } // cSINodes implements CSINodeInterface type cSINodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagev1.CSINode, *storagev1.CSINodeList, *applyconfigurationsstoragev1.CSINodeApplyConfiguration] } // newCSINodes returns a CSINodes func newCSINodes(c *StorageV1Client) *cSINodes { return &cSINodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagev1.CSINode, *storagev1.CSINodeList, *applyconfigurationsstoragev1.CSINodeApplyConfiguration]( + "csinodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1.CSINode { return &storagev1.CSINode{} }, + func() *storagev1.CSINodeList { return &storagev1.CSINodeList{} }, + 
gentype.PrefersProtobuf[*storagev1.CSINode](), + ), } } - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Get(). - Resource("csinodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSINodeList{} - err = c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Post(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Put(). - Resource("csinodes"). - Name(cSINode.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("csinodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csinodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) { - result = &v1.CSINode{} - err = c.client.Patch(pt). - Resource("csinodes"). - Name(name). - SubResource(subresources...). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. -func (c *cSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - result = &v1.CSINode{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csinodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go index 6bb50e0da..50a942978 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csistoragecapacity.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. @@ -41,168 +38,34 @@ type CSIStorageCapacitiesGetter interface { // CSIStorageCapacityInterface has methods to work with CSIStorageCapacity resources. 
type CSIStorageCapacityInterface interface { - Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (*v1.CSIStorageCapacity, error) - Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (*v1.CSIStorageCapacity, error) + Create(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacity, opts metav1.CreateOptions) (*storagev1.CSIStorageCapacity, error) + Update(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacity, opts metav1.UpdateOptions) (*storagev1.CSIStorageCapacity, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSIStorageCapacity, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.CSIStorageCapacityList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.CSIStorageCapacity, error) + List(ctx context.Context, opts metav1.ListOptions) (*storagev1.CSIStorageCapacityList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) - Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.CSIStorageCapacity, err error) + Apply(ctx context.Context, cSIStorageCapacity *applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.CSIStorageCapacity, err error) CSIStorageCapacityExpansion } // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*storagev1.CSIStorageCapacity, *storagev1.CSIStorageCapacityList, *applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*storagev1.CSIStorageCapacity, *storagev1.CSIStorageCapacityList, *applyconfigurationsstoragev1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *storagev1.CSIStorageCapacity { return &storagev1.CSIStorageCapacity{} }, + func() *storagev1.CSIStorageCapacityList { return &storagev1.CSIStorageCapacityList{} }, + gentype.PrefersProtobuf[*storagev1.CSIStorageCapacity](), + ), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *cSIStorageCapacities) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) { - result = &v1.CSIStorageCapacity{} - err = c.client.Patch(pt). - Namespace(c.ns). 
- Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go deleted file mode 100644 index 498322737..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csidriver.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" - testing "k8s.io/client-go/testing" -) - -// FakeCSIDrivers implements CSIDriverInterface -type FakeCSIDrivers struct { - Fake *FakeStorageV1 -} - -var csidriversResource = v1.SchemeGroupVersion.WithResource("csidrivers") - -var csidriversKind = v1.SchemeGroupVersion.WithKind("CSIDriver") - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(csidriversResource, name), &v1.CSIDriver{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CSIDriver), err -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *FakeCSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &v1.CSIDriverList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.CSIDriverList{ListMeta: obj.(*v1.CSIDriverList).ListMeta} - for _, item := range obj.(*v1.CSIDriverList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *FakeCSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csidriversResource, opts)) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1.CSIDriver{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CSIDriver), err -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1.CSIDriver{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CSIDriver), err -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(csidriversResource, name, opts), &v1.CSIDriver{}) - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csidriversResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.CSIDriverList{}) - return err -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1.CSIDriver{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CSIDriver), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1.CSIDriverApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, *name, types.ApplyPatchType, data), &v1.CSIDriver{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CSIDriver), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go deleted file mode 100644 index 0271a20f3..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csinode.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" - testing "k8s.io/client-go/testing" -) - -// FakeCSINodes implements CSINodeInterface -type FakeCSINodes struct { - Fake *FakeStorageV1 -} - -var csinodesResource = v1.SchemeGroupVersion.WithResource("csinodes") - -var csinodesKind = v1.SchemeGroupVersion.WithKind("CSINode") - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *FakeCSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(csinodesResource, name), &v1.CSINode{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CSINode), err -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *FakeCSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &v1.CSINodeList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.CSINodeList{ListMeta: obj.(*v1.CSINodeList).ListMeta} - for _, item := range obj.(*v1.CSINodeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *FakeCSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts)) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1.CSINode{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CSINode), err -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1.CSINode{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CSINode), err -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(csinodesResource, name, opts), &v1.CSINode{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csinodesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.CSINodeList{}) - return err -} - -// Patch applies the patch and returns the patched cSINode. -func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1.CSINode{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CSINode), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. 
-func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1.CSINodeApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, *name, types.ApplyPatchType, data), &v1.CSINode{}) - if obj == nil { - return nil, err - } - return obj.(*v1.CSINode), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go deleted file mode 100644 index b12bbe3c1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_csistoragecapacity.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" - testing "k8s.io/client-go/testing" -) - -// FakeCSIStorageCapacities implements CSIStorageCapacityInterface -type FakeCSIStorageCapacities struct { - Fake *FakeStorageV1 - ns string -} - -var csistoragecapacitiesResource = v1.SchemeGroupVersion.WithResource("csistoragecapacities") - -var csistoragecapacitiesKind = v1.SchemeGroupVersion.WithKind("CSIStorageCapacity") - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CSIStorageCapacity), err -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIStorageCapacityList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1.CSIStorageCapacityList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.CSIStorageCapacityList{ListMeta: obj.(*v1.CSIStorageCapacityList).ListMeta} - for _, item := range obj.(*v1.CSIStorageCapacityList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts)) - -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.CreateOptions) (result *v1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CSIStorageCapacity), err -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1.CSIStorageCapacity, opts metav1.UpdateOptions) (result *v1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CSIStorageCapacity), err -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(csistoragecapacitiesResource, c.ns, name, opts), &v1.CSIStorageCapacity{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1.CSIStorageCapacityList{}) - return err -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CSIStorageCapacity), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. 
-func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1.CSIStorageCapacityApplyConfiguration, opts metav1.ApplyOptions) (result *v1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1.CSIStorageCapacity), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go deleted file mode 100644 index 5cb91b516..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/client-go/kubernetes/typed/storage/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeStorageV1 struct { - *testing.Fake -} - -func (c *FakeStorageV1) CSIDrivers() v1.CSIDriverInterface { - return &FakeCSIDrivers{c} -} - -func (c *FakeStorageV1) CSINodes() v1.CSINodeInterface { - return &FakeCSINodes{c} -} - -func (c *FakeStorageV1) CSIStorageCapacities(namespace string) v1.CSIStorageCapacityInterface { - return &FakeCSIStorageCapacities{c, namespace} -} - -func (c *FakeStorageV1) StorageClasses() v1.StorageClassInterface { - return &FakeStorageClasses{c} -} - -func (c *FakeStorageV1) VolumeAttachments() v1.VolumeAttachmentInterface { - return &FakeVolumeAttachments{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeStorageV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go deleted file mode 100644 index e232f4c8d..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" - testing "k8s.io/client-go/testing" -) - -// FakeStorageClasses implements StorageClassInterface -type FakeStorageClasses struct { - Fake *FakeStorageV1 -} - -var storageclassesResource = v1.SchemeGroupVersion.WithResource("storageclasses") - -var storageclassesKind = v1.SchemeGroupVersion.WithKind("StorageClass") - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *FakeStorageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.StorageClass), err -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *FakeStorageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1.StorageClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.StorageClassList{ListMeta: obj.(*v1.StorageClassList).ListMeta} - for _, item := range obj.(*v1.StorageClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *FakeStorageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.StorageClass), err -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.StorageClass), err -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(storageclassesResource, name, opts), &v1.StorageClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.StorageClassList{}) - return err -} - -// Patch applies the patch and returns the patched storageClass. -func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.StorageClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, *name, types.ApplyPatchType, data), &v1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1.StorageClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go deleted file mode 100644 index 3f5f2aec5..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
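Note on the deleted fake_*.go files above: this hunk removes the typed storage/v1 fake clients from the vendor tree, presumably because nothing in the module imports them any longer; the fake clientset itself still exists in upstream client-go. Tests that need an in-memory storage client can keep using k8s.io/client-go/kubernetes/fake once that package is vendored. A rough sketch with a made-up StorageClass, assuming the fake.NewClientset constructor from recent client-go (older releases expose NewSimpleClientset instead):

    package storagev1example

    import (
        "context"
        "testing"

        storagev1 "k8s.io/api/storage/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"
    )

    func TestStorageClassLookup(t *testing.T) {
        // Seed the fake clientset with a single, invented StorageClass object.
        cs := fake.NewClientset(&storagev1.StorageClass{
            ObjectMeta:  metav1.ObjectMeta{Name: "standard"},
            Provisioner: "kubernetes.io/no-provisioner",
        })

        got, err := cs.StorageV1().StorageClasses().Get(context.Background(), "standard", metav1.GetOptions{})
        if err != nil {
            t.Fatalf("Get returned error: %v", err)
        }
        if got.Provisioner != "kubernetes.io/no-provisioner" {
            t.Fatalf("unexpected provisioner %q", got.Provisioner)
        }
    }

As the deleted List implementations show, the fakes filter only by label selector; field selectors in ListOptions are ignored, which is worth remembering when writing assertions against them.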
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" - testing "k8s.io/client-go/testing" -) - -// FakeVolumeAttachments implements VolumeAttachmentInterface -type FakeVolumeAttachments struct { - Fake *FakeStorageV1 -} - -var volumeattachmentsResource = v1.SchemeGroupVersion.WithResource("volumeattachments") - -var volumeattachmentsKind = v1.SchemeGroupVersion.WithKind("VolumeAttachment") - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1.VolumeAttachment), err -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *FakeVolumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1.VolumeAttachmentList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1.VolumeAttachmentList{ListMeta: obj.(*v1.VolumeAttachmentList).ListMeta} - for _, item := range obj.(*v1.VolumeAttachmentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1.VolumeAttachment), err -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1.VolumeAttachment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1.VolumeAttachment), err -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &v1.VolumeAttachment{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1.VolumeAttachmentList{}) - return err -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1.VolumeAttachment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1.VolumeAttachment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1.VolumeAttachment), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index 750fe8b62..70aaff169 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - "net/http" + http "net/http" - v1 "k8s.io/api/storage/v1" - "k8s.io/client-go/kubernetes/scheme" + storagev1 "k8s.io/api/storage/v1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -105,10 +105,10 @@ func New(c rest.Interface) *StorageV1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := storagev1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 8e97d90a0..f33a351f1 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageClassesGetter has a method to return a StorageClassInterface. @@ -41,157 +38,34 @@ type StorageClassesGetter interface { // StorageClassInterface has methods to work with StorageClass resources. 
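Note on the storage_client.go hunk above: setConfigDefaults now builds the negotiated serializer with rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs), which together with the gentype.PrefersProtobuf option in each resource client is what allows the regenerated clients to request protobuf from the API server by default instead of JSON. Client construction itself is unchanged; a small sketch, assuming a populated *rest.Config, of the group client that these defaults apply to:

    package storagev1example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        storagev1client "k8s.io/client-go/kubernetes/typed/storage/v1"
        "k8s.io/client-go/rest"
    )

    // listStorageClasses builds the StorageV1 group client directly;
    // NewForConfig calls setConfigDefaults internally, which is where the
    // codec factory and API path shown in the hunk above are applied.
    func listStorageClasses(ctx context.Context, cfg *rest.Config) ([]string, error) {
        c, err := storagev1client.NewForConfig(cfg)
        if err != nil {
            return nil, err
        }
        list, err := c.StorageClasses().List(ctx, metav1.ListOptions{})
        if err != nil {
            return nil, err
        }
        names := make([]string, 0, len(list.Items))
        for _, sc := range list.Items {
            names = append(names, sc.Name)
        }
        return names, nil
    }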
type StorageClassInterface interface { - Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (*v1.StorageClass, error) - Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (*v1.StorageClass, error) + Create(ctx context.Context, storageClass *storagev1.StorageClass, opts metav1.CreateOptions) (*storagev1.StorageClass, error) + Update(ctx context.Context, storageClass *storagev1.StorageClass, opts metav1.UpdateOptions) (*storagev1.StorageClass, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StorageClass, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.StorageClassList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.StorageClass, error) + List(ctx context.Context, opts metav1.ListOptions) (*storagev1.StorageClassList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) - Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.StorageClass, err error) + Apply(ctx context.Context, storageClass *applyconfigurationsstoragev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.StorageClass, err error) StorageClassExpansion } // storageClasses implements StorageClassInterface type storageClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagev1.StorageClass, *storagev1.StorageClassList, *applyconfigurationsstoragev1.StorageClassApplyConfiguration] } // newStorageClasses returns a StorageClasses func newStorageClasses(c *StorageV1Client) *storageClasses { return &storageClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagev1.StorageClass, *storagev1.StorageClassList, *applyconfigurationsstoragev1.StorageClassApplyConfiguration]( + "storageclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1.StorageClass { return &storagev1.StorageClass{} }, + func() *storagev1.StorageClassList { return &storagev1.StorageClassList{} }, + gentype.PrefersProtobuf[*storagev1.StorageClass](), + ), } } - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Post(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. 
-func (c *storageClasses) Apply(ctx context.Context, storageClass *storagev1.StorageClassApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - result = &v1.StorageClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go index c1dbec84f..60db4844f 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go @@ -19,18 +19,15 @@ limitations under the License. package v1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1 "k8s.io/api/storage/v1" + storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1 "k8s.io/client-go/applyconfigurations/storage/v1" + applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -41,203 +38,38 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. type VolumeAttachmentInterface interface { - Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (*v1.VolumeAttachment, error) - Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) - UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error) + Create(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts metav1.CreateOptions) (*storagev1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts metav1.UpdateOptions) (*storagev1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachment, opts metav1.UpdateOptions) (*storagev1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.VolumeAttachment, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.VolumeAttachmentList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*storagev1.VolumeAttachment, error) + List(ctx context.Context, opts metav1.ListOptions) (*storagev1.VolumeAttachmentList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) - Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) - ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *storagev1.VolumeAttachment, err error) + Apply(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *storagev1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagev1.VolumeAttachment, *storagev1.VolumeAttachmentList, *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagev1.VolumeAttachment, *storagev1.VolumeAttachmentList, *applyconfigurationsstoragev1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1.VolumeAttachment { return &storagev1.VolumeAttachment{} }, + func() *storagev1.VolumeAttachmentList { return &storagev1.VolumeAttachmentList{} }, + gentype.PrefersProtobuf[*storagev1.VolumeAttachment](), + ), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. 
-func (c *volumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. 
-func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1.VolumeAttachmentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go index bf5d64ddd..63ca27fa4 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/csistoragecapacity.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. 
@@ -41,168 +38,34 @@ type CSIStorageCapacitiesGetter interface { // CSIStorageCapacityInterface has methods to work with CSIStorageCapacity resources. type CSIStorageCapacityInterface interface { - Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (*v1alpha1.CSIStorageCapacity, error) - Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (*v1alpha1.CSIStorageCapacity, error) + Create(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (*storagev1alpha1.CSIStorageCapacity, error) + Update(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (*storagev1alpha1.CSIStorageCapacity, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CSIStorageCapacity, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CSIStorageCapacityList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1alpha1.CSIStorageCapacity, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1alpha1.CSIStorageCapacityList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) - Apply(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CSIStorageCapacity, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1alpha1.CSIStorageCapacity, err error) + Apply(ctx context.Context, cSIStorageCapacity *applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.CSIStorageCapacity, err error) CSIStorageCapacityExpansion } // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*storagev1alpha1.CSIStorageCapacity, *storagev1alpha1.CSIStorageCapacityList, *applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1alpha1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*storagev1alpha1.CSIStorageCapacity, *storagev1alpha1.CSIStorageCapacityList, *applyconfigurationsstoragev1alpha1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *storagev1alpha1.CSIStorageCapacity { return &storagev1alpha1.CSIStorageCapacity{} }, + func() *storagev1alpha1.CSIStorageCapacityList { return &storagev1alpha1.CSIStorageCapacityList{} }, + gentype.PrefersProtobuf[*storagev1alpha1.CSIStorageCapacity](), + ), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. 
-func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *cSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) { - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1alpha1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go deleted file mode 100644 index c1614cda7..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_csistoragecapacity.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/storage/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeCSIStorageCapacities implements CSIStorageCapacityInterface -type FakeCSIStorageCapacities struct { - Fake *FakeStorageV1alpha1 - ns string -} - -var csistoragecapacitiesResource = v1alpha1.SchemeGroupVersion.WithResource("csistoragecapacities") - -var csistoragecapacitiesKind = v1alpha1.SchemeGroupVersion.WithKind("CSIStorageCapacity") - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1alpha1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.CSIStorageCapacity), err -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CSIStorageCapacityList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1alpha1.CSIStorageCapacityList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.CSIStorageCapacityList{ListMeta: obj.(*v1alpha1.CSIStorageCapacityList).ListMeta} - for _, item := range obj.(*v1alpha1.CSIStorageCapacityList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts)) - -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1alpha1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.CSIStorageCapacity), err -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1alpha1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1alpha1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.CSIStorageCapacity), err -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(csistoragecapacitiesResource, c.ns, name, opts), &v1alpha1.CSIStorageCapacity{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.CSIStorageCapacityList{}) - return err -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1alpha1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.CSIStorageCapacity), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1alpha1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.CSIStorageCapacity), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go deleted file mode 100644 index 0e078f348..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeStorageV1alpha1 struct { - *testing.Fake -} - -func (c *FakeStorageV1alpha1) CSIStorageCapacities(namespace string) v1alpha1.CSIStorageCapacityInterface { - return &FakeCSIStorageCapacities{c, namespace} -} - -func (c *FakeStorageV1alpha1) VolumeAttachments() v1alpha1.VolumeAttachmentInterface { - return &FakeVolumeAttachments{c} -} - -func (c *FakeStorageV1alpha1) VolumeAttributesClasses() v1alpha1.VolumeAttributesClassInterface { - return &FakeVolumeAttributesClasses{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeStorageV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go deleted file mode 100644 index 9725d6d10..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/storage/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeVolumeAttachments implements VolumeAttachmentInterface -type FakeVolumeAttachments struct { - Fake *FakeStorageV1alpha1 -} - -var volumeattachmentsResource = v1alpha1.SchemeGroupVersion.WithResource("volumeattachments") - -var volumeattachmentsKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAttachment") - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1alpha1.VolumeAttachmentList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.VolumeAttachmentList{ListMeta: obj.(*v1alpha1.VolumeAttachmentList).ListMeta} - for _, item := range obj.(*v1alpha1.VolumeAttachmentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1alpha1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &v1alpha1.VolumeAttachment{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttachmentList{}) - return err -} - -// Patch applies the patch and returns the patched volumeAttachment. 
-func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1alpha1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1alpha1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttachment), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go deleted file mode 100644 index d25263df4..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattributesclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/storage/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeVolumeAttributesClasses implements VolumeAttributesClassInterface -type FakeVolumeAttributesClasses struct { - Fake *FakeStorageV1alpha1 -} - -var volumeattributesclassesResource = v1alpha1.SchemeGroupVersion.WithResource("volumeattributesclasses") - -var volumeattributesclassesKind = v1alpha1.SchemeGroupVersion.WithKind("VolumeAttributesClass") - -// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. -func (c *FakeVolumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattributesclassesResource, name), &v1alpha1.VolumeAttributesClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttributesClass), err -} - -// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. -func (c *FakeVolumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumeattributesclassesResource, volumeattributesclassesKind, opts), &v1alpha1.VolumeAttributesClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.VolumeAttributesClassList{ListMeta: obj.(*v1alpha1.VolumeAttributesClassList).ListMeta} - for _, item := range obj.(*v1alpha1.VolumeAttributesClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. -func (c *FakeVolumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattributesclassesResource, opts)) -} - -// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *FakeVolumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttributesClass), err -} - -// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *FakeVolumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(volumeattributesclassesResource, volumeAttributesClass), &v1alpha1.VolumeAttributesClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttributesClass), err -} - -// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(volumeattributesclassesResource, name, opts), &v1alpha1.VolumeAttributesClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattributesclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttributesClassList{}) - return err -} - -// Patch applies the patch and returns the patched volumeAttributesClass. -func (c *FakeVolumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, name, pt, data, subresources...), &v1alpha1.VolumeAttributesClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttributesClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. -func (c *FakeVolumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - if volumeAttributesClass == nil { - return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttributesClass) - if err != nil { - return nil, err - } - name := volumeAttributesClass.Name - if name == nil { - return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattributesclassesResource, *name, types.ApplyPatchType, data), &v1alpha1.VolumeAttributesClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.VolumeAttributesClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index 63e3fc243..17b680d19 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -95,10 +95,10 @@ func New(c rest.Interface) *StorageV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := storagev1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index 58abb748f..d9c24ab5b 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -19,18 +19,15 @@ limitations under the License. package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -41,203 +38,38 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. type VolumeAttachmentInterface interface { - Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (*v1alpha1.VolumeAttachment, error) - Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) - UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error) + Create(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachment, opts v1.CreateOptions) (*storagev1alpha1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1alpha1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1alpha1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttachment, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1alpha1.VolumeAttachment, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1alpha1.VolumeAttachmentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) - Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) - ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1alpha1.VolumeAttachment, err error) + Apply(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagev1alpha1.VolumeAttachment, *storagev1alpha1.VolumeAttachmentList, *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagev1alpha1.VolumeAttachment, *storagev1alpha1.VolumeAttachmentList, *applyconfigurationsstoragev1alpha1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1alpha1.VolumeAttachment { return &storagev1alpha1.VolumeAttachment{} }, + func() *storagev1alpha1.VolumeAttachmentList { return &storagev1alpha1.VolumeAttachmentList{} }, + gentype.PrefersProtobuf[*storagev1alpha1.VolumeAttachment](), + ), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. 
-func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1alpha1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go index 6633a4dc1..ef7d6f4ba 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. @@ -41,157 +38,34 @@ type VolumeAttributesClassesGetter interface { // VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. type VolumeAttributesClassInterface interface { - Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (*v1alpha1.VolumeAttributesClass, error) - Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1alpha1.VolumeAttributesClass, error) + Create(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (*storagev1alpha1.VolumeAttributesClass, error) + Update(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (*storagev1alpha1.VolumeAttributesClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttributesClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttributesClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1alpha1.VolumeAttributesClass, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1alpha1.VolumeAttributesClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) - Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1alpha1.VolumeAttributesClass, err error) + Apply(ctx context.Context, volumeAttributesClass *applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *storagev1alpha1.VolumeAttributesClass, err error) VolumeAttributesClassExpansion } // volumeAttributesClasses implements VolumeAttributesClassInterface type volumeAttributesClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagev1alpha1.VolumeAttributesClass, *storagev1alpha1.VolumeAttributesClassList, *applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration] } // newVolumeAttributesClasses returns a VolumeAttributesClasses func newVolumeAttributesClasses(c *StorageV1alpha1Client) *volumeAttributesClasses { return &volumeAttributesClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagev1alpha1.VolumeAttributesClass, 
*storagev1alpha1.VolumeAttributesClassList, *applyconfigurationsstoragev1alpha1.VolumeAttributesClassApplyConfiguration]( + "volumeattributesclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1alpha1.VolumeAttributesClass { return &storagev1alpha1.VolumeAttributesClass{} }, + func() *storagev1alpha1.VolumeAttributesClassList { return &storagev1alpha1.VolumeAttributesClassList{} }, + gentype.PrefersProtobuf[*storagev1alpha1.VolumeAttributesClass](), + ), } } - -// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. -func (c *volumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Get(). - Resource("volumeattributesclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. -func (c *volumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.VolumeAttributesClassList{} - err = c.client.Get(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. -func (c *volumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *volumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Post(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttributesClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *volumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Put(). - Resource("volumeattributesclasses"). - Name(volumeAttributesClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttributesClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. 
-func (c *volumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattributesclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattributesclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttributesClass. -func (c *volumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Patch(pt). - Resource("volumeattributesclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. -func (c *volumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - if volumeAttributesClass == nil { - return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttributesClass) - if err != nil { - return nil, err - } - name := volumeAttributesClass.Name - if name == nil { - return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") - } - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattributesclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go index 04e677db0..063fdb8d1 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIDriversGetter has a method to return a CSIDriverInterface. @@ -41,157 +38,34 @@ type CSIDriversGetter interface { // CSIDriverInterface has methods to work with CSIDriver resources. 
type CSIDriverInterface interface { - Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (*v1beta1.CSIDriver, error) - Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (*v1beta1.CSIDriver, error) + Create(ctx context.Context, cSIDriver *storagev1beta1.CSIDriver, opts v1.CreateOptions) (*storagev1beta1.CSIDriver, error) + Update(ctx context.Context, cSIDriver *storagev1beta1.CSIDriver, opts v1.UpdateOptions) (*storagev1beta1.CSIDriver, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSIDriver, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSIDriverList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.CSIDriver, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.CSIDriverList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) - Apply(ctx context.Context, cSIDriver *storagev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIDriver, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.CSIDriver, err error) + Apply(ctx context.Context, cSIDriver *applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.CSIDriver, err error) CSIDriverExpansion } // cSIDrivers implements CSIDriverInterface type cSIDrivers struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagev1beta1.CSIDriver, *storagev1beta1.CSIDriverList, *applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration] } // newCSIDrivers returns a CSIDrivers func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { return &cSIDrivers{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagev1beta1.CSIDriver, *storagev1beta1.CSIDriverList, *applyconfigurationsstoragev1beta1.CSIDriverApplyConfiguration]( + "csidrivers", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1beta1.CSIDriver { return &storagev1beta1.CSIDriver{} }, + func() *storagev1beta1.CSIDriverList { return &storagev1beta1.CSIDriverList{} }, + gentype.PrefersProtobuf[*storagev1beta1.CSIDriver](), + ), } } - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Get(). - Resource("csidrivers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSIDriverList{} - err = c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *cSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Post(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Put(). - Resource("csidrivers"). - Name(cSIDriver.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIDriver). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csidrivers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csidrivers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Patch(pt). - Resource("csidrivers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *cSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - result = &v1beta1.CSIDriver{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csidrivers"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go index c3760b5ce..5e8eb2e37 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSINodesGetter has a method to return a CSINodeInterface. @@ -41,157 +38,34 @@ type CSINodesGetter interface { // CSINodeInterface has methods to work with CSINode resources. type CSINodeInterface interface { - Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (*v1beta1.CSINode, error) - Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (*v1beta1.CSINode, error) + Create(ctx context.Context, cSINode *storagev1beta1.CSINode, opts v1.CreateOptions) (*storagev1beta1.CSINode, error) + Update(ctx context.Context, cSINode *storagev1beta1.CSINode, opts v1.UpdateOptions) (*storagev1beta1.CSINode, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSINode, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSINodeList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.CSINode, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.CSINodeList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) - Apply(ctx context.Context, cSINode *storagev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSINode, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.CSINode, err error) + Apply(ctx context.Context, cSINode *applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.CSINode, err error) CSINodeExpansion } // cSINodes implements CSINodeInterface type cSINodes struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagev1beta1.CSINode, *storagev1beta1.CSINodeList, *applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration] } // newCSINodes returns a CSINodes func newCSINodes(c *StorageV1beta1Client) *cSINodes { return &cSINodes{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagev1beta1.CSINode, *storagev1beta1.CSINodeList, *applyconfigurationsstoragev1beta1.CSINodeApplyConfiguration]( + "csinodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1beta1.CSINode { return 
&storagev1beta1.CSINode{} }, + func() *storagev1beta1.CSINodeList { return &storagev1beta1.CSINodeList{} }, + gentype.PrefersProtobuf[*storagev1beta1.CSINode](), + ), } } - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Get(). - Resource("csinodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSINodeList{} - err = c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Post(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Put(). - Resource("csinodes"). - Name(cSINode.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSINode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *cSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csinodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csinodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSINode. 
-func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Patch(pt). - Resource("csinodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. -func (c *cSINodes) Apply(ctx context.Context, cSINode *storagev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - result = &v1beta1.CSINode{} - err = c.client.Patch(types.ApplyPatchType). - Resource("csinodes"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go index 98ba936dc..d1f5a7029 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csistoragecapacity.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // CSIStorageCapacitiesGetter has a method to return a CSIStorageCapacityInterface. @@ -41,168 +38,34 @@ type CSIStorageCapacitiesGetter interface { // CSIStorageCapacityInterface has methods to work with CSIStorageCapacity resources. 
type CSIStorageCapacityInterface interface { - Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (*v1beta1.CSIStorageCapacity, error) - Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (*v1beta1.CSIStorageCapacity, error) + Create(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, opts v1.CreateOptions) (*storagev1beta1.CSIStorageCapacity, error) + Update(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (*storagev1beta1.CSIStorageCapacity, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSIStorageCapacity, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSIStorageCapacityList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.CSIStorageCapacity, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.CSIStorageCapacityList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) - Apply(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIStorageCapacity, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.CSIStorageCapacity, err error) + Apply(ctx context.Context, cSIStorageCapacity *applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.CSIStorageCapacity, err error) CSIStorageCapacityExpansion } // cSIStorageCapacities implements CSIStorageCapacityInterface type cSIStorageCapacities struct { - client rest.Interface - ns string + *gentype.ClientWithListAndApply[*storagev1beta1.CSIStorageCapacity, *storagev1beta1.CSIStorageCapacityList, *applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration] } // newCSIStorageCapacities returns a CSIStorageCapacities func newCSIStorageCapacities(c *StorageV1beta1Client, namespace string) *cSIStorageCapacities { return &cSIStorageCapacities{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithListAndApply[*storagev1beta1.CSIStorageCapacity, *storagev1beta1.CSIStorageCapacityList, *applyconfigurationsstoragev1beta1.CSIStorageCapacityApplyConfiguration]( + "csistoragecapacities", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *storagev1beta1.CSIStorageCapacity { return &storagev1beta1.CSIStorageCapacity{} }, + func() *storagev1beta1.CSIStorageCapacityList { return &storagev1beta1.CSIStorageCapacityList{} }, + gentype.PrefersProtobuf[*storagev1beta1.CSIStorageCapacity](), + ), } } - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *cSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *cSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIStorageCapacityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSIStorageCapacityList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *cSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Post(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *cSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Put(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(cSIStorageCapacity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(cSIStorageCapacity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *cSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("csistoragecapacities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *cSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) { - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Patch(pt). 
- Namespace(c.ns). - Resource("csistoragecapacities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. -func (c *cSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - result = &v1beta1.CSIStorageCapacity{} - err = c.client.Patch(types.ApplyPatchType). - Namespace(c.ns). - Resource("csistoragecapacities"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go deleted file mode 100644 index 4257aa618..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeCSIDrivers implements CSIDriverInterface -type FakeCSIDrivers struct { - Fake *FakeStorageV1beta1 -} - -var csidriversResource = v1beta1.SchemeGroupVersion.WithResource("csidrivers") - -var csidriversKind = v1beta1.SchemeGroupVersion.WithKind("CSIDriver") - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *FakeCSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(csidriversResource, name), &v1beta1.CSIDriver{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSIDriver), err -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *FakeCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(csidriversResource, csidriversKind, opts), &v1beta1.CSIDriverList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.CSIDriverList{ListMeta: obj.(*v1beta1.CSIDriverList).ListMeta} - for _, item := range obj.(*v1beta1.CSIDriverList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *FakeCSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csidriversResource, opts)) -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSIDriver), err -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *FakeCSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csidriversResource, cSIDriver), &v1beta1.CSIDriver{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSIDriver), err -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *FakeCSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(csidriversResource, name, opts), &v1beta1.CSIDriver{}) - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeCSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csidriversResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.CSIDriverList{}) - return err -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *FakeCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, name, pt, data, subresources...), &v1beta1.CSIDriver{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSIDriver), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIDriver. -func (c *FakeCSIDrivers) Apply(ctx context.Context, cSIDriver *storagev1beta1.CSIDriverApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIDriver, err error) { - if cSIDriver == nil { - return nil, fmt.Errorf("cSIDriver provided to Apply must not be nil") - } - data, err := json.Marshal(cSIDriver) - if err != nil { - return nil, err - } - name := cSIDriver.Name - if name == nil { - return nil, fmt.Errorf("cSIDriver.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csidriversResource, *name, types.ApplyPatchType, data), &v1beta1.CSIDriver{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSIDriver), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go deleted file mode 100644 index d38c104bc..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeCSINodes implements CSINodeInterface -type FakeCSINodes struct { - Fake *FakeStorageV1beta1 -} - -var csinodesResource = v1beta1.SchemeGroupVersion.WithResource("csinodes") - -var csinodesKind = v1beta1.SchemeGroupVersion.WithKind("CSINode") - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *FakeCSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootGetAction(csinodesResource, name), &v1beta1.CSINode{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSINode), err -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *FakeCSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(csinodesResource, csinodesKind, opts), &v1beta1.CSINodeList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.CSINodeList{ListMeta: obj.(*v1beta1.CSINodeList).ListMeta} - for _, item := range obj.(*v1beta1.CSINodeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *FakeCSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(csinodesResource, opts)) -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(csinodesResource, cSINode), &v1beta1.CSINode{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSINode), err -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *FakeCSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(csinodesResource, cSINode), &v1beta1.CSINode{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSINode), err -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. -func (c *FakeCSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(csinodesResource, name, opts), &v1beta1.CSINode{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(csinodesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.CSINodeList{}) - return err -} - -// Patch applies the patch and returns the patched cSINode. -func (c *FakeCSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, name, pt, data, subresources...), &v1beta1.CSINode{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSINode), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSINode. 
-func (c *FakeCSINodes) Apply(ctx context.Context, cSINode *storagev1beta1.CSINodeApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSINode, err error) { - if cSINode == nil { - return nil, fmt.Errorf("cSINode provided to Apply must not be nil") - } - data, err := json.Marshal(cSINode) - if err != nil { - return nil, err - } - name := cSINode.Name - if name == nil { - return nil, fmt.Errorf("cSINode.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(csinodesResource, *name, types.ApplyPatchType, data), &v1beta1.CSINode{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSINode), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go deleted file mode 100644 index d7bbb614b..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csistoragecapacity.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeCSIStorageCapacities implements CSIStorageCapacityInterface -type FakeCSIStorageCapacities struct { - Fake *FakeStorageV1beta1 - ns string -} - -var csistoragecapacitiesResource = v1beta1.SchemeGroupVersion.WithResource("csistoragecapacities") - -var csistoragecapacitiesKind = v1beta1.SchemeGroupVersion.WithKind("CSIStorageCapacity") - -// Get takes name of the cSIStorageCapacity, and returns the corresponding cSIStorageCapacity object, and an error if there is any. -func (c *FakeCSIStorageCapacities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(csistoragecapacitiesResource, c.ns, name), &v1beta1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSIStorageCapacity), err -} - -// List takes label and field selectors, and returns the list of CSIStorageCapacities that match those selectors. -func (c *FakeCSIStorageCapacities) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIStorageCapacityList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(csistoragecapacitiesResource, csistoragecapacitiesKind, c.ns, opts), &v1beta1.CSIStorageCapacityList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.CSIStorageCapacityList{ListMeta: obj.(*v1beta1.CSIStorageCapacityList).ListMeta} - for _, item := range obj.(*v1beta1.CSIStorageCapacityList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested cSIStorageCapacities. -func (c *FakeCSIStorageCapacities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(csistoragecapacitiesResource, c.ns, opts)) - -} - -// Create takes the representation of a cSIStorageCapacity and creates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Create(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.CreateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1beta1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSIStorageCapacity), err -} - -// Update takes the representation of a cSIStorageCapacity and updates it. Returns the server's representation of the cSIStorageCapacity, and an error, if there is any. -func (c *FakeCSIStorageCapacities) Update(ctx context.Context, cSIStorageCapacity *v1beta1.CSIStorageCapacity, opts v1.UpdateOptions) (result *v1beta1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(csistoragecapacitiesResource, c.ns, cSIStorageCapacity), &v1beta1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSIStorageCapacity), err -} - -// Delete takes name of the cSIStorageCapacity and deletes it. Returns an error if one occurs. -func (c *FakeCSIStorageCapacities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(csistoragecapacitiesResource, c.ns, name, opts), &v1beta1.CSIStorageCapacity{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeCSIStorageCapacities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(csistoragecapacitiesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.CSIStorageCapacityList{}) - return err -} - -// Patch applies the patch and returns the patched cSIStorageCapacity. -func (c *FakeCSIStorageCapacities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIStorageCapacity, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, name, pt, data, subresources...), &v1beta1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSIStorageCapacity), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied cSIStorageCapacity. 
-func (c *FakeCSIStorageCapacities) Apply(ctx context.Context, cSIStorageCapacity *storagev1beta1.CSIStorageCapacityApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CSIStorageCapacity, err error) { - if cSIStorageCapacity == nil { - return nil, fmt.Errorf("cSIStorageCapacity provided to Apply must not be nil") - } - data, err := json.Marshal(cSIStorageCapacity) - if err != nil { - return nil, err - } - name := cSIStorageCapacity.Name - if name == nil { - return nil, fmt.Errorf("cSIStorageCapacity.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(csistoragecapacitiesResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.CSIStorageCapacity{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.CSIStorageCapacity), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go deleted file mode 100644 index 6b5bb02fd..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeStorageV1beta1 struct { - *testing.Fake -} - -func (c *FakeStorageV1beta1) CSIDrivers() v1beta1.CSIDriverInterface { - return &FakeCSIDrivers{c} -} - -func (c *FakeStorageV1beta1) CSINodes() v1beta1.CSINodeInterface { - return &FakeCSINodes{c} -} - -func (c *FakeStorageV1beta1) CSIStorageCapacities(namespace string) v1beta1.CSIStorageCapacityInterface { - return &FakeCSIStorageCapacities{c, namespace} -} - -func (c *FakeStorageV1beta1) StorageClasses() v1beta1.StorageClassInterface { - return &FakeStorageClasses{c} -} - -func (c *FakeStorageV1beta1) VolumeAttachments() v1beta1.VolumeAttachmentInterface { - return &FakeVolumeAttachments{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeStorageV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go deleted file mode 100644 index 869e58b4f..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeStorageClasses implements StorageClassInterface -type FakeStorageClasses struct { - Fake *FakeStorageV1beta1 -} - -var storageclassesResource = v1beta1.SchemeGroupVersion.WithResource("storageclasses") - -var storageclassesKind = v1beta1.SchemeGroupVersion.WithKind("StorageClass") - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *FakeStorageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1beta1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StorageClass), err -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *FakeStorageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1beta1.StorageClassList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.StorageClassList{ListMeta: obj.(*v1beta1.StorageClassList).ListMeta} - for _, item := range obj.(*v1beta1.StorageClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *FakeStorageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts)) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StorageClass), err -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *FakeStorageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StorageClass), err -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *FakeStorageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(storageclassesResource, name, opts), &v1beta1.StorageClass{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStorageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{}) - return err -} - -// Patch applies the patch and returns the patched storageClass. -func (c *FakeStorageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, pt, data, subresources...), &v1beta1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StorageClass), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *FakeStorageClasses) Apply(ctx context.Context, storageClass *storagev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, *name, types.ApplyPatchType, data), &v1beta1.StorageClass{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.StorageClass), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go deleted file mode 100644 index e2b4a2eb1..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" - testing "k8s.io/client-go/testing" -) - -// FakeVolumeAttachments implements VolumeAttachmentInterface -type FakeVolumeAttachments struct { - Fake *FakeStorageV1beta1 -} - -var volumeattachmentsResource = v1beta1.SchemeGroupVersion.WithResource("volumeattachments") - -var volumeattachmentsKind = v1beta1.SchemeGroupVersion.WithKind("VolumeAttachment") - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *FakeVolumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *FakeVolumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1beta1.VolumeAttachmentList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.VolumeAttachmentList{ListMeta: obj.(*v1beta1.VolumeAttachmentList).ListMeta} - for _, item := range obj.(*v1beta1.VolumeAttachmentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *FakeVolumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts)) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *FakeVolumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeVolumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1beta1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *FakeVolumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(volumeattachmentsResource, name, opts), &v1beta1.VolumeAttachment{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVolumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.VolumeAttachmentList{}) - return err -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *FakeVolumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, pt, data, subresources...), &v1beta1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *FakeVolumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data), &v1beta1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeAttachment), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeVolumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, *name, types.ApplyPatchType, data, "status"), &v1beta1.VolumeAttachment{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VolumeAttachment), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go index 1a202a928..ebf78e10b 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -27,3 +27,5 @@ type CSIStorageCapacityExpansion interface{} type StorageClassExpansion interface{} type VolumeAttachmentExpansion interface{} + +type VolumeAttributesClassExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 4c7604bd2..63b1d42a3 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - "net/http" + http "net/http" - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/client-go/kubernetes/scheme" + storagev1beta1 "k8s.io/api/storage/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -33,6 +33,7 @@ type StorageV1beta1Interface interface { CSIStorageCapacitiesGetter StorageClassesGetter VolumeAttachmentsGetter + VolumeAttributesClassesGetter } // StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group. @@ -60,6 +61,10 @@ func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface { return newVolumeAttachments(c) } +func (c *StorageV1beta1Client) VolumeAttributesClasses() VolumeAttributesClassInterface { + return newVolumeAttributesClasses(c) +} + // NewForConfig creates a new StorageV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). @@ -105,10 +110,10 @@ func New(c rest.Interface) *StorageV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := storagev1beta1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index 9b4ef231c..341d5ba82 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageClassesGetter has a method to return a StorageClassInterface. @@ -41,157 +38,34 @@ type StorageClassesGetter interface { // StorageClassInterface has methods to work with StorageClass resources. type StorageClassInterface interface { - Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (*v1beta1.StorageClass, error) - Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (*v1beta1.StorageClass, error) + Create(ctx context.Context, storageClass *storagev1beta1.StorageClass, opts v1.CreateOptions) (*storagev1beta1.StorageClass, error) + Update(ctx context.Context, storageClass *storagev1beta1.StorageClass, opts v1.UpdateOptions) (*storagev1beta1.StorageClass, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StorageClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StorageClassList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.StorageClass, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.StorageClassList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) - Apply(ctx context.Context, storageClass *storagev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StorageClass, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.StorageClass, err error) + Apply(ctx context.Context, storageClass *applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.StorageClass, err error) StorageClassExpansion } // storageClasses implements StorageClassInterface type storageClasses struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagev1beta1.StorageClass, *storagev1beta1.StorageClassList, *applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration] } // newStorageClasses returns a StorageClasses func newStorageClasses(c *StorageV1beta1Client) *storageClasses { return &storageClasses{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagev1beta1.StorageClass, *storagev1beta1.StorageClassList, *applyconfigurationsstoragev1beta1.StorageClassApplyConfiguration]( + "storageclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1beta1.StorageClass { return &storagev1beta1.StorageClass{} }, + func() *storagev1beta1.StorageClassList { return &storagev1beta1.StorageClassList{} }, + gentype.PrefersProtobuf[*storagev1beta1.StorageClass](), + ), } } - -// Get takes name of the 
storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Post(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageClass. 
-func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageClass. -func (c *storageClasses) Apply(ctx context.Context, storageClass *storagev1beta1.StorageClassApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.StorageClass, err error) { - if storageClass == nil { - return nil, fmt.Errorf("storageClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageClass) - if err != nil { - return nil, err - } - name := storageClass.Name - if name == nil { - return nil, fmt.Errorf("storageClass.Name must be provided to Apply") - } - result = &v1beta1.StorageClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index 35a8b64fc..42c1bd7e0 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -19,18 +19,15 @@ limitations under the License. package v1beta1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1beta1 "k8s.io/api/storage/v1beta1" + storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. @@ -41,203 +38,38 @@ type VolumeAttachmentsGetter interface { // VolumeAttachmentInterface has methods to work with VolumeAttachment resources. type VolumeAttachmentInterface interface { - Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (*v1beta1.VolumeAttachment, error) - Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) - UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error) + Create(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachment, opts v1.CreateOptions) (*storagev1beta1.VolumeAttachment, error) + Update(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1beta1.VolumeAttachment, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachment, opts v1.UpdateOptions) (*storagev1beta1.VolumeAttachment, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttachment, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.VolumeAttachment, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.VolumeAttachmentList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) - Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) - ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.VolumeAttachment, err error) + Apply(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.VolumeAttachment, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). + ApplyStatus(ctx context.Context, volumeAttachment *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.VolumeAttachment, err error) VolumeAttachmentExpansion } // volumeAttachments implements VolumeAttachmentInterface type volumeAttachments struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagev1beta1.VolumeAttachment, *storagev1beta1.VolumeAttachmentList, *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration] } // newVolumeAttachments returns a VolumeAttachments func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments { return &volumeAttachments{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagev1beta1.VolumeAttachment, *storagev1beta1.VolumeAttachmentList, *applyconfigurationsstoragev1beta1.VolumeAttachmentApplyConfiguration]( + "volumeattachments", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1beta1.VolumeAttachment { return &storagev1beta1.VolumeAttachment{} }, + func() *storagev1beta1.VolumeAttachmentList { return &storagev1beta1.VolumeAttachmentList{} }, + gentype.PrefersProtobuf[*storagev1beta1.VolumeAttachment](), + ), } } - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. 
-func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttachment). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. 
-func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttachment. -func (c *volumeAttachments) Apply(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *volumeAttachments) ApplyStatus(ctx context.Context, volumeAttachment *storagev1beta1.VolumeAttachmentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.VolumeAttachment, err error) { - if volumeAttachment == nil { - return nil, fmt.Errorf("volumeAttachment provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttachment) - if err != nil { - return nil, err - } - - name := volumeAttachment.Name - if name == nil { - return nil, fmt.Errorf("volumeAttachment.Name must be provided to Apply") - } - - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattachments"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 000000000..09f9f1178 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + context "context" + + storagev1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1" + gentype "k8s.io/client-go/gentype" + scheme "k8s.io/client-go/kubernetes/scheme" +) + +// VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. +// A group's client should implement this interface. +type VolumeAttributesClassesGetter interface { + VolumeAttributesClasses() VolumeAttributesClassInterface +} + +// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. +type VolumeAttributesClassInterface interface { + Create(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClass, opts v1.CreateOptions) (*storagev1beta1.VolumeAttributesClass, error) + Update(ctx context.Context, volumeAttributesClass *storagev1beta1.VolumeAttributesClass, opts v1.UpdateOptions) (*storagev1beta1.VolumeAttributesClass, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagev1beta1.VolumeAttributesClass, error) + List(ctx context.Context, opts v1.ListOptions) (*storagev1beta1.VolumeAttributesClassList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagev1beta1.VolumeAttributesClass, err error) + Apply(ctx context.Context, volumeAttributesClass *applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *storagev1beta1.VolumeAttributesClass, err error) + VolumeAttributesClassExpansion +} + +// volumeAttributesClasses implements VolumeAttributesClassInterface +type volumeAttributesClasses struct { + *gentype.ClientWithListAndApply[*storagev1beta1.VolumeAttributesClass, *storagev1beta1.VolumeAttributesClassList, *applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration] +} + +// newVolumeAttributesClasses returns a VolumeAttributesClasses +func newVolumeAttributesClasses(c *StorageV1beta1Client) *volumeAttributesClasses { + return &volumeAttributesClasses{ + gentype.NewClientWithListAndApply[*storagev1beta1.VolumeAttributesClass, *storagev1beta1.VolumeAttributesClassList, *applyconfigurationsstoragev1beta1.VolumeAttributesClassApplyConfiguration]( + "volumeattributesclasses", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagev1beta1.VolumeAttributesClass { return &storagev1beta1.VolumeAttributesClass{} }, + func() *storagev1beta1.VolumeAttributesClassList { return &storagev1beta1.VolumeAttributesClassList{} }, + gentype.PrefersProtobuf[*storagev1beta1.VolumeAttributesClass](), + ), + } +} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/doc.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go deleted file mode 100644 index 3ae8f4ae5..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeStoragemigrationV1alpha1 struct { - *testing.Fake -} - -func (c *FakeStoragemigrationV1alpha1) StorageVersionMigrations() v1alpha1.StorageVersionMigrationInterface { - return &FakeStorageVersionMigrations{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeStoragemigrationV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go deleted file mode 100644 index 9b5da88c7..000000000 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1alpha1 "k8s.io/api/storagemigration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" - testing "k8s.io/client-go/testing" -) - -// FakeStorageVersionMigrations implements StorageVersionMigrationInterface -type FakeStorageVersionMigrations struct { - Fake *FakeStoragemigrationV1alpha1 -} - -var storageversionmigrationsResource = v1alpha1.SchemeGroupVersion.WithResource("storageversionmigrations") - -var storageversionmigrationsKind = v1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigration") - -// Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any. -func (c *FakeStorageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(storageversionmigrationsResource, name), &v1alpha1.StorageVersionMigration{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors. -func (c *FakeStorageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(storageversionmigrationsResource, storageversionmigrationsKind, opts), &v1alpha1.StorageVersionMigrationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.StorageVersionMigrationList{ListMeta: obj.(*v1alpha1.StorageVersionMigrationList).ListMeta} - for _, item := range obj.(*v1alpha1.StorageVersionMigrationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested storageVersionMigrations. -func (c *FakeStorageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(storageversionmigrationsResource, opts)) -} - -// Create takes the representation of a storageVersionMigration and creates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. -func (c *FakeStorageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(storageversionmigrationsResource, storageVersionMigration), &v1alpha1.StorageVersionMigration{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. 
-func (c *FakeStorageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(storageversionmigrationsResource, storageVersionMigration), &v1alpha1.StorageVersionMigration{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeStorageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(storageversionmigrationsResource, "status", storageVersionMigration), &v1alpha1.StorageVersionMigration{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// Delete takes name of the storageVersionMigration and deletes it. Returns an error if one occurs. -func (c *FakeStorageVersionMigrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(storageversionmigrationsResource, name, opts), &v1alpha1.StorageVersionMigration{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeStorageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(storageversionmigrationsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.StorageVersionMigrationList{}) - return err -} - -// Patch applies the patch and returns the patched storageVersionMigration. -func (c *FakeStorageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionmigrationsResource, name, pt, data, subresources...), &v1alpha1.StorageVersionMigration{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersionMigration. -func (c *FakeStorageVersionMigrations) Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) { - if storageVersionMigration == nil { - return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil") - } - data, err := json.Marshal(storageVersionMigration) - if err != nil { - return nil, err - } - name := storageVersionMigration.Name - if name == nil { - return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionmigrationsResource, *name, types.ApplyPatchType, data), &v1alpha1.StorageVersionMigration{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} - -// ApplyStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakeStorageVersionMigrations) ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) { - if storageVersionMigration == nil { - return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil") - } - data, err := json.Marshal(storageVersionMigration) - if err != nil { - return nil, err - } - name := storageVersionMigration.Name - if name == nil { - return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(storageversionmigrationsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.StorageVersionMigration{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.StorageVersionMigration), err -} diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go index 613e45355..dcd5a4bf8 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "k8s.io/api/storagemigration/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *StoragemigrationV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := storagemigrationv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go index be66a5b94..5c6981ec8 100644 --- a/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go +++ b/constraint/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go @@ -19,18 +19,15 @@ limitations under the License. 
package v1alpha1 import ( - "context" - json "encoding/json" - "fmt" - "time" + context "context" - v1alpha1 "k8s.io/api/storagemigration/v1alpha1" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" + applyconfigurationsstoragemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1" + gentype "k8s.io/client-go/gentype" scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" ) // StorageVersionMigrationsGetter has a method to return a StorageVersionMigrationInterface. @@ -41,203 +38,42 @@ type StorageVersionMigrationsGetter interface { // StorageVersionMigrationInterface has methods to work with StorageVersionMigration resources. type StorageVersionMigrationInterface interface { - Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*v1alpha1.StorageVersionMigration, error) - Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) - UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + Create(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error) + Update(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersionMigration, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionMigrationList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error) + List(ctx context.Context, opts v1.ListOptions) (*storagemigrationv1alpha1.StorageVersionMigrationList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) - Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) - ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagemigrationv1alpha1.StorageVersionMigration, err error) + Apply(ctx context.Context, storageVersionMigration *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *storagemigrationv1alpha1.StorageVersionMigration, err error) + // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+ ApplyStatus(ctx context.Context, storageVersionMigration *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *storagemigrationv1alpha1.StorageVersionMigration, err error) StorageVersionMigrationExpansion } // storageVersionMigrations implements StorageVersionMigrationInterface type storageVersionMigrations struct { - client rest.Interface + *gentype.ClientWithListAndApply[*storagemigrationv1alpha1.StorageVersionMigration, *storagemigrationv1alpha1.StorageVersionMigrationList, *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration] } // newStorageVersionMigrations returns a StorageVersionMigrations func newStorageVersionMigrations(c *StoragemigrationV1alpha1Client) *storageVersionMigrations { return &storageVersionMigrations{ - client: c.RESTClient(), + gentype.NewClientWithListAndApply[*storagemigrationv1alpha1.StorageVersionMigration, *storagemigrationv1alpha1.StorageVersionMigrationList, *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]( + "storageversionmigrations", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *storagemigrationv1alpha1.StorageVersionMigration { + return &storagemigrationv1alpha1.StorageVersionMigration{} + }, + func() *storagemigrationv1alpha1.StorageVersionMigrationList { + return &storagemigrationv1alpha1.StorageVersionMigrationList{} + }, + gentype.PrefersProtobuf[*storagemigrationv1alpha1.StorageVersionMigration](), + ), } } - -// Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any. -func (c *storageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) { - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Get(). - Resource("storageversionmigrations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors. -func (c *storageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.StorageVersionMigrationList{} - err = c.client.Get(). - Resource("storageversionmigrations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageVersionMigrations. -func (c *storageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("storageversionmigrations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a storageVersionMigration and creates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. 
-func (c *storageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Post(). - Resource("storageversionmigrations"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersionMigration). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. -func (c *storageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Put(). - Resource("storageversionmigrations"). - Name(storageVersionMigration.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersionMigration). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *storageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Put(). - Resource("storageversionmigrations"). - Name(storageVersionMigration.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(storageVersionMigration). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the storageVersionMigration and deletes it. Returns an error if one occurs. -func (c *storageVersionMigrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageversionmigrations"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("storageversionmigrations"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched storageVersionMigration. -func (c *storageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) { - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Patch(pt). - Resource("storageversionmigrations"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied storageVersionMigration. 
-func (c *storageVersionMigrations) Apply(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) { - if storageVersionMigration == nil { - return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersionMigration) - if err != nil { - return nil, err - } - name := storageVersionMigration.Name - if name == nil { - return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") - } - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversionmigrations"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *storageVersionMigrations) ApplyStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.StorageVersionMigration, err error) { - if storageVersionMigration == nil { - return nil, fmt.Errorf("storageVersionMigration provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(storageVersionMigration) - if err != nil { - return nil, err - } - - name := storageVersionMigration.Name - if name == nil { - return nil, fmt.Errorf("storageVersionMigration.Name must be provided to Apply") - } - - result = &v1alpha1.StorageVersionMigration{} - err = c.client.Patch(types.ApplyPatchType). - Resource("storageversionmigrations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go index fe9e27985..b3efc72a4 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/mutatingwebhookconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. @@ -30,39 +30,19 @@ import ( type MutatingWebhookConfigurationLister interface { // List lists all MutatingWebhookConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1.MutatingWebhookConfiguration, err error) // Get retrieves the MutatingWebhookConfiguration from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.MutatingWebhookConfiguration, error) + Get(name string) (*admissionregistrationv1.MutatingWebhookConfiguration, error) MutatingWebhookConfigurationListerExpansion } // mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. type mutatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*admissionregistrationv1.MutatingWebhookConfiguration] } // NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { - return &mutatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all MutatingWebhookConfigurations in the indexer. -func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.MutatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.MutatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the MutatingWebhookConfiguration from the index for a given name. -func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1.MutatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("mutatingwebhookconfiguration"), name) - } - return obj.(*v1.MutatingWebhookConfiguration), nil + return &mutatingWebhookConfigurationLister{listers.New[*admissionregistrationv1.MutatingWebhookConfiguration](indexer, admissionregistrationv1.Resource("mutatingwebhookconfiguration"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go index fff072f4c..07a181cd4 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies. @@ -30,39 +30,19 @@ import ( type ValidatingAdmissionPolicyLister interface { // List lists all ValidatingAdmissionPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicy, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1.ValidatingAdmissionPolicy, err error) // Get retrieves the ValidatingAdmissionPolicy from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ValidatingAdmissionPolicy, error) + Get(name string) (*admissionregistrationv1.ValidatingAdmissionPolicy, error) ValidatingAdmissionPolicyListerExpansion } // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. 
type validatingAdmissionPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*admissionregistrationv1.ValidatingAdmissionPolicy] } // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicies in the indexer. -func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ValidatingAdmissionPolicy)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicy from the index for a given name. -func (s *validatingAdmissionPolicyLister) Get(name string) (*v1.ValidatingAdmissionPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("validatingadmissionpolicy"), name) - } - return obj.(*v1.ValidatingAdmissionPolicy), nil + return &validatingAdmissionPolicyLister{listers.New[*admissionregistrationv1.ValidatingAdmissionPolicy](indexer, admissionregistrationv1.Resource("validatingadmissionpolicy"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go index 07856981e..131eb12b2 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings. @@ -30,39 +30,19 @@ import ( type ValidatingAdmissionPolicyBindingLister interface { // List lists all ValidatingAdmissionPolicyBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicyBinding, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1.ValidatingAdmissionPolicyBinding, err error) // Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ValidatingAdmissionPolicyBinding, error) + Get(name string) (*admissionregistrationv1.ValidatingAdmissionPolicyBinding, error) ValidatingAdmissionPolicyBindingListerExpansion } // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. type validatingAdmissionPolicyBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*admissionregistrationv1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. 
func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicyBindings in the indexer. -func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1.ValidatingAdmissionPolicyBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ValidatingAdmissionPolicyBinding)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. -func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1.ValidatingAdmissionPolicyBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("validatingadmissionpolicybinding"), name) - } - return obj.(*v1.ValidatingAdmissionPolicyBinding), nil + return &validatingAdmissionPolicyBindingLister{listers.New[*admissionregistrationv1.ValidatingAdmissionPolicyBinding](indexer, admissionregistrationv1.Resource("validatingadmissionpolicybinding"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go index 1579a0ebb..50e1624c9 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingwebhookconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/admissionregistration/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations. @@ -30,39 +30,19 @@ import ( type ValidatingWebhookConfigurationLister interface { // List lists all ValidatingWebhookConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1.ValidatingWebhookConfiguration, err error) // Get retrieves the ValidatingWebhookConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ValidatingWebhookConfiguration, error) + Get(name string) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) ValidatingWebhookConfigurationListerExpansion } // validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. type validatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*admissionregistrationv1.ValidatingWebhookConfiguration] } // NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { - return &validatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all ValidatingWebhookConfigurations in the indexer. 
-func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1.ValidatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ValidatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. -func (s *validatingWebhookConfigurationLister) Get(name string) (*v1.ValidatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("validatingwebhookconfiguration"), name) - } - return obj.(*v1.ValidatingWebhookConfiguration), nil + return &validatingWebhookConfigurationLister{listers.New[*admissionregistrationv1.ValidatingWebhookConfiguration](indexer, admissionregistrationv1.Resource("validatingwebhookconfiguration"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go index 3f8b7819c..701784de0 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go @@ -18,6 +18,14 @@ limitations under the License. package v1alpha1 +// MutatingAdmissionPolicyListerExpansion allows custom methods to be added to +// MutatingAdmissionPolicyLister. +type MutatingAdmissionPolicyListerExpansion interface{} + +// MutatingAdmissionPolicyBindingListerExpansion allows custom methods to be added to +// MutatingAdmissionPolicyBindingLister. +type MutatingAdmissionPolicyBindingListerExpansion interface{} + // ValidatingAdmissionPolicyListerExpansion allows custom methods to be added to // ValidatingAdmissionPolicyLister. type ValidatingAdmissionPolicyListerExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go new file mode 100644 index 000000000..debeb79d2 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// MutatingAdmissionPolicyLister helps list MutatingAdmissionPolicies. +// All objects returned here must be treated as read-only. +type MutatingAdmissionPolicyLister interface { + // List lists all MutatingAdmissionPolicies in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*admissionregistrationv1alpha1.MutatingAdmissionPolicy, err error) + // Get retrieves the MutatingAdmissionPolicy from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*admissionregistrationv1alpha1.MutatingAdmissionPolicy, error) + MutatingAdmissionPolicyListerExpansion +} + +// mutatingAdmissionPolicyLister implements the MutatingAdmissionPolicyLister interface. +type mutatingAdmissionPolicyLister struct { + listers.ResourceIndexer[*admissionregistrationv1alpha1.MutatingAdmissionPolicy] +} + +// NewMutatingAdmissionPolicyLister returns a new MutatingAdmissionPolicyLister. +func NewMutatingAdmissionPolicyLister(indexer cache.Indexer) MutatingAdmissionPolicyLister { + return &mutatingAdmissionPolicyLister{listers.New[*admissionregistrationv1alpha1.MutatingAdmissionPolicy](indexer, admissionregistrationv1alpha1.Resource("mutatingadmissionpolicy"))} +} diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go new file mode 100644 index 000000000..bcad29048 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// MutatingAdmissionPolicyBindingLister helps list MutatingAdmissionPolicyBindings. +// All objects returned here must be treated as read-only. +type MutatingAdmissionPolicyBindingLister interface { + // List lists all MutatingAdmissionPolicyBindings in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, err error) + // Get retrieves the MutatingAdmissionPolicyBinding from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, error) + MutatingAdmissionPolicyBindingListerExpansion +} + +// mutatingAdmissionPolicyBindingLister implements the MutatingAdmissionPolicyBindingLister interface. +type mutatingAdmissionPolicyBindingLister struct { + listers.ResourceIndexer[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding] +} + +// NewMutatingAdmissionPolicyBindingLister returns a new MutatingAdmissionPolicyBindingLister. 
+func NewMutatingAdmissionPolicyBindingLister(indexer cache.Indexer) MutatingAdmissionPolicyBindingLister { + return &mutatingAdmissionPolicyBindingLister{listers.New[*admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding](indexer, admissionregistrationv1alpha1.Resource("mutatingadmissionpolicybinding"))} +} diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go index ae500183a..aa4d320cc 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicy.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies. @@ -30,39 +30,19 @@ import ( type ValidatingAdmissionPolicyLister interface { // List lists all ValidatingAdmissionPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicy, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, err error) // Get retrieves the ValidatingAdmissionPolicy from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ValidatingAdmissionPolicy, error) + Get(name string) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicy, error) ValidatingAdmissionPolicyListerExpansion } // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. type validatingAdmissionPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy] } // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicies in the indexer. -func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ValidatingAdmissionPolicy)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicy from the index for a given name. 
-func (s *validatingAdmissionPolicyLister) Get(name string) (*v1alpha1.ValidatingAdmissionPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("validatingadmissionpolicy"), name) - } - return obj.(*v1alpha1.ValidatingAdmissionPolicy), nil + return &validatingAdmissionPolicyLister{listers.New[*admissionregistrationv1alpha1.ValidatingAdmissionPolicy](indexer, admissionregistrationv1alpha1.Resource("validatingadmissionpolicy"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go index 552854daf..84edc9fb7 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings. @@ -30,39 +30,19 @@ import ( type ValidatingAdmissionPolicyBindingLister interface { // List lists all ValidatingAdmissionPolicyBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicyBinding, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, err error) // Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) + Get(name string) (*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, error) ValidatingAdmissionPolicyBindingListerExpansion } // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. type validatingAdmissionPolicyBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicyBindings in the indexer. -func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1alpha1.ValidatingAdmissionPolicyBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ValidatingAdmissionPolicyBinding)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. 
-func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1alpha1.ValidatingAdmissionPolicyBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("validatingadmissionpolicybinding"), name) - } - return obj.(*v1alpha1.ValidatingAdmissionPolicyBinding), nil + return &validatingAdmissionPolicyBindingLister{listers.New[*admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding](indexer, admissionregistrationv1alpha1.Resource("validatingadmissionpolicybinding"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 93c6096ee..67588f13b 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. @@ -30,39 +30,19 @@ import ( type MutatingWebhookConfigurationLister interface { // List lists all MutatingWebhookConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1beta1.MutatingWebhookConfiguration, err error) // Get retrieves the MutatingWebhookConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) + Get(name string) (*admissionregistrationv1beta1.MutatingWebhookConfiguration, error) MutatingWebhookConfigurationListerExpansion } // mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. type mutatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*admissionregistrationv1beta1.MutatingWebhookConfiguration] } // NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { - return &mutatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all MutatingWebhookConfigurations in the indexer. -func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.MutatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the MutatingWebhookConfiguration from the index for a given name. 
-func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("mutatingwebhookconfiguration"), name) - } - return obj.(*v1beta1.MutatingWebhookConfiguration), nil + return &mutatingWebhookConfigurationLister{listers.New[*admissionregistrationv1beta1.MutatingWebhookConfiguration](indexer, admissionregistrationv1beta1.Resource("mutatingwebhookconfiguration"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go index 7018b3cee..38237220b 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies. @@ -30,39 +30,19 @@ import ( type ValidatingAdmissionPolicyLister interface { // List lists all ValidatingAdmissionPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicy, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1beta1.ValidatingAdmissionPolicy, err error) // Get retrieves the ValidatingAdmissionPolicy from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ValidatingAdmissionPolicy, error) + Get(name string) (*admissionregistrationv1beta1.ValidatingAdmissionPolicy, error) ValidatingAdmissionPolicyListerExpansion } // validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface. type validatingAdmissionPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*admissionregistrationv1beta1.ValidatingAdmissionPolicy] } // NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister. func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister { - return &validatingAdmissionPolicyLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicies in the indexer. -func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ValidatingAdmissionPolicy)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicy from the index for a given name. 
-func (s *validatingAdmissionPolicyLister) Get(name string) (*v1beta1.ValidatingAdmissionPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("validatingadmissionpolicy"), name) - } - return obj.(*v1beta1.ValidatingAdmissionPolicy), nil + return &validatingAdmissionPolicyLister{listers.New[*admissionregistrationv1beta1.ValidatingAdmissionPolicy](indexer, admissionregistrationv1beta1.Resource("validatingadmissionpolicy"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go index 5fcebfd22..071b1188d 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings. @@ -30,39 +30,19 @@ import ( type ValidatingAdmissionPolicyBindingLister interface { // List lists all ValidatingAdmissionPolicyBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicyBinding, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, err error) // Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ValidatingAdmissionPolicyBinding, error) + Get(name string) (*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, error) ValidatingAdmissionPolicyBindingListerExpansion } // validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface. type validatingAdmissionPolicyBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding] } // NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister. func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister { - return &validatingAdmissionPolicyBindingLister{indexer: indexer} -} - -// List lists all ValidatingAdmissionPolicyBindings in the indexer. -func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicyBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ValidatingAdmissionPolicyBinding)) - }) - return ret, err -} - -// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name. 
-func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1beta1.ValidatingAdmissionPolicyBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("validatingadmissionpolicybinding"), name) - } - return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), nil + return &validatingAdmissionPolicyBindingLister{listers.New[*admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding](indexer, admissionregistrationv1beta1.Resource("validatingadmissionpolicybinding"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 7c17fccb2..b16546b6d 100644 --- a/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations. @@ -30,39 +30,19 @@ import ( type ValidatingWebhookConfigurationLister interface { // List lists all ValidatingWebhookConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) + List(selector labels.Selector) (ret []*admissionregistrationv1beta1.ValidatingWebhookConfiguration, err error) // Get retrieves the ValidatingWebhookConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) + Get(name string) (*admissionregistrationv1beta1.ValidatingWebhookConfiguration, error) ValidatingWebhookConfigurationListerExpansion } // validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. type validatingWebhookConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*admissionregistrationv1beta1.ValidatingWebhookConfiguration] } // NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { - return &validatingWebhookConfigurationLister{indexer: indexer} -} - -// List lists all ValidatingWebhookConfigurations in the indexer. -func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ValidatingWebhookConfiguration)) - }) - return ret, err -} - -// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. 
-func (s *validatingWebhookConfigurationLister) Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("validatingwebhookconfiguration"), name) - } - return obj.(*v1beta1.ValidatingWebhookConfiguration), nil + return &validatingWebhookConfigurationLister{listers.New[*admissionregistrationv1beta1.ValidatingWebhookConfiguration](indexer, admissionregistrationv1beta1.Resource("validatingwebhookconfiguration"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go b/constraint/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go index 9a6d74b2b..1d24fb893 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go +++ b/constraint/vendor/k8s.io/client-go/listers/apiserverinternal/v1alpha1/storageversion.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StorageVersionLister helps list StorageVersions. @@ -30,39 +30,19 @@ import ( type StorageVersionLister interface { // List lists all StorageVersions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.StorageVersion, err error) + List(selector labels.Selector) (ret []*apiserverinternalv1alpha1.StorageVersion, err error) // Get retrieves the StorageVersion from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.StorageVersion, error) + Get(name string) (*apiserverinternalv1alpha1.StorageVersion, error) StorageVersionListerExpansion } // storageVersionLister implements the StorageVersionLister interface. type storageVersionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*apiserverinternalv1alpha1.StorageVersion] } // NewStorageVersionLister returns a new StorageVersionLister. func NewStorageVersionLister(indexer cache.Indexer) StorageVersionLister { - return &storageVersionLister{indexer: indexer} -} - -// List lists all StorageVersions in the indexer. -func (s *storageVersionLister) List(selector labels.Selector) (ret []*v1alpha1.StorageVersion, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.StorageVersion)) - }) - return ret, err -} - -// Get retrieves the StorageVersion from the index for a given name. 
-func (s *storageVersionLister) Get(name string) (*v1alpha1.StorageVersion, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("storageversion"), name) - } - return obj.(*v1alpha1.StorageVersion), nil + return &storageVersionLister{listers.New[*apiserverinternalv1alpha1.StorageVersion](indexer, apiserverinternalv1alpha1.Resource("storageversion"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go index 9e2f97374..58ab1357e 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1 "k8s.io/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ControllerRevisionLister helps list ControllerRevisions. @@ -30,7 +30,7 @@ import ( type ControllerRevisionLister interface { // List lists all ControllerRevisions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1.ControllerRevision, err error) // ControllerRevisions returns an object that can list and get ControllerRevisions. ControllerRevisions(namespace string) ControllerRevisionNamespaceLister ControllerRevisionListerExpansion @@ -38,25 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{indexer: indexer} -} - -// List lists all ControllerRevisions in the indexer. -func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ControllerRevision)) - }) - return ret, err + return &controllerRevisionLister{listers.New[*appsv1.ControllerRevision](indexer, appsv1.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*appsv1.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -64,36 +56,15 @@ func (s *controllerRevisionLister) ControllerRevisions(namespace string) Control type ControllerRevisionNamespaceLister interface { // List lists all ControllerRevisions in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1.ControllerRevision, err error) // Get retrieves the ControllerRevision from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ControllerRevision, error) + Get(name string) (*appsv1.ControllerRevision, error) ControllerRevisionNamespaceListerExpansion } // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ControllerRevisions in the indexer for a given namespace. -func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ControllerRevision)) - }) - return ret, err -} - -// Get retrieves the ControllerRevision from the indexer for a given namespace and name. -func (s controllerRevisionNamespaceLister) Get(name string) (*v1.ControllerRevision, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("controllerrevision"), name) - } - return obj.(*v1.ControllerRevision), nil + listers.ResourceIndexer[*appsv1.ControllerRevision] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go index 061959e3d..b6ba54f80 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1 "k8s.io/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DaemonSetLister helps list DaemonSets. @@ -30,7 +30,7 @@ import ( type DaemonSetLister interface { // List lists all DaemonSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + List(selector labels.Selector) (ret []*appsv1.DaemonSet, err error) // DaemonSets returns an object that can list and get DaemonSets. DaemonSets(namespace string) DaemonSetNamespaceLister DaemonSetListerExpansion @@ -38,25 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{indexer: indexer} -} - -// List lists all DaemonSets in the indexer. -func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.DaemonSet)) - }) - return ret, err + return &daemonSetLister{listers.New[*appsv1.DaemonSet](indexer, appsv1.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. 
func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return daemonSetNamespaceLister{listers.NewNamespaced[*appsv1.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. @@ -64,36 +56,15 @@ func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister type DaemonSetNamespaceLister interface { // List lists all DaemonSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + List(selector labels.Selector) (ret []*appsv1.DaemonSet, err error) // Get retrieves the DaemonSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.DaemonSet, error) + Get(name string) (*appsv1.DaemonSet, error) DaemonSetNamespaceListerExpansion } // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all DaemonSets in the indexer for a given namespace. -func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.DaemonSet)) - }) - return ret, err -} - -// Get retrieves the DaemonSet from the indexer for a given namespace and name. -func (s daemonSetNamespaceLister) Get(name string) (*v1.DaemonSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("daemonset"), name) - } - return obj.(*v1.DaemonSet), nil + listers.ResourceIndexer[*appsv1.DaemonSet] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1/deployment.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1/deployment.go index 770403417..ed1b899e2 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1/deployment.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1 "k8s.io/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DeploymentLister helps list Deployments. @@ -30,7 +30,7 @@ import ( type DeploymentLister interface { // List lists all Deployments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1.Deployment, err error) // Deployments returns an object that can list and get Deployments. Deployments(namespace string) DeploymentNamespaceLister DeploymentListerExpansion @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. 
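Reviewer note: every regenerated lister in this vendor bump follows the same shape, so the exported surface (NewDeploymentLister, List, Get, the namespaced sub-lister) is unchanged and only the backing implementation moves to the generic helpers in k8s.io/client-go/listers. Below is a minimal sketch of consumer code that should behave the same before and after the bump; the deployment name and namespace are placeholders, not anything referenced by this PR.

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	appsv1listers "k8s.io/client-go/listers/apps/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Build an indexer the same way an informer would; the lister only reads from it.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	})

	// Same constructor as before the generics migration.
	lister := appsv1listers.NewDeploymentLister(indexer)

	// Cluster-wide List and namespaced Get keep their signatures.
	all, _ := lister.List(labels.Everything())
	fmt.Println("deployments in cache:", len(all))

	d, err := lister.Deployments("default").Get("web")
	if err != nil {
		panic(err)
	}
	fmt.Println("got:", d.Name)
}
```

Because the lister only reads from the cache.Indexer it is handed, this snippet compiles against both the old hand-written listers and the new generic-backed ones.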
-func (s *deploymentLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*appsv1.Deployment](indexer, appsv1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*appsv1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -64,36 +56,15 @@ func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceList type DeploymentNamespaceLister interface { // List lists all Deployments in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1.Deployment, err error) // Get retrieves the Deployment from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Deployment, error) + Get(name string) (*appsv1.Deployment, error) DeploymentNamespaceListerExpansion } // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. -func (s deploymentNamespaceLister) Get(name string) (*v1.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("deployment"), name) - } - return obj.(*v1.Deployment), nil + listers.ResourceIndexer[*appsv1.Deployment] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go index 3ca7757eb..68308e352 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1 "k8s.io/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ReplicaSetLister helps list ReplicaSets. @@ -30,7 +30,7 @@ import ( type ReplicaSetLister interface { // List lists all ReplicaSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + List(selector labels.Selector) (ret []*appsv1.ReplicaSet, err error) // ReplicaSets returns an object that can list and get ReplicaSets. 
ReplicaSets(namespace string) ReplicaSetNamespaceLister ReplicaSetListerExpansion @@ -38,25 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{indexer: indexer} -} - -// List lists all ReplicaSets in the indexer. -func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicaSet)) - }) - return ret, err + return &replicaSetLister{listers.New[*appsv1.ReplicaSet](indexer, appsv1.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicaSetNamespaceLister{listers.NewNamespaced[*appsv1.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -64,36 +56,15 @@ func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceList type ReplicaSetNamespaceLister interface { // List lists all ReplicaSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + List(selector labels.Selector) (ret []*appsv1.ReplicaSet, err error) // Get retrieves the ReplicaSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ReplicaSet, error) + Get(name string) (*appsv1.ReplicaSet, error) ReplicaSetNamespaceListerExpansion } // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicaSets in the indexer for a given namespace. -func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicaSet)) - }) - return ret, err -} - -// Get retrieves the ReplicaSet from the indexer for a given namespace and name. -func (s replicaSetNamespaceLister) Get(name string) (*v1.ReplicaSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("replicaset"), name) - } - return obj.(*v1.ReplicaSet), nil + listers.ResourceIndexer[*appsv1.ReplicaSet] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go index f6899d5ff..7366b5a3d 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1 "k8s.io/api/apps/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StatefulSetLister helps list StatefulSets. @@ -30,7 +30,7 @@ import ( type StatefulSetLister interface { // List lists all StatefulSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1.StatefulSet, err error) // StatefulSets returns an object that can list and get StatefulSets. StatefulSets(namespace string) StatefulSetNamespaceLister StatefulSetListerExpansion @@ -38,25 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{indexer: indexer} -} - -// List lists all StatefulSets in the indexer. -func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.StatefulSet)) - }) - return ret, err + return &statefulSetLister{listers.New[*appsv1.StatefulSet](indexer, appsv1.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return statefulSetNamespaceLister{listers.NewNamespaced[*appsv1.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -64,36 +56,15 @@ func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceL type StatefulSetNamespaceLister interface { // List lists all StatefulSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1.StatefulSet, err error) // Get retrieves the StatefulSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.StatefulSet, error) + Get(name string) (*appsv1.StatefulSet, error) StatefulSetNamespaceListerExpansion } // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all StatefulSets in the indexer for a given namespace. -func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.StatefulSet)) - }) - return ret, err -} - -// Get retrieves the StatefulSet from the indexer for a given namespace and name. 
-func (s statefulSetNamespaceLister) Get(name string) (*v1.StatefulSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("statefulset"), name) - } - return obj.(*v1.StatefulSet), nil + listers.ResourceIndexer[*appsv1.StatefulSet] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go index fc73de723..7c7f530b6 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1beta1 "k8s.io/api/apps/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ControllerRevisionLister helps list ControllerRevisions. @@ -30,7 +30,7 @@ import ( type ControllerRevisionLister interface { // List lists all ControllerRevisions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1beta1.ControllerRevision, err error) // ControllerRevisions returns an object that can list and get ControllerRevisions. ControllerRevisions(namespace string) ControllerRevisionNamespaceLister ControllerRevisionListerExpansion @@ -38,25 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1beta1.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{indexer: indexer} -} - -// List lists all ControllerRevisions in the indexer. -func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ControllerRevision)) - }) - return ret, err + return &controllerRevisionLister{listers.New[*appsv1beta1.ControllerRevision](indexer, appsv1beta1.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*appsv1beta1.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -64,36 +56,15 @@ func (s *controllerRevisionLister) ControllerRevisions(namespace string) Control type ControllerRevisionNamespaceLister interface { // List lists all ControllerRevisions in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1beta1.ControllerRevision, err error) // Get retrieves the ControllerRevision from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ControllerRevision, error) + Get(name string) (*appsv1beta1.ControllerRevision, error) ControllerRevisionNamespaceListerExpansion } // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ControllerRevisions in the indexer for a given namespace. -func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ControllerRevision)) - }) - return ret, err -} - -// Get retrieves the ControllerRevision from the indexer for a given namespace and name. -func (s controllerRevisionNamespaceLister) Get(name string) (*v1beta1.ControllerRevision, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("controllerrevision"), name) - } - return obj.(*v1beta1.ControllerRevision), nil + listers.ResourceIndexer[*appsv1beta1.ControllerRevision] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go index 3fb70794c..6803fc8cd 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1beta1 "k8s.io/api/apps/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DeploymentLister helps list Deployments. @@ -30,7 +30,7 @@ import ( type DeploymentLister interface { // List lists all Deployments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1beta1.Deployment, err error) // Deployments returns an object that can list and get Deployments. Deployments(namespace string) DeploymentNamespaceLister DeploymentListerExpansion @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1beta1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. 
-func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*appsv1beta1.Deployment](indexer, appsv1beta1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*appsv1beta1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -64,36 +56,15 @@ func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceList type DeploymentNamespaceLister interface { // List lists all Deployments in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1beta1.Deployment, err error) // Get retrieves the Deployment from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Deployment, error) + Get(name string) (*appsv1beta1.Deployment, error) DeploymentNamespaceListerExpansion } // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. -func (s deploymentNamespaceLister) Get(name string) (*v1beta1.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("deployment"), name) - } - return obj.(*v1beta1.Deployment), nil + listers.ResourceIndexer[*appsv1beta1.Deployment] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go index e3556bc39..040b65dc1 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1beta1 "k8s.io/api/apps/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StatefulSetLister helps list StatefulSets. @@ -30,7 +30,7 @@ import ( type StatefulSetLister interface { // List lists all StatefulSets in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1beta1.StatefulSet, err error) // StatefulSets returns an object that can list and get StatefulSets. StatefulSets(namespace string) StatefulSetNamespaceLister StatefulSetListerExpansion @@ -38,25 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1beta1.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{indexer: indexer} -} - -// List lists all StatefulSets in the indexer. -func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.StatefulSet)) - }) - return ret, err + return &statefulSetLister{listers.New[*appsv1beta1.StatefulSet](indexer, appsv1beta1.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return statefulSetNamespaceLister{listers.NewNamespaced[*appsv1beta1.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -64,36 +56,15 @@ func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceL type StatefulSetNamespaceLister interface { // List lists all StatefulSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1beta1.StatefulSet, err error) // Get retrieves the StatefulSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.StatefulSet, error) + Get(name string) (*appsv1beta1.StatefulSet, error) StatefulSetNamespaceListerExpansion } // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all StatefulSets in the indexer for a given namespace. -func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.StatefulSet)) - }) - return ret, err -} - -// Get retrieves the StatefulSet from the indexer for a given namespace and name. 
-func (s statefulSetNamespaceLister) Get(name string) (*v1beta1.StatefulSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("statefulset"), name) - } - return obj.(*v1beta1.StatefulSet), nil + listers.ResourceIndexer[*appsv1beta1.StatefulSet] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go index da2ce8600..9e72122f3 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1beta2 "k8s.io/api/apps/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ControllerRevisionLister helps list ControllerRevisions. @@ -30,7 +30,7 @@ import ( type ControllerRevisionLister interface { // List lists all ControllerRevisions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1beta2.ControllerRevision, err error) // ControllerRevisions returns an object that can list and get ControllerRevisions. ControllerRevisions(namespace string) ControllerRevisionNamespaceLister ControllerRevisionListerExpansion @@ -38,25 +38,17 @@ type ControllerRevisionLister interface { // controllerRevisionLister implements the ControllerRevisionLister interface. type controllerRevisionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1beta2.ControllerRevision] } // NewControllerRevisionLister returns a new ControllerRevisionLister. func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { - return &controllerRevisionLister{indexer: indexer} -} - -// List lists all ControllerRevisions in the indexer. -func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ControllerRevision)) - }) - return ret, err + return &controllerRevisionLister{listers.New[*appsv1beta2.ControllerRevision](indexer, appsv1beta2.Resource("controllerrevision"))} } // ControllerRevisions returns an object that can list and get ControllerRevisions. func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { - return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} + return controllerRevisionNamespaceLister{listers.NewNamespaced[*appsv1beta2.ControllerRevision](s.ResourceIndexer, namespace)} } // ControllerRevisionNamespaceLister helps list and get ControllerRevisions. @@ -64,36 +56,15 @@ func (s *controllerRevisionLister) ControllerRevisions(namespace string) Control type ControllerRevisionNamespaceLister interface { // List lists all ControllerRevisions in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
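One behavioral detail worth checking while reviewing the deleted Get bodies: the per-type errors.NewNotFound(...) construction goes away, but the same GroupResource is now passed to listers.New, so a cache miss should still surface as a standard NotFound status error that apierrors.IsNotFound recognizes. A small hedged check (the namespace and name are purely illustrative):

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	appsv1beta2listers "k8s.io/client-go/listers/apps/v1beta2"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Empty cache: Get must fail, and callers typically branch on IsNotFound.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	lister := appsv1beta2listers.NewDaemonSetLister(indexer)

	_, err := lister.DaemonSets("kube-system").Get("missing")
	if apierrors.IsNotFound(err) {
		fmt.Println("still a NotFound error:", err)
	} else {
		fmt.Println("unexpected error shape:", err)
	}
}
```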
- List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) + List(selector labels.Selector) (ret []*appsv1beta2.ControllerRevision, err error) // Get retrieves the ControllerRevision from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.ControllerRevision, error) + Get(name string) (*appsv1beta2.ControllerRevision, error) ControllerRevisionNamespaceListerExpansion } // controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister // interface. type controllerRevisionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ControllerRevisions in the indexer for a given namespace. -func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ControllerRevision)) - }) - return ret, err -} - -// Get retrieves the ControllerRevision from the indexer for a given namespace and name. -func (s controllerRevisionNamespaceLister) Get(name string) (*v1beta2.ControllerRevision, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("controllerrevision"), name) - } - return obj.(*v1beta2.ControllerRevision), nil + listers.ResourceIndexer[*appsv1beta2.ControllerRevision] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go index 4b7aedd75..16b76b613 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1beta2 "k8s.io/api/apps/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DaemonSetLister helps list DaemonSets. @@ -30,7 +30,7 @@ import ( type DaemonSetLister interface { // List lists all DaemonSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.DaemonSet, err error) // DaemonSets returns an object that can list and get DaemonSets. DaemonSets(namespace string) DaemonSetNamespaceLister DaemonSetListerExpansion @@ -38,25 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1beta2.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{indexer: indexer} -} - -// List lists all DaemonSets in the indexer. 
-func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.DaemonSet)) - }) - return ret, err + return &daemonSetLister{listers.New[*appsv1beta2.DaemonSet](indexer, appsv1beta2.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return daemonSetNamespaceLister{listers.NewNamespaced[*appsv1beta2.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. @@ -64,36 +56,15 @@ func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister type DaemonSetNamespaceLister interface { // List lists all DaemonSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.DaemonSet, err error) // Get retrieves the DaemonSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.DaemonSet, error) + Get(name string) (*appsv1beta2.DaemonSet, error) DaemonSetNamespaceListerExpansion } // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all DaemonSets in the indexer for a given namespace. -func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.DaemonSet)) - }) - return ret, err -} - -// Get retrieves the DaemonSet from the indexer for a given namespace and name. -func (s daemonSetNamespaceLister) Get(name string) (*v1beta2.DaemonSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("daemonset"), name) - } - return obj.(*v1beta2.DaemonSet), nil + listers.ResourceIndexer[*appsv1beta2.DaemonSet] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go index c2857bbc3..c0b5b2ab1 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1beta2 "k8s.io/api/apps/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DeploymentLister helps list Deployments. @@ -30,7 +30,7 @@ import ( type DeploymentLister interface { // List lists all Deployments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1beta2.Deployment, err error) // Deployments returns an object that can list and get Deployments. 
Deployments(namespace string) DeploymentNamespaceLister DeploymentListerExpansion @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1beta2.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. -func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*appsv1beta2.Deployment](indexer, appsv1beta2.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*appsv1beta2.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -64,36 +56,15 @@ func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceList type DeploymentNamespaceLister interface { // List lists all Deployments in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) + List(selector labels.Selector) (ret []*appsv1beta2.Deployment, err error) // Get retrieves the Deployment from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.Deployment, error) + Get(name string) (*appsv1beta2.Deployment, error) DeploymentNamespaceListerExpansion } // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. -func (s deploymentNamespaceLister) Get(name string) (*v1beta2.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("deployment"), name) - } - return obj.(*v1beta2.Deployment), nil + listers.ResourceIndexer[*appsv1beta2.Deployment] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go index 26b350ce8..7d5f3c80e 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1beta2 "k8s.io/api/apps/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ReplicaSetLister helps list ReplicaSets. @@ -30,7 +30,7 @@ import ( type ReplicaSetLister interface { // List lists all ReplicaSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.ReplicaSet, err error) // ReplicaSets returns an object that can list and get ReplicaSets. ReplicaSets(namespace string) ReplicaSetNamespaceLister ReplicaSetListerExpansion @@ -38,25 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1beta2.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{indexer: indexer} -} - -// List lists all ReplicaSets in the indexer. -func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ReplicaSet)) - }) - return ret, err + return &replicaSetLister{listers.New[*appsv1beta2.ReplicaSet](indexer, appsv1beta2.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicaSetNamespaceLister{listers.NewNamespaced[*appsv1beta2.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -64,36 +56,15 @@ func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceList type ReplicaSetNamespaceLister interface { // List lists all ReplicaSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.ReplicaSet, err error) // Get retrieves the ReplicaSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.ReplicaSet, error) + Get(name string) (*appsv1beta2.ReplicaSet, error) ReplicaSetNamespaceListerExpansion } // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicaSets in the indexer for a given namespace. -func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.ReplicaSet)) - }) - return ret, err -} - -// Get retrieves the ReplicaSet from the indexer for a given namespace and name. 
-func (s replicaSetNamespaceLister) Get(name string) (*v1beta2.ReplicaSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("replicaset"), name) - } - return obj.(*v1beta2.ReplicaSet), nil + listers.ResourceIndexer[*appsv1beta2.ReplicaSet] } diff --git a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go index fbbaf0133..04dffb2d8 100644 --- a/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go +++ b/constraint/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + appsv1beta2 "k8s.io/api/apps/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StatefulSetLister helps list StatefulSets. @@ -30,7 +30,7 @@ import ( type StatefulSetLister interface { // List lists all StatefulSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.StatefulSet, err error) // StatefulSets returns an object that can list and get StatefulSets. StatefulSets(namespace string) StatefulSetNamespaceLister StatefulSetListerExpansion @@ -38,25 +38,17 @@ type StatefulSetLister interface { // statefulSetLister implements the StatefulSetLister interface. type statefulSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*appsv1beta2.StatefulSet] } // NewStatefulSetLister returns a new StatefulSetLister. func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { - return &statefulSetLister{indexer: indexer} -} - -// List lists all StatefulSets in the indexer. -func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.StatefulSet)) - }) - return ret, err + return &statefulSetLister{listers.New[*appsv1beta2.StatefulSet](indexer, appsv1beta2.Resource("statefulset"))} } // StatefulSets returns an object that can list and get StatefulSets. func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { - return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return statefulSetNamespaceLister{listers.NewNamespaced[*appsv1beta2.StatefulSet](s.ResourceIndexer, namespace)} } // StatefulSetNamespaceLister helps list and get StatefulSets. @@ -64,36 +56,15 @@ func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceL type StatefulSetNamespaceLister interface { // List lists all StatefulSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) + List(selector labels.Selector) (ret []*appsv1beta2.StatefulSet, err error) // Get retrieves the StatefulSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1beta2.StatefulSet, error) + Get(name string) (*appsv1beta2.StatefulSet, error) StatefulSetNamespaceListerExpansion } // statefulSetNamespaceLister implements the StatefulSetNamespaceLister // interface. type statefulSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all StatefulSets in the indexer for a given namespace. -func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.StatefulSet)) - }) - return ret, err -} - -// Get retrieves the StatefulSet from the indexer for a given namespace and name. -func (s statefulSetNamespaceLister) Get(name string) (*v1beta2.StatefulSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("statefulset"), name) - } - return obj.(*v1beta2.StatefulSet), nil + listers.ResourceIndexer[*appsv1beta2.StatefulSet] } diff --git a/constraint/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go index 8447f059d..b4e577064 100644 --- a/constraint/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + autoscalingv1 "k8s.io/api/autoscaling/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. @@ -30,7 +30,7 @@ import ( type HorizontalPodAutoscalerLister interface { // List lists all HorizontalPodAutoscalers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv1.HorizontalPodAutoscaler, err error) // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister HorizontalPodAutoscalerListerExpansion @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*autoscalingv1.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. 
-func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*autoscalingv1.HorizontalPodAutoscaler](indexer, autoscalingv1.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*autoscalingv1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -64,36 +56,15 @@ func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace strin type HorizontalPodAutoscalerNamespaceLister interface { // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv1.HorizontalPodAutoscaler, err error) // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.HorizontalPodAutoscaler, error) + Get(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) HorizontalPodAutoscalerNamespaceListerExpansion } // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. -func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v1.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v1.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*autoscalingv1.HorizontalPodAutoscaler] } diff --git a/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go index a5cef2772..aafc57683 100644 --- a/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2/horizontalpodautoscaler.go @@ -19,10 +19,10 @@ limitations under the License. 
package v2 import ( - v2 "k8s.io/api/autoscaling/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + autoscalingv2 "k8s.io/api/autoscaling/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. @@ -30,7 +30,7 @@ import ( type HorizontalPodAutoscalerLister interface { // List lists all HorizontalPodAutoscalers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2.HorizontalPodAutoscaler, err error) // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister HorizontalPodAutoscalerListerExpansion @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*autoscalingv2.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. -func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*autoscalingv2.HorizontalPodAutoscaler](indexer, autoscalingv2.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*autoscalingv2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -64,36 +56,15 @@ func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace strin type HorizontalPodAutoscalerNamespaceLister interface { // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2.HorizontalPodAutoscaler, err error) // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.HorizontalPodAutoscaler, error) + Get(name string) (*autoscalingv2.HorizontalPodAutoscaler, error) HorizontalPodAutoscalerNamespaceListerExpansion } // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. 
type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. -func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v2.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*autoscalingv2.HorizontalPodAutoscaler] } diff --git a/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go index f1804e995..b7ad8e79b 100644 --- a/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,10 +19,10 @@ limitations under the License. package v2beta1 import ( - v2beta1 "k8s.io/api/autoscaling/v2beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. @@ -30,7 +30,7 @@ import ( type HorizontalPodAutoscalerLister interface { // List lists all HorizontalPodAutoscalers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2beta1.HorizontalPodAutoscaler, err error) // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister HorizontalPodAutoscalerListerExpansion @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*autoscalingv2beta1.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. 
-func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*autoscalingv2beta1.HorizontalPodAutoscaler](indexer, autoscalingv2beta1.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*autoscalingv2beta1.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -64,36 +56,15 @@ func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace strin type HorizontalPodAutoscalerNamespaceLister interface { // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2beta1.HorizontalPodAutoscaler, err error) // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2beta1.HorizontalPodAutoscaler, error) + Get(name string) (*autoscalingv2beta1.HorizontalPodAutoscaler, error) HorizontalPodAutoscalerNamespaceListerExpansion } // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta1.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. -func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2beta1.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2beta1.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v2beta1.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*autoscalingv2beta1.HorizontalPodAutoscaler] } diff --git a/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go b/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go index b0dbaf9eb..8d0fa6e79 100644 --- a/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/constraint/vendor/k8s.io/client-go/listers/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,10 +19,10 @@ limitations under the License. 
package v2beta2 import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. @@ -30,7 +30,7 @@ import ( type HorizontalPodAutoscalerLister interface { // List lists all HorizontalPodAutoscalers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2beta2.HorizontalPodAutoscaler, err error) // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister HorizontalPodAutoscalerListerExpansion @@ -38,25 +38,17 @@ type HorizontalPodAutoscalerLister interface { // horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. type horizontalPodAutoscalerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*autoscalingv2beta2.HorizontalPodAutoscaler] } // NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { - return &horizontalPodAutoscalerLister{indexer: indexer} -} - -// List lists all HorizontalPodAutoscalers in the indexer. -func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta2.HorizontalPodAutoscaler)) - }) - return ret, err + return &horizontalPodAutoscalerLister{listers.New[*autoscalingv2beta2.HorizontalPodAutoscaler](indexer, autoscalingv2beta2.Resource("horizontalpodautoscaler"))} } // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { - return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} + return horizontalPodAutoscalerNamespaceLister{listers.NewNamespaced[*autoscalingv2beta2.HorizontalPodAutoscaler](s.ResourceIndexer, namespace)} } // HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. @@ -64,36 +56,15 @@ func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace strin type HorizontalPodAutoscalerNamespaceLister interface { // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error) + List(selector labels.Selector) (ret []*autoscalingv2beta2.HorizontalPodAutoscaler, err error) // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2beta2.HorizontalPodAutoscaler, error) + Get(name string) (*autoscalingv2beta2.HorizontalPodAutoscaler, error) HorizontalPodAutoscalerNamespaceListerExpansion } // horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister // interface. 
type horizontalPodAutoscalerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. -func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2beta2.HorizontalPodAutoscaler, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2beta2.HorizontalPodAutoscaler)) - }) - return ret, err -} - -// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. -func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2beta2.HorizontalPodAutoscaler, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2beta2.Resource("horizontalpodautoscaler"), name) - } - return obj.(*v2beta2.HorizontalPodAutoscaler), nil + listers.ResourceIndexer[*autoscalingv2beta2.HorizontalPodAutoscaler] } diff --git a/constraint/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go b/constraint/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go index 8e49ed959..7ad2b87f0 100644 --- a/constraint/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go +++ b/constraint/vendor/k8s.io/client-go/listers/batch/v1/cronjob.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + batchv1 "k8s.io/api/batch/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CronJobLister helps list CronJobs. @@ -30,7 +30,7 @@ import ( type CronJobLister interface { // List lists all CronJobs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CronJob, err error) + List(selector labels.Selector) (ret []*batchv1.CronJob, err error) // CronJobs returns an object that can list and get CronJobs. CronJobs(namespace string) CronJobNamespaceLister CronJobListerExpansion @@ -38,25 +38,17 @@ type CronJobLister interface { // cronJobLister implements the CronJobLister interface. type cronJobLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*batchv1.CronJob] } // NewCronJobLister returns a new CronJobLister. func NewCronJobLister(indexer cache.Indexer) CronJobLister { - return &cronJobLister{indexer: indexer} -} - -// List lists all CronJobs in the indexer. -func (s *cronJobLister) List(selector labels.Selector) (ret []*v1.CronJob, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CronJob)) - }) - return ret, err + return &cronJobLister{listers.New[*batchv1.CronJob](indexer, batchv1.Resource("cronjob"))} } // CronJobs returns an object that can list and get CronJobs. func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { - return cronJobNamespaceLister{indexer: s.indexer, namespace: namespace} + return cronJobNamespaceLister{listers.NewNamespaced[*batchv1.CronJob](s.ResourceIndexer, namespace)} } // CronJobNamespaceLister helps list and get CronJobs. @@ -64,36 +56,15 @@ func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { type CronJobNamespaceLister interface { // List lists all CronJobs in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.CronJob, err error) + List(selector labels.Selector) (ret []*batchv1.CronJob, err error) // Get retrieves the CronJob from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.CronJob, error) + Get(name string) (*batchv1.CronJob, error) CronJobNamespaceListerExpansion } // cronJobNamespaceLister implements the CronJobNamespaceLister // interface. type cronJobNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CronJobs in the indexer for a given namespace. -func (s cronJobNamespaceLister) List(selector labels.Selector) (ret []*v1.CronJob, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CronJob)) - }) - return ret, err -} - -// Get retrieves the CronJob from the indexer for a given namespace and name. -func (s cronJobNamespaceLister) Get(name string) (*v1.CronJob, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("cronjob"), name) - } - return obj.(*v1.CronJob), nil + listers.ResourceIndexer[*batchv1.CronJob] } diff --git a/constraint/vendor/k8s.io/client-go/listers/batch/v1/job.go b/constraint/vendor/k8s.io/client-go/listers/batch/v1/job.go index 3aba6b95f..eb9f300d9 100644 --- a/constraint/vendor/k8s.io/client-go/listers/batch/v1/job.go +++ b/constraint/vendor/k8s.io/client-go/listers/batch/v1/job.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + batchv1 "k8s.io/api/batch/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // JobLister helps list Jobs. @@ -30,7 +30,7 @@ import ( type JobLister interface { // List lists all Jobs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Job, err error) + List(selector labels.Selector) (ret []*batchv1.Job, err error) // Jobs returns an object that can list and get Jobs. Jobs(namespace string) JobNamespaceLister JobListerExpansion @@ -38,25 +38,17 @@ type JobLister interface { // jobLister implements the JobLister interface. type jobLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*batchv1.Job] } // NewJobLister returns a new JobLister. func NewJobLister(indexer cache.Indexer) JobLister { - return &jobLister{indexer: indexer} -} - -// List lists all Jobs in the indexer. -func (s *jobLister) List(selector labels.Selector) (ret []*v1.Job, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Job)) - }) - return ret, err + return &jobLister{listers.New[*batchv1.Job](indexer, batchv1.Resource("job"))} } // Jobs returns an object that can list and get Jobs. func (s *jobLister) Jobs(namespace string) JobNamespaceLister { - return jobNamespaceLister{indexer: s.indexer, namespace: namespace} + return jobNamespaceLister{listers.NewNamespaced[*batchv1.Job](s.ResourceIndexer, namespace)} } // JobNamespaceLister helps list and get Jobs. @@ -64,36 +56,15 @@ func (s *jobLister) Jobs(namespace string) JobNamespaceLister { type JobNamespaceLister interface { // List lists all Jobs in the indexer for a given namespace. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Job, err error) + List(selector labels.Selector) (ret []*batchv1.Job, err error) // Get retrieves the Job from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Job, error) + Get(name string) (*batchv1.Job, error) JobNamespaceListerExpansion } // jobNamespaceLister implements the JobNamespaceLister // interface. type jobNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Jobs in the indexer for a given namespace. -func (s jobNamespaceLister) List(selector labels.Selector) (ret []*v1.Job, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Job)) - }) - return ret, err -} - -// Get retrieves the Job from the indexer for a given namespace and name. -func (s jobNamespaceLister) Get(name string) (*v1.Job, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("job"), name) - } - return obj.(*v1.Job), nil + listers.ResourceIndexer[*batchv1.Job] } diff --git a/constraint/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go b/constraint/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go index 4842d5e5a..d5e0ec2c8 100644 --- a/constraint/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go +++ b/constraint/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/batch/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + batchv1beta1 "k8s.io/api/batch/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CronJobLister helps list CronJobs. @@ -30,7 +30,7 @@ import ( type CronJobLister interface { // List lists all CronJobs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) + List(selector labels.Selector) (ret []*batchv1beta1.CronJob, err error) // CronJobs returns an object that can list and get CronJobs. CronJobs(namespace string) CronJobNamespaceLister CronJobListerExpansion @@ -38,25 +38,17 @@ type CronJobLister interface { // cronJobLister implements the CronJobLister interface. type cronJobLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*batchv1beta1.CronJob] } // NewCronJobLister returns a new CronJobLister. func NewCronJobLister(indexer cache.Indexer) CronJobLister { - return &cronJobLister{indexer: indexer} -} - -// List lists all CronJobs in the indexer. -func (s *cronJobLister) List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CronJob)) - }) - return ret, err + return &cronJobLister{listers.New[*batchv1beta1.CronJob](indexer, batchv1beta1.Resource("cronjob"))} } // CronJobs returns an object that can list and get CronJobs. 
func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { - return cronJobNamespaceLister{indexer: s.indexer, namespace: namespace} + return cronJobNamespaceLister{listers.NewNamespaced[*batchv1beta1.CronJob](s.ResourceIndexer, namespace)} } // CronJobNamespaceLister helps list and get CronJobs. @@ -64,36 +56,15 @@ func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { type CronJobNamespaceLister interface { // List lists all CronJobs in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) + List(selector labels.Selector) (ret []*batchv1beta1.CronJob, err error) // Get retrieves the CronJob from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.CronJob, error) + Get(name string) (*batchv1beta1.CronJob, error) CronJobNamespaceListerExpansion } // cronJobNamespaceLister implements the CronJobNamespaceLister // interface. type cronJobNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CronJobs in the indexer for a given namespace. -func (s cronJobNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CronJob)) - }) - return ret, err -} - -// Get retrieves the CronJob from the indexer for a given namespace and name. -func (s cronJobNamespaceLister) Get(name string) (*v1beta1.CronJob, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("cronjob"), name) - } - return obj.(*v1beta1.CronJob), nil + listers.ResourceIndexer[*batchv1beta1.CronJob] } diff --git a/constraint/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go b/constraint/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go index 0d04e118d..7dbb0f3f5 100644 --- a/constraint/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go +++ b/constraint/vendor/k8s.io/client-go/listers/certificates/v1/certificatesigningrequest.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/certificates/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + certificatesv1 "k8s.io/api/certificates/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CertificateSigningRequestLister helps list CertificateSigningRequests. @@ -30,39 +30,19 @@ import ( type CertificateSigningRequestLister interface { // List lists all CertificateSigningRequests in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CertificateSigningRequest, err error) + List(selector labels.Selector) (ret []*certificatesv1.CertificateSigningRequest, err error) // Get retrieves the CertificateSigningRequest from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.CertificateSigningRequest, error) + Get(name string) (*certificatesv1.CertificateSigningRequest, error) CertificateSigningRequestListerExpansion } // certificateSigningRequestLister implements the CertificateSigningRequestLister interface. 
type certificateSigningRequestLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*certificatesv1.CertificateSigningRequest] } // NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister. func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister { - return &certificateSigningRequestLister{indexer: indexer} -} - -// List lists all CertificateSigningRequests in the indexer. -func (s *certificateSigningRequestLister) List(selector labels.Selector) (ret []*v1.CertificateSigningRequest, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CertificateSigningRequest)) - }) - return ret, err -} - -// Get retrieves the CertificateSigningRequest from the index for a given name. -func (s *certificateSigningRequestLister) Get(name string) (*v1.CertificateSigningRequest, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("certificatesigningrequest"), name) - } - return obj.(*v1.CertificateSigningRequest), nil + return &certificateSigningRequestLister{listers.New[*certificatesv1.CertificateSigningRequest](indexer, certificatesv1.Resource("certificatesigningrequest"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go b/constraint/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go index b8049a761..0e7f47d80 100644 --- a/constraint/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go +++ b/constraint/vendor/k8s.io/client-go/listers/certificates/v1alpha1/clustertrustbundle.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/certificates/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterTrustBundleLister helps list ClusterTrustBundles. @@ -30,39 +30,19 @@ import ( type ClusterTrustBundleLister interface { // List lists all ClusterTrustBundles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ClusterTrustBundle, err error) + List(selector labels.Selector) (ret []*certificatesv1alpha1.ClusterTrustBundle, err error) // Get retrieves the ClusterTrustBundle from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ClusterTrustBundle, error) + Get(name string) (*certificatesv1alpha1.ClusterTrustBundle, error) ClusterTrustBundleListerExpansion } // clusterTrustBundleLister implements the ClusterTrustBundleLister interface. type clusterTrustBundleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*certificatesv1alpha1.ClusterTrustBundle] } // NewClusterTrustBundleLister returns a new ClusterTrustBundleLister. func NewClusterTrustBundleLister(indexer cache.Indexer) ClusterTrustBundleLister { - return &clusterTrustBundleLister{indexer: indexer} -} - -// List lists all ClusterTrustBundles in the indexer. 
-func (s *clusterTrustBundleLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterTrustBundle, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterTrustBundle)) - }) - return ret, err -} - -// Get retrieves the ClusterTrustBundle from the index for a given name. -func (s *clusterTrustBundleLister) Get(name string) (*v1alpha1.ClusterTrustBundle, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clustertrustbundle"), name) - } - return obj.(*v1alpha1.ClusterTrustBundle), nil + return &clusterTrustBundleLister{listers.New[*certificatesv1alpha1.ClusterTrustBundle](indexer, certificatesv1alpha1.Resource("clustertrustbundle"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go b/constraint/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go index 471b5629b..3b4742c67 100644 --- a/constraint/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go +++ b/constraint/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/certificates/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CertificateSigningRequestLister helps list CertificateSigningRequests. @@ -30,39 +30,19 @@ import ( type CertificateSigningRequestLister interface { // List lists all CertificateSigningRequests in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequest, err error) + List(selector labels.Selector) (ret []*certificatesv1beta1.CertificateSigningRequest, err error) // Get retrieves the CertificateSigningRequest from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.CertificateSigningRequest, error) + Get(name string) (*certificatesv1beta1.CertificateSigningRequest, error) CertificateSigningRequestListerExpansion } // certificateSigningRequestLister implements the CertificateSigningRequestLister interface. type certificateSigningRequestLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*certificatesv1beta1.CertificateSigningRequest] } // NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister. func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister { - return &certificateSigningRequestLister{indexer: indexer} -} - -// List lists all CertificateSigningRequests in the indexer. -func (s *certificateSigningRequestLister) List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequest, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CertificateSigningRequest)) - }) - return ret, err -} - -// Get retrieves the CertificateSigningRequest from the index for a given name. 
-func (s *certificateSigningRequestLister) Get(name string) (*v1beta1.CertificateSigningRequest, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("certificatesigningrequest"), name) - } - return obj.(*v1beta1.CertificateSigningRequest), nil + return &certificateSigningRequestLister{listers.New[*certificatesv1beta1.CertificateSigningRequest](indexer, certificatesv1beta1.Resource("certificatesigningrequest"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/coordination/v1/lease.go b/constraint/vendor/k8s.io/client-go/listers/coordination/v1/lease.go index de366d0e1..7fdab2bff 100644 --- a/constraint/vendor/k8s.io/client-go/listers/coordination/v1/lease.go +++ b/constraint/vendor/k8s.io/client-go/listers/coordination/v1/lease.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/coordination/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + coordinationv1 "k8s.io/api/coordination/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // LeaseLister helps list Leases. @@ -30,7 +30,7 @@ import ( type LeaseLister interface { // List lists all Leases in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Lease, err error) + List(selector labels.Selector) (ret []*coordinationv1.Lease, err error) // Leases returns an object that can list and get Leases. Leases(namespace string) LeaseNamespaceLister LeaseListerExpansion @@ -38,25 +38,17 @@ type LeaseLister interface { // leaseLister implements the LeaseLister interface. type leaseLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*coordinationv1.Lease] } // NewLeaseLister returns a new LeaseLister. func NewLeaseLister(indexer cache.Indexer) LeaseLister { - return &leaseLister{indexer: indexer} -} - -// List lists all Leases in the indexer. -func (s *leaseLister) List(selector labels.Selector) (ret []*v1.Lease, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Lease)) - }) - return ret, err + return &leaseLister{listers.New[*coordinationv1.Lease](indexer, coordinationv1.Resource("lease"))} } // Leases returns an object that can list and get Leases. func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { - return leaseNamespaceLister{indexer: s.indexer, namespace: namespace} + return leaseNamespaceLister{listers.NewNamespaced[*coordinationv1.Lease](s.ResourceIndexer, namespace)} } // LeaseNamespaceLister helps list and get Leases. @@ -64,36 +56,15 @@ func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { type LeaseNamespaceLister interface { // List lists all Leases in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Lease, err error) + List(selector labels.Selector) (ret []*coordinationv1.Lease, err error) // Get retrieves the Lease from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Lease, error) + Get(name string) (*coordinationv1.Lease, error) LeaseNamespaceListerExpansion } // leaseNamespaceLister implements the LeaseNamespaceLister // interface. 
type leaseNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Leases in the indexer for a given namespace. -func (s leaseNamespaceLister) List(selector labels.Selector) (ret []*v1.Lease, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Lease)) - }) - return ret, err -} - -// Get retrieves the Lease from the indexer for a given namespace and name. -func (s leaseNamespaceLister) Get(name string) (*v1.Lease, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("lease"), name) - } - return obj.(*v1.Lease), nil + listers.ResourceIndexer[*coordinationv1.Lease] } diff --git a/constraint/vendor/k8s.io/client-go/listers/coordination/v1alpha2/expansion_generated.go b/constraint/vendor/k8s.io/client-go/listers/coordination/v1alpha2/expansion_generated.go new file mode 100644 index 000000000..f22e7d483 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/coordination/v1alpha2/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +// LeaseCandidateListerExpansion allows custom methods to be added to +// LeaseCandidateLister. +type LeaseCandidateListerExpansion interface{} + +// LeaseCandidateNamespaceListerExpansion allows custom methods to be added to +// LeaseCandidateNamespaceLister. +type LeaseCandidateNamespaceListerExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/listers/coordination/v1alpha2/leasecandidate.go b/constraint/vendor/k8s.io/client-go/listers/coordination/v1alpha2/leasecandidate.go new file mode 100644 index 000000000..26a3be476 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/coordination/v1alpha2/leasecandidate.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// LeaseCandidateLister helps list LeaseCandidates. +// All objects returned here must be treated as read-only. +type LeaseCandidateLister interface { + // List lists all LeaseCandidates in the indexer. 
+ // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*coordinationv1alpha2.LeaseCandidate, err error) + // LeaseCandidates returns an object that can list and get LeaseCandidates. + LeaseCandidates(namespace string) LeaseCandidateNamespaceLister + LeaseCandidateListerExpansion +} + +// leaseCandidateLister implements the LeaseCandidateLister interface. +type leaseCandidateLister struct { + listers.ResourceIndexer[*coordinationv1alpha2.LeaseCandidate] +} + +// NewLeaseCandidateLister returns a new LeaseCandidateLister. +func NewLeaseCandidateLister(indexer cache.Indexer) LeaseCandidateLister { + return &leaseCandidateLister{listers.New[*coordinationv1alpha2.LeaseCandidate](indexer, coordinationv1alpha2.Resource("leasecandidate"))} +} + +// LeaseCandidates returns an object that can list and get LeaseCandidates. +func (s *leaseCandidateLister) LeaseCandidates(namespace string) LeaseCandidateNamespaceLister { + return leaseCandidateNamespaceLister{listers.NewNamespaced[*coordinationv1alpha2.LeaseCandidate](s.ResourceIndexer, namespace)} +} + +// LeaseCandidateNamespaceLister helps list and get LeaseCandidates. +// All objects returned here must be treated as read-only. +type LeaseCandidateNamespaceLister interface { + // List lists all LeaseCandidates in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*coordinationv1alpha2.LeaseCandidate, err error) + // Get retrieves the LeaseCandidate from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*coordinationv1alpha2.LeaseCandidate, error) + LeaseCandidateNamespaceListerExpansion +} + +// leaseCandidateNamespaceLister implements the LeaseCandidateNamespaceLister +// interface. +type leaseCandidateNamespaceLister struct { + listers.ResourceIndexer[*coordinationv1alpha2.LeaseCandidate] +} diff --git a/constraint/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go b/constraint/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go index 8dfdc1e9b..dfbd02acf 100644 --- a/constraint/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go +++ b/constraint/vendor/k8s.io/client-go/listers/coordination/v1beta1/lease.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/coordination/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + coordinationv1beta1 "k8s.io/api/coordination/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // LeaseLister helps list Leases. @@ -30,7 +30,7 @@ import ( type LeaseLister interface { // List lists all Leases in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Lease, err error) + List(selector labels.Selector) (ret []*coordinationv1beta1.Lease, err error) // Leases returns an object that can list and get Leases. Leases(namespace string) LeaseNamespaceLister LeaseListerExpansion @@ -38,25 +38,17 @@ type LeaseLister interface { // leaseLister implements the LeaseLister interface. type leaseLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*coordinationv1beta1.Lease] } // NewLeaseLister returns a new LeaseLister. 
func NewLeaseLister(indexer cache.Indexer) LeaseLister { - return &leaseLister{indexer: indexer} -} - -// List lists all Leases in the indexer. -func (s *leaseLister) List(selector labels.Selector) (ret []*v1beta1.Lease, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Lease)) - }) - return ret, err + return &leaseLister{listers.New[*coordinationv1beta1.Lease](indexer, coordinationv1beta1.Resource("lease"))} } // Leases returns an object that can list and get Leases. func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { - return leaseNamespaceLister{indexer: s.indexer, namespace: namespace} + return leaseNamespaceLister{listers.NewNamespaced[*coordinationv1beta1.Lease](s.ResourceIndexer, namespace)} } // LeaseNamespaceLister helps list and get Leases. @@ -64,36 +56,15 @@ func (s *leaseLister) Leases(namespace string) LeaseNamespaceLister { type LeaseNamespaceLister interface { // List lists all Leases in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Lease, err error) + List(selector labels.Selector) (ret []*coordinationv1beta1.Lease, err error) // Get retrieves the Lease from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Lease, error) + Get(name string) (*coordinationv1beta1.Lease, error) LeaseNamespaceListerExpansion } // leaseNamespaceLister implements the LeaseNamespaceLister // interface. type leaseNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Leases in the indexer for a given namespace. -func (s leaseNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Lease, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Lease)) - }) - return ret, err -} - -// Get retrieves the Lease from the indexer for a given namespace and name. -func (s leaseNamespaceLister) Get(name string) (*v1beta1.Lease, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("lease"), name) - } - return obj.(*v1beta1.Lease), nil + listers.ResourceIndexer[*coordinationv1beta1.Lease] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go index 5fcdac3c7..c88836706 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ComponentStatusLister helps list ComponentStatuses. @@ -30,39 +30,19 @@ import ( type ComponentStatusLister interface { // List lists all ComponentStatuses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) + List(selector labels.Selector) (ret []*corev1.ComponentStatus, err error) // Get retrieves the ComponentStatus from the index for a given name. 
// Objects returned here must be treated as read-only. - Get(name string) (*v1.ComponentStatus, error) + Get(name string) (*corev1.ComponentStatus, error) ComponentStatusListerExpansion } // componentStatusLister implements the ComponentStatusLister interface. type componentStatusLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.ComponentStatus] } // NewComponentStatusLister returns a new ComponentStatusLister. func NewComponentStatusLister(indexer cache.Indexer) ComponentStatusLister { - return &componentStatusLister{indexer: indexer} -} - -// List lists all ComponentStatuses in the indexer. -func (s *componentStatusLister) List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ComponentStatus)) - }) - return ret, err -} - -// Get retrieves the ComponentStatus from the index for a given name. -func (s *componentStatusLister) Get(name string) (*v1.ComponentStatus, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("componentstatus"), name) - } - return obj.(*v1.ComponentStatus), nil + return &componentStatusLister{listers.New[*corev1.ComponentStatus](indexer, corev1.Resource("componentstatus"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/configmap.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/configmap.go index 6a410e47c..54d25412e 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/configmap.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/configmap.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ConfigMapLister helps list ConfigMaps. @@ -30,7 +30,7 @@ import ( type ConfigMapLister interface { // List lists all ConfigMaps in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) // ConfigMaps returns an object that can list and get ConfigMaps. ConfigMaps(namespace string) ConfigMapNamespaceLister ConfigMapListerExpansion @@ -38,25 +38,17 @@ type ConfigMapLister interface { // configMapLister implements the ConfigMapLister interface. type configMapLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.ConfigMap] } // NewConfigMapLister returns a new ConfigMapLister. func NewConfigMapLister(indexer cache.Indexer) ConfigMapLister { - return &configMapLister{indexer: indexer} -} - -// List lists all ConfigMaps in the indexer. -func (s *configMapLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ConfigMap)) - }) - return ret, err + return &configMapLister{listers.New[*corev1.ConfigMap](indexer, corev1.Resource("configmap"))} } // ConfigMaps returns an object that can list and get ConfigMaps. 
func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister { - return configMapNamespaceLister{indexer: s.indexer, namespace: namespace} + return configMapNamespaceLister{listers.NewNamespaced[*corev1.ConfigMap](s.ResourceIndexer, namespace)} } // ConfigMapNamespaceLister helps list and get ConfigMaps. @@ -64,36 +56,15 @@ func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister type ConfigMapNamespaceLister interface { // List lists all ConfigMaps in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) // Get retrieves the ConfigMap from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ConfigMap, error) + Get(name string) (*corev1.ConfigMap, error) ConfigMapNamespaceListerExpansion } // configMapNamespaceLister implements the ConfigMapNamespaceLister // interface. type configMapNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ConfigMaps in the indexer for a given namespace. -func (s configMapNamespaceLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ConfigMap)) - }) - return ret, err -} - -// Get retrieves the ConfigMap from the indexer for a given namespace and name. -func (s configMapNamespaceLister) Get(name string) (*v1.ConfigMap, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("configmap"), name) - } - return obj.(*v1.ConfigMap), nil + listers.ResourceIndexer[*corev1.ConfigMap] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/endpoints.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/endpoints.go index 4759ce808..a9d4e45a1 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/endpoints.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/endpoints.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EndpointsLister helps list Endpoints. @@ -30,7 +30,7 @@ import ( type EndpointsLister interface { // List lists all Endpoints in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Endpoints, err error) + List(selector labels.Selector) (ret []*corev1.Endpoints, err error) // Endpoints returns an object that can list and get Endpoints. Endpoints(namespace string) EndpointsNamespaceLister EndpointsListerExpansion @@ -38,25 +38,17 @@ type EndpointsLister interface { // endpointsLister implements the EndpointsLister interface. type endpointsLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.Endpoints] } // NewEndpointsLister returns a new EndpointsLister. func NewEndpointsLister(indexer cache.Indexer) EndpointsLister { - return &endpointsLister{indexer: indexer} -} - -// List lists all Endpoints in the indexer. 
-func (s *endpointsLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Endpoints)) - }) - return ret, err + return &endpointsLister{listers.New[*corev1.Endpoints](indexer, corev1.Resource("endpoints"))} } // Endpoints returns an object that can list and get Endpoints. func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { - return endpointsNamespaceLister{indexer: s.indexer, namespace: namespace} + return endpointsNamespaceLister{listers.NewNamespaced[*corev1.Endpoints](s.ResourceIndexer, namespace)} } // EndpointsNamespaceLister helps list and get Endpoints. @@ -64,36 +56,15 @@ func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { type EndpointsNamespaceLister interface { // List lists all Endpoints in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Endpoints, err error) + List(selector labels.Selector) (ret []*corev1.Endpoints, err error) // Get retrieves the Endpoints from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Endpoints, error) + Get(name string) (*corev1.Endpoints, error) EndpointsNamespaceListerExpansion } // endpointsNamespaceLister implements the EndpointsNamespaceLister // interface. type endpointsNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Endpoints in the indexer for a given namespace. -func (s endpointsNamespaceLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Endpoints)) - }) - return ret, err -} - -// Get retrieves the Endpoints from the indexer for a given namespace and name. -func (s endpointsNamespaceLister) Get(name string) (*v1.Endpoints, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("endpoints"), name) - } - return obj.(*v1.Endpoints), nil + listers.ResourceIndexer[*corev1.Endpoints] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/event.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/event.go index 4416e2012..0e13fa311 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/event.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/event.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EventLister helps list Events. @@ -30,7 +30,7 @@ import ( type EventLister interface { // List lists all Events in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Event, err error) + List(selector labels.Selector) (ret []*corev1.Event, err error) // Events returns an object that can list and get Events. Events(namespace string) EventNamespaceLister EventListerExpansion @@ -38,25 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. 
type eventLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{indexer: indexer} -} - -// List lists all Events in the indexer. -func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err + return &eventLister{listers.New[*corev1.Event](indexer, corev1.Resource("event"))} } // Events returns an object that can list and get Events. func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{indexer: s.indexer, namespace: namespace} + return eventNamespaceLister{listers.NewNamespaced[*corev1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. @@ -64,36 +56,15 @@ func (s *eventLister) Events(namespace string) EventNamespaceLister { type EventNamespaceLister interface { // List lists all Events in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Event, err error) + List(selector labels.Selector) (ret []*corev1.Event, err error) // Get retrieves the Event from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Event, error) + Get(name string) (*corev1.Event, error) EventNamespaceListerExpansion } // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Events in the indexer for a given namespace. -func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err -} - -// Get retrieves the Event from the indexer for a given namespace and name. -func (s eventNamespaceLister) Get(name string) (*v1.Event, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("event"), name) - } - return obj.(*v1.Event), nil + listers.ResourceIndexer[*corev1.Event] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/limitrange.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/limitrange.go index d8fa569cd..26402c822 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/limitrange.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/limitrange.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // LimitRangeLister helps list LimitRanges. @@ -30,7 +30,7 @@ import ( type LimitRangeLister interface { // List lists all LimitRanges in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.LimitRange, err error) + List(selector labels.Selector) (ret []*corev1.LimitRange, err error) // LimitRanges returns an object that can list and get LimitRanges. 
LimitRanges(namespace string) LimitRangeNamespaceLister LimitRangeListerExpansion @@ -38,25 +38,17 @@ type LimitRangeLister interface { // limitRangeLister implements the LimitRangeLister interface. type limitRangeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.LimitRange] } // NewLimitRangeLister returns a new LimitRangeLister. func NewLimitRangeLister(indexer cache.Indexer) LimitRangeLister { - return &limitRangeLister{indexer: indexer} -} - -// List lists all LimitRanges in the indexer. -func (s *limitRangeLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.LimitRange)) - }) - return ret, err + return &limitRangeLister{listers.New[*corev1.LimitRange](indexer, corev1.Resource("limitrange"))} } // LimitRanges returns an object that can list and get LimitRanges. func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceLister { - return limitRangeNamespaceLister{indexer: s.indexer, namespace: namespace} + return limitRangeNamespaceLister{listers.NewNamespaced[*corev1.LimitRange](s.ResourceIndexer, namespace)} } // LimitRangeNamespaceLister helps list and get LimitRanges. @@ -64,36 +56,15 @@ func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceList type LimitRangeNamespaceLister interface { // List lists all LimitRanges in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.LimitRange, err error) + List(selector labels.Selector) (ret []*corev1.LimitRange, err error) // Get retrieves the LimitRange from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.LimitRange, error) + Get(name string) (*corev1.LimitRange, error) LimitRangeNamespaceListerExpansion } // limitRangeNamespaceLister implements the LimitRangeNamespaceLister // interface. type limitRangeNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all LimitRanges in the indexer for a given namespace. -func (s limitRangeNamespaceLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.LimitRange)) - }) - return ret, err -} - -// Get retrieves the LimitRange from the indexer for a given namespace and name. -func (s limitRangeNamespaceLister) Get(name string) (*v1.LimitRange, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("limitrange"), name) - } - return obj.(*v1.LimitRange), nil + listers.ResourceIndexer[*corev1.LimitRange] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/namespace.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/namespace.go index 454aa1a0a..d0cd4e5c7 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/namespace.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/namespace.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // NamespaceLister helps list Namespaces. @@ -30,39 +30,19 @@ import ( type NamespaceLister interface { // List lists all Namespaces in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Namespace, err error) + List(selector labels.Selector) (ret []*corev1.Namespace, err error) // Get retrieves the Namespace from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Namespace, error) + Get(name string) (*corev1.Namespace, error) NamespaceListerExpansion } // namespaceLister implements the NamespaceLister interface. type namespaceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.Namespace] } // NewNamespaceLister returns a new NamespaceLister. func NewNamespaceLister(indexer cache.Indexer) NamespaceLister { - return &namespaceLister{indexer: indexer} -} - -// List lists all Namespaces in the indexer. -func (s *namespaceLister) List(selector labels.Selector) (ret []*v1.Namespace, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Namespace)) - }) - return ret, err -} - -// Get retrieves the Namespace from the index for a given name. -func (s *namespaceLister) Get(name string) (*v1.Namespace, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("namespace"), name) - } - return obj.(*v1.Namespace), nil + return &namespaceLister{listers.New[*corev1.Namespace](indexer, corev1.Resource("namespace"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/node.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/node.go index 596049857..c9ffe2e52 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/node.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/node.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // NodeLister helps list Nodes. @@ -30,39 +30,19 @@ import ( type NodeLister interface { // List lists all Nodes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Node, err error) + List(selector labels.Selector) (ret []*corev1.Node, err error) // Get retrieves the Node from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Node, error) + Get(name string) (*corev1.Node, error) NodeListerExpansion } // nodeLister implements the NodeLister interface. type nodeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.Node] } // NewNodeLister returns a new NodeLister. func NewNodeLister(indexer cache.Indexer) NodeLister { - return &nodeLister{indexer: indexer} -} - -// List lists all Nodes in the indexer. 
-func (s *nodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Node)) - }) - return ret, err -} - -// Get retrieves the Node from the index for a given name. -func (s *nodeLister) Get(name string) (*v1.Node, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("node"), name) - } - return obj.(*v1.Node), nil + return &nodeLister{listers.New[*corev1.Node](indexer, corev1.Resource("node"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go index e7dfd4ac9..3f3a17cba 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PersistentVolumeLister helps list PersistentVolumes. @@ -30,39 +30,19 @@ import ( type PersistentVolumeLister interface { // List lists all PersistentVolumes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) + List(selector labels.Selector) (ret []*corev1.PersistentVolume, err error) // Get retrieves the PersistentVolume from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PersistentVolume, error) + Get(name string) (*corev1.PersistentVolume, error) PersistentVolumeListerExpansion } // persistentVolumeLister implements the PersistentVolumeLister interface. type persistentVolumeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.PersistentVolume] } // NewPersistentVolumeLister returns a new PersistentVolumeLister. func NewPersistentVolumeLister(indexer cache.Indexer) PersistentVolumeLister { - return &persistentVolumeLister{indexer: indexer} -} - -// List lists all PersistentVolumes in the indexer. -func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PersistentVolume)) - }) - return ret, err -} - -// Get retrieves the PersistentVolume from the index for a given name. -func (s *persistentVolumeLister) Get(name string) (*v1.PersistentVolume, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("persistentvolume"), name) - } - return obj.(*v1.PersistentVolume), nil + return &persistentVolumeLister{listers.New[*corev1.PersistentVolume](indexer, corev1.Resource("persistentvolume"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go index fc71bb5a1..0665e6172 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PersistentVolumeClaimLister helps list PersistentVolumeClaims. @@ -30,7 +30,7 @@ import ( type PersistentVolumeClaimLister interface { // List lists all PersistentVolumeClaims in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + List(selector labels.Selector) (ret []*corev1.PersistentVolumeClaim, err error) // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister PersistentVolumeClaimListerExpansion @@ -38,25 +38,17 @@ type PersistentVolumeClaimLister interface { // persistentVolumeClaimLister implements the PersistentVolumeClaimLister interface. type persistentVolumeClaimLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.PersistentVolumeClaim] } // NewPersistentVolumeClaimLister returns a new PersistentVolumeClaimLister. func NewPersistentVolumeClaimLister(indexer cache.Indexer) PersistentVolumeClaimLister { - return &persistentVolumeClaimLister{indexer: indexer} -} - -// List lists all PersistentVolumeClaims in the indexer. -func (s *persistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PersistentVolumeClaim)) - }) - return ret, err + return &persistentVolumeClaimLister{listers.New[*corev1.PersistentVolumeClaim](indexer, corev1.Resource("persistentvolumeclaim"))} } // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister { - return persistentVolumeClaimNamespaceLister{indexer: s.indexer, namespace: namespace} + return persistentVolumeClaimNamespaceLister{listers.NewNamespaced[*corev1.PersistentVolumeClaim](s.ResourceIndexer, namespace)} } // PersistentVolumeClaimNamespaceLister helps list and get PersistentVolumeClaims. @@ -64,36 +56,15 @@ func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) P type PersistentVolumeClaimNamespaceLister interface { // List lists all PersistentVolumeClaims in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + List(selector labels.Selector) (ret []*corev1.PersistentVolumeClaim, err error) // Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PersistentVolumeClaim, error) + Get(name string) (*corev1.PersistentVolumeClaim, error) PersistentVolumeClaimNamespaceListerExpansion } // persistentVolumeClaimNamespaceLister implements the PersistentVolumeClaimNamespaceLister // interface. type persistentVolumeClaimNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PersistentVolumeClaims in the indexer for a given namespace. 
-func (s persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PersistentVolumeClaim)) - }) - return ret, err -} - -// Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. -func (s persistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("persistentvolumeclaim"), name) - } - return obj.(*v1.PersistentVolumeClaim), nil + listers.ResourceIndexer[*corev1.PersistentVolumeClaim] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/pod.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/pod.go index ab8f0946c..0ea9d678b 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/pod.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/pod.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PodLister helps list Pods. @@ -30,7 +30,7 @@ import ( type PodLister interface { // List lists all Pods in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Pod, err error) + List(selector labels.Selector) (ret []*corev1.Pod, err error) // Pods returns an object that can list and get Pods. Pods(namespace string) PodNamespaceLister PodListerExpansion @@ -38,25 +38,17 @@ type PodLister interface { // podLister implements the PodLister interface. type podLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.Pod] } // NewPodLister returns a new PodLister. func NewPodLister(indexer cache.Indexer) PodLister { - return &podLister{indexer: indexer} -} - -// List lists all Pods in the indexer. -func (s *podLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Pod)) - }) - return ret, err + return &podLister{listers.New[*corev1.Pod](indexer, corev1.Resource("pod"))} } // Pods returns an object that can list and get Pods. func (s *podLister) Pods(namespace string) PodNamespaceLister { - return podNamespaceLister{indexer: s.indexer, namespace: namespace} + return podNamespaceLister{listers.NewNamespaced[*corev1.Pod](s.ResourceIndexer, namespace)} } // PodNamespaceLister helps list and get Pods. @@ -64,36 +56,15 @@ func (s *podLister) Pods(namespace string) PodNamespaceLister { type PodNamespaceLister interface { // List lists all Pods in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Pod, err error) + List(selector labels.Selector) (ret []*corev1.Pod, err error) // Get retrieves the Pod from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Pod, error) + Get(name string) (*corev1.Pod, error) PodNamespaceListerExpansion } // podNamespaceLister implements the PodNamespaceLister // interface. 
type podNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Pods in the indexer for a given namespace. -func (s podNamespaceLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Pod)) - }) - return ret, err -} - -// Get retrieves the Pod from the indexer for a given namespace and name. -func (s podNamespaceLister) Get(name string) (*v1.Pod, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("pod"), name) - } - return obj.(*v1.Pod), nil + listers.ResourceIndexer[*corev1.Pod] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go index 6c310045b..6ee5daf08 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PodTemplateLister helps list PodTemplates. @@ -30,7 +30,7 @@ import ( type PodTemplateLister interface { // List lists all PodTemplates in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + List(selector labels.Selector) (ret []*corev1.PodTemplate, err error) // PodTemplates returns an object that can list and get PodTemplates. PodTemplates(namespace string) PodTemplateNamespaceLister PodTemplateListerExpansion @@ -38,25 +38,17 @@ type PodTemplateLister interface { // podTemplateLister implements the PodTemplateLister interface. type podTemplateLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.PodTemplate] } // NewPodTemplateLister returns a new PodTemplateLister. func NewPodTemplateLister(indexer cache.Indexer) PodTemplateLister { - return &podTemplateLister{indexer: indexer} -} - -// List lists all PodTemplates in the indexer. -func (s *podTemplateLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodTemplate)) - }) - return ret, err + return &podTemplateLister{listers.New[*corev1.PodTemplate](indexer, corev1.Resource("podtemplate"))} } // PodTemplates returns an object that can list and get PodTemplates. func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceLister { - return podTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} + return podTemplateNamespaceLister{listers.NewNamespaced[*corev1.PodTemplate](s.ResourceIndexer, namespace)} } // PodTemplateNamespaceLister helps list and get PodTemplates. @@ -64,36 +56,15 @@ func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceL type PodTemplateNamespaceLister interface { // List lists all PodTemplates in the indexer for a given namespace. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + List(selector labels.Selector) (ret []*corev1.PodTemplate, err error) // Get retrieves the PodTemplate from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PodTemplate, error) + Get(name string) (*corev1.PodTemplate, error) PodTemplateNamespaceListerExpansion } // podTemplateNamespaceLister implements the PodTemplateNamespaceLister // interface. type podTemplateNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodTemplates in the indexer for a given namespace. -func (s podTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodTemplate)) - }) - return ret, err -} - -// Get retrieves the PodTemplate from the indexer for a given namespace and name. -func (s podTemplateNamespaceLister) Get(name string) (*v1.PodTemplate, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("podtemplate"), name) - } - return obj.(*v1.PodTemplate), nil + listers.ResourceIndexer[*corev1.PodTemplate] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go index e28e2ef76..8d366f740 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ReplicationControllerLister helps list ReplicationControllers. @@ -30,7 +30,7 @@ import ( type ReplicationControllerLister interface { // List lists all ReplicationControllers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + List(selector labels.Selector) (ret []*corev1.ReplicationController, err error) // ReplicationControllers returns an object that can list and get ReplicationControllers. ReplicationControllers(namespace string) ReplicationControllerNamespaceLister ReplicationControllerListerExpansion @@ -38,25 +38,17 @@ type ReplicationControllerLister interface { // replicationControllerLister implements the ReplicationControllerLister interface. type replicationControllerLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.ReplicationController] } // NewReplicationControllerLister returns a new ReplicationControllerLister. func NewReplicationControllerLister(indexer cache.Indexer) ReplicationControllerLister { - return &replicationControllerLister{indexer: indexer} -} - -// List lists all ReplicationControllers in the indexer. 
-func (s *replicationControllerLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicationController)) - }) - return ret, err + return &replicationControllerLister{listers.New[*corev1.ReplicationController](indexer, corev1.Resource("replicationcontroller"))} } // ReplicationControllers returns an object that can list and get ReplicationControllers. func (s *replicationControllerLister) ReplicationControllers(namespace string) ReplicationControllerNamespaceLister { - return replicationControllerNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicationControllerNamespaceLister{listers.NewNamespaced[*corev1.ReplicationController](s.ResourceIndexer, namespace)} } // ReplicationControllerNamespaceLister helps list and get ReplicationControllers. @@ -64,36 +56,15 @@ func (s *replicationControllerLister) ReplicationControllers(namespace string) R type ReplicationControllerNamespaceLister interface { // List lists all ReplicationControllers in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + List(selector labels.Selector) (ret []*corev1.ReplicationController, err error) // Get retrieves the ReplicationController from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ReplicationController, error) + Get(name string) (*corev1.ReplicationController, error) ReplicationControllerNamespaceListerExpansion } // replicationControllerNamespaceLister implements the ReplicationControllerNamespaceLister // interface. type replicationControllerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicationControllers in the indexer for a given namespace. -func (s replicationControllerNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ReplicationController)) - }) - return ret, err -} - -// Get retrieves the ReplicationController from the indexer for a given namespace and name. -func (s replicationControllerNamespaceLister) Get(name string) (*v1.ReplicationController, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("replicationcontroller"), name) - } - return obj.(*v1.ReplicationController), nil + listers.ResourceIndexer[*corev1.ReplicationController] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go index 9c00b49d4..67dd9a286 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ResourceQuotaLister helps list ResourceQuotas. 
@@ -30,7 +30,7 @@ import ( type ResourceQuotaLister interface { // List lists all ResourceQuotas in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + List(selector labels.Selector) (ret []*corev1.ResourceQuota, err error) // ResourceQuotas returns an object that can list and get ResourceQuotas. ResourceQuotas(namespace string) ResourceQuotaNamespaceLister ResourceQuotaListerExpansion @@ -38,25 +38,17 @@ type ResourceQuotaLister interface { // resourceQuotaLister implements the ResourceQuotaLister interface. type resourceQuotaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.ResourceQuota] } // NewResourceQuotaLister returns a new ResourceQuotaLister. func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister { - return &resourceQuotaLister{indexer: indexer} -} - -// List lists all ResourceQuotas in the indexer. -func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ResourceQuota)) - }) - return ret, err + return &resourceQuotaLister{listers.New[*corev1.ResourceQuota](indexer, corev1.Resource("resourcequota"))} } // ResourceQuotas returns an object that can list and get ResourceQuotas. func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaNamespaceLister { - return resourceQuotaNamespaceLister{indexer: s.indexer, namespace: namespace} + return resourceQuotaNamespaceLister{listers.NewNamespaced[*corev1.ResourceQuota](s.ResourceIndexer, namespace)} } // ResourceQuotaNamespaceLister helps list and get ResourceQuotas. @@ -64,36 +56,15 @@ func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaName type ResourceQuotaNamespaceLister interface { // List lists all ResourceQuotas in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + List(selector labels.Selector) (ret []*corev1.ResourceQuota, err error) // Get retrieves the ResourceQuota from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ResourceQuota, error) + Get(name string) (*corev1.ResourceQuota, error) ResourceQuotaNamespaceListerExpansion } // resourceQuotaNamespaceLister implements the ResourceQuotaNamespaceLister // interface. type resourceQuotaNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceQuotas in the indexer for a given namespace. -func (s resourceQuotaNamespaceLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ResourceQuota)) - }) - return ret, err -} - -// Get retrieves the ResourceQuota from the indexer for a given namespace and name. 
-func (s resourceQuotaNamespaceLister) Get(name string) (*v1.ResourceQuota, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("resourcequota"), name) - } - return obj.(*v1.ResourceQuota), nil + listers.ResourceIndexer[*corev1.ResourceQuota] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/secret.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/secret.go index d386d4d5c..16a8da382 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/secret.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/secret.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // SecretLister helps list Secrets. @@ -30,7 +30,7 @@ import ( type SecretLister interface { // List lists all Secrets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Secret, err error) + List(selector labels.Selector) (ret []*corev1.Secret, err error) // Secrets returns an object that can list and get Secrets. Secrets(namespace string) SecretNamespaceLister SecretListerExpansion @@ -38,25 +38,17 @@ type SecretLister interface { // secretLister implements the SecretLister interface. type secretLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.Secret] } // NewSecretLister returns a new SecretLister. func NewSecretLister(indexer cache.Indexer) SecretLister { - return &secretLister{indexer: indexer} -} - -// List lists all Secrets in the indexer. -func (s *secretLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Secret)) - }) - return ret, err + return &secretLister{listers.New[*corev1.Secret](indexer, corev1.Resource("secret"))} } // Secrets returns an object that can list and get Secrets. func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { - return secretNamespaceLister{indexer: s.indexer, namespace: namespace} + return secretNamespaceLister{listers.NewNamespaced[*corev1.Secret](s.ResourceIndexer, namespace)} } // SecretNamespaceLister helps list and get Secrets. @@ -64,36 +56,15 @@ func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { type SecretNamespaceLister interface { // List lists all Secrets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Secret, err error) + List(selector labels.Selector) (ret []*corev1.Secret, err error) // Get retrieves the Secret from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Secret, error) + Get(name string) (*corev1.Secret, error) SecretNamespaceListerExpansion } // secretNamespaceLister implements the SecretNamespaceLister // interface. type secretNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Secrets in the indexer for a given namespace. 
-func (s secretNamespaceLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Secret)) - }) - return ret, err -} - -// Get retrieves the Secret from the indexer for a given namespace and name. -func (s secretNamespaceLister) Get(name string) (*v1.Secret, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("secret"), name) - } - return obj.(*v1.Secret), nil + listers.ResourceIndexer[*corev1.Secret] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/service.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/service.go index 51026d7b4..dcd894031 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/service.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/service.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ServiceLister helps list Services. @@ -30,7 +30,7 @@ import ( type ServiceLister interface { // List lists all Services in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Service, err error) + List(selector labels.Selector) (ret []*corev1.Service, err error) // Services returns an object that can list and get Services. Services(namespace string) ServiceNamespaceLister ServiceListerExpansion @@ -38,25 +38,17 @@ type ServiceLister interface { // serviceLister implements the ServiceLister interface. type serviceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.Service] } // NewServiceLister returns a new ServiceLister. func NewServiceLister(indexer cache.Indexer) ServiceLister { - return &serviceLister{indexer: indexer} -} - -// List lists all Services in the indexer. -func (s *serviceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Service)) - }) - return ret, err + return &serviceLister{listers.New[*corev1.Service](indexer, corev1.Resource("service"))} } // Services returns an object that can list and get Services. func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { - return serviceNamespaceLister{indexer: s.indexer, namespace: namespace} + return serviceNamespaceLister{listers.NewNamespaced[*corev1.Service](s.ResourceIndexer, namespace)} } // ServiceNamespaceLister helps list and get Services. @@ -64,36 +56,15 @@ func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { type ServiceNamespaceLister interface { // List lists all Services in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Service, err error) + List(selector labels.Selector) (ret []*corev1.Service, err error) // Get retrieves the Service from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.Service, error) + Get(name string) (*corev1.Service, error) ServiceNamespaceListerExpansion } // serviceNamespaceLister implements the ServiceNamespaceLister // interface. type serviceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Services in the indexer for a given namespace. -func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Service)) - }) - return ret, err -} - -// Get retrieves the Service from the indexer for a given namespace and name. -func (s serviceNamespaceLister) Get(name string) (*v1.Service, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("service"), name) - } - return obj.(*v1.Service), nil + listers.ResourceIndexer[*corev1.Service] } diff --git a/constraint/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go b/constraint/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go index aa9554d8b..aaf888a82 100644 --- a/constraint/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go +++ b/constraint/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ServiceAccountLister helps list ServiceAccounts. @@ -30,7 +30,7 @@ import ( type ServiceAccountLister interface { // List lists all ServiceAccounts in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + List(selector labels.Selector) (ret []*corev1.ServiceAccount, err error) // ServiceAccounts returns an object that can list and get ServiceAccounts. ServiceAccounts(namespace string) ServiceAccountNamespaceLister ServiceAccountListerExpansion @@ -38,25 +38,17 @@ type ServiceAccountLister interface { // serviceAccountLister implements the ServiceAccountLister interface. type serviceAccountLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*corev1.ServiceAccount] } // NewServiceAccountLister returns a new ServiceAccountLister. func NewServiceAccountLister(indexer cache.Indexer) ServiceAccountLister { - return &serviceAccountLister{indexer: indexer} -} - -// List lists all ServiceAccounts in the indexer. -func (s *serviceAccountLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ServiceAccount)) - }) - return ret, err + return &serviceAccountLister{listers.New[*corev1.ServiceAccount](indexer, corev1.Resource("serviceaccount"))} } // ServiceAccounts returns an object that can list and get ServiceAccounts. func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountNamespaceLister { - return serviceAccountNamespaceLister{indexer: s.indexer, namespace: namespace} + return serviceAccountNamespaceLister{listers.NewNamespaced[*corev1.ServiceAccount](s.ResourceIndexer, namespace)} } // ServiceAccountNamespaceLister helps list and get ServiceAccounts. 
@@ -64,36 +56,15 @@ func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountN type ServiceAccountNamespaceLister interface { // List lists all ServiceAccounts in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + List(selector labels.Selector) (ret []*corev1.ServiceAccount, err error) // Get retrieves the ServiceAccount from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ServiceAccount, error) + Get(name string) (*corev1.ServiceAccount, error) ServiceAccountNamespaceListerExpansion } // serviceAccountNamespaceLister implements the ServiceAccountNamespaceLister // interface. type serviceAccountNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ServiceAccounts in the indexer for a given namespace. -func (s serviceAccountNamespaceLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ServiceAccount)) - }) - return ret, err -} - -// Get retrieves the ServiceAccount from the indexer for a given namespace and name. -func (s serviceAccountNamespaceLister) Get(name string) (*v1.ServiceAccount, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("serviceaccount"), name) - } - return obj.(*v1.ServiceAccount), nil + listers.ResourceIndexer[*corev1.ServiceAccount] } diff --git a/constraint/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go b/constraint/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go index 4dd46ff1b..0255ef8bb 100644 --- a/constraint/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go +++ b/constraint/vendor/k8s.io/client-go/listers/discovery/v1/endpointslice.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/discovery/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + discoveryv1 "k8s.io/api/discovery/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EndpointSliceLister helps list EndpointSlices. @@ -30,7 +30,7 @@ import ( type EndpointSliceLister interface { // List lists all EndpointSlices in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.EndpointSlice, err error) + List(selector labels.Selector) (ret []*discoveryv1.EndpointSlice, err error) // EndpointSlices returns an object that can list and get EndpointSlices. EndpointSlices(namespace string) EndpointSliceNamespaceLister EndpointSliceListerExpansion @@ -38,25 +38,17 @@ type EndpointSliceLister interface { // endpointSliceLister implements the EndpointSliceLister interface. type endpointSliceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*discoveryv1.EndpointSlice] } // NewEndpointSliceLister returns a new EndpointSliceLister. func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { - return &endpointSliceLister{indexer: indexer} -} - -// List lists all EndpointSlices in the indexer. 
-func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1.EndpointSlice, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EndpointSlice)) - }) - return ret, err + return &endpointSliceLister{listers.New[*discoveryv1.EndpointSlice](indexer, discoveryv1.Resource("endpointslice"))} } // EndpointSlices returns an object that can list and get EndpointSlices. func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { - return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} + return endpointSliceNamespaceLister{listers.NewNamespaced[*discoveryv1.EndpointSlice](s.ResourceIndexer, namespace)} } // EndpointSliceNamespaceLister helps list and get EndpointSlices. @@ -64,36 +56,15 @@ func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceName type EndpointSliceNamespaceLister interface { // List lists all EndpointSlices in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.EndpointSlice, err error) + List(selector labels.Selector) (ret []*discoveryv1.EndpointSlice, err error) // Get retrieves the EndpointSlice from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.EndpointSlice, error) + Get(name string) (*discoveryv1.EndpointSlice, error) EndpointSliceNamespaceListerExpansion } // endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister // interface. type endpointSliceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all EndpointSlices in the indexer for a given namespace. -func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1.EndpointSlice, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.EndpointSlice)) - }) - return ret, err -} - -// Get retrieves the EndpointSlice from the indexer for a given namespace and name. -func (s endpointSliceNamespaceLister) Get(name string) (*v1.EndpointSlice, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("endpointslice"), name) - } - return obj.(*v1.EndpointSlice), nil + listers.ResourceIndexer[*discoveryv1.EndpointSlice] } diff --git a/constraint/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go b/constraint/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go index e92872d5f..2fb6f0549 100644 --- a/constraint/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go +++ b/constraint/vendor/k8s.io/client-go/listers/discovery/v1beta1/endpointslice.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/discovery/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + discoveryv1beta1 "k8s.io/api/discovery/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EndpointSliceLister helps list EndpointSlices. @@ -30,7 +30,7 @@ import ( type EndpointSliceLister interface { // List lists all EndpointSlices in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) + List(selector labels.Selector) (ret []*discoveryv1beta1.EndpointSlice, err error) // EndpointSlices returns an object that can list and get EndpointSlices. EndpointSlices(namespace string) EndpointSliceNamespaceLister EndpointSliceListerExpansion @@ -38,25 +38,17 @@ type EndpointSliceLister interface { // endpointSliceLister implements the EndpointSliceLister interface. type endpointSliceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*discoveryv1beta1.EndpointSlice] } // NewEndpointSliceLister returns a new EndpointSliceLister. func NewEndpointSliceLister(indexer cache.Indexer) EndpointSliceLister { - return &endpointSliceLister{indexer: indexer} -} - -// List lists all EndpointSlices in the indexer. -func (s *endpointSliceLister) List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.EndpointSlice)) - }) - return ret, err + return &endpointSliceLister{listers.New[*discoveryv1beta1.EndpointSlice](indexer, discoveryv1beta1.Resource("endpointslice"))} } // EndpointSlices returns an object that can list and get EndpointSlices. func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceNamespaceLister { - return endpointSliceNamespaceLister{indexer: s.indexer, namespace: namespace} + return endpointSliceNamespaceLister{listers.NewNamespaced[*discoveryv1beta1.EndpointSlice](s.ResourceIndexer, namespace)} } // EndpointSliceNamespaceLister helps list and get EndpointSlices. @@ -64,36 +56,15 @@ func (s *endpointSliceLister) EndpointSlices(namespace string) EndpointSliceName type EndpointSliceNamespaceLister interface { // List lists all EndpointSlices in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) + List(selector labels.Selector) (ret []*discoveryv1beta1.EndpointSlice, err error) // Get retrieves the EndpointSlice from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.EndpointSlice, error) + Get(name string) (*discoveryv1beta1.EndpointSlice, error) EndpointSliceNamespaceListerExpansion } // endpointSliceNamespaceLister implements the EndpointSliceNamespaceLister // interface. type endpointSliceNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all EndpointSlices in the indexer for a given namespace. -func (s endpointSliceNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.EndpointSlice, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.EndpointSlice)) - }) - return ret, err -} - -// Get retrieves the EndpointSlice from the indexer for a given namespace and name. 
-func (s endpointSliceNamespaceLister) Get(name string) (*v1beta1.EndpointSlice, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("endpointslice"), name) - } - return obj.(*v1beta1.EndpointSlice), nil + listers.ResourceIndexer[*discoveryv1beta1.EndpointSlice] } diff --git a/constraint/vendor/k8s.io/client-go/listers/doc.go b/constraint/vendor/k8s.io/client-go/listers/doc.go new file mode 100644 index 000000000..96c330c93 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package listers provides generated listers for Kubernetes APIs. +package listers // import "k8s.io/client-go/listers" diff --git a/constraint/vendor/k8s.io/client-go/listers/events/v1/event.go b/constraint/vendor/k8s.io/client-go/listers/events/v1/event.go index 4abe841e2..9ea5bbaf8 100644 --- a/constraint/vendor/k8s.io/client-go/listers/events/v1/event.go +++ b/constraint/vendor/k8s.io/client-go/listers/events/v1/event.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/events/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + eventsv1 "k8s.io/api/events/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EventLister helps list Events. @@ -30,7 +30,7 @@ import ( type EventLister interface { // List lists all Events in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Event, err error) + List(selector labels.Selector) (ret []*eventsv1.Event, err error) // Events returns an object that can list and get Events. Events(namespace string) EventNamespaceLister EventListerExpansion @@ -38,25 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*eventsv1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{indexer: indexer} -} - -// List lists all Events in the indexer. -func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err + return &eventLister{listers.New[*eventsv1.Event](indexer, eventsv1.Resource("event"))} } // Events returns an object that can list and get Events. func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{indexer: s.indexer, namespace: namespace} + return eventNamespaceLister{listers.NewNamespaced[*eventsv1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. 
@@ -64,36 +56,15 @@ func (s *eventLister) Events(namespace string) EventNamespaceLister { type EventNamespaceLister interface { // List lists all Events in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Event, err error) + List(selector labels.Selector) (ret []*eventsv1.Event, err error) // Get retrieves the Event from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Event, error) + Get(name string) (*eventsv1.Event, error) EventNamespaceListerExpansion } // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Events in the indexer for a given namespace. -func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Event)) - }) - return ret, err -} - -// Get retrieves the Event from the indexer for a given namespace and name. -func (s eventNamespaceLister) Get(name string) (*v1.Event, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("event"), name) - } - return obj.(*v1.Event), nil + listers.ResourceIndexer[*eventsv1.Event] } diff --git a/constraint/vendor/k8s.io/client-go/listers/events/v1beta1/event.go b/constraint/vendor/k8s.io/client-go/listers/events/v1beta1/event.go index 41a521be6..24fc96261 100644 --- a/constraint/vendor/k8s.io/client-go/listers/events/v1beta1/event.go +++ b/constraint/vendor/k8s.io/client-go/listers/events/v1beta1/event.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/events/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + eventsv1beta1 "k8s.io/api/events/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EventLister helps list Events. @@ -30,7 +30,7 @@ import ( type EventLister interface { // List lists all Events in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Event, err error) + List(selector labels.Selector) (ret []*eventsv1beta1.Event, err error) // Events returns an object that can list and get Events. Events(namespace string) EventNamespaceLister EventListerExpansion @@ -38,25 +38,17 @@ type EventLister interface { // eventLister implements the EventLister interface. type eventLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*eventsv1beta1.Event] } // NewEventLister returns a new EventLister. func NewEventLister(indexer cache.Indexer) EventLister { - return &eventLister{indexer: indexer} -} - -// List lists all Events in the indexer. -func (s *eventLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Event)) - }) - return ret, err + return &eventLister{listers.New[*eventsv1beta1.Event](indexer, eventsv1beta1.Resource("event"))} } // Events returns an object that can list and get Events. 
func (s *eventLister) Events(namespace string) EventNamespaceLister { - return eventNamespaceLister{indexer: s.indexer, namespace: namespace} + return eventNamespaceLister{listers.NewNamespaced[*eventsv1beta1.Event](s.ResourceIndexer, namespace)} } // EventNamespaceLister helps list and get Events. @@ -64,36 +56,15 @@ func (s *eventLister) Events(namespace string) EventNamespaceLister { type EventNamespaceLister interface { // List lists all Events in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Event, err error) + List(selector labels.Selector) (ret []*eventsv1beta1.Event, err error) // Get retrieves the Event from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Event, error) + Get(name string) (*eventsv1beta1.Event, error) EventNamespaceListerExpansion } // eventNamespaceLister implements the EventNamespaceLister // interface. type eventNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Events in the indexer for a given namespace. -func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Event)) - }) - return ret, err -} - -// Get retrieves the Event from the indexer for a given namespace and name. -func (s eventNamespaceLister) Get(name string) (*v1beta1.Event, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("event"), name) - } - return obj.(*v1beta1.Event), nil + listers.ResourceIndexer[*eventsv1beta1.Event] } diff --git a/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go b/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go index 900475410..c78576088 100644 --- a/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go +++ b/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DaemonSetLister helps list DaemonSets. @@ -30,7 +30,7 @@ import ( type DaemonSetLister interface { // List lists all DaemonSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.DaemonSet, err error) // DaemonSets returns an object that can list and get DaemonSets. DaemonSets(namespace string) DaemonSetNamespaceLister DaemonSetListerExpansion @@ -38,25 +38,17 @@ type DaemonSetLister interface { // daemonSetLister implements the DaemonSetLister interface. type daemonSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*extensionsv1beta1.DaemonSet] } // NewDaemonSetLister returns a new DaemonSetLister. func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { - return &daemonSetLister{indexer: indexer} -} - -// List lists all DaemonSets in the indexer. 
-func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.DaemonSet)) - }) - return ret, err + return &daemonSetLister{listers.New[*extensionsv1beta1.DaemonSet](indexer, extensionsv1beta1.Resource("daemonset"))} } // DaemonSets returns an object that can list and get DaemonSets. func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { - return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return daemonSetNamespaceLister{listers.NewNamespaced[*extensionsv1beta1.DaemonSet](s.ResourceIndexer, namespace)} } // DaemonSetNamespaceLister helps list and get DaemonSets. @@ -64,36 +56,15 @@ func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister type DaemonSetNamespaceLister interface { // List lists all DaemonSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.DaemonSet, err error) // Get retrieves the DaemonSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.DaemonSet, error) + Get(name string) (*extensionsv1beta1.DaemonSet, error) DaemonSetNamespaceListerExpansion } // daemonSetNamespaceLister implements the DaemonSetNamespaceLister // interface. type daemonSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all DaemonSets in the indexer for a given namespace. -func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.DaemonSet)) - }) - return ret, err -} - -// Get retrieves the DaemonSet from the indexer for a given namespace and name. -func (s daemonSetNamespaceLister) Get(name string) (*v1beta1.DaemonSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("daemonset"), name) - } - return obj.(*v1beta1.DaemonSet), nil + listers.ResourceIndexer[*extensionsv1beta1.DaemonSet] } diff --git a/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go b/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go index 42b5a0723..efaea3991 100644 --- a/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go +++ b/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // DeploymentLister helps list Deployments. @@ -30,7 +30,7 @@ import ( type DeploymentLister interface { // List lists all Deployments in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.Deployment, err error) // Deployments returns an object that can list and get Deployments. Deployments(namespace string) DeploymentNamespaceLister DeploymentListerExpansion @@ -38,25 +38,17 @@ type DeploymentLister interface { // deploymentLister implements the DeploymentLister interface. type deploymentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*extensionsv1beta1.Deployment] } // NewDeploymentLister returns a new DeploymentLister. func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { - return &deploymentLister{indexer: indexer} -} - -// List lists all Deployments in the indexer. -func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err + return &deploymentLister{listers.New[*extensionsv1beta1.Deployment](indexer, extensionsv1beta1.Resource("deployment"))} } // Deployments returns an object that can list and get Deployments. func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { - return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} + return deploymentNamespaceLister{listers.NewNamespaced[*extensionsv1beta1.Deployment](s.ResourceIndexer, namespace)} } // DeploymentNamespaceLister helps list and get Deployments. @@ -64,36 +56,15 @@ func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceList type DeploymentNamespaceLister interface { // List lists all Deployments in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.Deployment, err error) // Get retrieves the Deployment from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Deployment, error) + Get(name string) (*extensionsv1beta1.Deployment, error) DeploymentNamespaceListerExpansion } // deploymentNamespaceLister implements the DeploymentNamespaceLister // interface. type deploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Deployments in the indexer for a given namespace. -func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Deployment)) - }) - return ret, err -} - -// Get retrieves the Deployment from the indexer for a given namespace and name. 
-func (s deploymentNamespaceLister) Get(name string) (*v1beta1.Deployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("deployment"), name) - } - return obj.(*v1beta1.Deployment), nil + listers.ResourceIndexer[*extensionsv1beta1.Deployment] } diff --git a/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go b/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go index 1cb7677bd..929a13faa 100644 --- a/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressLister helps list Ingresses. @@ -30,7 +30,7 @@ import ( type IngressLister interface { // List lists all Ingresses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.Ingress, err error) // Ingresses returns an object that can list and get Ingresses. Ingresses(namespace string) IngressNamespaceLister IngressListerExpansion @@ -38,25 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*extensionsv1beta1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{indexer: indexer} -} - -// List lists all Ingresses in the indexer. -func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err + return &ingressLister{listers.New[*extensionsv1beta1.Ingress](indexer, extensionsv1beta1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} + return ingressNamespaceLister{listers.NewNamespaced[*extensionsv1beta1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -64,36 +56,15 @@ func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { type IngressNamespaceLister interface { // List lists all Ingresses in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.Ingress, err error) // Get retrieves the Ingress from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Ingress, error) + Get(name string) (*extensionsv1beta1.Ingress, error) IngressNamespaceListerExpansion } // ingressNamespaceLister implements the IngressNamespaceLister // interface. 
type ingressNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Ingresses in the indexer for a given namespace. -func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err -} - -// Get retrieves the Ingress from the indexer for a given namespace and name. -func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name) - } - return obj.(*v1beta1.Ingress), nil + listers.ResourceIndexer[*extensionsv1beta1.Ingress] } diff --git a/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go b/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go index 84419a8e9..ee88fd974 100644 --- a/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go +++ b/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/networkpolicy.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // NetworkPolicyLister helps list NetworkPolicies. @@ -30,7 +30,7 @@ import ( type NetworkPolicyLister interface { // List lists all NetworkPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.NetworkPolicy, err error) // NetworkPolicies returns an object that can list and get NetworkPolicies. NetworkPolicies(namespace string) NetworkPolicyNamespaceLister NetworkPolicyListerExpansion @@ -38,25 +38,17 @@ type NetworkPolicyLister interface { // networkPolicyLister implements the NetworkPolicyLister interface. type networkPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*extensionsv1beta1.NetworkPolicy] } // NewNetworkPolicyLister returns a new NetworkPolicyLister. func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { - return &networkPolicyLister{indexer: indexer} -} - -// List lists all NetworkPolicies in the indexer. -func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.NetworkPolicy)) - }) - return ret, err + return &networkPolicyLister{listers.New[*extensionsv1beta1.NetworkPolicy](indexer, extensionsv1beta1.Resource("networkpolicy"))} } // NetworkPolicies returns an object that can list and get NetworkPolicies. func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { - return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} + return networkPolicyNamespaceLister{listers.NewNamespaced[*extensionsv1beta1.NetworkPolicy](s.ResourceIndexer, namespace)} } // NetworkPolicyNamespaceLister helps list and get NetworkPolicies. 
@@ -64,36 +56,15 @@ func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNam type NetworkPolicyNamespaceLister interface { // List lists all NetworkPolicies in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.NetworkPolicy, err error) // Get retrieves the NetworkPolicy from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.NetworkPolicy, error) + Get(name string) (*extensionsv1beta1.NetworkPolicy, error) NetworkPolicyNamespaceListerExpansion } // networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister // interface. type networkPolicyNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all NetworkPolicies in the indexer for a given namespace. -func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.NetworkPolicy, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.NetworkPolicy)) - }) - return ret, err -} - -// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. -func (s networkPolicyNamespaceLister) Get(name string) (*v1beta1.NetworkPolicy, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("networkpolicy"), name) - } - return obj.(*v1beta1.NetworkPolicy), nil + listers.ResourceIndexer[*extensionsv1beta1.NetworkPolicy] } diff --git a/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go b/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go index a5ec3229b..853cc2bcd 100644 --- a/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go +++ b/constraint/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ReplicaSetLister helps list ReplicaSets. @@ -30,7 +30,7 @@ import ( type ReplicaSetLister interface { // List lists all ReplicaSets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.ReplicaSet, err error) // ReplicaSets returns an object that can list and get ReplicaSets. ReplicaSets(namespace string) ReplicaSetNamespaceLister ReplicaSetListerExpansion @@ -38,25 +38,17 @@ type ReplicaSetLister interface { // replicaSetLister implements the ReplicaSetLister interface. type replicaSetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*extensionsv1beta1.ReplicaSet] } // NewReplicaSetLister returns a new ReplicaSetLister. func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { - return &replicaSetLister{indexer: indexer} -} - -// List lists all ReplicaSets in the indexer. 
-func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ReplicaSet)) - }) - return ret, err + return &replicaSetLister{listers.New[*extensionsv1beta1.ReplicaSet](indexer, extensionsv1beta1.Resource("replicaset"))} } // ReplicaSets returns an object that can list and get ReplicaSets. func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { - return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} + return replicaSetNamespaceLister{listers.NewNamespaced[*extensionsv1beta1.ReplicaSet](s.ResourceIndexer, namespace)} } // ReplicaSetNamespaceLister helps list and get ReplicaSets. @@ -64,36 +56,15 @@ func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceList type ReplicaSetNamespaceLister interface { // List lists all ReplicaSets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) + List(selector labels.Selector) (ret []*extensionsv1beta1.ReplicaSet, err error) // Get retrieves the ReplicaSet from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ReplicaSet, error) + Get(name string) (*extensionsv1beta1.ReplicaSet, error) ReplicaSetNamespaceListerExpansion } // replicaSetNamespaceLister implements the ReplicaSetNamespaceLister // interface. type replicaSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicaSets in the indexer for a given namespace. -func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ReplicaSet)) - }) - return ret, err -} - -// Get retrieves the ReplicaSet from the indexer for a given namespace and name. -func (s replicaSetNamespaceLister) Get(name string) (*v1beta1.ReplicaSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("replicaset"), name) - } - return obj.(*v1beta1.ReplicaSet), nil + listers.ResourceIndexer[*extensionsv1beta1.ReplicaSet] } diff --git a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go index 43ccd4e5f..cccb3022b 100644 --- a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // FlowSchemaLister helps list FlowSchemas. @@ -30,39 +30,19 @@ import ( type FlowSchemaLister interface { // List lists all FlowSchemas in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.FlowSchema, err error) + List(selector labels.Selector) (ret []*flowcontrolv1.FlowSchema, err error) // Get retrieves the FlowSchema from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.FlowSchema, error) + Get(name string) (*flowcontrolv1.FlowSchema, error) FlowSchemaListerExpansion } // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*flowcontrolv1.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. -func (s *flowSchemaLister) Get(name string) (*v1.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("flowschema"), name) - } - return obj.(*v1.FlowSchema), nil + return &flowSchemaLister{listers.New[*flowcontrolv1.FlowSchema](indexer, flowcontrolv1.Resource("flowschema"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go index 61189b9cf..cc8db4a46 100644 --- a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/flowcontrol/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + flowcontrolv1 "k8s.io/api/flowcontrol/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. @@ -30,39 +30,19 @@ import ( type PriorityLevelConfigurationLister interface { // List lists all PriorityLevelConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PriorityLevelConfiguration, err error) + List(selector labels.Selector) (ret []*flowcontrolv1.PriorityLevelConfiguration, err error) // Get retrieves the PriorityLevelConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PriorityLevelConfiguration, error) + Get(name string) (*flowcontrolv1.PriorityLevelConfiguration, error) PriorityLevelConfigurationListerExpansion } // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*flowcontrolv1.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. 
func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. -func (s *priorityLevelConfigurationLister) Get(name string) (*v1.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*flowcontrolv1.PriorityLevelConfiguration](indexer, flowcontrolv1.Resource("prioritylevelconfiguration"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go index 7927a8411..b85907d03 100644 --- a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/flowschema.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // FlowSchemaLister helps list FlowSchemas. @@ -30,39 +30,19 @@ import ( type FlowSchemaLister interface { // List lists all FlowSchemas in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.FlowSchema, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta1.FlowSchema, err error) // Get retrieves the FlowSchema from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.FlowSchema, error) + Get(name string) (*flowcontrolv1beta1.FlowSchema, error) FlowSchemaListerExpansion } // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*flowcontrolv1beta1.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta1.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. 
-func (s *flowSchemaLister) Get(name string) (*v1beta1.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("flowschema"), name) - } - return obj.(*v1beta1.FlowSchema), nil + return &flowSchemaLister{listers.New[*flowcontrolv1beta1.FlowSchema](indexer, flowcontrolv1beta1.Resource("flowschema"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go index c94aaa4c1..338aef8e2 100644 --- a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta1/prioritylevelconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/flowcontrol/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. @@ -30,39 +30,19 @@ import ( type PriorityLevelConfigurationLister interface { // List lists all PriorityLevelConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PriorityLevelConfiguration, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta1.PriorityLevelConfiguration, err error) // Get retrieves the PriorityLevelConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.PriorityLevelConfiguration, error) + Get(name string) (*flowcontrolv1beta1.PriorityLevelConfiguration, error) PriorityLevelConfigurationListerExpansion } // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*flowcontrolv1beta1.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. 
-func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta1.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1beta1.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*flowcontrolv1beta1.PriorityLevelConfiguration](indexer, flowcontrolv1beta1.Resource("prioritylevelconfiguration"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go index 2710f2630..5894a5a29 100644 --- a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/flowschema.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // FlowSchemaLister helps list FlowSchemas. @@ -30,39 +30,19 @@ import ( type FlowSchemaLister interface { // List lists all FlowSchemas in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.FlowSchema, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta2.FlowSchema, err error) // Get retrieves the FlowSchema from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.FlowSchema, error) + Get(name string) (*flowcontrolv1beta2.FlowSchema, error) FlowSchemaListerExpansion } // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*flowcontrolv1beta2.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta2.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. -func (s *flowSchemaLister) Get(name string) (*v1beta2.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("flowschema"), name) - } - return obj.(*v1beta2.FlowSchema), nil + return &flowSchemaLister{listers.New[*flowcontrolv1beta2.FlowSchema](indexer, flowcontrolv1beta2.Resource("flowschema"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go index 00ede0070..1236cb384 100644 --- a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta2/prioritylevelconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta2 import ( - v1beta2 "k8s.io/api/flowcontrol/v1beta2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. @@ -30,39 +30,19 @@ import ( type PriorityLevelConfigurationLister interface { // List lists all PriorityLevelConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta2.PriorityLevelConfiguration, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta2.PriorityLevelConfiguration, err error) // Get retrieves the PriorityLevelConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta2.PriorityLevelConfiguration, error) + Get(name string) (*flowcontrolv1beta2.PriorityLevelConfiguration, error) PriorityLevelConfigurationListerExpansion } // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*flowcontrolv1beta2.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta2.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta2.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. -func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta2.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta2.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1beta2.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*flowcontrolv1beta2.PriorityLevelConfiguration](indexer, flowcontrolv1beta2.Resource("prioritylevelconfiguration"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go index ef01b5a76..5f127d84b 100644 --- a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go +++ b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/flowschema.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // FlowSchemaLister helps list FlowSchemas. @@ -30,39 +30,19 @@ import ( type FlowSchemaLister interface { // List lists all FlowSchemas in the indexer. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta3.FlowSchema, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta3.FlowSchema, err error) // Get retrieves the FlowSchema from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta3.FlowSchema, error) + Get(name string) (*flowcontrolv1beta3.FlowSchema, error) FlowSchemaListerExpansion } // flowSchemaLister implements the FlowSchemaLister interface. type flowSchemaLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*flowcontrolv1beta3.FlowSchema] } // NewFlowSchemaLister returns a new FlowSchemaLister. func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { - return &flowSchemaLister{indexer: indexer} -} - -// List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1beta3.FlowSchema, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta3.FlowSchema)) - }) - return ret, err -} - -// Get retrieves the FlowSchema from the index for a given name. -func (s *flowSchemaLister) Get(name string) (*v1beta3.FlowSchema, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta3.Resource("flowschema"), name) - } - return obj.(*v1beta3.FlowSchema), nil + return &flowSchemaLister{listers.New[*flowcontrolv1beta3.FlowSchema](indexer, flowcontrolv1beta3.Resource("flowschema"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go index d05613949..d50fc8376 100644 --- a/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go +++ b/constraint/vendor/k8s.io/client-go/listers/flowcontrol/v1beta3/prioritylevelconfiguration.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta3 import ( - v1beta3 "k8s.io/api/flowcontrol/v1beta3" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityLevelConfigurationLister helps list PriorityLevelConfigurations. @@ -30,39 +30,19 @@ import ( type PriorityLevelConfigurationLister interface { // List lists all PriorityLevelConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta3.PriorityLevelConfiguration, err error) + List(selector labels.Selector) (ret []*flowcontrolv1beta3.PriorityLevelConfiguration, err error) // Get retrieves the PriorityLevelConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta3.PriorityLevelConfiguration, error) + Get(name string) (*flowcontrolv1beta3.PriorityLevelConfiguration, error) PriorityLevelConfigurationListerExpansion } // priorityLevelConfigurationLister implements the PriorityLevelConfigurationLister interface. type priorityLevelConfigurationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*flowcontrolv1beta3.PriorityLevelConfiguration] } // NewPriorityLevelConfigurationLister returns a new PriorityLevelConfigurationLister. 
func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelConfigurationLister { - return &priorityLevelConfigurationLister{indexer: indexer} -} - -// List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1beta3.PriorityLevelConfiguration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta3.PriorityLevelConfiguration)) - }) - return ret, err -} - -// Get retrieves the PriorityLevelConfiguration from the index for a given name. -func (s *priorityLevelConfigurationLister) Get(name string) (*v1beta3.PriorityLevelConfiguration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta3.Resource("prioritylevelconfiguration"), name) - } - return obj.(*v1beta3.PriorityLevelConfiguration), nil + return &priorityLevelConfigurationLister{listers.New[*flowcontrolv1beta3.PriorityLevelConfiguration](indexer, flowcontrolv1beta3.Resource("prioritylevelconfiguration"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/generic_helpers.go b/constraint/vendor/k8s.io/client-go/listers/generic_helpers.go new file mode 100644 index 000000000..c69bb22b1 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/generic_helpers.go @@ -0,0 +1,72 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package listers + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +// ResourceIndexer wraps an indexer, resource, and optional namespace for a given type. +// This is intended for use by listers (generated by lister-gen) only. +type ResourceIndexer[T runtime.Object] struct { + indexer cache.Indexer + resource schema.GroupResource + namespace string // empty for non-namespaced types +} + +// New returns a new instance of a lister (resource indexer) wrapping the given indexer and resource for the specified type. +// This is intended for use by listers (generated by lister-gen) only. +func New[T runtime.Object](indexer cache.Indexer, resource schema.GroupResource) ResourceIndexer[T] { + return ResourceIndexer[T]{indexer: indexer, resource: resource} +} + +// NewNamespaced returns a new instance of a namespaced lister (resource indexer) wrapping the given parent and namespace for the specified type. +// This is intended for use by listers (generated by lister-gen) only. +func NewNamespaced[T runtime.Object](parent ResourceIndexer[T], namespace string) ResourceIndexer[T] { + return ResourceIndexer[T]{indexer: parent.indexer, resource: parent.resource, namespace: namespace} +} + +// List lists all resources in the indexer matching the given selector. 
+func (l ResourceIndexer[T]) List(selector labels.Selector) (ret []T, err error) { + // ListAllByNamespace reverts to ListAll on empty namespaces + err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) { + ret = append(ret, m.(T)) + }) + return ret, err +} + +// Get retrieves the resource from the index for a given name. +func (l ResourceIndexer[T]) Get(name string) (T, error) { + var key string + if l.namespace == "" { + key = name + } else { + key = l.namespace + "/" + name + } + obj, exists, err := l.indexer.GetByKey(key) + if err != nil { + return *new(T), err + } + if !exists { + return *new(T), errors.NewNotFound(l.resource, name) + } + return obj.(T), nil +} diff --git a/constraint/vendor/k8s.io/client-go/listers/networking/v1/ingress.go b/constraint/vendor/k8s.io/client-go/listers/networking/v1/ingress.go index 0f49d4f57..7d31b09c1 100644 --- a/constraint/vendor/k8s.io/client-go/listers/networking/v1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/listers/networking/v1/ingress.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + networkingv1 "k8s.io/api/networking/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressLister helps list Ingresses. @@ -30,7 +30,7 @@ import ( type IngressLister interface { // List lists all Ingresses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Ingress, err error) + List(selector labels.Selector) (ret []*networkingv1.Ingress, err error) // Ingresses returns an object that can list and get Ingresses. Ingresses(namespace string) IngressNamespaceLister IngressListerExpansion @@ -38,25 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*networkingv1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{indexer: indexer} -} - -// List lists all Ingresses in the indexer. -func (s *ingressLister) List(selector labels.Selector) (ret []*v1.Ingress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Ingress)) - }) - return ret, err + return &ingressLister{listers.New[*networkingv1.Ingress](indexer, networkingv1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} + return ingressNamespaceLister{listers.NewNamespaced[*networkingv1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -64,36 +56,15 @@ func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { type IngressNamespaceLister interface { // List lists all Ingresses in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Ingress, err error) + List(selector labels.Selector) (ret []*networkingv1.Ingress, err error) // Get retrieves the Ingress from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
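
For context on the pattern repeated across the lister files in this diff: the new k8s.io/client-go/listers/generic_helpers.go provides a single generic ResourceIndexer[T] with List and Get, and each regenerated typed lister now just embeds it via listers.New / listers.NewNamespaced instead of carrying its own cache.Indexer plus hand-written List/Get. Below is a minimal usage sketch, not part of the vendored patch; the "demo"/"default" names are made-up example values and it assumes a standard namespace-indexed cache.Indexer such as an informer would maintain.

// Illustrative sketch only (not part of the vendored patch): how the generic
// listers.ResourceIndexer[T] added in generic_helpers.go is used, both
// directly and through a regenerated typed lister.
package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/listers"
	networkingv1listers "k8s.io/client-go/listers/networking/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// A namespace-aware indexer, as informers normally maintain.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})
	// "demo"/"default" are hypothetical example values.
	_ = indexer.Add(&networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	})

	// Direct use of the generic helper: one List/Get implementation replaces
	// the per-type boilerplate removed throughout this diff.
	generic := listers.New[*networkingv1.Ingress](indexer, networkingv1.Resource("ingress"))
	all, _ := generic.List(labels.Everything())
	fmt.Println("cluster-wide ingresses:", len(all))

	// The regenerated typed lister embeds the same helper, so callers see
	// unchanged behavior.
	lister := networkingv1listers.NewIngressLister(indexer)
	ing, err := lister.Ingresses("default").Get("demo")
	if err != nil {
		panic(err) // would be a NotFound error if the object were missing
	}
	fmt.Println("got:", ing.Name)
}
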
- Get(name string) (*v1.Ingress, error) + Get(name string) (*networkingv1.Ingress, error) IngressNamespaceListerExpansion } // ingressNamespaceLister implements the IngressNamespaceLister // interface. type ingressNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Ingresses in the indexer for a given namespace. -func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1.Ingress, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Ingress)) - }) - return ret, err -} - -// Get retrieves the Ingress from the indexer for a given namespace and name. -func (s ingressNamespaceLister) Get(name string) (*v1.Ingress, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("ingress"), name) - } - return obj.(*v1.Ingress), nil + listers.ResourceIndexer[*networkingv1.Ingress] } diff --git a/constraint/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go b/constraint/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go index 1480cb13f..71d432164 100644 --- a/constraint/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/networking/v1/ingressclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + networkingv1 "k8s.io/api/networking/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressClassLister helps list IngressClasses. @@ -30,39 +30,19 @@ import ( type IngressClassLister interface { // List lists all IngressClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.IngressClass, err error) + List(selector labels.Selector) (ret []*networkingv1.IngressClass, err error) // Get retrieves the IngressClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.IngressClass, error) + Get(name string) (*networkingv1.IngressClass, error) IngressClassListerExpansion } // ingressClassLister implements the IngressClassLister interface. type ingressClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*networkingv1.IngressClass] } // NewIngressClassLister returns a new IngressClassLister. func NewIngressClassLister(indexer cache.Indexer) IngressClassLister { - return &ingressClassLister{indexer: indexer} -} - -// List lists all IngressClasses in the indexer. -func (s *ingressClassLister) List(selector labels.Selector) (ret []*v1.IngressClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.IngressClass)) - }) - return ret, err -} - -// Get retrieves the IngressClass from the index for a given name. 
-func (s *ingressClassLister) Get(name string) (*v1.IngressClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("ingressclass"), name) - } - return obj.(*v1.IngressClass), nil + return &ingressClassLister{listers.New[*networkingv1.IngressClass](indexer, networkingv1.Resource("ingressclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go b/constraint/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go index 34cabf057..5a38a74a0 100644 --- a/constraint/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go +++ b/constraint/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + networkingv1 "k8s.io/api/networking/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // NetworkPolicyLister helps list NetworkPolicies. @@ -30,7 +30,7 @@ import ( type NetworkPolicyLister interface { // List lists all NetworkPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) + List(selector labels.Selector) (ret []*networkingv1.NetworkPolicy, err error) // NetworkPolicies returns an object that can list and get NetworkPolicies. NetworkPolicies(namespace string) NetworkPolicyNamespaceLister NetworkPolicyListerExpansion @@ -38,25 +38,17 @@ type NetworkPolicyLister interface { // networkPolicyLister implements the NetworkPolicyLister interface. type networkPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*networkingv1.NetworkPolicy] } // NewNetworkPolicyLister returns a new NetworkPolicyLister. func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { - return &networkPolicyLister{indexer: indexer} -} - -// List lists all NetworkPolicies in the indexer. -func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.NetworkPolicy)) - }) - return ret, err + return &networkPolicyLister{listers.New[*networkingv1.NetworkPolicy](indexer, networkingv1.Resource("networkpolicy"))} } // NetworkPolicies returns an object that can list and get NetworkPolicies. func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { - return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} + return networkPolicyNamespaceLister{listers.NewNamespaced[*networkingv1.NetworkPolicy](s.ResourceIndexer, namespace)} } // NetworkPolicyNamespaceLister helps list and get NetworkPolicies. @@ -64,36 +56,15 @@ func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNam type NetworkPolicyNamespaceLister interface { // List lists all NetworkPolicies in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) + List(selector labels.Selector) (ret []*networkingv1.NetworkPolicy, err error) // Get retrieves the NetworkPolicy from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.NetworkPolicy, error) + Get(name string) (*networkingv1.NetworkPolicy, error) NetworkPolicyNamespaceListerExpansion } // networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister // interface. type networkPolicyNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all NetworkPolicies in the indexer for a given namespace. -func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.NetworkPolicy)) - }) - return ret, err -} - -// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. -func (s networkPolicyNamespaceLister) Get(name string) (*v1.NetworkPolicy, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("networkpolicy"), name) - } - return obj.(*v1.NetworkPolicy), nil + listers.ResourceIndexer[*networkingv1.NetworkPolicy] } diff --git a/constraint/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go b/constraint/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go index b3dfe2797..953265eca 100644 --- a/constraint/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go +++ b/constraint/vendor/k8s.io/client-go/listers/networking/v1alpha1/ipaddress.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IPAddressLister helps list IPAddresses. @@ -30,39 +30,19 @@ import ( type IPAddressLister interface { // List lists all IPAddresses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.IPAddress, err error) + List(selector labels.Selector) (ret []*networkingv1alpha1.IPAddress, err error) // Get retrieves the IPAddress from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.IPAddress, error) + Get(name string) (*networkingv1alpha1.IPAddress, error) IPAddressListerExpansion } // iPAddressLister implements the IPAddressLister interface. type iPAddressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*networkingv1alpha1.IPAddress] } // NewIPAddressLister returns a new IPAddressLister. func NewIPAddressLister(indexer cache.Indexer) IPAddressLister { - return &iPAddressLister{indexer: indexer} -} - -// List lists all IPAddresses in the indexer. -func (s *iPAddressLister) List(selector labels.Selector) (ret []*v1alpha1.IPAddress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.IPAddress)) - }) - return ret, err -} - -// Get retrieves the IPAddress from the index for a given name. 
-func (s *iPAddressLister) Get(name string) (*v1alpha1.IPAddress, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("ipaddress"), name) - } - return obj.(*v1alpha1.IPAddress), nil + return &iPAddressLister{listers.New[*networkingv1alpha1.IPAddress](indexer, networkingv1alpha1.Resource("ipaddress"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go b/constraint/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go index 8bc2b10e6..0c4cb2ebf 100644 --- a/constraint/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go +++ b/constraint/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/networking/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ServiceCIDRLister helps list ServiceCIDRs. @@ -30,39 +30,19 @@ import ( type ServiceCIDRLister interface { // List lists all ServiceCIDRs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error) + List(selector labels.Selector) (ret []*networkingv1alpha1.ServiceCIDR, err error) // Get retrieves the ServiceCIDR from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ServiceCIDR, error) + Get(name string) (*networkingv1alpha1.ServiceCIDR, error) ServiceCIDRListerExpansion } // serviceCIDRLister implements the ServiceCIDRLister interface. type serviceCIDRLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*networkingv1alpha1.ServiceCIDR] } // NewServiceCIDRLister returns a new ServiceCIDRLister. func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister { - return &serviceCIDRLister{indexer: indexer} -} - -// List lists all ServiceCIDRs in the indexer. -func (s *serviceCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ServiceCIDR)) - }) - return ret, err -} - -// Get retrieves the ServiceCIDR from the index for a given name. -func (s *serviceCIDRLister) Get(name string) (*v1alpha1.ServiceCIDR, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("servicecidr"), name) - } - return obj.(*v1alpha1.ServiceCIDR), nil + return &serviceCIDRLister{listers.New[*networkingv1alpha1.ServiceCIDR](indexer, networkingv1alpha1.Resource("servicecidr"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go b/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go index d8c99c186..320af736e 100644 --- a/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go +++ b/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/expansion_generated.go @@ -18,6 +18,10 @@ limitations under the License. package v1beta1 +// IPAddressListerExpansion allows custom methods to be added to +// IPAddressLister. 
+type IPAddressListerExpansion interface{} + // IngressListerExpansion allows custom methods to be added to // IngressLister. type IngressListerExpansion interface{} @@ -29,3 +33,7 @@ type IngressNamespaceListerExpansion interface{} // IngressClassListerExpansion allows custom methods to be added to // IngressClassLister. type IngressClassListerExpansion interface{} + +// ServiceCIDRListerExpansion allows custom methods to be added to +// ServiceCIDRLister. +type ServiceCIDRListerExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go b/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go index b8f4d3558..f8c172487 100644 --- a/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go +++ b/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ingress.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressLister helps list Ingresses. @@ -30,7 +30,7 @@ import ( type IngressLister interface { // List lists all Ingresses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + List(selector labels.Selector) (ret []*networkingv1beta1.Ingress, err error) // Ingresses returns an object that can list and get Ingresses. Ingresses(namespace string) IngressNamespaceLister IngressListerExpansion @@ -38,25 +38,17 @@ type IngressLister interface { // ingressLister implements the IngressLister interface. type ingressLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*networkingv1beta1.Ingress] } // NewIngressLister returns a new IngressLister. func NewIngressLister(indexer cache.Indexer) IngressLister { - return &ingressLister{indexer: indexer} -} - -// List lists all Ingresses in the indexer. -func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err + return &ingressLister{listers.New[*networkingv1beta1.Ingress](indexer, networkingv1beta1.Resource("ingress"))} } // Ingresses returns an object that can list and get Ingresses. func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { - return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} + return ingressNamespaceLister{listers.NewNamespaced[*networkingv1beta1.Ingress](s.ResourceIndexer, namespace)} } // IngressNamespaceLister helps list and get Ingresses. @@ -64,36 +56,15 @@ func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { type IngressNamespaceLister interface { // List lists all Ingresses in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + List(selector labels.Selector) (ret []*networkingv1beta1.Ingress, err error) // Get retrieves the Ingress from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1beta1.Ingress, error) + Get(name string) (*networkingv1beta1.Ingress, error) IngressNamespaceListerExpansion } // ingressNamespaceLister implements the IngressNamespaceLister // interface. type ingressNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Ingresses in the indexer for a given namespace. -func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Ingress)) - }) - return ret, err -} - -// Get retrieves the Ingress from the indexer for a given namespace and name. -func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name) - } - return obj.(*v1beta1.Ingress), nil + listers.ResourceIndexer[*networkingv1beta1.Ingress] } diff --git a/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go b/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go index ebcd6ba85..0e87e0397 100644 --- a/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ingressclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/networking/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IngressClassLister helps list IngressClasses. @@ -30,39 +30,19 @@ import ( type IngressClassLister interface { // List lists all IngressClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.IngressClass, err error) + List(selector labels.Selector) (ret []*networkingv1beta1.IngressClass, err error) // Get retrieves the IngressClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.IngressClass, error) + Get(name string) (*networkingv1beta1.IngressClass, error) IngressClassListerExpansion } // ingressClassLister implements the IngressClassLister interface. type ingressClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*networkingv1beta1.IngressClass] } // NewIngressClassLister returns a new IngressClassLister. func NewIngressClassLister(indexer cache.Indexer) IngressClassLister { - return &ingressClassLister{indexer: indexer} -} - -// List lists all IngressClasses in the indexer. -func (s *ingressClassLister) List(selector labels.Selector) (ret []*v1beta1.IngressClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.IngressClass)) - }) - return ret, err -} - -// Get retrieves the IngressClass from the index for a given name. 
-func (s *ingressClassLister) Get(name string) (*v1beta1.IngressClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("ingressclass"), name) - } - return obj.(*v1beta1.IngressClass), nil + return &ingressClassLister{listers.New[*networkingv1beta1.IngressClass](indexer, networkingv1beta1.Resource("ingressclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go b/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go new file mode 100644 index 000000000..41a4bf927 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + networkingv1beta1 "k8s.io/api/networking/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// IPAddressLister helps list IPAddresses. +// All objects returned here must be treated as read-only. +type IPAddressLister interface { + // List lists all IPAddresses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*networkingv1beta1.IPAddress, err error) + // Get retrieves the IPAddress from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*networkingv1beta1.IPAddress, error) + IPAddressListerExpansion +} + +// iPAddressLister implements the IPAddressLister interface. +type iPAddressLister struct { + listers.ResourceIndexer[*networkingv1beta1.IPAddress] +} + +// NewIPAddressLister returns a new IPAddressLister. +func NewIPAddressLister(indexer cache.Indexer) IPAddressLister { + return &iPAddressLister{listers.New[*networkingv1beta1.IPAddress](indexer, networkingv1beta1.Resource("ipaddress"))} +} diff --git a/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go b/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go new file mode 100644 index 000000000..5c9a0eac1 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + networkingv1beta1 "k8s.io/api/networking/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ServiceCIDRLister helps list ServiceCIDRs. +// All objects returned here must be treated as read-only. +type ServiceCIDRLister interface { + // List lists all ServiceCIDRs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*networkingv1beta1.ServiceCIDR, err error) + // Get retrieves the ServiceCIDR from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*networkingv1beta1.ServiceCIDR, error) + ServiceCIDRListerExpansion +} + +// serviceCIDRLister implements the ServiceCIDRLister interface. +type serviceCIDRLister struct { + listers.ResourceIndexer[*networkingv1beta1.ServiceCIDR] +} + +// NewServiceCIDRLister returns a new ServiceCIDRLister. +func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister { + return &serviceCIDRLister{listers.New[*networkingv1beta1.ServiceCIDR](indexer, networkingv1beta1.Resource("servicecidr"))} +} diff --git a/constraint/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go index 6e00cf1a5..b8322dbb4 100644 --- a/constraint/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/node/v1/runtimeclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/node/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + nodev1 "k8s.io/api/node/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RuntimeClassLister helps list RuntimeClasses. @@ -30,39 +30,19 @@ import ( type RuntimeClassLister interface { // List lists all RuntimeClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.RuntimeClass, err error) + List(selector labels.Selector) (ret []*nodev1.RuntimeClass, err error) // Get retrieves the RuntimeClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.RuntimeClass, error) + Get(name string) (*nodev1.RuntimeClass, error) RuntimeClassListerExpansion } // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*nodev1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. 
-func (s *runtimeClassLister) Get(name string) (*v1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("runtimeclass"), name) - } - return obj.(*v1.RuntimeClass), nil + return &runtimeClassLister{listers.New[*nodev1.RuntimeClass](indexer, nodev1.Resource("runtimeclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go index 31f335799..b3d4ad46c 100644 --- a/constraint/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/node/v1alpha1/runtimeclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/node/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RuntimeClassLister helps list RuntimeClasses. @@ -30,39 +30,19 @@ import ( type RuntimeClassLister interface { // List lists all RuntimeClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) + List(selector labels.Selector) (ret []*nodev1alpha1.RuntimeClass, err error) // Get retrieves the RuntimeClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.RuntimeClass, error) + Get(name string) (*nodev1alpha1.RuntimeClass, error) RuntimeClassListerExpansion } // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*nodev1alpha1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1alpha1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. -func (s *runtimeClassLister) Get(name string) (*v1alpha1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("runtimeclass"), name) - } - return obj.(*v1alpha1.RuntimeClass), nil + return &runtimeClassLister{listers.New[*nodev1alpha1.RuntimeClass](indexer, nodev1alpha1.Resource("runtimeclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go b/constraint/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go index 7dbd6ab26..1b9f8d799 100644 --- a/constraint/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/node/v1beta1/runtimeclass.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/node/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + nodev1beta1 "k8s.io/api/node/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RuntimeClassLister helps list RuntimeClasses. @@ -30,39 +30,19 @@ import ( type RuntimeClassLister interface { // List lists all RuntimeClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) + List(selector labels.Selector) (ret []*nodev1beta1.RuntimeClass, err error) // Get retrieves the RuntimeClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.RuntimeClass, error) + Get(name string) (*nodev1beta1.RuntimeClass, error) RuntimeClassListerExpansion } // runtimeClassLister implements the RuntimeClassLister interface. type runtimeClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*nodev1beta1.RuntimeClass] } // NewRuntimeClassLister returns a new RuntimeClassLister. func NewRuntimeClassLister(indexer cache.Indexer) RuntimeClassLister { - return &runtimeClassLister{indexer: indexer} -} - -// List lists all RuntimeClasses in the indexer. -func (s *runtimeClassLister) List(selector labels.Selector) (ret []*v1beta1.RuntimeClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.RuntimeClass)) - }) - return ret, err -} - -// Get retrieves the RuntimeClass from the index for a given name. -func (s *runtimeClassLister) Get(name string) (*v1beta1.RuntimeClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("runtimeclass"), name) - } - return obj.(*v1beta1.RuntimeClass), nil + return &runtimeClassLister{listers.New[*nodev1beta1.RuntimeClass](indexer, nodev1beta1.Resource("runtimeclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/policy/v1/eviction.go b/constraint/vendor/k8s.io/client-go/listers/policy/v1/eviction.go index dc5ffa074..8dccd7313 100644 --- a/constraint/vendor/k8s.io/client-go/listers/policy/v1/eviction.go +++ b/constraint/vendor/k8s.io/client-go/listers/policy/v1/eviction.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/policy/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + policyv1 "k8s.io/api/policy/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EvictionLister helps list Evictions. @@ -30,7 +30,7 @@ import ( type EvictionLister interface { // List lists all Evictions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Eviction, err error) + List(selector labels.Selector) (ret []*policyv1.Eviction, err error) // Evictions returns an object that can list and get Evictions. Evictions(namespace string) EvictionNamespaceLister EvictionListerExpansion @@ -38,25 +38,17 @@ type EvictionLister interface { // evictionLister implements the EvictionLister interface. type evictionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*policyv1.Eviction] } // NewEvictionLister returns a new EvictionLister. 
func NewEvictionLister(indexer cache.Indexer) EvictionLister { - return &evictionLister{indexer: indexer} -} - -// List lists all Evictions in the indexer. -func (s *evictionLister) List(selector labels.Selector) (ret []*v1.Eviction, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Eviction)) - }) - return ret, err + return &evictionLister{listers.New[*policyv1.Eviction](indexer, policyv1.Resource("eviction"))} } // Evictions returns an object that can list and get Evictions. func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { - return evictionNamespaceLister{indexer: s.indexer, namespace: namespace} + return evictionNamespaceLister{listers.NewNamespaced[*policyv1.Eviction](s.ResourceIndexer, namespace)} } // EvictionNamespaceLister helps list and get Evictions. @@ -64,36 +56,15 @@ func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { type EvictionNamespaceLister interface { // List lists all Evictions in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Eviction, err error) + List(selector labels.Selector) (ret []*policyv1.Eviction, err error) // Get retrieves the Eviction from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Eviction, error) + Get(name string) (*policyv1.Eviction, error) EvictionNamespaceListerExpansion } // evictionNamespaceLister implements the EvictionNamespaceLister // interface. type evictionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Evictions in the indexer for a given namespace. -func (s evictionNamespaceLister) List(selector labels.Selector) (ret []*v1.Eviction, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Eviction)) - }) - return ret, err -} - -// Get retrieves the Eviction from the indexer for a given namespace and name. -func (s evictionNamespaceLister) Get(name string) (*v1.Eviction, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("eviction"), name) - } - return obj.(*v1.Eviction), nil + listers.ResourceIndexer[*policyv1.Eviction] } diff --git a/constraint/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go b/constraint/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go index 8470d38bb..1a6273b3e 100644 --- a/constraint/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go +++ b/constraint/vendor/k8s.io/client-go/listers/policy/v1/poddisruptionbudget.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/policy/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + policyv1 "k8s.io/api/policy/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PodDisruptionBudgetLister helps list PodDisruptionBudgets. @@ -30,7 +30,7 @@ import ( type PodDisruptionBudgetLister interface { // List lists all PodDisruptionBudgets in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error) + List(selector labels.Selector) (ret []*policyv1.PodDisruptionBudget, err error) // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister PodDisruptionBudgetListerExpansion @@ -38,25 +38,17 @@ type PodDisruptionBudgetLister interface { // podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface. type podDisruptionBudgetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*policyv1.PodDisruptionBudget] } // NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister. func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister { - return &podDisruptionBudgetLister{indexer: indexer} -} - -// List lists all PodDisruptionBudgets in the indexer. -func (s *podDisruptionBudgetLister) List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodDisruptionBudget)) - }) - return ret, err + return &podDisruptionBudgetLister{listers.New[*policyv1.PodDisruptionBudget](indexer, policyv1.Resource("poddisruptionbudget"))} } // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister { - return podDisruptionBudgetNamespaceLister{indexer: s.indexer, namespace: namespace} + return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*policyv1.PodDisruptionBudget](s.ResourceIndexer, namespace)} } // PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets. @@ -64,36 +56,15 @@ func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDi type PodDisruptionBudgetNamespaceLister interface { // List lists all PodDisruptionBudgets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error) + List(selector labels.Selector) (ret []*policyv1.PodDisruptionBudget, err error) // Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PodDisruptionBudget, error) + Get(name string) (*policyv1.PodDisruptionBudget, error) PodDisruptionBudgetNamespaceListerExpansion } // podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister // interface. type podDisruptionBudgetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodDisruptionBudgets in the indexer for a given namespace. -func (s podDisruptionBudgetNamespaceLister) List(selector labels.Selector) (ret []*v1.PodDisruptionBudget, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PodDisruptionBudget)) - }) - return ret, err -} - -// Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. 
-func (s podDisruptionBudgetNamespaceLister) Get(name string) (*v1.PodDisruptionBudget, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("poddisruptionbudget"), name) - } - return obj.(*v1.PodDisruptionBudget), nil + listers.ResourceIndexer[*policyv1.PodDisruptionBudget] } diff --git a/constraint/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go b/constraint/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go index e1d40d0b3..318c380fa 100644 --- a/constraint/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go +++ b/constraint/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + policyv1beta1 "k8s.io/api/policy/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // EvictionLister helps list Evictions. @@ -30,7 +30,7 @@ import ( type EvictionLister interface { // List lists all Evictions in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) + List(selector labels.Selector) (ret []*policyv1beta1.Eviction, err error) // Evictions returns an object that can list and get Evictions. Evictions(namespace string) EvictionNamespaceLister EvictionListerExpansion @@ -38,25 +38,17 @@ type EvictionLister interface { // evictionLister implements the EvictionLister interface. type evictionLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*policyv1beta1.Eviction] } // NewEvictionLister returns a new EvictionLister. func NewEvictionLister(indexer cache.Indexer) EvictionLister { - return &evictionLister{indexer: indexer} -} - -// List lists all Evictions in the indexer. -func (s *evictionLister) List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Eviction)) - }) - return ret, err + return &evictionLister{listers.New[*policyv1beta1.Eviction](indexer, policyv1beta1.Resource("eviction"))} } // Evictions returns an object that can list and get Evictions. func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { - return evictionNamespaceLister{indexer: s.indexer, namespace: namespace} + return evictionNamespaceLister{listers.NewNamespaced[*policyv1beta1.Eviction](s.ResourceIndexer, namespace)} } // EvictionNamespaceLister helps list and get Evictions. @@ -64,36 +56,15 @@ func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { type EvictionNamespaceLister interface { // List lists all Evictions in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) + List(selector labels.Selector) (ret []*policyv1beta1.Eviction, err error) // Get retrieves the Eviction from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Eviction, error) + Get(name string) (*policyv1beta1.Eviction, error) EvictionNamespaceListerExpansion } // evictionNamespaceLister implements the EvictionNamespaceLister // interface. 
type evictionNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Evictions in the indexer for a given namespace. -func (s evictionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Eviction)) - }) - return ret, err -} - -// Get retrieves the Eviction from the indexer for a given namespace and name. -func (s evictionNamespaceLister) Get(name string) (*v1beta1.Eviction, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("eviction"), name) - } - return obj.(*v1beta1.Eviction), nil + listers.ResourceIndexer[*policyv1beta1.Eviction] } diff --git a/constraint/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go b/constraint/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go index aa08f813e..fb156e97b 100644 --- a/constraint/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go +++ b/constraint/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/policy/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + policyv1beta1 "k8s.io/api/policy/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PodDisruptionBudgetLister helps list PodDisruptionBudgets. @@ -30,7 +30,7 @@ import ( type PodDisruptionBudgetLister interface { // List lists all PodDisruptionBudgets in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) + List(selector labels.Selector) (ret []*policyv1beta1.PodDisruptionBudget, err error) // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister PodDisruptionBudgetListerExpansion @@ -38,25 +38,17 @@ type PodDisruptionBudgetLister interface { // podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface. type podDisruptionBudgetLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*policyv1beta1.PodDisruptionBudget] } // NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister. func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister { - return &podDisruptionBudgetLister{indexer: indexer} -} - -// List lists all PodDisruptionBudgets in the indexer. -func (s *podDisruptionBudgetLister) List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PodDisruptionBudget)) - }) - return ret, err + return &podDisruptionBudgetLister{listers.New[*policyv1beta1.PodDisruptionBudget](indexer, policyv1beta1.Resource("poddisruptionbudget"))} } // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. 
func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister { - return podDisruptionBudgetNamespaceLister{indexer: s.indexer, namespace: namespace} + return podDisruptionBudgetNamespaceLister{listers.NewNamespaced[*policyv1beta1.PodDisruptionBudget](s.ResourceIndexer, namespace)} } // PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets. @@ -64,36 +56,15 @@ func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDi type PodDisruptionBudgetNamespaceLister interface { // List lists all PodDisruptionBudgets in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) + List(selector labels.Selector) (ret []*policyv1beta1.PodDisruptionBudget, err error) // Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.PodDisruptionBudget, error) + Get(name string) (*policyv1beta1.PodDisruptionBudget, error) PodDisruptionBudgetNamespaceListerExpansion } // podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister // interface. type podDisruptionBudgetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodDisruptionBudgets in the indexer for a given namespace. -func (s podDisruptionBudgetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PodDisruptionBudget)) - }) - return ret, err -} - -// Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. -func (s podDisruptionBudgetNamespaceLister) Get(name string) (*v1beta1.PodDisruptionBudget, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("poddisruptionbudget"), name) - } - return obj.(*v1beta1.PodDisruptionBudget), nil + listers.ResourceIndexer[*policyv1beta1.PodDisruptionBudget] } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go index 84dc003ca..456393aee 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1 "k8s.io/api/rbac/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleLister helps list ClusterRoles. @@ -30,39 +30,19 @@ import ( type ClusterRoleLister interface { // List lists all ClusterRoles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ClusterRole, err error) + List(selector labels.Selector) (ret []*rbacv1.ClusterRole, err error) // Get retrieves the ClusterRole from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.ClusterRole, error) + Get(name string) (*rbacv1.ClusterRole, error) ClusterRoleListerExpansion } // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{indexer: indexer} -} - -// List lists all ClusterRoles in the indexer. -func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1.ClusterRole, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ClusterRole)) - }) - return ret, err -} - -// Get retrieves the ClusterRole from the index for a given name. -func (s *clusterRoleLister) Get(name string) (*v1.ClusterRole, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("clusterrole"), name) - } - return obj.(*v1.ClusterRole), nil + return &clusterRoleLister{listers.New[*rbacv1.ClusterRole](indexer, rbacv1.Resource("clusterrole"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go index ff061d4b2..bf84144a7 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1 "k8s.io/api/rbac/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleBindingLister helps list ClusterRoleBindings. @@ -30,39 +30,19 @@ import ( type ClusterRoleBindingLister interface { // List lists all ClusterRoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ClusterRoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1.ClusterRoleBinding, err error) // Get retrieves the ClusterRoleBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.ClusterRoleBinding, error) + Get(name string) (*rbacv1.ClusterRoleBinding, error) ClusterRoleBindingListerExpansion } // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{indexer: indexer} -} - -// List lists all ClusterRoleBindings in the indexer. -func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1.ClusterRoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ClusterRoleBinding)) - }) - return ret, err -} - -// Get retrieves the ClusterRoleBinding from the index for a given name. 
-func (s *clusterRoleBindingLister) Get(name string) (*v1.ClusterRoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("clusterrolebinding"), name) - } - return obj.(*v1.ClusterRoleBinding), nil + return &clusterRoleBindingLister{listers.New[*rbacv1.ClusterRoleBinding](indexer, rbacv1.Resource("clusterrolebinding"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1/role.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1/role.go index 503f013b5..d0077e3ce 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1/role.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1/role.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1 "k8s.io/api/rbac/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleLister helps list Roles. @@ -30,7 +30,7 @@ import ( type RoleLister interface { // List lists all Roles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1.Role, err error) // Roles returns an object that can list and get Roles. Roles(namespace string) RoleNamespaceLister RoleListerExpansion @@ -38,25 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{indexer: indexer} -} - -// List lists all Roles in the indexer. -func (s *roleLister) List(selector labels.Selector) (ret []*v1.Role, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Role)) - }) - return ret, err + return &roleLister{listers.New[*rbacv1.Role](indexer, rbacv1.Resource("role"))} } // Roles returns an object that can list and get Roles. func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleNamespaceLister{listers.NewNamespaced[*rbacv1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -64,36 +56,15 @@ func (s *roleLister) Roles(namespace string) RoleNamespaceLister { type RoleNamespaceLister interface { // List lists all Roles in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1.Role, err error) // Get retrieves the Role from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.Role, error) + Get(name string) (*rbacv1.Role, error) RoleNamespaceListerExpansion } // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Roles in the indexer for a given namespace. 
-func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1.Role, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Role)) - }) - return ret, err -} - -// Get retrieves the Role from the indexer for a given namespace and name. -func (s roleNamespaceLister) Get(name string) (*v1.Role, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("role"), name) - } - return obj.(*v1.Role), nil + listers.ResourceIndexer[*rbacv1.Role] } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go index ea50c6413..a0e366156 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1 "k8s.io/api/rbac/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleBindingLister helps list RoleBindings. @@ -30,7 +30,7 @@ import ( type RoleBindingLister interface { // List lists all RoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1.RoleBinding, err error) // RoleBindings returns an object that can list and get RoleBindings. RoleBindings(namespace string) RoleBindingNamespaceLister RoleBindingListerExpansion @@ -38,25 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{indexer: indexer} -} - -// List lists all RoleBindings in the indexer. -func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.RoleBinding)) - }) - return ret, err + return &roleBindingLister{listers.New[*rbacv1.RoleBinding](indexer, rbacv1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleBindingNamespaceLister{listers.NewNamespaced[*rbacv1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -64,36 +56,15 @@ func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceL type RoleBindingNamespaceLister interface { // List lists all RoleBindings in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1.RoleBinding, err error) // Get retrieves the RoleBinding from the indexer for a given namespace and name. 
// Objects returned here must be treated as read-only. - Get(name string) (*v1.RoleBinding, error) + Get(name string) (*rbacv1.RoleBinding, error) RoleBindingNamespaceListerExpansion } // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RoleBindings in the indexer for a given namespace. -func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.RoleBinding)) - }) - return ret, err -} - -// Get retrieves the RoleBinding from the indexer for a given namespace and name. -func (s roleBindingNamespaceLister) Get(name string) (*v1.RoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("rolebinding"), name) - } - return obj.(*v1.RoleBinding), nil + listers.ResourceIndexer[*rbacv1.RoleBinding] } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go index 181ea95a7..1783b7161 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleLister helps list ClusterRoles. @@ -30,39 +30,19 @@ import ( type ClusterRoleLister interface { // List lists all ClusterRoles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ClusterRole, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.ClusterRole, err error) // Get retrieves the ClusterRole from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ClusterRole, error) + Get(name string) (*rbacv1alpha1.ClusterRole, error) ClusterRoleListerExpansion } // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1alpha1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{indexer: indexer} -} - -// List lists all ClusterRoles in the indexer. -func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterRole, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterRole)) - }) - return ret, err -} - -// Get retrieves the ClusterRole from the index for a given name. 
-func (s *clusterRoleLister) Get(name string) (*v1alpha1.ClusterRole, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clusterrole"), name) - } - return obj.(*v1alpha1.ClusterRole), nil + return &clusterRoleLister{listers.New[*rbacv1alpha1.ClusterRole](indexer, rbacv1alpha1.Resource("clusterrole"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go index 29d283b6c..be80c7585 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleBindingLister helps list ClusterRoleBindings. @@ -30,39 +30,19 @@ import ( type ClusterRoleBindingLister interface { // List lists all ClusterRoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ClusterRoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.ClusterRoleBinding, err error) // Get retrieves the ClusterRoleBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ClusterRoleBinding, error) + Get(name string) (*rbacv1alpha1.ClusterRoleBinding, error) ClusterRoleBindingListerExpansion } // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1alpha1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{indexer: indexer} -} - -// List lists all ClusterRoleBindings in the indexer. -func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterRoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterRoleBinding)) - }) - return ret, err -} - -// Get retrieves the ClusterRoleBinding from the index for a given name. -func (s *clusterRoleBindingLister) Get(name string) (*v1alpha1.ClusterRoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clusterrolebinding"), name) - } - return obj.(*v1alpha1.ClusterRoleBinding), nil + return &clusterRoleBindingLister{listers.New[*rbacv1alpha1.ClusterRoleBinding](indexer, rbacv1alpha1.Resource("clusterrolebinding"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go index 13a64137a..28a1ede19 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleLister helps list Roles. @@ -30,7 +30,7 @@ import ( type RoleLister interface { // List lists all Roles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.Role, err error) // Roles returns an object that can list and get Roles. Roles(namespace string) RoleNamespaceLister RoleListerExpansion @@ -38,25 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1alpha1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{indexer: indexer} -} - -// List lists all Roles in the indexer. -func (s *roleLister) List(selector labels.Selector) (ret []*v1alpha1.Role, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Role)) - }) - return ret, err + return &roleLister{listers.New[*rbacv1alpha1.Role](indexer, rbacv1alpha1.Resource("role"))} } // Roles returns an object that can list and get Roles. func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleNamespaceLister{listers.NewNamespaced[*rbacv1alpha1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -64,36 +56,15 @@ func (s *roleLister) Roles(namespace string) RoleNamespaceLister { type RoleNamespaceLister interface { // List lists all Roles in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.Role, err error) // Get retrieves the Role from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.Role, error) + Get(name string) (*rbacv1alpha1.Role, error) RoleNamespaceListerExpansion } // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Roles in the indexer for a given namespace. -func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Role, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Role)) - }) - return ret, err -} - -// Get retrieves the Role from the indexer for a given namespace and name. 
-func (s roleNamespaceLister) Get(name string) (*v1alpha1.Role, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("role"), name) - } - return obj.(*v1alpha1.Role), nil + listers.ResourceIndexer[*rbacv1alpha1.Role] } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go index 0ad3d0eba..67e123f63 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/rbac/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleBindingLister helps list RoleBindings. @@ -30,7 +30,7 @@ import ( type RoleBindingLister interface { // List lists all RoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.RoleBinding, err error) // RoleBindings returns an object that can list and get RoleBindings. RoleBindings(namespace string) RoleBindingNamespaceLister RoleBindingListerExpansion @@ -38,25 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1alpha1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{indexer: indexer} -} - -// List lists all RoleBindings in the indexer. -func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RoleBinding)) - }) - return ret, err + return &roleBindingLister{listers.New[*rbacv1alpha1.RoleBinding](indexer, rbacv1alpha1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleBindingNamespaceLister{listers.NewNamespaced[*rbacv1alpha1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -64,36 +56,15 @@ func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceL type RoleBindingNamespaceLister interface { // List lists all RoleBindings in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1alpha1.RoleBinding, err error) // Get retrieves the RoleBinding from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1alpha1.RoleBinding, error) + Get(name string) (*rbacv1alpha1.RoleBinding, error) RoleBindingNamespaceListerExpansion } // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RoleBindings in the indexer for a given namespace. -func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RoleBinding)) - }) - return ret, err -} - -// Get retrieves the RoleBinding from the indexer for a given namespace and name. -func (s roleBindingNamespaceLister) Get(name string) (*v1alpha1.RoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("rolebinding"), name) - } - return obj.(*v1alpha1.RoleBinding), nil + listers.ResourceIndexer[*rbacv1alpha1.RoleBinding] } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go index bf6cd99cb..9cf996b86 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleLister helps list ClusterRoles. @@ -30,39 +30,19 @@ import ( type ClusterRoleLister interface { // List lists all ClusterRoles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ClusterRole, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.ClusterRole, err error) // Get retrieves the ClusterRole from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ClusterRole, error) + Get(name string) (*rbacv1beta1.ClusterRole, error) ClusterRoleListerExpansion } // clusterRoleLister implements the ClusterRoleLister interface. type clusterRoleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1beta1.ClusterRole] } // NewClusterRoleLister returns a new ClusterRoleLister. func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { - return &clusterRoleLister{indexer: indexer} -} - -// List lists all ClusterRoles in the indexer. -func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1beta1.ClusterRole, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ClusterRole)) - }) - return ret, err -} - -// Get retrieves the ClusterRole from the index for a given name. 
-func (s *clusterRoleLister) Get(name string) (*v1beta1.ClusterRole, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("clusterrole"), name) - } - return obj.(*v1beta1.ClusterRole), nil + return &clusterRoleLister{listers.New[*rbacv1beta1.ClusterRole](indexer, rbacv1beta1.Resource("clusterrole"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go index 00bab2330..41418f762 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // ClusterRoleBindingLister helps list ClusterRoleBindings. @@ -30,39 +30,19 @@ import ( type ClusterRoleBindingLister interface { // List lists all ClusterRoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.ClusterRoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.ClusterRoleBinding, err error) // Get retrieves the ClusterRoleBinding from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.ClusterRoleBinding, error) + Get(name string) (*rbacv1beta1.ClusterRoleBinding, error) ClusterRoleBindingListerExpansion } // clusterRoleBindingLister implements the ClusterRoleBindingLister interface. type clusterRoleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1beta1.ClusterRoleBinding] } // NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { - return &clusterRoleBindingLister{indexer: indexer} -} - -// List lists all ClusterRoleBindings in the indexer. -func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1beta1.ClusterRoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.ClusterRoleBinding)) - }) - return ret, err -} - -// Get retrieves the ClusterRoleBinding from the index for a given name. -func (s *clusterRoleBindingLister) Get(name string) (*v1beta1.ClusterRoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("clusterrolebinding"), name) - } - return obj.(*v1beta1.ClusterRoleBinding), nil + return &clusterRoleBindingLister{listers.New[*rbacv1beta1.ClusterRoleBinding](indexer, rbacv1beta1.Resource("clusterrolebinding"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go index 9cd9b9042..04d4ab772 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleLister helps list Roles. @@ -30,7 +30,7 @@ import ( type RoleLister interface { // List lists all Roles in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.Role, err error) // Roles returns an object that can list and get Roles. Roles(namespace string) RoleNamespaceLister RoleListerExpansion @@ -38,25 +38,17 @@ type RoleLister interface { // roleLister implements the RoleLister interface. type roleLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1beta1.Role] } // NewRoleLister returns a new RoleLister. func NewRoleLister(indexer cache.Indexer) RoleLister { - return &roleLister{indexer: indexer} -} - -// List lists all Roles in the indexer. -func (s *roleLister) List(selector labels.Selector) (ret []*v1beta1.Role, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Role)) - }) - return ret, err + return &roleLister{listers.New[*rbacv1beta1.Role](indexer, rbacv1beta1.Resource("role"))} } // Roles returns an object that can list and get Roles. func (s *roleLister) Roles(namespace string) RoleNamespaceLister { - return roleNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleNamespaceLister{listers.NewNamespaced[*rbacv1beta1.Role](s.ResourceIndexer, namespace)} } // RoleNamespaceLister helps list and get Roles. @@ -64,36 +56,15 @@ func (s *roleLister) Roles(namespace string) RoleNamespaceLister { type RoleNamespaceLister interface { // List lists all Roles in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.Role, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.Role, err error) // Get retrieves the Role from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.Role, error) + Get(name string) (*rbacv1beta1.Role, error) RoleNamespaceListerExpansion } // roleNamespaceLister implements the RoleNamespaceLister // interface. type roleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Roles in the indexer for a given namespace. -func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Role, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.Role)) - }) - return ret, err -} - -// Get retrieves the Role from the indexer for a given namespace and name. 
-func (s roleNamespaceLister) Get(name string) (*v1beta1.Role, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("role"), name) - } - return obj.(*v1beta1.Role), nil + listers.ResourceIndexer[*rbacv1beta1.Role] } diff --git a/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go b/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go index 7c7c91bf3..816673229 100644 --- a/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go +++ b/constraint/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/rbac/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // RoleBindingLister helps list RoleBindings. @@ -30,7 +30,7 @@ import ( type RoleBindingLister interface { // List lists all RoleBindings in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.RoleBinding, err error) // RoleBindings returns an object that can list and get RoleBindings. RoleBindings(namespace string) RoleBindingNamespaceLister RoleBindingListerExpansion @@ -38,25 +38,17 @@ type RoleBindingLister interface { // roleBindingLister implements the RoleBindingLister interface. type roleBindingLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*rbacv1beta1.RoleBinding] } // NewRoleBindingLister returns a new RoleBindingLister. func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { - return &roleBindingLister{indexer: indexer} -} - -// List lists all RoleBindings in the indexer. -func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.RoleBinding)) - }) - return ret, err + return &roleBindingLister{listers.New[*rbacv1beta1.RoleBinding](indexer, rbacv1beta1.Resource("rolebinding"))} } // RoleBindings returns an object that can list and get RoleBindings. func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { - return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} + return roleBindingNamespaceLister{listers.NewNamespaced[*rbacv1beta1.RoleBinding](s.ResourceIndexer, namespace)} } // RoleBindingNamespaceLister helps list and get RoleBindings. @@ -64,36 +56,15 @@ func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceL type RoleBindingNamespaceLister interface { // List lists all RoleBindings in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) + List(selector labels.Selector) (ret []*rbacv1beta1.RoleBinding, err error) // Get retrieves the RoleBinding from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1beta1.RoleBinding, error) + Get(name string) (*rbacv1beta1.RoleBinding, error) RoleBindingNamespaceListerExpansion } // roleBindingNamespaceLister implements the RoleBindingNamespaceLister // interface. type roleBindingNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RoleBindings in the indexer for a given namespace. -func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.RoleBinding)) - }) - return ret, err -} - -// Get retrieves the RoleBinding from the indexer for a given namespace and name. -func (s roleBindingNamespaceLister) Get(name string) (*v1beta1.RoleBinding, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("rolebinding"), name) - } - return obj.(*v1beta1.RoleBinding), nil + listers.ResourceIndexer[*rbacv1beta1.RoleBinding] } diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go deleted file mode 100644 index 68861832d..000000000 --- a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/expansion_generated.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -// PodSchedulingContextListerExpansion allows custom methods to be added to -// PodSchedulingContextLister. -type PodSchedulingContextListerExpansion interface{} - -// PodSchedulingContextNamespaceListerExpansion allows custom methods to be added to -// PodSchedulingContextNamespaceLister. -type PodSchedulingContextNamespaceListerExpansion interface{} - -// ResourceClaimListerExpansion allows custom methods to be added to -// ResourceClaimLister. -type ResourceClaimListerExpansion interface{} - -// ResourceClaimNamespaceListerExpansion allows custom methods to be added to -// ResourceClaimNamespaceLister. -type ResourceClaimNamespaceListerExpansion interface{} - -// ResourceClaimParametersListerExpansion allows custom methods to be added to -// ResourceClaimParametersLister. -type ResourceClaimParametersListerExpansion interface{} - -// ResourceClaimParametersNamespaceListerExpansion allows custom methods to be added to -// ResourceClaimParametersNamespaceLister. -type ResourceClaimParametersNamespaceListerExpansion interface{} - -// ResourceClaimTemplateListerExpansion allows custom methods to be added to -// ResourceClaimTemplateLister. -type ResourceClaimTemplateListerExpansion interface{} - -// ResourceClaimTemplateNamespaceListerExpansion allows custom methods to be added to -// ResourceClaimTemplateNamespaceLister. 
-type ResourceClaimTemplateNamespaceListerExpansion interface{} - -// ResourceClassListerExpansion allows custom methods to be added to -// ResourceClassLister. -type ResourceClassListerExpansion interface{} - -// ResourceClassParametersListerExpansion allows custom methods to be added to -// ResourceClassParametersLister. -type ResourceClassParametersListerExpansion interface{} - -// ResourceClassParametersNamespaceListerExpansion allows custom methods to be added to -// ResourceClassParametersNamespaceLister. -type ResourceClassParametersNamespaceListerExpansion interface{} - -// ResourceSliceListerExpansion allows custom methods to be added to -// ResourceSliceLister. -type ResourceSliceListerExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go deleted file mode 100644 index c50b3f889..000000000 --- a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// PodSchedulingContextLister helps list PodSchedulingContexts. -// All objects returned here must be treated as read-only. -type PodSchedulingContextLister interface { - // List lists all PodSchedulingContexts in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) - // PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. - PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister - PodSchedulingContextListerExpansion -} - -// podSchedulingContextLister implements the PodSchedulingContextLister interface. -type podSchedulingContextLister struct { - indexer cache.Indexer -} - -// NewPodSchedulingContextLister returns a new PodSchedulingContextLister. -func NewPodSchedulingContextLister(indexer cache.Indexer) PodSchedulingContextLister { - return &podSchedulingContextLister{indexer: indexer} -} - -// List lists all PodSchedulingContexts in the indexer. -func (s *podSchedulingContextLister) List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.PodSchedulingContext)) - }) - return ret, err -} - -// PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. 
-func (s *podSchedulingContextLister) PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister { - return podSchedulingContextNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// PodSchedulingContextNamespaceLister helps list and get PodSchedulingContexts. -// All objects returned here must be treated as read-only. -type PodSchedulingContextNamespaceLister interface { - // List lists all PodSchedulingContexts in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) - // Get retrieves the PodSchedulingContext from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.PodSchedulingContext, error) - PodSchedulingContextNamespaceListerExpansion -} - -// podSchedulingContextNamespaceLister implements the PodSchedulingContextNamespaceLister -// interface. -type podSchedulingContextNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PodSchedulingContexts in the indexer for a given namespace. -func (s podSchedulingContextNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.PodSchedulingContext, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.PodSchedulingContext)) - }) - return ret, err -} - -// Get retrieves the PodSchedulingContext from the indexer for a given namespace and name. -func (s podSchedulingContextNamespaceLister) Get(name string) (*v1alpha2.PodSchedulingContext, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("podschedulingcontext"), name) - } - return obj.(*v1alpha2.PodSchedulingContext), nil -} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go deleted file mode 100644 index 273f16af3..000000000 --- a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaim.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResourceClaimLister helps list ResourceClaims. -// All objects returned here must be treated as read-only. -type ResourceClaimLister interface { - // List lists all ResourceClaims in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) - // ResourceClaims returns an object that can list and get ResourceClaims. 
- ResourceClaims(namespace string) ResourceClaimNamespaceLister - ResourceClaimListerExpansion -} - -// resourceClaimLister implements the ResourceClaimLister interface. -type resourceClaimLister struct { - indexer cache.Indexer -} - -// NewResourceClaimLister returns a new ResourceClaimLister. -func NewResourceClaimLister(indexer cache.Indexer) ResourceClaimLister { - return &resourceClaimLister{indexer: indexer} -} - -// List lists all ResourceClaims in the indexer. -func (s *resourceClaimLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaim)) - }) - return ret, err -} - -// ResourceClaims returns an object that can list and get ResourceClaims. -func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimNamespaceLister { - return resourceClaimNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ResourceClaimNamespaceLister helps list and get ResourceClaims. -// All objects returned here must be treated as read-only. -type ResourceClaimNamespaceLister interface { - // List lists all ResourceClaims in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) - // Get retrieves the ResourceClaim from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClaim, error) - ResourceClaimNamespaceListerExpansion -} - -// resourceClaimNamespaceLister implements the ResourceClaimNamespaceLister -// interface. -type resourceClaimNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceClaims in the indexer for a given namespace. -func (s resourceClaimNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaim, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaim)) - }) - return ret, err -} - -// Get retrieves the ResourceClaim from the indexer for a given namespace and name. -func (s resourceClaimNamespaceLister) Get(name string) (*v1alpha2.ResourceClaim, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaim"), name) - } - return obj.(*v1alpha2.ResourceClaim), nil -} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimparameters.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimparameters.go deleted file mode 100644 index 1a561ef7a..000000000 --- a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimparameters.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. 
- -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResourceClaimParametersLister helps list ResourceClaimParameters. -// All objects returned here must be treated as read-only. -type ResourceClaimParametersLister interface { - // List lists all ResourceClaimParameters in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimParameters, err error) - // ResourceClaimParameters returns an object that can list and get ResourceClaimParameters. - ResourceClaimParameters(namespace string) ResourceClaimParametersNamespaceLister - ResourceClaimParametersListerExpansion -} - -// resourceClaimParametersLister implements the ResourceClaimParametersLister interface. -type resourceClaimParametersLister struct { - indexer cache.Indexer -} - -// NewResourceClaimParametersLister returns a new ResourceClaimParametersLister. -func NewResourceClaimParametersLister(indexer cache.Indexer) ResourceClaimParametersLister { - return &resourceClaimParametersLister{indexer: indexer} -} - -// List lists all ResourceClaimParameters in the indexer. -func (s *resourceClaimParametersLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimParameters, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaimParameters)) - }) - return ret, err -} - -// ResourceClaimParameters returns an object that can list and get ResourceClaimParameters. -func (s *resourceClaimParametersLister) ResourceClaimParameters(namespace string) ResourceClaimParametersNamespaceLister { - return resourceClaimParametersNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ResourceClaimParametersNamespaceLister helps list and get ResourceClaimParameters. -// All objects returned here must be treated as read-only. -type ResourceClaimParametersNamespaceLister interface { - // List lists all ResourceClaimParameters in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimParameters, err error) - // Get retrieves the ResourceClaimParameters from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClaimParameters, error) - ResourceClaimParametersNamespaceListerExpansion -} - -// resourceClaimParametersNamespaceLister implements the ResourceClaimParametersNamespaceLister -// interface. -type resourceClaimParametersNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceClaimParameters in the indexer for a given namespace. -func (s resourceClaimParametersNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimParameters, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaimParameters)) - }) - return ret, err -} - -// Get retrieves the ResourceClaimParameters from the indexer for a given namespace and name. 
-func (s resourceClaimParametersNamespaceLister) Get(name string) (*v1alpha2.ResourceClaimParameters, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaimparameters"), name) - } - return obj.(*v1alpha2.ResourceClaimParameters), nil -} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go deleted file mode 100644 index 91a488b17..000000000 --- a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclaimtemplate.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResourceClaimTemplateLister helps list ResourceClaimTemplates. -// All objects returned here must be treated as read-only. -type ResourceClaimTemplateLister interface { - // List lists all ResourceClaimTemplates in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) - // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. - ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister - ResourceClaimTemplateListerExpansion -} - -// resourceClaimTemplateLister implements the ResourceClaimTemplateLister interface. -type resourceClaimTemplateLister struct { - indexer cache.Indexer -} - -// NewResourceClaimTemplateLister returns a new ResourceClaimTemplateLister. -func NewResourceClaimTemplateLister(indexer cache.Indexer) ResourceClaimTemplateLister { - return &resourceClaimTemplateLister{indexer: indexer} -} - -// List lists all ResourceClaimTemplates in the indexer. -func (s *resourceClaimTemplateLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaimTemplate)) - }) - return ret, err -} - -// ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. -func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister { - return resourceClaimTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ResourceClaimTemplateNamespaceLister helps list and get ResourceClaimTemplates. -// All objects returned here must be treated as read-only. -type ResourceClaimTemplateNamespaceLister interface { - // List lists all ResourceClaimTemplates in the indexer for a given namespace. - // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) - // Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClaimTemplate, error) - ResourceClaimTemplateNamespaceListerExpansion -} - -// resourceClaimTemplateNamespaceLister implements the ResourceClaimTemplateNamespaceLister -// interface. -type resourceClaimTemplateNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceClaimTemplates in the indexer for a given namespace. -func (s resourceClaimTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClaimTemplate, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClaimTemplate)) - }) - return ret, err -} - -// Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. -func (s resourceClaimTemplateNamespaceLister) Get(name string) (*v1alpha2.ResourceClaimTemplate, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclaimtemplate"), name) - } - return obj.(*v1alpha2.ResourceClaimTemplate), nil -} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go deleted file mode 100644 index eeb2fc337..000000000 --- a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResourceClassLister helps list ResourceClasses. -// All objects returned here must be treated as read-only. -type ResourceClassLister interface { - // List lists all ResourceClasses in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClass, err error) - // Get retrieves the ResourceClass from the index for a given name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClass, error) - ResourceClassListerExpansion -} - -// resourceClassLister implements the ResourceClassLister interface. -type resourceClassLister struct { - indexer cache.Indexer -} - -// NewResourceClassLister returns a new ResourceClassLister. -func NewResourceClassLister(indexer cache.Indexer) ResourceClassLister { - return &resourceClassLister{indexer: indexer} -} - -// List lists all ResourceClasses in the indexer. 
-func (s *resourceClassLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClass)) - }) - return ret, err -} - -// Get retrieves the ResourceClass from the index for a given name. -func (s *resourceClassLister) Get(name string) (*v1alpha2.ResourceClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclass"), name) - } - return obj.(*v1alpha2.ResourceClass), nil -} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclassparameters.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclassparameters.go deleted file mode 100644 index 26fb95e6d..000000000 --- a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclassparameters.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResourceClassParametersLister helps list ResourceClassParameters. -// All objects returned here must be treated as read-only. -type ResourceClassParametersLister interface { - // List lists all ResourceClassParameters in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClassParameters, err error) - // ResourceClassParameters returns an object that can list and get ResourceClassParameters. - ResourceClassParameters(namespace string) ResourceClassParametersNamespaceLister - ResourceClassParametersListerExpansion -} - -// resourceClassParametersLister implements the ResourceClassParametersLister interface. -type resourceClassParametersLister struct { - indexer cache.Indexer -} - -// NewResourceClassParametersLister returns a new ResourceClassParametersLister. -func NewResourceClassParametersLister(indexer cache.Indexer) ResourceClassParametersLister { - return &resourceClassParametersLister{indexer: indexer} -} - -// List lists all ResourceClassParameters in the indexer. -func (s *resourceClassParametersLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClassParameters, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClassParameters)) - }) - return ret, err -} - -// ResourceClassParameters returns an object that can list and get ResourceClassParameters. -func (s *resourceClassParametersLister) ResourceClassParameters(namespace string) ResourceClassParametersNamespaceLister { - return resourceClassParametersNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ResourceClassParametersNamespaceLister helps list and get ResourceClassParameters. 
-// All objects returned here must be treated as read-only. -type ResourceClassParametersNamespaceLister interface { - // List lists all ResourceClassParameters in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceClassParameters, err error) - // Get retrieves the ResourceClassParameters from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceClassParameters, error) - ResourceClassParametersNamespaceListerExpansion -} - -// resourceClassParametersNamespaceLister implements the ResourceClassParametersNamespaceLister -// interface. -type resourceClassParametersNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResourceClassParameters in the indexer for a given namespace. -func (s resourceClassParametersNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceClassParameters, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceClassParameters)) - }) - return ret, err -} - -// Get retrieves the ResourceClassParameters from the indexer for a given namespace and name. -func (s resourceClassParametersNamespaceLister) Get(name string) (*v1alpha2.ResourceClassParameters, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceclassparameters"), name) - } - return obj.(*v1alpha2.ResourceClassParameters), nil -} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceslice.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceslice.go deleted file mode 100644 index 4301cea2e..000000000 --- a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceslice.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - v1alpha2 "k8s.io/api/resource/v1alpha2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResourceSliceLister helps list ResourceSlices. -// All objects returned here must be treated as read-only. -type ResourceSliceLister interface { - // List lists all ResourceSlices in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha2.ResourceSlice, err error) - // Get retrieves the ResourceSlice from the index for a given name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha2.ResourceSlice, error) - ResourceSliceListerExpansion -} - -// resourceSliceLister implements the ResourceSliceLister interface. 
-type resourceSliceLister struct { - indexer cache.Indexer -} - -// NewResourceSliceLister returns a new ResourceSliceLister. -func NewResourceSliceLister(indexer cache.Indexer) ResourceSliceLister { - return &resourceSliceLister{indexer: indexer} -} - -// List lists all ResourceSlices in the indexer. -func (s *resourceSliceLister) List(selector labels.Selector) (ret []*v1alpha2.ResourceSlice, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha2.ResourceSlice)) - }) - return ret, err -} - -// Get retrieves the ResourceSlice from the index for a given name. -func (s *resourceSliceLister) Get(name string) (*v1alpha2.ResourceSlice, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha2.Resource("resourceslice"), name) - } - return obj.(*v1alpha2.ResourceSlice), nil -} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go new file mode 100644 index 000000000..05032833b --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// DeviceClassLister helps list DeviceClasses. +// All objects returned here must be treated as read-only. +type DeviceClassLister interface { + // List lists all DeviceClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1alpha3.DeviceClass, err error) + // Get retrieves the DeviceClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1alpha3.DeviceClass, error) + DeviceClassListerExpansion +} + +// deviceClassLister implements the DeviceClassLister interface. +type deviceClassLister struct { + listers.ResourceIndexer[*resourcev1alpha3.DeviceClass] +} + +// NewDeviceClassLister returns a new DeviceClassLister. +func NewDeviceClassLister(indexer cache.Indexer) DeviceClassLister { + return &deviceClassLister{listers.New[*resourcev1alpha3.DeviceClass](indexer, resourcev1alpha3.Resource("deviceclass"))} +} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go new file mode 100644 index 000000000..f626c9283 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +// DeviceClassListerExpansion allows custom methods to be added to +// DeviceClassLister. +type DeviceClassListerExpansion interface{} + +// ResourceClaimListerExpansion allows custom methods to be added to +// ResourceClaimLister. +type ResourceClaimListerExpansion interface{} + +// ResourceClaimNamespaceListerExpansion allows custom methods to be added to +// ResourceClaimNamespaceLister. +type ResourceClaimNamespaceListerExpansion interface{} + +// ResourceClaimTemplateListerExpansion allows custom methods to be added to +// ResourceClaimTemplateLister. +type ResourceClaimTemplateListerExpansion interface{} + +// ResourceClaimTemplateNamespaceListerExpansion allows custom methods to be added to +// ResourceClaimTemplateNamespaceLister. +type ResourceClaimTemplateNamespaceListerExpansion interface{} + +// ResourceSliceListerExpansion allows custom methods to be added to +// ResourceSliceLister. +type ResourceSliceListerExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go new file mode 100644 index 000000000..9de229bff --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaim.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimLister helps list ResourceClaims. +// All objects returned here must be treated as read-only. +type ResourceClaimLister interface { + // List lists all ResourceClaims in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1alpha3.ResourceClaim, err error) + // ResourceClaims returns an object that can list and get ResourceClaims. + ResourceClaims(namespace string) ResourceClaimNamespaceLister + ResourceClaimListerExpansion +} + +// resourceClaimLister implements the ResourceClaimLister interface. +type resourceClaimLister struct { + listers.ResourceIndexer[*resourcev1alpha3.ResourceClaim] +} + +// NewResourceClaimLister returns a new ResourceClaimLister. 
+func NewResourceClaimLister(indexer cache.Indexer) ResourceClaimLister { + return &resourceClaimLister{listers.New[*resourcev1alpha3.ResourceClaim](indexer, resourcev1alpha3.Resource("resourceclaim"))} +} + +// ResourceClaims returns an object that can list and get ResourceClaims. +func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimNamespaceLister { + return resourceClaimNamespaceLister{listers.NewNamespaced[*resourcev1alpha3.ResourceClaim](s.ResourceIndexer, namespace)} +} + +// ResourceClaimNamespaceLister helps list and get ResourceClaims. +// All objects returned here must be treated as read-only. +type ResourceClaimNamespaceLister interface { + // List lists all ResourceClaims in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1alpha3.ResourceClaim, err error) + // Get retrieves the ResourceClaim from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1alpha3.ResourceClaim, error) + ResourceClaimNamespaceListerExpansion +} + +// resourceClaimNamespaceLister implements the ResourceClaimNamespaceLister +// interface. +type resourceClaimNamespaceLister struct { + listers.ResourceIndexer[*resourcev1alpha3.ResourceClaim] +} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go new file mode 100644 index 000000000..b0895edd2 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimTemplateLister helps list ResourceClaimTemplates. +// All objects returned here must be treated as read-only. +type ResourceClaimTemplateLister interface { + // List lists all ResourceClaimTemplates in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1alpha3.ResourceClaimTemplate, err error) + // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. + ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister + ResourceClaimTemplateListerExpansion +} + +// resourceClaimTemplateLister implements the ResourceClaimTemplateLister interface. +type resourceClaimTemplateLister struct { + listers.ResourceIndexer[*resourcev1alpha3.ResourceClaimTemplate] +} + +// NewResourceClaimTemplateLister returns a new ResourceClaimTemplateLister. 
+func NewResourceClaimTemplateLister(indexer cache.Indexer) ResourceClaimTemplateLister { + return &resourceClaimTemplateLister{listers.New[*resourcev1alpha3.ResourceClaimTemplate](indexer, resourcev1alpha3.Resource("resourceclaimtemplate"))} +} + +// ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. +func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister { + return resourceClaimTemplateNamespaceLister{listers.NewNamespaced[*resourcev1alpha3.ResourceClaimTemplate](s.ResourceIndexer, namespace)} +} + +// ResourceClaimTemplateNamespaceLister helps list and get ResourceClaimTemplates. +// All objects returned here must be treated as read-only. +type ResourceClaimTemplateNamespaceLister interface { + // List lists all ResourceClaimTemplates in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1alpha3.ResourceClaimTemplate, err error) + // Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1alpha3.ResourceClaimTemplate, error) + ResourceClaimTemplateNamespaceListerExpansion +} + +// resourceClaimTemplateNamespaceLister implements the ResourceClaimTemplateNamespaceLister +// interface. +type resourceClaimTemplateNamespaceLister struct { + listers.ResourceIndexer[*resourcev1alpha3.ResourceClaimTemplate] +} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go new file mode 100644 index 000000000..66b1b9e06 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + resourcev1alpha3 "k8s.io/api/resource/v1alpha3" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceSliceLister helps list ResourceSlices. +// All objects returned here must be treated as read-only. +type ResourceSliceLister interface { + // List lists all ResourceSlices in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1alpha3.ResourceSlice, err error) + // Get retrieves the ResourceSlice from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1alpha3.ResourceSlice, error) + ResourceSliceListerExpansion +} + +// resourceSliceLister implements the ResourceSliceLister interface. +type resourceSliceLister struct { + listers.ResourceIndexer[*resourcev1alpha3.ResourceSlice] +} + +// NewResourceSliceLister returns a new ResourceSliceLister. 
+func NewResourceSliceLister(indexer cache.Indexer) ResourceSliceLister { + return &resourceSliceLister{listers.New[*resourcev1alpha3.ResourceSlice](indexer, resourcev1alpha3.Resource("resourceslice"))} +} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/deviceclass.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/deviceclass.go new file mode 100644 index 000000000..a386fb269 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/deviceclass.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// DeviceClassLister helps list DeviceClasses. +// All objects returned here must be treated as read-only. +type DeviceClassLister interface { + // List lists all DeviceClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1beta1.DeviceClass, err error) + // Get retrieves the DeviceClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1beta1.DeviceClass, error) + DeviceClassListerExpansion +} + +// deviceClassLister implements the DeviceClassLister interface. +type deviceClassLister struct { + listers.ResourceIndexer[*resourcev1beta1.DeviceClass] +} + +// NewDeviceClassLister returns a new DeviceClassLister. +func NewDeviceClassLister(indexer cache.Indexer) DeviceClassLister { + return &deviceClassLister{listers.New[*resourcev1beta1.DeviceClass](indexer, resourcev1beta1.Resource("deviceclass"))} +} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/expansion_generated.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/expansion_generated.go new file mode 100644 index 000000000..c50a006d8 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +// DeviceClassListerExpansion allows custom methods to be added to +// DeviceClassLister. +type DeviceClassListerExpansion interface{} + +// ResourceClaimListerExpansion allows custom methods to be added to +// ResourceClaimLister. 
+type ResourceClaimListerExpansion interface{} + +// ResourceClaimNamespaceListerExpansion allows custom methods to be added to +// ResourceClaimNamespaceLister. +type ResourceClaimNamespaceListerExpansion interface{} + +// ResourceClaimTemplateListerExpansion allows custom methods to be added to +// ResourceClaimTemplateLister. +type ResourceClaimTemplateListerExpansion interface{} + +// ResourceClaimTemplateNamespaceListerExpansion allows custom methods to be added to +// ResourceClaimTemplateNamespaceLister. +type ResourceClaimTemplateNamespaceListerExpansion interface{} + +// ResourceSliceListerExpansion allows custom methods to be added to +// ResourceSliceLister. +type ResourceSliceListerExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaim.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaim.go new file mode 100644 index 000000000..434227394 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaim.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimLister helps list ResourceClaims. +// All objects returned here must be treated as read-only. +type ResourceClaimLister interface { + // List lists all ResourceClaims in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1beta1.ResourceClaim, err error) + // ResourceClaims returns an object that can list and get ResourceClaims. + ResourceClaims(namespace string) ResourceClaimNamespaceLister + ResourceClaimListerExpansion +} + +// resourceClaimLister implements the ResourceClaimLister interface. +type resourceClaimLister struct { + listers.ResourceIndexer[*resourcev1beta1.ResourceClaim] +} + +// NewResourceClaimLister returns a new ResourceClaimLister. +func NewResourceClaimLister(indexer cache.Indexer) ResourceClaimLister { + return &resourceClaimLister{listers.New[*resourcev1beta1.ResourceClaim](indexer, resourcev1beta1.Resource("resourceclaim"))} +} + +// ResourceClaims returns an object that can list and get ResourceClaims. +func (s *resourceClaimLister) ResourceClaims(namespace string) ResourceClaimNamespaceLister { + return resourceClaimNamespaceLister{listers.NewNamespaced[*resourcev1beta1.ResourceClaim](s.ResourceIndexer, namespace)} +} + +// ResourceClaimNamespaceLister helps list and get ResourceClaims. +// All objects returned here must be treated as read-only. +type ResourceClaimNamespaceLister interface { + // List lists all ResourceClaims in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*resourcev1beta1.ResourceClaim, err error) + // Get retrieves the ResourceClaim from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1beta1.ResourceClaim, error) + ResourceClaimNamespaceListerExpansion +} + +// resourceClaimNamespaceLister implements the ResourceClaimNamespaceLister +// interface. +type resourceClaimNamespaceLister struct { + listers.ResourceIndexer[*resourcev1beta1.ResourceClaim] +} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaimtemplate.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaimtemplate.go new file mode 100644 index 000000000..152646a90 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaimtemplate.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceClaimTemplateLister helps list ResourceClaimTemplates. +// All objects returned here must be treated as read-only. +type ResourceClaimTemplateLister interface { + // List lists all ResourceClaimTemplates in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1beta1.ResourceClaimTemplate, err error) + // ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. + ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister + ResourceClaimTemplateListerExpansion +} + +// resourceClaimTemplateLister implements the ResourceClaimTemplateLister interface. +type resourceClaimTemplateLister struct { + listers.ResourceIndexer[*resourcev1beta1.ResourceClaimTemplate] +} + +// NewResourceClaimTemplateLister returns a new ResourceClaimTemplateLister. +func NewResourceClaimTemplateLister(indexer cache.Indexer) ResourceClaimTemplateLister { + return &resourceClaimTemplateLister{listers.New[*resourcev1beta1.ResourceClaimTemplate](indexer, resourcev1beta1.Resource("resourceclaimtemplate"))} +} + +// ResourceClaimTemplates returns an object that can list and get ResourceClaimTemplates. +func (s *resourceClaimTemplateLister) ResourceClaimTemplates(namespace string) ResourceClaimTemplateNamespaceLister { + return resourceClaimTemplateNamespaceLister{listers.NewNamespaced[*resourcev1beta1.ResourceClaimTemplate](s.ResourceIndexer, namespace)} +} + +// ResourceClaimTemplateNamespaceLister helps list and get ResourceClaimTemplates. +// All objects returned here must be treated as read-only. +type ResourceClaimTemplateNamespaceLister interface { + // List lists all ResourceClaimTemplates in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*resourcev1beta1.ResourceClaimTemplate, err error) + // Get retrieves the ResourceClaimTemplate from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1beta1.ResourceClaimTemplate, error) + ResourceClaimTemplateNamespaceListerExpansion +} + +// resourceClaimTemplateNamespaceLister implements the ResourceClaimTemplateNamespaceLister +// interface. +type resourceClaimTemplateNamespaceLister struct { + listers.ResourceIndexer[*resourcev1beta1.ResourceClaimTemplate] +} diff --git a/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceslice.go b/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceslice.go new file mode 100644 index 000000000..fbe108476 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/resource/v1beta1/resourceslice.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + resourcev1beta1 "k8s.io/api/resource/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceSliceLister helps list ResourceSlices. +// All objects returned here must be treated as read-only. +type ResourceSliceLister interface { + // List lists all ResourceSlices in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*resourcev1beta1.ResourceSlice, err error) + // Get retrieves the ResourceSlice from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*resourcev1beta1.ResourceSlice, error) + ResourceSliceListerExpansion +} + +// resourceSliceLister implements the ResourceSliceLister interface. +type resourceSliceLister struct { + listers.ResourceIndexer[*resourcev1beta1.ResourceSlice] +} + +// NewResourceSliceLister returns a new ResourceSliceLister. +func NewResourceSliceLister(indexer cache.Indexer) ResourceSliceLister { + return &resourceSliceLister{listers.New[*resourcev1beta1.ResourceSlice](indexer, resourcev1beta1.Resource("resourceslice"))} +} diff --git a/constraint/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go b/constraint/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go index 4da84ccf8..deadbe2f8 100644 --- a/constraint/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/scheduling/v1/priorityclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/scheduling/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + schedulingv1 "k8s.io/api/scheduling/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityClassLister helps list PriorityClasses. 
@@ -30,39 +30,19 @@ import ( type PriorityClassLister interface { // List lists all PriorityClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PriorityClass, err error) + List(selector labels.Selector) (ret []*schedulingv1.PriorityClass, err error) // Get retrieves the PriorityClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PriorityClass, error) + Get(name string) (*schedulingv1.PriorityClass, error) PriorityClassListerExpansion } // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*schedulingv1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{indexer: indexer} -} - -// List lists all PriorityClasses in the indexer. -func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1.PriorityClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PriorityClass)) - }) - return ret, err -} - -// Get retrieves the PriorityClass from the index for a given name. -func (s *priorityClassLister) Get(name string) (*v1.PriorityClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("priorityclass"), name) - } - return obj.(*v1.PriorityClass), nil + return &priorityClassLister{listers.New[*schedulingv1.PriorityClass](indexer, schedulingv1.Resource("priorityclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go b/constraint/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go index 3d25dc80a..7beaf4632 100644 --- a/constraint/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/scheduling/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityClassLister helps list PriorityClasses. @@ -30,39 +30,19 @@ import ( type PriorityClassLister interface { // List lists all PriorityClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PriorityClass, err error) + List(selector labels.Selector) (ret []*schedulingv1alpha1.PriorityClass, err error) // Get retrieves the PriorityClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.PriorityClass, error) + Get(name string) (*schedulingv1alpha1.PriorityClass, error) PriorityClassListerExpansion } // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*schedulingv1alpha1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. 
func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{indexer: indexer} -} - -// List lists all PriorityClasses in the indexer. -func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1alpha1.PriorityClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PriorityClass)) - }) - return ret, err -} - -// Get retrieves the PriorityClass from the index for a given name. -func (s *priorityClassLister) Get(name string) (*v1alpha1.PriorityClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("priorityclass"), name) - } - return obj.(*v1alpha1.PriorityClass), nil + return &priorityClassLister{listers.New[*schedulingv1alpha1.PriorityClass](indexer, schedulingv1alpha1.Resource("priorityclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go b/constraint/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go index c848d035a..92fd17de6 100644 --- a/constraint/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/scheduling/v1beta1/priorityclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/scheduling/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // PriorityClassLister helps list PriorityClasses. @@ -30,39 +30,19 @@ import ( type PriorityClassLister interface { // List lists all PriorityClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.PriorityClass, err error) + List(selector labels.Selector) (ret []*schedulingv1beta1.PriorityClass, err error) // Get retrieves the PriorityClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.PriorityClass, error) + Get(name string) (*schedulingv1beta1.PriorityClass, error) PriorityClassListerExpansion } // priorityClassLister implements the PriorityClassLister interface. type priorityClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*schedulingv1beta1.PriorityClass] } // NewPriorityClassLister returns a new PriorityClassLister. func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { - return &priorityClassLister{indexer: indexer} -} - -// List lists all PriorityClasses in the indexer. -func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1beta1.PriorityClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.PriorityClass)) - }) - return ret, err -} - -// Get retrieves the PriorityClass from the index for a given name. 
-func (s *priorityClassLister) Get(name string) (*v1beta1.PriorityClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("priorityclass"), name) - } - return obj.(*v1beta1.PriorityClass), nil + return &priorityClassLister{listers.New[*schedulingv1beta1.PriorityClass](indexer, schedulingv1beta1.Resource("priorityclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go index 4e8ab9090..dff96e691 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1/csidriver.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1 "k8s.io/api/storage/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSIDriverLister helps list CSIDrivers. @@ -30,39 +30,19 @@ import ( type CSIDriverLister interface { // List lists all CSIDrivers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CSIDriver, err error) + List(selector labels.Selector) (ret []*storagev1.CSIDriver, err error) // Get retrieves the CSIDriver from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.CSIDriver, error) + Get(name string) (*storagev1.CSIDriver, error) CSIDriverListerExpansion } // cSIDriverLister implements the CSIDriverLister interface. type cSIDriverLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1.CSIDriver] } // NewCSIDriverLister returns a new CSIDriverLister. func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister { - return &cSIDriverLister{indexer: indexer} -} - -// List lists all CSIDrivers in the indexer. -func (s *cSIDriverLister) List(selector labels.Selector) (ret []*v1.CSIDriver, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSIDriver)) - }) - return ret, err -} - -// Get retrieves the CSIDriver from the index for a given name. -func (s *cSIDriverLister) Get(name string) (*v1.CSIDriver, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("csidriver"), name) - } - return obj.(*v1.CSIDriver), nil + return &cSIDriverLister{listers.New[*storagev1.CSIDriver](indexer, storagev1.Resource("csidriver"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1/csinode.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1/csinode.go index 93f869572..85a2ad6b5 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1/csinode.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1/csinode.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1 "k8s.io/api/storage/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSINodeLister helps list CSINodes. 
@@ -30,39 +30,19 @@ import ( type CSINodeLister interface { // List lists all CSINodes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CSINode, err error) + List(selector labels.Selector) (ret []*storagev1.CSINode, err error) // Get retrieves the CSINode from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.CSINode, error) + Get(name string) (*storagev1.CSINode, error) CSINodeListerExpansion } // cSINodeLister implements the CSINodeLister interface. type cSINodeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1.CSINode] } // NewCSINodeLister returns a new CSINodeLister. func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { - return &cSINodeLister{indexer: indexer} -} - -// List lists all CSINodes in the indexer. -func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1.CSINode, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSINode)) - }) - return ret, err -} - -// Get retrieves the CSINode from the index for a given name. -func (s *cSINodeLister) Get(name string) (*v1.CSINode, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("csinode"), name) - } - return obj.(*v1.CSINode), nil + return &cSINodeLister{listers.New[*storagev1.CSINode](indexer, storagev1.Resource("csinode"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go index a72328c9a..3ed903a17 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1/csistoragecapacity.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1 "k8s.io/api/storage/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSIStorageCapacityLister helps list CSIStorageCapacities. @@ -30,7 +30,7 @@ import ( type CSIStorageCapacityLister interface { // List lists all CSIStorageCapacities in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1.CSIStorageCapacity, err error) // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister CSIStorageCapacityListerExpansion @@ -38,25 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{indexer: indexer} -} - -// List lists all CSIStorageCapacities in the indexer. 
-func (s *cSIStorageCapacityLister) List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSIStorageCapacity)) - }) - return ret, err + return &cSIStorageCapacityLister{listers.New[*storagev1.CSIStorageCapacity](indexer, storagev1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{indexer: s.indexer, namespace: namespace} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*storagev1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -64,36 +56,15 @@ func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSISto type CSIStorageCapacityNamespaceLister interface { // List lists all CSIStorageCapacities in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1.CSIStorageCapacity, err error) // Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.CSIStorageCapacity, error) + Get(name string) (*storagev1.CSIStorageCapacity, error) CSIStorageCapacityNamespaceListerExpansion } // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CSIStorageCapacities in the indexer for a given namespace. -func (s cSIStorageCapacityNamespaceLister) List(selector labels.Selector) (ret []*v1.CSIStorageCapacity, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.CSIStorageCapacity)) - }) - return ret, err -} - -// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. -func (s cSIStorageCapacityNamespaceLister) Get(name string) (*v1.CSIStorageCapacity, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("csistoragecapacity"), name) - } - return obj.(*v1.CSIStorageCapacity), nil + listers.ResourceIndexer[*storagev1.CSIStorageCapacity] } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go index ffa3d19f5..8d595a856 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1 "k8s.io/api/storage/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StorageClassLister helps list StorageClasses. @@ -30,39 +30,19 @@ import ( type StorageClassLister interface { // List lists all StorageClasses in the indexer. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.StorageClass, err error) + List(selector labels.Selector) (ret []*storagev1.StorageClass, err error) // Get retrieves the StorageClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.StorageClass, error) + Get(name string) (*storagev1.StorageClass, error) StorageClassListerExpansion } // storageClassLister implements the StorageClassLister interface. type storageClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1.StorageClass] } // NewStorageClassLister returns a new StorageClassLister. func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { - return &storageClassLister{indexer: indexer} -} - -// List lists all StorageClasses in the indexer. -func (s *storageClassLister) List(selector labels.Selector) (ret []*v1.StorageClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.StorageClass)) - }) - return ret, err -} - -// Get retrieves the StorageClass from the index for a given name. -func (s *storageClassLister) Get(name string) (*v1.StorageClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("storageclass"), name) - } - return obj.(*v1.StorageClass), nil + return &storageClassLister{listers.New[*storagev1.StorageClass](indexer, storagev1.Resource("storageclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go index fbc735c93..26247faad 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1/volumeattachment.go @@ -19,10 +19,10 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1 "k8s.io/api/storage/v1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // VolumeAttachmentLister helps list VolumeAttachments. @@ -30,39 +30,19 @@ import ( type VolumeAttachmentLister interface { // List lists all VolumeAttachments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) + List(selector labels.Selector) (ret []*storagev1.VolumeAttachment, err error) // Get retrieves the VolumeAttachment from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.VolumeAttachment, error) + Get(name string) (*storagev1.VolumeAttachment, error) VolumeAttachmentListerExpansion } // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{indexer: indexer} -} - -// List lists all VolumeAttachments in the indexer. 
-func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1.VolumeAttachment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.VolumeAttachment)) - }) - return ret, err -} - -// Get retrieves the VolumeAttachment from the index for a given name. -func (s *volumeAttachmentLister) Get(name string) (*v1.VolumeAttachment, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("volumeattachment"), name) - } - return obj.(*v1.VolumeAttachment), nil + return &volumeAttachmentLister{listers.New[*storagev1.VolumeAttachment](indexer, storagev1.Resource("volumeattachment"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go index 0c1b5f264..62127edf9 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/csistoragecapacity.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSIStorageCapacityLister helps list CSIStorageCapacities. @@ -30,7 +30,7 @@ import ( type CSIStorageCapacityLister interface { // List lists all CSIStorageCapacities in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1alpha1.CSIStorageCapacity, err error) // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister CSIStorageCapacityListerExpansion @@ -38,25 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1alpha1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{indexer: indexer} -} - -// List lists all CSIStorageCapacities in the indexer. -func (s *cSIStorageCapacityLister) List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.CSIStorageCapacity)) - }) - return ret, err + return &cSIStorageCapacityLister{listers.New[*storagev1alpha1.CSIStorageCapacity](indexer, storagev1alpha1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. 
func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{indexer: s.indexer, namespace: namespace} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*storagev1alpha1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -64,36 +56,15 @@ func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSISto type CSIStorageCapacityNamespaceLister interface { // List lists all CSIStorageCapacities in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1alpha1.CSIStorageCapacity, err error) // Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.CSIStorageCapacity, error) + Get(name string) (*storagev1alpha1.CSIStorageCapacity, error) CSIStorageCapacityNamespaceListerExpansion } // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CSIStorageCapacities in the indexer for a given namespace. -func (s cSIStorageCapacityNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CSIStorageCapacity, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.CSIStorageCapacity)) - }) - return ret, err -} - -// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. -func (s cSIStorageCapacityNamespaceLister) Get(name string) (*v1alpha1.CSIStorageCapacity, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("csistoragecapacity"), name) - } - return obj.(*v1alpha1.CSIStorageCapacity), nil + listers.ResourceIndexer[*storagev1alpha1.CSIStorageCapacity] } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go index 3d5e2b7b7..9604e4c1b 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // VolumeAttachmentLister helps list VolumeAttachments. @@ -30,39 +30,19 @@ import ( type VolumeAttachmentLister interface { // List lists all VolumeAttachments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) + List(selector labels.Selector) (ret []*storagev1alpha1.VolumeAttachment, err error) // Get retrieves the VolumeAttachment from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v1alpha1.VolumeAttachment, error) + Get(name string) (*storagev1alpha1.VolumeAttachment, error) VolumeAttachmentListerExpansion } // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1alpha1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{indexer: indexer} -} - -// List lists all VolumeAttachments in the indexer. -func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.VolumeAttachment)) - }) - return ret, err -} - -// Get retrieves the VolumeAttachment from the index for a given name. -func (s *volumeAttachmentLister) Get(name string) (*v1alpha1.VolumeAttachment, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("volumeattachment"), name) - } - return obj.(*v1alpha1.VolumeAttachment), nil + return &volumeAttachmentLister{listers.New[*storagev1alpha1.VolumeAttachment](indexer, storagev1alpha1.Resource("volumeattachment"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go index f30b4a89b..08710c981 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // VolumeAttributesClassLister helps list VolumeAttributesClasses. @@ -30,39 +30,19 @@ import ( type VolumeAttributesClassLister interface { // List lists all VolumeAttributesClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.VolumeAttributesClass, err error) + List(selector labels.Selector) (ret []*storagev1alpha1.VolumeAttributesClass, err error) // Get retrieves the VolumeAttributesClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.VolumeAttributesClass, error) + Get(name string) (*storagev1alpha1.VolumeAttributesClass, error) VolumeAttributesClassListerExpansion } // volumeAttributesClassLister implements the VolumeAttributesClassLister interface. type volumeAttributesClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1alpha1.VolumeAttributesClass] } // NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister. func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister { - return &volumeAttributesClassLister{indexer: indexer} -} - -// List lists all VolumeAttributesClasses in the indexer. 
-func (s *volumeAttributesClassLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttributesClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.VolumeAttributesClass)) - }) - return ret, err -} - -// Get retrieves the VolumeAttributesClass from the index for a given name. -func (s *volumeAttributesClassLister) Get(name string) (*v1alpha1.VolumeAttributesClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("volumeattributesclass"), name) - } - return obj.(*v1alpha1.VolumeAttributesClass), nil + return &volumeAttributesClassLister{listers.New[*storagev1alpha1.VolumeAttributesClass](indexer, storagev1alpha1.Resource("volumeattributesclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go index c6787aa01..31a7e5a2b 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csidriver.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSIDriverLister helps list CSIDrivers. @@ -30,39 +30,19 @@ import ( type CSIDriverLister interface { // List lists all CSIDrivers in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CSIDriver, err error) + List(selector labels.Selector) (ret []*storagev1beta1.CSIDriver, err error) // Get retrieves the CSIDriver from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.CSIDriver, error) + Get(name string) (*storagev1beta1.CSIDriver, error) CSIDriverListerExpansion } // cSIDriverLister implements the CSIDriverLister interface. type cSIDriverLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1beta1.CSIDriver] } // NewCSIDriverLister returns a new CSIDriverLister. func NewCSIDriverLister(indexer cache.Indexer) CSIDriverLister { - return &cSIDriverLister{indexer: indexer} -} - -// List lists all CSIDrivers in the indexer. -func (s *cSIDriverLister) List(selector labels.Selector) (ret []*v1beta1.CSIDriver, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSIDriver)) - }) - return ret, err -} - -// Get retrieves the CSIDriver from the index for a given name. 
-func (s *cSIDriverLister) Get(name string) (*v1beta1.CSIDriver, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csidriver"), name) - } - return obj.(*v1beta1.CSIDriver), nil + return &cSIDriverLister{listers.New[*storagev1beta1.CSIDriver](indexer, storagev1beta1.Resource("csidriver"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go index 809efaa36..1ab0942c3 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csinode.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSINodeLister helps list CSINodes. @@ -30,39 +30,19 @@ import ( type CSINodeLister interface { // List lists all CSINodes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CSINode, err error) + List(selector labels.Selector) (ret []*storagev1beta1.CSINode, err error) // Get retrieves the CSINode from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.CSINode, error) + Get(name string) (*storagev1beta1.CSINode, error) CSINodeListerExpansion } // cSINodeLister implements the CSINodeLister interface. type cSINodeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1beta1.CSINode] } // NewCSINodeLister returns a new CSINodeLister. func NewCSINodeLister(indexer cache.Indexer) CSINodeLister { - return &cSINodeLister{indexer: indexer} -} - -// List lists all CSINodes in the indexer. -func (s *cSINodeLister) List(selector labels.Selector) (ret []*v1beta1.CSINode, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSINode)) - }) - return ret, err -} - -// Get retrieves the CSINode from the index for a given name. -func (s *cSINodeLister) Get(name string) (*v1beta1.CSINode, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csinode"), name) - } - return obj.(*v1beta1.CSINode), nil + return &cSINodeLister{listers.New[*storagev1beta1.CSINode](indexer, storagev1beta1.Resource("csinode"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go index 4680ffb7c..25ff95237 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/csistoragecapacity.go @@ -19,10 +19,10 @@ limitations under the License. 
package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CSIStorageCapacityLister helps list CSIStorageCapacities. @@ -30,7 +30,7 @@ import ( type CSIStorageCapacityLister interface { // List lists all CSIStorageCapacities in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1beta1.CSIStorageCapacity, err error) // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister CSIStorageCapacityListerExpansion @@ -38,25 +38,17 @@ type CSIStorageCapacityLister interface { // cSIStorageCapacityLister implements the CSIStorageCapacityLister interface. type cSIStorageCapacityLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1beta1.CSIStorageCapacity] } // NewCSIStorageCapacityLister returns a new CSIStorageCapacityLister. func NewCSIStorageCapacityLister(indexer cache.Indexer) CSIStorageCapacityLister { - return &cSIStorageCapacityLister{indexer: indexer} -} - -// List lists all CSIStorageCapacities in the indexer. -func (s *cSIStorageCapacityLister) List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSIStorageCapacity)) - }) - return ret, err + return &cSIStorageCapacityLister{listers.New[*storagev1beta1.CSIStorageCapacity](indexer, storagev1beta1.Resource("csistoragecapacity"))} } // CSIStorageCapacities returns an object that can list and get CSIStorageCapacities. func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSIStorageCapacityNamespaceLister { - return cSIStorageCapacityNamespaceLister{indexer: s.indexer, namespace: namespace} + return cSIStorageCapacityNamespaceLister{listers.NewNamespaced[*storagev1beta1.CSIStorageCapacity](s.ResourceIndexer, namespace)} } // CSIStorageCapacityNamespaceLister helps list and get CSIStorageCapacities. @@ -64,36 +56,15 @@ func (s *cSIStorageCapacityLister) CSIStorageCapacities(namespace string) CSISto type CSIStorageCapacityNamespaceLister interface { // List lists all CSIStorageCapacities in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error) + List(selector labels.Selector) (ret []*storagev1beta1.CSIStorageCapacity, err error) // Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.CSIStorageCapacity, error) + Get(name string) (*storagev1beta1.CSIStorageCapacity, error) CSIStorageCapacityNamespaceListerExpansion } // cSIStorageCapacityNamespaceLister implements the CSIStorageCapacityNamespaceLister // interface. type cSIStorageCapacityNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CSIStorageCapacities in the indexer for a given namespace. 
-func (s cSIStorageCapacityNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.CSIStorageCapacity, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.CSIStorageCapacity)) - }) - return ret, err -} - -// Get retrieves the CSIStorageCapacity from the indexer for a given namespace and name. -func (s cSIStorageCapacityNamespaceLister) Get(name string) (*v1beta1.CSIStorageCapacity, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("csistoragecapacity"), name) - } - return obj.(*v1beta1.CSIStorageCapacity), nil + listers.ResourceIndexer[*storagev1beta1.CSIStorageCapacity] } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go index c2b0d5b17..4f56776be 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go @@ -41,3 +41,7 @@ type StorageClassListerExpansion interface{} // VolumeAttachmentListerExpansion allows custom methods to be added to // VolumeAttachmentLister. type VolumeAttachmentListerExpansion interface{} + +// VolumeAttributesClassListerExpansion allows custom methods to be added to +// VolumeAttributesClassLister. +type VolumeAttributesClassListerExpansion interface{} diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go index eb7b8315c..3928cbacd 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StorageClassLister helps list StorageClasses. @@ -30,39 +30,19 @@ import ( type StorageClassLister interface { // List lists all StorageClasses in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.StorageClass, err error) + List(selector labels.Selector) (ret []*storagev1beta1.StorageClass, err error) // Get retrieves the StorageClass from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.StorageClass, error) + Get(name string) (*storagev1beta1.StorageClass, error) StorageClassListerExpansion } // storageClassLister implements the StorageClassLister interface. type storageClassLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1beta1.StorageClass] } // NewStorageClassLister returns a new StorageClassLister. func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { - return &storageClassLister{indexer: indexer} -} - -// List lists all StorageClasses in the indexer. 
-func (s *storageClassLister) List(selector labels.Selector) (ret []*v1beta1.StorageClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.StorageClass)) - }) - return ret, err -} - -// Get retrieves the StorageClass from the index for a given name. -func (s *storageClassLister) Get(name string) (*v1beta1.StorageClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("storageclass"), name) - } - return obj.(*v1beta1.StorageClass), nil + return &storageClassLister{listers.New[*storagev1beta1.StorageClass](indexer, storagev1beta1.Resource("storageclass"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go index bab2d317c..aab4bef13 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattachment.go @@ -19,10 +19,10 @@ limitations under the License. package v1beta1 import ( - v1beta1 "k8s.io/api/storage/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // VolumeAttachmentLister helps list VolumeAttachments. @@ -30,39 +30,19 @@ import ( type VolumeAttachmentLister interface { // List lists all VolumeAttachments in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1beta1.VolumeAttachment, err error) + List(selector labels.Selector) (ret []*storagev1beta1.VolumeAttachment, err error) // Get retrieves the VolumeAttachment from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1beta1.VolumeAttachment, error) + Get(name string) (*storagev1beta1.VolumeAttachment, error) VolumeAttachmentListerExpansion } // volumeAttachmentLister implements the VolumeAttachmentLister interface. type volumeAttachmentLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagev1beta1.VolumeAttachment] } // NewVolumeAttachmentLister returns a new VolumeAttachmentLister. func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { - return &volumeAttachmentLister{indexer: indexer} -} - -// List lists all VolumeAttachments in the indexer. -func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1beta1.VolumeAttachment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1beta1.VolumeAttachment)) - }) - return ret, err -} - -// Get retrieves the VolumeAttachment from the index for a given name. 
-func (s *volumeAttachmentLister) Get(name string) (*v1beta1.VolumeAttachment, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("volumeattachment"), name) - } - return obj.(*v1beta1.VolumeAttachment), nil + return &volumeAttachmentLister{listers.New[*storagev1beta1.VolumeAttachment](indexer, storagev1beta1.Resource("volumeattachment"))} } diff --git a/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go new file mode 100644 index 000000000..8b9724ed9 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + storagev1beta1 "k8s.io/api/storage/v1beta1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeAttributesClassLister helps list VolumeAttributesClasses. +// All objects returned here must be treated as read-only. +type VolumeAttributesClassLister interface { + // List lists all VolumeAttributesClasses in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*storagev1beta1.VolumeAttributesClass, err error) + // Get retrieves the VolumeAttributesClass from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*storagev1beta1.VolumeAttributesClass, error) + VolumeAttributesClassListerExpansion +} + +// volumeAttributesClassLister implements the VolumeAttributesClassLister interface. +type volumeAttributesClassLister struct { + listers.ResourceIndexer[*storagev1beta1.VolumeAttributesClass] +} + +// NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister. +func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister { + return &volumeAttributesClassLister{listers.New[*storagev1beta1.VolumeAttributesClass](indexer, storagev1beta1.Resource("volumeattributesclass"))} +} diff --git a/constraint/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go b/constraint/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go index b65bf2532..e7d164d04 100644 --- a/constraint/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go +++ b/constraint/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go @@ -19,10 +19,10 @@ limitations under the License. 
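Editor's note: the storage v1beta1 listers above (including the new VolumeAttributesClass lister) are reduced to thin wrappers around the generic listers.ResourceIndexer helpers; their exported List/Get surface is unchanged. A minimal sketch of how a caller exercises one of these listers — the indexer is built by hand purely for illustration, in real code it comes from a shared informer:

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	storagelisters "k8s.io/client-go/listers/storage/v1beta1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// A shared informer would normally own this indexer; built directly here for illustration.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&storagev1beta1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "fast"},
		Provisioner: "example.com/provisioner", // illustrative provisioner name
	})

	// NewStorageClassLister now wraps listers.New[*storagev1beta1.StorageClass] internally,
	// but callers still receive the same StorageClassLister interface.
	lister := storagelisters.NewStorageClassLister(indexer)

	classes, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, sc := range classes {
		fmt.Println(sc.Name, sc.Provisioner) // returned objects must be treated as read-only
	}
}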
package v1alpha1 import ( - v1alpha1 "k8s.io/api/storagemigration/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // StorageVersionMigrationLister helps list StorageVersionMigrations. @@ -30,39 +30,19 @@ import ( type StorageVersionMigrationLister interface { // List lists all StorageVersionMigrations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.StorageVersionMigration, err error) + List(selector labels.Selector) (ret []*storagemigrationv1alpha1.StorageVersionMigration, err error) // Get retrieves the StorageVersionMigration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.StorageVersionMigration, error) + Get(name string) (*storagemigrationv1alpha1.StorageVersionMigration, error) StorageVersionMigrationListerExpansion } // storageVersionMigrationLister implements the StorageVersionMigrationLister interface. type storageVersionMigrationLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*storagemigrationv1alpha1.StorageVersionMigration] } // NewStorageVersionMigrationLister returns a new StorageVersionMigrationLister. func NewStorageVersionMigrationLister(indexer cache.Indexer) StorageVersionMigrationLister { - return &storageVersionMigrationLister{indexer: indexer} -} - -// List lists all StorageVersionMigrations in the indexer. -func (s *storageVersionMigrationLister) List(selector labels.Selector) (ret []*v1alpha1.StorageVersionMigration, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.StorageVersionMigration)) - }) - return ret, err -} - -// Get retrieves the StorageVersionMigration from the index for a given name. -func (s *storageVersionMigrationLister) Get(name string) (*v1alpha1.StorageVersionMigration, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("storageversionmigration"), name) - } - return obj.(*v1alpha1.StorageVersionMigration), nil + return &storageVersionMigrationLister{listers.New[*storagemigrationv1alpha1.StorageVersionMigration](indexer, storagemigrationv1alpha1.Resource("storageversionmigration"))} } diff --git a/constraint/vendor/k8s.io/client-go/metadata/metadata.go b/constraint/vendor/k8s.io/client-go/metadata/metadata.go index 2cc7e22ad..a19ba1304 100644 --- a/constraint/vendor/k8s.io/client-go/metadata/metadata.go +++ b/constraint/vendor/k8s.io/client-go/metadata/metadata.go @@ -33,6 +33,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/rest" + "k8s.io/client-go/util/consistencydetector" + "k8s.io/client-go/util/watchlist" ) var deleteScheme = runtime.NewScheme() @@ -218,6 +220,24 @@ func (c *client) Get(ctx context.Context, name string, opts metav1.GetOptions, s // List returns all resources within the specified scope (namespace or cluster). 
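Editor's note: the metadata.go hunk below routes List through the new watchlist helper when the prepared options allow it, and otherwise (or on error) falls back to a plain LIST, with the consistency detector cross-checking results when requested. From a caller's point of view nothing changes; a hedged sketch of typical usage, where the GroupVersionResource and namespace are chosen only for illustration:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/metadata"
	"k8s.io/client-go/rest"
)

func listConfigMapMetadata(ctx context.Context, cfg *rest.Config) error {
	client, err := metadata.NewForConfig(cfg)
	if err != nil {
		return err
	}
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}

	// Whether this is served by the watchlist stream or a regular LIST is decided
	// inside the client; the returned PartialObjectMetadataList is the same either way.
	objs, err := client.Resource(gvr).Namespace("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, item := range objs.Items {
		fmt.Println(item.Namespace, item.Name) // metadata only, no object spec/status
	}
	return nil
}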
func (c *client) List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := watchlist.PrepareWatchListOptionsFromListOptions(opts); watchListOptionsErr != nil { + klog.FromContext(ctx).Error(watchListOptionsErr, "Failed preparing watchlist options, falling back to the standard LIST semantics", "resource", c.resource) + } else if hasWatchListOptionsPrepared { + result, err := c.watchList(ctx, watchListOptions) + if err == nil { + consistencydetector.CheckWatchListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("watchlist request for %v", c.resource), c.list, opts, result) + return result, nil + } + klog.FromContext(ctx).Error(err, "The watchlist request ended with an error, falling back to the standard LIST semantics", "resource", c.resource) + } + result, err := c.list(ctx, opts) + if err == nil { + consistencydetector.CheckListFromCacheDataConsistencyIfRequested(ctx, fmt.Sprintf("list request for %v", c.resource), c.list, opts, result) + } + return result, err +} + +func (c *client) list(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { result := c.client.client.Get().AbsPath(c.makeURLSegments("")...). SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json"). SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). @@ -249,6 +269,25 @@ func (c *client) List(ctx context.Context, opts metav1.ListOptions) (*metav1.Par return partial, nil } +// watchList establishes a watch stream with the server and returns PartialObjectMetadataList. +func (c *client) watchList(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + + result := &metav1.PartialObjectMetadataList{} + err := c.client.client.Get(). + AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). + WatchList(ctx). + Into(result) + + return result, err +} + // Watch finds all changes to the resources in the specified scope (namespace or cluster). func (c *client) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration diff --git a/constraint/vendor/k8s.io/client-go/openapi/groupversion.go b/constraint/vendor/k8s.io/client-go/openapi/groupversion.go index 601dcbe3c..40d91b9a5 100644 --- a/constraint/vendor/k8s.io/client-go/openapi/groupversion.go +++ b/constraint/vendor/k8s.io/client-go/openapi/groupversion.go @@ -27,6 +27,12 @@ const ContentTypeOpenAPIV3PB = "application/com.github.proto-openapi.spec.v3@v1. type GroupVersion interface { Schema(contentType string) ([]byte, error) + + // ServerRelativeURL. Returns the path and parameters used to fetch the schema. + // You should use the Schema method to fetch it, but this value can be used + // to key the current version of the schema in a cache since it contains a + // hash string which changes upon schema update. 
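Editor's note: the comment above on the new GroupVersion accessor suggests keying cached schemas by the returned URL, since it embeds a hash that changes when the schema does. A small sketch of that idea; the cache type and locking are illustrative, not part of client-go:

package example

import (
	"sync"

	"k8s.io/client-go/openapi"
)

// schemaCache refetches a group/version schema only when its ServerRelativeURL
// (which contains a content hash) changes.
type schemaCache struct {
	mu      sync.Mutex
	entries map[string][]byte // keyed by GroupVersion.ServerRelativeURL()
}

func (c *schemaCache) schemaFor(gv openapi.GroupVersion) ([]byte, error) {
	key := gv.ServerRelativeURL()

	c.mu.Lock()
	if cached, ok := c.entries[key]; ok {
		c.mu.Unlock()
		return cached, nil
	}
	c.mu.Unlock()

	// Cache miss (or the hash in the URL changed): fetch the schema itself.
	raw, err := gv.Schema(openapi.ContentTypeOpenAPIV3PB)
	if err != nil {
		return nil, err
	}

	c.mu.Lock()
	if c.entries == nil {
		c.entries = map[string][]byte{}
	}
	c.entries[key] = raw
	c.mu.Unlock()
	return raw, nil
}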
+ ServerRelativeURL() string } type groupversion struct { @@ -68,3 +74,9 @@ func (g *groupversion) Schema(contentType string) ([]byte, error) { return path.Do(context.TODO()).Raw() } + +// URL used for fetching the schema. The URL includes a hash and can be used +// to key the current version of the schema in a cache. +func (g *groupversion) ServerRelativeURL() string { + return g.item.ServerRelativeURL +} diff --git a/constraint/vendor/k8s.io/client-go/rest/client.go b/constraint/vendor/k8s.io/client-go/rest/client.go index 60df7e568..159caa13f 100644 --- a/constraint/vendor/k8s.io/client-go/rest/client.go +++ b/constraint/vendor/k8s.io/client-go/rest/client.go @@ -17,16 +17,21 @@ limitations under the License. package rest import ( + "fmt" + "mime" "net/http" "net/url" "os" "strconv" "strings" + "sync/atomic" "time" + "github.com/munnerz/goautoneg" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + clientfeatures "k8s.io/client-go/features" "k8s.io/client-go/util/flowcontrol" ) @@ -85,7 +90,7 @@ type RESTClient struct { versionedAPIPath string // content describes how a RESTClient encodes and decodes responses. - content ClientContentConfig + content requestClientContentConfigProvider // creates BackoffManager that is passed to requests. createBackoffMgr func() BackoffManager @@ -105,10 +110,6 @@ type RESTClient struct { // NewRESTClient creates a new RESTClient. This client performs generic REST functions // such as Get, Put, Post, and Delete on specified paths. func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientContentConfig, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { - if len(config.ContentType) == 0 { - config.ContentType = "application/json" - } - base := *baseURL if !strings.HasSuffix(base.Path, "/") { base.Path += "/" @@ -119,14 +120,53 @@ func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientConte return &RESTClient{ base: &base, versionedAPIPath: versionedAPIPath, - content: config, + content: requestClientContentConfigProvider{base: scrubCBORContentConfigIfDisabled(config)}, createBackoffMgr: readExpBackoffConfig, rateLimiter: rateLimiter, - - Client: client, + Client: client, }, nil } +func scrubCBORContentConfigIfDisabled(content ClientContentConfig) ClientContentConfig { + if clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) { + content.Negotiator = clientNegotiatorWithCBORSequenceStreamDecoder{content.Negotiator} + return content + } + + if mediatype, _, err := mime.ParseMediaType(content.ContentType); err == nil && mediatype == "application/cbor" { + content.ContentType = "application/json" + } + + clauses := goautoneg.ParseAccept(content.AcceptContentTypes) + scrubbed := false + for i, clause := range clauses { + if clause.Type == "application" && clause.SubType == "cbor" { + scrubbed = true + clauses[i].SubType = "json" + } + } + if !scrubbed { + // No application/cbor in AcceptContentTypes, nothing more to do. + return content + } + + parts := make([]string, 0, len(clauses)) + for _, clause := range clauses { + // ParseAccept does not store the parameter "q" in Params. 
+ params := clause.Params + if clause.Q < 1 { // omit q=1, it's the default + if params == nil { + params = make(map[string]string, 1) + } + params["q"] = strconv.FormatFloat(clause.Q, 'g', 3, 32) + } + parts = append(parts, mime.FormatMediaType(fmt.Sprintf("%s/%s", clause.Type, clause.SubType), params)) + } + content.AcceptContentTypes = strings.Join(parts, ",") + + return content +} + // GetRateLimiter returns rate limiter for a given client, or nil if it's called on a nil client func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { if c == nil { @@ -198,5 +238,106 @@ func (c *RESTClient) Delete() *Request { // APIVersion returns the APIVersion this RESTClient is expected to use. func (c *RESTClient) APIVersion() schema.GroupVersion { - return c.content.GroupVersion + config, _ := c.content.GetClientContentConfig() + return config.GroupVersion +} + +// requestClientContentConfigProvider observes HTTP 415 (Unsupported Media Type) responses to detect +// that the server does not understand CBOR. Once this has happened, future requests are forced to +// use JSON so they can succeed. This is convenient for client users that want to prefer CBOR, but +// also need to interoperate with older servers so requests do not permanently fail. The clients +// will not default to using CBOR until at least all supported kube-apiservers have enable-CBOR +// locked to true, so this path will be rarely taken. Additionally, all generated clients accessing +// built-in kube resources are forced to protobuf, so those will not degrade to JSON. +type requestClientContentConfigProvider struct { + base ClientContentConfig + + // Becomes permanently true if a server responds with HTTP 415 (Unsupported Media Type) to a + // request with "Content-Type" header containing the CBOR media type. + sawUnsupportedMediaTypeForCBOR atomic.Bool +} + +// GetClientContentConfig returns the ClientContentConfig that should be used for new requests by +// this client and true if the request ContentType was selected by default. +func (p *requestClientContentConfigProvider) GetClientContentConfig() (ClientContentConfig, bool) { + config := p.base + + defaulted := config.ContentType == "" + if defaulted { + config.ContentType = "application/json" + } + + if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) { + return config, defaulted + } + + if defaulted && clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsPreferCBOR) { + config.ContentType = "application/cbor" + } + + if sawUnsupportedMediaTypeForCBOR := p.sawUnsupportedMediaTypeForCBOR.Load(); !sawUnsupportedMediaTypeForCBOR { + return config, defaulted + } + + if mediaType, _, _ := mime.ParseMediaType(config.ContentType); mediaType != runtime.ContentTypeCBOR { + return config, defaulted + } + + // The effective ContentType is CBOR and the client has previously received an HTTP 415 in + // response to a CBOR request. Override ContentType to JSON. + config.ContentType = runtime.ContentTypeJSON + return config, defaulted +} + +// UnsupportedMediaType reports that the server has responded to a request with HTTP 415 Unsupported +// Media Type. 
+func (p *requestClientContentConfigProvider) UnsupportedMediaType(requestContentType string) { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) { + return + } + + // This could be extended to consider the Content-Encoding request header, the Accept and + // Accept-Encoding response headers, the request method, and URI (as mentioned in + // https://www.rfc-editor.org/rfc/rfc9110.html#section-15.5.16). The request Content-Type + // header is sufficient to implement a blanket CBOR fallback mechanism. + requestContentType, _, _ = mime.ParseMediaType(requestContentType) + switch requestContentType { + case runtime.ContentTypeCBOR, string(types.ApplyCBORPatchType): + p.sawUnsupportedMediaTypeForCBOR.Store(true) + } +} + +// clientNegotiatorWithCBORSequenceStreamDecoder is a ClientNegotiator that delegates to another +// ClientNegotiator to select the appropriate Encoder or Decoder for a given media type. As a +// special case, it will resolve "application/cbor-seq" (a CBOR Sequence, the concatenation of zero +// or more CBOR data items) as an alias for "application/cbor" (exactly one CBOR data item) when +// selecting a stream decoder. +type clientNegotiatorWithCBORSequenceStreamDecoder struct { + negotiator runtime.ClientNegotiator +} + +func (n clientNegotiatorWithCBORSequenceStreamDecoder) Encoder(contentType string, params map[string]string) (runtime.Encoder, error) { + return n.negotiator.Encoder(contentType, params) +} + +func (n clientNegotiatorWithCBORSequenceStreamDecoder) Decoder(contentType string, params map[string]string) (runtime.Decoder, error) { + return n.negotiator.Decoder(contentType, params) +} + +func (n clientNegotiatorWithCBORSequenceStreamDecoder) StreamDecoder(contentType string, params map[string]string) (runtime.Decoder, runtime.Serializer, runtime.Framer, error) { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) { + return n.negotiator.StreamDecoder(contentType, params) + } + + switch contentType { + case runtime.ContentTypeCBORSequence: + return n.negotiator.StreamDecoder(runtime.ContentTypeCBOR, params) + case runtime.ContentTypeCBOR: + // This media type is only appropriate for exactly one data item, not the zero or + // more events of a watch stream. + return nil, nil, nil, runtime.NegotiateError{ContentType: contentType, Stream: true} + default: + return n.negotiator.StreamDecoder(contentType, params) + } + } diff --git a/constraint/vendor/k8s.io/client-go/rest/config.go b/constraint/vendor/k8s.io/client-go/rest/config.go index f8ff7e928..f2e813d07 100644 --- a/constraint/vendor/k8s.io/client-go/rest/config.go +++ b/constraint/vendor/k8s.io/client-go/rest/config.go @@ -32,6 +32,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/cbor" + "k8s.io/client-go/features" "k8s.io/client-go/pkg/version" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/transport" @@ -113,6 +116,9 @@ type Config struct { // QPS indicates the maximum QPS to the master from this client. // If it's zero, the created RESTClient will use DefaultQPS: 5 + // + // Setting this to a negative value will disable client-side ratelimiting + // unless `Ratelimiter` is also set. QPS float32 // Maximum burst for throttle. 
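Editor's note: the new comment on Config.QPS above documents that a negative value turns client-side rate limiting off entirely unless an explicit RateLimiter is supplied. A short, hedged sketch of opting out of throttling for a high-volume controller; the in-cluster config is just one way to obtain a rest.Config:

package example

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func newUnthrottledClient() (*kubernetes.Clientset, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	// Negative QPS disables the client-side token bucket; server-side priority and
	// fairness still applies. Leave cfg.RateLimiter nil, otherwise that limiter wins.
	cfg.QPS = -1
	return kubernetes.NewForConfig(cfg)
}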
@@ -669,3 +675,19 @@ func CopyConfig(config *Config) *Config { } return c } + +// CodecFactoryForGeneratedClient returns the provided CodecFactory if there are no enabled client +// feature gates affecting serialization. Otherwise, it constructs and returns a new CodecFactory +// from the provided Scheme. +// +// This is supported ONLY for use by clients generated with client-gen. The caller is responsible +// for ensuring that the CodecFactory argument was constructed using the Scheme argument. +func CodecFactoryForGeneratedClient(scheme *runtime.Scheme, codecs serializer.CodecFactory) serializer.CodecFactory { + if !features.FeatureGates().Enabled(features.ClientsAllowCBOR) { + // NOTE: This assumes client-gen will not generate CBOR-enabled Codecs as long as + // the feature gate exists. + return codecs + } + + return serializer.NewCodecFactory(scheme, serializer.WithSerializer(cbor.NewSerializerInfo)) +} diff --git a/constraint/vendor/k8s.io/client-go/rest/fake/fake.go b/constraint/vendor/k8s.io/client-go/rest/fake/fake.go deleted file mode 100644 index 293e09694..000000000 --- a/constraint/vendor/k8s.io/client-go/rest/fake/fake.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This is made a separate package and should only be imported by tests, because -// it imports testapi -package fake - -import ( - "net/http" - "net/url" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/util/flowcontrol" -) - -// CreateHTTPClient creates an http.Client that will invoke the provided roundTripper func -// when a request is made. -func CreateHTTPClient(roundTripper func(*http.Request) (*http.Response, error)) *http.Client { - return &http.Client{ - Transport: roundTripperFunc(roundTripper), - } -} - -type roundTripperFunc func(*http.Request) (*http.Response, error) - -func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { - return f(req) -} - -// RESTClient provides a fake RESTClient interface. It is used to mock network -// interactions via a rest.Request, or to make them via the provided Client to -// a specific server. -type RESTClient struct { - NegotiatedSerializer runtime.NegotiatedSerializer - GroupVersion schema.GroupVersion - VersionedAPIPath string - - // Err is returned when any request would be made to the server. If Err is set, - // Req will not be recorded, Resp will not be returned, and Client will not be - // invoked. - Err error - // Req is set to the last request that was executed (had the methods Do/DoRaw) invoked. - Req *http.Request - // If Client is specified, the client will be invoked instead of returning Resp if - // Err is not set. - Client *http.Client - // Resp is returned to the caller after Req is recorded, unless Err or Client are set. 
- Resp *http.Response -} - -func (c *RESTClient) Get() *restclient.Request { - return c.Verb("GET") -} - -func (c *RESTClient) Put() *restclient.Request { - return c.Verb("PUT") -} - -func (c *RESTClient) Patch(pt types.PatchType) *restclient.Request { - return c.Verb("PATCH").SetHeader("Content-Type", string(pt)) -} - -func (c *RESTClient) Post() *restclient.Request { - return c.Verb("POST") -} - -func (c *RESTClient) Delete() *restclient.Request { - return c.Verb("DELETE") -} - -func (c *RESTClient) Verb(verb string) *restclient.Request { - return c.Request().Verb(verb) -} - -func (c *RESTClient) APIVersion() schema.GroupVersion { - return c.GroupVersion -} - -func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { - return nil -} - -func (c *RESTClient) Request() *restclient.Request { - config := restclient.ClientContentConfig{ - ContentType: runtime.ContentTypeJSON, - GroupVersion: c.GroupVersion, - Negotiator: runtime.NewClientNegotiator(c.NegotiatedSerializer, c.GroupVersion), - } - return restclient.NewRequestWithClient(&url.URL{Scheme: "https", Host: "localhost"}, c.VersionedAPIPath, config, CreateHTTPClient(c.do)) -} - -// do is invoked when a Request() created by this client is executed. -func (c *RESTClient) do(req *http.Request) (*http.Response, error) { - if c.Err != nil { - return nil, c.Err - } - c.Req = req - if c.Client != nil { - return c.Client.Do(req) - } - return c.Resp, nil -} diff --git a/constraint/vendor/k8s.io/client-go/rest/request.go b/constraint/vendor/k8s.io/client-go/rest/request.go index 850e57dae..0ec90ad18 100644 --- a/constraint/vendor/k8s.io/client-go/rest/request.go +++ b/constraint/vendor/k8s.io/client-go/rest/request.go @@ -19,6 +19,7 @@ package rest import ( "bytes" "context" + "encoding/base64" "encoding/hex" "fmt" "io" @@ -37,12 +38,15 @@ import ( "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/streaming" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/watch" + clientfeatures "k8s.io/client-go/features" restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" @@ -96,6 +100,9 @@ func defaultRequestRetryFn(maxRetries int) WithRetry { type Request struct { c *RESTClient + contentConfig ClientContentConfig + contentTypeNotSet bool + warningHandler WarningHandler rateLimiter flowcontrol.RateLimiter @@ -120,7 +127,7 @@ type Request struct { // output err error - // only one of body / bodyBytes may be set. requests using body are not retriable. + // only one of body / bodyBytes may be set. requests using body are not retryable. body io.Reader bodyBytes []byte @@ -149,6 +156,11 @@ func NewRequest(c *RESTClient) *Request { timeout = c.Client.Timeout } + // A request needs to know whether the content type was explicitly configured or selected by + // default in order to support the per-request Protobuf override used by clients generated + // with --prefers-protobuf. 
+ contentConfig, contentTypeDefaulted := c.content.GetClientContentConfig() + r := &Request{ c: c, rateLimiter: c.rateLimiter, @@ -158,14 +170,12 @@ func NewRequest(c *RESTClient) *Request { maxRetries: 10, retryFn: defaultRequestRetryFn, warningHandler: c.warningHandler, - } - switch { - case len(c.content.AcceptContentTypes) > 0: - r.SetHeader("Accept", c.content.AcceptContentTypes) - case len(c.content.ContentType) > 0: - r.SetHeader("Accept", c.content.ContentType+", */*") + contentConfig: contentConfig, + contentTypeNotSet: contentTypeDefaulted, } + + r.setAcceptHeader() return r } @@ -174,11 +184,36 @@ func NewRequestWithClient(base *url.URL, versionedAPIPath string, content Client return NewRequest(&RESTClient{ base: base, versionedAPIPath: versionedAPIPath, - content: content, + content: requestClientContentConfigProvider{base: content}, Client: client, }) } +func (r *Request) UseProtobufAsDefaultIfPreferred(prefersProtobuf bool) *Request { + if prefersProtobuf { + return r.UseProtobufAsDefault() + } + return r +} + +func (r *Request) UseProtobufAsDefault() *Request { + if r.contentTypeNotSet && len(r.contentConfig.AcceptContentTypes) == 0 { + r.contentConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + r.contentConfig.ContentType = "application/vnd.kubernetes.protobuf" + r.setAcceptHeader() + } + return r +} + +func (r *Request) setAcceptHeader() { + switch { + case len(r.contentConfig.AcceptContentTypes) > 0: + r.SetHeader("Accept", r.contentConfig.AcceptContentTypes) + case len(r.contentConfig.ContentType) > 0: + r.SetHeader("Accept", r.contentConfig.ContentType+", */*") + } +} + // Verb sets the verb this request will use. func (r *Request) Verb(verb string) *Request { r.verb = verb @@ -367,7 +402,7 @@ func (r *Request) Param(paramName, s string) *Request { // VersionedParams will not write query parameters that have omitempty set and are empty. If a // parameter has already been set it is appended to (Params and VersionedParams are additive). func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request { - return r.SpecificallyVersionedParams(obj, codec, r.c.content.GroupVersion) + return r.SpecificallyVersionedParams(obj, codec, r.contentConfig.GroupVersion) } func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request { @@ -447,11 +482,9 @@ func (r *Request) Body(obj interface{}) *Request { r.err = err return r } - glogBody("Request Body", data) r.body = nil r.bodyBytes = data case []byte: - glogBody("Request Body", t) r.body = nil r.bodyBytes = t case io.Reader: @@ -462,7 +495,7 @@ func (r *Request) Body(obj interface{}) *Request { if reflect.ValueOf(t).IsNil() { return r } - encoder, err := r.c.content.Negotiator.Encoder(r.c.content.ContentType, nil) + encoder, err := r.contentConfig.Negotiator.Encoder(r.contentConfig.ContentType, nil) if err != nil { r.err = err return r @@ -472,10 +505,9 @@ func (r *Request) Body(obj interface{}) *Request { r.err = err return r } - glogBody("Request Body", data) r.body = nil r.bodyBytes = data - r.SetHeader("Content-Type", r.c.content.ContentType) + r.SetHeader("Content-Type", r.contentConfig.ContentType) default: r.err = fmt.Errorf("unknown type used for body: %+v", obj) } @@ -701,10 +733,19 @@ func (b *throttledLogger) Infof(message string, args ...interface{}) { // Watch attempts to begin watching the requested location. // Returns a watch.Interface, or an error. 
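Editor's note: the request now snapshots its content config and remembers whether the content type was merely defaulted, which is what allows clients generated with --prefers-protobuf to flip individual requests to Protobuf without overriding an explicit user choice. A hedged sketch of the per-request override on a raw rest.Request; the resource path and limit are illustrative:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
)

func listPodsPreferringProtobuf(ctx context.Context, cs *kubernetes.Clientset) (*corev1.PodList, error) {
	result := &corev1.PodList{}
	err := cs.CoreV1().RESTClient().
		Get().
		// Only takes effect when neither ContentType nor AcceptContentTypes
		// was set explicitly on the client's config.
		UseProtobufAsDefault().
		Namespace("default").
		Resource("pods").
		VersionedParams(&metav1.ListOptions{Limit: 10}, scheme.ParameterCodec).
		Do(ctx).
		Into(result)
	return result, err
}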
func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { + w, _, e := r.watchInternal(ctx) + return w, e +} + +func (r *Request) watchInternal(ctx context.Context) (watch.Interface, runtime.Decoder, error) { + if r.body == nil { + logBody(ctx, 2, "Request Body", r.bodyBytes) + } + // We specifically don't want to rate limit watches, so we // don't use r.rateLimiter here. if r.err != nil { - return nil, r.err + return nil, nil, r.err } client := r.c.Client @@ -724,12 +765,12 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { url := r.URL().String() for { if err := retry.Before(ctx, r); err != nil { - return nil, retry.WrapPreviousError(err) + return nil, nil, retry.WrapPreviousError(err) } req, err := r.newHTTPRequest(ctx) if err != nil { - return nil, err + return nil, nil, err } resp, err := client.Do(req) @@ -749,34 +790,194 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) { // the server must have sent us an error in 'err' return true, nil } - if result := r.transformResponse(resp, req); result.err != nil { - return true, result.err + result := r.transformResponse(ctx, resp, req) + if err := result.Error(); err != nil { + return true, err } return true, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode) }() if done { if isErrRetryableFunc(req, err) { - return watch.NewEmptyWatch(), nil + return watch.NewEmptyWatch(), nil, nil } if err == nil { // if the server sent us an HTTP Response object, // we need to return the error object from that. err = transformErr } - return nil, retry.WrapPreviousError(err) + return nil, nil, retry.WrapPreviousError(err) + } + } +} + +type WatchListResult struct { + // err holds any errors we might have received + // during streaming. + err error + + // items hold the collected data + items []runtime.Object + + // initialEventsEndBookmarkRV holds the resource version + // extracted from the bookmark event that marks + // the end of the stream. + initialEventsEndBookmarkRV string + + // negotiatedObjectDecoder knows how to decode + // the initialEventsListBlueprint + negotiatedObjectDecoder runtime.Decoder + + // base64EncodedInitialEventsListBlueprint contains an empty, + // versioned list encoded in the requested format + // (e.g., protobuf, JSON, CBOR) and stored as a base64-encoded string + base64EncodedInitialEventsListBlueprint string +} + +// Into stores the result into obj. The passed obj parameter must be a pointer to a list type. +// +// Note: +// +// Special attention should be given to the type *unstructured.Unstructured, +// which represents a list type but does not have an "Items" field. +// Users who directly use RESTClient may store the response in such an object. +// This particular case is not handled by the current implementation of this function, +// but may be considered for future updates. 
+func (r WatchListResult) Into(obj runtime.Object) error { + if r.err != nil { + return r.err + } + + listItemsPtr, err := meta.GetItemsPtr(obj) + if err != nil { + return err + } + listVal, err := conversion.EnforcePtr(listItemsPtr) + if err != nil { + return err + } + if listVal.Kind() != reflect.Slice { + return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) + } + + encodedInitialEventsListBlueprint, err := base64.StdEncoding.DecodeString(r.base64EncodedInitialEventsListBlueprint) + if err != nil { + return fmt.Errorf("failed to decode the received blueprint list, err %w", err) + } + + err = runtime.DecodeInto(r.negotiatedObjectDecoder, encodedInitialEventsListBlueprint, obj) + if err != nil { + return err + } + + if len(r.items) == 0 { + listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0)) + } else { + listVal.Set(reflect.MakeSlice(listVal.Type(), len(r.items), len(r.items))) + for i, o := range r.items { + if listVal.Type().Elem() != reflect.TypeOf(o).Elem() { + return fmt.Errorf("received object type = %v at index = %d, doesn't match the list item type = %v", reflect.TypeOf(o).Elem(), i, listVal.Type().Elem()) + } + listVal.Index(i).Set(reflect.ValueOf(o).Elem()) + } + } + + listMeta, err := meta.ListAccessor(obj) + if err != nil { + return err + } + listMeta.SetResourceVersion(r.initialEventsEndBookmarkRV) + return nil +} + +// WatchList establishes a stream to get a consistent snapshot of data +// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal +// +// Note that the watchlist requires properly setting the ListOptions +// otherwise it just establishes a regular watch with the server. +// Check the documentation https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists +// to see what parameters are currently required. +func (r *Request) WatchList(ctx context.Context) WatchListResult { + if r.body == nil { + logBody(ctx, 2, "Request Body", r.bodyBytes) + } + + if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) { + return WatchListResult{err: fmt.Errorf("%q feature gate is not enabled", clientfeatures.WatchListClient)} + } + // TODO(#115478): consider validating request parameters (i.e sendInitialEvents). + // Most users use the generated client, which handles the proper setting of parameters. + // We don't have validation for other methods (e.g., the Watch) + // thus, for symmetry, we haven't added additional checks for the WatchList method. + w, d, err := r.watchInternal(ctx) + if err != nil { + return WatchListResult{err: err} + } + return r.handleWatchList(ctx, w, d) +} + +// handleWatchList holds the actual logic for easier unit testing. +// Note that this function will close the passed watch. 
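Editor's note: WatchList is the low-level entry point for the streaming list described in KEP-3157; it needs the WatchListClient client-go feature gate and correctly prepared ListOptions, otherwise it behaves like an ordinary watch (see the doc comment above). A hedged sketch against the core v1 REST client; the exact option set required may evolve, so treat the parameters here as indicative rather than authoritative:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/utils/ptr"
)

func watchListPods(ctx context.Context, cs *kubernetes.Clientset) (*corev1.PodList, error) {
	// Assumes the WatchListClient feature gate is enabled (it is off by default),
	// commonly via the KUBE_FEATURE_WatchListClient environment variable.
	opts := metav1.ListOptions{
		Watch:                true,
		AllowWatchBookmarks:  true,
		SendInitialEvents:    ptr.To(true),
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
	}

	pods := &corev1.PodList{}
	result := cs.CoreV1().RESTClient().
		Get().
		Namespace("default").
		Resource("pods").
		VersionedParams(&opts, scheme.ParameterCodec).
		WatchList(ctx)

	// Into fills the list from the streamed Added events plus the bookmark's
	// blueprint list, and sets ResourceVersion from the closing bookmark.
	if err := result.Into(pods); err != nil {
		return nil, err
	}
	return pods, nil
}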
+func (r *Request) handleWatchList(ctx context.Context, w watch.Interface, negotiatedObjectDecoder runtime.Decoder) WatchListResult { + defer w.Stop() + var lastKey string + var items []runtime.Object + + for { + select { + case <-ctx.Done(): + return WatchListResult{err: ctx.Err()} + case event, ok := <-w.ResultChan(): + if !ok { + return WatchListResult{err: fmt.Errorf("unexpected watch close")} + } + if event.Type == watch.Error { + return WatchListResult{err: errors.FromObject(event.Object)} + } + meta, err := meta.Accessor(event.Object) + if err != nil { + return WatchListResult{err: fmt.Errorf("failed to parse watch event: %#v", event)} + } + + switch event.Type { + case watch.Added: + // the following check ensures that the response is ordered. + // earlier servers had a bug that caused them to not sort the output. + // in such cases, return an error which can trigger fallback logic. + key := objectKeyFromMeta(meta) + if len(lastKey) > 0 && lastKey > key { + return WatchListResult{err: fmt.Errorf("cannot add the obj (%#v) with the key = %s, as it violates the ordering guarantees provided by the watchlist feature in beta phase, lastInsertedKey was = %s", event.Object, key, lastKey)} + } + items = append(items, event.Object) + lastKey = key + case watch.Bookmark: + if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" { + base64EncodedInitialEventsListBlueprint := meta.GetAnnotations()[metav1.InitialEventsListBlueprintAnnotationKey] + if len(base64EncodedInitialEventsListBlueprint) == 0 { + return WatchListResult{err: fmt.Errorf("%q annotation is missing content", metav1.InitialEventsListBlueprintAnnotationKey)} + } + return WatchListResult{ + items: items, + initialEventsEndBookmarkRV: meta.GetResourceVersion(), + negotiatedObjectDecoder: negotiatedObjectDecoder, + base64EncodedInitialEventsListBlueprint: base64EncodedInitialEventsListBlueprint, + } + } + default: + return WatchListResult{err: fmt.Errorf("unexpected watch event %#v, expected to only receive watch.Added and watch.Bookmark events", event)} + } } } } -func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) { +func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, runtime.Decoder, error) { contentType := resp.Header.Get("Content-Type") mediaType, params, err := mime.ParseMediaType(contentType) if err != nil { klog.V(4).Infof("Unexpected content type from the server: %q: %v", contentType, err) } - objectDecoder, streamingSerializer, framer, err := r.c.content.Negotiator.StreamDecoder(mediaType, params) + objectDecoder, streamingSerializer, framer, err := r.contentConfig.Negotiator.StreamDecoder(mediaType, params) if err != nil { - return nil, err + return nil, nil, err } handleWarnings(resp.Header, r.warningHandler) @@ -789,7 +990,7 @@ func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) // use 500 to indicate that the cause of the error is unknown - other error codes // are more specific to HTTP interactions, and set a reason errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"), - ), nil + ), objectDecoder, nil } // updateRequestResultMetric increments the RequestResult metric counter, @@ -829,6 +1030,10 @@ func sanitize(req *Request, resp *http.Response, err error) (string, string) { // Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object. // If we can, we return that as an error. 
Otherwise, we create an error that lists the http status and the content of the response. func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { + if r.body == nil { + logBody(ctx, 2, "Request Body", r.bodyBytes) + } + if r.err != nil { return nil, r.err } @@ -872,7 +1077,7 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) { if retry.IsNextRetry(ctx, r, req, resp, err, neverRetryError) { return false, nil } - result := r.transformResponse(resp, req) + result := r.transformResponse(ctx, resp, req) if err := result.Error(); err != nil { return true, err } @@ -1004,7 +1209,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp return false } // For connection errors and apiserver shutdown errors retry. - if net.IsConnectionReset(err) || net.IsProbableEOF(err) { + if net.IsConnectionReset(err) || net.IsProbableEOF(err) || net.IsHTTP2ConnectionLost(err) { return true } return false @@ -1026,6 +1231,9 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) { metrics.RequestSize.Observe(ctx, r.verb, r.URL().Host, float64(req.ContentLength)) } + if resp != nil && resp.StatusCode == http.StatusUnsupportedMediaType { + r.c.content.UnsupportedMediaType(resp.Request.Header.Get("Content-Type")) + } retry.After(ctx, r, resp, err) done := func() bool { @@ -1059,9 +1267,13 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp // - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError // - http.Client.Do errors are returned directly. func (r *Request) Do(ctx context.Context) Result { + if r.body == nil { + logBody(ctx, 2, "Request Body", r.bodyBytes) + } + var result Result err := r.request(ctx, func(req *http.Request, resp *http.Response) { - result = r.transformResponse(resp, req) + result = r.transformResponse(ctx, resp, req) }) if err != nil { return Result{err: err} @@ -1074,10 +1286,14 @@ func (r *Request) Do(ctx context.Context) Result { // DoRaw executes the request but does not process the response body. func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { + if r.body == nil { + logBody(ctx, 2, "Request Body", r.bodyBytes) + } + var result Result err := r.request(ctx, func(req *http.Request, resp *http.Response) { result.body, result.err = io.ReadAll(resp.Body) - glogBody("Response Body", result.body) + logBody(ctx, 2, "Response Body", result.body) if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent { result.err = r.transformUnstructuredResponseError(resp, req, result.body) } @@ -1092,7 +1308,7 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) { } // transformResponse converts an API response into a structured API object -func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result { +func (r *Request) transformResponse(ctx context.Context, resp *http.Response, req *http.Request) Result { var body []byte if resp.Body != nil { data, err := io.ReadAll(resp.Body) @@ -1121,13 +1337,14 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu } } - glogBody("Response Body", body) + // Call depth is tricky. This one is okay for Do and DoRaw. 
+ logBody(ctx, 7, "Response Body", body) // verify the content type is accurate var decoder runtime.Decoder contentType := resp.Header.Get("Content-Type") if len(contentType) == 0 { - contentType = r.c.content.ContentType + contentType = r.contentConfig.ContentType } if len(contentType) > 0 { var err error @@ -1135,7 +1352,7 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu if err != nil { return Result{err: errors.NewInternalError(err)} } - decoder, err = r.c.content.Negotiator.Decoder(mediaType, params) + decoder, err = r.contentConfig.Negotiator.Decoder(mediaType, params) if err != nil { // if we fail to negotiate a decoder, treat this as an unstructured error switch { @@ -1181,14 +1398,14 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu } // truncateBody decides if the body should be truncated, based on the glog Verbosity. -func truncateBody(body string) string { +func truncateBody(logger klog.Logger, body string) string { max := 0 switch { - case bool(klog.V(10).Enabled()): + case bool(logger.V(10).Enabled()): return body - case bool(klog.V(9).Enabled()): + case bool(logger.V(9).Enabled()): max = 10240 - case bool(klog.V(8).Enabled()): + case bool(logger.V(8).Enabled()): max = 1024 } @@ -1199,17 +1416,21 @@ func truncateBody(body string) string { return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max) } -// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against +// logBody logs a body output that could be either JSON or protobuf. It explicitly guards against // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. -func glogBody(prefix string, body []byte) { - if klogV := klog.V(8); klogV.Enabled() { +// +// It needs to be called by all functions which send or receive the data. 
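Editor's note: glogBody becomes logBody here, so request and response bodies are now logged through the logger carried in the request context (klog.FromContext) with an explicit call depth, rather than through global klog call sites. A small sketch, assuming klog/v2 contextual logging, of how a caller's context logger picks up the V(8)+ body logs; the logger name is illustrative:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"
)

func listWithBodyLogging(cs *kubernetes.Clientset) error {
	// Attach a logger to the context; at -v=8 and above, request and response
	// bodies are logged through this logger (and truncated per the -v level).
	logger := klog.Background().WithName("rest-debug")
	ctx := klog.NewContext(context.Background(), logger)

	_, err := cs.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
	return err
}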
+func logBody(ctx context.Context, callDepth int, prefix string, body []byte) { + logger := klog.FromContext(ctx) + if loggerV := logger.V(8); loggerV.Enabled() { + loggerV := loggerV.WithCallDepth(callDepth) if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - klogV.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) + loggerV.Info(prefix, "body", truncateBody(logger, hex.Dump(body))) } else { - klogV.Infof("%s: %s", prefix, truncateBody(string(body))) + loggerV.Info(prefix, "body", truncateBody(logger, string(body))) } } } @@ -1258,7 +1479,7 @@ func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, } var groupResource schema.GroupResource if len(r.resource) > 0 { - groupResource.Group = r.c.content.GroupVersion.Group + groupResource.Group = r.contentConfig.GroupVersion.Group groupResource.Resource = r.resource } return errors.NewGenericServerResponse( @@ -1470,3 +1691,10 @@ func ValidatePathSegmentName(name string, prefix bool) []string { } return IsValidPathSegmentName(name) } + +func objectKeyFromMeta(objMeta metav1.Object) string { + if len(objMeta.GetNamespace()) > 0 { + return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName()) + } + return objMeta.GetName() +} diff --git a/constraint/vendor/k8s.io/client-go/rest/url_utils.go b/constraint/vendor/k8s.io/client-go/rest/url_utils.go index c4ce6e3b8..0a0ab7917 100644 --- a/constraint/vendor/k8s.io/client-go/rest/url_utils.go +++ b/constraint/vendor/k8s.io/client-go/rest/url_utils.go @@ -61,7 +61,7 @@ func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, de return hostURL, versionedAPIPath, nil } -// DefaultVersionedAPIPathFor constructs the default path for the given group version, assuming the given +// DefaultVersionedAPIPath constructs the default path for the given group version, assuming the given // API path, following the standard conventions of the Kubernetes API. func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string { versionedAPIPath := path.Join("/", apiPath) diff --git a/constraint/vendor/k8s.io/client-go/rest/watch/decoder.go b/constraint/vendor/k8s.io/client-go/rest/watch/decoder.go index e95c020b2..c2b68cbcb 100644 --- a/constraint/vendor/k8s.io/client-go/rest/watch/decoder.go +++ b/constraint/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package versioned +package watch import ( "fmt" @@ -51,7 +51,7 @@ func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { return "", nil, err } if res != &got { - return "", nil, fmt.Errorf("unable to decode to metav1.Event") + return "", nil, fmt.Errorf("unable to decode to metav1.WatchEvent") } switch got.Type { case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark): diff --git a/constraint/vendor/k8s.io/client-go/rest/watch/encoder.go b/constraint/vendor/k8s.io/client-go/rest/watch/encoder.go index e55aa12d9..a95b4985c 100644 --- a/constraint/vendor/k8s.io/client-go/rest/watch/encoder.go +++ b/constraint/vendor/k8s.io/client-go/rest/watch/encoder.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package versioned +package watch import ( "encoding/json" diff --git a/constraint/vendor/k8s.io/client-go/testing/actions.go b/constraint/vendor/k8s.io/client-go/testing/actions.go index c8ae0aaf5..e7af4d6e8 100644 --- a/constraint/vendor/k8s.io/client-go/testing/actions.go +++ b/constraint/vendor/k8s.io/client-go/testing/actions.go @@ -29,42 +29,66 @@ import ( "k8s.io/apimachinery/pkg/types" ) +// All NewRoot... functions return non-namespaced actions, and are equivalent to +// calling the corresponding New... function with an empty namespace. +// This is assumed by the fake client generator. + func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { + return NewRootGetActionWithOptions(resource, name, metav1.GetOptions{}) +} + +func NewRootGetActionWithOptions(resource schema.GroupVersionResource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Name = name + action.GetOptions = opts return action } func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl { + return NewGetActionWithOptions(resource, namespace, name, metav1.GetOptions{}) +} + +func NewGetActionWithOptions(resource schema.GroupVersionResource, namespace, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Namespace = namespace action.Name = name + action.GetOptions = opts return action } func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl { + return NewGetSubresourceActionWithOptions(resource, namespace, subresource, name, metav1.GetOptions{}) +} + +func NewGetSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, subresource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Name = name + action.GetOptions = opts return action } func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl { + return NewRootGetSubresourceActionWithOptions(resource, subresource, name, metav1.GetOptions{}) +} + +func NewRootGetSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, name string, opts metav1.GetOptions) GetActionImpl { action := GetActionImpl{} action.Verb = "get" action.Resource = resource action.Subresource = subresource action.Name = name + action.GetOptions = opts return action } @@ -76,6 +100,21 @@ func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVe action.Kind = kind labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} + + return action +} + +func NewRootListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts metav1.ListOptions) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.ListOptions = opts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: 
fieldSelector.String()} return action } @@ -86,6 +125,21 @@ func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersio action.Resource = resource action.Kind = kind action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + action.ListOptions = metav1.ListOptions{LabelSelector: labelSelector.String(), FieldSelector: fieldSelector.String()} + + return action +} + +func NewListActionWithOptions(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts metav1.ListOptions) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.Namespace = namespace + action.ListOptions = opts + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} @@ -93,36 +147,55 @@ func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersio } func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl { + return NewRootCreateActionWithOptions(resource, object, metav1.CreateOptions{}) +} + +func NewRootCreateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Object = object + action.CreateOptions = opts return action } func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + return NewCreateActionWithOptions(resource, namespace, object, metav1.CreateOptions{}) +} + +func NewCreateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Namespace = namespace action.Object = object + action.CreateOptions = opts return action } func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl { + return NewRootCreateSubresourceActionWithOptions(resource, name, subresource, object, metav1.CreateOptions{}) +} + +func NewRootCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource action.Subresource = subresource action.Name = name action.Object = object + action.CreateOptions = opts return action } func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object) CreateActionImpl { + return NewCreateSubresourceActionWithOptions(resource, name, subresource, namespace, object, metav1.CreateOptions{}) +} + +func NewCreateSubresourceActionWithOptions(resource schema.GroupVersionResource, name, subresource, namespace string, object runtime.Object, opts metav1.CreateOptions) CreateActionImpl { action := CreateActionImpl{} action.Verb = "create" action.Resource = resource @@ -130,41 +203,61 @@ func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subr action.Subresource = subresource action.Name = name action.Object = object + action.CreateOptions = opts return action } func NewRootUpdateAction(resource 
schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { + return NewRootUpdateActionWithOptions(resource, object, metav1.UpdateOptions{}) +} + +func NewRootUpdateActionWithOptions(resource schema.GroupVersionResource, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Object = object + action.UpdateOptions = opts return action } func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + return NewUpdateActionWithOptions(resource, namespace, object, metav1.UpdateOptions{}) +} + +func NewUpdateActionWithOptions(resource schema.GroupVersionResource, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Namespace = namespace action.Object = object + action.UpdateOptions = opts return action } func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { + return NewRootPatchActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{}) +} + +func NewRootPatchActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { + return NewPatchActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{}) +} + +func NewPatchActionWithOptions(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -172,11 +265,16 @@ func NewPatchAction(resource schema.GroupVersionResource, namespace string, name action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + return NewRootPatchSubresourceActionWithOptions(resource, name, pt, patch, metav1.PatchOptions{}, subresources...) +} + +func NewRootPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -184,11 +282,16 @@ func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name st action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + return NewPatchSubresourceActionWithOptions(resource, namespace, name, pt, patch, metav1.PatchOptions{}, subresources...) 
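// Editor's sketch (illustrative, not part of the upstream diff): the new
// *WithOptions constructors record the request options on the returned action
// so fake-client tests can assert on them. A hypothetical example, assuming a
// pods GroupVersionResource:
//
//	gvr := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
//	a := NewPatchActionWithOptions(gvr, "default", "my-pod",
//		types.MergePatchType, []byte(`{"metadata":{"labels":{"a":"b"}}}`),
//		metav1.PatchOptions{FieldManager: "my-test"})
//	_ = a.GetPatchOptions() // returns the metav1.PatchOptions passed above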
+} + +func NewPatchSubresourceActionWithOptions(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions, subresources ...string) PatchActionImpl { action := PatchActionImpl{} action.Verb = "patch" action.Resource = resource @@ -197,26 +300,38 @@ func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, action.Name = name action.PatchType = pt action.Patch = patch + action.PatchOptions = opts return action } func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + return NewRootUpdateSubresourceActionWithOptions(resource, subresource, object, metav1.UpdateOptions{}) +} + +func NewRootUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Subresource = subresource action.Object = object + action.UpdateOptions = opts return action } + func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + return NewUpdateSubresourceActionWithOptions(resource, subresource, namespace, object, metav1.UpdateOptions{}) +} + +func NewUpdateSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object, opts metav1.UpdateOptions) UpdateActionImpl { action := UpdateActionImpl{} action.Verb = "update" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Object = object + action.UpdateOptions = opts return action } @@ -236,11 +351,16 @@ func NewRootDeleteActionWithOptions(resource schema.GroupVersionResource, name s } func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl { + return NewRootDeleteSubresourceActionWithOptions(resource, subresource, name, metav1.DeleteOptions{}) +} + +func NewRootDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource string, name string, opts metav1.DeleteOptions) DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" action.Resource = resource action.Subresource = subresource action.Name = name + action.DeleteOptions = opts return action } @@ -261,41 +381,69 @@ func NewDeleteActionWithOptions(resource schema.GroupVersionResource, namespace, } func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl { + return NewDeleteSubresourceActionWithOptions(resource, subresource, namespace, name, metav1.DeleteOptions{}) +} + +func NewDeleteSubresourceActionWithOptions(resource schema.GroupVersionResource, subresource, namespace, name string, opts metav1.DeleteOptions) DeleteActionImpl { action := DeleteActionImpl{} action.Verb = "delete" action.Resource = resource action.Subresource = subresource action.Namespace = namespace action.Name = name + action.DeleteOptions = opts return action } func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewRootDeleteCollectionActionWithOptions(resource, metav1.DeleteOptions{}, listOpts) +} + +func NewRootDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, deleteOpts metav1.DeleteOptions, 
listOpts metav1.ListOptions) DeleteCollectionActionImpl { action := DeleteCollectionActionImpl{} action.Verb = "delete-collection" action.Resource = resource - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.DeleteOptions = deleteOpts + action.ListOptions = listOpts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} return action } func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewDeleteCollectionActionWithOptions(resource, namespace, metav1.DeleteOptions{}, listOpts) +} + +func NewDeleteCollectionActionWithOptions(resource schema.GroupVersionResource, namespace string, deleteOpts metav1.DeleteOptions, listOpts metav1.ListOptions) DeleteCollectionActionImpl { action := DeleteCollectionActionImpl{} action.Verb = "delete-collection" action.Resource = resource action.Namespace = namespace - labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.DeleteOptions = deleteOpts + action.ListOptions = listOpts + + labelSelector, fieldSelector, _ := ExtractFromListOptions(listOpts) action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} return action } func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewRootWatchActionWithOptions(resource, listOpts) +} + +func NewRootWatchActionWithOptions(resource schema.GroupVersionResource, opts metav1.ListOptions) WatchActionImpl { action := WatchActionImpl{} action.Verb = "watch" action.Resource = resource + action.ListOptions = opts + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} @@ -328,10 +476,17 @@ func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fi } func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl { + listOpts, _ := opts.(metav1.ListOptions) + return NewWatchActionWithOptions(resource, namespace, listOpts) +} + +func NewWatchActionWithOptions(resource schema.GroupVersionResource, namespace string, opts metav1.ListOptions) WatchActionImpl { action := WatchActionImpl{} action.Verb = "watch" action.Resource = resource action.Namespace = namespace + action.ListOptions = opts + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} @@ -487,17 +642,23 @@ func (a GenericActionImpl) DeepCopy() Action { type GetActionImpl struct { ActionImpl - Name string + Name string + GetOptions metav1.GetOptions } func (a GetActionImpl) GetName() string { return a.Name } +func (a GetActionImpl) GetGetOptions() metav1.GetOptions { + return a.GetOptions +} + func (a GetActionImpl) DeepCopy() Action { return GetActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), Name: a.Name, + GetOptions: *a.GetOptions.DeepCopy(), } } @@ -506,6 +667,7 @@ type ListActionImpl struct { Kind schema.GroupVersionKind Name string ListRestrictions ListRestrictions + ListOptions metav1.ListOptions } func (a ListActionImpl) GetKind() schema.GroupVersionKind { @@ -516,6 +678,10 @@ func (a ListActionImpl) GetListRestrictions() ListRestrictions { return a.ListRestrictions } +func (a 
ListActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a ListActionImpl) DeepCopy() Action { return ListActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -525,48 +691,62 @@ func (a ListActionImpl) DeepCopy() Action { Labels: a.ListRestrictions.Labels.DeepCopySelector(), Fields: a.ListRestrictions.Fields.DeepCopySelector(), }, + ListOptions: *a.ListOptions.DeepCopy(), } } type CreateActionImpl struct { ActionImpl - Name string - Object runtime.Object + Name string + Object runtime.Object + CreateOptions metav1.CreateOptions } func (a CreateActionImpl) GetObject() runtime.Object { return a.Object } +func (a CreateActionImpl) GetCreateOptions() metav1.CreateOptions { + return a.CreateOptions +} + func (a CreateActionImpl) DeepCopy() Action { return CreateActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Name: a.Name, - Object: a.Object.DeepCopyObject(), + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Object: a.Object.DeepCopyObject(), + CreateOptions: *a.CreateOptions.DeepCopy(), } } type UpdateActionImpl struct { ActionImpl - Object runtime.Object + Object runtime.Object + UpdateOptions metav1.UpdateOptions } func (a UpdateActionImpl) GetObject() runtime.Object { return a.Object } +func (a UpdateActionImpl) GetUpdateOptions() metav1.UpdateOptions { + return a.UpdateOptions +} + func (a UpdateActionImpl) DeepCopy() Action { return UpdateActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Object: a.Object.DeepCopyObject(), + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Object: a.Object.DeepCopyObject(), + UpdateOptions: *a.UpdateOptions.DeepCopy(), } } type PatchActionImpl struct { ActionImpl - Name string - PatchType types.PatchType - Patch []byte + Name string + PatchType types.PatchType + Patch []byte + PatchOptions metav1.PatchOptions } func (a PatchActionImpl) GetName() string { @@ -581,14 +761,19 @@ func (a PatchActionImpl) GetPatchType() types.PatchType { return a.PatchType } +func (a PatchActionImpl) GetPatchOptions() metav1.PatchOptions { + return a.PatchOptions +} + func (a PatchActionImpl) DeepCopy() Action { patch := make([]byte, len(a.Patch)) copy(patch, a.Patch) return PatchActionImpl{ - ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), - Name: a.Name, - PatchType: a.PatchType, - Patch: patch, + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + PatchType: a.PatchType, + Patch: patch, + PatchOptions: *a.PatchOptions.DeepCopy(), } } @@ -617,12 +802,22 @@ func (a DeleteActionImpl) DeepCopy() Action { type DeleteCollectionActionImpl struct { ActionImpl ListRestrictions ListRestrictions + DeleteOptions metav1.DeleteOptions + ListOptions metav1.ListOptions } func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { return a.ListRestrictions } +func (a DeleteCollectionActionImpl) GetDeleteOptions() metav1.DeleteOptions { + return a.DeleteOptions +} + +func (a DeleteCollectionActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a DeleteCollectionActionImpl) DeepCopy() Action { return DeleteCollectionActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -630,18 +825,25 @@ func (a DeleteCollectionActionImpl) DeepCopy() Action { Labels: a.ListRestrictions.Labels.DeepCopySelector(), Fields: a.ListRestrictions.Fields.DeepCopySelector(), }, + DeleteOptions: *a.DeleteOptions.DeepCopy(), + ListOptions: *a.ListOptions.DeepCopy(), } } type WatchActionImpl struct { ActionImpl WatchRestrictions WatchRestrictions + 
ListOptions metav1.ListOptions } func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { return a.WatchRestrictions } +func (a WatchActionImpl) GetListOptions() metav1.ListOptions { + return a.ListOptions +} + func (a WatchActionImpl) DeepCopy() Action { return WatchActionImpl{ ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), @@ -650,6 +852,7 @@ func (a WatchActionImpl) DeepCopy() Action { Fields: a.WatchRestrictions.Fields.DeepCopySelector(), ResourceVersion: a.WatchRestrictions.ResourceVersion, }, + ListOptions: *a.ListOptions.DeepCopy(), } } diff --git a/constraint/vendor/k8s.io/client-go/testing/fixture.go b/constraint/vendor/k8s.io/client-go/testing/fixture.go index 396840670..15b3e5334 100644 --- a/constraint/vendor/k8s.io/client-go/testing/fixture.go +++ b/constraint/vendor/k8s.io/client-go/testing/fixture.go @@ -19,19 +19,24 @@ package testing import ( "fmt" "reflect" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/yaml" "sort" "strings" "sync" - jsonpatch "github.com/evanphx/json-patch" + jsonpatch "gopkg.in/evanphx/json-patch.v4" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/meta/testrestmapper" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/managedfields" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" @@ -46,26 +51,32 @@ type ObjectTracker interface { Add(obj runtime.Object) error // Get retrieves the object by its kind, namespace and name. - Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) + Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) // Create adds an object to the tracker in the specified namespace. - Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error // Update updates an existing object in the tracker in the specified namespace. - Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error + Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error + + // Patch patches an existing object in the tracker in the specified namespace. + Patch(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.PatchOptions) error + + // Apply applies an object in the tracker in the specified namespace. + Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error // List retrieves all objects of a given kind in the given // namespace. Only non-List kinds are accepted. - List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) + List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) // Delete deletes an existing object from the tracker. If object // didn't exist in the tracker prior to deletion, Delete returns // no error. 
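// Editor's note, an illustrative sketch rather than part of the upstream
// change: every tracker method below now accepts at most one trailing options
// value, so existing call sites keep compiling while new callers can pass
// options explicitly (tracker and gvr assumed in scope):
//
//	obj, err := tracker.Get(gvr, "default", "my-pod")                     // legacy form, no options
//	obj, err = tracker.Get(gvr, "default", "my-pod", metav1.GetOptions{}) // with explicit options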
- Delete(gvr schema.GroupVersionResource, ns, name string) error + Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error // Watch watches objects from the tracker. Watch returns a channel // which will push added / modified / deleted object. - Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) + Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) } // ObjectScheme abstracts the implementation of common operations on objects. @@ -76,133 +87,201 @@ type ObjectScheme interface { // ObjectReaction returns a ReactionFunc that applies core.Action to // the given tracker. +// +// If tracker also implements ManagedFieldObjectTracker, then managed fields +// will be handled by the tracker and apply patch actions will be evaluated +// using the field manager and will take field ownership into consideration. +// Without a ManagedFieldObjectTracker, apply patch actions do not consider +// field ownership. +// +// WARNING: There is no server side defaulting, validation, or conversion handled +// by the fake client and subresources are not handled accurately (fields in the +// root resource are not automatically updated when a scale resource is updated, for example). func ObjectReaction(tracker ObjectTracker) ReactionFunc { + reactor := objectTrackerReact{tracker: tracker} return func(action Action) (bool, runtime.Object, error) { - ns := action.GetNamespace() - gvr := action.GetResource() // Here and below we need to switch on implementation types, // not on interfaces, as some interfaces are identical // (e.g. UpdateAction and CreateAction), so if we use them, // updates and creates end up matching the same case branch. switch action := action.(type) { - case ListActionImpl: - obj, err := tracker.List(gvr, action.GetKind(), ns) + obj, err := reactor.List(action) return true, obj, err - case GetActionImpl: - obj, err := tracker.Get(gvr, ns, action.GetName()) + obj, err := reactor.Get(action) return true, obj, err - case CreateActionImpl: - objMeta, err := meta.Accessor(action.GetObject()) - if err != nil { - return true, nil, err - } - if action.GetSubresource() == "" { - err = tracker.Create(gvr, action.GetObject(), ns) - } else { - oldObj, getOldObjErr := tracker.Get(gvr, ns, objMeta.GetName()) - if getOldObjErr != nil { - return true, nil, getOldObjErr - } - // Check whether the existing historical object type is the same as the current operation object type that needs to be updated, and if it is the same, perform the update operation. - if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) { - // TODO: Currently we're handling subresource creation as an update - // on the enclosing resource. This works for some subresources but - // might not be generic enough. - err = tracker.Update(gvr, action.GetObject(), ns) - } else { - // If the historical object type is different from the current object type, need to make sure we return the object submitted,don't persist the submitted object in the tracker. 
- return true, action.GetObject(), nil - } - } - if err != nil { - return true, nil, err - } - obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + obj, err := reactor.Create(action) return true, obj, err - case UpdateActionImpl: - objMeta, err := meta.Accessor(action.GetObject()) - if err != nil { - return true, nil, err - } - err = tracker.Update(gvr, action.GetObject(), ns) - if err != nil { - return true, nil, err - } - obj, err := tracker.Get(gvr, ns, objMeta.GetName()) + obj, err := reactor.Update(action) return true, obj, err - case DeleteActionImpl: - err := tracker.Delete(gvr, ns, action.GetName()) - if err != nil { - return true, nil, err - } - return true, nil, nil - + obj, err := reactor.Delete(action) + return true, obj, err case PatchActionImpl: - obj, err := tracker.Get(gvr, ns, action.GetName()) - if err != nil { - return true, nil, err + if action.GetPatchType() == types.ApplyPatchType { + obj, err := reactor.Apply(action) + return true, obj, err } + obj, err := reactor.Patch(action) + return true, obj, err + default: + return false, nil, fmt.Errorf("no reaction implemented for %s", action) + } + } +} - old, err := json.Marshal(obj) - if err != nil { - return true, nil, err - } +type objectTrackerReact struct { + tracker ObjectTracker +} - // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields - // in obj that are removed by patch are cleared - value := reflect.ValueOf(obj) - value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) - - switch action.GetPatchType() { - case types.JSONPatchType: - patch, err := jsonpatch.DecodePatch(action.GetPatch()) - if err != nil { - return true, nil, err - } - modified, err := patch.Apply(old) - if err != nil { - return true, nil, err - } - - if err = json.Unmarshal(modified, obj); err != nil { - return true, nil, err - } - case types.MergePatchType: - modified, err := jsonpatch.MergePatch(old, action.GetPatch()) - if err != nil { - return true, nil, err - } - - if err := json.Unmarshal(modified, obj); err != nil { - return true, nil, err - } - case types.StrategicMergePatchType, types.ApplyPatchType: - mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) - if err != nil { - return true, nil, err - } - if err = json.Unmarshal(mergedByte, obj); err != nil { - return true, nil, err - } - default: - return true, nil, fmt.Errorf("PatchType is not supported") - } +func (o objectTrackerReact) List(action ListActionImpl) (runtime.Object, error) { + return o.tracker.List(action.GetResource(), action.GetKind(), action.GetNamespace(), action.ListOptions) +} - if err = tracker.Update(gvr, obj, ns); err != nil { - return true, nil, err - } +func (o objectTrackerReact) Get(action GetActionImpl) (runtime.Object, error) { + return o.tracker.Get(action.GetResource(), action.GetNamespace(), action.GetName(), action.GetOptions) +} + +func (o objectTrackerReact) Create(action CreateActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return nil, err + } + if action.GetSubresource() == "" { + err = o.tracker.Create(gvr, action.GetObject(), ns, action.CreateOptions) + if err != nil { + return nil, err + } + } else { + oldObj, getOldObjErr := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if getOldObjErr != nil { + return nil, getOldObjErr + } + // Check whether the existing historical object type is the same as the current operation object type that 
needs to be updated, and if it is the same, perform the update operation. + if reflect.TypeOf(oldObj) == reflect.TypeOf(action.GetObject()) { + // TODO: Currently we're handling subresource creation as an update + // on the enclosing resource. This works for some subresources but + // might not be generic enough. + err = o.tracker.Update(gvr, action.GetObject(), ns, metav1.UpdateOptions{ + DryRun: action.CreateOptions.DryRun, + FieldManager: action.CreateOptions.FieldManager, + FieldValidation: action.CreateOptions.FieldValidation, + }) + } else { + // If the historical object type is different from the current object type, need to make sure we return the object submitted,don't persist the submitted object in the tracker. + return action.GetObject(), nil + } + } + if err != nil { + return nil, err + } + obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Update(action UpdateActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + objMeta, err := meta.Accessor(action.GetObject()) + if err != nil { + return nil, err + } - return true, obj, nil + err = o.tracker.Update(gvr, action.GetObject(), ns, action.UpdateOptions) + if err != nil { + return nil, err + } - default: - return false, nil, fmt.Errorf("no reaction implemented for %s", action) + obj, err := o.tracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Delete(action DeleteActionImpl) (runtime.Object, error) { + err := o.tracker.Delete(action.GetResource(), action.GetNamespace(), action.GetName(), action.DeleteOptions) + return nil, err +} + +func (o objectTrackerReact) Apply(action PatchActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := yaml.Unmarshal(action.GetPatch(), &patchObj.Object); err != nil { + return nil, err + } + patchObj.SetName(action.GetName()) + err := o.tracker.Apply(gvr, patchObj, ns, action.PatchOptions) + if err != nil { + return nil, err + } + obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{}) + return obj, err +} + +func (o objectTrackerReact) Patch(action PatchActionImpl) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + obj, err := o.tracker.Get(gvr, ns, action.GetName(), metav1.GetOptions{}) + if err != nil { + return nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return nil, err + } + + if err = json.Unmarshal(modified, obj); err != nil { + return nil, err } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.StrategicMergePatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return nil, err + } + if err = 
json.Unmarshal(mergedByte, obj); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("PatchType %s is not supported", action.GetPatchType()) } + + if err = o.tracker.Patch(gvr, obj, ns, action.PatchOptions); err != nil { + return nil, err + } + + return obj, nil } type tracker struct { @@ -231,7 +310,11 @@ func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracke } } -func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } // Heuristic for list kind: original kind + List suffix. Might // not always be true but this tracker has a pretty limited // understanding of the actual API model. @@ -270,7 +353,12 @@ func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionK return list.DeepCopyObject(), nil } -func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } + t.lock.Lock() defer t.lock.Unlock() @@ -283,8 +371,12 @@ func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Inter return fakewatcher, nil } -func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { - errNotFound := errors.NewNotFound(gvr.GroupResource(), name) +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return nil, err + } + errNotFound := apierrors.NewNotFound(gvr.GroupResource(), name) t.lock.RLock() defer t.lock.RUnlock() @@ -305,7 +397,7 @@ func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime obj := matchingObj.DeepCopyObject() if status, ok := obj.(*metav1.Status); ok { if status.Status != metav1.StatusSuccess { - return nil, &errors.StatusError{ErrStatus: *status} + return nil, &apierrors.StatusError{ErrStatus: *status} } } @@ -352,11 +444,70 @@ func (t *tracker) Add(obj runtime.Object) error { return nil } -func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } return t.add(gvr, obj, ns, false) } -func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } + return t.add(gvr, obj, ns, true) +} + +func (t *tracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, opts ...metav1.PatchOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } + return t.add(gvr, patchedObject, ns, true) +} + +func (t *tracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error { + 
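// Editor's sketch (illustrative, not part of the upstream diff): the variadic
// options are normalized through the generic helper added near the end of
// this file; zero arguments yield the zero value, one argument is returned
// as-is, and more than one is rejected:
//
//	opts, err := assertOptionalSingleArgument([]metav1.PatchOptions{})      // opts == metav1.PatchOptions{}, err == nil
//	opts, err = assertOptionalSingleArgument([]metav1.PatchOptions{{}, {}}) // err != nil ("expected only one option argument but got 2")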
_, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } + applyConfigurationMeta, err := meta.Accessor(applyConfiguration) + if err != nil { + return err + } + + obj, err := t.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + + old, err := json.Marshal(obj) + if err != nil { + return err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + // For backward compatibility with behavior 1.30 and earlier, continue to handle apply + // via strategic merge patch (clients may use fake.NewClientset and ManagedFieldObjectTracker + // for full field manager support). + patch, err := json.Marshal(applyConfiguration) + if err != nil { + return err + } + mergedByte, err := strategicpatch.StrategicMergePatch(old, patch, obj) + if err != nil { + return err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return err + } + return t.add(gvr, obj, ns, true) } @@ -398,7 +549,7 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st if ns != newMeta.GetNamespace() { msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace()) - return errors.NewBadRequest(msg) + return apierrors.NewBadRequest(msg) } _, ok := t.objects[gvr] @@ -416,12 +567,12 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st t.objects[gvr][namespacedName] = obj return nil } - return errors.NewAlreadyExists(gr, newMeta.GetName()) + return apierrors.NewAlreadyExists(gr, newMeta.GetName()) } if replaceExisting { // Tried to update but no matching object was found. - return errors.NewNotFound(gr, newMeta.GetName()) + return apierrors.NewNotFound(gr, newMeta.GetName()) } t.objects[gvr][namespacedName] = obj @@ -451,19 +602,23 @@ func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error { return nil } -func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error { +func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error { + _, err := assertOptionalSingleArgument(opts) + if err != nil { + return err + } t.lock.Lock() defer t.lock.Unlock() objs, ok := t.objects[gvr] if !ok { - return errors.NewNotFound(gvr.GroupResource(), name) + return apierrors.NewNotFound(gvr.GroupResource(), name) } namespacedName := types.NamespacedName{Namespace: ns, Name: name} obj, ok := objs[namespacedName] if !ok { - return errors.NewNotFound(gvr.GroupResource(), name) + return apierrors.NewNotFound(gvr.GroupResource(), name) } delete(objs, namespacedName) @@ -473,6 +628,203 @@ func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error return nil } +type managedFieldObjectTracker struct { + ObjectTracker + scheme ObjectScheme + objectConverter runtime.ObjectConvertor + mapper meta.RESTMapper + typeConverter managedfields.TypeConverter +} + +var _ ObjectTracker = &managedFieldObjectTracker{} + +// NewFieldManagedObjectTracker returns an ObjectTracker that can be used to keep track +// of objects and managed fields for the fake clientset. Mostly useful for unit tests. 
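// Editor's sketch of intended usage (illustrative; scheme, codecs, and
// typeConverter are caller-supplied assumptions, not defined in this diff):
//
//	tracker := NewFieldManagedObjectTracker(scheme, codecs.UniversalDecoder(), typeConverter)
//	reaction := ObjectReaction(tracker) // apply patch actions now respect field ownership
//	_ = reaction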
+func NewFieldManagedObjectTracker(scheme *runtime.Scheme, decoder runtime.Decoder, typeConverter managedfields.TypeConverter) ObjectTracker { + return &managedFieldObjectTracker{ + ObjectTracker: NewObjectTracker(scheme, decoder), + scheme: scheme, + objectConverter: scheme, + mapper: testrestmapper.TestOnlyStaticRESTMapper(scheme), + typeConverter: typeConverter, + } +} + +func (t *managedFieldObjectTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.CreateOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objType, err := meta.TypeAccessor(obj) + if err != nil { + return err + } + // Stamp GVK + apiVersion, kind := gvk.ToAPIVersionAndKind() + objType.SetAPIVersion(apiVersion) + objType.SetKind(kind) + + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + liveObject, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + liveObject, err = t.scheme.New(gvk) + if err != nil { + return err + } + liveObject.GetObjectKind().SetGroupVersionKind(gvk) + } else if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(liveObject, obj, opts.FieldManager) + if err != nil { + return err + } + return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, opts) +} + +func (t *managedFieldObjectTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, vopts ...metav1.UpdateOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(oldObj, obj, opts.FieldManager) + if err != nil { + return err + } + + return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, opts) +} + +func (t *managedFieldObjectTracker) Patch(gvr schema.GroupVersionResource, patchedObject runtime.Object, ns string, vopts ...metav1.PatchOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + + objMeta, err := meta.Accessor(patchedObject) + if err != nil { + return err + } + oldObj, err := t.ObjectTracker.Get(gvr, ns, objMeta.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + objWithManagedFields, err := mgr.Update(oldObj, patchedObject, opts.FieldManager) + if err != nil { + return err + } + return t.ObjectTracker.Patch(gvr, objWithManagedFields, ns, vopts...) 
+} + +func (t *managedFieldObjectTracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, vopts ...metav1.PatchOptions) error { + opts, err := assertOptionalSingleArgument(vopts) + if err != nil { + return err + } + gvk, err := t.mapper.KindFor(gvr) + if err != nil { + return err + } + applyConfigurationMeta, err := meta.Accessor(applyConfiguration) + if err != nil { + return err + } + + exists := true + liveObject, err := t.ObjectTracker.Get(gvr, ns, applyConfigurationMeta.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + exists = false + liveObject, err = t.scheme.New(gvk) + if err != nil { + return err + } + liveObject.GetObjectKind().SetGroupVersionKind(gvk) + } else if err != nil { + return err + } + mgr, err := t.fieldManagerFor(gvk) + if err != nil { + return err + } + force := false + if opts.Force != nil { + force = *opts.Force + } + objWithManagedFields, err := mgr.Apply(liveObject, applyConfiguration, opts.FieldManager, force) + if err != nil { + return err + } + + if !exists { + return t.ObjectTracker.Create(gvr, objWithManagedFields, ns, metav1.CreateOptions{ + DryRun: opts.DryRun, + FieldManager: opts.FieldManager, + FieldValidation: opts.FieldValidation, + }) + } else { + return t.ObjectTracker.Update(gvr, objWithManagedFields, ns, metav1.UpdateOptions{ + DryRun: opts.DryRun, + FieldManager: opts.FieldManager, + FieldValidation: opts.FieldValidation, + }) + } +} + +func (t *managedFieldObjectTracker) fieldManagerFor(gvk schema.GroupVersionKind) (*managedfields.FieldManager, error) { + return managedfields.NewDefaultFieldManager( + t.typeConverter, + t.objectConverter, + &objectDefaulter{}, + t.scheme, + gvk, + gvk.GroupVersion(), + "", + nil) +} + +// objectDefaulter implements runtime.Defaulter, but it actually +// does nothing. +type objectDefaulter struct{} + +func (d *objectDefaulter) Default(_ runtime.Object) {} + // filterByNamespace returns all objects in the collection that // match provided namespace. Empty namespace matches // non-namespaced objects. @@ -579,3 +931,76 @@ func resourceCovers(resource string, action Action) bool { return false } + +// assertOptionalSingleArgument returns an error if there is more than one variadic argument. +// Otherwise, it returns the first variadic argument, or zero value if there are no arguments. +func assertOptionalSingleArgument[T any](arguments []T) (T, error) { + var a T + switch len(arguments) { + case 0: + return a, nil + case 1: + return arguments[0], nil + default: + return a, fmt.Errorf("expected only one option argument but got %d", len(arguments)) + } +} + +type TypeResolver interface { + Type(openAPIName string) typed.ParseableType +} + +type TypeConverter struct { + Scheme *runtime.Scheme + TypeResolver TypeResolver +} + +func (tc TypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + name, err := tc.openAPIName(gvk) + if err != nil { + return nil, err + } + t := tc.TypeResolver.Type(name) + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent(), opts...) + default: + return t.FromStructured(obj, opts...) 
+ } +} + +func (tc TypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + vu := value.AsValue().Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +func (tc TypeConverter) openAPIName(kind schema.GroupVersionKind) (string, error) { + example, err := tc.Scheme.New(kind) + if err != nil { + return "", err + } + rtype := reflect.TypeOf(example).Elem() + name := friendlyName(rtype.PkgPath() + "." + rtype.Name()) + return name, nil +} + +// This is a copy of openapi.friendlyName. +// TODO: consider introducing a shared version of this function in apimachinery. +func friendlyName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... + if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/OWNERS b/constraint/vendor/k8s.io/client-go/tools/cache/OWNERS index 921ac2fa0..fc441e0ef 100644 --- a/constraint/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/constraint/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -7,7 +7,6 @@ approvers: - deads2k - caesarxuchao - liggitt - - ncdc reviewers: - thockin - smarterclayton @@ -23,6 +22,6 @@ reviewers: - jsafrane - dims - ingvagabund - - ncdc emeritus_approvers: - lavalamp + - ncdc diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/controller.go b/constraint/vendor/k8s.io/client-go/tools/cache/controller.go index ee19a5af9..e523a6652 100644 --- a/constraint/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/constraint/vendor/k8s.io/client-go/tools/cache/controller.go @@ -59,6 +59,12 @@ type Config struct { // FullResyncPeriod is the period at which ShouldResync is considered. FullResyncPeriod time.Duration + // MinWatchTimeout, if set, will define the minimum timeout for watch requests send + // to kube-apiserver. However, values lower than 5m will not be honored to avoid + // negative performance impact on controlplane. + // Optional - if unset a default value of 5m will be used. + MinWatchTimeout time.Duration + // ShouldResync is periodically used by the reflector to determine // whether to Resync the Queue. If ShouldResync is `nil` or // returns true, it means the reflector should proceed with the @@ -138,6 +144,7 @@ func (c *controller) Run(stopCh <-chan struct{}) { c.config.Queue, ReflectorOptions{ ResyncPeriod: c.config.FullResyncPeriod, + MinWatchTimeout: c.config.MinWatchTimeout, TypeDescription: c.config.ObjectDescription, Clock: c.clock, }, @@ -346,6 +353,58 @@ func DeletionHandlingObjectToName(obj interface{}) (ObjectName, error) { return ObjectToName(obj) } +// InformerOptions configure a Reflector. +type InformerOptions struct { + // ListerWatcher implements List and Watch functions for the source of the resource + // the informer will be informing about. + ListerWatcher ListerWatcher + + // ObjectType is an object of the type that informer is expected to receive. + ObjectType runtime.Object + + // Handler defines functions that should called on object mutations. + Handler ResourceEventHandler + + // ResyncPeriod is the underlying Reflector's resync period. 
If non-zero, the store + // is re-synced with that frequency - Modify events are delivered even if objects + // didn't change. + // This is useful for synchronizing objects that configure external resources + // (e.g. configure cloud provider functionalities). + // Optional - if unset, store resyncing is not happening periodically. + ResyncPeriod time.Duration + + // MinWatchTimeout, if set, will define the minimum timeout for watch requests send + // to kube-apiserver. However, values lower than 5m will not be honored to avoid + // negative performance impact on controlplane. + // Optional - if unset a default value of 5m will be used. + MinWatchTimeout time.Duration + + // Indexers, if set, are the indexers for the received objects to optimize + // certain queries. + // Optional - if unset no indexes are maintained. + Indexers Indexers + + // Transform function, if set, will be called on all objects before they will be + // put into the Store and corresponding Add/Modify/Delete handlers will be invoked + // for them. + // Optional - if unset no additional transforming is happening. + Transform TransformFunc +} + +// NewInformerWithOptions returns a Store and a controller for populating the store +// while also providing event notifications. You should only used the returned +// Store for Get/List operations; Add/Modify/Deletes will cause the event +// notifications to be faulty. +func NewInformerWithOptions(options InformerOptions) (Store, Controller) { + var clientState Store + if options.Indexers == nil { + clientState = NewStore(DeletionHandlingMetaNamespaceKeyFunc) + } else { + clientState = NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers) + } + return clientState, newInformer(clientState, options) +} + // NewInformer returns a Store and a controller for populating the store // while also providing event notifications. You should only used the returned // Store for Get/List operations; Add/Modify/Deletes will cause the event @@ -360,6 +419,8 @@ func DeletionHandlingObjectToName(obj interface{}) (ObjectName, error) { // long as possible (until the upstream source closes the watch or times out, // or you stop the controller). // - h is the object you want notifications sent to. +// +// Deprecated: Use NewInformerWithOptions instead. func NewInformer( lw ListerWatcher, objType runtime.Object, @@ -369,7 +430,13 @@ func NewInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + } + return clientState, newInformer(clientState, options) } // NewIndexerInformer returns an Indexer and a Controller for populating the index @@ -387,6 +454,8 @@ func NewInformer( // or you stop the controller). // - h is the object you want notifications sent to. // - indexers is the indexer for the received object type. +// +// Deprecated: Use NewInformerWithOptions instead. func NewIndexerInformer( lw ListerWatcher, objType runtime.Object, @@ -397,7 +466,14 @@ func NewIndexerInformer( // This will hold the client state, as we know it. 
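	// Editor's sketch (illustrative, not part of the upstream diff): a caller
	// migrating off this deprecated helper would use the exported
	// NewInformerWithOptions with the same parameters, roughly:
	//
	//	store, ctrl := NewInformerWithOptions(InformerOptions{
	//		ListerWatcher: lw,
	//		ObjectType:    objType,
	//		Handler:       h,
	//		ResyncPeriod:  resyncPeriod,
	//		Indexers:      indexers,
	//	})
	//	_, _ = store, ctrl // the returned Store is backed by an Indexer when Indexers is set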
clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Indexers: indexers, + } + return clientState, newInformer(clientState, options) } // NewTransformingInformer returns a Store and a controller for populating @@ -407,6 +483,8 @@ func NewIndexerInformer( // The given transform function will be called on all objects before they will // put into the Store and corresponding Add/Modify/Delete handlers will // be invoked for them. +// +// Deprecated: Use NewInformerWithOptions instead. func NewTransformingInformer( lw ListerWatcher, objType runtime.Object, @@ -417,7 +495,14 @@ func NewTransformingInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Transform: transformer, + } + return clientState, newInformer(clientState, options) } // NewTransformingIndexerInformer returns an Indexer and a controller for @@ -427,6 +512,8 @@ func NewTransformingInformer( // The given transform function will be called on all objects before they will // be put into the Index and corresponding Add/Modify/Delete handlers will // be invoked for them. +// +// Deprecated: Use NewInformerWithOptions instead. func NewTransformingIndexerInformer( lw ListerWatcher, objType runtime.Object, @@ -438,7 +525,15 @@ func NewTransformingIndexerInformer( // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) + options := InformerOptions{ + ListerWatcher: lw, + ObjectType: objType, + Handler: h, + ResyncPeriod: resyncPeriod, + Indexers: indexers, + Transform: transformer, + } + return clientState, newInformer(clientState, options) } // Multiplexes updates in the form of a list of Deltas into a Store, and informs @@ -481,42 +576,29 @@ func processDeltas( // providing event notifications. // // Parameters -// - lw is list and watch functions for the source of the resource you want to -// be informed of. -// - objType is an object of the type that you expect to receive. -// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// - h is the object you want notifications sent to. // - clientState is the store you want to populate -func newInformer( - lw ListerWatcher, - objType runtime.Object, - resyncPeriod time.Duration, - h ResourceEventHandler, - clientState Store, - transformer TransformFunc, -) Controller { +// - options contain the options to configure the controller +func newInformer(clientState Store, options InformerOptions) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. 
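	// Editor's sketch (illustrative, not part of the upstream diff): a typical
	// options.Transform trims fields the consumer never reads, and per the
	// updated TransformFunc contract in delta_fifo.go it should be idempotent
	// (assumes k8s.io/apimachinery/pkg/api/meta is imported as meta):
	//
	//	transform := func(obj interface{}) (interface{}, error) {
	//		if acc, err := meta.Accessor(obj); err == nil {
	//			acc.SetManagedFields(nil) // dropping twice is harmless
	//		}
	//		return obj, nil
	//	}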
fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ KnownObjects: clientState, EmitDeltaTypeReplaced: true, - Transformer: transformer, + Transformer: options.Transform, }) cfg := &Config{ Queue: fifo, - ListerWatcher: lw, - ObjectType: objType, - FullResyncPeriod: resyncPeriod, + ListerWatcher: options.ListerWatcher, + ObjectType: options.ObjectType, + FullResyncPeriod: options.ResyncPeriod, + MinWatchTimeout: options.MinWatchTimeout, RetryOnError: false, Process: func(obj interface{}, isInInitialList bool) error { if deltas, ok := obj.(Deltas); ok { - return processDeltas(h, clientState, deltas, isInInitialList) + return processDeltas(options.Handler, clientState, deltas, isInInitialList) } return errors.New("object given as Process argument is not Deltas") }, diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/constraint/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 7160bb1ee..ce74dfb6f 100644 --- a/constraint/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/constraint/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -139,20 +139,17 @@ type DeltaFIFO struct { } // TransformFunc allows for transforming an object before it will be processed. -// TransformFunc (similarly to ResourceEventHandler functions) should be able -// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown. -// -// New in v1.27: In such cases, the contained object will already have gone -// through the transform object separately (when it was added / updated prior -// to the delete), so the TransformFunc can likely safely ignore such objects -// (i.e., just return the input object). // // The most common usage pattern is to clean-up some parts of the object to // reduce component memory usage if a given component doesn't care about them. // -// New in v1.27: unless the object is a DeletedFinalStateUnknown, TransformFunc -// sees the object before any other actor, and it is now safe to mutate the -// object in place instead of making a copy. +// New in v1.27: TransformFunc sees the object before any other actor, and it +// is now safe to mutate the object in place instead of making a copy. +// +// It's recommended for the TransformFunc to be idempotent. +// It MUST be idempotent if objects already present in the cache are passed to +// the Replace() to avoid re-mutating them. Default informers do not pass +// existing objects to Replace though. // // Note that TransformFunc is called while inserting objects into the // notification queue and is therefore extremely performance sensitive; please @@ -440,22 +437,38 @@ func isDeletionDup(a, b *Delta) *Delta { // queueActionLocked appends to the delta list for the object. // Caller must lock first. func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { + return f.queueActionInternalLocked(actionType, actionType, obj) +} + +// queueActionInternalLocked appends to the delta list for the object. +// The actionType is emitted and must honor emitDeltaTypeReplaced. +// The internalActionType is only used within this function and must +// ignore emitDeltaTypeReplaced. +// Caller must lock first. +func (f *DeltaFIFO) queueActionInternalLocked(actionType, internalActionType DeltaType, obj interface{}) error { id, err := f.KeyOf(obj) if err != nil { return KeyError{obj, err} } // Every object comes through this code path once, so this is a good - // place to call the transform func. 
If obj is a - // DeletedFinalStateUnknown tombstone, then the containted inner object - // will already have gone through the transformer, but we document that - // this can happen. In cases involving Replace(), such an object can - // come through multiple times. + // place to call the transform func. + // + // If obj is a DeletedFinalStateUnknown tombstone or the action is a Sync, + // then the object have already gone through the transformer. + // + // If the objects already present in the cache are passed to Replace(), + // the transformer must be idempotent to avoid re-mutating them, + // or coordinate with all readers from the cache to avoid data races. + // Default informers do not pass existing objects to Replace. if f.transformer != nil { - var err error - obj, err = f.transformer(obj) - if err != nil { - return err + _, isTombstone := obj.(DeletedFinalStateUnknown) + if !isTombstone && internalActionType != Sync { + var err error + obj, err = f.transformer(obj) + if err != nil { + return err + } } } @@ -638,7 +651,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error { return KeyError{item, err} } keys.Insert(key) - if err := f.queueActionLocked(action, item); err != nil { + if err := f.queueActionInternalLocked(action, Replaced, item); err != nil { return fmt.Errorf("couldn't enqueue object: %v", err) } } diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/listers.go b/constraint/vendor/k8s.io/client-go/tools/cache/listers.go index 420ca7b2a..a60f44943 100644 --- a/constraint/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/constraint/vendor/k8s.io/client-go/tools/cache/listers.go @@ -30,7 +30,7 @@ import ( // AppendFunc is used to add a matching item to whatever list the caller is using type AppendFunc func(interface{}) -// ListAll calls appendFn with each value retrieved from store which matches the selector. +// ListAll lists items in the store matching the given selector, calling appendFn on each one. func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { selectAll := selector.Empty() for _, m := range store.List() { @@ -51,7 +51,9 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { return nil } -// ListAllByNamespace used to list items belongs to namespace from Indexer. +// ListAllByNamespace lists items in the given namespace in the store matching the given selector, +// calling appendFn on each one. +// If a blank namespace (NamespaceAll) is specified, this delegates to ListAll(). func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { if namespace == metav1.NamespaceAll { return ListAll(indexer, selector, appendFn) diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/listwatch.go b/constraint/vendor/k8s.io/client-go/tools/cache/listwatch.go index 10b7e6512..f5708ffeb 100644 --- a/constraint/vendor/k8s.io/client-go/tools/cache/listwatch.go +++ b/constraint/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -36,6 +36,10 @@ type Lister interface { // Watcher is any object that knows how to start a watch on a resource. type Watcher interface { // Watch should begin a watch at the specified version. + // + // If Watch returns an error, it should handle its own cleanup, including + // but not limited to calling Stop() on the watch, if one was constructed. + // This allows the caller to ignore the watch, if the error is non-nil. 
Watch(options metav1.ListOptions) (watch.Interface, error) } diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/reflector.go b/constraint/vendor/k8s.io/client-go/tools/cache/reflector.go index f733e244c..030b45297 100644 --- a/constraint/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/constraint/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "math/rand" - "os" "reflect" "strings" "sync" @@ -39,6 +38,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + clientfeatures "k8s.io/client-go/features" "k8s.io/client-go/tools/pager" "k8s.io/klog/v2" "k8s.io/utils/clock" @@ -49,9 +49,15 @@ import ( const defaultExpectedTypeName = "" +var ( + // We try to spread the load on apiserver by setting timeouts for + // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. + defaultMinWatchTimeout = 5 * time.Minute +) + // Reflector watches a specified resource and causes all changes to be reflected in the given store. type Reflector struct { - // name identifies this reflector. By default it will be a file:line if possible. + // name identifies this reflector. By default, it will be a file:line if possible. name string // The name of the type we expect to place in the store. The name // will be the stringification of expectedGVK if provided, and the @@ -72,6 +78,8 @@ type Reflector struct { // backoff manages backoff of ListWatch backoffManager wait.BackoffManager resyncPeriod time.Duration + // minWatchTimeout defines the minimum timeout for watch requests. + minWatchTimeout time.Duration // clock allows tests to manipulate time clock clock.Clock // paginatedResult defines whether pagination should be forced for list calls. @@ -113,6 +121,14 @@ type Reflector struct { UseWatchList *bool } +func (r *Reflector) Name() string { + return r.name +} + +func (r *Reflector) TypeDescription() string { + return r.typeDescription +} + // ResourceVersionUpdater is an interface that allows store implementation to // track the current resource version of the reflector. This is especially // important if storage bookmarks are enabled. @@ -151,12 +167,6 @@ func DefaultWatchErrorHandler(r *Reflector, err error) { } } -var ( - // We try to spread the load on apiserver by setting timeouts for - // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. - minWatchTimeout = 5 * time.Minute -) - // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector // The indexer is configured to key on namespace func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { @@ -194,6 +204,10 @@ type ReflectorOptions struct { // (do not resync). ResyncPeriod time.Duration + // MinWatchTimeout, if non-zero, defines the minimum timeout for watch requests send to kube-apiserver. + // However, values lower than 5m will not be honored to avoid negative performance impact on controlplane. + MinWatchTimeout time.Duration + // Clock allows tests to control time. 
If unset defaults to clock.RealClock{} Clock clock.Clock } @@ -213,9 +227,14 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S if reflectorClock == nil { reflectorClock = clock.RealClock{} } + minWatchTimeout := defaultMinWatchTimeout + if options.MinWatchTimeout > defaultMinWatchTimeout { + minWatchTimeout = options.MinWatchTimeout + } r := &Reflector{ name: options.Name, resyncPeriod: options.ResyncPeriod, + minWatchTimeout: minWatchTimeout, typeDescription: options.TypeDescription, listerWatcher: lw, store: store, @@ -243,9 +262,7 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S // don't overwrite UseWatchList if already set // because the higher layers (e.g. storage/cacher) disabled it on purpose if r.UseWatchList == nil { - if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 { - r.UseWatchList = ptr.To(true) - } + r.UseWatchList = ptr.To(clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient)) } return r @@ -357,12 +374,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { } klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name) - - resyncerrc := make(chan error, 1) - cancelCh := make(chan struct{}) - defer close(cancelCh) - go r.startResync(stopCh, cancelCh, resyncerrc) - return r.watch(w, stopCh, resyncerrc) + return r.watchWithResync(w, stopCh) } // startResync periodically calls r.store.Resync() method. @@ -393,6 +405,15 @@ func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{} } } +// watchWithResync runs watch with startResync in the background. +func (r *Reflector) watchWithResync(w watch.Interface, stopCh <-chan struct{}) error { + resyncerrc := make(chan error, 1) + cancelCh := make(chan struct{}) + defer close(cancelCh) + go r.startResync(stopCh, cancelCh, resyncerrc) + return r.watch(w, stopCh, resyncerrc) +} + // watch simply starts a watch request with the server. func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error { var err error @@ -415,7 +436,7 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc start := r.clock.Now() if w == nil { - timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options := metav1.ListOptions{ ResourceVersion: r.LastSyncResourceVersion(), // We want to avoid situations of hanging watchers. Stop any watchers that do not @@ -442,13 +463,14 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc } } - err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh) + err = handleWatch(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, + r.clock, resyncerrc, stopCh) // Ensure that watch will not be reused across iterations. w.Stop() w = nil retry.After(err) if err != nil { - if err != errorStopRequested { + if !errors.Is(err, errorStopRequested) { switch { case isExpiredError(err): // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already @@ -642,7 +664,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { // TODO(#115478): large "list", slow clients, slow network, p&f // might slow down streaming and eventually fail. 
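The new MinWatchTimeout option can be supplied through ReflectorOptions; as the clamp in NewReflectorWithOptions shows, values at or below the 5-minute default fall back to the default. UseWatchList is now driven by the WatchListClient client-go feature gate rather than the ENABLE_CLIENT_GO_WATCH_LIST_ALPHA environment variable. A minimal sketch, assuming an existing ListerWatcher and Store:

package informerexample

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

func newPodReflector(lw cache.ListerWatcher, store cache.Store) *cache.Reflector {
	return cache.NewReflectorWithOptions(lw, &v1.Pod{}, store, cache.ReflectorOptions{
		Name:            "pod-reflector",
		ResyncPeriod:    0,
		MinWatchTimeout: 30 * time.Minute, // honored; anything <= 5m uses the default
	})
}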
// maybe in such a case we should retry with an increased timeout? - timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) + timeoutSeconds := int64(r.minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options := metav1.ListOptions{ ResourceVersion: lastKnownRV, AllowWatchBookmarks: true, @@ -659,14 +681,12 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { } return nil, err } - bookmarkReceived := pointer.Bool(false) - err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, + watchListBookmarkReceived, err := handleListWatch(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription, func(rv string) { resourceVersion = rv }, - bookmarkReceived, r.clock, make(chan error), stopCh) if err != nil { w.Stop() // stop and retry with clean state - if err == errorStopRequested { + if errors.Is(err, errorStopRequested) { return nil, nil } if isErrorRetriableWithSideEffectsFn(err) { @@ -674,7 +694,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { } return nil, err } - if *bookmarkReceived { + if watchListBookmarkReceived { break } } @@ -686,10 +706,10 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { // we utilize the temporaryStore to ensure independence from the current store implementation. // as of today, the store is implemented as a queue and will be drained by the higher-level // component as soon as it finishes replacing the content. - checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore) + checkWatchListDataConsistencyIfRequested(wait.ContextForChannel(stopCh), r.name, resourceVersion, wrapListFuncWithContext(r.listerWatcher.List), temporaryStore.List) - if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { - return nil, fmt.Errorf("unable to sync watch-list result: %v", err) + if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { + return nil, fmt.Errorf("unable to sync watch-list result: %w", err) } initTrace.Step("SyncWith done") r.setLastSyncResourceVersion(resourceVersion) @@ -706,8 +726,12 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err return r.store.Replace(found, resourceVersion) } -// watchHandler watches w and sets setLastSyncResourceVersion -func watchHandler(start time.Time, +// handleListWatch consumes events from w, updates the Store, and records the +// last seen ResourceVersion, to allow continuing from that ResourceVersion on +// retry. If successful, the watcher will be left open after receiving the +// initial set of objects, to allow watching for future events. +func handleListWatch( + start time.Time, w watch.Interface, store Store, expectedType reflect.Type, @@ -715,31 +739,77 @@ func watchHandler(start time.Time, name string, expectedTypeName string, setLastSyncResourceVersion func(string), - exitOnInitialEventsEndBookmark *bool, clock clock.Clock, - errc chan error, + errCh chan error, + stopCh <-chan struct{}, +) (bool, error) { + exitOnWatchListBookmarkReceived := true + return handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName, + setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh) +} + +// handleListWatch consumes events from w, updates the Store, and records the +// last seen ResourceVersion, to allow continuing from that ResourceVersion on +// retry. 
The watcher will always be stopped on exit. +func handleWatch( + start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + clock clock.Clock, + errCh chan error, stopCh <-chan struct{}, ) error { + exitOnWatchListBookmarkReceived := false + _, err := handleAnyWatch(start, w, store, expectedType, expectedGVK, name, expectedTypeName, + setLastSyncResourceVersion, exitOnWatchListBookmarkReceived, clock, errCh, stopCh) + return err +} + +// handleAnyWatch consumes events from w, updates the Store, and records the last +// seen ResourceVersion, to allow continuing from that ResourceVersion on retry. +// If exitOnWatchListBookmarkReceived is true, the watch events will be consumed +// until a bookmark event is received with the WatchList annotation present. +// Returns true (watchListBookmarkReceived) if the WatchList bookmark was +// received, even if exitOnWatchListBookmarkReceived is false. +// The watcher will always be stopped, unless exitOnWatchListBookmarkReceived is +// true and watchListBookmarkReceived is true. This allows the same watch stream +// to be re-used by the caller to continue watching for new events. +func handleAnyWatch(start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + exitOnWatchListBookmarkReceived bool, + clock clock.Clock, + errCh chan error, + stopCh <-chan struct{}, +) (bool, error) { + watchListBookmarkReceived := false eventCount := 0 - if exitOnInitialEventsEndBookmark != nil { - // set it to false just in case somebody - // made it positive - *exitOnInitialEventsEndBookmark = false - } + initialEventsEndBookmarkWarningTicker := newInitialEventsEndBookmarkTicker(name, clock, start, exitOnWatchListBookmarkReceived) + defer initialEventsEndBookmarkWarningTicker.Stop() loop: for { select { case <-stopCh: - return errorStopRequested - case err := <-errc: - return err + return watchListBookmarkReceived, errorStopRequested + case err := <-errCh: + return watchListBookmarkReceived, err case event, ok := <-w.ResultChan(): if !ok { break loop } if event.Type == watch.Error { - return apierrors.FromObject(event.Object) + return watchListBookmarkReceived, apierrors.FromObject(event.Object) } if expectedType != nil { if e, a := expectedType, reflect.TypeOf(event.Object); e != a { @@ -780,10 +850,8 @@ loop: } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion - if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" { - if exitOnInitialEventsEndBookmark != nil { - *exitOnInitialEventsEndBookmark = true - } + if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" { + watchListBookmarkReceived = true } default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) @@ -793,20 +861,23 @@ loop: rvu.UpdateResourceVersion(resourceVersion) } eventCount++ - if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark { + if exitOnWatchListBookmarkReceived && watchListBookmarkReceived { watchDuration := clock.Since(start) klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration) - return nil + return watchListBookmarkReceived, nil } + 
initialEventsEndBookmarkWarningTicker.observeLastEventTimeStamp(clock.Now()) + case <-initialEventsEndBookmarkWarningTicker.C(): + initialEventsEndBookmarkWarningTicker.warnIfExpired() } } watchDuration := clock.Since(start) if watchDuration < 1*time.Second && eventCount == 0 { - return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name) + return watchListBookmarkReceived, fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name) } klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount) - return nil + return watchListBookmarkReceived, nil } // LastSyncResourceVersion is the resource version observed when last sync with the underlying store @@ -918,3 +989,95 @@ func isWatchErrorRetriable(err error) bool { } return false } + +// wrapListFuncWithContext simply wraps ListFunction into another function that accepts a context and ignores it. +func wrapListFuncWithContext(listFn ListFunc) func(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) { + return func(_ context.Context, options metav1.ListOptions) (runtime.Object, error) { + return listFn(options) + } +} + +// initialEventsEndBookmarkTicker a ticker that produces a warning if the bookmark event +// which marks the end of the watch stream, has not been received within the defined tick interval. +// +// Note: +// The methods exposed by this type are not thread-safe. +type initialEventsEndBookmarkTicker struct { + clock.Ticker + clock clock.Clock + name string + + watchStart time.Time + tickInterval time.Duration + lastEventObserveTime time.Time +} + +// newInitialEventsEndBookmarkTicker returns a noop ticker if exitOnInitialEventsEndBookmarkRequested is false. +// Otherwise, it returns a ticker that exposes a method producing a warning if the bookmark event, +// which marks the end of the watch stream, has not been received within the defined tick interval. +// +// Note that the caller controls whether to call t.C() and t.Stop(). +// +// In practice, the reflector exits the watchHandler as soon as the bookmark event is received and calls the t.C() method. 
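The bookmark check above now keys off metav1.InitialEventsAnnotationKey instead of a string literal. A client consuming a watch-list stream directly can detect the same marker; a minimal sketch, assuming w was opened with SendInitialEvents and AllowWatchBookmarks set:

package informerexample

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// waitForInitialEventsEnd drains events until the synthetic bookmark that
// marks the end of the initial list is seen.
func waitForInitialEventsEnd(w watch.Interface) error {
	for event := range w.ResultChan() {
		if event.Type != watch.Bookmark {
			continue // real code would handle Added/Modified/Deleted here
		}
		acc, err := meta.Accessor(event.Object)
		if err != nil {
			return err
		}
		if acc.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" {
			return nil // initial state delivered; subsequent events are live updates
		}
	}
	return nil
}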
+func newInitialEventsEndBookmarkTicker(name string, c clock.Clock, watchStart time.Time, exitOnWatchListBookmarkReceived bool) *initialEventsEndBookmarkTicker { + return newInitialEventsEndBookmarkTickerInternal(name, c, watchStart, 10*time.Second, exitOnWatchListBookmarkReceived) +} + +func newInitialEventsEndBookmarkTickerInternal(name string, c clock.Clock, watchStart time.Time, tickInterval time.Duration, exitOnWatchListBookmarkReceived bool) *initialEventsEndBookmarkTicker { + clockWithTicker, ok := c.(clock.WithTicker) + if !ok || !exitOnWatchListBookmarkReceived { + if exitOnWatchListBookmarkReceived { + klog.Warningf("clock does not support WithTicker interface but exitOnInitialEventsEndBookmark was requested") + } + return &initialEventsEndBookmarkTicker{ + Ticker: &noopTicker{}, + } + } + + return &initialEventsEndBookmarkTicker{ + Ticker: clockWithTicker.NewTicker(tickInterval), + clock: c, + name: name, + watchStart: watchStart, + tickInterval: tickInterval, + } +} + +func (t *initialEventsEndBookmarkTicker) observeLastEventTimeStamp(lastEventObserveTime time.Time) { + t.lastEventObserveTime = lastEventObserveTime +} + +func (t *initialEventsEndBookmarkTicker) warnIfExpired() { + if err := t.produceWarningIfExpired(); err != nil { + klog.Warning(err) + } +} + +// produceWarningIfExpired returns an error that represents a warning when +// the time elapsed since the last received event exceeds the tickInterval. +// +// Note that this method should be called when t.C() yields a value. +func (t *initialEventsEndBookmarkTicker) produceWarningIfExpired() error { + if _, ok := t.Ticker.(*noopTicker); ok { + return nil /*noop ticker*/ + } + if t.lastEventObserveTime.IsZero() { + return fmt.Errorf("%s: awaiting required bookmark event for initial events stream, no events received for %v", t.name, t.clock.Since(t.watchStart)) + } + elapsedTime := t.clock.Now().Sub(t.lastEventObserveTime) + hasBookmarkTimerExpired := elapsedTime >= t.tickInterval + + if !hasBookmarkTimerExpired { + return nil + } + return fmt.Errorf("%s: hasn't received required bookmark event marking the end of initial events stream, received last event %v ago", t.name, elapsedTime) +} + +var _ clock.Ticker = &noopTicker{} + +// TODO(#115478): move to k8s/utils repo +type noopTicker struct{} + +func (t *noopTicker) C() <-chan time.Time { return nil } + +func (t *noopTicker) Stop() {} diff --git a/constraint/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/constraint/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go index aa3027d71..a7e0d9c43 100644 --- a/constraint/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go +++ b/constraint/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The Kubernetes Authors. +Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,102 +18,26 @@ package cache import ( "context" - "os" - "sort" - "strconv" - "time" - "github.com/google/go-cmp/cmp" - - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" + "k8s.io/client-go/util/consistencydetector" ) -var dataConsistencyDetectionEnabled = false - -func init() { - dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) -} - -// checkWatchListConsistencyIfRequested performs a data consistency check only when +// checkWatchListDataConsistencyIfRequested performs a data consistency check only when // the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. // // The consistency check is meant to be enforced only in the CI, not in production. // The check ensures that data retrieved by the watch-list api call -// is exactly the same as data received by the standard list api call. +// is exactly the same as data received by the standard list api call against etcd. // // Note that this function will panic when data inconsistency is detected. // This is intentional because we want to catch it in the CI. -func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { - if !dataConsistencyDetectionEnabled { - return - } - checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store) -} - -// checkWatchListConsistency exists solely for testing purposes. -// we cannot use checkWatchListConsistencyIfRequested because -// it is guarded by an environmental variable. -// we cannot manipulate the environmental variable because -// it will affect other tests in this package. 
-func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { - klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity) - opts := metav1.ListOptions{ - ResourceVersion: lastSyncedResourceVersion, - ResourceVersionMatch: metav1.ResourceVersionMatchExact, - } - var list runtime.Object - err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) { - list, err = listerWatcher.List(opts) - if err != nil { - // the consistency check will only be enabled in the CI - // and LIST calls in general will be retired by the client-go library - // if we fail simply log and retry - klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) - return false, nil - } - return true, nil - }) - if err != nil { - klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err) +func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) { + if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() { return } - - rawListItems, err := meta.ExtractListWithAlloc(list) - if err != nil { - panic(err) // this should never happen - } - - listItems := toMetaObjectSliceOrDie(rawListItems) - storeItems := toMetaObjectSliceOrDie(store.List()) - - sort.Sort(byUID(listItems)) - sort.Sort(byUID(storeItems)) - - if !cmp.Equal(listItems, storeItems) { - klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems)) - msg := "data inconsistency detected for the watch-list feature, panicking!" - panic(msg) - } -} - -type byUID []metav1.Object - -func (a byUID) Len() int { return len(a) } -func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } -func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { - result := make([]metav1.Object, len(s)) - for i, v := range s { - m, err := meta.Accessor(v) - if err != nil { - panic(err) - } - result[i] = m - } - return result + // for informers we pass an empty ListOptions because + // listFn might be wrapped for filtering during informer construction. 
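The detector now delegates to k8s.io/client-go/util/consistencydetector and remains opt-in: it is enabled by exporting KUBE_WATCHLIST_INCONSISTENCY_DETECTOR=true before the binary starts, and it panics on any mismatch, so it is meant for CI only. A minimal sketch of a CI-side guard, assuming a test binary run in such a job:

package informerexample

import (
	"testing"

	"k8s.io/client-go/util/consistencydetector"
)

func TestWatchListConsistencyDetectorWiredUp(t *testing.T) {
	// The environment variable is read once at startup, so it cannot be
	// toggled from inside the test; we can only observe the result.
	if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() {
		t.Skip("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR not set at startup")
	}
}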
+ consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn) } diff --git a/constraint/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/constraint/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go index dd5f91806..261dcacb5 100644 --- a/constraint/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go +++ b/constraint/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "os" - "path" "path/filepath" "reflect" "strings" @@ -115,7 +114,7 @@ func ShortenConfig(config *Config) { // FlattenConfig changes the config object into a self-contained config (useful for making secrets) func FlattenConfig(config *Config) error { for key, authInfo := range config.AuthInfos { - baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "") + baseDir, err := MakeAbs(filepath.Dir(authInfo.LocationOfOrigin), "") if err != nil { return err } @@ -130,7 +129,7 @@ func FlattenConfig(config *Config) error { config.AuthInfos[key] = authInfo } for key, cluster := range config.Clusters { - baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "") + baseDir, err := MakeAbs(filepath.Dir(cluster.LocationOfOrigin), "") if err != nil { return err } diff --git a/constraint/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go b/constraint/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go index 35bb5dde1..c575652b1 100644 --- a/constraint/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go +++ b/constraint/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go @@ -50,7 +50,7 @@ func init() { Scheme = runtime.NewScheme() utilruntime.Must(api.AddToScheme(Scheme)) utilruntime.Must(v1.AddToScheme(Scheme)) - yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme) + yamlSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, Scheme, Scheme, json.SerializerOptions{Yaml: true}) Codec = versioning.NewDefaultingCodecForScheme( Scheme, yamlSerializer, diff --git a/constraint/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/constraint/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index 952f6d7eb..cd0a8649b 100644 --- a/constraint/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/constraint/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -29,8 +29,6 @@ import ( clientauth "k8s.io/client-go/tools/auth" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" "k8s.io/klog/v2" - - "github.com/imdario/mergo" ) const ( @@ -241,45 +239,37 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { if err != nil { return nil, err } - mergo.Merge(clientConfig, userAuthPartialConfig, mergo.WithOverride) + if err := merge(clientConfig, userAuthPartialConfig); err != nil { + return nil, err + } - serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo) - if err != nil { + serverAuthPartialConfig := getServerIdentificationPartialConfig(configClusterInfo) + if err := merge(clientConfig, serverAuthPartialConfig); err != nil { return nil, err } - mergo.Merge(clientConfig, serverAuthPartialConfig, mergo.WithOverride) } return clientConfig, nil } // clientauth.Info object contain both user identification and server identification. We want different precedence orders for -// both, so we have to split the objects and merge them separately -// we want this order of precedence for the server identification -// 1. 
configClusterInfo (the final result of command line flags and merged .kubeconfig files) -// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) -// 3. load the ~/.kubernetes_auth file as a default -func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { - mergedConfig := &restclient.Config{} +// both, so we have to split the objects and merge them separately. - // configClusterInfo holds the information identify the server provided by .kubeconfig +// getServerIdentificationPartialConfig extracts server identification information from configClusterInfo +// (the final result of command line flags and merged .kubeconfig files). +func getServerIdentificationPartialConfig(configClusterInfo clientcmdapi.Cluster) *restclient.Config { configClientConfig := &restclient.Config{} configClientConfig.CAFile = configClusterInfo.CertificateAuthority configClientConfig.CAData = configClusterInfo.CertificateAuthorityData configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify configClientConfig.ServerName = configClusterInfo.TLSServerName - mergo.Merge(mergedConfig, configClientConfig, mergo.WithOverride) - return mergedConfig, nil + return configClientConfig } -// clientauth.Info object contain both user identification and server identification. We want different precedence orders for -// both, so we have to split the objects and merge them separately -// we want this order of precedence for user identification -// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files) -// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) -// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file -// 4. if there is not enough information to identify the user, prompt if possible +// getUserIdentificationPartialConfig extracts user identification information from configAuthInfo +// (the final result of command line flags and merged .kubeconfig files); +// if the information available there is insufficient, it prompts (if possible) for additional information. 
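The hand-rolled merge helper (added later in this diff as merge.go) replaces mergo.Merge(dst, src, mergo.WithOverride) throughout clientcmd. A minimal package-internal sketch of its documented semantics; the example values are hypothetical:

package clientcmd

import clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

func exampleMergeSemantics() (*clientcmdapi.Config, error) {
	dst := clientcmdapi.NewConfig()
	dst.CurrentContext = "old"
	dst.Clusters["a"] = clientcmdapi.NewCluster()

	src := clientcmdapi.NewConfig()
	src.CurrentContext = "new"
	src.Clusters["b"] = clientcmdapi.NewCluster()

	// After merging: CurrentContext == "new" (a non-zero src field overwrites),
	// and Clusters holds both "a" and "b" (maps are shallow-merged, src keys win).
	if err := merge(dst, src); err != nil {
		return nil, err
	}
	return dst, nil
}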
func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { mergedConfig := &restclient.Config{} @@ -338,8 +328,12 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo) previouslyMergedConfig := mergedConfig mergedConfig = &restclient.Config{} - mergo.Merge(mergedConfig, promptedConfig, mergo.WithOverride) - mergo.Merge(mergedConfig, previouslyMergedConfig, mergo.WithOverride) + if err := merge(mergedConfig, promptedConfig); err != nil { + return nil, err + } + if err := merge(mergedConfig, previouslyMergedConfig); err != nil { + return nil, err + } config.promptedCredentials.username = mergedConfig.Username config.promptedCredentials.password = mergedConfig.Password } @@ -347,7 +341,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI return mergedConfig, nil } -// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only user identification information +// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged for only user identification information func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config { config := &restclient.Config{} config.Username = info.User @@ -507,12 +501,16 @@ func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) { mergedContext := clientcmdapi.NewContext() if configContext, exists := contexts[contextName]; exists { - mergo.Merge(mergedContext, configContext, mergo.WithOverride) + if err := merge(mergedContext, configContext); err != nil { + return clientcmdapi.Context{}, err + } } else if required { return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName) } if config.overrides != nil { - mergo.Merge(mergedContext, config.overrides.Context, mergo.WithOverride) + if err := merge(mergedContext, &config.overrides.Context); err != nil { + return clientcmdapi.Context{}, err + } } return *mergedContext, nil @@ -525,12 +523,16 @@ func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) { mergedAuthInfo := clientcmdapi.NewAuthInfo() if configAuthInfo, exists := authInfos[authInfoName]; exists { - mergo.Merge(mergedAuthInfo, configAuthInfo, mergo.WithOverride) + if err := merge(mergedAuthInfo, configAuthInfo); err != nil { + return clientcmdapi.AuthInfo{}, err + } } else if required { return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName) } if config.overrides != nil { - mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo, mergo.WithOverride) + if err := merge(mergedAuthInfo, &config.overrides.AuthInfo); err != nil { + return clientcmdapi.AuthInfo{}, err + } } return *mergedAuthInfo, nil @@ -543,15 +545,21 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) { mergedClusterInfo := clientcmdapi.NewCluster() if config.overrides != nil { - mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults, mergo.WithOverride) + if err := merge(mergedClusterInfo, &config.overrides.ClusterDefaults); err != nil { + return clientcmdapi.Cluster{}, err + } } if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { - mergo.Merge(mergedClusterInfo, configClusterInfo, mergo.WithOverride) + if err := merge(mergedClusterInfo, 
configClusterInfo); err != nil { + return clientcmdapi.Cluster{}, err + } } else if required { return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName) } if config.overrides != nil { - mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo, mergo.WithOverride) + if err := merge(mergedClusterInfo, &config.overrides.ClusterInfo); err != nil { + return clientcmdapi.Cluster{}, err + } } // * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data diff --git a/constraint/vendor/k8s.io/client-go/tools/clientcmd/config.go b/constraint/vendor/k8s.io/client-go/tools/clientcmd/config.go index 31f896316..2cd213ccb 100644 --- a/constraint/vendor/k8s.io/client-go/tools/clientcmd/config.go +++ b/constraint/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -19,7 +19,6 @@ package clientcmd import ( "errors" "os" - "path" "path/filepath" "reflect" "sort" @@ -148,7 +147,7 @@ func NewDefaultPathOptions() *PathOptions { EnvVar: RecommendedConfigPathEnvVar, ExplicitFileFlag: RecommendedConfigPathFlag, - GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), + GlobalFileSubpath: filepath.Join(RecommendedHomeDir, RecommendedFileName), LoadingRules: NewDefaultClientConfigLoadingRules(), } diff --git a/constraint/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/constraint/vendor/k8s.io/client-go/tools/clientcmd/loader.go index b75737f1c..c900e5fd1 100644 --- a/constraint/vendor/k8s.io/client-go/tools/clientcmd/loader.go +++ b/constraint/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -24,7 +24,6 @@ import ( goruntime "runtime" "strings" - "github.com/imdario/mergo" "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/runtime" @@ -248,7 +247,9 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { mapConfig := clientcmdapi.NewConfig() for _, kubeconfig := range kubeconfigs { - mergo.Merge(mapConfig, kubeconfig, mergo.WithOverride) + if err := merge(mapConfig, kubeconfig); err != nil { + return nil, err + } } // merge all of the struct values in the reverse order so that priority is given correctly @@ -256,14 +257,20 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { nonMapConfig := clientcmdapi.NewConfig() for i := len(kubeconfigs) - 1; i >= 0; i-- { kubeconfig := kubeconfigs[i] - mergo.Merge(nonMapConfig, kubeconfig, mergo.WithOverride) + if err := merge(nonMapConfig, kubeconfig); err != nil { + return nil, err + } } // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and // get the values we expect. config := clientcmdapi.NewConfig() - mergo.Merge(config, mapConfig, mergo.WithOverride) - mergo.Merge(config, nonMapConfig, mergo.WithOverride) + if err := merge(config, mapConfig); err != nil { + return nil, err + } + if err := merge(config, nonMapConfig); err != nil { + return nil, err + } if rules.ResolvePaths() { if err := ResolveLocalPaths(config); err != nil { diff --git a/constraint/vendor/k8s.io/client-go/tools/clientcmd/merge.go b/constraint/vendor/k8s.io/client-go/tools/clientcmd/merge.go new file mode 100644 index 000000000..3d74e6029 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/clientcmd/merge.go @@ -0,0 +1,121 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "fmt" + "reflect" + "strings" +) + +// recursively merges src into dst: +// - non-pointer struct fields with any exported fields are recursively merged +// - non-pointer struct fields with only unexported fields prefer src if the field is non-zero +// - maps are shallow merged with src keys taking priority over dst +// - non-zero src fields encountered during recursion that are not maps or structs overwrite and recursion stops +func merge[T any](dst, src *T) error { + if dst == nil { + return fmt.Errorf("cannot merge into nil pointer") + } + if src == nil { + return nil + } + return mergeValues(nil, reflect.ValueOf(dst).Elem(), reflect.ValueOf(src).Elem()) +} + +func mergeValues(fieldNames []string, dst, src reflect.Value) error { + dstType := dst.Type() + // no-op if we can't read the src + if !src.IsValid() { + return nil + } + // sanity check types match + if srcType := src.Type(); dstType != srcType { + return fmt.Errorf("cannot merge mismatched types (%s, %s) at %s", dstType, srcType, strings.Join(fieldNames, ".")) + } + + switch dstType.Kind() { + case reflect.Struct: + if hasExportedField(dstType) { + // recursively merge + for i, n := 0, dstType.NumField(); i < n; i++ { + if err := mergeValues(append(fieldNames, dstType.Field(i).Name), dst.Field(i), src.Field(i)); err != nil { + return err + } + } + } else if dst.CanSet() { + // If all fields are unexported, overwrite with src. + // Using src.IsZero() would make more sense but that's not what mergo did. 
+ dst.Set(src) + } + + case reflect.Map: + if dst.CanSet() && !src.IsZero() { + // initialize dst if needed + if dst.IsZero() { + dst.Set(reflect.MakeMap(dstType)) + } + // shallow-merge overwriting dst keys with src keys + for _, mapKey := range src.MapKeys() { + dst.SetMapIndex(mapKey, src.MapIndex(mapKey)) + } + } + + case reflect.Slice: + if dst.CanSet() && src.Len() > 0 { + // overwrite dst with non-empty src slice + dst.Set(src) + } + + case reflect.Pointer: + if dst.CanSet() && !src.IsZero() { + // overwrite dst with non-zero values for other types + if dstType.Elem().Kind() == reflect.Struct { + // use struct pointer as-is + dst.Set(src) + } else { + // shallow-copy non-struct pointer (interfaces, primitives, etc) + dst.Set(reflect.New(dstType.Elem())) + dst.Elem().Set(src.Elem()) + } + } + + default: + if dst.CanSet() && !src.IsZero() { + // overwrite dst with non-zero values for other types + dst.Set(src) + } + } + + return nil +} + +// hasExportedField returns true if the given type has any exported fields, +// or if it has any anonymous/embedded struct fields with exported fields +func hasExportedField(dstType reflect.Type) bool { + for i, n := 0, dstType.NumField(); i < n; i++ { + field := dstType.Field(i) + if field.Anonymous && field.Type.Kind() == reflect.Struct { + if hasExportedField(dstType.Field(i).Type) { + return true + } + } else if len(field.PkgPath) == 0 { + return true + } + } + return false +} diff --git a/constraint/vendor/k8s.io/client-go/tools/leaderelection/OWNERS b/constraint/vendor/k8s.io/client-go/tools/leaderelection/OWNERS index 908bdacdf..70787f2b5 100644 --- a/constraint/vendor/k8s.io/client-go/tools/leaderelection/OWNERS +++ b/constraint/vendor/k8s.io/client-go/tools/leaderelection/OWNERS @@ -2,10 +2,12 @@ approvers: - mikedanese + - jefftree reviewers: - wojtek-t - deads2k - mikedanese - ingvagabund + - jefftree emeritus_approvers: - timothysc diff --git a/constraint/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/constraint/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index af840c4a2..c3c1d9be1 100644 --- a/constraint/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/constraint/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -159,6 +159,10 @@ type LeaderElectionConfig struct { // Name is the name of the resource lock for debugging Name string + + // Coordinated will use the Coordinated Leader Election feature + // WARNING: Coordinated leader election is ALPHA. + Coordinated bool } // LeaderCallbacks are callbacks that are triggered during certain @@ -169,7 +173,10 @@ type LeaderElectionConfig struct { type LeaderCallbacks struct { // OnStartedLeading is called when a LeaderElector client starts leading OnStartedLeading func(context.Context) - // OnStoppedLeading is called when a LeaderElector client stops leading + // OnStoppedLeading is called when a LeaderElector client stops leading. + // This callback is always called when the LeaderElector exits, even if it did not start leading. + // Users should not assume that OnStoppedLeading is only called after OnStartedLeading. + // see: https://github.com/kubernetes/kubernetes/pull/127675#discussion_r1780059887 OnStoppedLeading func() // OnNewLeader is called when the client observes a leader that is // not the previously observed leader. 
This includes the first observed @@ -249,7 +256,11 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { desc := le.config.Lock.Describe() klog.Infof("attempting to acquire leader lease %v...", desc) wait.JitterUntil(func() { - succeeded = le.tryAcquireOrRenew(ctx) + if !le.config.Coordinated { + succeeded = le.tryAcquireOrRenew(ctx) + } else { + succeeded = le.tryCoordinatedRenew(ctx) + } le.maybeReportTransition() if !succeeded { klog.V(4).Infof("failed to acquire lease %v", desc) @@ -269,12 +280,13 @@ func (le *LeaderElector) renew(ctx context.Context) { ctx, cancel := context.WithCancel(ctx) defer cancel() wait.Until(func() { - timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline) - defer timeoutCancel() - err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) { - return le.tryAcquireOrRenew(timeoutCtx), nil - }, timeoutCtx.Done()) - + err := wait.PollUntilContextTimeout(ctx, le.config.RetryPeriod, le.config.RenewDeadline, true, func(ctx context.Context) (done bool, err error) { + if !le.config.Coordinated { + return le.tryAcquireOrRenew(ctx), nil + } else { + return le.tryCoordinatedRenew(ctx), nil + } + }) le.maybeReportTransition() desc := le.config.Lock.Describe() if err == nil { @@ -304,7 +316,9 @@ func (le *LeaderElector) release() bool { RenewTime: now, AcquireTime: now, } - if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil { + timeoutCtx, timeoutCancel := context.WithTimeout(context.Background(), le.config.RenewDeadline) + defer timeoutCancel() + if err := le.config.Lock.Update(timeoutCtx, leaderElectionRecord); err != nil { klog.Errorf("Failed to release lock: %v", err) return false } @@ -313,6 +327,81 @@ func (le *LeaderElector) release() bool { return true } +// tryCoordinatedRenew checks if it acquired a lease and tries to renew the +// lease if it has already been acquired. Returns true on success else returns +// false. +func (le *LeaderElector) tryCoordinatedRenew(ctx context.Context) bool { + now := metav1.NewTime(le.clock.Now()) + leaderElectionRecord := rl.LeaderElectionRecord{ + HolderIdentity: le.config.Lock.Identity(), + LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second), + RenewTime: now, + AcquireTime: now, + } + + // 1. obtain the electionRecord + oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx) + if err != nil { + if !errors.IsNotFound(err) { + klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) + return false + } + klog.Infof("lease lock not found: %v", le.config.Lock.Describe()) + return false + } + + // 2. Record obtained, check the Identity & Time + if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) { + le.setObservedRecord(oldLeaderElectionRecord) + + le.observedRawRecord = oldLeaderElectionRawRecord + } + + hasExpired := le.observedTime.Add(time.Second * time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).Before(now.Time) + if hasExpired { + klog.Infof("lock has expired: %v", le.config.Lock.Describe()) + return false + } + + if !le.IsLeader() { + klog.V(6).Infof("lock is held by %v and has not yet expired: %v", oldLeaderElectionRecord.HolderIdentity, le.config.Lock.Describe()) + return false + } + + // 2b. 
If the lease has been marked as "end of term", don't renew it + if le.IsLeader() && oldLeaderElectionRecord.PreferredHolder != "" { + klog.V(4).Infof("lock is marked as 'end of term': %v", le.config.Lock.Describe()) + // TODO: Instead of letting lease expire, the holder may deleted it directly + // This will not be compatible with all controllers, so it needs to be opt-in behavior. + // We must ensure all code guarded by this lease has successfully completed + // prior to releasing or there may be two processes + // simultaneously acting on the critical path. + // Usually once this returns false, the process is terminated.. + // xref: OnStoppedLeading + return false + } + + // 3. We're going to try to update. The leaderElectionRecord is set to it's default + // here. Let's correct it before updating. + if le.IsLeader() { + leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime + leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + leaderElectionRecord.Strategy = oldLeaderElectionRecord.Strategy + le.metrics.slowpathExercised(le.config.Name) + } else { + leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1 + } + + // update the lock itself + if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil { + klog.Errorf("Failed to update lock: %v", err) + return false + } + + le.setObservedRecord(&leaderElectionRecord) + return true +} + // tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired, // else it tries to renew the lease if it has already been acquired. Returns true // on success else returns false. @@ -337,7 +426,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { le.setObservedRecord(&leaderElectionRecord) return true } - klog.Errorf("Failed to update lock optimitically: %v, falling back to slow path", err) + klog.Errorf("Failed to update lock optimistically: %v, falling back to slow path", err) } // 2. obtain or create the ElectionRecord diff --git a/constraint/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go b/constraint/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go new file mode 100644 index 000000000..6ccd4cfbe --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go @@ -0,0 +1,202 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package leaderelection + +import ( + "context" + "reflect" + "time" + + v1 "k8s.io/api/coordination/v1" + v1alpha2 "k8s.io/api/coordination/v1alpha2" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + coordinationv1alpha2client "k8s.io/client-go/kubernetes/typed/coordination/v1alpha2" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + "k8s.io/utils/clock" +) + +const requeueInterval = 5 * time.Minute + +type CacheSyncWaiter interface { + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool +} + +type LeaseCandidate struct { + leaseClient coordinationv1alpha2client.LeaseCandidateInterface + leaseCandidateInformer cache.SharedIndexInformer + informerFactory informers.SharedInformerFactory + hasSynced cache.InformerSynced + + // At most there will be one item in this Queue (since we only watch one item) + queue workqueue.TypedRateLimitingInterface[int] + + name string + namespace string + + // controller lease + leaseName string + + clock clock.Clock + + binaryVersion, emulationVersion string + strategy v1.CoordinatedLeaseStrategy +} + +// NewCandidate creates new LeaseCandidate controller that creates a +// LeaseCandidate object if it does not exist and watches changes +// to the corresponding object and renews if PingTime is set. +// WARNING: This is an ALPHA feature. Ensure that the CoordinatedLeaderElection +// feature gate is on. +func NewCandidate(clientset kubernetes.Interface, + candidateNamespace string, + candidateName string, + targetLease string, + binaryVersion, emulationVersion string, + strategy v1.CoordinatedLeaseStrategy, +) (*LeaseCandidate, CacheSyncWaiter, error) { + fieldSelector := fields.OneTermEqualSelector("metadata.name", candidateName).String() + // A separate informer factory is required because this must start before informerFactories + // are started for leader elected components + informerFactory := informers.NewSharedInformerFactoryWithOptions( + clientset, 5*time.Minute, + informers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.FieldSelector = fieldSelector + }), + ) + leaseCandidateInformer := informerFactory.Coordination().V1alpha2().LeaseCandidates().Informer() + + lc := &LeaseCandidate{ + leaseClient: clientset.CoordinationV1alpha2().LeaseCandidates(candidateNamespace), + leaseCandidateInformer: leaseCandidateInformer, + informerFactory: informerFactory, + name: candidateName, + namespace: candidateNamespace, + leaseName: targetLease, + clock: clock.RealClock{}, + binaryVersion: binaryVersion, + emulationVersion: emulationVersion, + strategy: strategy, + } + lc.queue = workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.DefaultTypedControllerRateLimiter[int](), workqueue.TypedRateLimitingQueueConfig[int]{Name: "leasecandidate"}) + + h, err := leaseCandidateInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + if leasecandidate, ok := newObj.(*v1alpha2.LeaseCandidate); ok { + if leasecandidate.Spec.PingTime != nil && leasecandidate.Spec.PingTime.After(leasecandidate.Spec.RenewTime.Time) { + lc.enqueueLease() + } + } + }, + }) + if err != nil { + return nil, nil, err + } + lc.hasSynced = h.HasSynced + + return lc, informerFactory, nil +} + +func (c *LeaseCandidate) Run(ctx context.Context) { + defer c.queue.ShutDown() + + 
c.informerFactory.Start(ctx.Done()) + if !cache.WaitForNamedCacheSync("leasecandidateclient", ctx.Done(), c.hasSynced) { + return + } + + c.enqueueLease() + go c.runWorker(ctx) + <-ctx.Done() +} + +func (c *LeaseCandidate) runWorker(ctx context.Context) { + for c.processNextWorkItem(ctx) { + } +} + +func (c *LeaseCandidate) processNextWorkItem(ctx context.Context) bool { + key, shutdown := c.queue.Get() + if shutdown { + return false + } + defer c.queue.Done(key) + + err := c.ensureLease(ctx) + if err == nil { + c.queue.AddAfter(key, requeueInterval) + return true + } + + utilruntime.HandleError(err) + c.queue.AddRateLimited(key) + + return true +} + +func (c *LeaseCandidate) enqueueLease() { + c.queue.Add(0) +} + +// ensureLease creates the lease if it does not exist and renew it if it exists. Returns the lease and +// a bool (true if this call created the lease), or any error that occurs. +func (c *LeaseCandidate) ensureLease(ctx context.Context) error { + lease, err := c.leaseClient.Get(ctx, c.name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + klog.V(2).Infof("Creating lease candidate") + // lease does not exist, create it. + leaseToCreate := c.newLeaseCandidate() + if _, err := c.leaseClient.Create(ctx, leaseToCreate, metav1.CreateOptions{}); err != nil { + return err + } + klog.V(2).Infof("Created lease candidate") + return nil + } else if err != nil { + return err + } + klog.V(2).Infof("lease candidate exists. Renewing.") + clone := lease.DeepCopy() + clone.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()} + _, err = c.leaseClient.Update(ctx, clone, metav1.UpdateOptions{}) + if err != nil { + return err + } + return nil +} + +func (c *LeaseCandidate) newLeaseCandidate() *v1alpha2.LeaseCandidate { + lc := &v1alpha2.LeaseCandidate{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.name, + Namespace: c.namespace, + }, + Spec: v1alpha2.LeaseCandidateSpec{ + LeaseName: c.leaseName, + BinaryVersion: c.binaryVersion, + EmulationVersion: c.emulationVersion, + Strategy: c.strategy, + }, + } + lc.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()} + return lc +} diff --git a/constraint/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/constraint/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index 483753d63..2a1eb9caa 100644 --- a/constraint/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/constraint/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -19,14 +19,15 @@ package resourcelock import ( "context" "fmt" - clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" "time" + v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" ) const ( @@ -34,74 +35,8 @@ const ( endpointsResourceLock = "endpoints" configMapsResourceLock = "configmaps" LeasesResourceLock = "leases" - // When using endpointsLeasesResourceLock, you need to ensure that - // API Priority & Fairness is configured with non-default flow-schema - // that will catch the necessary operations on leader-election related - // endpoint objects. 
- // - // The example of such flow scheme could look like this: - // apiVersion: flowcontrol.apiserver.k8s.io/v1beta2 - // kind: FlowSchema - // metadata: - // name: my-leader-election - // spec: - // distinguisherMethod: - // type: ByUser - // matchingPrecedence: 200 - // priorityLevelConfiguration: - // name: leader-election # reference the PL - // rules: - // - resourceRules: - // - apiGroups: - // - "" - // namespaces: - // - '*' - // resources: - // - endpoints - // verbs: - // - get - // - create - // - update - // subjects: - // - kind: ServiceAccount - // serviceAccount: - // name: '*' - // namespace: kube-system - endpointsLeasesResourceLock = "endpointsleases" - // When using configMapsLeasesResourceLock, you need to ensure that - // API Priority & Fairness is configured with non-default flow-schema - // that will catch the necessary operations on leader-election related - // configmap objects. - // - // The example of such flow scheme could look like this: - // apiVersion: flowcontrol.apiserver.k8s.io/v1beta2 - // kind: FlowSchema - // metadata: - // name: my-leader-election - // spec: - // distinguisherMethod: - // type: ByUser - // matchingPrecedence: 200 - // priorityLevelConfiguration: - // name: leader-election # reference the PL - // rules: - // - resourceRules: - // - apiGroups: - // - "" - // namespaces: - // - '*' - // resources: - // - configmaps - // verbs: - // - get - // - create - // - update - // subjects: - // - kind: ServiceAccount - // serviceAccount: - // name: '*' - // namespace: kube-system - configMapsLeasesResourceLock = "configmapsleases" + endpointsLeasesResourceLock = "endpointsleases" + configMapsLeasesResourceLock = "configmapsleases" ) // LeaderElectionRecord is the record that is stored in the leader election annotation. @@ -114,11 +49,13 @@ type LeaderElectionRecord struct { // attempt to acquire leases with empty identities and will wait for the full lease // interval to expire before attempting to reacquire. This value is set to empty when // a client voluntarily steps down. - HolderIdentity string `json:"holderIdentity"` - LeaseDurationSeconds int `json:"leaseDurationSeconds"` - AcquireTime metav1.Time `json:"acquireTime"` - RenewTime metav1.Time `json:"renewTime"` - LeaderTransitions int `json:"leaderTransitions"` + HolderIdentity string `json:"holderIdentity"` + LeaseDurationSeconds int `json:"leaseDurationSeconds"` + AcquireTime metav1.Time `json:"acquireTime"` + RenewTime metav1.Time `json:"renewTime"` + LeaderTransitions int `json:"leaderTransitions"` + Strategy v1.CoordinatedLeaseStrategy `json:"strategy"` + PreferredHolder string `json:"preferredHolder"` } // EventRecorder records a change in the ResourceLock. 
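Coordinated leader election (the new Coordinated flag together with the LeaseCandidate controller above) is ALPHA. A minimal end-to-end sketch, assuming the CoordinatedLeaderElection feature is enabled server-side and using hypothetical namespace, names, and versions:

package leaderexample

import (
	"context"
	"time"

	coordinationv1 "k8s.io/api/coordination/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func runCoordinated(ctx context.Context, client kubernetes.Interface, id string) error {
	lock, err := resourcelock.New(resourcelock.LeasesResourceLock, "kube-system", "my-controller",
		client.CoreV1(), client.CoordinationV1(), resourcelock.ResourceLockConfig{Identity: id})
	if err != nil {
		return err
	}

	// Register this instance as a candidate for the target lease.
	candidate, _, err := leaderelection.NewCandidate(client, "kube-system", id,
		"my-controller", "1.32.0", "1.32.0", coordinationv1.OldestEmulationVersion)
	if err != nil {
		return err
	}
	go candidate.Run(ctx)

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		Coordinated:   true, // ALPHA, see the field comment above
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { /* start work */ },
			// Always called on exit, even if OnStartedLeading never ran.
			OnStoppedLeading: func() { /* stop work; usually exit */ },
		},
	})
	return nil
}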
@@ -174,9 +111,9 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf } switch lockType { case endpointsResourceLock: - return nil, fmt.Errorf("endpoints lock is removed, migrate to %s (using version v0.27.x)", endpointsLeasesResourceLock) + return nil, fmt.Errorf("endpoints lock is removed, migrate to %s", LeasesResourceLock) case configMapsResourceLock: - return nil, fmt.Errorf("configmaps lock is removed, migrate to %s (using version v0.27.x)", configMapsLeasesResourceLock) + return nil, fmt.Errorf("configmaps lock is removed, migrate to %s", LeasesResourceLock) case LeasesResourceLock: return leaseLock, nil case endpointsLeasesResourceLock: diff --git a/constraint/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/constraint/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 8a9d7d60f..7cd2a8b9c 100644 --- a/constraint/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/constraint/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -122,6 +122,12 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec if spec.RenewTime != nil { r.RenewTime = metav1.Time{Time: spec.RenewTime.Time} } + if spec.PreferredHolder != nil { + r.PreferredHolder = *spec.PreferredHolder + } + if spec.Strategy != nil { + r.Strategy = *spec.Strategy + } return &r } @@ -129,11 +135,18 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec { leaseDurationSeconds := int32(ler.LeaseDurationSeconds) leaseTransitions := int32(ler.LeaderTransitions) - return coordinationv1.LeaseSpec{ + spec := coordinationv1.LeaseSpec{ HolderIdentity: &ler.HolderIdentity, LeaseDurationSeconds: &leaseDurationSeconds, AcquireTime: &metav1.MicroTime{Time: ler.AcquireTime.Time}, RenewTime: &metav1.MicroTime{Time: ler.RenewTime.Time}, LeaseTransitions: &leaseTransitions, } + if ler.PreferredHolder != "" { + spec.PreferredHolder = &ler.PreferredHolder + } + if ler.Strategy != "" { + spec.Strategy = &ler.Strategy + } + return spec } diff --git a/constraint/vendor/k8s.io/client-go/transport/cache_go118.go b/constraint/vendor/k8s.io/client-go/transport/cache_go118.go index d21d5137d..babdaf8b5 100644 --- a/constraint/vendor/k8s.io/client-go/transport/cache_go118.go +++ b/constraint/vendor/k8s.io/client-go/transport/cache_go118.go @@ -18,7 +18,29 @@ limitations under the License. package transport +// this is just to make the "unused" linter rule happy +var _ = isCacheKeyComparable[tlsCacheKey] + // assert at compile time that tlsCacheKey is comparable in a way that will never panic at runtime. -var _ = isComparable[tlsCacheKey] +// +// Golang 1.20 introduced an exception to type constraints that allows comparable, but not +// necessarily strictly comparable type arguments to satisfy the `comparable` type constraint, +// thus allowing interfaces to fulfil the `comparable` constraint. +// However, by definition, "A comparison of two interface values with identical +// dynamic types causes a run-time panic if that type is not comparable". +// +// We want to make sure that comparing two `tlsCacheKey` elements won't cause a +// runtime panic. In order to do that, we'll force the `tlsCacheKey` to be strictly +// comparable, thus making it impossible for it to contain interfaces. 
+// To assert strict comparability, we'll use another definition: "Type +// parameters are comparable if they are strictly comparable". +// Below, we first construct a type parameter from the `tlsCacheKey` type so that +// we can then push this type parameter to a comparable check, thus checking these +// are strictly comparable. +// +// Original suggestion from https://github.com/golang/go/issues/56548#issuecomment-1317673963 +func isCacheKeyComparable[K tlsCacheKey]() { + _ = isComparable[K] +} func isComparable[T comparable]() {} diff --git a/constraint/vendor/k8s.io/client-go/transport/cert_rotation.go b/constraint/vendor/k8s.io/client-go/transport/cert_rotation.go index dc22b6ec4..e76f65812 100644 --- a/constraint/vendor/k8s.io/client-go/transport/cert_rotation.go +++ b/constraint/vendor/k8s.io/client-go/transport/cert_rotation.go @@ -47,14 +47,17 @@ type dynamicClientCert struct { connDialer *connrotation.Dialer // queue only ever has one item, but it has nice error handling backoff/retry semantics - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[string] } func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert { d := &dynamicClientCert{ reload: reload, connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicClientCertificate"), + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicClientCertificate"}, + ), } return d diff --git a/constraint/vendor/k8s.io/client-go/transport/round_trippers.go b/constraint/vendor/k8s.io/client-go/transport/round_trippers.go index e2d1dcc9a..52fefb531 100644 --- a/constraint/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/constraint/vendor/k8s.io/client-go/transport/round_trippers.go @@ -86,6 +86,7 @@ func DebugWrappers(rt http.RoundTripper) http.RoundTripper { type authProxyRoundTripper struct { username string + uid string groups []string extra map[string][]string @@ -98,15 +99,17 @@ var _ utilnet.RoundTripperWrapper = &authProxyRoundTripper{} // authentication terminating proxy cases // assuming you pull the user from the context: // username is the user.Info.GetName() of the user +// uid is the user.Info.GetUID() of the user // groups is the user.Info.GetGroups() of the user // extra is the user.Info.GetExtra() of the user // extra can contain any additional information that the authenticator // thought was interesting, for example authorization scopes. // In order to faithfully round-trip through an impersonation flow, these keys // MUST be lowercase. -func NewAuthProxyRoundTripper(username string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper { +func NewAuthProxyRoundTripper(username, uid string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper { return &authProxyRoundTripper{ username: username, + uid: uid, groups: groups, extra: extra, rt: rt, @@ -115,14 +118,15 @@ func NewAuthProxyRoundTripper(username string, groups []string, extra map[string func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { req = utilnet.CloneRequest(req) - SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra) + SetAuthProxyHeaders(req, rt.username, rt.uid, rt.groups, rt.extra) return rt.rt.RoundTrip(req) } // SetAuthProxyHeaders stomps the auth proxy header fields. 
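// ----------------------------------------------------------------------------
// Editor's illustration, not part of the vendored diff: the strict-comparability
// assertion used in transport/cache_go118.go above, reduced to a standalone
// sketch. The type names are invented for the example.
package main

func isComparable[T comparable]() {}

// strictKey has no interface fields, so comparing two values can never panic.
type strictKey struct {
	host string
	port int
}

// looseKey compiles as comparable, but comparing two values can panic at run
// time if the interface field holds a non-comparable dynamic type.
type looseKey struct {
	extra any
}

// A type parameter constrained to strictKey is strictly comparable, so this
// compiles, which is the guarantee the vendored code wants for tlsCacheKey.
func assertStrict[K strictKey]() { _ = isComparable[K] }

var _ = assertStrict[strictKey]

// The equivalent assertion for looseKey would be rejected by the compiler:
//
//	func assertLoose[K looseKey]() { _ = isComparable[K] } // does not compile
func main() {}
// ----------------------------------------------------------------------------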
It mutates its argument. -func SetAuthProxyHeaders(req *http.Request, username string, groups []string, extra map[string][]string) { +func SetAuthProxyHeaders(req *http.Request, username, uid string, groups []string, extra map[string][]string) { req.Header.Del("X-Remote-User") + req.Header.Del("X-Remote-Uid") req.Header.Del("X-Remote-Group") for key := range req.Header { if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) { @@ -131,6 +135,9 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex } req.Header.Set("X-Remote-User", username) + if len(uid) > 0 { + req.Header.Set("X-Remote-Uid", uid) + } for _, group := range groups { req.Header.Add("X-Remote-Group", group) } diff --git a/constraint/vendor/k8s.io/client-go/util/apply/apply.go b/constraint/vendor/k8s.io/client-go/util/apply/apply.go new file mode 100644 index 000000000..0cc85df6c --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/util/apply/apply.go @@ -0,0 +1,49 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apply + +import ( + "fmt" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/client-go/features" + "k8s.io/client-go/rest" +) + +// NewRequest builds a new server-side apply request. The provided apply configuration object will +// be marshalled to the request's body using the default encoding, and the Content-Type header will +// be set to application/apply-patch with the appropriate structured syntax name suffix (today, +// either +yaml or +cbor, see +// https://www.iana.org/assignments/media-type-structured-suffix/media-type-structured-suffix.xhtml). +func NewRequest(client rest.Interface, applyConfiguration interface{}) (*rest.Request, error) { + pt := types.ApplyYAMLPatchType + marshal := json.Marshal + + if features.FeatureGates().Enabled(features.ClientsAllowCBOR) && features.FeatureGates().Enabled(features.ClientsPreferCBOR) { + pt = types.ApplyCBORPatchType + marshal = cbor.Marshal + } + + body, err := marshal(applyConfiguration) + if err != nil { + return nil, fmt.Errorf("failed to marshal apply configuration: %w", err) + } + + return client.Patch(pt).Body(body), nil +} diff --git a/constraint/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go b/constraint/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go new file mode 100644 index 000000000..b33d08032 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go @@ -0,0 +1,146 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package consistencydetector + +import ( + "context" + "fmt" + "sort" + "time" + + "github.com/google/go-cmp/cmp" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +type RetrieveItemsFunc[U any] func() []U + +type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error) + +// CheckDataConsistency exists solely for testing purposes. +// we cannot use checkWatchListDataConsistencyIfRequested because +// it is guarded by an environmental variable. +// we cannot manipulate the environmental variable because +// it will affect other tests in this package. +func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) { + if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) { + klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. Skipping the data consistency check.", identity) + return + } + klog.Warningf("data consistency check for %s is enabled, this will result in an additional call to the API server.", identity) + + retrievedItems := toMetaObjectSliceOrDie(retrieveItemsFn()) + listOptions = prepareListCallOptions(lastSyncedResourceVersion, listOptions, len(retrievedItems)) + var list runtime.Object + err := wait.PollUntilContextCancel(ctx, time.Second, true, func(_ context.Context) (done bool, err error) { + list, err = listFn(ctx, listOptions) + if err != nil { + // the consistency check will only be enabled in the CI + // and LIST calls in general will be retired by the client-go library + // if we fail simply log and retry + klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) + return false, nil + } + return true, nil + }) + if err != nil { + klog.Errorf("failed to list data from the server, the data consistency check for %s won't be performed, stopCh was closed, err: %v", identity, err) + return + } + + rawListItems, err := meta.ExtractListWithAlloc(list) + if err != nil { + panic(err) // this should never happen + } + listItems := toMetaObjectSliceOrDie(rawListItems) + + sort.Sort(byUID(listItems)) + sort.Sort(byUID(retrievedItems)) + + if !cmp.Equal(listItems, retrievedItems) { + klog.Infof("previously received data for %s is different than received by the standard list api call against etcd, diff: %v", identity, cmp.Diff(listItems, retrievedItems)) + msg := fmt.Sprintf("data inconsistency detected for %s, panicking!", identity) + panic(msg) + } +} + +// canFormAdditionalListCall ensures that we can form a valid LIST requests +// for checking data consistency. 
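// ----------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: one way CheckDataConsistency
// might be exercised from a test. The client, namespace, identity string and
// resource version are assumptions invented for the illustration.
package consistencyexample

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/consistencydetector"
)

func verifyCachedPods(ctx context.Context, client kubernetes.Interface, cached []*v1.Pod, lastSyncedRV string) {
	// listFn re-lists from the API server; CheckDataConsistency rewrites the
	// options (exact ResourceVersion match) so the comparison list is served
	// from etcd rather than the watch cache.
	var listFn consistencydetector.ListFunc[*v1.PodList] = func(ctx context.Context, opts metav1.ListOptions) (*v1.PodList, error) {
		return client.CoreV1().Pods(metav1.NamespaceDefault).List(ctx, opts)
	}
	var retrieveFn consistencydetector.RetrieveItemsFunc[*v1.Pod] = func() []*v1.Pod { return cached }

	// Panics if the cached items differ from what etcd holds at lastSyncedRV.
	consistencydetector.CheckDataConsistency(ctx, "pod-cache-test", lastSyncedRV, listFn, metav1.ListOptions{}, retrieveFn)
}
// ----------------------------------------------------------------------------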
+func canFormAdditionalListCall(lastSyncedResourceVersion string, listOptions metav1.ListOptions) bool { + // since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact + // we need to make sure that the continuation hasn't been set + // https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L38 + if len(listOptions.Continue) > 0 { + return false + } + + // since we are setting ResourceVersionMatch to metav1.ResourceVersionMatchExact + // we need to make sure that the RV is valid because the validation code forbids RV == "0" + // https://github.com/kubernetes/kubernetes/blob/be4afb9ef90b19ccb6f7e595cbdb247e088b2347/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go#L44 + if lastSyncedResourceVersion == "0" { + return false + } + + return true +} + +// prepareListCallOptions changes the input list options so that +// the list call goes directly to etcd +func prepareListCallOptions(lastSyncedResourceVersion string, listOptions metav1.ListOptions, retrievedItemsCount int) metav1.ListOptions { + // this is our legacy case: + // + // the watch cache skips the Limit if the ResourceVersion was set to "0" + // thus, to compare with data retrieved directly from etcd + // we need to skip the limit to for the list call as well. + // + // note that when the number of retrieved items is less than the request limit, + // it means either the watch cache is disabled, or there is not enough data. + // in both cases, we can use the limit because we will be able to compare + // the data with the items retrieved from etcd. + if listOptions.ResourceVersion == "0" && listOptions.Limit > 0 && int64(retrievedItemsCount) > listOptions.Limit { + listOptions.Limit = 0 + } + + // set the RV and RVM so that we get the snapshot of data + // directly from etcd. + listOptions.ResourceVersion = lastSyncedResourceVersion + listOptions.ResourceVersionMatch = metav1.ResourceVersionMatchExact + + return listOptions +} + +type byUID []metav1.Object + +func (a byUID) Len() int { return len(a) } +func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } +func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { + result := make([]metav1.Object, len(s)) + for i, v := range s { + m, err := meta.Accessor(v) + if err != nil { + panic(err) + } + result[i] = m + } + return result +} diff --git a/constraint/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go b/constraint/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go new file mode 100644 index 000000000..61b8fe28b --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go @@ -0,0 +1,76 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package consistencydetector + +import ( + "context" + "os" + "strconv" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var dataConsistencyDetectionForListFromCacheEnabled = false + +func init() { + dataConsistencyDetectionForListFromCacheEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR")) +} + +// IsDataConsistencyDetectionForListEnabled returns true when +// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +func IsDataConsistencyDetectionForListEnabled() bool { + return dataConsistencyDetectionForListFromCacheEnabled +} + +// CheckListFromCacheDataConsistencyIfRequested performs a data consistency check only when +// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup +// for requests that have a high chance of being served from the watch-cache. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by a list api call from the watch-cache +// is exactly the same as data received by the list api call from etcd. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +// +// Note that this function doesn't examine the ListOptions to determine +// if the original request has hit the cache because it would be challenging +// to maintain consistency with the server-side implementation. +// For simplicity, we assume that the first request retrieved data from +// the cache (even though this might not be true for some requests) +// and issue the second call to get data from etcd for comparison. +func CheckListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + if !IsDataConsistencyDetectionForListEnabled() { + return + } + checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList) +} + +func checkListFromCacheDataConsistencyIfRequestedInternal[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + receivedListMeta, err := meta.ListAccessor(receivedList) + if err != nil { + panic(err) + } + rawListItems, err := meta.ExtractListWithAlloc(receivedList) + if err != nil { + panic(err) // this should never happen + } + lastSyncedResourceVersion := receivedListMeta.GetResourceVersion() + CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listItemsFn, optionsUsedToReceiveList, func() []runtime.Object { return rawListItems }) +} diff --git a/constraint/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go b/constraint/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go new file mode 100644 index 000000000..cda5fc205 --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go @@ -0,0 +1,54 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package consistencydetector + +import ( + "context" + "os" + "strconv" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var dataConsistencyDetectionForWatchListEnabled = false + +func init() { + dataConsistencyDetectionForWatchListEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) +} + +// IsDataConsistencyDetectionForWatchListEnabled returns true when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +func IsDataConsistencyDetectionForWatchListEnabled() bool { + return dataConsistencyDetectionForWatchListEnabled +} + +// CheckWatchListFromCacheDataConsistencyIfRequested performs a data consistency check only when +// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. +// +// The consistency check is meant to be enforced only in the CI, not in production. +// The check ensures that data retrieved by the watch-list api call +// is exactly the same as data received by the standard list api call against etcd. +// +// Note that this function will panic when data inconsistency is detected. +// This is intentional because we want to catch it in the CI. +func CheckWatchListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) { + if !IsDataConsistencyDetectionForWatchListEnabled() { + return + } + checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList) +} diff --git a/constraint/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/constraint/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index 82e4c4c40..899b8e34e 100644 --- a/constraint/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/constraint/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -32,7 +32,12 @@ type backoffEntry struct { type Backoff struct { sync.RWMutex - Clock clock.Clock + Clock clock.Clock + // HasExpiredFunc controls the logic that determines whether the backoff + // counter should be reset, and when to GC old backoff entries. 
If nil, the + // default hasExpired function will restart the backoff factor to the + // beginning after observing time has passed at least equal to 2*maxDuration + HasExpiredFunc func(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool defaultDuration time.Duration maxDuration time.Duration perItemBackoff map[string]*backoffEntry @@ -93,7 +98,7 @@ func (p *Backoff) Next(id string, eventTime time.Time) { p.Lock() defer p.Unlock() entry, ok := p.perItemBackoff[id] - if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + if !ok || p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { entry = p.initEntryUnsafe(id) entry.backoff += p.jitter(entry.backoff) } else { @@ -119,7 +124,7 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { if !ok { return false } - if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { return false } return p.Clock.Since(eventTime) < entry.backoff @@ -133,21 +138,21 @@ func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { if !ok { return false } - if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { return false } return eventTime.Sub(entry.lastUpdate) < entry.backoff } -// Garbage collect records that have aged past maxDuration. Backoff users are expected -// to invoke this periodically. +// Garbage collect records that have aged past their expiration, which defaults +// to 2*maxDuration (see hasExpired godoc). Backoff users are expected to invoke +// this periodically. func (p *Backoff) GC() { p.Lock() defer p.Unlock() now := p.Clock.Now() for id, entry := range p.perItemBackoff { - if now.Sub(entry.lastUpdate) > p.maxDuration*2 { - // GC when entry has not been updated for 2*maxDuration + if p.hasExpired(now, entry.lastUpdate, p.maxDuration) { delete(p.perItemBackoff, id) } } @@ -174,7 +179,10 @@ func (p *Backoff) jitter(delay time.Duration) time.Duration { return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay)) } -// After 2*maxDuration we restart the backoff factor to the beginning -func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { +// Unless an alternate function is provided, after 2*maxDuration we restart the backoff factor to the beginning +func (p *Backoff) hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { + if p.HasExpiredFunc != nil { + return p.HasExpiredFunc(eventTime, lastUpdate, maxDuration) + } return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration } diff --git a/constraint/vendor/k8s.io/client-go/util/watchlist/watch_list.go b/constraint/vendor/k8s.io/client-go/util/watchlist/watch_list.go new file mode 100644 index 000000000..84106458a --- /dev/null +++ b/constraint/vendor/k8s.io/client-go/util/watchlist/watch_list.go @@ -0,0 +1,82 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
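// ----------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: plugging a custom expiry rule
// into the new Backoff.HasExpiredFunc hook introduced above. The durations are
// invented example values.
package backoffexample

import (
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func newBackoffWithFixedExpiry() *flowcontrol.Backoff {
	b := flowcontrol.NewBackOff(time.Second, 5*time.Minute)
	// Replace the default "reset after 2*maxDuration" rule: reset (and GC)
	// entries that have not been updated for a fixed 30 minutes.
	b.HasExpiredFunc = func(eventTime, lastUpdate time.Time, maxDuration time.Duration) bool {
		return eventTime.Sub(lastUpdate) > 30*time.Minute
	}
	return b
}
// ----------------------------------------------------------------------------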
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watchlist + +import ( + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientfeatures "k8s.io/client-go/features" + "k8s.io/utils/ptr" +) + +var scheme = runtime.NewScheme() + +func init() { + utilruntime.Must(metainternalversion.AddToScheme(scheme)) +} + +// PrepareWatchListOptionsFromListOptions creates a new ListOptions +// that can be used for a watch-list request from the given listOptions. +// +// This function also determines if the given listOptions can be used to form a watch-list request, +// which would result in streaming semantically equivalent data from the server. +func PrepareWatchListOptionsFromListOptions(listOptions metav1.ListOptions) (metav1.ListOptions, bool, error) { + if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) { + return metav1.ListOptions{}, false, nil + } + + internalListOptions := &metainternalversion.ListOptions{} + if err := scheme.Convert(&listOptions, internalListOptions, nil); err != nil { + return metav1.ListOptions{}, false, err + } + if errs := metainternalversionvalidation.ValidateListOptions(internalListOptions, true); len(errs) > 0 { + return metav1.ListOptions{}, false, nil + } + + watchListOptions := listOptions + // this is our legacy case, the cache ignores LIMIT for + // ResourceVersion == 0 and RVM=unset|NotOlderThan + if listOptions.Limit > 0 && listOptions.ResourceVersion != "0" { + return metav1.ListOptions{}, false, nil + } + watchListOptions.Limit = 0 + + // to ensure that we can create a watch-list request that returns + // semantically equivalent data for the given listOptions, + // we need to validate that the RVM for the list is supported by watch-list requests. + if listOptions.ResourceVersionMatch == metav1.ResourceVersionMatchExact { + return metav1.ListOptions{}, false, nil + } + watchListOptions.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + + watchListOptions.Watch = true + watchListOptions.AllowWatchBookmarks = true + watchListOptions.SendInitialEvents = ptr.To(true) + + internalWatchListOptions := &metainternalversion.ListOptions{} + if err := scheme.Convert(&watchListOptions, internalWatchListOptions, nil); err != nil { + return metav1.ListOptions{}, false, err + } + if errs := metainternalversionvalidation.ValidateListOptions(internalWatchListOptions, true); len(errs) > 0 { + return metav1.ListOptions{}, false, nil + } + + return watchListOptions, true, nil +} diff --git a/constraint/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/constraint/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go index efda7c197..1f9567881 100644 --- a/constraint/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/constraint/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -24,49 +24,66 @@ import ( "golang.org/x/time/rate" ) -type RateLimiter interface { +// Deprecated: RateLimiter is deprecated, use TypedRateLimiter instead. 
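// ----------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: how a caller might consult
// PrepareWatchListOptionsFromListOptions (added above) before deciding between
// a watch-list stream and a regular LIST. The wrapper name is invented.
package watchlistexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/watchlist"
)

// useWatchList returns the options to use for a watch-list request, or false
// when the given list options cannot be streamed (or the WatchListClient
// feature gate is disabled) and a regular LIST should be issued instead.
func useWatchList(opts metav1.ListOptions) (metav1.ListOptions, bool, error) {
	watchOpts, ok, err := watchlist.PrepareWatchListOptionsFromListOptions(opts)
	if err != nil || !ok {
		return metav1.ListOptions{}, false, err
	}
	// watchOpts now has Watch, AllowWatchBookmarks and SendInitialEvents set,
	// with the Limit cleared and ResourceVersionMatch set to NotOlderThan.
	return watchOpts, true, nil
}
// ----------------------------------------------------------------------------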
+type RateLimiter TypedRateLimiter[any] + +type TypedRateLimiter[T comparable] interface { // When gets an item and gets to decide how long that item should wait - When(item interface{}) time.Duration + When(item T) time.Duration // Forget indicates that an item is finished being retried. Doesn't matter whether it's for failing // or for success, we'll stop tracking it - Forget(item interface{}) + Forget(item T) // NumRequeues returns back how many failures the item has had - NumRequeues(item interface{}) int + NumRequeues(item T) int } // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has // both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential +// +// Deprecated: Use DefaultTypedControllerRateLimiter instead. func DefaultControllerRateLimiter() RateLimiter { - return NewMaxOfRateLimiter( - NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + return DefaultTypedControllerRateLimiter[any]() +} + +// DefaultTypedControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has +// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential +func DefaultTypedControllerRateLimiter[T comparable]() TypedRateLimiter[T] { + return NewTypedMaxOfRateLimiter( + NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second), // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + &TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) } -// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API -type BucketRateLimiter struct { +// Deprecated: BucketRateLimiter is deprecated, use TypedBucketRateLimiter instead. +type BucketRateLimiter = TypedBucketRateLimiter[any] + +// TypedBucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API +type TypedBucketRateLimiter[T comparable] struct { *rate.Limiter } var _ RateLimiter = &BucketRateLimiter{} -func (r *BucketRateLimiter) When(item interface{}) time.Duration { +func (r *TypedBucketRateLimiter[T]) When(item T) time.Duration { return r.Limiter.Reserve().Delay() } -func (r *BucketRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedBucketRateLimiter[T]) NumRequeues(item T) int { return 0 } -func (r *BucketRateLimiter) Forget(item interface{}) { +func (r *TypedBucketRateLimiter[T]) Forget(item T) { } -// ItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit +// Deprecated: ItemExponentialFailureRateLimiter is deprecated, use TypedItemExponentialFailureRateLimiter instead. +type ItemExponentialFailureRateLimiter = TypedItemExponentialFailureRateLimiter[any] + +// TypedItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit // dealing with max failures and expiration are up to the caller -type ItemExponentialFailureRateLimiter struct { +type TypedItemExponentialFailureRateLimiter[T comparable] struct { failuresLock sync.Mutex - failures map[interface{}]int + failures map[T]int baseDelay time.Duration maxDelay time.Duration @@ -74,19 +91,29 @@ type ItemExponentialFailureRateLimiter struct { var _ RateLimiter = &ItemExponentialFailureRateLimiter{} +// Deprecated: NewItemExponentialFailureRateLimiter is deprecated, use NewTypedItemExponentialFailureRateLimiter instead. 
func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { - return &ItemExponentialFailureRateLimiter{ - failures: map[interface{}]int{}, + return NewTypedItemExponentialFailureRateLimiter[any](baseDelay, maxDelay) +} + +func NewTypedItemExponentialFailureRateLimiter[T comparable](baseDelay time.Duration, maxDelay time.Duration) TypedRateLimiter[T] { + return &TypedItemExponentialFailureRateLimiter[T]{ + failures: map[T]int{}, baseDelay: baseDelay, maxDelay: maxDelay, } } +// Deprecated: DefaultItemBasedRateLimiter is deprecated, use DefaultTypedItemBasedRateLimiter instead. func DefaultItemBasedRateLimiter() RateLimiter { - return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) + return DefaultTypedItemBasedRateLimiter[any]() } -func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { +func DefaultTypedItemBasedRateLimiter[T comparable]() TypedRateLimiter[T] { + return NewTypedItemExponentialFailureRateLimiter[T](time.Millisecond, 1000*time.Second) +} + +func (r *TypedItemExponentialFailureRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -107,14 +134,14 @@ func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration return calculated } -func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedItemExponentialFailureRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } -func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { +func (r *TypedItemExponentialFailureRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -122,9 +149,13 @@ func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { } // ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that -type ItemFastSlowRateLimiter struct { +// Deprecated: Use TypedItemFastSlowRateLimiter instead. +type ItemFastSlowRateLimiter = TypedItemFastSlowRateLimiter[any] + +// TypedItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that +type TypedItemFastSlowRateLimiter[T comparable] struct { failuresLock sync.Mutex - failures map[interface{}]int + failures map[T]int maxFastAttempts int fastDelay time.Duration @@ -133,16 +164,21 @@ type ItemFastSlowRateLimiter struct { var _ RateLimiter = &ItemFastSlowRateLimiter{} +// Deprecated: NewItemFastSlowRateLimiter is deprecated, use NewTypedItemFastSlowRateLimiter instead. 
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { - return &ItemFastSlowRateLimiter{ - failures: map[interface{}]int{}, + return NewTypedItemFastSlowRateLimiter[any](fastDelay, slowDelay, maxFastAttempts) +} + +func NewTypedItemFastSlowRateLimiter[T comparable](fastDelay, slowDelay time.Duration, maxFastAttempts int) TypedRateLimiter[T] { + return &TypedItemFastSlowRateLimiter[T]{ + failures: map[T]int{}, fastDelay: fastDelay, slowDelay: slowDelay, maxFastAttempts: maxFastAttempts, } } -func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { +func (r *TypedItemFastSlowRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -155,14 +191,14 @@ func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { return r.slowDelay } -func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedItemFastSlowRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } -func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { +func (r *TypedItemFastSlowRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -172,11 +208,18 @@ func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { // MaxOfRateLimiter calls every RateLimiter and returns the worst case response // When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items // were separately delayed a longer time. -type MaxOfRateLimiter struct { - limiters []RateLimiter +// +// Deprecated: Use TypedMaxOfRateLimiter instead. +type MaxOfRateLimiter = TypedMaxOfRateLimiter[any] + +// TypedMaxOfRateLimiter calls every RateLimiter and returns the worst case response +// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items +// were separately delayed a longer time. +type TypedMaxOfRateLimiter[T comparable] struct { + limiters []TypedRateLimiter[T] } -func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { +func (r *TypedMaxOfRateLimiter[T]) When(item T) time.Duration { ret := time.Duration(0) for _, limiter := range r.limiters { curr := limiter.When(item) @@ -188,11 +231,16 @@ func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { return ret } -func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { - return &MaxOfRateLimiter{limiters: limiters} +// Deprecated: NewMaxOfRateLimiter is deprecated, use NewTypedMaxOfRateLimiter instead. +func NewMaxOfRateLimiter(limiters ...TypedRateLimiter[any]) RateLimiter { + return NewTypedMaxOfRateLimiter(limiters...) } -func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { +func NewTypedMaxOfRateLimiter[T comparable](limiters ...TypedRateLimiter[T]) TypedRateLimiter[T] { + return &TypedMaxOfRateLimiter[T]{limiters: limiters} +} + +func (r *TypedMaxOfRateLimiter[T]) NumRequeues(item T) int { ret := 0 for _, limiter := range r.limiters { curr := limiter.NumRequeues(item) @@ -204,23 +252,32 @@ func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { return ret } -func (r *MaxOfRateLimiter) Forget(item interface{}) { +func (r *TypedMaxOfRateLimiter[T]) Forget(item T) { for _, limiter := range r.limiters { limiter.Forget(item) } } // WithMaxWaitRateLimiter have maxDelay which avoids waiting too long -type WithMaxWaitRateLimiter struct { - limiter RateLimiter +// Deprecated: Use TypedWithMaxWaitRateLimiter instead. 
+type WithMaxWaitRateLimiter = TypedWithMaxWaitRateLimiter[any] + +// TypedWithMaxWaitRateLimiter have maxDelay which avoids waiting too long +type TypedWithMaxWaitRateLimiter[T comparable] struct { + limiter TypedRateLimiter[T] maxDelay time.Duration } +// Deprecated: NewWithMaxWaitRateLimiter is deprecated, use NewTypedWithMaxWaitRateLimiter instead. func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter { - return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay} + return NewTypedWithMaxWaitRateLimiter[any](limiter, maxDelay) +} + +func NewTypedWithMaxWaitRateLimiter[T comparable](limiter TypedRateLimiter[T], maxDelay time.Duration) TypedRateLimiter[T] { + return &TypedWithMaxWaitRateLimiter[T]{limiter: limiter, maxDelay: maxDelay} } -func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { +func (w TypedWithMaxWaitRateLimiter[T]) When(item T) time.Duration { delay := w.limiter.When(item) if delay > w.maxDelay { return w.maxDelay @@ -229,10 +286,10 @@ func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { return delay } -func (w WithMaxWaitRateLimiter) Forget(item interface{}) { +func (w TypedWithMaxWaitRateLimiter[T]) Forget(item T) { w.limiter.Forget(item) } -func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int { +func (w TypedWithMaxWaitRateLimiter[T]) NumRequeues(item T) int { return w.limiter.NumRequeues(item) } diff --git a/constraint/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/constraint/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index c1df72030..e33a6c692 100644 --- a/constraint/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/constraint/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -27,14 +27,25 @@ import ( // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to // requeue items after failures without ending up in a hot-loop. -type DelayingInterface interface { - Interface +// +// Deprecated: use TypedDelayingInterface instead. +type DelayingInterface TypedDelayingInterface[any] + +// TypedDelayingInterface is an Interface that can Add an item at a later time. This makes it easier to +// requeue items after failures without ending up in a hot-loop. +type TypedDelayingInterface[T comparable] interface { + TypedInterface[T] // AddAfter adds an item to the workqueue after the indicated duration has passed - AddAfter(item interface{}, duration time.Duration) + AddAfter(item T, duration time.Duration) } // DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. -type DelayingQueueConfig struct { +// +// Deprecated: use TypedDelayingQueueConfig instead. +type DelayingQueueConfig = TypedDelayingQueueConfig[any] + +// TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +type TypedDelayingQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -46,25 +57,49 @@ type DelayingQueueConfig struct { Clock clock.WithTicker // Queue optionally allows injecting custom queue Interface instead of the default one. - Queue Interface + Queue TypedInterface[T] } // NewDelayingQueue constructs a new workqueue with delayed queuing ability. // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use // NewDelayingQueueWithConfig instead and specify a name. +// +// Deprecated: use NewTypedDelayingQueue instead. 
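// ----------------------------------------------------------------------------
// Editor's sketch, not part of the vendored diff: the typed replacements for
// the deprecated delaying-queue constructors above. The queue name, key type
// and delay are illustrative.
package delayingexample

import (
	"time"

	"k8s.io/client-go/util/workqueue"
)

// newExampleQueue builds a delaying queue whose items are plain strings.
func newExampleQueue() workqueue.TypedDelayingInterface[string] {
	return workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[string]{
		Name: "example",
	})
}

// requeueLater re-adds a key after a fixed, illustrative delay; AddAfter now
// takes the typed item directly instead of interface{}.
func requeueLater(q workqueue.TypedDelayingInterface[string], key string) {
	q.AddAfter(key, 5*time.Second)
}
// ----------------------------------------------------------------------------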
func NewDelayingQueue() DelayingInterface { return NewDelayingQueueWithConfig(DelayingQueueConfig{}) } +// NewTypedDelayingQueue constructs a new workqueue with delayed queuing ability. +// NewTypedDelayingQueue does not emit metrics. For use with a MetricsProvider, please use +// NewTypedDelayingQueueWithConfig instead and specify a name. +func NewTypedDelayingQueue[T comparable]() TypedDelayingInterface[T] { + return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{}) +} + // NewDelayingQueueWithConfig constructs a new workqueue with options to // customize different properties. +// +// Deprecated: use NewTypedDelayingQueueWithConfig instead. func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { + return NewTypedDelayingQueueWithConfig[any](config) +} + +// TypedNewDelayingQueue exists for backwards compatibility only. +// +// Deprecated: use NewTypedDelayingQueueWithConfig instead. +func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] { + return NewTypedDelayingQueue[T]() +} + +// NewTypedDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] { if config.Clock == nil { config.Clock = clock.RealClock{} } if config.Queue == nil { - config.Queue = NewWithConfig(QueueConfig{ + config.Queue = NewTypedWithConfig[T](TypedQueueConfig[T]{ Name: config.Name, MetricsProvider: config.MetricsProvider, Clock: config.Clock, @@ -100,13 +135,13 @@ func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) Delayi }) } -func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType { - ret := &delayingType{ - Interface: q, +func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] { + ret := &delayingType[T]{ + TypedInterface: q, clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), - waitingForAddCh: make(chan *waitFor, 1000), + waitingForAddCh: make(chan *waitFor[T], 1000), metrics: newRetryMetrics(name, provider), } @@ -115,8 +150,8 @@ func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider } // delayingType wraps an Interface and provides delayed re-enquing -type delayingType struct { - Interface +type delayingType[T comparable] struct { + TypedInterface[T] // clock tracks time for delayed firing clock clock.Clock @@ -130,15 +165,15 @@ type delayingType struct { heartbeat clock.Ticker // waitingForAddCh is a buffered channel that feeds waitingForAdd - waitingForAddCh chan *waitFor + waitingForAddCh chan *waitFor[T] // metrics counts the number of retries metrics retryMetrics } // waitFor holds the data to add and the time it should be added -type waitFor struct { - data t +type waitFor[T any] struct { + data T readyAt time.Time // index in the priority queue (heap) index int @@ -152,15 +187,15 @@ type waitFor struct { // it has been removed from the queue and placed at index Len()-1 by // container/heap. Push adds an item at index Len(), and container/heap // percolates it into the correct location. 
-type waitForPriorityQueue []*waitFor +type waitForPriorityQueue[T any] []*waitFor[T] -func (pq waitForPriorityQueue) Len() int { +func (pq waitForPriorityQueue[T]) Len() int { return len(pq) } -func (pq waitForPriorityQueue) Less(i, j int) bool { +func (pq waitForPriorityQueue[T]) Less(i, j int) bool { return pq[i].readyAt.Before(pq[j].readyAt) } -func (pq waitForPriorityQueue) Swap(i, j int) { +func (pq waitForPriorityQueue[T]) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] pq[i].index = i pq[j].index = j @@ -168,16 +203,16 @@ func (pq waitForPriorityQueue) Swap(i, j int) { // Push adds an item to the queue. Push should not be called directly; instead, // use `heap.Push`. -func (pq *waitForPriorityQueue) Push(x interface{}) { +func (pq *waitForPriorityQueue[T]) Push(x interface{}) { n := len(*pq) - item := x.(*waitFor) + item := x.(*waitFor[T]) item.index = n *pq = append(*pq, item) } // Pop removes an item from the queue. Pop should not be called directly; // instead, use `heap.Pop`. -func (pq *waitForPriorityQueue) Pop() interface{} { +func (pq *waitForPriorityQueue[T]) Pop() interface{} { n := len(*pq) item := (*pq)[n-1] item.index = -1 @@ -187,22 +222,22 @@ func (pq *waitForPriorityQueue) Pop() interface{} { // Peek returns the item at the beginning of the queue, without removing the // item or otherwise mutating the queue. It is safe to call directly. -func (pq waitForPriorityQueue) Peek() interface{} { +func (pq waitForPriorityQueue[T]) Peek() interface{} { return pq[0] } // ShutDown stops the queue. After the queue drains, the returned shutdown bool // on Get() will be true. This method may be invoked more than once. -func (q *delayingType) ShutDown() { +func (q *delayingType[T]) ShutDown() { q.stopOnce.Do(func() { - q.Interface.ShutDown() + q.TypedInterface.ShutDown() close(q.stopCh) q.heartbeat.Stop() }) } // AddAfter adds the given item to the work queue after the given delay -func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { +func (q *delayingType[T]) AddAfter(item T, duration time.Duration) { // don't add if we're already shutting down if q.ShuttingDown() { return @@ -219,7 +254,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { select { case <-q.stopCh: // unblock if ShutDown() is called - case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: + case q.waitingForAddCh <- &waitFor[T]{data: item, readyAt: q.clock.Now().Add(duration)}: } } @@ -229,7 +264,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { const maxWait = 10 * time.Second // waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. 
-func (q *delayingType) waitingLoop() { +func (q *delayingType[T]) waitingLoop() { defer utilruntime.HandleCrash() // Make a placeholder channel to use when there are no items in our list @@ -238,13 +273,13 @@ func (q *delayingType) waitingLoop() { // Make a timer that expires when the item at the head of the waiting queue is ready var nextReadyAtTimer clock.Timer - waitingForQueue := &waitForPriorityQueue{} + waitingForQueue := &waitForPriorityQueue[T]{} heap.Init(waitingForQueue) - waitingEntryByData := map[t]*waitFor{} + waitingEntryByData := map[T]*waitFor[T]{} for { - if q.Interface.ShuttingDown() { + if q.TypedInterface.ShuttingDown() { return } @@ -252,12 +287,12 @@ func (q *delayingType) waitingLoop() { // Add ready entries for waitingForQueue.Len() > 0 { - entry := waitingForQueue.Peek().(*waitFor) + entry := waitingForQueue.Peek().(*waitFor[T]) if entry.readyAt.After(now) { break } - entry = heap.Pop(waitingForQueue).(*waitFor) + entry = heap.Pop(waitingForQueue).(*waitFor[T]) q.Add(entry.data) delete(waitingEntryByData, entry.data) } @@ -268,7 +303,7 @@ func (q *delayingType) waitingLoop() { if nextReadyAtTimer != nil { nextReadyAtTimer.Stop() } - entry := waitingForQueue.Peek().(*waitFor) + entry := waitingForQueue.Peek().(*waitFor[T]) nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now)) nextReadyAt = nextReadyAtTimer.C() } @@ -308,7 +343,7 @@ func (q *delayingType) waitingLoop() { } // insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue -func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) { +func insert[T comparable](q *waitForPriorityQueue[T], knownEntries map[T]*waitFor[T], entry *waitFor[T]) { // if the entry already exists, update the time only if it would cause the item to be queued sooner existing, exists := knownEntries[entry.data] if exists { diff --git a/constraint/vendor/k8s.io/client-go/util/workqueue/metrics.go b/constraint/vendor/k8s.io/client-go/util/workqueue/metrics.go index f012ccc55..4400cb65e 100644 --- a/constraint/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/constraint/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -26,10 +26,10 @@ import ( // This file provides abstractions for setting the provider (e.g., prometheus) // of metrics. -type queueMetrics interface { - add(item t) - get(item t) - done(item t) +type queueMetrics[T comparable] interface { + add(item T) + get(item T) + done(item T) updateUnfinishedWork() } @@ -70,7 +70,7 @@ func (noopMetric) Set(float64) {} func (noopMetric) Observe(float64) {} // defaultQueueMetrics expects the caller to lock before setting any metrics. -type defaultQueueMetrics struct { +type defaultQueueMetrics[T comparable] struct { clock clock.Clock // current depth of a workqueue @@ -81,15 +81,15 @@ type defaultQueueMetrics struct { latency HistogramMetric // how long processing an item from a workqueue takes workDuration HistogramMetric - addTimes map[t]time.Time - processingStartTimes map[t]time.Time + addTimes map[T]time.Time + processingStartTimes map[T]time.Time // how long have current threads been working? 
unfinishedWorkSeconds SettableGaugeMetric longestRunningProcessor SettableGaugeMetric } -func (m *defaultQueueMetrics) add(item t) { +func (m *defaultQueueMetrics[T]) add(item T) { if m == nil { return } @@ -101,7 +101,7 @@ func (m *defaultQueueMetrics) add(item t) { } } -func (m *defaultQueueMetrics) get(item t) { +func (m *defaultQueueMetrics[T]) get(item T) { if m == nil { return } @@ -114,7 +114,7 @@ func (m *defaultQueueMetrics) get(item t) { } } -func (m *defaultQueueMetrics) done(item t) { +func (m *defaultQueueMetrics[T]) done(item T) { if m == nil { return } @@ -125,7 +125,7 @@ func (m *defaultQueueMetrics) done(item t) { } } -func (m *defaultQueueMetrics) updateUnfinishedWork() { +func (m *defaultQueueMetrics[T]) updateUnfinishedWork() { // Note that a summary metric would be better for this, but prometheus // doesn't seem to have non-hacky ways to reset the summary metrics. var total float64 @@ -141,15 +141,15 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() { m.longestRunningProcessor.Set(oldest) } -type noMetrics struct{} +type noMetrics[T any] struct{} -func (noMetrics) add(item t) {} -func (noMetrics) get(item t) {} -func (noMetrics) done(item t) {} -func (noMetrics) updateUnfinishedWork() {} +func (noMetrics[T]) add(item T) {} +func (noMetrics[T]) get(item T) {} +func (noMetrics[T]) done(item T) {} +func (noMetrics[T]) updateUnfinishedWork() {} // Gets the time since the specified start in seconds. -func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 { +func (m *defaultQueueMetrics[T]) sinceInSeconds(start time.Time) float64 { return m.clock.Since(start).Seconds() } @@ -210,28 +210,15 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { return noopMetric{} } -var globalMetricsFactory = queueMetricsFactory{ - metricsProvider: noopMetricsProvider{}, -} - -type queueMetricsFactory struct { - metricsProvider MetricsProvider +var globalMetricsProvider MetricsProvider = noopMetricsProvider{} - onlyOnce sync.Once -} +var setGlobalMetricsProviderOnce sync.Once -func (f *queueMetricsFactory) setProvider(mp MetricsProvider) { - f.onlyOnce.Do(func() { - f.metricsProvider = mp - }) -} - -func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics { - mp := f.metricsProvider +func newQueueMetrics[T comparable](mp MetricsProvider, name string, clock clock.Clock) queueMetrics[T] { if len(name) == 0 || mp == (noopMetricsProvider{}) { - return noMetrics{} + return noMetrics[T]{} } - return &defaultQueueMetrics{ + return &defaultQueueMetrics[T]{ clock: clock, depth: mp.NewDepthMetric(name), adds: mp.NewAddsMetric(name), @@ -239,8 +226,8 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu workDuration: mp.NewWorkDurationMetric(name), unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), - addTimes: map[t]time.Time{}, - processingStartTimes: map[t]time.Time{}, + addTimes: map[T]time.Time{}, + processingStartTimes: map[T]time.Time{}, } } @@ -251,7 +238,7 @@ func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { } if provider == nil { - provider = globalMetricsFactory.metricsProvider + provider = globalMetricsProvider } return &defaultRetryMetrics{ @@ -262,5 +249,7 @@ func newRetryMetrics(name string, provider MetricsProvider) retryMetrics { // SetProvider sets the metrics provider for all subsequently created work // queues. Only the first call has an effect. 
func SetProvider(metricsProvider MetricsProvider) { - globalMetricsFactory.setProvider(metricsProvider) + setGlobalMetricsProviderOnce.Do(func() { + globalMetricsProvider = metricsProvider + }) } diff --git a/constraint/vendor/k8s.io/client-go/util/workqueue/queue.go b/constraint/vendor/k8s.io/client-go/util/workqueue/queue.go index a363d1afb..3cec1768a 100644 --- a/constraint/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/constraint/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -23,18 +23,66 @@ import ( "k8s.io/utils/clock" ) -type Interface interface { - Add(item interface{}) +// Deprecated: Interface is deprecated, use TypedInterface instead. +type Interface TypedInterface[any] + +type TypedInterface[T comparable] interface { + Add(item T) Len() int - Get() (item interface{}, shutdown bool) - Done(item interface{}) + Get() (item T, shutdown bool) + Done(item T) ShutDown() ShutDownWithDrain() ShuttingDown() bool } +// Queue is the underlying storage for items. The functions below are always +// called from the same goroutine. +type Queue[T comparable] interface { + // Touch can be hooked when an existing item is added again. This may be + // useful if the implementation allows priority change for the given item. + Touch(item T) + // Push adds a new item. + Push(item T) + // Len tells the total number of items. + Len() int + // Pop retrieves an item. + Pop() (item T) +} + +// DefaultQueue is a slice based FIFO queue. +func DefaultQueue[T comparable]() Queue[T] { + return new(queue[T]) +} + +// queue is a slice which implements Queue. +type queue[T comparable] []T + +func (q *queue[T]) Touch(item T) {} + +func (q *queue[T]) Push(item T) { + *q = append(*q, item) +} + +func (q *queue[T]) Len() int { + return len(*q) +} + +func (q *queue[T]) Pop() (item T) { + item = (*q)[0] + + // The underlying array still exists and reference this object, so the object will not be garbage collected. + (*q)[0] = *new(T) + *q = (*q)[1:] + + return item +} + // QueueConfig specifies optional configurations to customize an Interface. -type QueueConfig struct { +// Deprecated: use TypedQueueConfig instead. +type QueueConfig = TypedQueueConfig[any] + +type TypedQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -44,18 +92,38 @@ type QueueConfig struct { // Clock ability to inject real or fake clock for testing purposes. Clock clock.WithTicker + + // Queue provides the underlying queue to use. It is optional and defaults to slice based FIFO queue. + Queue Queue[T] } // New constructs a new work queue (see the package comment). +// +// Deprecated: use NewTyped instead. func New() *Type { return NewWithConfig(QueueConfig{ Name: "", }) } +// NewTyped constructs a new work queue (see the package comment). +func NewTyped[T comparable]() *Typed[T] { + return NewTypedWithConfig(TypedQueueConfig[T]{ + Name: "", + }) +} + // NewWithConfig constructs a new workqueue with ability to // customize different properties. +// +// Deprecated: use NewTypedWithConfig instead. func NewWithConfig(config QueueConfig) *Type { + return NewTypedWithConfig(config) +} + +// NewTypedWithConfig constructs a new workqueue with ability to +// customize different properties. 
+func NewTypedWithConfig[T comparable](config TypedQueueConfig[T]) *Typed[T] { return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod) } @@ -69,32 +137,34 @@ func NewNamed(name string) *Type { // newQueueWithConfig constructs a new named workqueue // with the ability to customize different properties for testing purposes -func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { - var metricsFactory *queueMetricsFactory +func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] { + metricsProvider := globalMetricsProvider if config.MetricsProvider != nil { - metricsFactory = &queueMetricsFactory{ - metricsProvider: config.MetricsProvider, - } - } else { - metricsFactory = &globalMetricsFactory + metricsProvider = config.MetricsProvider } if config.Clock == nil { config.Clock = clock.RealClock{} } + if config.Queue == nil { + config.Queue = DefaultQueue[T]() + } + return newQueue( config.Clock, - metricsFactory.newQueueMetrics(config.Name, config.Clock), + config.Queue, + newQueueMetrics[T](metricsProvider, config.Name, config.Clock), updatePeriod, ) } -func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type { - t := &Type{ +func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics[T], updatePeriod time.Duration) *Typed[T] { + t := &Typed[T]{ clock: c, - dirty: set{}, - processing: set{}, + queue: queue, + dirty: set[T]{}, + processing: set[T]{}, cond: sync.NewCond(&sync.Mutex{}), metrics: metrics, unfinishedWorkUpdatePeriod: updatePeriod, @@ -102,7 +172,7 @@ func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Durati // Don't start the goroutine for a type of noMetrics so we don't consume // resources unnecessarily - if _, ok := metrics.(noMetrics); !ok { + if _, ok := metrics.(noMetrics[T]); !ok { go t.updateUnfinishedWorkLoop() } @@ -112,61 +182,68 @@ func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Durati const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond // Type is a work queue (see the package comment). -type Type struct { +// Deprecated: Use Typed instead. +type Type = Typed[any] + +type Typed[t comparable] struct { // queue defines the order in which we will work on items. Every // element of queue should be in the dirty set and not in the // processing set. - queue []t + queue Queue[t] // dirty defines all of the items that need to be processed. - dirty set + dirty set[t] // Things that are currently being processed are in the processing set. // These things may be simultaneously in the dirty set. When we finish // processing something and remove it from this set, we'll check if // it's in the dirty set, and if so, add it to the queue. - processing set + processing set[t] cond *sync.Cond shuttingDown bool drain bool - metrics queueMetrics + metrics queueMetrics[t] unfinishedWorkUpdatePeriod time.Duration clock clock.WithTicker } type empty struct{} -type t interface{} -type set map[t]empty +type set[t comparable] map[t]empty -func (s set) has(item t) bool { +func (s set[t]) has(item t) bool { _, exists := s[item] return exists } -func (s set) insert(item t) { +func (s set[t]) insert(item t) { s[item] = empty{} } -func (s set) delete(item t) { +func (s set[t]) delete(item t) { delete(s, item) } -func (s set) len() int { +func (s set[t]) len() int { return len(s) } // Add marks item as needing processing. 
-func (q *Type) Add(item interface{}) { +func (q *Typed[T]) Add(item T) { q.cond.L.Lock() defer q.cond.L.Unlock() if q.shuttingDown { return } if q.dirty.has(item) { + // the same item is added again before it is processed, call the Touch + // function if the queue cares about it (for e.g, reset its priority) + if !q.processing.has(item) { + q.queue.Touch(item) + } return } @@ -177,37 +254,34 @@ func (q *Type) Add(item interface{}) { return } - q.queue = append(q.queue, item) + q.queue.Push(item) q.cond.Signal() } // Len returns the current queue length, for informational purposes only. You // shouldn't e.g. gate a call to Add() or Get() on Len() being a particular // value, that can't be synchronized properly. -func (q *Type) Len() int { +func (q *Typed[T]) Len() int { q.cond.L.Lock() defer q.cond.L.Unlock() - return len(q.queue) + return q.queue.Len() } // Get blocks until it can return an item to be processed. If shutdown = true, // the caller should end their goroutine. You must call Done with item when you // have finished processing it. -func (q *Type) Get() (item interface{}, shutdown bool) { +func (q *Typed[T]) Get() (item T, shutdown bool) { q.cond.L.Lock() defer q.cond.L.Unlock() - for len(q.queue) == 0 && !q.shuttingDown { + for q.queue.Len() == 0 && !q.shuttingDown { q.cond.Wait() } - if len(q.queue) == 0 { + if q.queue.Len() == 0 { // We must be shutting down. - return nil, true + return *new(T), true } - item = q.queue[0] - // The underlying array still exists and reference this object, so the object will not be garbage collected. - q.queue[0] = nil - q.queue = q.queue[1:] + item = q.queue.Pop() q.metrics.get(item) @@ -220,7 +294,7 @@ func (q *Type) Get() (item interface{}, shutdown bool) { // Done marks item as done processing, and if it has been marked as dirty again // while it was being processed, it will be re-added to the queue for // re-processing. -func (q *Type) Done(item interface{}) { +func (q *Typed[T]) Done(item T) { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -228,7 +302,7 @@ func (q *Type) Done(item interface{}) { q.processing.delete(item) if q.dirty.has(item) { - q.queue = append(q.queue, item) + q.queue.Push(item) q.cond.Signal() } else if q.processing.len() == 0 { q.cond.Signal() @@ -237,7 +311,7 @@ func (q *Type) Done(item interface{}) { // ShutDown will cause q to ignore all new items added to it and // immediately instruct the worker goroutines to exit. -func (q *Type) ShutDown() { +func (q *Typed[T]) ShutDown() { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -255,7 +329,7 @@ func (q *Type) ShutDown() { // indefinitely. It is, however, safe to call ShutDown after having called // ShutDownWithDrain, as to force the queue shut down to terminate immediately // without waiting for the drainage. 
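// Illustrative sketch, not part of the vendored change: minimal use of the
// generics-based queue introduced above. The item type (string) and the queue
// name are assumptions for the example; the calls themselves
// (NewTypedWithConfig, Add, Get, Done, ShutDown) are the APIs added in this file.
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{
		Name: "example", // an empty Name would skip metrics registration in newQueueMetrics
	})

	q.Add("object-a")
	q.Add("object-a") // still dirty and not yet processing, so this only calls Touch on the Queue

	item, shutdown := q.Get() // item is a string; no interface{} assertion is needed anymore
	if !shutdown {
		fmt.Println("processing", item)
		q.Done(item)
	}
	q.ShutDown()
}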
-func (q *Type) ShutDownWithDrain() { +func (q *Typed[T]) ShutDownWithDrain() { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -268,14 +342,14 @@ func (q *Type) ShutDownWithDrain() { } } -func (q *Type) ShuttingDown() bool { +func (q *Typed[T]) ShuttingDown() bool { q.cond.L.Lock() defer q.cond.L.Unlock() return q.shuttingDown } -func (q *Type) updateUnfinishedWorkLoop() { +func (q *Typed[T]) updateUnfinishedWorkLoop() { t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod) defer t.Stop() for range t.C() { diff --git a/constraint/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/constraint/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go index 3e4016fb0..fe45afa5a 100644 --- a/constraint/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go +++ b/constraint/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -19,24 +19,33 @@ package workqueue import "k8s.io/utils/clock" // RateLimitingInterface is an interface that rate limits items being added to the queue. -type RateLimitingInterface interface { - DelayingInterface +// +// Deprecated: Use TypedRateLimitingInterface instead. +type RateLimitingInterface TypedRateLimitingInterface[any] + +// TypedRateLimitingInterface is an interface that rate limits items being added to the queue. +type TypedRateLimitingInterface[T comparable] interface { + TypedDelayingInterface[T] // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok - AddRateLimited(item interface{}) + AddRateLimited(item T) // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you // still have to call `Done` on the queue. - Forget(item interface{}) + Forget(item T) // NumRequeues returns back how many times the item was requeued - NumRequeues(item interface{}) int + NumRequeues(item T) int } // RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. +// +// Deprecated: Use TypedRateLimitingQueueConfig instead. +type RateLimitingQueueConfig = TypedRateLimitingQueueConfig[any] -type RateLimitingQueueConfig struct { +// TypedRateLimitingQueueConfig specifies optional configurations to customize a TypedRateLimitingInterface. +type TypedRateLimitingQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -48,36 +57,55 @@ type RateLimitingQueueConfig struct { Clock clock.WithTicker // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. - DelayingQueue DelayingInterface + DelayingQueue TypedDelayingInterface[T] } // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. // NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use // NewRateLimitingQueueWithConfig instead and specify a name. +// +// Deprecated: Use NewTypedRateLimitingQueue instead. func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) } +// NewTypedRateLimitingQueue constructs a new workqueue with rateLimited queuing ability +// Remember to call Forget! If you don't, you may end up tracking failures forever. +// NewTypedRateLimitingQueue does not emit metrics. 
For use with a MetricsProvider, please use +// NewTypedRateLimitingQueueWithConfig instead and specify a name. +func NewTypedRateLimitingQueue[T comparable](rateLimiter TypedRateLimiter[T]) TypedRateLimitingInterface[T] { + return NewTypedRateLimitingQueueWithConfig(rateLimiter, TypedRateLimitingQueueConfig[T]{}) +} + // NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability // with options to customize different properties. // Remember to call Forget! If you don't, you may end up tracking failures forever. +// +// Deprecated: Use NewTypedRateLimitingQueueWithConfig instead. func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { + return NewTypedRateLimitingQueueWithConfig(rateLimiter, config) +} + +// NewTypedRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. +func NewTypedRateLimitingQueueWithConfig[T comparable](rateLimiter TypedRateLimiter[T], config TypedRateLimitingQueueConfig[T]) TypedRateLimitingInterface[T] { if config.Clock == nil { config.Clock = clock.RealClock{} } if config.DelayingQueue == nil { - config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{ + config.DelayingQueue = NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{ Name: config.Name, MetricsProvider: config.MetricsProvider, Clock: config.Clock, }) } - return &rateLimitingType{ - DelayingInterface: config.DelayingQueue, - rateLimiter: rateLimiter, + return &rateLimitingType[T]{ + TypedDelayingInterface: config.DelayingQueue, + rateLimiter: rateLimiter, } } @@ -99,21 +127,21 @@ func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter } // rateLimitingType wraps an Interface and provides rateLimited re-enquing -type rateLimitingType struct { - DelayingInterface +type rateLimitingType[T comparable] struct { + TypedDelayingInterface[T] - rateLimiter RateLimiter + rateLimiter TypedRateLimiter[T] } // AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok -func (q *rateLimitingType) AddRateLimited(item interface{}) { - q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) +func (q *rateLimitingType[T]) AddRateLimited(item T) { + q.TypedDelayingInterface.AddAfter(item, q.rateLimiter.When(item)) } -func (q *rateLimitingType) NumRequeues(item interface{}) int { +func (q *rateLimitingType[T]) NumRequeues(item T) int { return q.rateLimiter.NumRequeues(item) } -func (q *rateLimitingType) Forget(item interface{}) { +func (q *rateLimitingType[T]) Forget(item T) { q.rateLimiter.Forget(item) } diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go b/constraint/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go new file mode 100644 index 000000000..11adc2683 --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go @@ -0,0 +1,147 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "crypto/tls" + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" +) + +var ( + // ciphers maps strings into tls package cipher constants in + // https://golang.org/pkg/crypto/tls/#pkg-constants + ciphers = map[string]uint16{} + insecureCiphers = map[string]uint16{} +) + +func init() { + for _, suite := range tls.CipherSuites() { + ciphers[suite.Name] = suite.ID + } + // keep legacy names for backward compatibility + ciphers["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"] = tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + ciphers["TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"] = tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + + for _, suite := range tls.InsecureCipherSuites() { + insecureCiphers[suite.Name] = suite.ID + } +} + +// InsecureTLSCiphers returns the cipher suites implemented by crypto/tls which have +// security issues. +func InsecureTLSCiphers() map[string]uint16 { + cipherKeys := make(map[string]uint16, len(insecureCiphers)) + for k, v := range insecureCiphers { + cipherKeys[k] = v + } + return cipherKeys +} + +// InsecureTLSCipherNames returns a list of cipher suite names implemented by crypto/tls +// which have security issues. +func InsecureTLSCipherNames() []string { + cipherKeys := sets.NewString() + for key := range insecureCiphers { + cipherKeys.Insert(key) + } + return cipherKeys.List() +} + +// PreferredTLSCipherNames returns a list of cipher suite names implemented by crypto/tls. +func PreferredTLSCipherNames() []string { + cipherKeys := sets.NewString() + for key := range ciphers { + cipherKeys.Insert(key) + } + return cipherKeys.List() +} + +func allCiphers() map[string]uint16 { + acceptedCiphers := make(map[string]uint16, len(ciphers)+len(insecureCiphers)) + for k, v := range ciphers { + acceptedCiphers[k] = v + } + for k, v := range insecureCiphers { + acceptedCiphers[k] = v + } + return acceptedCiphers +} + +// TLSCipherPossibleValues returns all acceptable cipher suite names. +// This is a combination of both InsecureTLSCipherNames() and PreferredTLSCipherNames(). +func TLSCipherPossibleValues() []string { + cipherKeys := sets.NewString() + acceptedCiphers := allCiphers() + for key := range acceptedCiphers { + cipherKeys.Insert(key) + } + return cipherKeys.List() +} + +// TLSCipherSuites returns a list of cipher suite IDs from the cipher suite names passed. +func TLSCipherSuites(cipherNames []string) ([]uint16, error) { + if len(cipherNames) == 0 { + return nil, nil + } + ciphersIntSlice := make([]uint16, 0) + possibleCiphers := allCiphers() + for _, cipher := range cipherNames { + intValue, ok := possibleCiphers[cipher] + if !ok { + return nil, fmt.Errorf("Cipher suite %s not supported or doesn't exist", cipher) + } + ciphersIntSlice = append(ciphersIntSlice, intValue) + } + return ciphersIntSlice, nil +} + +var versions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLSPossibleVersions returns all acceptable values for TLS Version. +func TLSPossibleVersions() []string { + versionsKeys := sets.NewString() + for key := range versions { + versionsKeys.Insert(key) + } + return versionsKeys.List() +} + +// TLSVersion returns the TLS Version ID for the version name passed. 
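// Illustrative sketch, not part of the vendored file: feeding flag values through
// the helpers above to build a *tls.Config. The cipher and version names are
// assumptions chosen from the values these functions accept.
package main

import (
	"crypto/tls"
	"log"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	suites, err := cliflag.TLSCipherSuites([]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"})
	if err != nil {
		log.Fatal(err) // unknown suite names are rejected rather than silently dropped
	}
	minVersion, err := cliflag.TLSVersion("VersionTLS12")
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{CipherSuites: suites, MinVersion: minVersion}
	log.Printf("configured %d cipher suite(s), min TLS version %#x", len(cfg.CipherSuites), cfg.MinVersion)
}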
+func TLSVersion(versionName string) (uint16, error) { + if len(versionName) == 0 { + return DefaultTLSVersion(), nil + } + if version, ok := versions[versionName]; ok { + return version, nil + } + return 0, fmt.Errorf("unknown tls version %q", versionName) +} + +// DefaultTLSVersion defines the default TLS Version. +func DefaultTLSVersion() uint16 { + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + return tls.VersionTLS12 +} diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go b/constraint/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go new file mode 100644 index 000000000..728fa520b --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go @@ -0,0 +1,114 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "sort" + "strings" +) + +// ColonSeparatedMultimapStringString supports setting a map[string][]string from an encoding +// that separates keys from values with ':' and separates key-value pairs with ','. +// A key can be repeated multiple times, in which case the values are appended to a +// slice of strings associated with that key. Items in the list associated with a given +// key will appear in the order provided. +// For example: `a:hello,b:again,c:world,b:beautiful` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` +// The first call to Set will clear the map before adding entries; subsequent calls will simply append to the map. +// This makes it possible to override default values with a command-line option rather than appending to defaults, +// while still allowing the distribution of key-value pairs across multiple flag invocations. +// For example: `--flag "a:hello" --flag "b:again" --flag "b:beautiful" --flag "c:world"` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` +type ColonSeparatedMultimapStringString struct { + Multimap *map[string][]string + initialized bool // set to true after the first Set call + allowDefaultEmptyKey bool +} + +// NewColonSeparatedMultimapStringString takes a pointer to a map[string][]string and returns the +// ColonSeparatedMultimapStringString flag parsing shim for that map. +func NewColonSeparatedMultimapStringString(m *map[string][]string) *ColonSeparatedMultimapStringString { + return &ColonSeparatedMultimapStringString{Multimap: m} +} + +// NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey takes a pointer to a map[string][]string and returns the +// ColonSeparatedMultimapStringString flag parsing shim for that map. It allows default empty key with no colon in the flag. 
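// Illustrative sketch, not part of the vendored file: parsing the value from the
// doc comment above through the multimap shim. Only the variable names are assumed.
package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var m map[string][]string
	v := cliflag.NewColonSeparatedMultimapStringString(&m)
	if err := v.Set("a:hello,b:again,c:world,b:beautiful"); err != nil {
		panic(err)
	}
	fmt.Println(m) // map[a:[hello] b:[again beautiful] c:[world]]
}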
+func NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(m *map[string][]string) *ColonSeparatedMultimapStringString { + return &ColonSeparatedMultimapStringString{Multimap: m, allowDefaultEmptyKey: true} +} + +// Set implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) Set(value string) error { + if m.Multimap == nil { + return fmt.Errorf("no target (nil pointer to map[string][]string)") + } + if !m.initialized || *m.Multimap == nil { + // clear default values, or allocate if no existing map + *m.Multimap = make(map[string][]string) + m.initialized = true + } + for _, pair := range strings.Split(value, ",") { + if len(pair) == 0 { + continue + } + kv := strings.SplitN(pair, ":", 2) + var k, v string + if m.allowDefaultEmptyKey && len(kv) == 1 { + v = strings.TrimSpace(kv[0]) + } else { + if len(kv) != 2 { + return fmt.Errorf("malformed pair, expect string:string") + } + k = strings.TrimSpace(kv[0]) + v = strings.TrimSpace(kv[1]) + } + (*m.Multimap)[k] = append((*m.Multimap)[k], v) + } + return nil +} + +// String implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) String() string { + type kv struct { + k string + v string + } + kvs := make([]kv, 0, len(*m.Multimap)) + for k, vs := range *m.Multimap { + for i := range vs { + kvs = append(kvs, kv{k: k, v: vs[i]}) + } + } + // stable sort by keys, order of values should be preserved + sort.SliceStable(kvs, func(i, j int) bool { + return kvs[i].k < kvs[j].k + }) + pairs := make([]string, 0, len(kvs)) + for i := range kvs { + pairs = append(pairs, fmt.Sprintf("%s:%s", kvs[i].k, kvs[i].v)) + } + return strings.Join(pairs, ",") +} + +// Type implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) Type() string { + return "colonSeparatedMultimapStringString" +} + +// Empty implements OmitEmpty +func (m *ColonSeparatedMultimapStringString) Empty() bool { + return len(*m.Multimap) == 0 +} diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/configuration_map.go b/constraint/vendor/k8s.io/component-base/cli/flag/configuration_map.go new file mode 100644 index 000000000..911b05ec6 --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/configuration_map.go @@ -0,0 +1,53 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flag + +import ( + "fmt" + "sort" + "strings" +) + +type ConfigurationMap map[string]string + +func (m *ConfigurationMap) String() string { + pairs := []string{} + for k, v := range *m { + pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (m *ConfigurationMap) Set(value string) error { + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + if len(arr) == 2 { + (*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1]) + } else { + (*m)[strings.TrimSpace(arr[0])] = "" + } + } + return nil +} + +func (*ConfigurationMap) Type() string { + return "mapStringString" +} diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/flags.go b/constraint/vendor/k8s.io/component-base/cli/flag/flags.go new file mode 100644 index 000000000..8d4a59ce9 --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/flags.go @@ -0,0 +1,66 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + goflag "flag" + "strings" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" +) + +var underscoreWarnings = make(map[string]struct{}) + +// WordSepNormalizeFunc changes all flags that contain "_" separators +func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + if strings.Contains(name, "_") { + return pflag.NormalizedName(strings.Replace(name, "_", "-", -1)) + } + return pflag.NormalizedName(name) +} + +// WarnWordSepNormalizeFunc changes and warns for flags that contain "_" separators +func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + if strings.Contains(name, "_") { + nname := strings.Replace(name, "_", "-", -1) + if _, alreadyWarned := underscoreWarnings[name]; !alreadyWarned { + klog.Warningf("using an underscore in a flag name is not supported. %s has been converted to %s.", name, nname) + underscoreWarnings[name] = struct{}{} + } + + return pflag.NormalizedName(nname) + } + return pflag.NormalizedName(name) +} + +// InitFlags normalizes, parses, then logs the command line flags +func InitFlags() { + pflag.CommandLine.SetNormalizeFunc(WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) + pflag.Parse() + pflag.VisitAll(func(flag *pflag.Flag) { + klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + }) +} + +// PrintFlags logs the flags in the flagset +func PrintFlags(flags *pflag.FlagSet) { + flags.VisitAll(func(flag *pflag.Flag) { + klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + }) +} diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go b/constraint/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go new file mode 100644 index 000000000..bf8dbfb9b --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "sort" + "strings" +) + +// LangleSeparatedMapStringString can be set from the command line with the format `--flag "string 0 { + s = s + ":" + strings.Join(nkc.Names, ",") + } + return s +} + +func (nkc *NamedCertKey) Set(value string) error { + cs := strings.SplitN(value, ":", 2) + var keycert string + if len(cs) == 2 { + var names string + keycert, names = strings.TrimSpace(cs[0]), strings.TrimSpace(cs[1]) + if names == "" { + return errors.New("empty names list is not allowed") + } + nkc.Names = nil + for _, name := range strings.Split(names, ",") { + nkc.Names = append(nkc.Names, strings.TrimSpace(name)) + } + } else { + nkc.Names = nil + keycert = strings.TrimSpace(cs[0]) + } + cs = strings.Split(keycert, ",") + if len(cs) != 2 { + return errors.New("expected comma separated certificate and key file paths") + } + nkc.CertFile = strings.TrimSpace(cs[0]) + nkc.KeyFile = strings.TrimSpace(cs[1]) + return nil +} + +func (*NamedCertKey) Type() string { + return "namedCertKey" +} + +// NamedCertKeyArray is a flag value parsing NamedCertKeys, each passed with its own +// flag instance (in contrast to comma separated slices). +type NamedCertKeyArray struct { + value *[]NamedCertKey + changed bool +} + +var _ flag.Value = &NamedCertKeyArray{} + +// NewNamedKeyCertArray creates a new NamedCertKeyArray with the internal value +// pointing to p. +func NewNamedCertKeyArray(p *[]NamedCertKey) *NamedCertKeyArray { + return &NamedCertKeyArray{ + value: p, + } +} + +func (a *NamedCertKeyArray) Set(val string) error { + nkc := NamedCertKey{} + err := nkc.Set(val) + if err != nil { + return err + } + if !a.changed { + *a.value = []NamedCertKey{nkc} + a.changed = true + } else { + *a.value = append(*a.value, nkc) + } + return nil +} + +func (a *NamedCertKeyArray) Type() string { + return "namedCertKey" +} + +func (a *NamedCertKeyArray) String() string { + nkcs := make([]string, 0, len(*a.value)) + for i := range *a.value { + nkcs = append(nkcs, (*a.value)[i].String()) + } + return "[" + strings.Join(nkcs, ";") + "]" +} diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/noop.go b/constraint/vendor/k8s.io/component-base/cli/flag/noop.go new file mode 100644 index 000000000..03f7f14c0 --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/noop.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flag + +import ( + goflag "flag" + "github.com/spf13/pflag" +) + +// NoOp implements goflag.Value and plfag.Value, +// but has a noop Set implementation +type NoOp struct{} + +var _ goflag.Value = NoOp{} +var _ pflag.Value = NoOp{} + +func (NoOp) String() string { + return "" +} + +func (NoOp) Set(val string) error { + return nil +} + +func (NoOp) Type() string { + return "NoOp" +} diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/omitempty.go b/constraint/vendor/k8s.io/component-base/cli/flag/omitempty.go new file mode 100644 index 000000000..c354754ea --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/omitempty.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +// OmitEmpty is an interface for flags to report whether their underlying value +// is "empty." If a flag implements OmitEmpty and returns true for a call to Empty(), +// it is assumed that flag may be omitted from the command line. +type OmitEmpty interface { + Empty() bool +} diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/sectioned.go b/constraint/vendor/k8s.io/component-base/cli/flag/sectioned.go new file mode 100644 index 000000000..235742876 --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/sectioned.go @@ -0,0 +1,105 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "bytes" + "fmt" + "io" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +const ( + usageFmt = "Usage:\n %s\n" +) + +// NamedFlagSets stores named flag sets in the order of calling FlagSet. +type NamedFlagSets struct { + // Order is an ordered list of flag set names. + Order []string + // FlagSets stores the flag sets by name. + FlagSets map[string]*pflag.FlagSet + // NormalizeNameFunc is the normalize function which used to initialize FlagSets created by NamedFlagSets. + NormalizeNameFunc func(f *pflag.FlagSet, name string) pflag.NormalizedName +} + +// FlagSet returns the flag set with the given name and adds it to the +// ordered name list if it is not in there yet. 
+func (nfs *NamedFlagSets) FlagSet(name string) *pflag.FlagSet { + if nfs.FlagSets == nil { + nfs.FlagSets = map[string]*pflag.FlagSet{} + } + if _, ok := nfs.FlagSets[name]; !ok { + flagSet := pflag.NewFlagSet(name, pflag.ExitOnError) + flagSet.SetNormalizeFunc(pflag.CommandLine.GetNormalizeFunc()) + if nfs.NormalizeNameFunc != nil { + flagSet.SetNormalizeFunc(nfs.NormalizeNameFunc) + } + nfs.FlagSets[name] = flagSet + nfs.Order = append(nfs.Order, name) + } + return nfs.FlagSets[name] +} + +// PrintSections prints the given names flag sets in sections, with the maximal given column number. +// If cols is zero, lines are not wrapped. +func PrintSections(w io.Writer, fss NamedFlagSets, cols int) { + for _, name := range fss.Order { + fs := fss.FlagSets[name] + if !fs.HasFlags() { + continue + } + + wideFS := pflag.NewFlagSet("", pflag.ExitOnError) + wideFS.AddFlagSet(fs) + + var zzz string + if cols > 24 { + zzz = strings.Repeat("z", cols-24) + wideFS.Int(zzz, 0, strings.Repeat("z", cols-24)) + } + + var buf bytes.Buffer + fmt.Fprintf(&buf, "\n%s flags:\n\n%s", strings.ToUpper(name[:1])+name[1:], wideFS.FlagUsagesWrapped(cols)) + + if cols > 24 { + i := strings.Index(buf.String(), zzz) + lines := strings.Split(buf.String()[:i], "\n") + fmt.Fprint(w, strings.Join(lines[:len(lines)-1], "\n")) + fmt.Fprintln(w) + } else { + fmt.Fprint(w, buf.String()) + } + } +} + +// SetUsageAndHelpFunc set both usage and help function. +// Print the flag sets we need instead of all of them. +func SetUsageAndHelpFunc(cmd *cobra.Command, fss NamedFlagSets, cols int) { + cmd.SetUsageFunc(func(cmd *cobra.Command) error { + fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine()) + PrintSections(cmd.OutOrStderr(), fss, cols) + return nil + }) + cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) { + fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine()) + PrintSections(cmd.OutOrStdout(), fss, cols) + }) +} diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/string_flag.go b/constraint/vendor/k8s.io/component-base/cli/flag/string_flag.go new file mode 100644 index 000000000..331bdb66e --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/string_flag.go @@ -0,0 +1,56 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not. 
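// Illustrative sketch, not part of the vendored file: grouping a command's flags
// into the named sections provided by NamedFlagSets and printing them with
// PrintSections (both added above). The section and flag names are assumptions.
package main

import (
	"os"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var nfs cliflag.NamedFlagSets
	nfs.FlagSet("generic").String("config", "", "path to a configuration file")
	nfs.FlagSet("metrics").Bool("enable-metrics", true, "expose Prometheus metrics")

	// cols == 0 prints each section without line wrapping.
	cliflag.PrintSections(os.Stdout, nfs, 0)
}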
+type StringFlag struct { + // If Set has been invoked this value is true + provided bool + // The exact value provided on the flag + value string +} + +func NewStringFlag(defaultVal string) StringFlag { + return StringFlag{value: defaultVal} +} + +func (f *StringFlag) Default(value string) { + f.value = value +} + +func (f StringFlag) String() string { + return f.value +} + +func (f StringFlag) Value() string { + return f.value +} + +func (f *StringFlag) Set(value string) error { + f.value = value + f.provided = true + + return nil +} + +func (f StringFlag) Provided() bool { + return f.provided +} + +func (f *StringFlag) Type() string { + return "string" +} diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/string_slice_flag.go b/constraint/vendor/k8s.io/component-base/cli/flag/string_slice_flag.go new file mode 100644 index 000000000..ad0d07d75 --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/string_slice_flag.go @@ -0,0 +1,62 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + goflag "flag" + "fmt" + "strings" + + "github.com/spf13/pflag" +) + +// StringSlice implements goflag.Value and plfag.Value, +// and allows set to be invoked repeatedly to accumulate values. +type StringSlice struct { + value *[]string + changed bool +} + +func NewStringSlice(s *[]string) *StringSlice { + return &StringSlice{value: s} +} + +var _ goflag.Value = &StringSlice{} +var _ pflag.Value = &StringSlice{} + +func (s *StringSlice) String() string { + if s == nil || s.value == nil { + return "" + } + return strings.Join(*s.value, " ") +} + +func (s *StringSlice) Set(val string) error { + if s.value == nil { + return fmt.Errorf("no target (nil pointer to []string)") + } + if !s.changed { + *s.value = make([]string, 0) + } + *s.value = append(*s.value, val) + s.changed = true + return nil +} + +func (StringSlice) Type() string { + return "sliceString" +} diff --git a/constraint/vendor/k8s.io/component-base/cli/flag/tristate.go b/constraint/vendor/k8s.io/component-base/cli/flag/tristate.go new file mode 100644 index 000000000..cf16376bf --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/cli/flag/tristate.go @@ -0,0 +1,83 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "strconv" +) + +// Tristate is a flag compatible with flags and pflags that +// keeps track of whether it had a value supplied or not. 
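// Illustrative sketch, not part of the vendored file: accumulating a repeatable
// flag into a []string with the StringSlice shim above. The flag set and flag
// names are assumptions.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var values []string
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.Var(cliflag.NewStringSlice(&values), "value", "may be repeated")

	if err := fs.Parse([]string{"--value=a", "--value=b"}); err != nil {
		panic(err)
	}
	fmt.Println(values) // [a b]
}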
+type Tristate int + +const ( + Unset Tristate = iota // 0 + True + False +) + +func (f *Tristate) Default(value bool) { + *f = triFromBool(value) +} + +func (f Tristate) String() string { + b := boolFromTri(f) + return fmt.Sprintf("%t", b) +} + +func (f Tristate) Value() bool { + b := boolFromTri(f) + return b +} + +func (f *Tristate) Set(value string) error { + boolVal, err := strconv.ParseBool(value) + if err != nil { + return err + } + + *f = triFromBool(boolVal) + return nil +} + +func (f Tristate) Provided() bool { + if f != Unset { + return true + } + return false +} + +func (f *Tristate) Type() string { + return "tristate" +} + +func boolFromTri(t Tristate) bool { + if t == True { + return true + } else { + return false + } +} + +func triFromBool(b bool) Tristate { + if b { + return True + } else { + return False + } +} diff --git a/constraint/vendor/k8s.io/component-base/featuregate/feature_gate.go b/constraint/vendor/k8s.io/component-base/featuregate/feature_gate.go index 1e441289e..b6f08a6cd 100644 --- a/constraint/vendor/k8s.io/component-base/featuregate/feature_gate.go +++ b/constraint/vendor/k8s.io/component-base/featuregate/feature_gate.go @@ -19,6 +19,7 @@ package featuregate import ( "context" "fmt" + "reflect" "sort" "strconv" "strings" @@ -27,8 +28,12 @@ import ( "github.com/spf13/pflag" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/naming" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/version" featuremetrics "k8s.io/component-base/metrics/prometheus/feature" + baseversion "k8s.io/component-base/version" "k8s.io/klog/v2" ) @@ -52,13 +57,13 @@ const ( var ( // The generic features. - defaultFeatures = map[Feature]FeatureSpec{ - allAlphaGate: {Default: false, PreRelease: Alpha}, - allBetaGate: {Default: false, PreRelease: Beta}, + defaultFeatures = map[Feature]VersionedSpecs{ + allAlphaGate: {{Default: false, PreRelease: Alpha, Version: version.MajorMinor(0, 0)}}, + allBetaGate: {{Default: false, PreRelease: Beta, Version: version.MajorMinor(0, 0)}}, } // Special handling for a few gates. - specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){ + specialFeatures = map[Feature]func(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version){ allAlphaGate: setUnsetAlphaGates, allBetaGate: setUnsetBetaGates, } @@ -69,13 +74,28 @@ type FeatureSpec struct { Default bool // LockToDefault indicates that the feature is locked to its default and cannot be changed LockToDefault bool - // PreRelease indicates the maturity level of the feature + // PreRelease indicates the current maturity level of the feature PreRelease prerelease + // Version indicates the earliest version from which this FeatureSpec is valid. + // If multiple FeatureSpecs exist for a Feature, the one with the highest version that is less + // than or equal to the effective version of the component is used. + Version *version.Version } +type VersionedSpecs []FeatureSpec + +func (g VersionedSpecs) Len() int { return len(g) } +func (g VersionedSpecs) Less(i, j int) bool { + return g[i].Version.LessThan(g[j].Version) +} +func (g VersionedSpecs) Swap(i, j int) { g[i], g[j] = g[j], g[i] } + +type PromotionVersionMapping map[prerelease]string + type prerelease string const ( + PreAlpha = prerelease("PRE-ALPHA") // Values for PreRelease. 
Alpha = prerelease("ALPHA") Beta = prerelease("BETA") @@ -94,7 +114,9 @@ type FeatureGate interface { // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be // set on the copy without mutating the original. This is useful for validating // config against potential feature gate changes before committing those changes. - DeepCopy() MutableFeatureGate + DeepCopy() MutableVersionedFeatureGate + // Validate checks if the flag gates are valid at the emulated version. + Validate() []error } // MutableFeatureGate parses and stores flag gates for known features from @@ -104,6 +126,8 @@ type MutableFeatureGate interface { // AddFlag adds a flag for setting global feature gates to the specified FlagSet. AddFlag(fs *pflag.FlagSet) + // Close sets closed to true, and prevents subsequent calls to Add + Close() // Set parses and stores flag gates for known features // from a string like feature1=true,feature2=false,... Set(value string) error @@ -128,25 +152,77 @@ type MutableFeatureGate interface { OverrideDefault(name Feature, override bool) error } +// MutableVersionedFeatureGate parses and stores flag gates for known features from +// a string like feature1=true,feature2=false,... +// MutableVersionedFeatureGate sets options based on the emulated version of the featured gate. +type MutableVersionedFeatureGate interface { + MutableFeatureGate + // EmulationVersion returns the version the feature gate is set to emulate. + // If set, the feature gate would enable/disable features based on + // feature availability and pre-release at the emulated version instead of the binary version. + EmulationVersion() *version.Version + // SetEmulationVersion overrides the emulationVersion of the feature gate. + // Otherwise, the emulationVersion will be the same as the binary version. + // If set, the feature defaults and availability will be as if the binary is at the emulated version. + SetEmulationVersion(emulationVersion *version.Version) error + // GetAll returns a copy of the map of known feature names to versioned feature specs. + GetAllVersioned() map[Feature]VersionedSpecs + // AddVersioned adds versioned feature specs to the featureGate. + AddVersioned(features map[Feature]VersionedSpecs) error + // OverrideDefaultAtVersion sets a local override for the registered default value of a named + // feature for the prerelease lifecycle the given version is at. + // If the feature has not been previously registered (e.g. by a call to Add), + // has a locked default, or if the gate has already registered itself with a FlagSet, a non-nil + // error is returned. + // + // When two or more components consume a common feature, one component can override its + // default at runtime in order to adopt new defaults before or after the other + // components. For example, a new feature can be evaluated with a limited blast radius by + // overriding its default to true for a limited number of components without simultaneously + // changing its default for all consuming components. + OverrideDefaultAtVersion(name Feature, override bool, ver *version.Version) error + // ExplicitlySet returns true if the feature value is explicitly set instead of + // being derived from the default values or special features. + ExplicitlySet(name Feature) bool + // ResetFeatureValueToDefault resets the value of the feature back to the default value. 
+ ResetFeatureValueToDefault(name Feature) error + // DeepCopyAndReset copies all the registered features of the FeatureGate object, with all the known features and overrides, + // and resets all the enabled status of the new feature gate. + // This is useful for creating a new instance of feature gate without inheriting all the enabled configurations of the base feature gate. + DeepCopyAndReset() MutableVersionedFeatureGate +} + // featureGate implements FeatureGate as well as pflag.Value for flag parsing. type featureGate struct { featureGateName string - special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool) + special map[Feature]func(map[Feature]VersionedSpecs, map[Feature]bool, bool, *version.Version) - // lock guards writes to known, enabled, and reads/writes of closed + // lock guards writes to all below fields. lock sync.Mutex // known holds a map[Feature]FeatureSpec known atomic.Value // enabled holds a map[Feature]bool enabled atomic.Value + // enabledRaw holds a raw map[string]bool of the parsed flag. + // It keeps the original values of "special" features like "all alpha gates", + // while enabled keeps the values of all resolved features. + enabledRaw atomic.Value // closed is set to true when AddFlag is called, and prevents subsequent calls to Add closed bool + // queriedFeatures stores all the features that have been queried through the Enabled interface. + // It is reset when SetEmulationVersion is called. + queriedFeatures atomic.Value + emulationVersion atomic.Pointer[version.Version] } -func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { +func setUnsetAlphaGates(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version) { for k, v := range known { - if v.PreRelease == Alpha { + if k == "AllAlpha" || k == "AllBeta" { + continue + } + featureSpec := featureSpecAtEmulationVersion(v, cVer) + if featureSpec.PreRelease == Alpha { if _, found := enabled[k]; !found { enabled[k] = val } @@ -154,9 +230,13 @@ func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, } } -func setUnsetBetaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { +func setUnsetBetaGates(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version) { for k, v := range known { - if v.PreRelease == Beta { + if k == "AllAlpha" || k == "AllBeta" { + continue + } + featureSpec := featureSpecAtEmulationVersion(v, cVer) + if featureSpec.PreRelease == Beta { if _, found := enabled[k]; !found { enabled[k] = val } @@ -171,8 +251,10 @@ var _ pflag.Value = &featureGate{} // call chains, so they'd be unhelpful as names. var internalPackages = []string{"k8s.io/component-base/featuregate/feature_gate.go"} -func NewFeatureGate() *featureGate { - known := map[Feature]FeatureSpec{} +// NewVersionedFeatureGate creates a feature gate with the emulation version set to the provided version. +// SetEmulationVersion can be called after to change emulation version to a desired value. 
+func NewVersionedFeatureGate(emulationVersion *version.Version) *featureGate { + known := map[Feature]VersionedSpecs{} for k, v := range defaultFeatures { known[k] = v } @@ -183,10 +265,19 @@ func NewFeatureGate() *featureGate { } f.known.Store(known) f.enabled.Store(map[Feature]bool{}) - + f.enabledRaw.Store(map[string]bool{}) + f.emulationVersion.Store(emulationVersion) + f.queriedFeatures.Store(sets.Set[Feature]{}) + klog.V(1).Infof("new feature gate with emulationVersion=%s", f.emulationVersion.Load().String()) return f } +// NewFeatureGate creates a feature gate with the current binary version. +func NewFeatureGate() *featureGate { + binaryVersison := version.MustParse(baseversion.DefaultKubeBinaryVersion) + return NewVersionedFeatureGate(binaryVersison) +} + // Set parses a string of the form "key1=value1,key2=value2,..." into a // map[string]bool of known keys or returns an error. func (f *featureGate) Set(value string) error { @@ -210,35 +301,52 @@ func (f *featureGate) Set(value string) error { return f.SetFromMap(m) } -// SetFromMap stores flag gates for known features from a map[string]bool or returns an error -func (f *featureGate) SetFromMap(m map[string]bool) error { +// Validate checks if the flag gates are valid at the emulated version. +func (f *featureGate) Validate() []error { f.lock.Lock() defer f.lock.Unlock() + m, ok := f.enabledRaw.Load().(map[string]bool) + if !ok { + return []error{fmt.Errorf("cannot cast enabledRaw to map[string]bool")} + } + enabled := map[Feature]bool{} + return f.unsafeSetFromMap(enabled, m, f.EmulationVersion()) +} +// unsafeSetFromMap stores flag gates for known features from a map[string]bool into an enabled map. +func (f *featureGate) unsafeSetFromMap(enabled map[Feature]bool, m map[string]bool, emulationVersion *version.Version) []error { + var errs []error // Copy existing state - known := map[Feature]FeatureSpec{} - for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known := map[Feature]VersionedSpecs{} + for k, v := range f.known.Load().(map[Feature]VersionedSpecs) { + sort.Sort(v) known[k] = v } - enabled := map[Feature]bool{} - for k, v := range f.enabled.Load().(map[Feature]bool) { - enabled[k] = v - } for k, v := range m { - k := Feature(k) - featureSpec, ok := known[k] + key := Feature(k) + versionedSpecs, ok := known[key] if !ok { - return fmt.Errorf("unrecognized feature gate: %s", k) + // early return if encounters an unknown feature. + errs = append(errs, fmt.Errorf("unrecognized feature gate: %s", k)) + return errs } + featureSpec := featureSpecAtEmulationVersion(versionedSpecs, emulationVersion) if featureSpec.LockToDefault && featureSpec.Default != v { - return fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default) + errs = append(errs, fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default)) + continue } - enabled[k] = v // Handle "special" features like "all alpha gates" - if fn, found := f.special[k]; found { - fn(known, enabled, v) + if fn, found := f.special[key]; found { + fn(known, enabled, v, emulationVersion) + enabled[key] = v + continue + } + if featureSpec.PreRelease == PreAlpha { + errs = append(errs, fmt.Errorf("cannot set feature gate %v to %v, feature is PreAlpha at emulated version %s", k, v, emulationVersion.String())) + continue } + enabled[key] = v if featureSpec.PreRelease == Deprecated { klog.Warningf("Setting deprecated feature gate %s=%t. 
It will be removed in a future release.", k, v) @@ -246,13 +354,39 @@ func (f *featureGate) SetFromMap(m map[string]bool) error { klog.Warningf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v) } } + return errs +} - // Persist changes - f.known.Store(known) - f.enabled.Store(enabled) +// SetFromMap stores flag gates for known features from a map[string]bool or returns an error +func (f *featureGate) SetFromMap(m map[string]bool) error { + f.lock.Lock() + defer f.lock.Unlock() - klog.V(1).Infof("feature gates: %v", f.enabled) - return nil + // Copy existing state + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } + + // Update enabledRaw first. + // SetFromMap might be called when emulationVersion is not finalized yet, and we do not know the final state of enabled. + // But the flags still need to be saved. + for k, v := range m { + enabledRaw[k] = v + } + f.enabledRaw.Store(enabledRaw) + + errs := f.unsafeSetFromMap(enabled, enabledRaw, f.EmulationVersion()) + if len(errs) == 0 { + // Persist changes + f.enabled.Store(enabled) + klog.V(1).Infof("feature gates: %v", f.enabled) + } + return utilerrors.NewAggregate(errs) } // String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...". @@ -271,6 +405,17 @@ func (f *featureGate) Type() string { // Add adds features to the featureGate. func (f *featureGate) Add(features map[Feature]FeatureSpec) error { + vs := map[Feature]VersionedSpecs{} + for name, spec := range features { + // if no version is provided for the FeatureSpec, it is defaulted to version 0.0 so that it can be enabled/disabled regardless of emulation version. + spec.Version = version.MajorMinor(0, 0) + vs[name] = VersionedSpecs{spec} + } + return f.AddVersioned(vs) +} + +// AddVersioned adds versioned feature specs to the featureGate. 
+func (f *featureGate) AddVersioned(features map[Feature]VersionedSpecs) error { f.lock.Lock() defer f.lock.Unlock() @@ -279,20 +424,18 @@ func (f *featureGate) Add(features map[Feature]FeatureSpec) error { } // Copy existing state - known := map[Feature]FeatureSpec{} - for k, v := range f.known.Load().(map[Feature]FeatureSpec) { - known[k] = v - } + known := f.GetAllVersioned() - for name, spec := range features { + for name, specs := range features { + sort.Sort(specs) if existingSpec, found := known[name]; found { - if existingSpec == spec { + sort.Sort(existingSpec) + if reflect.DeepEqual(existingSpec, specs) { continue } return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec) } - - known[name] = spec + known[name] = specs } // Persist updated state @@ -302,6 +445,10 @@ func (f *featureGate) Add(features map[Feature]FeatureSpec) error { } func (f *featureGate) OverrideDefault(name Feature, override bool) error { + return f.OverrideDefaultAtVersion(name, override, f.EmulationVersion()) +} + +func (f *featureGate) OverrideDefaultAtVersion(name Feature, override bool, ver *version.Version) error { f.lock.Lock() defer f.lock.Unlock() @@ -309,17 +456,19 @@ func (f *featureGate) OverrideDefault(name Feature, override bool) error { return fmt.Errorf("cannot override default for feature %q: gates already added to a flag set", name) } - known := map[Feature]FeatureSpec{} - for name, spec := range f.known.Load().(map[Feature]FeatureSpec) { - known[name] = spec - } + // Copy existing state + known := f.GetAllVersioned() - spec, ok := known[name] - switch { - case !ok: + specs, ok := known[name] + if !ok { return fmt.Errorf("cannot override default: feature %q is not registered", name) + } + spec := featureSpecAtEmulationVersion(specs, ver) + switch { case spec.LockToDefault: return fmt.Errorf("cannot override default: feature %q default is locked to %t", name, spec.Default) + case spec.PreRelease == PreAlpha: + return fmt.Errorf("cannot override default: feature %q is not available before version %s", name, ver.String()) case spec.PreRelease == Deprecated: klog.Warningf("Overriding default of deprecated feature gate %s=%t. It will be removed in a future release.", name, override) case spec.PreRelease == GA: @@ -327,42 +476,155 @@ func (f *featureGate) OverrideDefault(name Feature, override bool) error { } spec.Default = override - known[name] = spec + known[name] = specs f.known.Store(known) return nil } -// GetAll returns a copy of the map of known feature names to feature specs. +// GetAll returns a copy of the map of known feature names to feature specs for the current emulationVersion. func (f *featureGate) GetAll() map[Feature]FeatureSpec { retval := map[Feature]FeatureSpec{} - for k, v := range f.known.Load().(map[Feature]FeatureSpec) { - retval[k] = v + f.lock.Lock() + versionedSpecs := f.GetAllVersioned() + emuVer := f.EmulationVersion() + f.lock.Unlock() + for k, v := range versionedSpecs { + spec := featureSpecAtEmulationVersion(v, emuVer) + if spec.PreRelease == PreAlpha { + // The feature is not available at the emulation version. + continue + } + retval[k] = *spec } return retval } +// GetAllVersioned returns a copy of the map of known feature names to versioned feature specs. 
+func (f *featureGate) GetAllVersioned() map[Feature]VersionedSpecs { + retval := map[Feature]VersionedSpecs{} + for k, v := range f.known.Load().(map[Feature]VersionedSpecs) { + vCopy := make([]FeatureSpec, len(v)) + _ = copy(vCopy, v) + retval[k] = vCopy + } + return retval +} + +func (f *featureGate) SetEmulationVersion(emulationVersion *version.Version) error { + if emulationVersion.EqualTo(f.EmulationVersion()) { + return nil + } + f.lock.Lock() + defer f.lock.Unlock() + klog.V(1).Infof("set feature gate emulationVersion to %s", emulationVersion.String()) + + // Copy existing state + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } + // enabled map should be reset whenever emulationVersion is changed. + enabled := map[Feature]bool{} + errs := f.unsafeSetFromMap(enabled, enabledRaw, emulationVersion) + + queriedFeatures := f.queriedFeatures.Load().(sets.Set[Feature]) + known := f.known.Load().(map[Feature]VersionedSpecs) + for feature := range queriedFeatures { + newVal := featureEnabled(feature, enabled, known, emulationVersion) + oldVal := featureEnabled(feature, f.enabled.Load().(map[Feature]bool), known, f.EmulationVersion()) + if newVal != oldVal { + klog.Warningf("SetEmulationVersion will change already queried feature:%s from %v to %v", feature, oldVal, newVal) + } + } + + if len(errs) == 0 { + // Persist changes + f.enabled.Store(enabled) + f.emulationVersion.Store(emulationVersion) + f.queriedFeatures.Store(sets.Set[Feature]{}) + } + return utilerrors.NewAggregate(errs) +} + +func (f *featureGate) EmulationVersion() *version.Version { + return f.emulationVersion.Load() +} + +// featureSpec returns the featureSpec at the EmulationVersion if the key exists, an error otherwise. +// This is useful to keep multiple implementations of a feature based on the PreRelease or Version info. +func (f *featureGate) featureSpec(key Feature) (FeatureSpec, error) { + if v, ok := f.known.Load().(map[Feature]VersionedSpecs)[key]; ok { + featureSpec := f.featureSpecAtEmulationVersion(v) + return *featureSpec, nil + } + return FeatureSpec{}, fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName) +} + +func (f *featureGate) unsafeRecordQueried(key Feature) { + queriedFeatures := f.queriedFeatures.Load().(sets.Set[Feature]) + if _, ok := queriedFeatures[key]; ok { + return + } + // Clone items from queriedFeatures before mutating it + newQueriedFeatures := queriedFeatures.Clone() + newQueriedFeatures.Insert(key) + f.queriedFeatures.Store(newQueriedFeatures) +} + +func featureEnabled(key Feature, enabled map[Feature]bool, known map[Feature]VersionedSpecs, emulationVersion *version.Version) bool { + // check explicitly set enabled list + if v, ok := enabled[key]; ok { + return v + } + if v, ok := known[key]; ok { + return featureSpecAtEmulationVersion(v, emulationVersion).Default + } + + panic(fmt.Errorf("feature %q is not registered in FeatureGate", key)) +} + // Enabled returns true if the key is enabled. If the key is not known, this call will panic. func (f *featureGate) Enabled(key Feature) bool { - if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok { - return v + // TODO: ideally we should lock the feature gate in this call to be safe, need to evaluate how much performance impact locking would have. 
+ v := featureEnabled(key, f.enabled.Load().(map[Feature]bool), f.known.Load().(map[Feature]VersionedSpecs), f.EmulationVersion()) + f.unsafeRecordQueried(key) + return v +} + +func (f *featureGate) featureSpecAtEmulationVersion(v VersionedSpecs) *FeatureSpec { + return featureSpecAtEmulationVersion(v, f.EmulationVersion()) +} + +func featureSpecAtEmulationVersion(v VersionedSpecs, emulationVersion *version.Version) *FeatureSpec { + i := len(v) - 1 + for ; i >= 0; i-- { + if v[i].Version.GreaterThan(emulationVersion) { + continue + } + return &v[i] } - if v, ok := f.known.Load().(map[Feature]FeatureSpec)[key]; ok { - return v.Default + return &FeatureSpec{ + Default: false, + PreRelease: PreAlpha, + Version: version.MajorMinor(0, 0), } +} - panic(fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName)) +// Close sets closed to true, and prevents subsequent calls to Add +func (f *featureGate) Close() { + f.lock.Lock() + f.closed = true + f.lock.Unlock() } // AddFlag adds a flag for setting global feature gates to the specified FlagSet. func (f *featureGate) AddFlag(fs *pflag.FlagSet) { - f.lock.Lock() // TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead? // Not all components expose a feature gates flag using this AddFlag method, and // in the future, all components will completely stop exposing a feature gates flag, // in favor of componentconfig. - f.closed = true - f.lock.Unlock() + f.Close() known := f.KnownFeatures() fs.Var(f, flagName, ""+ @@ -377,32 +639,50 @@ func (f *featureGate) AddMetrics() { } // KnownFeatures returns a slice of strings describing the FeatureGate's known features. -// Deprecated and GA features are hidden from the list. +// preAlpha, Deprecated and GA features are hidden from the list. func (f *featureGate) KnownFeatures() []string { var known []string - for k, v := range f.known.Load().(map[Feature]FeatureSpec) { - if v.PreRelease == GA || v.PreRelease == Deprecated { + for k, v := range f.known.Load().(map[Feature]VersionedSpecs) { + if k == "AllAlpha" || k == "AllBeta" { + known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v[0].PreRelease, v[0].Default)) + continue + } + featureSpec := f.featureSpecAtEmulationVersion(v) + if featureSpec.PreRelease == GA || featureSpec.PreRelease == Deprecated || featureSpec.PreRelease == PreAlpha { continue } - known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v.PreRelease, v.Default)) + known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, featureSpec.PreRelease, featureSpec.Default)) } sort.Strings(known) return known } +// DeepCopyAndReset copies all the registered features of the FeatureGate object, with all the known features and overrides, +// and resets all the enabled status of the new feature gate. +// This is useful for creating a new instance of feature gate without inheriting all the enabled configurations of the base feature gate. +func (f *featureGate) DeepCopyAndReset() MutableVersionedFeatureGate { + fg := NewVersionedFeatureGate(f.EmulationVersion()) + known := f.GetAllVersioned() + fg.known.Store(known) + return fg +} + // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be // set on the copy without mutating the original. This is useful for validating // config against potential feature gate changes before committing those changes. 
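The versioned feature gate resolves which FeatureSpec applies by walking the version-sorted specs from newest to oldest and taking the first entry whose Version is not greater than the emulation version; if none qualifies, the feature is treated as pre-alpha and disabled. A minimal sketch of that selection rule, using simplified stand-in types rather than the vendored featuregate API:

package main

import "fmt"

// spec is a simplified stand-in for featuregate.FeatureSpec.
type spec struct {
	Major, Minor int // stand-in for *version.Version
	Default      bool
	PreRelease   string
}

// atEmulationVersion mirrors the selection rule: the newest spec whose
// version is <= the emulation version wins; otherwise pre-alpha and off.
func atEmulationVersion(specs []spec, major, minor int) spec {
	for i := len(specs) - 1; i >= 0; i-- {
		s := specs[i]
		if s.Major > major || (s.Major == major && s.Minor > minor) {
			continue // spec introduced after the emulated version
		}
		return s
	}
	return spec{Default: false, PreRelease: "PRE-ALPHA"}
}

func main() {
	// A feature that graduated over releases: alpha in 1.29, beta (on) in 1.31.
	history := []spec{
		{Major: 1, Minor: 29, Default: false, PreRelease: "ALPHA"},
		{Major: 1, Minor: 31, Default: true, PreRelease: "BETA"},
	}
	fmt.Println(atEmulationVersion(history, 1, 28)) // pre-alpha, off
	fmt.Println(atEmulationVersion(history, 1, 30)) // alpha, off by default
	fmt.Println(atEmulationVersion(history, 1, 32)) // beta, on by default
}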
-func (f *featureGate) DeepCopy() MutableFeatureGate { +func (f *featureGate) DeepCopy() MutableVersionedFeatureGate { + f.lock.Lock() + defer f.lock.Unlock() // Copy existing state. - known := map[Feature]FeatureSpec{} - for k, v := range f.known.Load().(map[Feature]FeatureSpec) { - known[k] = v - } + known := f.GetAllVersioned() enabled := map[Feature]bool{} for k, v := range f.enabled.Load().(map[Feature]bool) { enabled[k] = v } + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } // Construct a new featureGate around the copied state. // Note that specialFeatures is treated as immutable by convention, @@ -411,9 +691,48 @@ func (f *featureGate) DeepCopy() MutableFeatureGate { special: specialFeatures, closed: f.closed, } - + fg.emulationVersion.Store(f.EmulationVersion()) fg.known.Store(known) fg.enabled.Store(enabled) - + fg.enabledRaw.Store(enabledRaw) + fg.queriedFeatures.Store(sets.Set[Feature]{}) return fg } + +// ExplicitlySet returns true if the feature value is explicitly set instead of +// being derived from the default values or special features. +func (f *featureGate) ExplicitlySet(name Feature) bool { + enabledRaw := f.enabledRaw.Load().(map[string]bool) + _, ok := enabledRaw[string(name)] + return ok +} + +// ResetFeatureValueToDefault resets the value of the feature back to the default value. +func (f *featureGate) ResetFeatureValueToDefault(name Feature) error { + f.lock.Lock() + defer f.lock.Unlock() + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } + _, inEnabled := enabled[name] + if inEnabled { + delete(enabled, name) + } + _, inEnabledRaw := enabledRaw[string(name)] + if inEnabledRaw { + delete(enabledRaw, string(name)) + } + // some features could be in enabled map but not enabledRaw map, + // for example some Alpha feature when AllAlpha is set. + if inEnabledRaw && !inEnabled { + return fmt.Errorf("feature:%s was explicitly set, but not in enabled map", name) + } + f.enabled.Store(enabled) + f.enabledRaw.Store(enabledRaw) + return nil +} diff --git a/constraint/vendor/k8s.io/component-base/featuregate/registry.go b/constraint/vendor/k8s.io/component-base/featuregate/registry.go new file mode 100644 index 000000000..cf35403da --- /dev/null +++ b/constraint/vendor/k8s.io/component-base/featuregate/registry.go @@ -0,0 +1,454 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package featuregate + +import ( + "fmt" + "sort" + "strings" + "sync" + + "github.com/spf13/pflag" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/version" + cliflag "k8s.io/component-base/cli/flag" + baseversion "k8s.io/component-base/version" + "k8s.io/klog/v2" +) + +// DefaultComponentGlobalsRegistry is the global var to store the effective versions and feature gates for all components for easy access. +// Example usage: +// // register the component effective version and feature gate first +// _, _ = utilversion.DefaultComponentGlobalsRegistry.ComponentGlobalsOrRegister(utilversion.DefaultKubeComponent, utilversion.DefaultKubeEffectiveVersion(), utilfeature.DefaultMutableFeatureGate) +// wardleEffectiveVersion := utilversion.NewEffectiveVersion("1.2") +// wardleFeatureGate := featuregate.NewFeatureGate() +// utilruntime.Must(utilversion.DefaultComponentGlobalsRegistry.Register(apiserver.WardleComponentName, wardleEffectiveVersion, wardleFeatureGate, false)) +// +// cmd := &cobra.Command{ +// ... +// // call DefaultComponentGlobalsRegistry.Set() in PersistentPreRunE +// PersistentPreRunE: func(*cobra.Command, []string) error { +// if err := utilversion.DefaultComponentGlobalsRegistry.Set(); err != nil { +// return err +// } +// ... +// }, +// RunE: func(c *cobra.Command, args []string) error { +// // call utilversion.DefaultComponentGlobalsRegistry.Validate() somewhere +// }, +// } +// +// flags := cmd.Flags() +// // add flags +// utilversion.DefaultComponentGlobalsRegistry.AddFlags(flags) +var DefaultComponentGlobalsRegistry ComponentGlobalsRegistry = NewComponentGlobalsRegistry() + +const ( + DefaultKubeComponent = "kube" + + klogLevel = 2 +) + +type VersionMapping func(from *version.Version) *version.Version + +// ComponentGlobals stores the global variables for a component for easy access. +type ComponentGlobals struct { + effectiveVersion baseversion.MutableEffectiveVersion + featureGate MutableVersionedFeatureGate + + // emulationVersionMapping contains the mapping from the emulation version of this component + // to the emulation version of another component. + emulationVersionMapping map[string]VersionMapping + // dependentEmulationVersion stores whether or not this component's EmulationVersion is dependent through mapping on another component. + // If true, the emulation version cannot be set from the flag, or version mapping from another component. + dependentEmulationVersion bool + // minCompatibilityVersionMapping contains the mapping from the min compatibility version of this component + // to the min compatibility version of another component. + minCompatibilityVersionMapping map[string]VersionMapping + // dependentMinCompatibilityVersion stores whether or not this component's MinCompatibilityVersion is dependent through mapping on another component + // If true, the min compatibility version cannot be set from the flag, or version mapping from another component. + dependentMinCompatibilityVersion bool +} + +type ComponentGlobalsRegistry interface { + // EffectiveVersionFor returns the EffectiveVersion registered under the component. + // Returns nil if the component is not registered. + EffectiveVersionFor(component string) baseversion.EffectiveVersion + // FeatureGateFor returns the FeatureGate registered under the component. + // Returns nil if the component is not registered. + FeatureGateFor(component string) FeatureGate + // Register registers the EffectiveVersion and FeatureGate for a component. 
+ // returns error if the component is already registered. + Register(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) error + // ComponentGlobalsOrRegister would return the registered global variables for the component if it already exists in the registry. + // Otherwise, the provided variables would be registered under the component, and the same variables would be returned. + ComponentGlobalsOrRegister(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) (baseversion.MutableEffectiveVersion, MutableVersionedFeatureGate) + // AddFlags adds flags of "--emulated-version" and "--feature-gates" + AddFlags(fs *pflag.FlagSet) + // Set sets the flags for all global variables for all components registered. + Set() error + // SetFallback calls Set() if it has never been called. + SetFallback() error + // Validate calls the Validate() function for all the global variables for all components registered. + Validate() []error + // Reset removes all stored ComponentGlobals, configurations, and version mappings. + Reset() + // SetEmulationVersionMapping sets the mapping from the emulation version of one component + // to the emulation version of another component. + // Once set, the emulation version of the toComponent will be determined by the emulation version of the fromComponent, + // and cannot be set from cmd flags anymore. + // For a given component, its emulation version can only depend on one other component, no multiple dependency is allowed. + SetEmulationVersionMapping(fromComponent, toComponent string, f VersionMapping) error +} + +type componentGlobalsRegistry struct { + componentGlobals map[string]*ComponentGlobals + mutex sync.RWMutex + // list of component name to emulation version set from the flag. + emulationVersionConfig []string + // map of component name to the list of feature gates set from the flag. + featureGatesConfig map[string][]string + // set stores if the Set() function for the registry is already called. 
+ set bool +} + +func NewComponentGlobalsRegistry() *componentGlobalsRegistry { + return &componentGlobalsRegistry{ + componentGlobals: make(map[string]*ComponentGlobals), + emulationVersionConfig: nil, + featureGatesConfig: nil, + } +} + +func (r *componentGlobalsRegistry) Reset() { + r.mutex.Lock() + defer r.mutex.Unlock() + r.componentGlobals = make(map[string]*ComponentGlobals) + r.emulationVersionConfig = nil + r.featureGatesConfig = nil + r.set = false +} + +func (r *componentGlobalsRegistry) EffectiveVersionFor(component string) baseversion.EffectiveVersion { + r.mutex.RLock() + defer r.mutex.RUnlock() + globals, ok := r.componentGlobals[component] + if !ok { + return nil + } + return globals.effectiveVersion +} + +func (r *componentGlobalsRegistry) FeatureGateFor(component string) FeatureGate { + r.mutex.RLock() + defer r.mutex.RUnlock() + globals, ok := r.componentGlobals[component] + if !ok { + return nil + } + return globals.featureGate +} + +func (r *componentGlobalsRegistry) unsafeRegister(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) error { + if _, ok := r.componentGlobals[component]; ok { + return fmt.Errorf("component globals of %s already registered", component) + } + if featureGate != nil { + if err := featureGate.SetEmulationVersion(effectiveVersion.EmulationVersion()); err != nil { + return err + } + } + c := ComponentGlobals{ + effectiveVersion: effectiveVersion, + featureGate: featureGate, + emulationVersionMapping: make(map[string]VersionMapping), + minCompatibilityVersionMapping: make(map[string]VersionMapping), + } + r.componentGlobals[component] = &c + return nil +} + +func (r *componentGlobalsRegistry) Register(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) error { + if effectiveVersion == nil { + return fmt.Errorf("cannot register nil effectiveVersion") + } + r.mutex.Lock() + defer r.mutex.Unlock() + return r.unsafeRegister(component, effectiveVersion, featureGate) +} + +func (r *componentGlobalsRegistry) ComponentGlobalsOrRegister(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) (baseversion.MutableEffectiveVersion, MutableVersionedFeatureGate) { + r.mutex.Lock() + defer r.mutex.Unlock() + globals, ok := r.componentGlobals[component] + if ok { + return globals.effectiveVersion, globals.featureGate + } + utilruntime.Must(r.unsafeRegister(component, effectiveVersion, featureGate)) + return effectiveVersion, featureGate +} + +func (r *componentGlobalsRegistry) unsafeKnownFeatures() []string { + var known []string + for component, globals := range r.componentGlobals { + if globals.featureGate == nil { + continue + } + for _, f := range globals.featureGate.KnownFeatures() { + known = append(known, component+":"+f) + } + } + sort.Strings(known) + return known +} + +func (r *componentGlobalsRegistry) unsafeVersionFlagOptions(isEmulation bool) []string { + var vs []string + for component, globals := range r.componentGlobals { + binaryVer := globals.effectiveVersion.BinaryVersion() + if isEmulation { + if globals.dependentEmulationVersion { + continue + } + // emulated version could be between binaryMajor.{binaryMinor} and binaryMajor.{binaryMinor} + // TODO: change to binaryMajor.{binaryMinor-1} and binaryMajor.{binaryMinor} in 1.32 + vs = append(vs, fmt.Sprintf("%s=%s..%s (default=%s)", component, + binaryVer.SubtractMinor(0).String(), binaryVer.String(), 
globals.effectiveVersion.EmulationVersion().String())) + } else { + if globals.dependentMinCompatibilityVersion { + continue + } + // min compatibility version could be between binaryMajor.{binaryMinor-1} and binaryMajor.{binaryMinor} + vs = append(vs, fmt.Sprintf("%s=%s..%s (default=%s)", component, + binaryVer.SubtractMinor(1).String(), binaryVer.String(), globals.effectiveVersion.MinCompatibilityVersion().String())) + } + } + sort.Strings(vs) + return vs +} + +func (r *componentGlobalsRegistry) AddFlags(fs *pflag.FlagSet) { + if r == nil { + return + } + r.mutex.Lock() + defer r.mutex.Unlock() + for _, globals := range r.componentGlobals { + if globals.featureGate != nil { + globals.featureGate.Close() + } + } + if r.emulationVersionConfig != nil || r.featureGatesConfig != nil { + klog.Warning("calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags") + } + r.emulationVersionConfig = []string{} + r.featureGatesConfig = make(map[string][]string) + + fs.StringSliceVar(&r.emulationVersionConfig, "emulated-version", r.emulationVersionConfig, ""+ + "The versions different components emulate their capabilities (APIs, features, ...) of.\n"+ + "If set, the component will emulate the behavior of this version instead of the underlying binary version.\n"+ + "Version format could only be major.minor, for example: '--emulated-version=wardle=1.2,kube=1.31'. Options are:\n"+strings.Join(r.unsafeVersionFlagOptions(true), "\n")+ + "If the component is not specified, defaults to \"kube\"") + + fs.Var(cliflag.NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(&r.featureGatesConfig), "feature-gates", "Comma-separated list of component:key=value pairs that describe feature gates for alpha/experimental features of different components.\n"+ + "If the component is not specified, defaults to \"kube\". This flag can be repeatedly invoked. For example: --feature-gates 'wardle:featureA=true,wardle:featureB=false' --feature-gates 'kube:featureC=true'"+ + "Options are:\n"+strings.Join(r.unsafeKnownFeatures(), "\n")) +} + +type componentVersion struct { + component string + ver *version.Version +} + +// getFullEmulationVersionConfig expands the given version config with version registered version mapping, +// and returns the map of component to Version. 
+func (r *componentGlobalsRegistry) getFullEmulationVersionConfig( + versionConfigMap map[string]*version.Version) (map[string]*version.Version, error) { + result := map[string]*version.Version{} + setQueue := []componentVersion{} + for comp, ver := range versionConfigMap { + if _, ok := r.componentGlobals[comp]; !ok { + return result, fmt.Errorf("component not registered: %s", comp) + } + klog.V(klogLevel).Infof("setting version %s=%s", comp, ver.String()) + setQueue = append(setQueue, componentVersion{comp, ver}) + } + for len(setQueue) > 0 { + cv := setQueue[0] + if _, visited := result[cv.component]; visited { + return result, fmt.Errorf("setting version of %s more than once, probably version mapping loop", cv.component) + } + setQueue = setQueue[1:] + result[cv.component] = cv.ver + for toComp, f := range r.componentGlobals[cv.component].emulationVersionMapping { + toVer := f(cv.ver) + if toVer == nil { + return result, fmt.Errorf("got nil version from mapping of %s=%s to component:%s", cv.component, cv.ver.String(), toComp) + } + klog.V(klogLevel).Infof("setting version %s=%s from version mapping of %s=%s", toComp, toVer.String(), cv.component, cv.ver.String()) + setQueue = append(setQueue, componentVersion{toComp, toVer}) + } + } + return result, nil +} + +func toVersionMap(versionConfig []string) (map[string]*version.Version, error) { + m := map[string]*version.Version{} + for _, compVer := range versionConfig { + // default to "kube" of component is not specified + k := "kube" + v := compVer + if strings.Contains(compVer, "=") { + arr := strings.SplitN(compVer, "=", 2) + if len(arr) != 2 { + return m, fmt.Errorf("malformed pair, expect string=string") + } + k = strings.TrimSpace(arr[0]) + v = strings.TrimSpace(arr[1]) + } + ver, err := version.Parse(v) + if err != nil { + return m, err + } + if ver.Patch() != 0 { + return m, fmt.Errorf("patch version not allowed, got: %s=%s", k, ver.String()) + } + if existingVer, ok := m[k]; ok { + return m, fmt.Errorf("duplicate version flag, %s=%s and %s=%s", k, existingVer.String(), k, ver.String()) + } + m[k] = ver + } + return m, nil +} + +func (r *componentGlobalsRegistry) SetFallback() error { + r.mutex.Lock() + set := r.set + r.mutex.Unlock() + if set { + return nil + } + klog.Warning("setting componentGlobalsRegistry in SetFallback. We recommend calling componentGlobalsRegistry.Set()" + + " right after parsing flags to avoid using feature gates before their final values are set by the flags.") + return r.Set() +} + +func (r *componentGlobalsRegistry) Set() error { + r.mutex.Lock() + defer r.mutex.Unlock() + r.set = true + emulationVersionConfigMap, err := toVersionMap(r.emulationVersionConfig) + if err != nil { + return err + } + for comp := range emulationVersionConfigMap { + if _, ok := r.componentGlobals[comp]; !ok { + return fmt.Errorf("component not registered: %s", comp) + } + // only components without any dependencies can be set from the flag. + if r.componentGlobals[comp].dependentEmulationVersion { + return fmt.Errorf("EmulationVersion of %s is set by mapping, cannot set it by flag", comp) + } + } + if emulationVersions, err := r.getFullEmulationVersionConfig(emulationVersionConfigMap); err != nil { + return err + } else { + for comp, ver := range emulationVersions { + r.componentGlobals[comp].effectiveVersion.SetEmulationVersion(ver) + } + } + // Set feature gate emulation version before setting feature gate flag values. 
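Each --emulated-version entry is an optional component prefix (defaulting to "kube") plus a version; entries with a nonzero patch version or duplicate components are rejected. A rough, stricter sketch of that parsing with plain string handling (only bare major.minor accepted) instead of the apimachinery version package:

package main

import (
	"fmt"
	"strings"
)

// parseEmulatedVersions sketches the flag parsing rules: "comp=major.minor"
// entries, defaulting the component to "kube" and rejecting duplicates.
func parseEmulatedVersions(entries []string) (map[string]string, error) {
	out := map[string]string{}
	for _, e := range entries {
		comp, ver := "kube", e
		if i := strings.Index(e, "="); i >= 0 {
			comp, ver = strings.TrimSpace(e[:i]), strings.TrimSpace(e[i+1:])
		}
		if strings.Count(ver, ".") != 1 {
			return nil, fmt.Errorf("version must be major.minor, got %q", ver)
		}
		if _, dup := out[comp]; dup {
			return nil, fmt.Errorf("duplicate version for component %q", comp)
		}
		out[comp] = ver
	}
	return out, nil
}

func main() {
	m, err := parseEmulatedVersions([]string{"wardle=1.2", "1.31"})
	fmt.Println(m, err) // map[kube:1.31 wardle:1.2] <nil>
}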
+ for comp, globals := range r.componentGlobals { + if globals.featureGate == nil { + continue + } + klog.V(klogLevel).Infof("setting %s:feature gate emulation version to %s", comp, globals.effectiveVersion.EmulationVersion().String()) + if err := globals.featureGate.SetEmulationVersion(globals.effectiveVersion.EmulationVersion()); err != nil { + return err + } + } + for comp, fg := range r.featureGatesConfig { + if comp == "" { + if _, ok := r.featureGatesConfig[DefaultKubeComponent]; ok { + return fmt.Errorf("set kube feature gates with default empty prefix or kube: prefix consistently, do not mix use") + } + comp = DefaultKubeComponent + } + if _, ok := r.componentGlobals[comp]; !ok { + return fmt.Errorf("component not registered: %s", comp) + } + featureGate := r.componentGlobals[comp].featureGate + if featureGate == nil { + return fmt.Errorf("component featureGate not registered: %s", comp) + } + flagVal := strings.Join(fg, ",") + klog.V(klogLevel).Infof("setting %s:feature-gates=%s", comp, flagVal) + if err := featureGate.Set(flagVal); err != nil { + return err + } + } + return nil +} + +func (r *componentGlobalsRegistry) Validate() []error { + var errs []error + r.mutex.Lock() + defer r.mutex.Unlock() + for _, globals := range r.componentGlobals { + errs = append(errs, globals.effectiveVersion.Validate()...) + if globals.featureGate != nil { + errs = append(errs, globals.featureGate.Validate()...) + } + } + return errs +} + +func (r *componentGlobalsRegistry) SetEmulationVersionMapping(fromComponent, toComponent string, f VersionMapping) error { + if f == nil { + return nil + } + klog.V(klogLevel).Infof("setting EmulationVersion mapping from %s to %s", fromComponent, toComponent) + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.componentGlobals[fromComponent]; !ok { + return fmt.Errorf("component not registered: %s", fromComponent) + } + if _, ok := r.componentGlobals[toComponent]; !ok { + return fmt.Errorf("component not registered: %s", toComponent) + } + // check multiple dependency + if r.componentGlobals[toComponent].dependentEmulationVersion { + return fmt.Errorf("mapping of %s already exists from another component", toComponent) + } + r.componentGlobals[toComponent].dependentEmulationVersion = true + + versionMapping := r.componentGlobals[fromComponent].emulationVersionMapping + if _, ok := versionMapping[toComponent]; ok { + return fmt.Errorf("EmulationVersion from %s to %s already exists", fromComponent, toComponent) + } + versionMapping[toComponent] = f + klog.V(klogLevel).Infof("setting the default EmulationVersion of %s based on mapping from the default EmulationVersion of %s", fromComponent, toComponent) + defaultFromVersion := r.componentGlobals[fromComponent].effectiveVersion.EmulationVersion() + emulationVersions, err := r.getFullEmulationVersionConfig(map[string]*version.Version{fromComponent: defaultFromVersion}) + if err != nil { + return err + } + for comp, ver := range emulationVersions { + r.componentGlobals[comp].effectiveVersion.SetEmulationVersion(ver) + } + return nil +} diff --git a/constraint/vendor/k8s.io/component-base/metrics/counter.go b/constraint/vendor/k8s.io/component-base/metrics/counter.go index 5664a68a9..8a7dd7154 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/counter.go +++ b/constraint/vendor/k8s.io/component-base/metrics/counter.go @@ -18,15 +18,19 @@ package metrics import ( "context" + "sync" "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel/trace" + dto 
"github.com/prometheus/client_model/go" ) // Counter is our internal representation for our wrapping struct around prometheus // counters. Counter implements both kubeCollector and CounterMetric. type Counter struct { + ctx context.Context CounterMetric *CounterOpts lazyMetric @@ -36,6 +40,14 @@ type Counter struct { // The implementation of the Metric interface is expected by testutil.GetCounterMetricValue. var _ Metric = &Counter{} +// All supported exemplar metric types implement the metricWithExemplar interface. +var _ metricWithExemplar = &Counter{} + +// exemplarCounterMetric holds a context to extract exemplar labels from, and a counter metric to attach them to. It implements the metricWithExemplar interface. +type exemplarCounterMetric struct { + *Counter +} + // NewCounter returns an object which satisfies the kubeCollector and CounterMetric interfaces. // However, the object returned will not measure anything unless the collector is first // registered, since the metric is lazily instantiated. @@ -93,11 +105,42 @@ func (c *Counter) initializeDeprecatedMetric() { c.initializeMetric() } -// WithContext allows the normal Counter metric to pass in context. The context is no-op now. +// WithContext allows the normal Counter metric to pass in context. func (c *Counter) WithContext(ctx context.Context) CounterMetric { + c.ctx = ctx return c.CounterMetric } +// withExemplar initializes the exemplarMetric object and sets the exemplar value. +func (c *Counter) withExemplar(v float64) { + (&exemplarCounterMetric{c}).withExemplar(v) +} + +func (c *Counter) Add(v float64) { + c.withExemplar(v) +} + +func (c *Counter) Inc() { + c.withExemplar(1) +} + +// withExemplar attaches an exemplar to the metric. +func (e *exemplarCounterMetric) withExemplar(v float64) { + if m, ok := e.CounterMetric.(prometheus.ExemplarAdder); ok { + maybeSpanCtx := trace.SpanContextFromContext(e.ctx) + if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() { + exemplarLabels := prometheus.Labels{ + "trace_id": maybeSpanCtx.TraceID().String(), + "span_id": maybeSpanCtx.SpanID().String(), + } + m.AddWithExemplar(v, exemplarLabels) + return + } + } + + e.CounterMetric.Add(v) +} + // CounterVec is the internal representation of our wrapping struct around prometheus // counterVecs. CounterVec implements both kubeCollector and CounterVecMetric. type CounterVec struct { @@ -119,11 +162,6 @@ func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec { opts.StabilityLevel.setDefaults() fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) - allowListLock.RLock() - if allowList, ok := labelValueAllowLists[fqName]; ok { - opts.LabelValueAllowLists = allowList - } - allowListLock.RUnlock() cv := &CounterVec{ CounterVec: noopCounterVec, @@ -176,7 +214,17 @@ func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainToAllowedList(v.originalLabels, lvs) + } + allowListLock.RUnlock() + }) } + return v.CounterVec.WithLabelValues(lvs...) 
} @@ -190,6 +238,15 @@ func (v *CounterVec) With(labels map[string]string) CounterMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainLabelMap(labels) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainLabelMap(labels) + } + allowListLock.RUnlock() + }) } return v.CounterVec.With(labels) } @@ -217,6 +274,13 @@ func (v *CounterVec) Reset() { v.CounterVec.Reset() } +// ResetLabelAllowLists resets the label allow list for the CounterVec. +// NOTE: This should only be used in test. +func (v *CounterVec) ResetLabelAllowLists() { + v.initializeLabelAllowListsOnce = sync.Once{} + v.LabelValueAllowLists = nil +} + // WithContext returns wrapped CounterVec with context func (v *CounterVec) WithContext(ctx context.Context) *CounterVecWithContext { return &CounterVecWithContext{ diff --git a/constraint/vendor/k8s.io/component-base/metrics/gauge.go b/constraint/vendor/k8s.io/component-base/metrics/gauge.go index 89631115a..0621560d0 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/gauge.go +++ b/constraint/vendor/k8s.io/component-base/metrics/gauge.go @@ -18,6 +18,7 @@ package metrics import ( "context" + "sync" "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" @@ -105,11 +106,6 @@ func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { opts.StabilityLevel.setDefaults() fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) - allowListLock.RLock() - if allowList, ok := labelValueAllowLists[fqName]; ok { - opts.LabelValueAllowLists = allowList - } - allowListLock.RUnlock() cv := &GaugeVec{ GaugeVec: noopGaugeVec, @@ -149,6 +145,15 @@ func (v *GaugeVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainToAllowedList(v.originalLabels, lvs) + } + allowListLock.RUnlock() + }) } elt, err := v.GaugeVec.GetMetricWithLabelValues(lvs...) return elt, err @@ -186,6 +191,15 @@ func (v *GaugeVec) WithChecked(labels map[string]string) (GaugeMetric, error) { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainLabelMap(labels) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainLabelMap(labels) + } + allowListLock.RUnlock() + }) } elt, err := v.GaugeVec.GetMetricWith(labels) return elt, err @@ -226,6 +240,13 @@ func (v *GaugeVec) Reset() { v.GaugeVec.Reset() } +// ResetLabelAllowLists resets the label allow list for the GaugeVec. +// NOTE: This should only be used in test. 
+func (v *GaugeVec) ResetLabelAllowLists() { + v.initializeLabelAllowListsOnce = sync.Once{} + v.LabelValueAllowLists = nil +} + func newGaugeFunc(opts *GaugeOpts, function func() float64, v semver.Version) GaugeFunc { g := NewGauge(opts) diff --git a/constraint/vendor/k8s.io/component-base/metrics/histogram.go b/constraint/vendor/k8s.io/component-base/metrics/histogram.go index e6884f35c..3065486ab 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/histogram.go +++ b/constraint/vendor/k8s.io/component-base/metrics/histogram.go @@ -18,20 +18,59 @@ package metrics import ( "context" + "sync" "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel/trace" ) // Histogram is our internal representation for our wrapping struct around prometheus // histograms. Summary implements both kubeCollector and ObserverMetric type Histogram struct { + ctx context.Context ObserverMetric *HistogramOpts lazyMetric selfCollector } +// exemplarHistogramMetric holds a context to extract exemplar labels from, and a historgram metric to attach them to. It implements the metricWithExemplar interface. +type exemplarHistogramMetric struct { + *Histogram +} + +type exemplarHistogramVec struct { + *HistogramVecWithContext + observer prometheus.Observer +} + +func (h *Histogram) Observe(v float64) { + h.withExemplar(v) +} + +// withExemplar initializes the exemplarMetric object and sets the exemplar value. +func (h *Histogram) withExemplar(v float64) { + (&exemplarHistogramMetric{h}).withExemplar(v) +} + +// withExemplar attaches an exemplar to the metric. +func (e *exemplarHistogramMetric) withExemplar(v float64) { + if m, ok := e.Histogram.ObserverMetric.(prometheus.ExemplarObserver); ok { + maybeSpanCtx := trace.SpanContextFromContext(e.ctx) + if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() { + exemplarLabels := prometheus.Labels{ + "trace_id": maybeSpanCtx.TraceID().String(), + "span_id": maybeSpanCtx.SpanID().String(), + } + m.ObserveWithExemplar(v, exemplarLabels) + return + } + } + + e.ObserverMetric.Observe(v) +} + // NewHistogram returns an object which is Histogram-like. However, nothing // will be measured until the histogram is registered somewhere. func NewHistogram(opts *HistogramOpts) *Histogram { @@ -74,6 +113,7 @@ func (h *Histogram) initializeDeprecatedMetric() { // WithContext allows the normal Histogram metric to pass in context. The context is no-op now. func (h *Histogram) WithContext(ctx context.Context) ObserverMetric { + h.ctx = ctx return h.ObserverMetric } @@ -96,11 +136,6 @@ func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec { opts.StabilityLevel.setDefaults() fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) - allowListLock.RLock() - if allowList, ok := labelValueAllowLists[fqName]; ok { - opts.LabelValueAllowLists = allowList - } - allowListLock.RUnlock() v := &HistogramVec{ HistogramVec: noopHistogramVec, @@ -148,6 +183,15 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainToAllowedList(v.originalLabels, lvs) + } + allowListLock.RUnlock() + }) } return v.HistogramVec.WithLabelValues(lvs...) 
} @@ -162,6 +206,15 @@ func (v *HistogramVec) With(labels map[string]string) ObserverMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainLabelMap(labels) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainLabelMap(labels) + } + allowListLock.RUnlock() + }) } return v.HistogramVec.With(labels) } @@ -189,6 +242,13 @@ func (v *HistogramVec) Reset() { v.HistogramVec.Reset() } +// ResetLabelAllowLists resets the label allow list for the HistogramVec. +// NOTE: This should only be used in test. +func (v *HistogramVec) ResetLabelAllowLists() { + v.initializeLabelAllowListsOnce = sync.Once{} + v.LabelValueAllowLists = nil +} + // WithContext returns wrapped HistogramVec with context func (v *HistogramVec) WithContext(ctx context.Context) *HistogramVecWithContext { return &HistogramVecWithContext{ @@ -203,12 +263,37 @@ type HistogramVecWithContext struct { ctx context.Context } +func (h *exemplarHistogramVec) Observe(v float64) { + h.withExemplar(v) +} + +func (h *exemplarHistogramVec) withExemplar(v float64) { + if m, ok := h.observer.(prometheus.ExemplarObserver); ok { + maybeSpanCtx := trace.SpanContextFromContext(h.HistogramVecWithContext.ctx) + if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() { + m.ObserveWithExemplar(v, prometheus.Labels{ + "trace_id": maybeSpanCtx.TraceID().String(), + "span_id": maybeSpanCtx.SpanID().String(), + }) + return + } + } + + h.observer.Observe(v) +} + // WithLabelValues is the wrapper of HistogramVec.WithLabelValues. -func (vc *HistogramVecWithContext) WithLabelValues(lvs ...string) ObserverMetric { - return vc.HistogramVec.WithLabelValues(lvs...) +func (vc *HistogramVecWithContext) WithLabelValues(lvs ...string) *exemplarHistogramVec { + return &exemplarHistogramVec{ + HistogramVecWithContext: vc, + observer: vc.HistogramVec.WithLabelValues(lvs...), + } } // With is the wrapper of HistogramVec.With. -func (vc *HistogramVecWithContext) With(labels map[string]string) ObserverMetric { - return vc.HistogramVec.With(labels) +func (vc *HistogramVecWithContext) With(labels map[string]string) *exemplarHistogramVec { + return &exemplarHistogramVec{ + HistogramVecWithContext: vc, + observer: vc.HistogramVec.With(labels), + } } diff --git a/constraint/vendor/k8s.io/component-base/metrics/metric.go b/constraint/vendor/k8s.io/component-base/metrics/metric.go index d68a98c44..c8b083995 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/metric.go +++ b/constraint/vendor/k8s.io/component-base/metrics/metric.go @@ -22,8 +22,8 @@ import ( "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" - promext "k8s.io/component-base/metrics/prometheusextension" + promext "k8s.io/component-base/metrics/prometheusextension" "k8s.io/klog/v2" ) @@ -210,6 +210,11 @@ func (c *selfCollector) Collect(ch chan<- prometheus.Metric) { ch <- c.metric } +// metricWithExemplar is an interface that knows how to attach an exemplar to certain supported metric types. 
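The exemplar wrappers above all follow the same pattern: if the underlying Prometheus metric implements ExemplarAdder/ExemplarObserver and the request context carries a valid, sampled OpenTelemetry span, the trace and span IDs are attached as exemplar labels; otherwise the plain Add/Observe path is used. A minimal sketch of that pattern against client_golang and the otel trace package (assuming those dependencies are available):

package main

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"
	"go.opentelemetry.io/otel/trace"
)

// addWithTraceExemplar increments a counter, attaching the trace/span IDs
// from ctx as an exemplar when the span is valid and sampled.
func addWithTraceExemplar(ctx context.Context, c prometheus.Counter, v float64) {
	if adder, ok := c.(prometheus.ExemplarAdder); ok {
		if sc := trace.SpanContextFromContext(ctx); sc.IsValid() && sc.IsSampled() {
			adder.AddWithExemplar(v, prometheus.Labels{
				"trace_id": sc.TraceID().String(),
				"span_id":  sc.SpanID().String(),
			})
			return
		}
	}
	c.Add(v) // no exemplar support or no sampled span: plain add
}

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_requests_total"})
	addWithTraceExemplar(context.Background(), c, 1) // background ctx: falls back to plain Add
}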
+type metricWithExemplar interface { + withExemplar(v float64) +} + // no-op vecs for convenience var noopCounterVec = &prometheus.CounterVec{} var noopHistogramVec = &prometheus.HistogramVec{} diff --git a/constraint/vendor/k8s.io/component-base/metrics/options.go b/constraint/vendor/k8s.io/component-base/metrics/options.go index 2c72cb48f..17f44ef2a 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/options.go +++ b/constraint/vendor/k8s.io/component-base/metrics/options.go @@ -129,7 +129,7 @@ func validateAllowMetricLabel(allowListMapping map[string]string) error { for k := range allowListMapping { reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex) if reg.FindString(k) != k { - return fmt.Errorf("--allow-metric-labels must have a list of kv pair with format `metricName:labelName=labelValue, labelValue,...`") + return fmt.Errorf("--allow-metric-labels must have a list of kv pair with format `metricName,labelName=labelValue, labelValue,...`") } } return nil diff --git a/constraint/vendor/k8s.io/component-base/metrics/opts.go b/constraint/vendor/k8s.io/component-base/metrics/opts.go index 30dfd2e3d..43015169e 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/opts.go +++ b/constraint/vendor/k8s.io/component-base/metrics/opts.go @@ -25,11 +25,11 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/yaml.v2" "k8s.io/apimachinery/pkg/util/sets" promext "k8s.io/component-base/metrics/prometheusextension" "k8s.io/klog/v2" + yaml "sigs.k8s.io/yaml/goyaml.v2" ) var ( @@ -37,6 +37,14 @@ var ( allowListLock sync.RWMutex ) +// ResetLabelValueAllowLists resets the allow lists for label values. +// NOTE: This should only be used in test. +func ResetLabelValueAllowLists() { + allowListLock.Lock() + defer allowListLock.Unlock() + labelValueAllowLists = map[string]*MetricLabelAllowList{} +} + // KubeOpts is superset struct for prometheus.Opts. The prometheus Opts structure // is purposefully not embedded here because that would change struct initialization // in the manner which people are currently accustomed. @@ -44,16 +52,17 @@ var ( // Name must be set to a non-empty string. DeprecatedVersion is defined only // if the metric for which this options applies is, in fact, deprecated. type KubeOpts struct { - Namespace string - Subsystem string - Name string - Help string - ConstLabels map[string]string - DeprecatedVersion string - deprecateOnce sync.Once - annotateOnce sync.Once - StabilityLevel StabilityLevel - LabelValueAllowLists *MetricLabelAllowList + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + initializeLabelAllowListsOnce sync.Once + LabelValueAllowLists *MetricLabelAllowList } // BuildFQName joins the given three name components by "_". Empty name @@ -160,17 +169,18 @@ func (o *GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts { // and can safely be left at their zero value, although it is strongly // encouraged to set a Help string. 
type HistogramOpts struct { - Namespace string - Subsystem string - Name string - Help string - ConstLabels map[string]string - Buckets []float64 - DeprecatedVersion string - deprecateOnce sync.Once - annotateOnce sync.Once - StabilityLevel StabilityLevel - LabelValueAllowLists *MetricLabelAllowList + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + initializeLabelAllowListsOnce sync.Once + LabelValueAllowLists *MetricLabelAllowList } // Modify help description on the metric description. @@ -206,18 +216,19 @@ func (o *HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts { // and can safely be left at their zero value, although it is strongly // encouraged to set a Help string. type TimingHistogramOpts struct { - Namespace string - Subsystem string - Name string - Help string - ConstLabels map[string]string - Buckets []float64 - InitialValue float64 - DeprecatedVersion string - deprecateOnce sync.Once - annotateOnce sync.Once - StabilityLevel StabilityLevel - LabelValueAllowLists *MetricLabelAllowList + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + InitialValue float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + initializeLabelAllowListsOnce sync.Once + LabelValueAllowLists *MetricLabelAllowList } // Modify help description on the metric description. @@ -255,20 +266,21 @@ func (o *TimingHistogramOpts) toPromHistogramOpts() promext.TimingHistogramOpts // a help string and to explicitly set the Objectives field to the desired value // as the default value will change in the upcoming v0.10 of the library. type SummaryOpts struct { - Namespace string - Subsystem string - Name string - Help string - ConstLabels map[string]string - Objectives map[float64]float64 - MaxAge time.Duration - AgeBuckets uint32 - BufCap uint32 - DeprecatedVersion string - deprecateOnce sync.Once - annotateOnce sync.Once - StabilityLevel StabilityLevel - LabelValueAllowLists *MetricLabelAllowList + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Objectives map[float64]float64 + MaxAge time.Duration + AgeBuckets uint32 + BufCap uint32 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + initializeLabelAllowListsOnce sync.Once + LabelValueAllowLists *MetricLabelAllowList } // Modify help description on the metric description. @@ -315,7 +327,7 @@ func (o *SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts { } type MetricLabelAllowList struct { - labelToAllowList map[string]sets.String + labelToAllowList map[string]sets.Set[string] } func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, labelValueList []string) { @@ -347,13 +359,13 @@ func SetLabelAllowListFromCLI(allowListMapping map[string]string) { for metricLabelName, labelValues := range allowListMapping { metricName := strings.Split(metricLabelName, ",")[0] labelName := strings.Split(metricLabelName, ",")[1] - valueSet := sets.NewString(strings.Split(labelValues, ",")...) + valueSet := sets.New[string](strings.Split(labelValues, ",")...) 
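MetricLabelAllowList also moves from the string-specific sets.String to the generic sets.Set[string] from apimachinery, so call sites switch from sets.NewString(...) to sets.New[string](...). A small usage sketch of the generic API (assuming k8s.io/apimachinery is on the module path):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// sets.New[string] replaces sets.NewString for the generic Set type.
	allowed := sets.New[string]("get", "list", "watch")
	allowed.Insert("patch")

	fmt.Println(allowed.Has("get"))    // true
	fmt.Println(allowed.Has("delete")) // false
	fmt.Println(allowed.Len())         // 4
}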
allowList, ok := labelValueAllowLists[metricName] if ok { allowList.labelToAllowList[labelName] = valueSet } else { - labelToAllowList := make(map[string]sets.String) + labelToAllowList := make(map[string]sets.Set[string]) labelToAllowList[labelName] = valueSet labelValueAllowLists[metricName] = &MetricLabelAllowList{ labelToAllowList, @@ -363,8 +375,6 @@ func SetLabelAllowListFromCLI(allowListMapping map[string]string) { } func SetLabelAllowListFromManifest(manifest string) { - allowListLock.Lock() - defer allowListLock.Unlock() allowListMapping := make(map[string]string) data, err := os.ReadFile(filepath.Clean(manifest)) if err != nil { diff --git a/constraint/vendor/k8s.io/component-base/metrics/processstarttime.go b/constraint/vendor/k8s.io/component-base/metrics/processstarttime.go index 4b5e76935..f4b98f8eb 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/processstarttime.go +++ b/constraint/vendor/k8s.io/component-base/metrics/processstarttime.go @@ -35,7 +35,7 @@ var processStartTime = NewGaugeVec( // a prometheus registry. This metric needs to be included to ensure counter // data fidelity. func RegisterProcessStartTime(registrationFunc func(Registerable) error) error { - start, err := getProcessStart() + start, err := GetProcessStart() if err != nil { klog.Errorf("Could not get process start time, %v", err) start = float64(time.Now().Unix()) diff --git a/constraint/vendor/k8s.io/component-base/metrics/processstarttime_others.go b/constraint/vendor/k8s.io/component-base/metrics/processstarttime_others.go index a14cd8833..611a12906 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/processstarttime_others.go +++ b/constraint/vendor/k8s.io/component-base/metrics/processstarttime_others.go @@ -25,7 +25,7 @@ import ( "github.com/prometheus/procfs" ) -func getProcessStart() (float64, error) { +func GetProcessStart() (float64, error) { pid := os.Getpid() p, err := procfs.NewProc(pid) if err != nil { diff --git a/constraint/vendor/k8s.io/component-base/metrics/processstarttime_windows.go b/constraint/vendor/k8s.io/component-base/metrics/processstarttime_windows.go index 7813115e7..afee6f9b1 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/processstarttime_windows.go +++ b/constraint/vendor/k8s.io/component-base/metrics/processstarttime_windows.go @@ -23,7 +23,7 @@ import ( "golang.org/x/sys/windows" ) -func getProcessStart() (float64, error) { +func GetProcessStart() (float64, error) { processHandle := windows.CurrentProcess() var creationTime, exitTime, kernelTime, userTime windows.Filetime diff --git a/constraint/vendor/k8s.io/component-base/metrics/summary.go b/constraint/vendor/k8s.io/component-base/metrics/summary.go index d40421645..f1af12175 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/summary.go +++ b/constraint/vendor/k8s.io/component-base/metrics/summary.go @@ -18,6 +18,7 @@ package metrics import ( "context" + "sync" "github.com/blang/semver/v4" "github.com/prometheus/client_golang/prometheus" @@ -109,11 +110,6 @@ func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec { opts.StabilityLevel.setDefaults() fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) - allowListLock.RLock() - if allowList, ok := labelValueAllowLists[fqName]; ok { - opts.LabelValueAllowLists = allowList - } - allowListLock.RUnlock() v := &SummaryVec{ SummaryOpts: opts, @@ -160,6 +156,15 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { } if v.LabelValueAllowLists != nil { 
v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainToAllowedList(v.originalLabels, lvs) + } + allowListLock.RUnlock() + }) } return v.SummaryVec.WithLabelValues(lvs...) } @@ -174,6 +179,15 @@ func (v *SummaryVec) With(labels map[string]string) ObserverMetric { } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainLabelMap(labels) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainLabelMap(labels) + } + allowListLock.RUnlock() + }) } return v.SummaryVec.With(labels) } @@ -201,6 +215,13 @@ func (v *SummaryVec) Reset() { v.SummaryVec.Reset() } +// ResetLabelAllowLists resets the label allow list for the SummaryVec. +// NOTE: This should only be used in test. +func (v *SummaryVec) ResetLabelAllowLists() { + v.initializeLabelAllowListsOnce = sync.Once{} + v.LabelValueAllowLists = nil +} + // WithContext returns wrapped SummaryVec with context func (v *SummaryVec) WithContext(ctx context.Context) *SummaryVecWithContext { return &SummaryVecWithContext{ diff --git a/constraint/vendor/k8s.io/component-base/metrics/timing_histogram.go b/constraint/vendor/k8s.io/component-base/metrics/timing_histogram.go index a0f0b253c..4fc757473 100644 --- a/constraint/vendor/k8s.io/component-base/metrics/timing_histogram.go +++ b/constraint/vendor/k8s.io/component-base/metrics/timing_histogram.go @@ -18,6 +18,7 @@ package metrics import ( "context" + "sync" "time" "github.com/blang/semver/v4" @@ -125,11 +126,6 @@ func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts *TimingHistogr opts.StabilityLevel.setDefaults() fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) - allowListLock.RLock() - if allowList, ok := labelValueAllowLists[fqName]; ok { - opts.LabelValueAllowLists = allowList - } - allowListLock.RUnlock() v := &TimingHistogramVec{ TimingHistogramVec: noopTimingHistogramVec, @@ -175,6 +171,15 @@ func (v *TimingHistogramVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainToAllowedList(v.originalLabels, lvs) + } + allowListLock.RUnlock() + }) } ops, err := v.TimingHistogramVec.GetMetricWithLabelValues(lvs...) if err != nil { @@ -214,6 +219,15 @@ func (v *TimingHistogramVec) WithChecked(labels map[string]string) (GaugeMetric, } if v.LabelValueAllowLists != nil { v.LabelValueAllowLists.ConstrainLabelMap(labels) + } else { + v.initializeLabelAllowListsOnce.Do(func() { + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[v.FQName()]; ok { + v.LabelValueAllowLists = allowList + allowList.ConstrainLabelMap(labels) + } + allowListLock.RUnlock() + }) } ops, err := v.TimingHistogramVec.GetMetricWith(labels) return ops.(GaugeMetric), err @@ -254,6 +268,13 @@ func (v *TimingHistogramVec) Reset() { v.TimingHistogramVec.Reset() } +// ResetLabelAllowLists resets the label allow list for the TimingHistogramVec. +// NOTE: This should only be used in test. 
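Every *Vec type now resolves its label-value allow list lazily, on first use, via a sync.Once instead of reading the global map at construction time, so allow lists registered after a vector is constructed (for example, loaded from a manifest) can still be picked up. A simplified sketch of the lazy-initialization pattern with stand-in types, not the vendored metrics API:

package main

import (
	"fmt"
	"sync"
)

// Global allow lists keyed by metric name, guarded by a RWMutex.
var (
	allowListsMu sync.RWMutex
	allowLists   = map[string][]string{}
)

type vec struct {
	name      string
	initOnce  sync.Once
	allowList []string // nil until lazily resolved
}

// constrain resolves the allow list once, then clamps disallowed values.
func (v *vec) constrain(value string) string {
	v.initOnce.Do(func() {
		allowListsMu.RLock()
		v.allowList = allowLists[v.name]
		allowListsMu.RUnlock()
	})
	if v.allowList == nil {
		return value // no allow list configured for this metric
	}
	for _, allowed := range v.allowList {
		if value == allowed {
			return value
		}
	}
	return "unexpected"
}

func main() {
	allowListsMu.Lock()
	allowLists["demo_total"] = []string{"get", "list"}
	allowListsMu.Unlock()

	v := &vec{name: "demo_total"}
	fmt.Println(v.constrain("get"))   // get
	fmt.Println(v.constrain("patch")) // unexpected
}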
+func (v *TimingHistogramVec) ResetLabelAllowLists() { + v.initializeLabelAllowListsOnce = sync.Once{} + v.LabelValueAllowLists = nil +} + // WithContext returns wrapped TimingHistogramVec with context func (v *TimingHistogramVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric { return &TimingHistogramVecWithContext{ diff --git a/constraint/vendor/k8s.io/component-base/tracing/utils.go b/constraint/vendor/k8s.io/component-base/tracing/utils.go index b5141f033..dde7a5b28 100644 --- a/constraint/vendor/k8s.io/component-base/tracing/utils.go +++ b/constraint/vendor/k8s.io/component-base/tracing/utils.go @@ -27,6 +27,7 @@ import ( sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" oteltrace "go.opentelemetry.io/otel/trace" + noopoteltrace "go.opentelemetry.io/otel/trace/noop" "k8s.io/client-go/transport" "k8s.io/component-base/tracing/api/v1" @@ -47,7 +48,7 @@ func (n *noopTracerProvider) Shutdown(context.Context) error { } func NewNoopTracerProvider() TracerProvider { - return &noopTracerProvider{TracerProvider: oteltrace.NewNoopTracerProvider()} + return &noopTracerProvider{TracerProvider: noopoteltrace.NewTracerProvider()} } // NewProvider creates a TracerProvider in a component, and enforces recommended tracing behavior diff --git a/constraint/vendor/k8s.io/component-base/version/base.go b/constraint/vendor/k8s.io/component-base/version/base.go index b753b7d19..46500118a 100644 --- a/constraint/vendor/k8s.io/component-base/version/base.go +++ b/constraint/vendor/k8s.io/component-base/version/base.go @@ -61,3 +61,10 @@ var ( buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') ) + +const ( + // DefaultKubeBinaryVersion is the hard coded k8 binary version based on the latest K8s release. + // It is supposed to be consistent with gitMajor and gitMinor, except for local tests, where gitMajor and gitMinor are "". + // Should update for each minor release! + DefaultKubeBinaryVersion = "1.32" +) diff --git a/constraint/vendor/k8s.io/component-base/version/version.go b/constraint/vendor/k8s.io/component-base/version/version.go index 1d268d4c6..99d368534 100644 --- a/constraint/vendor/k8s.io/component-base/version/version.go +++ b/constraint/vendor/k8s.io/component-base/version/version.go @@ -19,10 +19,41 @@ package version import ( "fmt" "runtime" + "sync/atomic" + "k8s.io/apimachinery/pkg/util/version" apimachineryversion "k8s.io/apimachinery/pkg/version" ) +type EffectiveVersion interface { + BinaryVersion() *version.Version + EmulationVersion() *version.Version + MinCompatibilityVersion() *version.Version + EqualTo(other EffectiveVersion) bool + String() string + Validate() []error +} + +type MutableEffectiveVersion interface { + EffectiveVersion + Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) + SetEmulationVersion(emulationVersion *version.Version) + SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) +} + +type effectiveVersion struct { + // When true, BinaryVersion() returns the current binary version + useDefaultBuildBinaryVersion atomic.Bool + // Holds the last binary version stored in Set() + binaryVersion atomic.Pointer[version.Version] + // If the emulationVersion is set by the users, it could only contain major and minor versions. + // In tests, emulationVersion could be the same as the binary version, or set directly, + // which can have "alpha" as pre-release to continue serving expired apis while we clean up the test. 
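The tracing utils switch from the deprecated oteltrace.NewNoopTracerProvider to the dedicated go.opentelemetry.io/otel/trace/noop package. A one-liner sketch of the replacement (assuming the otel module is vendored):

package main

import (
	"context"

	nooptrace "go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// noop.NewTracerProvider replaces the deprecated oteltrace.NewNoopTracerProvider.
	tp := nooptrace.NewTracerProvider()
	_, span := tp.Tracer("example").Start(context.Background(), "discarded-span")
	defer span.End() // every call is a no-op; nothing is recorded or exported
}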
+ emulationVersion atomic.Pointer[version.Version] + // minCompatibilityVersion could only contain major and minor versions. + minCompatibilityVersion atomic.Pointer[version.Version] +} + // Get returns the overall codebase version. It's for detecting // what code a binary was built from. func Get() apimachineryversion.Info { @@ -40,3 +71,129 @@ func Get() apimachineryversion.Info { Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), } } + +func (m *effectiveVersion) BinaryVersion() *version.Version { + if m.useDefaultBuildBinaryVersion.Load() { + return defaultBuildBinaryVersion() + } + return m.binaryVersion.Load() +} + +func (m *effectiveVersion) EmulationVersion() *version.Version { + ver := m.emulationVersion.Load() + if ver != nil { + // Emulation version can have "alpha" as pre-release to continue serving expired apis while we clean up the test. + // The pre-release should not be accessible to the users. + return ver.WithPreRelease(m.BinaryVersion().PreRelease()) + } + return ver +} + +func (m *effectiveVersion) MinCompatibilityVersion() *version.Version { + return m.minCompatibilityVersion.Load() +} + +func (m *effectiveVersion) EqualTo(other EffectiveVersion) bool { + return m.BinaryVersion().EqualTo(other.BinaryVersion()) && m.EmulationVersion().EqualTo(other.EmulationVersion()) && m.MinCompatibilityVersion().EqualTo(other.MinCompatibilityVersion()) +} + +func (m *effectiveVersion) String() string { + if m == nil { + return "" + } + return fmt.Sprintf("{BinaryVersion: %s, EmulationVersion: %s, MinCompatibilityVersion: %s}", + m.BinaryVersion().String(), m.EmulationVersion().String(), m.MinCompatibilityVersion().String()) +} + +func majorMinor(ver *version.Version) *version.Version { + if ver == nil { + return ver + } + return version.MajorMinor(ver.Major(), ver.Minor()) +} + +func (m *effectiveVersion) Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) { + m.binaryVersion.Store(binaryVersion) + m.useDefaultBuildBinaryVersion.Store(false) + m.emulationVersion.Store(majorMinor(emulationVersion)) + m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion)) +} + +func (m *effectiveVersion) SetEmulationVersion(emulationVersion *version.Version) { + m.emulationVersion.Store(majorMinor(emulationVersion)) +} + +func (m *effectiveVersion) SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) { + m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion)) +} + +func (m *effectiveVersion) Validate() []error { + var errs []error + // Validate only checks the major and minor versions. + binaryVersion := m.BinaryVersion().WithPatch(0) + emulationVersion := m.emulationVersion.Load() + minCompatibilityVersion := m.minCompatibilityVersion.Load() + + // emulationVersion can only be 1.{binaryMinor-1}...1.{binaryMinor}. + maxEmuVer := binaryVersion + minEmuVer := binaryVersion.SubtractMinor(1) + if emulationVersion.GreaterThan(maxEmuVer) || emulationVersion.LessThan(minEmuVer) { + errs = append(errs, fmt.Errorf("emulation version %s is not between [%s, %s]", emulationVersion.String(), minEmuVer.String(), maxEmuVer.String())) + } + // minCompatibilityVersion can only be 1.{binaryMinor-1} for alpha. 
+ maxCompVer := binaryVersion.SubtractMinor(1) + minCompVer := binaryVersion.SubtractMinor(1) + if minCompatibilityVersion.GreaterThan(maxCompVer) || minCompatibilityVersion.LessThan(minCompVer) { + errs = append(errs, fmt.Errorf("minCompatibilityVersion version %s is not between [%s, %s]", minCompatibilityVersion.String(), minCompVer.String(), maxCompVer.String())) + } + return errs +} + +func newEffectiveVersion(binaryVersion *version.Version, useDefaultBuildBinaryVersion bool) MutableEffectiveVersion { + effective := &effectiveVersion{} + compatVersion := binaryVersion.SubtractMinor(1) + effective.Set(binaryVersion, binaryVersion, compatVersion) + effective.useDefaultBuildBinaryVersion.Store(useDefaultBuildBinaryVersion) + return effective +} + +func NewEffectiveVersion(binaryVer string) MutableEffectiveVersion { + if binaryVer == "" { + return &effectiveVersion{} + } + binaryVersion := version.MustParse(binaryVer) + return newEffectiveVersion(binaryVersion, false) +} + +func defaultBuildBinaryVersion() *version.Version { + verInfo := Get() + return version.MustParse(verInfo.String()).WithInfo(verInfo) +} + +// DefaultBuildEffectiveVersion returns the MutableEffectiveVersion based on the +// current build information. +func DefaultBuildEffectiveVersion() MutableEffectiveVersion { + binaryVersion := defaultBuildBinaryVersion() + if binaryVersion.Major() == 0 && binaryVersion.Minor() == 0 { + return DefaultKubeEffectiveVersion() + } + return newEffectiveVersion(binaryVersion, true) +} + +// DefaultKubeEffectiveVersion returns the MutableEffectiveVersion based on the +// latest K8s release. +func DefaultKubeEffectiveVersion() MutableEffectiveVersion { + binaryVersion := version.MustParse(DefaultKubeBinaryVersion).WithInfo(Get()) + return newEffectiveVersion(binaryVersion, false) +} + +// ValidateKubeEffectiveVersion validates the EmulationVersion is equal to the binary version at 1.31 for kube components. +// emulationVersion is introduced in 1.31, so it is only allowed to be equal to the binary version at 1.31. +func ValidateKubeEffectiveVersion(effectiveVersion EffectiveVersion) error { + binaryVersion := version.MajorMinor(effectiveVersion.BinaryVersion().Major(), effectiveVersion.BinaryVersion().Minor()) + if binaryVersion.EqualTo(version.MajorMinor(1, 31)) && !effectiveVersion.EmulationVersion().EqualTo(binaryVersion) { + return fmt.Errorf("emulation version needs to be equal to binary version(%s) in compatibility-version alpha, got %s", + binaryVersion.String(), effectiveVersion.EmulationVersion().String()) + } + return nil +} diff --git a/constraint/vendor/k8s.io/klog/v2/klog.go b/constraint/vendor/k8s.io/klog/v2/klog.go index 026be9e3b..47ec9466a 100644 --- a/constraint/vendor/k8s.io/klog/v2/klog.go +++ b/constraint/vendor/k8s.io/klog/v2/klog.go @@ -404,13 +404,6 @@ func (t *traceLocation) Set(value string) error { return nil } -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - var logging loggingT var commandLine flag.FlagSet @@ -486,7 +479,7 @@ type settings struct { // Access to all of the following fields must be protected via a mutex. // file holds writer for each of the log types. - file [severity.NumSeverity]flushSyncWriter + file [severity.NumSeverity]io.Writer // flushInterval is the interval for periodic flushing. If zero, // the global default will be used. 
flushInterval time.Duration @@ -831,32 +824,12 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, buffer.PutBuffer(b) } -// redirectBuffer is used to set an alternate destination for the logs -type redirectBuffer struct { - w io.Writer -} - -func (rb *redirectBuffer) Sync() error { - return nil -} - -func (rb *redirectBuffer) Flush() error { - return nil -} - -func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { - return rb.w.Write(bytes) -} - // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() for s := severity.FatalLog; s >= severity.InfoLog; s-- { - rb := &redirectBuffer{ - w: w, - } - logging.file[s] = rb + logging.file[s] = w } } @@ -868,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) { if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } - rb := &redirectBuffer{ - w: w, - } - logging.file[sev] = rb + logging.file[sev] = w } // LogToStderr sets whether to log exclusively to stderr, bypassing outputs @@ -1011,7 +981,8 @@ func (l *loggingT) exit(err error) { logExitFunc(err) return } - l.flushAll() + needToSync := l.flushAll() + l.syncAll(needToSync) OsExit(2) } @@ -1028,10 +999,6 @@ type syncBuffer struct { maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - // CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. func CalculateMaxSize() uint64 { if logging.logFile != "" { @@ -1223,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) { // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() - l.flushAll() + needToSync := l.flushAll() l.mu.Unlock() + // Some environments are slow when syncing and holding the lock might cause contention. + l.syncAll(needToSync) } -// flushAll flushes all the logs and attempts to "sync" their data to disk. +// flushAll flushes all the logs // l.mu is held. -func (l *loggingT) flushAll() { +// +// The result is the number of files which need to be synced and the pointers to them. +func (l *loggingT) flushAll() fileArray { + var needToSync fileArray + // Flush from fatal down, in case there's trouble flushing. for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] - if file != nil { - _ = file.Flush() // ignore error - _ = file.Sync() // ignore error + if sb, ok := file.(*syncBuffer); ok && sb.file != nil { + _ = sb.Flush() // ignore error + needToSync.files[needToSync.num] = sb.file + needToSync.num++ } } if logging.loggerOptions.flush != nil { logging.loggerOptions.flush() } + return needToSync +} + +type fileArray struct { + num int + files [severity.NumSeverity]*os.File +} + +// syncAll attempts to "sync" their data to disk. +func (l *loggingT) syncAll(needToSync fileArray) { + // Flush from fatal down, in case there's trouble flushing. 
+ for i := 0; i < needToSync.num; i++ { + _ = needToSync.files[i].Sync() // ignore error + } } // CopyStandardLogTo arranges for messages written to the Go "log" package's diff --git a/constraint/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/constraint/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 5789e67ab..1b758ab25 100644 --- a/constraint/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/constraint/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -22,7 +22,7 @@ import ( "strings" openapi_v2 "github.com/google/gnostic-models/openapiv2" - "gopkg.in/yaml.v2" + yaml "sigs.k8s.io/yaml/goyaml.v2" ) func newSchemaError(path *Path, format string, a ...interface{}) error { diff --git a/constraint/vendor/k8s.io/kube-openapi/pkg/util/trie.go b/constraint/vendor/k8s.io/kube-openapi/pkg/util/trie.go deleted file mode 100644 index a9a76c179..000000000 --- a/constraint/vendor/k8s.io/kube-openapi/pkg/util/trie.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -// A simple trie implementation with Add and HasPrefix methods only. -type Trie struct { - children map[byte]*Trie - wordTail bool - word string -} - -// NewTrie creates a Trie and add all strings in the provided list to it. -func NewTrie(list []string) Trie { - ret := Trie{ - children: make(map[byte]*Trie), - wordTail: false, - } - for _, v := range list { - ret.Add(v) - } - return ret -} - -// Add adds a word to this trie -func (t *Trie) Add(v string) { - root := t - for _, b := range []byte(v) { - child, exists := root.children[b] - if !exists { - child = &Trie{ - children: make(map[byte]*Trie), - wordTail: false, - } - root.children[b] = child - } - root = child - } - root.wordTail = true - root.word = v -} - -// HasPrefix returns true of v has any of the prefixes stored in this trie. -func (t *Trie) HasPrefix(v string) bool { - _, has := t.GetPrefix(v) - return has -} - -// GetPrefix is like HasPrefix but return the prefix in case of match or empty string otherwise. -func (t *Trie) GetPrefix(v string) (string, bool) { - root := t - if root.wordTail { - return root.word, true - } - for _, b := range []byte(v) { - child, exists := root.children[b] - if !exists { - return "", false - } - if child.wordTail { - return child.word, true - } - root = child - } - return "", false -} diff --git a/constraint/vendor/k8s.io/kube-openapi/pkg/util/util.go b/constraint/vendor/k8s.io/kube-openapi/pkg/util/util.go deleted file mode 100644 index 6eee935b2..000000000 --- a/constraint/vendor/k8s.io/kube-openapi/pkg/util/util.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "reflect" - "strings" -) - -// [DEPRECATED] ToCanonicalName converts Golang package/type canonical name into REST friendly OpenAPI name. -// This method is deprecated because it has a misleading name. Please use ToRESTFriendlyName -// instead -// -// NOTE: actually the "canonical name" in this method should be named "REST friendly OpenAPI name", -// which is different from "canonical name" defined in GetCanonicalTypeName. The "canonical name" defined -// in GetCanonicalTypeName means Go type names with full package path. -// -// Examples of REST friendly OpenAPI name: -// -// Input: k8s.io/api/core/v1.Pod -// Output: io.k8s.api.core.v1.Pod -// -// Input: k8s.io/api/core/v1 -// Output: io.k8s.api.core.v1 -// -// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo -// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo -func ToCanonicalName(name string) string { - return ToRESTFriendlyName(name) -} - -// ToRESTFriendlyName converts Golang package/type canonical name into REST friendly OpenAPI name. -// -// Examples of REST friendly OpenAPI name: -// -// Input: k8s.io/api/core/v1.Pod -// Output: io.k8s.api.core.v1.Pod -// -// Input: k8s.io/api/core/v1 -// Output: io.k8s.api.core.v1 -// -// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo -// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo -func ToRESTFriendlyName(name string) string { - nameParts := strings.Split(name, "/") - // Reverse first part. e.g., io.k8s... instead of k8s.io... - if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { - parts := strings.Split(nameParts[0], ".") - for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { - parts[i], parts[j] = parts[j], parts[i] - } - nameParts[0] = strings.Join(parts, ".") - } - return strings.Join(nameParts, ".") -} - -// OpenAPICanonicalTypeNamer is an interface for models without Go type to seed model name. -// -// OpenAPI canonical names are Go type names with full package path, for uniquely indentifying -// a model / Go type. If a Go type is vendored from another package, only the path after "/vendor/" -// should be used. 
For custom resource definition (CRD), the canonical name is expected to be -// -// group/version.kind -// -// Examples of canonical name: -// -// Go type: k8s.io/kubernetes/pkg/apis/core.Pod -// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo -// -// Example for vendored Go type: -// -// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod -// Canonical name: k8s.io/api/core/v1.Pod -// -// Original full path: vendor/k8s.io/api/core/v1.Pod -// Canonical name: k8s.io/api/core/v1.Pod -type OpenAPICanonicalTypeNamer interface { - OpenAPICanonicalTypeName() string -} - -// GetCanonicalTypeName will find the canonical type name of a sample object, removing -// the "vendor" part of the path -func GetCanonicalTypeName(model interface{}) string { - if namer, ok := model.(OpenAPICanonicalTypeNamer); ok { - return namer.OpenAPICanonicalTypeName() - } - t := reflect.TypeOf(model) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.PkgPath() == "" { - return t.Name() - } - path := t.PkgPath() - if strings.Contains(path, "/vendor/") { - path = path[strings.Index(path, "/vendor/")+len("/vendor/"):] - } else if strings.HasPrefix(path, "vendor/") { - path = strings.TrimPrefix(path, "vendor/") - } - return path + "." + t.Name() -} diff --git a/constraint/vendor/k8s.io/utils/clock/testing/fake_clock.go b/constraint/vendor/k8s.io/utils/clock/testing/fake_clock.go index 79e11deb6..462c40c2c 100644 --- a/constraint/vendor/k8s.io/utils/clock/testing/fake_clock.go +++ b/constraint/vendor/k8s.io/utils/clock/testing/fake_clock.go @@ -48,7 +48,6 @@ type fakeClockWaiter struct { stepInterval time.Duration skipIfBlocked bool destChan chan time.Time - fired bool afterFunc func() } @@ -198,12 +197,10 @@ func (f *FakeClock) setTimeLocked(t time.Time) { if w.skipIfBlocked { select { case w.destChan <- t: - w.fired = true default: } } else { w.destChan <- t - w.fired = true } if w.afterFunc != nil { @@ -305,44 +302,48 @@ func (f *fakeTimer) C() <-chan time.Time { return f.waiter.destChan } -// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. +// Stop prevents the Timer from firing. It returns true if the call stops the +// timer, false if the timer has already expired or been stopped. func (f *fakeTimer) Stop() bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() + active := false newWaiters := make([]*fakeClockWaiter, 0, len(f.fakeClock.waiters)) for i := range f.fakeClock.waiters { w := f.fakeClock.waiters[i] if w != &f.waiter { newWaiters = append(newWaiters, w) + continue } + // If timer is found, it has not been fired yet. + active = true } f.fakeClock.waiters = newWaiters - return !f.waiter.fired + return active } -// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet -// fired, or false otherwise. +// Reset changes the timer to expire after duration d. It returns true if the +// timer had been active, false if the timer had expired or been stopped. func (f *fakeTimer) Reset(d time.Duration) bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - active := !f.waiter.fired + active := false - f.waiter.fired = false f.waiter.targetTime = f.fakeClock.time.Add(d) - var isWaiting bool for i := range f.fakeClock.waiters { w := f.fakeClock.waiters[i] if w == &f.waiter { - isWaiting = true + // If timer is found, it has not been fired yet. 
+ active = true break } } - if !isWaiting { + if !active { f.fakeClock.waiters = append(f.fakeClock.waiters, &f.waiter) } diff --git a/constraint/vendor/k8s.io/utils/lru/lru.go b/constraint/vendor/k8s.io/utils/lru/lru.go index 47f135281..40c22ece1 100644 --- a/constraint/vendor/k8s.io/utils/lru/lru.go +++ b/constraint/vendor/k8s.io/utils/lru/lru.go @@ -16,6 +16,7 @@ limitations under the License. package lru import ( + "fmt" "sync" groupcache "k8s.io/utils/internal/third_party/forked/golang/golang-lru" @@ -44,6 +45,17 @@ func NewWithEvictionFunc(size int, f EvictionFunc) *Cache { return c } +// SetEvictionFunc updates the eviction func +func (c *Cache) SetEvictionFunc(f EvictionFunc) error { + c.lock.Lock() + defer c.lock.Unlock() + if c.cache.OnEvicted != nil { + return fmt.Errorf("lru cache eviction function is already set") + } + c.cache.OnEvicted = f + return nil +} + // Add adds a value to the cache. func (c *Cache) Add(key Key, value interface{}) { c.lock.Lock() diff --git a/constraint/vendor/k8s.io/utils/net/multi_listen.go b/constraint/vendor/k8s.io/utils/net/multi_listen.go new file mode 100644 index 000000000..7cb7795be --- /dev/null +++ b/constraint/vendor/k8s.io/utils/net/multi_listen.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "context" + "fmt" + "net" + "sync" +) + +// connErrPair pairs conn and error which is returned by accept on sub-listeners. +type connErrPair struct { + conn net.Conn + err error +} + +// multiListener implements net.Listener +type multiListener struct { + listeners []net.Listener + wg sync.WaitGroup + + // connCh passes accepted connections, from child listeners to parent. + connCh chan connErrPair + // stopCh communicates from parent to child listeners. + stopCh chan struct{} +} + +// compile time check to ensure *multiListener implements net.Listener +var _ net.Listener = &multiListener{} + +// MultiListen returns net.Listener which can listen on and accept connections for +// the given network on multiple addresses. Internally it uses stdlib to create +// sub-listener and multiplexes connection requests using go-routines. +// The network must be "tcp", "tcp4" or "tcp6". +// It follows the semantics of net.Listen that primarily means: +// 1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen +// listens on all available unicast and anycast IP addresses of the local system. +// 2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively. +// 3. The host can accept names (e.g, localhost) and it will create a listener for at +// most one of the host's IP. 
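A minimal usage sketch for the new listener helper, assuming the hypothetical loopback addresses, port, and handler shown below; the import is aliased to netutils because the vendored package is named net and would otherwise collide with the standard library.

package main

import (
	"context"
	"fmt"
	"net/http"

	netutils "k8s.io/utils/net"
)

func main() {
	// Bind one logical listener to both loopback addresses; a connection to
	// either address is surfaced through the same Accept loop.
	ln, err := netutils.MultiListen(context.Background(), "tcp", "127.0.0.1:8080", "[::1]:8080")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// Addr() reports only the first sub-listener's address.
	fmt.Println("serving on", ln.Addr())
	_ = http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
}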
+func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) { + var lc net.ListenConfig + return multiListen( + ctx, + network, + addrs, + func(ctx context.Context, network, address string) (net.Listener, error) { + return lc.Listen(ctx, network, address) + }) +} + +// multiListen implements MultiListen by consuming stdlib functions as dependency allowing +// mocking for unit-testing. +func multiListen( + ctx context.Context, + network string, + addrs []string, + listenFunc func(ctx context.Context, network, address string) (net.Listener, error), +) (net.Listener, error) { + if !(network == "tcp" || network == "tcp4" || network == "tcp6") { + return nil, fmt.Errorf("network %q not supported", network) + } + if len(addrs) == 0 { + return nil, fmt.Errorf("no address provided to listen on") + } + + ml := &multiListener{ + connCh: make(chan connErrPair), + stopCh: make(chan struct{}), + } + for _, addr := range addrs { + l, err := listenFunc(ctx, network, addr) + if err != nil { + // close all the sub-listeners and exit + _ = ml.Close() + return nil, err + } + ml.listeners = append(ml.listeners, l) + } + + for _, l := range ml.listeners { + ml.wg.Add(1) + go func(l net.Listener) { + defer ml.wg.Done() + for { + // Accept() is blocking, unless ml.Close() is called, in which + // case it will return immediately with an error. + conn, err := l.Accept() + // This assumes that ANY error from Accept() will terminate the + // sub-listener. We could maybe be more precise, but it + // doesn't seem necessary. + terminate := err != nil + + select { + case ml.connCh <- connErrPair{conn: conn, err: err}: + case <-ml.stopCh: + // In case we accepted a connection AND were stopped, and + // this select-case was chosen, just throw away the + // connection. This avoids potentially blocking on connCh + // or leaking a connection. + if conn != nil { + _ = conn.Close() + } + terminate = true + } + // Make sure we don't loop on Accept() returning an error and + // the select choosing the channel case. + if terminate { + return + } + } + }(l) + } + return ml, nil +} + +// Accept implements net.Listener. It waits for and returns a connection from +// any of the sub-listener. +func (ml *multiListener) Accept() (net.Conn, error) { + // wait for any sub-listener to enqueue an accepted connection + connErr, ok := <-ml.connCh + if !ok { + // The channel will be closed only when Close() is called on the + // multiListener. Closing of this channel implies that all + // sub-listeners are also closed, which causes a "use of closed + // network connection" error on their Accept() calls. We return the + // same error for multiListener.Accept() if multiListener.Close() + // has already been called. + return nil, fmt.Errorf("use of closed network connection") + } + return connErr.conn, connErr.err +} + +// Close implements net.Listener. It will close all sub-listeners and wait for +// the go-routines to exit. +func (ml *multiListener) Close() error { + // Make sure this can be called repeatedly without explosions. + select { + case <-ml.stopCh: + return fmt.Errorf("use of closed network connection") + default: + } + + // Tell all sub-listeners to stop. + close(ml.stopCh) + + // Closing the listeners causes Accept() to immediately return an error in + // the sub-listener go-routines. + for _, l := range ml.listeners { + _ = l.Close() + } + + // Wait for all the sub-listener go-routines to exit. + ml.wg.Wait() + close(ml.connCh) + + // Drain any already-queued connections. 
+ for connErr := range ml.connCh { + if connErr.conn != nil { + _ = connErr.conn.Close() + } + } + return nil +} + +// Addr is an implementation of the net.Listener interface. It always returns +// the address of the first listener. Callers should use conn.LocalAddr() to +// obtain the actual local address of the sub-listener. +func (ml *multiListener) Addr() net.Addr { + return ml.listeners[0].Addr() +} + +// Addrs is like Addr, but returns the address for all registered listeners. +func (ml *multiListener) Addrs() []net.Addr { + var ret []net.Addr + for _, l := range ml.listeners { + ret = append(ret, l.Addr()) + } + return ret +} diff --git a/constraint/vendor/k8s.io/utils/strings/slices/slices.go b/constraint/vendor/k8s.io/utils/strings/slices/slices.go deleted file mode 100644 index 8e21838f2..000000000 --- a/constraint/vendor/k8s.io/utils/strings/slices/slices.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package slices defines various functions useful with slices of string type. -// The goal is to be as close as possible to -// https://github.com/golang/go/issues/45955. Ideal would be if we can just -// replace "stringslices" if the "slices" package becomes standard. -package slices - -// Equal reports whether two slices are equal: the same length and all -// elements equal. If the lengths are different, Equal returns false. -// Otherwise, the elements are compared in index order, and the -// comparison stops at the first unequal pair. -func Equal(s1, s2 []string) bool { - if len(s1) != len(s2) { - return false - } - for i, n := range s1 { - if n != s2[i] { - return false - } - } - return true -} - -// Filter appends to d each element e of s for which keep(e) returns true. -// It returns the modified d. d may be s[:0], in which case the kept -// elements will be stored in the same slice. -// if the slices overlap in some other way, the results are unspecified. -// To create a new slice with the filtered results, pass nil for d. -func Filter(d, s []string, keep func(string) bool) []string { - for _, n := range s { - if keep(n) { - d = append(d, n) - } - } - return d -} - -// Contains reports whether v is present in s. -func Contains(s []string, v string) bool { - return Index(s, v) >= 0 -} - -// Index returns the index of the first occurrence of v in s, or -1 if -// not present. -func Index(s []string, v string) int { - // "Contains" may be replaced with "Index(s, v) >= 0": - // https://github.com/golang/go/issues/45955#issuecomment-873377947 - for i, n := range s { - if n == v { - return i - } - } - return -1 -} - -// Functions below are not in https://github.com/golang/go/issues/45955 - -// Clone returns a new clone of s. 
-func Clone(s []string) []string { - // https://github.com/go101/go101/wiki/There-is-not-a-perfect-way-to-clone-slices-in-Go - if s == nil { - return nil - } - c := make([]string, len(s)) - copy(c, s) - return c -} diff --git a/constraint/vendor/k8s.io/utils/trace/trace.go b/constraint/vendor/k8s.io/utils/trace/trace.go index 187eb5d8c..559aebb59 100644 --- a/constraint/vendor/k8s.io/utils/trace/trace.go +++ b/constraint/vendor/k8s.io/utils/trace/trace.go @@ -192,7 +192,7 @@ func (t *Trace) Log() { t.endTime = &endTime t.lock.Unlock() // an explicit logging request should dump all the steps out at the higher level - if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace + if t.parentTrace == nil && klogV(2) { // We don't start logging until Log or LogIfLong is called on the root trace t.logTrace() } } diff --git a/constraint/vendor/modules.txt b/constraint/vendor/modules.txt index e3e370616..088d59229 100644 --- a/constraint/vendor/modules.txt +++ b/constraint/vendor/modules.txt @@ -1,12 +1,15 @@ +# cel.dev/expr v0.19.0 +## explicit; go 1.21.1 +cel.dev/expr # github.com/OneOfOne/xxhash v1.2.8 ## explicit; go 1.11 github.com/OneOfOne/xxhash -# github.com/agnivade/levenshtein v1.1.1 -## explicit; go 1.13 +# github.com/agnivade/levenshtein v1.2.0 +## explicit; go 1.21 github.com/agnivade/levenshtein -# github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df -## explicit; go 1.18 -github.com/antlr/antlr4/runtime/Go/antlr/v4 +# github.com/antlr4-go/antlr/v4 v4.13.0 +## explicit; go 1.20 +github.com/antlr4-go/antlr/v4 # github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a ## explicit github.com/asaskevich/govalidator @@ -29,9 +32,6 @@ github.com/davecgh/go-spew/spew ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log -# github.com/evanphx/json-patch v4.12.0+incompatible -## explicit -github.com/evanphx/json-patch # github.com/evanphx/json-patch/v5 v5.9.0 ## explicit; go 1.18 github.com/evanphx/json-patch/v5 @@ -39,9 +39,13 @@ github.com/evanphx/json-patch/v5/internal/json # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.7.0 +# github.com/fsnotify/fsnotify v1.8.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal +# github.com/fxamacker/cbor/v2 v2.7.0 +## explicit; go 1.17 +github.com/fxamacker/cbor/v2 # github.com/go-ini/ini v1.67.0 ## explicit github.com/go-ini/ini @@ -52,15 +56,15 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/go-openapi/jsonpointer v0.19.6 -## explicit; go 1.13 +# github.com/go-openapi/jsonpointer v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonpointer # github.com/go-openapi/jsonreference v0.20.2 ## explicit; go 1.13 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/swag v0.22.3 -## explicit; go 1.18 +# github.com/go-openapi/swag v0.23.0 +## explicit; go 1.20 github.com/go-openapi/swag # github.com/gobwas/glob v0.2.3 ## explicit @@ -76,7 +80,7 @@ github.com/gobwas/glob/util/strings ## explicit; go 1.15 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/glog v1.2.1 +# github.com/golang/glog v1.2.4 ## explicit; go 1.19 github.com/golang/glog github.com/golang/glog/internal/logsink @@ -88,8 +92,8 @@ github.com/golang/protobuf/ptypes 
github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/google/cel-go v0.17.8 -## explicit; go 1.18 +# github.com/google/cel-go v0.22.0 +## explicit; go 1.21.1 github.com/google/cel-go/cel github.com/google/cel-go/checker github.com/google/cel-go/checker/decls @@ -137,14 +141,11 @@ github.com/google/uuid # github.com/gorilla/mux v1.8.1 ## explicit; go 1.20 github.com/gorilla/mux -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 -## explicit; go 1.20 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 +## explicit; go 1.22.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/imdario/mergo v0.3.13 -## explicit; go 1.13 -github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap @@ -154,8 +155,8 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.17.9 -## explicit; go 1.20 +# github.com/klauspost/compress v1.17.11 +## explicit; go 1.21 github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -177,30 +178,26 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg -# github.com/onsi/gomega v1.33.1 -## explicit; go 1.20 +# github.com/onsi/gomega v1.36.2 +## explicit; go 1.22.0 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal github.com/onsi/gomega/internal/gutil github.com/onsi/gomega/matchers +github.com/onsi/gomega/matchers/internal/miter github.com/onsi/gomega/matchers/support/goraph/bipartitegraph github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/open-policy-agent/opa v0.68.0 -## explicit; go 1.21 +# github.com/open-policy-agent/opa v1.1.0 +## explicit; go 1.22.7 github.com/open-policy-agent/opa/ast -github.com/open-policy-agent/opa/ast/internal/scanner -github.com/open-policy-agent/opa/ast/internal/tokens github.com/open-policy-agent/opa/ast/json -github.com/open-policy-agent/opa/ast/location github.com/open-policy-agent/opa/bundle github.com/open-policy-agent/opa/capabilities -github.com/open-policy-agent/opa/config github.com/open-policy-agent/opa/format -github.com/open-policy-agent/opa/hooks github.com/open-policy-agent/opa/internal/bundle github.com/open-policy-agent/opa/internal/cidr/merge github.com/open-policy-agent/opa/internal/compiler @@ -252,39 +249,54 @@ github.com/open-policy-agent/opa/internal/wasm/opcode github.com/open-policy-agent/opa/internal/wasm/sdk/opa/capabilities github.com/open-policy-agent/opa/internal/wasm/types github.com/open-policy-agent/opa/internal/wasm/util -github.com/open-policy-agent/opa/ir -github.com/open-policy-agent/opa/keys github.com/open-policy-agent/opa/loader -github.com/open-policy-agent/opa/loader/extension -github.com/open-policy-agent/opa/loader/filter -github.com/open-policy-agent/opa/logging -github.com/open-policy-agent/opa/metrics -github.com/open-policy-agent/opa/plugins -github.com/open-policy-agent/opa/plugins/rest github.com/open-policy-agent/opa/rego -github.com/open-policy-agent/opa/resolver -github.com/open-policy-agent/opa/resolver/wasm 
-github.com/open-policy-agent/opa/schemas github.com/open-policy-agent/opa/storage github.com/open-policy-agent/opa/storage/inmem -github.com/open-policy-agent/opa/storage/internal/errors -github.com/open-policy-agent/opa/storage/internal/ptr github.com/open-policy-agent/opa/topdown -github.com/open-policy-agent/opa/topdown/builtins -github.com/open-policy-agent/opa/topdown/cache -github.com/open-policy-agent/opa/topdown/copypropagation github.com/open-policy-agent/opa/topdown/print -github.com/open-policy-agent/opa/tracing github.com/open-policy-agent/opa/types -github.com/open-policy-agent/opa/util -github.com/open-policy-agent/opa/util/decoding -github.com/open-policy-agent/opa/version +github.com/open-policy-agent/opa/v1/ast +github.com/open-policy-agent/opa/v1/ast/internal/scanner +github.com/open-policy-agent/opa/v1/ast/internal/tokens +github.com/open-policy-agent/opa/v1/ast/json +github.com/open-policy-agent/opa/v1/ast/location +github.com/open-policy-agent/opa/v1/bundle +github.com/open-policy-agent/opa/v1/capabilities +github.com/open-policy-agent/opa/v1/config +github.com/open-policy-agent/opa/v1/format +github.com/open-policy-agent/opa/v1/hooks +github.com/open-policy-agent/opa/v1/ir +github.com/open-policy-agent/opa/v1/keys +github.com/open-policy-agent/opa/v1/loader +github.com/open-policy-agent/opa/v1/loader/extension +github.com/open-policy-agent/opa/v1/loader/filter +github.com/open-policy-agent/opa/v1/logging +github.com/open-policy-agent/opa/v1/metrics +github.com/open-policy-agent/opa/v1/plugins +github.com/open-policy-agent/opa/v1/plugins/rest +github.com/open-policy-agent/opa/v1/rego +github.com/open-policy-agent/opa/v1/resolver +github.com/open-policy-agent/opa/v1/resolver/wasm +github.com/open-policy-agent/opa/v1/schemas +github.com/open-policy-agent/opa/v1/storage +github.com/open-policy-agent/opa/v1/storage/inmem +github.com/open-policy-agent/opa/v1/storage/internal/errors +github.com/open-policy-agent/opa/v1/storage/internal/ptr +github.com/open-policy-agent/opa/v1/topdown +github.com/open-policy-agent/opa/v1/topdown/builtins +github.com/open-policy-agent/opa/v1/topdown/cache +github.com/open-policy-agent/opa/v1/topdown/copypropagation +github.com/open-policy-agent/opa/v1/topdown/print +github.com/open-policy-agent/opa/v1/tracing +github.com/open-policy-agent/opa/v1/types +github.com/open-policy-agent/opa/v1/util +github.com/open-policy-agent/opa/v1/util/decoding +github.com/open-policy-agent/opa/v1/version # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 -## explicit -# github.com/prometheus/client_golang v1.20.2 +# github.com/prometheus/client_golang v1.20.5 ## explicit; go 1.20 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header @@ -307,6 +319,13 @@ github.com/prometheus/procfs/internal/util # github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 ## explicit github.com/rcrowley/go-metrics +# github.com/sashabaranov/go-openai v1.36.1 +## explicit; go 1.18 +github.com/sashabaranov/go-openai +github.com/sashabaranov/go-openai/internal +# github.com/sethvargo/go-retry v0.3.0 +## explicit; go 1.21 +github.com/sethvargo/go-retry # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -316,12 +335,18 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stoewer/go-strcase 
v1.2.0 +# github.com/stoewer/go-strcase v1.3.0 ## explicit; go 1.11 github.com/stoewer/go-strcase -# github.com/tchap/go-patricia/v2 v2.3.1 +# github.com/tchap/go-patricia/v2 v2.3.2 ## explicit; go 1.16 github.com/tchap/go-patricia/v2/patricia +# github.com/walles/env v0.0.4 +## explicit; go 1.18 +github.com/walles/env +# github.com/x448/float16 v0.8.4 +## explicit; go 1.11 +github.com/x448/float16 # github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb ## explicit github.com/xeipuuv/gojsonpointer @@ -331,15 +356,18 @@ github.com/xeipuuv/gojsonreference # github.com/yashtewari/glob-intersection v0.2.0 ## explicit; go 1.17 github.com/yashtewari/glob-intersection -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 -## explicit; go 1.20 -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 -## explicit; go 1.21 +# go.opentelemetry.io/auto/sdk v1.1.0 +## explicit; go 1.22.0 +go.opentelemetry.io/auto/sdk +go.opentelemetry.io/auto/sdk/internal/telemetry +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -351,47 +379,47 @@ go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 -go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/metric v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded -# go.opentelemetry.io/otel/sdk v1.28.0 -## explicit; go 1.21 +go.opentelemetry.io/otel/metric/noop +# go.opentelemetry.io/otel/sdk v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/trace v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/trace 
go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.3.1 -## explicit; go 1.17 +# go.opentelemetry.io/proto/otlp v1.5.0 +## explicit; go 1.22.0 go.opentelemetry.io/proto/otlp/collector/trace/v1 go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# golang.org/x/exp v0.0.0-20230905200255-921286631fa9 +# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.30.0 +# golang.org/x/net v0.34.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -403,23 +431,23 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/oauth2 v0.21.0 +# golang.org/x/oauth2 v0.24.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.8.0 +# golang.org/x/sync v0.10.0 ## explicit; go 1.18 golang.org/x/sync/singleflight -# golang.org/x/sys v0.26.0 +# golang.org/x/sys v0.29.0 ## explicit; go 1.18 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.25.0 +# golang.org/x/term v0.28.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.19.0 +# golang.org/x/text v0.21.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -449,20 +477,19 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -golang.org/x/text/width -# golang.org/x/time v0.6.0 +# golang.org/x/time v0.9.0 ## explicit; go 1.18 golang.org/x/time/rate -# google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f +## explicit; go 1.22 google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f +## explicit; go 1.22 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.66.0 -## explicit; go 1.21 +# google.golang.org/grpc v1.70.0 +## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -470,6 +497,8 @@ google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst +google.golang.org/grpc/balancer/pickfirst/internal +google.golang.org/grpc/balancer/pickfirst/pickfirstleaf google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -520,8 +549,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +# google.golang.org/protobuf v1.36.3 +## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -544,6 +573,7 @@ google.golang.org/protobuf/internal/genid google.golang.org/protobuf/internal/impl google.golang.org/protobuf/internal/order 
google.golang.org/protobuf/internal/pragma +google.golang.org/protobuf/internal/protolazy google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version @@ -564,19 +594,18 @@ google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb +# gopkg.in/evanphx/json-patch.v4 v4.12.0 +## explicit +gopkg.in/evanphx/json-patch.v4 # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 -# gopkg.in/yaml.v2 v2.4.0 -## explicit; go 1.15 -gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.30.3 -## explicit; go 1.22.0 +# k8s.io/api v0.32.1 +## explicit; go 1.23.0 k8s.io/api/admission/v1 -k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1alpha1 k8s.io/api/admissionregistration/v1beta1 @@ -601,6 +630,7 @@ k8s.io/api/certificates/v1 k8s.io/api/certificates/v1alpha1 k8s.io/api/certificates/v1beta1 k8s.io/api/coordination/v1 +k8s.io/api/coordination/v1alpha2 k8s.io/api/coordination/v1beta1 k8s.io/api/core/v1 k8s.io/api/discovery/v1 @@ -623,7 +653,8 @@ k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 -k8s.io/api/resource/v1alpha2 +k8s.io/api/resource/v1alpha3 +k8s.io/api/resource/v1beta1 k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 @@ -631,8 +662,8 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.30.3 -## explicit; go 1.22.0 +# k8s.io/apiextensions-apiserver v0.32.1 +## explicit; go 1.23.0 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -652,16 +683,18 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.30.3 -## explicit; go 1.22.0 +# k8s.io/apimachinery v0.32.1 +## explicit; go 1.23.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/api/meta/testrestmapper k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/pkg/api/validation/path k8s.io/apimachinery/pkg/apis/meta/internalversion k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme +k8s.io/apimachinery/pkg/apis/meta/internalversion/validation k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured k8s.io/apimachinery/pkg/apis/meta/v1/validation @@ -673,6 +706,9 @@ k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/runtime k8s.io/apimachinery/pkg/runtime/schema k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/cbor +k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct +k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/runtime/serializer/protobuf k8s.io/apimachinery/pkg/runtime/serializer/recognizer @@ -695,7 +731,6 @@ k8s.io/apimachinery/pkg/util/net k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/sets 
k8s.io/apimachinery/pkg/util/strategicpatch -k8s.io/apimachinery/pkg/util/uuid k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/util/version @@ -705,27 +740,8 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.30.3 -## explicit; go 1.22.0 -k8s.io/apiserver/pkg/admission -k8s.io/apiserver/pkg/admission/initializer -k8s.io/apiserver/pkg/admission/metrics -k8s.io/apiserver/pkg/admission/plugin/cel -k8s.io/apiserver/pkg/admission/plugin/policy/generic -k8s.io/apiserver/pkg/admission/plugin/policy/internal/generic -k8s.io/apiserver/pkg/admission/plugin/policy/matching -k8s.io/apiserver/pkg/admission/plugin/policy/validating -k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics -k8s.io/apiserver/pkg/admission/plugin/webhook -k8s.io/apiserver/pkg/admission/plugin/webhook/config -k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission -k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1 -k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1 -k8s.io/apiserver/pkg/admission/plugin/webhook/generic -k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions -k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace -k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object -k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules +# k8s.io/apiserver v0.32.1 +## explicit; go 1.23.0 k8s.io/apiserver/pkg/apis/apiserver k8s.io/apiserver/pkg/apis/apiserver/install k8s.io/apiserver/pkg/apis/apiserver/v1 @@ -741,23 +757,19 @@ k8s.io/apiserver/pkg/authorization/authorizer k8s.io/apiserver/pkg/cel k8s.io/apiserver/pkg/cel/common k8s.io/apiserver/pkg/cel/environment -k8s.io/apiserver/pkg/cel/lazy k8s.io/apiserver/pkg/cel/library k8s.io/apiserver/pkg/cel/metrics k8s.io/apiserver/pkg/cel/openapi -k8s.io/apiserver/pkg/cel/openapi/resolver -k8s.io/apiserver/pkg/endpoints/openapi k8s.io/apiserver/pkg/endpoints/request k8s.io/apiserver/pkg/features -k8s.io/apiserver/pkg/quota/v1 k8s.io/apiserver/pkg/server/egressselector k8s.io/apiserver/pkg/server/egressselector/metrics k8s.io/apiserver/pkg/util/feature k8s.io/apiserver/pkg/util/webhook k8s.io/apiserver/pkg/util/x509metrics k8s.io/apiserver/pkg/warning -# k8s.io/client-go v0.30.3 -## explicit; go 1.22.0 +# k8s.io/client-go v0.32.1 +## explicit; go 1.23.0 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -775,6 +787,7 @@ k8s.io/client-go/applyconfigurations/certificates/v1 k8s.io/client-go/applyconfigurations/certificates/v1alpha1 k8s.io/client-go/applyconfigurations/certificates/v1beta1 k8s.io/client-go/applyconfigurations/coordination/v1 +k8s.io/client-go/applyconfigurations/coordination/v1alpha2 k8s.io/client-go/applyconfigurations/coordination/v1beta1 k8s.io/client-go/applyconfigurations/core/v1 k8s.io/client-go/applyconfigurations/discovery/v1 @@ -799,7 +812,8 @@ k8s.io/client-go/applyconfigurations/policy/v1beta1 k8s.io/client-go/applyconfigurations/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 -k8s.io/client-go/applyconfigurations/resource/v1alpha2 +k8s.io/client-go/applyconfigurations/resource/v1alpha3 
+k8s.io/client-go/applyconfigurations/resource/v1beta1 k8s.io/client-go/applyconfigurations/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 @@ -808,12 +822,9 @@ k8s.io/client-go/applyconfigurations/storage/v1alpha1 k8s.io/client-go/applyconfigurations/storage/v1beta1 k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1 k8s.io/client-go/discovery -k8s.io/client-go/discovery/fake k8s.io/client-go/dynamic -k8s.io/client-go/dynamic/dynamicinformer -k8s.io/client-go/dynamic/dynamiclister -k8s.io/client-go/dynamic/fake k8s.io/client-go/features +k8s.io/client-go/gentype k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration k8s.io/client-go/informers/admissionregistration/v1 @@ -839,6 +850,7 @@ k8s.io/client-go/informers/certificates/v1alpha1 k8s.io/client-go/informers/certificates/v1beta1 k8s.io/client-go/informers/coordination k8s.io/client-go/informers/coordination/v1 +k8s.io/client-go/informers/coordination/v1alpha2 k8s.io/client-go/informers/coordination/v1beta1 k8s.io/client-go/informers/core k8s.io/client-go/informers/core/v1 @@ -872,7 +884,8 @@ k8s.io/client-go/informers/rbac/v1 k8s.io/client-go/informers/rbac/v1alpha1 k8s.io/client-go/informers/rbac/v1beta1 k8s.io/client-go/informers/resource -k8s.io/client-go/informers/resource/v1alpha2 +k8s.io/client-go/informers/resource/v1alpha3 +k8s.io/client-go/informers/resource/v1beta1 k8s.io/client-go/informers/scheduling k8s.io/client-go/informers/scheduling/v1 k8s.io/client-go/informers/scheduling/v1alpha1 @@ -884,112 +897,62 @@ k8s.io/client-go/informers/storage/v1beta1 k8s.io/client-go/informers/storagemigration k8s.io/client-go/informers/storagemigration/v1alpha1 k8s.io/client-go/kubernetes -k8s.io/client-go/kubernetes/fake k8s.io/client-go/kubernetes/scheme k8s.io/client-go/kubernetes/typed/admissionregistration/v1 -k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 -k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 -k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1 -k8s.io/client-go/kubernetes/typed/apiserverinternal/v1alpha1/fake k8s.io/client-go/kubernetes/typed/apps/v1 -k8s.io/client-go/kubernetes/typed/apps/v1/fake k8s.io/client-go/kubernetes/typed/apps/v1beta1 -k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake k8s.io/client-go/kubernetes/typed/apps/v1beta2 -k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake k8s.io/client-go/kubernetes/typed/authentication/v1 -k8s.io/client-go/kubernetes/typed/authentication/v1/fake k8s.io/client-go/kubernetes/typed/authentication/v1alpha1 -k8s.io/client-go/kubernetes/typed/authentication/v1alpha1/fake k8s.io/client-go/kubernetes/typed/authentication/v1beta1 -k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake k8s.io/client-go/kubernetes/typed/authorization/v1 -k8s.io/client-go/kubernetes/typed/authorization/v1/fake k8s.io/client-go/kubernetes/typed/authorization/v1beta1 -k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake k8s.io/client-go/kubernetes/typed/autoscaling/v1 -k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake k8s.io/client-go/kubernetes/typed/autoscaling/v2 -k8s.io/client-go/kubernetes/typed/autoscaling/v2/fake k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1 
-k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 -k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake k8s.io/client-go/kubernetes/typed/batch/v1 -k8s.io/client-go/kubernetes/typed/batch/v1/fake k8s.io/client-go/kubernetes/typed/batch/v1beta1 -k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake k8s.io/client-go/kubernetes/typed/certificates/v1 -k8s.io/client-go/kubernetes/typed/certificates/v1/fake k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 -k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake k8s.io/client-go/kubernetes/typed/certificates/v1beta1 -k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake k8s.io/client-go/kubernetes/typed/coordination/v1 -k8s.io/client-go/kubernetes/typed/coordination/v1/fake +k8s.io/client-go/kubernetes/typed/coordination/v1alpha2 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 -k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake k8s.io/client-go/kubernetes/typed/core/v1 -k8s.io/client-go/kubernetes/typed/core/v1/fake k8s.io/client-go/kubernetes/typed/discovery/v1 -k8s.io/client-go/kubernetes/typed/discovery/v1/fake k8s.io/client-go/kubernetes/typed/discovery/v1beta1 -k8s.io/client-go/kubernetes/typed/discovery/v1beta1/fake k8s.io/client-go/kubernetes/typed/events/v1 -k8s.io/client-go/kubernetes/typed/events/v1/fake k8s.io/client-go/kubernetes/typed/events/v1beta1 -k8s.io/client-go/kubernetes/typed/events/v1beta1/fake k8s.io/client-go/kubernetes/typed/extensions/v1beta1 -k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3/fake k8s.io/client-go/kubernetes/typed/networking/v1 -k8s.io/client-go/kubernetes/typed/networking/v1/fake k8s.io/client-go/kubernetes/typed/networking/v1alpha1 -k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake k8s.io/client-go/kubernetes/typed/networking/v1beta1 -k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake k8s.io/client-go/kubernetes/typed/node/v1 -k8s.io/client-go/kubernetes/typed/node/v1/fake k8s.io/client-go/kubernetes/typed/node/v1alpha1 -k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake k8s.io/client-go/kubernetes/typed/node/v1beta1 -k8s.io/client-go/kubernetes/typed/node/v1beta1/fake k8s.io/client-go/kubernetes/typed/policy/v1 -k8s.io/client-go/kubernetes/typed/policy/v1/fake k8s.io/client-go/kubernetes/typed/policy/v1beta1 -k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake k8s.io/client-go/kubernetes/typed/rbac/v1 -k8s.io/client-go/kubernetes/typed/rbac/v1/fake k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 -k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake k8s.io/client-go/kubernetes/typed/rbac/v1beta1 -k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake -k8s.io/client-go/kubernetes/typed/resource/v1alpha2 -k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake +k8s.io/client-go/kubernetes/typed/resource/v1alpha3 +k8s.io/client-go/kubernetes/typed/resource/v1beta1 k8s.io/client-go/kubernetes/typed/scheduling/v1 -k8s.io/client-go/kubernetes/typed/scheduling/v1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 
-k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 -k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake k8s.io/client-go/kubernetes/typed/storage/v1 -k8s.io/client-go/kubernetes/typed/storage/v1/fake k8s.io/client-go/kubernetes/typed/storage/v1alpha1 -k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake k8s.io/client-go/kubernetes/typed/storage/v1beta1 -k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1 -k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake +k8s.io/client-go/listers k8s.io/client-go/listers/admissionregistration/v1 k8s.io/client-go/listers/admissionregistration/v1alpha1 k8s.io/client-go/listers/admissionregistration/v1beta1 @@ -1007,6 +970,7 @@ k8s.io/client-go/listers/certificates/v1 k8s.io/client-go/listers/certificates/v1alpha1 k8s.io/client-go/listers/certificates/v1beta1 k8s.io/client-go/listers/coordination/v1 +k8s.io/client-go/listers/coordination/v1alpha2 k8s.io/client-go/listers/coordination/v1beta1 k8s.io/client-go/listers/core/v1 k8s.io/client-go/listers/discovery/v1 @@ -1029,7 +993,8 @@ k8s.io/client-go/listers/policy/v1beta1 k8s.io/client-go/listers/rbac/v1 k8s.io/client-go/listers/rbac/v1alpha1 k8s.io/client-go/listers/rbac/v1beta1 -k8s.io/client-go/listers/resource/v1alpha2 +k8s.io/client-go/listers/resource/v1alpha3 +k8s.io/client-go/listers/resource/v1beta1 k8s.io/client-go/listers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1 @@ -1046,7 +1011,6 @@ k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest -k8s.io/client-go/rest/fake k8s.io/client-go/rest/watch k8s.io/client-go/restmapper k8s.io/client-go/testing @@ -1063,15 +1027,19 @@ k8s.io/client-go/tools/metrics k8s.io/client-go/tools/pager k8s.io/client-go/tools/reference k8s.io/client-go/transport +k8s.io/client-go/util/apply k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation +k8s.io/client-go/util/consistencydetector k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry +k8s.io/client-go/util/watchlist k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.30.3 -## explicit; go 1.22.0 +# k8s.io/component-base v0.32.1 +## explicit; go 1.23.0 +k8s.io/component-base/cli/flag k8s.io/component-base/featuregate k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry @@ -1080,7 +1048,7 @@ k8s.io/component-base/metrics/prometheusextension k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/klog/v2 v2.120.1 +# k8s.io/klog/v2 v2.130.1 ## explicit; go 1.18 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -1089,7 +1057,7 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 +# k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f ## explicit; go 1.20 k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common @@ -1098,14 +1066,13 @@ k8s.io/kube-openapi/pkg/internal k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/spec3 -k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/errors 
k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/utils v0.0.0-20230726121419-3b25d923346b +# k8s.io/utils v0.0.0-20241210054802-24370beab758 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1117,16 +1084,15 @@ k8s.io/utils/net k8s.io/utils/path k8s.io/utils/pointer k8s.io/utils/ptr -k8s.io/utils/strings/slices k8s.io/utils/trace -# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 -## explicit; go 1.20 +# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 +## explicit; go 1.21 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/controller-runtime v0.18.4 -## explicit; go 1.22.0 +# sigs.k8s.io/controller-runtime v0.20.1 +## explicit; go 1.23.0 sigs.k8s.io/controller-runtime/pkg/certwatcher sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics sigs.k8s.io/controller-runtime/pkg/client @@ -1144,11 +1110,11 @@ sigs.k8s.io/controller-runtime/pkg/log sigs.k8s.io/controller-runtime/pkg/metrics sigs.k8s.io/controller-runtime/pkg/scheme sigs.k8s.io/controller-runtime/pkg/webhook/conversion -# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd -## explicit; go 1.18 +# sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 +## explicit; go 1.21 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/structured-merge-diff/v4 v4.4.1 +# sigs.k8s.io/structured-merge-diff/v4 v4.4.2 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge diff --git a/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go b/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go index 6af92b448..0831f3e63 100644 --- a/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go +++ b/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.27.1 -// protoc v3.12.4 +// protoc v3.21.12 // source: konnectivity-client/proto/client/client.proto package client @@ -43,6 +43,7 @@ const ( PacketType_CLOSE_RSP PacketType = 3 PacketType_DATA PacketType = 4 PacketType_DIAL_CLS PacketType = 5 + PacketType_DRAIN PacketType = 6 ) // Enum value maps for PacketType. 
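
The hunks around this point add a new DRAIN packet type and a matching Drain oneof member to the generated konnectivity-client API. As a minimal sketch — not part of the vendored change, and assuming only the generated names shown in this file (PacketType_DRAIN, Packet_Drain, Drain, GetDrain) — a caller would emit the new packet like this:

package main

import (
	"fmt"

	client "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client"
)

func main() {
	// An agent that is about to shut down hints the proxy server to stop
	// routing new dials to it by sending a DRAIN packet on the Proxy stream.
	pkt := &client.Packet{
		Type:    client.PacketType_DRAIN,
		Payload: &client.Packet_Drain{Drain: &client.Drain{}},
	}
	fmt.Println(pkt.GetDrain() != nil) // prints "true"
}
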
@@ -54,6 +55,7 @@ var ( 3: "CLOSE_RSP", 4: "DATA", 5: "DIAL_CLS", + 6: "DRAIN", } PacketType_value = map[string]int32{ "DIAL_REQ": 0, @@ -62,6 +64,7 @@ var ( "CLOSE_RSP": 3, "DATA": 4, "DIAL_CLS": 5, + "DRAIN": 6, } ) @@ -106,6 +109,7 @@ type Packet struct { // *Packet_CloseRequest // *Packet_CloseResponse // *Packet_CloseDial + // *Packet_Drain Payload isPacket_Payload `protobuf_oneof:"payload"` } @@ -197,6 +201,13 @@ func (x *Packet) GetCloseDial() *CloseDial { return nil } +func (x *Packet) GetDrain() *Drain { + if x, ok := x.GetPayload().(*Packet_Drain); ok { + return x.Drain + } + return nil +} + type isPacket_Payload interface { isPacket_Payload() } @@ -225,6 +236,10 @@ type Packet_CloseDial struct { CloseDial *CloseDial `protobuf:"bytes,7,opt,name=closeDial,proto3,oneof"` } +type Packet_Drain struct { + Drain *Drain `protobuf:"bytes,8,opt,name=drain,proto3,oneof"` +} + func (*Packet_DialRequest) isPacket_Payload() {} func (*Packet_DialResponse) isPacket_Payload() {} @@ -237,6 +252,8 @@ func (*Packet_CloseResponse) isPacket_Payload() {} func (*Packet_CloseDial) isPacket_Payload() {} +func (*Packet_Drain) isPacket_Payload() {} + type DialRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -522,6 +539,44 @@ func (x *CloseDial) GetRandom() int64 { return 0 } +type Drain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Drain) Reset() { + *x = Drain{} + if protoimpl.UnsafeEnabled { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Drain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Drain) ProtoMessage() {} + +func (x *Drain) ProtoReflect() protoreflect.Message { + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Drain.ProtoReflect.Descriptor instead. +func (*Drain) Descriptor() ([]byte, []int) { + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{6} +} + type Data struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -538,7 +593,7 @@ type Data struct { func (x *Data) Reset() { *x = Data{} if protoimpl.UnsafeEnabled { - mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[6] + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -551,7 +606,7 @@ func (x *Data) String() string { func (*Data) ProtoMessage() {} func (x *Data) ProtoReflect() protoreflect.Message { - mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[6] + mi := &file_konnectivity_client_proto_client_client_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -564,7 +619,7 @@ func (x *Data) ProtoReflect() protoreflect.Message { // Deprecated: Use Data.ProtoReflect.Descriptor instead. 
func (*Data) Descriptor() ([]byte, []int) { - return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{6} + return file_konnectivity_client_proto_client_client_proto_rawDescGZIP(), []int{7} } func (x *Data) GetConnectID() int64 { @@ -594,7 +649,7 @@ var file_konnectivity_client_proto_client_client_proto_rawDesc = []byte{ 0x0a, 0x2d, 0x6b, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xd1, 0x02, 0x0a, 0x06, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, + 0xf1, 0x02, 0x0a, 0x06, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, @@ -614,7 +669,9 @@ var file_konnectivity_client_proto_client_client_proto_rawDesc = []byte{ 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x48, 0x00, 0x52, 0x09, 0x63, - 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, + 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x12, 0x1e, 0x0a, 0x05, 0x64, 0x72, 0x61, 0x69, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x06, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x48, + 0x00, 0x52, 0x05, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x5b, 0x0a, 0x0b, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x18, @@ -636,26 +693,27 @@ var file_konnectivity_client_proto_client_client_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x22, 0x23, 0x0a, 0x09, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x61, - 0x6e, 0x64, 0x6f, 0x6d, 0x22, 0x4e, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x2a, 0x5e, 0x0a, 0x0a, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x10, 0x00, - 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x52, 0x53, 0x50, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x5f, 0x52, 0x45, 0x51, 0x10, 0x02, 0x12, 0x0d, 0x0a, - 0x09, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x5f, 0x52, 0x53, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, - 0x44, 0x41, 
0x54, 0x41, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x43, - 0x4c, 0x53, 0x10, 0x05, 0x32, 0x2f, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x07, 0x2e, - 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x07, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, - 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x46, 0x5a, 0x44, 0x73, 0x69, 0x67, 0x73, 0x2e, 0x6b, 0x38, - 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2d, 0x6e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2d, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x6b, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x64, 0x6f, 0x6d, 0x22, 0x07, 0x0a, 0x05, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x22, 0x4e, 0x0a, + 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x2a, 0x69, 0x0a, + 0x0a, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x44, + 0x49, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, + 0x4c, 0x5f, 0x52, 0x53, 0x50, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x53, 0x45, + 0x5f, 0x52, 0x45, 0x51, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x5f, + 0x52, 0x53, 0x50, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x41, 0x54, 0x41, 0x10, 0x04, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x43, 0x4c, 0x53, 0x10, 0x05, 0x12, 0x09, 0x0a, + 0x05, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x10, 0x06, 0x32, 0x2f, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x12, 0x07, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x07, 0x2e, 0x50, 0x61, 0x63, + 0x6b, 0x65, 0x74, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x46, 0x5a, 0x44, 0x73, 0x69, 0x67, + 0x73, 0x2e, 0x6b, 0x38, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x2d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2d, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2f, 0x6b, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2d, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -671,7 +729,7 @@ func file_konnectivity_client_proto_client_client_proto_rawDescGZIP() []byte { } var file_konnectivity_client_proto_client_client_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_konnectivity_client_proto_client_client_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_konnectivity_client_proto_client_client_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_konnectivity_client_proto_client_client_proto_goTypes = []interface{}{ (PacketType)(0), // 0: PacketType (*Packet)(nil), // 1: Packet @@ -680,23 +738,25 @@ var file_konnectivity_client_proto_client_client_proto_goTypes = 
[]interface{}{ (*CloseRequest)(nil), // 4: CloseRequest (*CloseResponse)(nil), // 5: CloseResponse (*CloseDial)(nil), // 6: CloseDial - (*Data)(nil), // 7: Data + (*Drain)(nil), // 7: Drain + (*Data)(nil), // 8: Data } var file_konnectivity_client_proto_client_client_proto_depIdxs = []int32{ 0, // 0: Packet.type:type_name -> PacketType 2, // 1: Packet.dialRequest:type_name -> DialRequest 3, // 2: Packet.dialResponse:type_name -> DialResponse - 7, // 3: Packet.data:type_name -> Data + 8, // 3: Packet.data:type_name -> Data 4, // 4: Packet.closeRequest:type_name -> CloseRequest 5, // 5: Packet.closeResponse:type_name -> CloseResponse 6, // 6: Packet.closeDial:type_name -> CloseDial - 1, // 7: ProxyService.Proxy:input_type -> Packet - 1, // 8: ProxyService.Proxy:output_type -> Packet - 8, // [8:9] is the sub-list for method output_type - 7, // [7:8] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 7, // 7: Packet.drain:type_name -> Drain + 1, // 8: ProxyService.Proxy:input_type -> Packet + 1, // 9: ProxyService.Proxy:output_type -> Packet + 9, // [9:10] is the sub-list for method output_type + 8, // [8:9] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_konnectivity_client_proto_client_client_proto_init() } @@ -778,6 +838,18 @@ func file_konnectivity_client_proto_client_client_proto_init() { } } file_konnectivity_client_proto_client_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Drain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_konnectivity_client_proto_client_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Data); i { case 0: return &v.state @@ -797,6 +869,7 @@ func file_konnectivity_client_proto_client_client_proto_init() { (*Packet_CloseRequest)(nil), (*Packet_CloseResponse)(nil), (*Packet_CloseDial)(nil), + (*Packet_Drain)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -804,7 +877,7 @@ func file_konnectivity_client_proto_client_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_konnectivity_client_proto_client_client_proto_rawDesc, NumEnums: 1, - NumMessages: 7, + NumMessages: 8, NumExtensions: 0, NumServices: 1, }, diff --git a/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto b/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto index 811278e62..006e0ac83 100644 --- a/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto +++ b/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto @@ -30,6 +30,7 @@ enum PacketType { CLOSE_RSP = 3; DATA = 4; DIAL_CLS = 5; + DRAIN = 6; } message Packet { @@ -42,6 +43,7 @@ message Packet { CloseRequest closeRequest = 5; CloseResponse closeResponse = 6; CloseDial closeDial = 7; + Drain drain = 8; } } @@ -85,6 +87,11 @@ message CloseDial { int64 random = 1; } +message Drain { + // A hint from an Agent to Server that it is pending termination. + // A Server should prefer non-draining agents for new dials. 
+} + message Data { // connectID to connect to int64 connectID = 1; diff --git a/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go b/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go index b8d07fe55..5a0d6a2a8 100644 --- a/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go +++ b/constraint/vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.12.4 +// - protoc v3.21.12 // source: konnectivity-client/proto/client/client.proto package client diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go index 2b9b60d8d..c32324098 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go @@ -17,9 +17,11 @@ limitations under the License. package certwatcher import ( + "bytes" "context" "crypto/tls" "fmt" + "os" "sync" "time" @@ -33,18 +35,24 @@ import ( var log = logf.RuntimeLog.WithName("certwatcher") -// CertWatcher watches certificate and key files for changes. When either file -// changes, it reads and parses both and calls an optional callback with the new -// certificate. +const defaultWatchInterval = 10 * time.Second + +// CertWatcher watches certificate and key files for changes. +// It always returns the cached version, +// but periodically reads and parses certificate and key for changes +// and calls an optional callback with the new certificate. type CertWatcher struct { sync.RWMutex currentCert *tls.Certificate watcher *fsnotify.Watcher + interval time.Duration certPath string keyPath string + cachedKeyPEMBlock []byte + // callback is a function to be invoked when the certificate changes. callback func(tls.Certificate) } @@ -56,6 +64,7 @@ func New(certPath, keyPath string) (*CertWatcher, error) { cw := &CertWatcher{ certPath: certPath, keyPath: keyPath, + interval: defaultWatchInterval, } // Initial read of certificate and key. @@ -71,6 +80,12 @@ func New(certPath, keyPath string) (*CertWatcher, error) { return cw, nil } +// WithWatchInterval sets the watch interval and returns the CertWatcher pointer +func (cw *CertWatcher) WithWatchInterval(interval time.Duration) *CertWatcher { + cw.interval = interval + return cw +} + // RegisterCallback registers a callback to be invoked when the certificate changes. func (cw *CertWatcher) RegisterCallback(callback func(tls.Certificate)) { cw.Lock() @@ -112,12 +127,20 @@ func (cw *CertWatcher) Start(ctx context.Context) error { go cw.Watch() - log.Info("Starting certificate watcher") - - // Block until the context is done. - <-ctx.Done() + ticker := time.NewTicker(cw.interval) + defer ticker.Stop() - return cw.watcher.Close() + log.Info("Starting certificate poll+watcher", "interval", cw.interval) + for { + select { + case <-ctx.Done(): + return cw.watcher.Close() + case <-ticker.C: + if err := cw.ReadCertificate(); err != nil { + log.Error(err, "failed read certificate") + } + } + } } // Watch reads events from the watcher's channel and reacts to changes. 
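
The rewritten CertWatcher above now backs the fsnotify watch with a polling ticker and only swaps the cached certificate when the PEM bytes actually change. A minimal usage sketch, assuming hypothetical file paths and using only the methods added or kept in this diff (New, WithWatchInterval, RegisterCallback, Start):

package main

import (
	"context"
	"crypto/tls"
	"log"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
)

func main() {
	// New performs the initial read of the certificate and key.
	cw, err := certwatcher.New("/etc/certs/tls.crt", "/etc/certs/tls.key")
	if err != nil {
		log.Fatal(err)
	}
	// WithWatchInterval tunes the new polling ticker (default 10s above).
	cw = cw.WithWatchInterval(30 * time.Second)
	// The callback fires whenever the cached certificate is replaced.
	cw.RegisterCallback(func(tls.Certificate) {
		log.Println("serving certificate rotated")
	})
	// Start blocks, re-reading the files on fsnotify events and on every
	// tick, until the context is cancelled.
	if err := cw.Start(context.Background()); err != nil {
		log.Fatal(err)
	}
}
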
@@ -131,7 +154,6 @@ func (cw *CertWatcher) Watch() { } cw.handleEvent(event) - case err, ok := <-cw.watcher.Errors: // Channel is closed. if !ok { @@ -143,20 +165,48 @@ func (cw *CertWatcher) Watch() { } } +// updateCachedCertificate checks if the new certificate differs from the cache, +// updates it and returns the result if it was updated or not +func (cw *CertWatcher) updateCachedCertificate(cert *tls.Certificate, keyPEMBlock []byte) bool { + cw.Lock() + defer cw.Unlock() + + if cw.currentCert != nil && + bytes.Equal(cw.currentCert.Certificate[0], cert.Certificate[0]) && + bytes.Equal(cw.cachedKeyPEMBlock, keyPEMBlock) { + log.V(7).Info("certificate already cached") + return false + } + cw.currentCert = cert + cw.cachedKeyPEMBlock = keyPEMBlock + return true +} + // ReadCertificate reads the certificate and key files from disk, parses them, -// and updates the current certificate on the watcher. If a callback is set, it +// and updates the current certificate on the watcher if updated. If a callback is set, it // is invoked with the new certificate. func (cw *CertWatcher) ReadCertificate() error { metrics.ReadCertificateTotal.Inc() - cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath) + certPEMBlock, err := os.ReadFile(cw.certPath) + if err != nil { + metrics.ReadCertificateErrors.Inc() + return err + } + keyPEMBlock, err := os.ReadFile(cw.keyPath) if err != nil { metrics.ReadCertificateErrors.Inc() return err } - cw.Lock() - cw.currentCert = &cert - cw.Unlock() + cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + metrics.ReadCertificateErrors.Inc() + return err + } + + if !cw.updateCachedCertificate(&cert, keyPEMBlock) { + return nil + } log.Info("Updated current TLS certificate") @@ -173,32 +223,20 @@ func (cw *CertWatcher) ReadCertificate() error { func (cw *CertWatcher) handleEvent(event fsnotify.Event) { // Only care about events which may modify the contents of the file. - if !(isWrite(event) || isRemove(event) || isCreate(event)) { - return - } - - log.V(1).Info("certificate event", "event", event) - - // If the file was removed, re-add the watch. 
- if isRemove(event) { + switch { + case event.Op.Has(fsnotify.Write): + case event.Op.Has(fsnotify.Create): + case event.Op.Has(fsnotify.Chmod), event.Op.Has(fsnotify.Remove): + // If the file was removed or renamed, re-add the watch to the previous name if err := cw.watcher.Add(event.Name); err != nil { log.Error(err, "error re-watching file") } + default: + return } + log.V(1).Info("certificate event", "event", event) if err := cw.ReadCertificate(); err != nil { log.Error(err, "error re-reading certificate") } } - -func isWrite(event fsnotify.Event) bool { - return event.Op.Has(fsnotify.Write) -} - -func isCreate(event fsnotify.Event) bool { - return event.Op.Has(fsnotify.Create) -} - -func isRemove(event fsnotify.Event) bool { - return event.Op.Has(fsnotify.Remove) -} diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go index 05869eff0..f128abbcf 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go @@ -18,6 +18,7 @@ package metrics import ( "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" ) diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index 3c0206bea..1d4ce264c 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -72,7 +72,10 @@ func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper m // IsGVKNamespaced returns true if the object having the provided // GVK is namespace scoped. func IsGVKNamespaced(gvk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) { - restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}) + // Fetch the RESTMapping using the complete GVK. If we exclude the Version, the Version set + // will be populated using the cached Group if available. This can lead to failures updating + // the cache with new Versions of CRDs registered at runtime. + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) if err != nil { return false, fmt.Errorf("failed to get restmapping: %w", err) } diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go index 927be22b4..ad898617f 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -28,6 +28,7 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" + "k8s.io/utils/ptr" ) // NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic @@ -41,6 +42,7 @@ func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTM if err != nil { return nil, err } + return &mapper{ mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), client: client, @@ -53,11 +55,15 @@ func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTM // client for discovery information to do REST mappings. 
type mapper struct { mapper meta.RESTMapper - client discovery.DiscoveryInterface + client discovery.AggregatedDiscoveryInterface knownGroups map[string]*restmapper.APIGroupResources apiGroups map[string]*metav1.APIGroup + initialDiscoveryDone bool + // mutex to provide thread-safe mapper reloading. + // It protects all fields in the mapper as well as methods + // that have the `Locked` suffix. mu sync.RWMutex } @@ -159,28 +165,42 @@ func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) er versions = nil } + m.mu.Lock() + defer m.mu.Unlock() // If no specific versions are set by user, we will scan all available ones for the API group. // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls // this data will be taken from cache. - if len(versions) == 0 { - apiGroup, err := m.findAPIGroupByName(groupName) + // + // We always run this once, because if the server supports aggregated discovery, this will + // load everything with two api calls which we assume is overall cheaper. + if len(versions) == 0 || !m.initialDiscoveryDone { + apiGroup, didAggregatedDiscovery, err := m.findAPIGroupByNameAndMaybeAggregatedDiscoveryLocked(groupName) if err != nil { return err } - if apiGroup != nil { + if apiGroup != nil && len(versions) == 0 { for _, version := range apiGroup.Versions { versions = append(versions, version.Version) } } - } - m.mu.Lock() - defer m.mu.Unlock() - - // Create or fetch group resources from cache. - groupResources := &restmapper.APIGroupResources{ - Group: metav1.APIGroup{Name: groupName}, - VersionedResources: make(map[string][]metav1.APIResource), + // No need to do anything further if aggregatedDiscovery is supported and we did a lookup + if didAggregatedDiscovery { + failedGroups := make(map[schema.GroupVersion]error) + for _, version := range versions { + if m.knownGroups[groupName] == nil || m.knownGroups[groupName].VersionedResources[version] == nil { + failedGroups[schema.GroupVersion{Group: groupName, Version: version}] = &meta.NoResourceMatchError{ + PartialResource: schema.GroupVersionResource{ + Group: groupName, + Version: version, + }} + } + } + if len(failedGroups) > 0 { + return ptr.To(ErrResourceDiscoveryFailed(failedGroups)) + } + return nil + } } // Update information for group resources about versioned resources. @@ -194,13 +214,26 @@ func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) er return fmt.Errorf("failed to get API group resources: %w", err) } - if _, ok := m.knownGroups[groupName]; ok { - groupResources = m.knownGroups[groupName] - } + m.addGroupVersionResourcesToCacheAndReloadLocked(groupVersionResources) + return nil +} +// addGroupVersionResourcesToCacheAndReloadLocked does what the name suggests. The mutex must be held when +// calling it. +func (m *mapper) addGroupVersionResourcesToCacheAndReloadLocked(gvr map[schema.GroupVersion]*metav1.APIResourceList) { // Update information for group resources about the API group by adding new versions. - // Ignore the versions that are already registered. 
- for groupVersion, resources := range groupVersionResources { + // Ignore the versions that are already registered + for groupVersion, resources := range gvr { + var groupResources *restmapper.APIGroupResources + if _, ok := m.knownGroups[groupVersion.Group]; ok { + groupResources = m.knownGroups[groupVersion.Group] + } else { + groupResources = &restmapper.APIGroupResources{ + Group: metav1.APIGroup{Name: groupVersion.Group}, + VersionedResources: make(map[string][]metav1.APIResource), + } + } + version := groupVersion.Version groupResources.VersionedResources[version] = resources.APIResources @@ -214,60 +247,56 @@ func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) er if !found { groupResources.Group.Versions = append(groupResources.Group.Versions, metav1.GroupVersionForDiscovery{ - GroupVersion: metav1.GroupVersion{Group: groupName, Version: version}.String(), + GroupVersion: metav1.GroupVersion{Group: groupVersion.Group, Version: version}.String(), Version: version, }) } - } - // Update data in the cache. - m.knownGroups[groupName] = groupResources + // Update data in the cache. + m.knownGroups[groupVersion.Group] = groupResources + } - // Finally, update the group with received information and regenerate the mapper. + // Finally, reload the mapper. updatedGroupResources := make([]*restmapper.APIGroupResources, 0, len(m.knownGroups)) for _, agr := range m.knownGroups { updatedGroupResources = append(updatedGroupResources, agr) } m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources) - return nil } -// findAPIGroupByNameLocked returns API group by its name. -func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) { - // Looking in the cache first. - { - m.mu.RLock() - group, ok := m.apiGroups[groupName] - m.mu.RUnlock() - if ok { - return group, nil - } +// findAPIGroupByNameAndMaybeAggregatedDiscoveryLocked tries to find the passed apiGroup. +// If the server supports aggregated discovery, it will always perform that. +func (m *mapper) findAPIGroupByNameAndMaybeAggregatedDiscoveryLocked(groupName string) (_ *metav1.APIGroup, didAggregatedDiscovery bool, _ error) { + // Looking in the cache first + group, ok := m.apiGroups[groupName] + if ok { + return group, false, nil } // Update the cache if nothing was found. - apiGroups, err := m.client.ServerGroups() + apiGroups, maybeResources, _, err := m.client.GroupsAndMaybeResources() if err != nil { - return nil, fmt.Errorf("failed to get server groups: %w", err) + return nil, false, fmt.Errorf("failed to get server groups: %w", err) } if len(apiGroups.Groups) == 0 { - return nil, fmt.Errorf("received an empty API groups list") + return nil, false, fmt.Errorf("received an empty API groups list") } - m.mu.Lock() + m.initialDiscoveryDone = true + if len(maybeResources) > 0 { + didAggregatedDiscovery = true + m.addGroupVersionResourcesToCacheAndReloadLocked(maybeResources) + } for i := range apiGroups.Groups { group := &apiGroups.Groups[i] m.apiGroups[group.Name] = group } - m.mu.Unlock() // Looking in the cache again. - m.mu.RLock() - defer m.mu.RUnlock() - // Don't return an error here if the API group is not present. // The reloaded RESTMapper will take care of returning a NoMatchError. - return m.apiGroups[groupName], nil + return m.apiGroups[groupName], didAggregatedDiscovery, nil } // fetchGroupVersionResourcesLocked fetches the resources for the specified group and its versions. 
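
The restmapper changes above teach the lazy mapper to use aggregated discovery (GroupsAndMaybeResources) on the first lookup and to key its cache per group/version; the caller-facing API is unchanged. A short sketch of the entry points this diff touches (NewDynamicRESTMapper and a versioned RESTMapping lookup, as IsGVKNamespaced now does), assuming in-cluster credentials:

package main

import (
	"fmt"
	"log"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes the program runs in a pod
	if err != nil {
		log.Fatal(err)
	}
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		log.Fatal(err)
	}
	mapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
	if err != nil {
		log.Fatal(err)
	}
	// Passing the version explicitly avoids resolving against a stale cached
	// group, which is the rationale given for the IsGVKNamespaced change above.
	m, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.Resource) // apps/v1, Resource=deployments
}
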
@@ -283,10 +312,10 @@ func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ... if apierrors.IsNotFound(err) { // If the version is not found, we remove the group from the cache // so it gets refreshed on the next call. - if m.isAPIGroupCached(groupVersion) { + if m.isAPIGroupCachedLocked(groupVersion) { delete(m.apiGroups, groupName) } - if m.isGroupVersionCached(groupVersion) { + if m.isGroupVersionCachedLocked(groupVersion) { delete(m.knownGroups, groupName) } continue @@ -308,8 +337,8 @@ func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ... return groupVersionResources, nil } -// isGroupVersionCached checks if a version for a group is cached in the known groups cache. -func (m *mapper) isGroupVersionCached(gv schema.GroupVersion) bool { +// isGroupVersionCachedLocked checks if a version for a group is cached in the known groups cache. +func (m *mapper) isGroupVersionCachedLocked(gv schema.GroupVersion) bool { if cachedGroup, ok := m.knownGroups[gv.Group]; ok { _, cached := cachedGroup.VersionedResources[gv.Version] return cached @@ -318,8 +347,8 @@ func (m *mapper) isGroupVersionCached(gv schema.GroupVersion) bool { return false } -// isAPIGroupCached checks if a version for a group is cached in the api groups cache. -func (m *mapper) isAPIGroupCached(gv schema.GroupVersion) bool { +// isAPIGroupCachedLocked checks if a version for a group is cached in the api groups cache. +func (m *mapper) isAPIGroupCachedLocked(gv schema.GroupVersion) bool { cachedGroup, ok := m.apiGroups[gv.Group] if !ok { return false diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index e6c075eb0..6d8744017 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -50,28 +50,10 @@ type Options struct { // Cache, if provided, is used to read objects from the cache. Cache *CacheOptions - // WarningHandler is used to configure the warning handler responsible for - // surfacing and handling warnings messages sent by the API server. - WarningHandler WarningHandlerOptions - // DryRun instructs the client to only perform dry run requests. DryRun *bool } -// WarningHandlerOptions are options for configuring a -// warning handler for the client which is responsible -// for surfacing API Server warnings. -type WarningHandlerOptions struct { - // SuppressWarnings decides if the warnings from the - // API server are suppressed or surfaced in the client. - SuppressWarnings bool - // AllowDuplicateLogs does not deduplicate the to-be - // logged surfaced warnings messages. See - // log.WarningHandlerOptions for considerations - // regarding deduplication - AllowDuplicateLogs bool -} - // CacheOptions are options for creating a cache-backed client. type CacheOptions struct { // Reader is a cache-backed reader that will be used to read objects from the cache. @@ -91,6 +73,12 @@ type NewClientFunc func(config *rest.Config, options Options) (Client, error) // New returns a new Client using the provided config and Options. // +// By default, the client surfaces warnings returned by the server. To +// suppress warnings, set config.WarningHandler = rest.NoWarnings{}. To +// define custom behavior, implement the rest.WarningHandler interface. +// See [sigs.k8s.io/controller-runtime/pkg/log.KubeAPIWarningLogger] for +// an example. 
+// // The client's read behavior is determined by Options.Cache. // If either Options.Cache or Options.Cache.Reader is nil, // the client reads directly from the API server. @@ -124,17 +112,12 @@ func newClient(config *rest.Config, options Options) (*client, error) { config.UserAgent = rest.DefaultKubernetesUserAgent() } - if !options.WarningHandler.SuppressWarnings { - // surface warnings - logger := log.Log.WithName("KubeAPIWarningLogger") - // Set a WarningHandler, the default WarningHandler - // is log.KubeAPIWarningLogger with deduplication enabled. - // See log.KubeAPIWarningLoggerOptions for considerations - // regarding deduplication. + if config.WarningHandler == nil { + // By default, we surface warnings. config.WarningHandler = log.NewKubeAPIWarningLogger( - logger, + log.Log.WithName("KubeAPIWarningLogger"), log.KubeAPIWarningLoggerOptions{ - Deduplicate: !options.WarningHandler.AllowDuplicateLogs, + Deduplicate: false, }, ) } diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go new file mode 100644 index 000000000..659b3d44c --- /dev/null +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go @@ -0,0 +1,106 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// WithFieldValidation wraps a Client and configures field validation, by +// default, for all write requests from this client. Users can override field +// validation for individual write requests. +func WithFieldValidation(c Client, validation FieldValidation) Client { + return &clientWithFieldValidation{ + validation: validation, + client: c, + Reader: c, + } +} + +type clientWithFieldValidation struct { + validation FieldValidation + client Client + Reader +} + +func (c *clientWithFieldValidation) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append([]CreateOption{c.validation}, opts...)...) +} + +func (c *clientWithFieldValidation) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append([]UpdateOption{c.validation}, opts...)...) +} + +func (c *clientWithFieldValidation) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append([]PatchOption{c.validation}, opts...)...) +} + +func (c *clientWithFieldValidation) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, opts...) +} + +func (c *clientWithFieldValidation) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, opts...) 
+} + +func (c *clientWithFieldValidation) Scheme() *runtime.Scheme { return c.client.Scheme() } +func (c *clientWithFieldValidation) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } +func (c *clientWithFieldValidation) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +func (c *clientWithFieldValidation) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + +func (c *clientWithFieldValidation) Status() StatusWriter { + return &subresourceClientWithFieldValidation{ + validation: c.validation, + subresourceWriter: c.client.Status(), + } +} + +func (c *clientWithFieldValidation) SubResource(subresource string) SubResourceClient { + srClient := c.client.SubResource(subresource) + return &subresourceClientWithFieldValidation{ + validation: c.validation, + subresourceWriter: srClient, + SubResourceReader: srClient, + } +} + +type subresourceClientWithFieldValidation struct { + validation FieldValidation + subresourceWriter SubResourceWriter + SubResourceReader +} + +func (c *subresourceClientWithFieldValidation) Create(ctx context.Context, obj Object, subresource Object, opts ...SubResourceCreateOption) error { + return c.subresourceWriter.Create(ctx, obj, subresource, append([]SubResourceCreateOption{c.validation}, opts...)...) +} + +func (c *subresourceClientWithFieldValidation) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + return c.subresourceWriter.Update(ctx, obj, append([]SubResourceUpdateOption{c.validation}, opts...)...) +} + +func (c *subresourceClientWithFieldValidation) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + return c.subresourceWriter.Patch(ctx, obj, patch, append([]SubResourcePatchOption{c.validation}, opts...)...) 
+} diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index 3cd745e4c..3b282fc2c 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -94,16 +94,16 @@ type SubResourceClientConstructor interface { // - ServiceAccount token creation: // sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} // token := &authenticationv1.TokenRequest{} - // c.SubResourceClient("token").Create(ctx, sa, token) + // c.SubResource("token").Create(ctx, sa, token) // // - Pod eviction creation: // pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} - // c.SubResourceClient("eviction").Create(ctx, pod, &policyv1.Eviction{}) + // c.SubResource("eviction").Create(ctx, pod, &policyv1.Eviction{}) // // - Pod binding creation: // pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} // binding := &corev1.Binding{Target: corev1.ObjectReference{Name: "my-node"}} - // c.SubResourceClient("binding").Create(ctx, pod, binding) + // c.SubResource("binding").Create(ctx, pod, binding) // // - CertificateSigningRequest approval: // csr := &certificatesv1.CertificateSigningRequest{ @@ -115,17 +115,17 @@ type SubResourceClientConstructor interface { // }}, // }, // } - // c.SubResourceClient("approval").Update(ctx, csr) + // c.SubResource("approval").Update(ctx, csr) // // - Scale retrieval: // dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} // scale := &autoscalingv1.Scale{} - // c.SubResourceClient("scale").Get(ctx, dep, scale) + // c.SubResource("scale").Get(ctx, dep, scale) // // - Scale update: // dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} // scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 2}} - // c.SubResourceClient("scale").Update(ctx, dep, client.WithSubResourceBody(scale)) + // c.SubResource("scale").Update(ctx, dep, client.WithSubResourceBody(scale)) SubResource(subResource string) SubResourceClient } @@ -193,7 +193,7 @@ type IndexerFunc func(Object) []string // FieldIndexer knows how to index over a particular "field" such that it // can later be used by a field selector. type FieldIndexer interface { - // IndexFields adds an index with the given field name on the given object type + // IndexField adds an index with the given field name on the given object type // by using the given function to extract the value for that field. If you want // compatibility with the Kubernetes API server, only return one key, and only use // fields that the API server supports. Otherwise, you can return multiple keys, diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index 798506f48..db50ed8fe 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -169,6 +169,39 @@ func (f FieldOwner) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) { opts.FieldManager = string(f) } +// FieldValidation configures field validation for the given requests. +type FieldValidation string + +// ApplyToPatch applies this configuration to the given patch options. 
+func (f FieldValidation) ApplyToPatch(opts *PatchOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToCreate applies this configuration to the given create options. +func (f FieldValidation) ApplyToCreate(opts *CreateOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToUpdate applies this configuration to the given update options. +func (f FieldValidation) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToSubResourcePatch applies this configuration to the given patch options. +func (f FieldValidation) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToSubResourceCreate applies this configuration to the given create options. +func (f FieldValidation) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToSubResourceUpdate applies this configuration to the given update options. +func (f FieldValidation) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) { + opts.FieldValidation = string(f) +} + // }}} // {{{ Create Options @@ -187,6 +220,24 @@ type CreateOptions struct { // this request. It must be set with server-side apply. FieldManager string + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. + FieldValidation string + // Raw represents raw CreateOptions, as passed to the API server. Raw *metav1.CreateOptions } @@ -203,6 +254,7 @@ func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { o.Raw.DryRun = o.DryRun o.Raw.FieldManager = o.FieldManager + o.Raw.FieldValidation = o.FieldValidation return o.Raw } @@ -223,6 +275,9 @@ func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { if o.FieldManager != "" { co.FieldManager = o.FieldManager } + if o.FieldValidation != "" { + co.FieldValidation = o.FieldValidation + } if o.Raw != nil { co.Raw = o.Raw } @@ -679,6 +734,24 @@ type UpdateOptions struct { // this request. It must be set with server-side apply. FieldManager string + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. 
The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. + FieldValidation string + // Raw represents raw UpdateOptions, as passed to the API server. Raw *metav1.UpdateOptions } @@ -695,6 +768,7 @@ func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { o.Raw.DryRun = o.DryRun o.Raw.FieldManager = o.FieldManager + o.Raw.FieldValidation = o.FieldValidation return o.Raw } @@ -717,6 +791,9 @@ func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { if o.FieldManager != "" { uo.FieldManager = o.FieldManager } + if o.FieldValidation != "" { + uo.FieldValidation = o.FieldValidation + } if o.Raw != nil { uo.Raw = o.Raw } @@ -745,6 +822,24 @@ type PatchOptions struct { // this request. It must be set with server-side apply. FieldManager string + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. + FieldValidation string + // Raw represents raw PatchOptions, as passed to the API server. 
Raw *metav1.PatchOptions } @@ -771,6 +866,7 @@ func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { o.Raw.DryRun = o.DryRun o.Raw.Force = o.Force o.Raw.FieldManager = o.FieldManager + o.Raw.FieldValidation = o.FieldValidation return o.Raw } @@ -787,6 +883,9 @@ func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { if o.FieldManager != "" { po.FieldManager = o.FieldManager } + if o.FieldValidation != "" { + po.FieldValidation = o.FieldValidation + } if o.Raw != nil { po.Raw = o.Raw } diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go index 5fdd657cd..49f6b149b 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go @@ -229,7 +229,6 @@ func UninstallCRDs(config *rest.Config, options CRDInstallOptions) error { // Uninstall each CRD for _, crd := range options.CRDs { - crd := crd log.V(1).Info("uninstalling CRD", "crd", crd.GetName()) if err := cs.Delete(context.TODO(), crd); err != nil { // If CRD is not found, we can consider success @@ -251,7 +250,6 @@ func CreateCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefin // Create each CRD for _, crd := range crds { - crd := crd log.V(1).Info("installing CRD", "crd", crd.GetName()) existingCrd := crd.DeepCopy() err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd) diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go index e4e54e472..f6bfe95cc 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/webhook.go @@ -294,10 +294,10 @@ func (o *WebhookInstallOptions) setupCA() error { return fmt.Errorf("unable to marshal webhook serving certs: %w", err) } - if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.crt"), certData, 0640); err != nil { //nolint:gosec + if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.crt"), certData, 0640); err != nil { return fmt.Errorf("unable to write webhook serving cert to disk: %w", err) } - if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.key"), keyData, 0640); err != nil { //nolint:gosec + if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.key"), keyData, 0640); err != nil { return fmt.Errorf("unable to write webhook serving key to disk: %w", err) } @@ -313,14 +313,12 @@ func createWebhooks(config *rest.Config, mutHooks []*admissionv1.MutatingWebhook // Create each webhook for _, hook := range mutHooks { - hook := hook log.V(1).Info("installing mutating webhook", "webhook", hook.GetName()) if err := ensureCreated(cs, hook); err != nil { return err } } for _, hook := range valHooks { - hook := hook log.V(1).Info("installing validating webhook", "webhook", hook.GetName()) if err := ensureCreated(cs, hook); err != nil { return err diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go index c9a1a232e..b3592eccf 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go @@ -384,10 +384,10 @@ func (s *APIServer) 
populateAPIServerCerts() error { return err } - if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.crt"), certData, 0640); err != nil { //nolint:gosec + if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.crt"), certData, 0640); err != nil { return err } - if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.key"), keyData, 0640); err != nil { //nolint:gosec + if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.key"), keyData, 0640); err != nil { return err } @@ -404,10 +404,10 @@ func (s *APIServer) populateAPIServerCerts() error { return err } - if err := os.WriteFile(filepath.Join(s.CertDir, saCertFile), saCert, 0640); err != nil { //nolint:gosec + if err := os.WriteFile(filepath.Join(s.CertDir, saCertFile), saCert, 0640); err != nil { return err } - return os.WriteFile(filepath.Join(s.CertDir, saKeyFile), saKey, 0640) //nolint:gosec + return os.WriteFile(filepath.Join(s.CertDir, saKeyFile), saKey, 0640) } // Stop stops this process gracefully, waits for its termination, and cleans up diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go index 16c86a712..b44035ebf 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/auth.go @@ -128,7 +128,7 @@ func (c *CertAuthn) Start() error { return fmt.Errorf("start called before configure") } caCrt := c.ca.CA.CertBytes() - if err := os.WriteFile(c.caCrtPath(), caCrt, 0640); err != nil { //nolint:gosec + if err := os.WriteFile(c.caCrtPath(), caCrt, 0640); err != nil { return fmt.Errorf("unable to save the client certificate CA to %s: %w", c.caCrtPath(), err) } diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go index 03f252524..0d541921e 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/process/process.go @@ -215,7 +215,7 @@ func pollURLUntilOK(url url.URL, interval time.Duration, ready chan bool, stopCh // there's probably certs *somewhere*, // but it's fine to just skip validating // them for health checks during testing - InsecureSkipVerify: true, //nolint:gosec + InsecureSkipVerify: true, }, }, } diff --git a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go index cff1de4c1..cd7ccc773 100644 --- a/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go +++ b/constraint/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go @@ -16,15 +16,6 @@ limitations under the License. package metrics -import ( - "github.com/prometheus/client_golang/prometheus" - "k8s.io/client-go/util/workqueue" -) - -// This file is copied and adapted from k8s.io/component-base/metrics/prometheus/workqueue -// which registers metrics to the k8s legacy Registry. We require very -// similar functionality, but must register metrics to a different Registry. - // Metrics subsystem and all keys used by the workqueue. 
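As an aside on the `crd := crd` and `hook := hook` deletions above: those per-iteration copies were only needed before Go 1.22, when a range loop reused a single variable across iterations. With the module now declaring a newer Go version, each iteration gets its own variable, so capturing it in a closure or taking its address is safe without the copy. A small illustrative sketch, not taken from the vendored code:

package main

import "fmt"

func main() {
	callbacks := make([]func(), 0, 3)
	for _, name := range []string{"a", "b", "c"} {
		// Before Go 1.22 this loop would have needed `name := name` here,
		// otherwise every closure would observe the final value "c".
		callbacks = append(callbacks, func() { fmt.Println(name) })
	}
	for _, cb := range callbacks {
		cb() // prints a, b, c under per-iteration loop-variable semantics
	}
}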
const ( WorkQueueSubsystem = "workqueue" @@ -36,95 +27,3 @@ const ( LongestRunningProcessorKey = "longest_running_processor_seconds" RetriesKey = "retries_total" ) - -var ( - depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: WorkQueueSubsystem, - Name: DepthKey, - Help: "Current depth of workqueue", - }, []string{"name"}) - - adds = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: WorkQueueSubsystem, - Name: AddsKey, - Help: "Total number of adds handled by workqueue", - }, []string{"name"}) - - latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: WorkQueueSubsystem, - Name: QueueLatencyKey, - Help: "How long in seconds an item stays in workqueue before being requested", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12), - }, []string{"name"}) - - workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: WorkQueueSubsystem, - Name: WorkDurationKey, - Help: "How long in seconds processing an item from workqueue takes.", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12), - }, []string{"name"}) - - unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: WorkQueueSubsystem, - Name: UnfinishedWorkKey, - Help: "How many seconds of work has been done that " + - "is in progress and hasn't been observed by work_duration. Large " + - "values indicate stuck threads. One can deduce the number of stuck " + - "threads by observing the rate at which this increases.", - }, []string{"name"}) - - longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: WorkQueueSubsystem, - Name: LongestRunningProcessorKey, - Help: "How many seconds has the longest running " + - "processor for workqueue been running.", - }, []string{"name"}) - - retries = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: WorkQueueSubsystem, - Name: RetriesKey, - Help: "Total number of retries handled by workqueue", - }, []string{"name"}) -) - -func init() { - Registry.MustRegister(depth) - Registry.MustRegister(adds) - Registry.MustRegister(latency) - Registry.MustRegister(workDuration) - Registry.MustRegister(unfinished) - Registry.MustRegister(longestRunningProcessor) - Registry.MustRegister(retries) - - workqueue.SetProvider(workqueueMetricsProvider{}) -} - -type workqueueMetricsProvider struct{} - -func (workqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { - return depth.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { - return adds.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { - return latency.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { - return workDuration.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { - return unfinished.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { - return longestRunningProcessor.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { - return retries.WithLabelValues(name) -} diff --git a/constraint/vendor/sigs.k8s.io/json/Makefile b/constraint/vendor/sigs.k8s.io/json/Makefile index 07b8bfa85..fb6cf040f 100644 --- a/constraint/vendor/sigs.k8s.io/json/Makefile +++ 
b/constraint/vendor/sigs.k8s.io/json/Makefile @@ -19,7 +19,7 @@ vet: go vet sigs.k8s.io/json @echo "checking for external dependencies" - @deps=$$(go mod graph); \ + @deps=$$(go list -f '{{ if not (or .Standard .Module.Main) }}{{.ImportPath}}{{ end }}' -deps sigs.k8s.io/json/... || true); \ if [ -n "$${deps}" ]; then \ echo "only stdlib dependencies allowed, found:"; \ echo "$${deps}"; \ diff --git a/constraint/vendor/sigs.k8s.io/json/OWNERS b/constraint/vendor/sigs.k8s.io/json/OWNERS index 0fadafbdd..a08a434e6 100644 --- a/constraint/vendor/sigs.k8s.io/json/OWNERS +++ b/constraint/vendor/sigs.k8s.io/json/OWNERS @@ -2,5 +2,5 @@ approvers: - deads2k - - lavalamp + - jpbetz - liggitt diff --git a/constraint/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/constraint/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index 6a13cf2df..d538ac119 100644 --- a/constraint/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/constraint/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -21,10 +21,10 @@ import ( // Unmarshal parses the JSON-encoded data and stores the result // in the value pointed to by v. If v is nil or not a pointer, -// Unmarshal returns an InvalidUnmarshalError. +// Unmarshal returns an [InvalidUnmarshalError]. // // Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, +// [Marshal] uses, allocating maps, slices, and pointers as necessary, // with the following additional rules: // // To unmarshal JSON into a pointer, Unmarshal first handles the case of @@ -33,28 +33,28 @@ import ( // the value pointed at by the pointer. If the pointer is nil, Unmarshal // allocates a new value for it to point to. // -// To unmarshal JSON into a value implementing the Unmarshaler interface, -// Unmarshal calls that value's UnmarshalJSON method, including +// To unmarshal JSON into a value implementing [Unmarshaler], +// Unmarshal calls that value's [Unmarshaler.UnmarshalJSON] method, including // when the input is a JSON null. -// Otherwise, if the value implements encoding.TextUnmarshaler -// and the input is a JSON quoted string, Unmarshal calls that value's -// UnmarshalText method with the unquoted form of the string. +// Otherwise, if the value implements [encoding.TextUnmarshaler] +// and the input is a JSON quoted string, Unmarshal calls +// [encoding.TextUnmarshaler.UnmarshalText] with the unquoted form of the string. // // To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), +// keys to the keys used by [Marshal] (either the struct field name or its tag), // preferring an exact match but also accepting a case-insensitive match. By // default, object keys which don't have a corresponding struct field are -// ignored (see Decoder.DisallowUnknownFields for an alternative). +// ignored (see [Decoder.DisallowUnknownFields] for an alternative). 
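The two behaviors described just above are easy to see with the standard encoding/json package, whose default semantics this vendored fork mirrors: key matching falls back to a case-insensitive comparison, and unknown keys are dropped unless the decoder is told otherwise. A brief illustrative sketch, not part of the vendored sources:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type Config struct {
	Name string `json:"name"`
}

func main() {
	var c Config
	// "NAME" still matches the "name" field via the case-insensitive fallback,
	// and the unknown "extra" key is silently ignored.
	_ = json.Unmarshal([]byte(`{"NAME":"demo","extra":1}`), &c)
	fmt.Println(c.Name) // demo

	// The stricter alternative referenced in the comment above:
	dec := json.NewDecoder(bytes.NewReader([]byte(`{"extra":1}`)))
	dec.DisallowUnknownFields()
	fmt.Println(dec.Decode(&c)) // json: unknown field "extra"
}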
// // To unmarshal JSON into an interface value, // Unmarshal stores one of these in the interface value: // -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null +// - bool, for JSON booleans +// - float64, for JSON numbers +// - string, for JSON strings +// - []interface{}, for JSON arrays +// - map[string]interface{}, for JSON objects +// - nil for JSON null // // To unmarshal a JSON array into a slice, Unmarshal resets the slice length // to zero and then appends each element to the slice. @@ -72,16 +72,15 @@ import ( // use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal // reuses the existing map, keeping existing entries. Unmarshal then stores // key-value pairs from the JSON object into the map. The map's key type must -// either be any string type, an integer, implement json.Unmarshaler, or -// implement encoding.TextUnmarshaler. +// either be any string type, an integer, or implement [encoding.TextUnmarshaler]. // -// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// If the JSON-encoded data contain a syntax error, Unmarshal returns a [SyntaxError]. // // If a JSON value is not appropriate for a given target type, // or if a JSON number overflows the target type, Unmarshal // skips that field and completes the unmarshaling as best it can. // If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. In any +// an [UnmarshalTypeError] describing the earliest such error. In any // case, it's not guaranteed that all the remaining fields following // the problematic one will be unmarshaled into the target object. // @@ -119,7 +118,7 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // a JSON value. UnmarshalJSON must copy the JSON data // if it wishes to retain the data after returning. // -// By convention, to approximate the behavior of Unmarshal itself, +// By convention, to approximate the behavior of [Unmarshal] itself, // Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. type Unmarshaler interface { UnmarshalJSON([]byte) error @@ -157,8 +156,8 @@ func (e *UnmarshalFieldError) Error() string { return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() } -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) +// An InvalidUnmarshalError describes an invalid argument passed to [Unmarshal]. +// (The argument to [Unmarshal] must be a non-nil pointer.) type InvalidUnmarshalError struct { Type reflect.Type } @@ -573,17 +572,10 @@ func (d *decodeState) array(v reflect.Value) error { break } - // Get element of array, growing if necessary. + // Expand slice length, growing the slice if necessary. if v.Kind() == reflect.Slice { - // Grow slice if necessary if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) + v.Grow(1) } if i >= v.Len() { v.SetLen(i + 1) @@ -620,13 +612,11 @@ func (d *decodeState) array(v reflect.Value) error { if i < v.Len() { if v.Kind() == reflect.Array { - // Array. Zero the rest. 
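A side note on the decoder changes in this hunk: the hand-rolled slice growth and zeroing are replaced with newer reflect helpers, Value.Grow and Value.SetZero (added in Go 1.20) and reflect.TypeFor (added in Go 1.22). A minimal sketch of what they do, not taken from the vendored code:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	s := []int{1, 2}
	v := reflect.ValueOf(&s).Elem()

	v.Grow(1)             // ensure capacity for at least one more element
	v.SetLen(v.Len() + 1) // extend the slice into the grown capacity
	v.Index(2).SetInt(7)  // s is now [1 2 7]

	v.Index(0).SetZero() // zero an element in place; replaces Set(reflect.Zero(t))

	fmt.Println(s)                        // [0 2 7]
	fmt.Println(reflect.TypeFor[[]int]()) // []int; replaces reflect.TypeOf([]int(nil))
}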
- z := reflect.Zero(v.Type().Elem()) for ; i < v.Len(); i++ { - v.Index(i).Set(z) + v.Index(i).SetZero() // zero remainder of array } } else { - v.SetLen(i) + v.SetLen(i) // truncate the slice } } if i == 0 && v.Kind() == reflect.Slice { @@ -636,7 +626,7 @@ func (d *decodeState) array(v reflect.Value) error { } var nullLiteral = []byte("null") -var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]() // object consumes an object from d.data[d.off-1:], decoding into v. // The first byte ('{') of the object has been read already. @@ -776,7 +766,7 @@ func (d *decodeState) object(v reflect.Value) error { if !mapElem.IsValid() { mapElem = reflect.New(elemType).Elem() } else { - mapElem.Set(reflect.Zero(elemType)) + mapElem.SetZero() } subv = mapElem if checkDuplicateField != nil { @@ -784,28 +774,14 @@ func (d *decodeState) object(v reflect.Value) error { } d.appendStrictFieldStackKey(string(key)) } else { - var f *field - if i, ok := fields.nameIndex[string(key)]; ok { - // Found an exact name match. - f = &fields.list[i] - if checkDuplicateField != nil { - checkDuplicateField(i, f.name) - } - } else if !d.caseSensitive { - // Fall back to the expensive case-insensitive - // linear search. - for i := range fields.list { - ff := &fields.list[i] - if ff.equalFold(ff.nameBytes, key) { - f = ff - if checkDuplicateField != nil { - checkDuplicateField(i, f.name) - } - break - } - } + f := fields.byExactName[string(key)] + if f == nil && !d.caseSensitive { + f = fields.byFoldedName[string(foldName(key))] } if f != nil { + if checkDuplicateField != nil { + checkDuplicateField(f.listIndex, f.name) + } subv = v destring = f.quoted for _, i := range f.index { @@ -874,33 +850,35 @@ func (d *decodeState) object(v reflect.Value) error { if v.Kind() == reflect.Map { kt := t.Key() var kv reflect.Value - switch { - case reflect.PointerTo(kt).Implements(textUnmarshalerType): + if reflect.PointerTo(kt).Implements(textUnmarshalerType) { kv = reflect.New(kt) if err := d.literalStore(item, kv, true); err != nil { return err } kv = kv.Elem() - case kt.Kind() == reflect.String: - kv = reflect.ValueOf(key).Convert(kt) - default: + } else { switch kt.Kind() { + case reflect.String: + kv = reflect.New(kt).Elem() + kv.SetString(string(key)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: s := string(key) n, err := strconv.ParseInt(s, 10, 64) - if err != nil || reflect.Zero(kt).OverflowInt(n) { + if err != nil || kt.OverflowInt(n) { d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) break } - kv = reflect.ValueOf(n).Convert(kt) + kv = reflect.New(kt).Elem() + kv.SetInt(n) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: s := string(key) n, err := strconv.ParseUint(s, 10, 64) - if err != nil || reflect.Zero(kt).OverflowUint(n) { + if err != nil || kt.OverflowUint(n) { d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) break } - kv = reflect.ValueOf(n).Convert(kt) + kv = reflect.New(kt).Elem() + kv.SetUint(n) default: panic("json: Unexpected key type") // should never occur } @@ -950,12 +928,12 @@ func (d *decodeState) convertNumber(s string) (any, error) { f, err := strconv.ParseFloat(s, 64) if err != nil { - return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)} + return nil, &UnmarshalTypeError{Value: "number " + s, Type: 
reflect.TypeFor[float64](), Offset: int64(d.off)} } return f, nil } -var numberType = reflect.TypeOf(Number("")) +var numberType = reflect.TypeFor[Number]() // literalStore decodes a literal stored in item into v. // @@ -965,7 +943,7 @@ var numberType = reflect.TypeOf(Number("")) func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { // Check for unmarshaler. if len(item) == 0 { - //Empty string given + // Empty string given. d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) return nil } @@ -1012,7 +990,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool } switch v.Kind() { case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) + v.SetZero() // otherwise, ignore null for primitives/string } case 't', 'f': // true, false @@ -1064,10 +1042,11 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool } v.SetBytes(b[:n]) case reflect.String: - if v.Type() == numberType && !isValidNumber(string(s)) { + t := string(s) + if v.Type() == numberType && !isValidNumber(t) { return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) } - v.SetString(string(s)) + v.SetString(t) case reflect.Interface: if v.NumMethod() == 0 { v.Set(reflect.ValueOf(string(s))) @@ -1083,13 +1062,12 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool } panic(phasePanicMsg) } - s := string(item) switch v.Kind() { default: if v.Kind() == reflect.String && v.Type() == numberType { // s must be a valid number, because it's // already been tokenized. - v.SetString(s) + v.SetString(string(item)) break } if fromQuoted { @@ -1097,7 +1075,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool } d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) case reflect.Interface: - n, err := d.convertNumber(s) + n, err := d.convertNumber(string(item)) if err != nil { d.saveError(err) break @@ -1109,25 +1087,25 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool v.Set(reflect.ValueOf(n)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) + n, err := strconv.ParseInt(string(item), 10, 64) if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())}) break } v.SetInt(n) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) + n, err := strconv.ParseUint(string(item), 10, 64) if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())}) break } v.SetUint(n) case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) + n, err := strconv.ParseFloat(string(item), v.Type().Bits()) if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: 
int64(d.readIndex())}) break } v.SetFloat(n) diff --git a/constraint/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/constraint/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go index 5b67251fb..eb73bff58 100644 --- a/constraint/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go +++ b/constraint/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go @@ -12,12 +12,13 @@ package json import ( "bytes" + "cmp" "encoding" "encoding/base64" "fmt" "math" "reflect" - "sort" + "slices" "strconv" "strings" "sync" @@ -28,29 +29,30 @@ import ( // Marshal returns the JSON encoding of v. // // Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. If no MarshalJSON method is present but the -// value implements encoding.TextMarshaler instead, Marshal calls -// its MarshalText method and encodes the result as a JSON string. +// If an encountered value implements [Marshaler] +// and is not a nil pointer, Marshal calls [Marshaler.MarshalJSON] +// to produce JSON. If no [Marshaler.MarshalJSON] method is present but the +// value implements [encoding.TextMarshaler] instead, Marshal calls +// [encoding.TextMarshaler.MarshalText] and encodes the result as a JSON string. // The nil pointer exception is not strictly necessary // but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. +// [Unmarshaler.UnmarshalJSON]. // // Otherwise, Marshal uses the following type-dependent default encodings: // // Boolean values encode as JSON booleans. // -// Floating point, integer, and Number values encode as JSON numbers. +// Floating point, integer, and [Number] values encode as JSON numbers. +// NaN and +/-Inf values will return an [UnsupportedValueError]. // // String values encode as JSON strings coerced to valid UTF-8, // replacing invalid bytes with the Unicode replacement rune. // So that the JSON will be safe to embed inside HTML